after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def core():
shutting_down = utils.shutting_down # early bind
def module_unload(handle):
# If we are not shutting down, we must be called due to
# Context.reset() of Context.unload_module(). Both must have
# cleared the module reference from the context.
assert shutting_down() or handle.value not in modules
driver.cuModuleUnload(handle)
if dealloc is not None:
dealloc.add_item(module_unload, handle)
else:
# Check the impossible case.
assert shutting_down(), "dealloc is None but interpreter is not being shutdown!"
|
def core():
shutting_down = utils.shutting_down # early bind
def module_unload(handle):
# If we are not shutting down, we must be called due to
# Context.reset() of Context.unload_module(). Both must have
# cleared the module reference from the context.
assert shutting_down() or handle.value not in modules
driver.cuModuleUnload(handle)
dealloc.add_item(module_unload, handle)
|
https://github.com/numba/numba/issues/4352
|
Traceback (most recent call last):
File "/conda/envs/gdf/lib/python3.6/site-packages/numba/utils.py", line 745, in _exitfunc
f()
File "/conda/envs/gdf/lib/python3.6/site-packages/numba/utils.py", line 669, in __call__
return info.func(*info.args, **(info.kwargs or {}))
File "/conda/envs/gdf/lib/python3.6/site-packages/numba/cuda/cudadrv/driver.py", line 1037, in core
dealloc.add_item(module_unload, handle)
AttributeError: 'NoneType' object has no attribute 'add_item'
|
AttributeError
|
def _module_finalizer(context, handle):
dealloc = context.deallocations
modules = context.modules
def core():
shutting_down = utils.shutting_down # early bind
def module_unload(handle):
# If we are not shutting down, we must be called due to
# Context.reset() of Context.unload_module(). Both must have
# cleared the module reference from the context.
assert shutting_down() or handle.value not in modules
driver.cuModuleUnload(handle)
dealloc.add_item(module_unload, handle)
return core
|
def _module_finalizer(context, handle):
dealloc = context.deallocations
modules = context.modules
def core():
shutting_down = utils.shutting_down # early bind
def module_unload(handle):
# If we are not shutting down, we must be called due to
# Context.reset() of Context.unload_module(). Both must have
# cleared the module reference from the context.
assert shutting_down() or handle.value not in modules
driver.cuModuleUnload(handle)
if dealloc is not None:
dealloc.add_item(module_unload, handle)
else:
# Check the impossible case.
assert shutting_down(), (
"dealloc is None but interpreter is not being shutdown!"
)
return core
|
https://github.com/numba/numba/issues/4352
|
Traceback (most recent call last):
File "/conda/envs/gdf/lib/python3.6/site-packages/numba/utils.py", line 745, in _exitfunc
f()
File "/conda/envs/gdf/lib/python3.6/site-packages/numba/utils.py", line 669, in __call__
return info.func(*info.args, **(info.kwargs or {}))
File "/conda/envs/gdf/lib/python3.6/site-packages/numba/cuda/cudadrv/driver.py", line 1037, in core
dealloc.add_item(module_unload, handle)
AttributeError: 'NoneType' object has no attribute 'add_item'
|
AttributeError
|
def core():
shutting_down = utils.shutting_down # early bind
def module_unload(handle):
# If we are not shutting down, we must be called due to
# Context.reset() of Context.unload_module(). Both must have
# cleared the module reference from the context.
assert shutting_down() or handle.value not in modules
driver.cuModuleUnload(handle)
dealloc.add_item(module_unload, handle)
|
def core():
shutting_down = utils.shutting_down # early bind
def module_unload(handle):
# If we are not shutting down, we must be called due to
# Context.reset() of Context.unload_module(). Both must have
# cleared the module reference from the context.
assert shutting_down() or handle.value not in modules
driver.cuModuleUnload(handle)
if dealloc is not None:
dealloc.add_item(module_unload, handle)
else:
# Check the impossible case.
assert shutting_down(), "dealloc is None but interpreter is not being shutdown!"
|
https://github.com/numba/numba/issues/4352
|
Traceback (most recent call last):
File "/conda/envs/gdf/lib/python3.6/site-packages/numba/utils.py", line 745, in _exitfunc
f()
File "/conda/envs/gdf/lib/python3.6/site-packages/numba/utils.py", line 669, in __call__
return info.func(*info.args, **(info.kwargs or {}))
File "/conda/envs/gdf/lib/python3.6/site-packages/numba/cuda/cudadrv/driver.py", line 1037, in core
dealloc.add_item(module_unload, handle)
AttributeError: 'NoneType' object has no attribute 'add_item'
|
AttributeError
|
def raise_on_unsupported_feature(func_ir, typemap):
"""
Helper function to walk IR and raise if it finds op codes
that are unsupported. Could be extended to cover IR sequences
as well as op codes. Intended use is to call it as a pipeline
stage just prior to lowering to prevent LoweringErrors for known
unsupported features.
"""
gdb_calls = [] # accumulate calls to gdb/gdb_init
# issue 2195: check for excessively large tuples
for arg_name in func_ir.arg_names:
if (
arg_name in typemap
and isinstance(typemap[arg_name], types.containers.UniTuple)
and typemap[arg_name].count > 1000
):
# Raise an exception when len(tuple) > 1000. The choice of this number (1000)
# was entirely arbitrary
msg = (
"Tuple '{}' length must be smaller than 1000.\n"
"Large tuples lead to the generation of a prohibitively large "
"LLVM IR which causes excessive memory pressure "
"and large compile times.\n"
"As an alternative, the use of a 'list' is recommended in "
"place of a 'tuple' as lists do not suffer from this problem.".format(
arg_name
)
)
raise UnsupportedError(msg, func_ir.loc)
for blk in func_ir.blocks.values():
for stmt in blk.find_insts(ir.Assign):
# This raises on finding `make_function`
if isinstance(stmt.value, ir.Expr):
if stmt.value.op == "make_function":
val = stmt.value
# See if the construct name can be refined
code = getattr(val, "code", None)
if code is not None:
# check if this is a closure, the co_name will
# be the captured function name which is not
# useful so be explicit
if getattr(val, "closure", None) is not None:
use = "<creating a function from a closure>"
expr = ""
else:
use = code.co_name
expr = "(%s) " % use
else:
use = "<could not ascertain use case>"
expr = ""
msg = (
"Numba encountered the use of a language "
"feature it does not support in this context: "
"%s (op code: make_function not supported). If "
"the feature is explicitly supported it is "
"likely that the result of the expression %s"
"is being used in an unsupported manner."
) % (use, expr)
raise UnsupportedError(msg, stmt.value.loc)
# this checks for gdb initilization calls, only one is permitted
if isinstance(stmt.value, (ir.Global, ir.FreeVar)):
val = stmt.value
val = getattr(val, "value", None)
if val is None:
continue
# check global function
found = False
if isinstance(val, pytypes.FunctionType):
found = val in {numba.gdb, numba.gdb_init}
if not found: # freevar bind to intrinsic
found = getattr(val, "_name", "") == "gdb_internal"
if found:
gdb_calls.append(stmt.loc) # report last seen location
# this checks that np.<type> was called if view is called
if isinstance(stmt.value, ir.Expr):
if stmt.value.op == "getattr" and stmt.value.attr == "view":
var = stmt.value.value.name
if isinstance(typemap[var], types.Array):
continue
df = func_ir.get_definition(var)
cn = guard(find_callname, func_ir, df)
if cn and cn[1] == "numpy":
ty = getattr(numpy, cn[0])
if numpy.issubdtype(ty, numpy.integer) or numpy.issubdtype(
ty, numpy.floating
):
continue
vardescr = "" if var.startswith("$") else "'{}' ".format(var)
raise TypingError(
"'view' can only be called on NumPy dtypes, "
"try wrapping the variable {}with 'np.<dtype>()'".format(
vardescr
),
loc=stmt.loc,
)
# checks for globals that are also reflected
if isinstance(stmt.value, ir.Global):
ty = typemap[stmt.target.name]
msg = (
"The use of a %s type, assigned to variable '%s' in "
"globals, is not supported as globals are considered "
"compile-time constants and there is no known way to "
"compile a %s type as a constant."
)
if getattr(ty, "reflected", False) or isinstance(ty, types.DictType):
raise TypingError(msg % (ty, stmt.value.name, ty), loc=stmt.loc)
# There is more than one call to function gdb/gdb_init
if len(gdb_calls) > 1:
msg = (
"Calling either numba.gdb() or numba.gdb_init() more than once "
"in a function is unsupported (strange things happen!), use "
"numba.gdb_breakpoint() to create additional breakpoints "
"instead.\n\nRelevant documentation is available here:\n"
"http://numba.pydata.org/numba-doc/latest/user/troubleshoot.html"
"/troubleshoot.html#using-numba-s-direct-gdb-bindings-in-"
"nopython-mode\n\nConflicting calls found at:\n %s"
)
buf = "\n".join([x.strformat() for x in gdb_calls])
raise UnsupportedError(msg % buf)
|
def raise_on_unsupported_feature(func_ir, typemap):
"""
Helper function to walk IR and raise if it finds op codes
that are unsupported. Could be extended to cover IR sequences
as well as op codes. Intended use is to call it as a pipeline
stage just prior to lowering to prevent LoweringErrors for known
unsupported features.
"""
gdb_calls = [] # accumulate calls to gdb/gdb_init
# issue 2195: check for excessively large tuples
for arg_name in func_ir.arg_names:
if (
arg_name in typemap
and isinstance(typemap[arg_name], types.containers.UniTuple)
and typemap[arg_name].count > 1000
):
# Raise an exception when len(tuple) > 1000. The choice of this number (1000)
# was entirely arbitrary
msg = (
"Tuple '{}' length must be smaller than 1000.\n"
"Large tuples lead to the generation of a prohibitively large "
"LLVM IR which causes excessive memory pressure "
"and large compile times.\n"
"As an alternative, the use of a 'list' is recommended in "
"place of a 'tuple' as lists do not suffer from this problem.".format(
arg_name
)
)
raise UnsupportedError(msg, func_ir.loc)
for blk in func_ir.blocks.values():
for stmt in blk.find_insts(ir.Assign):
# This raises on finding `make_function`
if isinstance(stmt.value, ir.Expr):
if stmt.value.op == "make_function":
val = stmt.value
# See if the construct name can be refined
code = getattr(val, "code", None)
if code is not None:
# check if this is a closure, the co_name will
# be the captured function name which is not
# useful so be explicit
if getattr(val, "closure", None) is not None:
use = "<creating a function from a closure>"
expr = ""
else:
use = code.co_name
expr = "(%s) " % use
else:
use = "<could not ascertain use case>"
expr = ""
msg = (
"Numba encountered the use of a language "
"feature it does not support in this context: "
"%s (op code: make_function not supported). If "
"the feature is explicitly supported it is "
"likely that the result of the expression %s"
"is being used in an unsupported manner."
) % (use, expr)
raise UnsupportedError(msg, stmt.value.loc)
# this checks for gdb initilization calls, only one is permitted
if isinstance(stmt.value, (ir.Global, ir.FreeVar)):
val = stmt.value
val = getattr(val, "value", None)
if val is None:
continue
# check global function
found = False
if isinstance(val, pytypes.FunctionType):
found = val in {numba.gdb, numba.gdb_init}
if not found: # freevar bind to intrinsic
found = getattr(val, "_name", "") == "gdb_internal"
if found:
gdb_calls.append(stmt.loc) # report last seen location
# this checks that np.<type> was called if view is called
if isinstance(stmt.value, ir.Expr):
if stmt.value.op == "getattr" and stmt.value.attr == "view":
var = stmt.value.value.name
if isinstance(typemap[var], types.Array):
continue
df = func_ir.get_definition(var)
cn = guard(find_callname, func_ir, df)
if cn and cn[1] == "numpy":
ty = getattr(numpy, cn[0])
if numpy.issubdtype(ty, numpy.integer) or numpy.issubdtype(
ty, numpy.floating
):
continue
vardescr = "" if var.startswith("$") else "'{}' ".format(var)
raise TypingError(
"'view' can only be called on NumPy dtypes, "
"try wrapping the variable {}with 'np.<dtype>()'".format(
vardescr
),
loc=stmt.loc,
)
# checks for globals that are also reflected
if isinstance(stmt.value, ir.Global):
ty = typemap[stmt.target.name]
msg = (
"Writing to a %s defined in globals is not "
"supported as globals are considered compile-time "
"constants."
)
if getattr(ty, "reflected", False) or isinstance(ty, types.DictType):
raise TypingError(msg % ty, loc=stmt.loc)
# There is more than one call to function gdb/gdb_init
if len(gdb_calls) > 1:
msg = (
"Calling either numba.gdb() or numba.gdb_init() more than once "
"in a function is unsupported (strange things happen!), use "
"numba.gdb_breakpoint() to create additional breakpoints "
"instead.\n\nRelevant documentation is available here:\n"
"http://numba.pydata.org/numba-doc/latest/user/troubleshoot.html"
"/troubleshoot.html#using-numba-s-direct-gdb-bindings-in-"
"nopython-mode\n\nConflicting calls found at:\n %s"
)
buf = "\n".join([x.strformat() for x in gdb_calls])
raise UnsupportedError(msg % buf)
|
https://github.com/numba/numba/issues/4143
|
---------------------------------------------------------------------------
NotImplementedError Traceback (most recent call last)
~/Miscelania/anaconda3/lib/python3.6/site-packages/numba/targets/base.py in get_constant_generic(self, builder, ty, val)
498 try:
--> 499 impl = self._get_constants.find((ty,))
500 return impl(self, builder, ty, val)
~/Miscelania/anaconda3/lib/python3.6/site-packages/numba/targets/base.py in find(self, sig)
49 if out is None:
---> 50 out = self._find(sig)
51 self._cache[sig] = out
~/Miscelania/anaconda3/lib/python3.6/site-packages/numba/targets/base.py in _find(self, sig)
58 else:
---> 59 raise NotImplementedError(self, sig)
60
NotImplementedError: (<numba.targets.base.OverloadSelector object at 0x7ff2ff01e7f0>, (reflected list(reflected list(array(int64, 2d, C))),))
During handling of the above exception, another exception occurred:
NotImplementedError Traceback (most recent call last)
~/Miscelania/anaconda3/lib/python3.6/site-packages/numba/errors.py in new_error_context(fmt_, *args, **kwargs)
626 try:
--> 627 yield
628 except NumbaError as e:
~/Miscelania/anaconda3/lib/python3.6/site-packages/numba/lowering.py in lower_block(self, block)
257 loc=self.loc, errcls_=defaulterrcls):
--> 258 self.lower_inst(inst)
259
~/Miscelania/anaconda3/lib/python3.6/site-packages/numba/lowering.py in lower_inst(self, inst)
300 ty = self.typeof(inst.target.name)
--> 301 val = self.lower_assign(ty, inst)
302 self.storevar(val, inst.target.name)
~/Miscelania/anaconda3/lib/python3.6/site-packages/numba/lowering.py in lower_assign(self, ty, inst)
448 res = self.context.get_constant_generic(self.builder, ty,
--> 449 value.value)
450 self.incref(ty, res)
~/Miscelania/anaconda3/lib/python3.6/site-packages/numba/targets/base.py in get_constant_generic(self, builder, ty, val)
501 except NotImplementedError:
--> 502 raise NotImplementedError("Cannot lower constant of type '%s'" % (ty,))
503
NotImplementedError: Cannot lower constant of type 'reflected list(reflected list(array(int64, 2d, C)))'
During handling of the above exception, another exception occurred:
LoweringError Traceback (most recent call last)
<ipython-input-8-a8e8627a1b30> in <module>
----> 1 T_matrix(0)
~/Miscelania/anaconda3/lib/python3.6/site-packages/numba/dispatcher.py in _compile_for_args(self, *args, **kws)
367 e.patch_message(''.join(e.args) + help_msg)
368 # ignore the FULL_TRACEBACKS config, this needs reporting!
--> 369 raise e
370
371 def inspect_llvm(self, signature=None):
~/Miscelania/anaconda3/lib/python3.6/site-packages/numba/dispatcher.py in _compile_for_args(self, *args, **kws)
324 argtypes.append(self.typeof_pyval(a))
325 try:
--> 326 return self.compile(tuple(argtypes))
327 except errors.TypingError as e:
328 # Intercept typing error that may be due to an argument
~/Miscelania/anaconda3/lib/python3.6/site-packages/numba/compiler_lock.py in _acquire_compile_lock(*args, **kwargs)
30 def _acquire_compile_lock(*args, **kwargs):
31 with self:
---> 32 return func(*args, **kwargs)
33 return _acquire_compile_lock
34
~/Miscelania/anaconda3/lib/python3.6/site-packages/numba/dispatcher.py in compile(self, sig)
656
657 self._cache_misses[sig] += 1
--> 658 cres = self._compiler.compile(args, return_type)
659 self.add_overload(cres)
660 self._cache.save_overload(sig, cres)
~/Miscelania/anaconda3/lib/python3.6/site-packages/numba/dispatcher.py in compile(self, args, return_type)
80 args=args, return_type=return_type,
81 flags=flags, locals=self.locals,
---> 82 pipeline_class=self.pipeline_class)
83 # Check typing error if object mode is used
84 if cres.typing_error is not None and not flags.enable_pyobject:
~/Miscelania/anaconda3/lib/python3.6/site-packages/numba/compiler.py in compile_extra(typingctx, targetctx, func, args, return_type, flags, locals, library, pipeline_class)
939 pipeline = pipeline_class(typingctx, targetctx, library,
940 args, return_type, flags, locals)
--> 941 return pipeline.compile_extra(func)
942
943
~/Miscelania/anaconda3/lib/python3.6/site-packages/numba/compiler.py in compile_extra(self, func)
370 self.lifted = ()
371 self.lifted_from = None
--> 372 return self._compile_bytecode()
373
374 def compile_ir(self, func_ir, lifted=(), lifted_from=None):
~/Miscelania/anaconda3/lib/python3.6/site-packages/numba/compiler.py in _compile_bytecode(self)
870 """
871 assert self.func_ir is None
--> 872 return self._compile_core()
873
874 def _compile_ir(self):
~/Miscelania/anaconda3/lib/python3.6/site-packages/numba/compiler.py in _compile_core(self)
857 self.define_pipelines(pm)
858 pm.finalize()
--> 859 res = pm.run(self.status)
860 if res is not None:
861 # Early pipeline completion
~/Miscelania/anaconda3/lib/python3.6/site-packages/numba/compiler_lock.py in _acquire_compile_lock(*args, **kwargs)
30 def _acquire_compile_lock(*args, **kwargs):
31 with self:
---> 32 return func(*args, **kwargs)
33 return _acquire_compile_lock
34
~/Miscelania/anaconda3/lib/python3.6/site-packages/numba/compiler.py in run(self, status)
251 # No more fallback pipelines?
252 if is_final_pipeline:
--> 253 raise patched_exception
254 # Go to next fallback pipeline
255 else:
~/Miscelania/anaconda3/lib/python3.6/site-packages/numba/compiler.py in run(self, status)
242 try:
243 event(stage_name)
--> 244 stage()
245 except _EarlyPipelineCompletion as e:
246 return e.result
~/Miscelania/anaconda3/lib/python3.6/site-packages/numba/compiler.py in stage_nopython_backend(self)
729 """
730 lowerfn = self.backend_nopython_mode
--> 731 self._backend(lowerfn, objectmode=False)
732
733 def stage_compile_interp_mode(self):
~/Miscelania/anaconda3/lib/python3.6/site-packages/numba/compiler.py in _backend(self, lowerfn, objectmode)
679 self.library.enable_object_caching()
680
--> 681 lowered = lowerfn()
682 signature = typing.signature(self.return_type, *self.args)
683 self.cr = compile_result(
~/Miscelania/anaconda3/lib/python3.6/site-packages/numba/compiler.py in backend_nopython_mode(self)
666 self.calltypes,
667 self.flags,
--> 668 self.metadata)
669
670 def _backend(self, lowerfn, objectmode):
~/Miscelania/anaconda3/lib/python3.6/site-packages/numba/compiler.py in native_lowering_stage(targetctx, library, interp, typemap, restype, calltypes, flags, metadata)
1061 lower = lowering.Lower(targetctx, library, fndesc, interp,
1062 metadata=metadata)
-> 1063 lower.lower()
1064 if not flags.no_cpython_wrapper:
1065 lower.create_cpython_wrapper(flags.release_gil)
~/Miscelania/anaconda3/lib/python3.6/site-packages/numba/lowering.py in lower(self)
175 if self.generator_info is None:
176 self.genlower = None
--> 177 self.lower_normal_function(self.fndesc)
178 else:
179 self.genlower = self.GeneratorLower(self)
~/Miscelania/anaconda3/lib/python3.6/site-packages/numba/lowering.py in lower_normal_function(self, fndesc)
216 # Init argument values
217 self.extract_function_arguments()
--> 218 entry_block_tail = self.lower_function_body()
219
220 # Close tail of entry block
~/Miscelania/anaconda3/lib/python3.6/site-packages/numba/lowering.py in lower_function_body(self)
241 bb = self.blkmap[offset]
242 self.builder.position_at_end(bb)
--> 243 self.lower_block(block)
244
245 self.post_lower()
~/Miscelania/anaconda3/lib/python3.6/site-packages/numba/lowering.py in lower_block(self, block)
256 with new_error_context('lowering "{inst}" at {loc}', inst=inst,
257 loc=self.loc, errcls_=defaulterrcls):
--> 258 self.lower_inst(inst)
259
260 def create_cpython_wrapper(self, release_gil=False):
~/Miscelania/anaconda3/lib/python3.6/contextlib.py in __exit__(self, type, value, traceback)
97 value = type()
98 try:
---> 99 self.gen.throw(type, value, traceback)
100 except StopIteration as exc:
101 # Suppress StopIteration *unless* it's the same exception that
~/Miscelania/anaconda3/lib/python3.6/site-packages/numba/errors.py in new_error_context(fmt_, *args, **kwargs)
633 from numba import config
634 tb = sys.exc_info()[2] if config.FULL_TRACEBACKS else None
--> 635 six.reraise(type(newerr), newerr, tb)
636
637
~/Miscelania/anaconda3/lib/python3.6/site-packages/numba/six.py in reraise(tp, value, tb)
657 if value.__traceback__ is not tb:
658 raise value.with_traceback(tb)
--> 659 raise value
660
661 else:
LoweringError: Failed in nopython mode pipeline (step: nopython mode backend)
Cannot lower constant of type 'reflected list(reflected list(array(int64, 2d, C)))'
File "<ipython-input-6-c8c5dfd066cc>", line 17:
def T_matrix (phi):
return ts[0][0]*np.cos(phi) - 1j*ts[3][3]*np.sin(phi)
^
[1] During: lowering "$0.1 = global(ts: [[matrix([[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]]), matrix([[0, 1, 0, 0],
[1, 0, 0, 0],
[0, 0, 0, 1],
[0, 0, 1, 0]]), matrix([[0.+0.j, 0.-1.j, 0.+0.j, 0.-0.j],
[0.+1.j, 0.+0.j, 0.+0.j, 0.+0.j],
[0.+0.j, 0.-0.j, 0.+0.j, 0.-1.j],
[0.+0.j, 0.+0.j, 0.+1.j, 0.+0.j]]), matrix([[ 1, 0, 0, 0],
[ 0, -1, 0, 0],
[ 0, 0, 1, 0],
[ 0, 0, 0, -1]])], [matrix([[0, 0, 1, 0],
[0, 0, 0, 1],
[1, 0, 0, 0],
[0, 1, 0, 0]]), matrix([[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 1, 0, 0],
[1, 0, 0, 0]]), matrix([[0.+0.j, 0.-0.j, 0.+0.j, 0.-1.j],
[0.+0.j, 0.+0.j, 0.+1.j, 0.+0.j],
[0.+0.j, 0.-1.j, 0.+0.j, 0.-0.j],
[0.+1.j, 0.+0.j, 0.+0.j, 0.+0.j]]), matrix([[ 0, 0, 1, 0],
[ 0, 0, 0, -1],
[ 1, 0, 0, 0],
[ 0, -1, 0, 0]])], [matrix([[0.+0.j, 0.+0.j, 0.-1.j, 0.-0.j],
[0.+0.j, 0.+0.j, 0.-0.j, 0.-1.j],
[0.+1.j, 0.+0.j, 0.+0.j, 0.+0.j],
[0.+0.j, 0.+1.j, 0.+0.j, 0.+0.j]]), matrix([[0.+0.j, 0.+0.j, 0.-0.j, 0.-1.j],
[0.+0.j, 0.+0.j, 0.-1.j, 0.-0.j],
[0.+0.j, 0.+1.j, 0.+0.j, 0.+0.j],
[0.+1.j, 0.+0.j, 0.+0.j, 0.+0.j]]), matrix([[ 0.+0.j, 0.-0.j, 0.-0.j, -1.+0.j],
[ 0.+0.j, 0.+0.j, 1.-0.j, 0.-0.j],
[ 0.+0.j, 1.-0.j, 0.+0.j, 0.-0.j],
[-1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j]]), matrix([[ 0.+0.j, 0.+0.j, 0.-1.j, 0.-0.j],
[ 0.+0.j, -0.+0.j, 0.-0.j, 0.+1.j],
[ 0.+1.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, -0.-1.j, 0.+0.j, -0.+0.j]])], [matrix([[ 1, 0, 0, 0],
[ 0, 1, 0, 0],
[ 0, 0, -1, 0],
[ 0, 0, 0, -1]]), matrix([[ 0, 1, 0, 0],
[ 1, 0, 0, 0],
[ 0, 0, 0, -1],
[ 0, 0, -1, 0]]), matrix([[ 0.+0.j, 0.-1.j, 0.+0.j, 0.-0.j],
[ 0.+1.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.-0.j, -0.+0.j, 0.+1.j],
[ 0.+0.j, 0.+0.j, -0.-1.j, -0.+0.j]]), matrix([[ 1, 0, 0, 0],
[ 0, -1, 0, 0],
[ 0, 0, -1, 0],
[ 0, 0, 0, 1]])]])" at <ipython-input-6-c8c5dfd066cc> (17)
-------------------------------------------------------------------------------
This should not have happened, a problem has occurred in Numba's internals.
Please report the error message and traceback, along with a minimal reproducer
at: https://github.com/numba/numba/issues/new
If more help is needed please feel free to speak to the Numba core developers
directly at: https://gitter.im/numba/numba
Thanks in advance for your help in improving Numba!
|
NotImplementedError
|
def _find_definition(self):
# try and find a def, go backwards from error line
fn_name = None
lines = self.get_lines()
for x in reversed(lines[: self.line - 1]):
# the strip and startswith is to handle user code with commented out
# 'def' or use of 'def' in a docstring.
if x.strip().startswith("def "):
fn_name = x
break
return fn_name
|
def _find_definition(self):
# try and find a def, go backwards from error line
fn_name = None
lines = self.get_lines()
for x in reversed(lines[: self.line - 1]):
if "def " in x:
fn_name = x
break
return fn_name
|
https://github.com/numba/numba/issues/4056
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
~/.local/lib/python3.7/site-packages/numba/errors.py in new_error_context(fmt_, *args, **kwargs)
626 try:
--> 627 yield
628 except NumbaError as e:
~/.local/lib/python3.7/site-packages/numba/lowering.py in lower_block(self, block)
257 loc=self.loc, errcls_=defaulterrcls):
--> 258 self.lower_inst(inst)
259
~/.local/lib/python3.7/site-packages/numba/lowering.py in lower_inst(self, inst)
399 elif isinstance(inst, ir.StaticRaise):
--> 400 self.lower_static_raise(inst)
401
~/.local/lib/python3.7/site-packages/numba/lowering.py in lower_static_raise(self, inst)
441 else:
--> 442 self.return_exception(inst.exc_class, inst.exc_args, loc=self.loc)
443
~/.local/lib/python3.7/site-packages/numba/lowering.py in return_exception(self, exc_class, exc_args, loc)
162 loc=loc,
--> 163 func_name=self.func_ir.func_id.func_name)
164
~/.local/lib/python3.7/site-packages/numba/targets/callconv.py in return_user_exc(self, builder, exc, exc_args, loc, func_name)
364 if loc is not None:
--> 365 fname = loc._raw_function_name()
366 if fname is None:
~/.local/lib/python3.7/site-packages/numba/ir.py in _raw_function_name(self)
62 if defn:
---> 63 return self._defmatcher.match(defn.strip()).groups()[0]
64 else:
AttributeError: 'NoneType' object has no attribute 'groups'
During handling of the above exception, another exception occurred:
LoweringError Traceback (most recent call last)
<ipython-input-1-647b0e2d4c3b> in <module>
----> 1 userland stops at function call
~/.local/lib/python3.7/site-packages/numba/dispatcher.py in _compile_for_args(self, *args, **kws)
367 e.patch_message(''.join(e.args) + help_msg)
368 # ignore the FULL_TRACEBACKS config, this needs reporting!
--> 369 raise e
370
371 def inspect_llvm(self, signature=None):
~/.local/lib/python3.7/site-packages/numba/dispatcher.py in _compile_for_args(self, *args, **kws)
324 argtypes.append(self.typeof_pyval(a))
325 try:
--> 326 return self.compile(tuple(argtypes))
327 except errors.TypingError as e:
328 # Intercept typing error that may be due to an argument
~/.local/lib/python3.7/site-packages/numba/compiler_lock.py in _acquire_compile_lock(*args, **kwargs)
30 def _acquire_compile_lock(*args, **kwargs):
31 with self:
---> 32 return func(*args, **kwargs)
33 return _acquire_compile_lock
34
~/.local/lib/python3.7/site-packages/numba/dispatcher.py in compile(self, sig)
656
657 self._cache_misses[sig] += 1
--> 658 cres = self._compiler.compile(args, return_type)
659 self.add_overload(cres)
660 self._cache.save_overload(sig, cres)
~/.local/lib/python3.7/site-packages/numba/dispatcher.py in compile(self, args, return_type)
80 args=args, return_type=return_type,
81 flags=flags, locals=self.locals,
---> 82 pipeline_class=self.pipeline_class)
83 # Check typing error if object mode is used
84 if cres.typing_error is not None and not flags.enable_pyobject:
~/.local/lib/python3.7/site-packages/numba/compiler.py in compile_extra(typingctx, targetctx, func, args, return_type, flags, locals, library, pipeline_class)
939 pipeline = pipeline_class(typingctx, targetctx, library,
940 args, return_type, flags, locals)
--> 941 return pipeline.compile_extra(func)
942
943
~/.local/lib/python3.7/site-packages/numba/compiler.py in compile_extra(self, func)
370 self.lifted = ()
371 self.lifted_from = None
--> 372 return self._compile_bytecode()
373
374 def compile_ir(self, func_ir, lifted=(), lifted_from=None):
~/.local/lib/python3.7/site-packages/numba/compiler.py in _compile_bytecode(self)
870 """
871 assert self.func_ir is None
--> 872 return self._compile_core()
873
874 def _compile_ir(self):
~/.local/lib/python3.7/site-packages/numba/compiler.py in _compile_core(self)
857 self.define_pipelines(pm)
858 pm.finalize()
--> 859 res = pm.run(self.status)
860 if res is not None:
861 # Early pipeline completion
~/.local/lib/python3.7/site-packages/numba/compiler_lock.py in _acquire_compile_lock(*args, **kwargs)
30 def _acquire_compile_lock(*args, **kwargs):
31 with self:
---> 32 return func(*args, **kwargs)
33 return _acquire_compile_lock
34
~/.local/lib/python3.7/site-packages/numba/compiler.py in run(self, status)
251 # No more fallback pipelines?
252 if is_final_pipeline:
--> 253 raise patched_exception
254 # Go to next fallback pipeline
255 else:
~/.local/lib/python3.7/site-packages/numba/compiler.py in run(self, status)
242 try:
243 event(stage_name)
--> 244 stage()
245 except _EarlyPipelineCompletion as e:
246 return e.result
~/.local/lib/python3.7/site-packages/numba/compiler.py in stage_nopython_backend(self)
729 """
730 lowerfn = self.backend_nopython_mode
--> 731 self._backend(lowerfn, objectmode=False)
732
733 def stage_compile_interp_mode(self):
~/.local/lib/python3.7/site-packages/numba/compiler.py in _backend(self, lowerfn, objectmode)
679 self.library.enable_object_caching()
680
--> 681 lowered = lowerfn()
682 signature = typing.signature(self.return_type, *self.args)
683 self.cr = compile_result(
~/.local/lib/python3.7/site-packages/numba/compiler.py in backend_nopython_mode(self)
666 self.calltypes,
667 self.flags,
--> 668 self.metadata)
669
670 def _backend(self, lowerfn, objectmode):
~/.local/lib/python3.7/site-packages/numba/compiler.py in native_lowering_stage(targetctx, library, interp, typemap, restype, calltypes, flags, metadata)
1061 lower = lowering.Lower(targetctx, library, fndesc, interp,
1062 metadata=metadata)
-> 1063 lower.lower()
1064 if not flags.no_cpython_wrapper:
1065 lower.create_cpython_wrapper(flags.release_gil)
~/.local/lib/python3.7/site-packages/numba/lowering.py in lower(self)
175 if self.generator_info is None:
176 self.genlower = None
--> 177 self.lower_normal_function(self.fndesc)
178 else:
179 self.genlower = self.GeneratorLower(self)
~/.local/lib/python3.7/site-packages/numba/lowering.py in lower_normal_function(self, fndesc)
216 # Init argument values
217 self.extract_function_arguments()
--> 218 entry_block_tail = self.lower_function_body()
219
220 # Close tail of entry block
~/.local/lib/python3.7/site-packages/numba/lowering.py in lower_function_body(self)
241 bb = self.blkmap[offset]
242 self.builder.position_at_end(bb)
--> 243 self.lower_block(block)
244
245 self.post_lower()
~/.local/lib/python3.7/site-packages/numba/lowering.py in lower_block(self, block)
256 with new_error_context('lowering "{inst}" at {loc}', inst=inst,
257 loc=self.loc, errcls_=defaulterrcls):
--> 258 self.lower_inst(inst)
259
260 def create_cpython_wrapper(self, release_gil=False):
~/.local/envs/dylan3/lib/python3.7/contextlib.py in __exit__(self, type, value, traceback)
128 value = type()
129 try:
--> 130 self.gen.throw(type, value, traceback)
131 except StopIteration as exc:
132 # Suppress StopIteration *unless* it's the same exception that
~/.local/lib/python3.7/site-packages/numba/errors.py in new_error_context(fmt_, *args, **kwargs)
633 from numba import config
634 tb = sys.exc_info()[2] if config.FULL_TRACEBACKS else None
--> 635 six.reraise(type(newerr), newerr, tb)
636
637
~/.local/lib/python3.7/site-packages/numba/six.py in reraise(tp, value, tb)
657 if value.__traceback__ is not tb:
658 raise value.with_traceback(tb)
--> 659 raise value
660
661 else:
LoweringError: Failed in nopython mode pipeline (step: nopython mode backend)
'NoneType' object has no attribute 'groups'
File "../../../u/dnelson/python/util/sub.py", line 2566:
#def get_random_number():
<source elided>
if count != num:
raise Exception('Mismatch.')
^
[1] During: lowering "raise <class 'Exception'>('Mismatch.')" at /u/dnelson/python/util/subfind.py (2566)
-------------------------------------------------------------------------------
This should not have happened, a problem has occurred in Numba's internals.
|
AttributeError
|
def get_cache_path(self):
# We could also use jupyter_core.paths.jupyter_runtime_dir()
# In both cases this is a user-wide directory, so we need to
# be careful when disambiguating if we don't want too many
# conflicts (see below).
try:
from IPython.paths import get_ipython_cache_dir
except ImportError:
# older IPython version
from IPython.utils.path import get_ipython_cache_dir
return os.path.join(get_ipython_cache_dir(), "numba_cache")
|
def get_cache_path(self):
# We could also use jupyter_core.paths.jupyter_runtime_dir()
# In both cases this is a user-wide directory, so we need to
# be careful when disambiguating if we don't want too many
# conflicts (see below).
try:
from IPython.paths import get_ipython_cache_dir
except ImportError:
# older IPython version
from IPython.utils.path import get_ipython_cache_dir
return os.path.join(get_ipython_cache_dir(), "numba")
|
https://github.com/numba/numba/issues/4040
|
(gammapy-dev) hfm-1804a:tmp deil$ ipython
Python 3.7.0 | packaged by conda-forge | (default, Nov 12 2018, 12:34:36)
Type 'copyright', 'credits' or 'license' for more information
IPython 7.3.0 -- An enhanced Interactive Python. Type '?' for help.
In [1]: import numba
In [2]: numba.jit
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-2-509e32de4401> in <module>
----> 1 numba.jit
AttributeError: module 'numba' has no attribute 'jit'
In [3]: numba
Out[3]: <module 'numba' (namespace)>
In [4]: numba.__path__
Out[4]: _NamespacePath(['/Users/deil/.ipython/numba'])
In [5]:
Do you really want to exit ([y]/n)?
(gammapy-dev) hfm-1804a:tmp deil$ ls -lh /Users/deil/.ipython/numba
total 24
-rw-r--r-- 1 deil staff 5.4K Apr 20 11:30 ipython-input-4-f982ea8710c3.f-fc3bc4f3ad.py37m.1.nbc
-rw-r--r-- 1 deil staff 961B Apr 20 11:30 ipython-input-4-f982ea8710c3.f-fc3bc4f3ad.py37m.nbi
|
AttributeError
|
def _lower_array_expr(lowerer, expr):
"""Lower an array expression built by RewriteArrayExprs."""
expr_name = "__numba_array_expr_%s" % (hex(hash(expr)).replace("-", "_"))
expr_filename = expr.loc.filename
expr_var_list = expr.list_vars()
# The expression may use a given variable several times, but we
# should only create one parameter for it.
expr_var_unique = sorted(set(expr_var_list), key=lambda var: var.name)
# Arguments are the names external to the new closure
expr_args = [var.name for var in expr_var_unique]
# 1. Create an AST tree from the array expression.
with _legalize_parameter_names(expr_var_unique) as expr_params:
if hasattr(ast, "arg"):
# Should be Python 3.x
ast_args = [ast.arg(param_name, None) for param_name in expr_params]
else:
# Should be Python 2.x
ast_args = [ast.Name(param_name, ast.Param()) for param_name in expr_params]
# Parse a stub function to ensure the AST is populated with
# reasonable defaults for the Python version.
ast_module = ast.parse(
"def {0}(): return".format(expr_name), expr_filename, "exec"
)
assert hasattr(ast_module, "body") and len(ast_module.body) == 1
ast_fn = ast_module.body[0]
ast_fn.args.args = ast_args
ast_fn.body[0].value, namespace = _arr_expr_to_ast(expr.expr)
ast.fix_missing_locations(ast_module)
# 2. Compile the AST module and extract the Python function.
code_obj = compile(ast_module, expr_filename, "exec")
six.exec_(code_obj, namespace)
impl = namespace[expr_name]
# 3. Now compile a ufunc using the Python function as kernel.
context = lowerer.context
builder = lowerer.builder
outer_sig = expr.ty(*(lowerer.typeof(name) for name in expr_args))
inner_sig_args = []
for argty in outer_sig.args:
if isinstance(argty, types.Optional):
argty = argty.type
if isinstance(argty, types.Array):
inner_sig_args.append(argty.dtype)
else:
inner_sig_args.append(argty)
inner_sig = outer_sig.return_type.dtype(*inner_sig_args)
# Follow the Numpy error model. Note this also allows e.g. vectorizing
# division (issue #1223).
flags = compiler.Flags()
flags.set("error_model", "numpy")
cres = context.compile_subroutine(
builder, impl, inner_sig, flags=flags, caching=False
)
# Create kernel subclass calling our native function
from ..targets import npyimpl
class ExprKernel(npyimpl._Kernel):
def generate(self, *args):
arg_zip = zip(args, self.outer_sig.args, inner_sig.args)
cast_args = [self.cast(val, inty, outty) for val, inty, outty in arg_zip]
result = self.context.call_internal(
builder, cres.fndesc, inner_sig, cast_args
)
return self.cast(result, inner_sig.return_type, self.outer_sig.return_type)
args = [lowerer.loadvar(name) for name in expr_args]
return npyimpl.numpy_ufunc_kernel(
context, builder, outer_sig, args, ExprKernel, explicit_output=False
)
|
def _lower_array_expr(lowerer, expr):
"""Lower an array expression built by RewriteArrayExprs."""
expr_name = "__numba_array_expr_%s" % (hex(hash(expr)).replace("-", "_"))
expr_filename = expr.loc.filename
expr_var_list = expr.list_vars()
# The expression may use a given variable several times, but we
# should only create one parameter for it.
expr_var_unique = sorted(set(expr_var_list), key=lambda var: var.name)
# Arguments are the names external to the new closure
expr_args = [var.name for var in expr_var_unique]
# 1. Create an AST tree from the array expression.
with _legalize_parameter_names(expr_var_unique) as expr_params:
if hasattr(ast, "arg"):
# Should be Python 3.x
ast_args = [ast.arg(param_name, None) for param_name in expr_params]
else:
# Should be Python 2.x
ast_args = [ast.Name(param_name, ast.Param()) for param_name in expr_params]
# Parse a stub function to ensure the AST is populated with
# reasonable defaults for the Python version.
ast_module = ast.parse(
"def {0}(): return".format(expr_name), expr_filename, "exec"
)
assert hasattr(ast_module, "body") and len(ast_module.body) == 1
ast_fn = ast_module.body[0]
ast_fn.args.args = ast_args
ast_fn.body[0].value, namespace = _arr_expr_to_ast(expr.expr)
ast.fix_missing_locations(ast_module)
# 2. Compile the AST module and extract the Python function.
code_obj = compile(ast_module, expr_filename, "exec")
six.exec_(code_obj, namespace)
impl = namespace[expr_name]
# 3. Now compile a ufunc using the Python function as kernel.
context = lowerer.context
builder = lowerer.builder
outer_sig = expr.ty(*(lowerer.typeof(name) for name in expr_args))
inner_sig_args = []
for argty in outer_sig.args:
if isinstance(argty, types.Array):
inner_sig_args.append(argty.dtype)
else:
inner_sig_args.append(argty)
inner_sig = outer_sig.return_type.dtype(*inner_sig_args)
# Follow the Numpy error model. Note this also allows e.g. vectorizing
# division (issue #1223).
flags = compiler.Flags()
flags.set("error_model", "numpy")
cres = context.compile_subroutine(
builder, impl, inner_sig, flags=flags, caching=False
)
# Create kernel subclass calling our native function
from ..targets import npyimpl
class ExprKernel(npyimpl._Kernel):
def generate(self, *args):
arg_zip = zip(args, self.outer_sig.args, inner_sig.args)
cast_args = [self.cast(val, inty, outty) for val, inty, outty in arg_zip]
result = self.context.call_internal(
builder, cres.fndesc, inner_sig, cast_args
)
return self.cast(result, inner_sig.return_type, self.outer_sig.return_type)
args = [lowerer.loadvar(name) for name in expr_args]
return npyimpl.numpy_ufunc_kernel(
context, builder, outer_sig, args, ExprKernel, explicit_output=False
)
|
https://github.com/numba/numba/issues/3972
|
@njit((float_[:], optional(float_[:])))
... def f(x, y):
... if y is None:
... return x
... return x + y
...
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/decorators.py", line 198, in wrapper
disp.compile(sig)
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/dispatcher.py", line 655, in compile
cres = self._compiler.compile(args, return_type)
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/dispatcher.py", line 82, in compile
pipeline_class=self.pipeline_class)
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/compiler.py", line 926, in compile_extra
return pipeline.compile_extra(func)
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/compiler.py", line 374, in compile_extra
return self._compile_bytecode()
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/compiler.py", line 857, in _compile_bytecode
return self._compile_core()
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/compiler.py", line 844, in _compile_core
res = pm.run(self.status)
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/compiler.py", line 255, in run
raise patched_exception
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/compiler.py", line 246, in run
stage()
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/compiler.py", line 717, in stage_nopython_backend
self._backend(lowerfn, objectmode=False)
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/compiler.py", line 666, in _backend
lowered = lowerfn()
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/compiler.py", line 653, in backend_nopython_mode
self.metadata)
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/compiler.py", line 1048, in native_lowering_stage
lower.lower()
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/lowering.py", line 178, in lower
self.lower_normal_function(self.fndesc)
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/lowering.py", line 219, in lower_normal_function
entry_block_tail = self.lower_function_body()
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/lowering.py", line 244, in lower_function_body
self.lower_block(block)
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/lowering.py", line 259, in lower_block
self.lower_inst(inst)
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/lowering.py", line 308, in lower_inst
val = self.lower_assign(ty, inst)
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/lowering.py", line 454, in lower_assign
return self.lower_expr(ty, value)
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/lowering.py", line 1045, in lower_expr
res = self.context.special_ops[expr.op](self, expr)
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/npyufunc/array_exprs.py", line 393, in _lower_array_expr
caching=False)
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/targets/base.py", line 843, in compile_subroutine
flags=flags)
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/targets/base.py", line 815, in _compile_subroutine_no_cache
locals=locals)
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/compiler.py", line 951, in compile_internal
return pipeline.compile_extra(func)
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/compiler.py", line 374, in compile_extra
return self._compile_bytecode()
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/compiler.py", line 857, in _compile_bytecode
return self._compile_core()
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/compiler.py", line 844, in _compile_core
res = pm.run(self.status)
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/compiler.py", line 255, in run
raise patched_exception
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/compiler.py", line 246, in run
stage()
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/compiler.py", line 485, in stage_nopython_frontend
self.locals)
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/compiler.py", line 1029, in type_inference_stage
infer.propagate()
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/typeinfer.py", line 860, in propagate
raise errors[0]
numba.errors.TypingError: Failed in nopython mode pipeline (step: nopython mode backend)
Failed in nopython mode pipeline (step: nopython frontend)
No conversion from array(float32, 1d, C) to float32 for '$0.4', defined at None
File "<stdin>", line 3:
<source missing, REPL/exec in use?>
[1] During: typing of assignment at <stdin> (3)
File "<stdin>", line 3:
<source missing, REPL/exec in use?>
[1] During: lowering "$12.3 = arrayexpr(expr=(<built-in function add>, [Var(x, <stdin> (3)), Var(y, <stdin> (3))]), ty=array(float32, 1d, C))" at <stdin> (5)
|
numba.errors.TypingError
|
def _prepare_argument(ctxt, bld, inp, tyinp, where="input operand"):
"""returns an instance of the appropriate Helper (either
_ScalarHelper or _ArrayHelper) class to handle the argument.
using the polymorphic interface of the Helper classes, scalar
and array cases can be handled with the same code"""
# first un-Optional Optionals
if isinstance(tyinp, types.Optional):
oty = tyinp
tyinp = tyinp.type
inp = ctxt.cast(bld, inp, oty, tyinp)
# then prepare the arg for a concrete instance
if isinstance(tyinp, types.ArrayCompatible):
ary = ctxt.make_array(tyinp)(ctxt, bld, inp)
shape = cgutils.unpack_tuple(bld, ary.shape, tyinp.ndim)
strides = cgutils.unpack_tuple(bld, ary.strides, tyinp.ndim)
return _ArrayHelper(
ctxt,
bld,
shape,
strides,
ary.data,
tyinp.layout,
tyinp.dtype,
tyinp.ndim,
inp,
)
elif types.unliteral(tyinp) in types.number_domain | set([types.boolean]):
return _ScalarHelper(ctxt, bld, inp, tyinp)
else:
raise NotImplementedError(
"unsupported type for {0}: {1}".format(where, str(tyinp))
)
|
def _prepare_argument(ctxt, bld, inp, tyinp, where="input operand"):
"""returns an instance of the appropriate Helper (either
_ScalarHelper or _ArrayHelper) class to handle the argument.
using the polymorphic interface of the Helper classes, scalar
and array cases can be handled with the same code"""
if isinstance(tyinp, types.ArrayCompatible):
ary = ctxt.make_array(tyinp)(ctxt, bld, inp)
shape = cgutils.unpack_tuple(bld, ary.shape, tyinp.ndim)
strides = cgutils.unpack_tuple(bld, ary.strides, tyinp.ndim)
return _ArrayHelper(
ctxt,
bld,
shape,
strides,
ary.data,
tyinp.layout,
tyinp.dtype,
tyinp.ndim,
inp,
)
elif types.unliteral(tyinp) in types.number_domain | set([types.boolean]):
return _ScalarHelper(ctxt, bld, inp, tyinp)
else:
raise NotImplementedError(
"unsupported type for {0}: {1}".format(where, str(tyinp))
)
|
https://github.com/numba/numba/issues/3972
|
@njit((float_[:], optional(float_[:])))
... def f(x, y):
... if y is None:
... return x
... return x + y
...
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/decorators.py", line 198, in wrapper
disp.compile(sig)
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/dispatcher.py", line 655, in compile
cres = self._compiler.compile(args, return_type)
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/dispatcher.py", line 82, in compile
pipeline_class=self.pipeline_class)
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/compiler.py", line 926, in compile_extra
return pipeline.compile_extra(func)
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/compiler.py", line 374, in compile_extra
return self._compile_bytecode()
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/compiler.py", line 857, in _compile_bytecode
return self._compile_core()
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/compiler.py", line 844, in _compile_core
res = pm.run(self.status)
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/compiler.py", line 255, in run
raise patched_exception
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/compiler.py", line 246, in run
stage()
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/compiler.py", line 717, in stage_nopython_backend
self._backend(lowerfn, objectmode=False)
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/compiler.py", line 666, in _backend
lowered = lowerfn()
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/compiler.py", line 653, in backend_nopython_mode
self.metadata)
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/compiler.py", line 1048, in native_lowering_stage
lower.lower()
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/lowering.py", line 178, in lower
self.lower_normal_function(self.fndesc)
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/lowering.py", line 219, in lower_normal_function
entry_block_tail = self.lower_function_body()
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/lowering.py", line 244, in lower_function_body
self.lower_block(block)
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/lowering.py", line 259, in lower_block
self.lower_inst(inst)
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/lowering.py", line 308, in lower_inst
val = self.lower_assign(ty, inst)
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/lowering.py", line 454, in lower_assign
return self.lower_expr(ty, value)
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/lowering.py", line 1045, in lower_expr
res = self.context.special_ops[expr.op](self, expr)
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/npyufunc/array_exprs.py", line 393, in _lower_array_expr
caching=False)
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/targets/base.py", line 843, in compile_subroutine
flags=flags)
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/targets/base.py", line 815, in _compile_subroutine_no_cache
locals=locals)
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/compiler.py", line 951, in compile_internal
return pipeline.compile_extra(func)
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/compiler.py", line 374, in compile_extra
return self._compile_bytecode()
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/compiler.py", line 857, in _compile_bytecode
return self._compile_core()
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/compiler.py", line 844, in _compile_core
res = pm.run(self.status)
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/compiler.py", line 255, in run
raise patched_exception
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/compiler.py", line 246, in run
stage()
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/compiler.py", line 485, in stage_nopython_frontend
self.locals)
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/compiler.py", line 1029, in type_inference_stage
infer.propagate()
File "/Users/lizli/anaconda3/lib/python3.7/site-packages/numba/typeinfer.py", line 860, in propagate
raise errors[0]
numba.errors.TypingError: Failed in nopython mode pipeline (step: nopython mode backend)
Failed in nopython mode pipeline (step: nopython frontend)
No conversion from array(float32, 1d, C) to float32 for '$0.4', defined at None
File "<stdin>", line 3:
<source missing, REPL/exec in use?>
[1] During: typing of assignment at <stdin> (3)
File "<stdin>", line 3:
<source missing, REPL/exec in use?>
[1] During: lowering "$12.3 = arrayexpr(expr=(<built-in function add>, [Var(x, <stdin> (3)), Var(y, <stdin> (3))]), ty=array(float32, 1d, C))" at <stdin> (5)
|
numba.errors.TypingError
|
def get_sys_info():
# delay these imports until now as they are only needed in this
# function which then exits.
import platform
import json
import multiprocessing
from numba import config
from numba import cuda as cu
from numba.cuda import cudadrv
from numba.cuda.cudadrv.driver import driver as cudriver
from numba import roc
from numba.roc.hlc import hlc, libhlc
import textwrap as tw
import ctypes as ct
import llvmlite.binding as llvmbind
import locale
from datetime import datetime
from itertools import chain
from subprocess import check_output, CalledProcessError
try:
fmt = "%-45s : %-s"
print("-" * 80)
print("__Time Stamp__")
print(datetime.utcnow())
print("")
print("__Hardware Information__")
system_name = platform.system()
print(fmt % ("Machine", platform.machine()))
print(fmt % ("CPU Name", llvmbind.get_host_cpu_name()))
if system_name == "Linux":
strmatch = "Cpus_allowed"
try:
loc = "/proc/self/status"
with open(loc, "rt") as f:
proc_stat = f.read().splitlines()
for x in proc_stat:
if x.startswith(strmatch):
if x.startswith("%s:" % strmatch):
hexnum = "0x%s" % x.split(":")[1].strip()
acc_cpus = int(hexnum, 16)
_n = str(bin(acc_cpus).count("1"))
print(fmt % ("Number of accessible CPU cores", _n))
elif x.startswith("%s_list:" % strmatch):
_a = x.split(":")[1].strip()
print(fmt % ("Listed accessible CPUs cores", _a))
except BaseException:
print(fmt % ("CPU count", multiprocessing.cpu_count()))
# See if CFS is in place
# https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt
try:
def scrape_lines(loc):
with open(loc, "rt") as f:
return f.read().splitlines()
loc = "/sys/fs/cgroup/cpuacct/cpu.cfs_period_us"
cfs_period = int(scrape_lines(loc)[0])
loc = "/sys/fs/cgroup/cpuacct/cpu.cfs_quota_us"
cfs_quota = int(scrape_lines(loc)[0])
if cfs_quota == -1:
print(fmt % ("CFS restrictions", "None"))
else:
runtime_amount = float(cfs_quota) / float(cfs_period)
print(
fmt
% ("CFS restrictions (CPUs worth of runtime)", runtime_amount)
)
except BaseException:
print(fmt % ("CFS restrictions", "Information not available"))
else:
print(fmt % ("CPU count", multiprocessing.cpu_count()))
try:
featuremap = llvmbind.get_host_cpu_features()
except RuntimeError:
print(fmt % ("CPU Features", "NA"))
else:
features = sorted([key for key, value in featuremap.items() if value])
cpu_feat = tw.fill(" ".join(features), 80)
print(fmt % ("CPU Features", ""))
print(cpu_feat)
print("")
print("__OS Information__")
print(fmt % ("Platform", platform.platform(aliased=True)))
print(fmt % ("Release", platform.release()))
print(fmt % ("System Name", system_name))
print(fmt % ("Version", platform.version()))
try:
if system_name == "Linux":
info = platform.linux_distribution()
elif system_name == "Windows":
info = platform.win32_ver()
elif system_name == "Darwin":
info = platform.mac_ver()
else:
raise RuntimeError("Unknown system.")
buf = "".join(
[x if x != "" else " " for x in list(chain.from_iterable(info))]
)
print(fmt % ("OS specific info", buf))
if system_name == "Linux":
print(fmt % ("glibc info", " ".join(platform.libc_ver())))
except:
print("Error: System name incorrectly identified or unknown.")
print("")
print("__Python Information__")
print(fmt % ("Python Compiler", platform.python_compiler()))
print(fmt % ("Python Implementation", platform.python_implementation()))
print(fmt % ("Python Version", platform.python_version()))
lcl = []
try:
for x in locale.getdefaultlocale():
if x is not None:
lcl.append(x)
except BaseException as e:
lcl.append(str(e))
print(fmt % ("Python Locale ", " ".join(lcl)))
print("")
print("__LLVM information__")
print(
fmt
% ("LLVM version", ".".join([str(k) for k in llvmbind.llvm_version_info]))
)
print("")
print("__CUDA Information__")
# Look for GPUs
try:
cu.list_devices()[0] # will a device initialise?
except BaseException as e:
msg_not_found = "CUDA driver library cannot be found"
msg_disabled_by_user = "CUDA is disabled"
msg_end = " or no CUDA enabled devices are present."
msg_generic_problem = "Error: CUDA device intialisation problem."
msg = getattr(e, "msg", None)
if msg is not None:
if msg_not_found in msg:
err_msg = msg_not_found + msg_end
elif msg_disabled_by_user in msg:
err_msg = msg_disabled_by_user + msg_end
else:
err_msg = msg_generic_problem + " Message:" + msg
else:
err_msg = msg_generic_problem + " " + str(e)
# Best effort error report
print("%s\nError class: %s" % (err_msg, str(type(e))))
else:
try:
cu.detect()
dv = ct.c_int(0)
cudriver.cuDriverGetVersion(ct.byref(dv))
print(fmt % ("CUDA driver version", dv.value))
print("CUDA libraries:")
cudadrv.libs.test(sys.platform, print_paths=False)
except:
print(
"Error: Probing CUDA failed (device and driver present, runtime problem?)\n"
)
print("")
print("__ROC Information__")
roc_is_available = roc.is_available()
print(fmt % ("ROC available", roc_is_available))
toolchains = []
try:
libhlc.HLC()
toolchains.append("librocmlite library")
except:
pass
try:
cmd = hlc.CmdLine().check_tooling()
toolchains.append("ROC command line tools")
except:
pass
# if no ROC try and report why
if not roc_is_available:
from numba.roc.hsadrv.driver import hsa
try:
hsa.is_available
except BaseException as e:
msg = str(e)
else:
msg = "No ROC toolchains found."
print(fmt % ("Error initialising ROC due to", msg))
if toolchains:
print(fmt % ("Available Toolchains", ", ".join(toolchains)))
try:
# ROC might not be available due to lack of tool chain, but HSA
# agents may be listed
from numba.roc.hsadrv.driver import hsa, dgpu_count
decode = lambda x: x.decode("utf-8") if isinstance(x, bytes) else x
print("\nFound %s HSA Agents:" % len(hsa.agents))
for i, agent in enumerate(hsa.agents):
print("Agent id : %s" % i)
print(" vendor: %s" % decode(agent.vendor_name))
print(" name: %s" % decode(agent.name))
print(" type: %s" % agent.device)
print("")
_dgpus = []
for a in hsa.agents:
if a.is_component and a.device == "GPU":
_dgpus.append(decode(a.name))
print(fmt % ("Found %s discrete GPU(s)" % dgpu_count(), ", ".join(_dgpus)))
except Exception as e:
print("No HSA Agents found, encountered exception when searching:")
print(e)
print("")
print("__SVML Information__")
# replicate some SVML detection logic from numba.__init__ here.
# if SVML load fails in numba.__init__ the splitting of the logic
# here will help diagnosis of the underlying issue
have_svml_library = True
try:
if sys.platform.startswith("linux"):
llvmbind.load_library_permanently("libsvml.so")
elif sys.platform.startswith("darwin"):
llvmbind.load_library_permanently("libsvml.dylib")
elif sys.platform.startswith("win"):
llvmbind.load_library_permanently("svml_dispmd")
else:
have_svml_library = False
except:
have_svml_library = False
func = getattr(llvmbind.targets, "has_svml", None)
llvm_svml_patched = func() if func is not None else False
svml_operational = config.USING_SVML and llvm_svml_patched and have_svml_library
print(fmt % ("SVML state, config.USING_SVML", config.USING_SVML))
print(fmt % ("SVML library found and loaded", have_svml_library))
print(fmt % ("llvmlite using SVML patched LLVM", llvm_svml_patched))
print(fmt % ("SVML operational", svml_operational))
# Check which threading backends are available.
print("")
print("__Threading Layer Information__")
def parse_error(e, backend):
# parses a linux based error message, this is to provide feedback
# and hide user paths etc
try:
path, problem, symbol = [x.strip() for x in e.msg.split(":")]
extn_dso = os.path.split(path)[1]
if backend in extn_dso:
return "%s: %s" % (problem, symbol)
except BaseException:
pass
return "Unknown import problem."
try:
from numba.npyufunc import tbbpool
print(fmt % ("TBB Threading layer available", True))
except ImportError as e:
# might be a missing symbol due to e.g. tbb libraries missing
print(fmt % ("TBB Threading layer available", False))
print(fmt % ("+--> Disabled due to", parse_error(e, "tbbpool")))
try:
from numba.npyufunc import omppool
print(fmt % ("OpenMP Threading layer available", True))
except ImportError as e:
print(fmt % ("OpenMP Threading layer available", False))
print(fmt % ("+--> Disabled due to", parse_error(e, "omppool")))
try:
from numba.npyufunc import workqueue
print(fmt % ("Workqueue Threading layer available", True))
except ImportError as e:
print(fmt % ("Workqueue Threading layer available", False))
print(fmt % ("+--> Disabled due to", parse_error(e, "workqueue")))
# look for numba env vars that are set
print("")
print("__Numba Environment Variable Information__")
_envvar_found = False
for k, v in os.environ.items():
if k.startswith("NUMBA_"):
print(fmt % (k, v))
_envvar_found = True
if not _envvar_found:
print("None set.")
# Look for conda and conda information
print("")
print("__Conda Information__")
cmd = ["conda", "info", "--json"]
try:
conda_out = check_output(cmd)
except Exception as e:
print("Conda not present/not working.\nError was %s\n" % e)
else:
data = "".join(conda_out.decode("utf-8").splitlines())
jsond = json.loads(data)
keys = [
"conda_build_version",
"conda_env_version",
"platform",
"python_version",
"root_writable",
]
for k in keys:
try:
print(fmt % (k, jsond[k]))
except KeyError:
pass
# get info about current environment
cmd = ["conda", "list"]
try:
conda_out = check_output(cmd)
except CalledProcessError as e:
print("Error: Conda command failed. Error was %s\n" % e.output)
else:
print("")
print("__Current Conda Env__")
data = conda_out.decode("utf-8").splitlines()
for k in data:
if k[0] != "#": # don't show where the env is, personal data
print(k)
print("-" * 80)
except Exception as e:
print("Error: The system reporting tool has failed unexpectedly.")
print("Exception was:")
print(e)
finally:
print(
"%s" % "If requested, please copy and paste the information between\n"
"the dashed (----) lines, or from a given specific section as\n"
"appropriate.\n\n"
"=============================================================\n"
"IMPORTANT: Please ensure that you are happy with sharing the\n"
"contents of the information present, any information that you\n"
"wish to keep private you should remove before sharing.\n"
"=============================================================\n"
)
|
def get_sys_info():
# delay these imports until now as they are only needed in this
# function which then exits.
import platform
import json
import multiprocessing
from numba import config
from numba import cuda as cu
from numba.cuda import cudadrv
from numba.cuda.cudadrv.driver import driver as cudriver
from numba import roc
from numba.roc.hlc import hlc, libhlc
import textwrap as tw
import ctypes as ct
import llvmlite.binding as llvmbind
import locale
from datetime import datetime
from itertools import chain
from subprocess import check_output, CalledProcessError
try:
fmt = "%-45s : %-s"
print("-" * 80)
print("__Time Stamp__")
print(datetime.utcnow())
print("")
print("__Hardware Information__")
system_name = platform.system()
print(fmt % ("Machine", platform.machine()))
print(fmt % ("CPU Name", llvmbind.get_host_cpu_name()))
if system_name == "Linux":
strmatch = "Cpus_allowed"
try:
loc = "/proc/self/status"
with open(loc, "rt") as f:
proc_stat = f.read().splitlines()
for x in proc_stat:
if x.startswith(strmatch):
if x.startswith("%s:" % strmatch):
hexnum = "0x%s" % x.split(":")[1].strip()
acc_cpus = int(hexnum, 16)
_n = str(bin(acc_cpus).count("1"))
print(fmt % ("Number of accessible CPU cores", _n))
elif x.startswith("%s_list:" % strmatch):
_a = x.split(":")[1].strip()
print(fmt % ("Listed accessible CPUs cores", _a))
except BaseException:
print(fmt % ("CPU count", multiprocessing.cpu_count()))
# See if CFS is in place
# https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt
try:
def scrape_lines(loc):
with open(loc, "rt") as f:
return f.read().splitlines()
loc = "/sys/fs/cgroup/cpuacct/cpu.cfs_period_us"
cfs_period = int(scrape_lines(loc)[0])
loc = "/sys/fs/cgroup/cpuacct/cpu.cfs_quota_us"
cfs_quota = int(scrape_lines(loc)[0])
if cfs_quota == -1:
print(fmt % ("CFS restrictions", "None"))
else:
runtime_amount = float(cfs_quota) / float(cfs_period)
print(
fmt
% ("CFS restrictions (CPUs worth of runtime)", runtime_amount)
)
except BaseException:
print(fmt % ("CFS restrictions", "Information not available"))
else:
print(fmt % ("CPU count", multiprocessing.cpu_count()))
try:
featuremap = llvmbind.get_host_cpu_features()
except RuntimeError:
print(fmt % ("CPU Features", "NA"))
else:
features = sorted([key for key, value in featuremap.items() if value])
cpu_feat = tw.fill(" ".join(features), 80)
print(fmt % ("CPU Features", ""))
print(cpu_feat)
print("")
print("__OS Information__")
print(fmt % ("Platform", platform.platform(aliased=True)))
print(fmt % ("Release", platform.release()))
print(fmt % ("System Name", system_name))
print(fmt % ("Version", platform.version()))
try:
if system_name == "Linux":
info = platform.linux_distribution()
elif system_name == "Windows":
info = platform.win32_ver()
elif system_name == "Darwin":
info = platform.mac_ver()
else:
raise RuntimeError("Unknown system.")
buf = "".join(
[x if x != "" else " " for x in list(chain.from_iterable(info))]
)
print(fmt % ("OS specific info", buf))
if system_name == "Linux":
print(fmt % ("glibc info", " ".join(platform.libc_ver())))
except:
print("Error: System name incorrectly identified or unknown.")
print("")
print("__Python Information__")
print(fmt % ("Python Compiler", platform.python_compiler()))
print(fmt % ("Python Implementation", platform.python_implementation()))
print(fmt % ("Python Version", platform.python_version()))
print(
fmt
% (
"Python Locale ",
" ".join([x for x in locale.getdefaultlocale() if x is not None]),
)
)
print("")
print("__LLVM information__")
print(
fmt
% ("LLVM version", ".".join([str(k) for k in llvmbind.llvm_version_info]))
)
print("")
print("__CUDA Information__")
# Look for GPUs
try:
cu.list_devices()[0] # will a device initialise?
except BaseException as e:
msg_not_found = "CUDA driver library cannot be found"
msg_disabled_by_user = "CUDA is disabled"
msg_end = " or no CUDA enabled devices are present."
msg_generic_problem = "Error: CUDA device intialisation problem."
msg = getattr(e, "msg", None)
if msg is not None:
if msg_not_found in msg:
err_msg = msg_not_found + msg_end
elif msg_disabled_by_user in msg:
err_msg = msg_disabled_by_user + msg_end
else:
err_msg = msg_generic_problem + " Message:" + msg
else:
err_msg = msg_generic_problem + " " + str(e)
# Best effort error report
print("%s\nError class: %s" % (err_msg, str(type(e))))
else:
try:
cu.detect()
dv = ct.c_int(0)
cudriver.cuDriverGetVersion(ct.byref(dv))
print(fmt % ("CUDA driver version", dv.value))
print("CUDA libraries:")
cudadrv.libs.test(sys.platform, print_paths=False)
except:
print(
"Error: Probing CUDA failed (device and driver present, runtime problem?)\n"
)
print("")
print("__ROC Information__")
roc_is_available = roc.is_available()
print(fmt % ("ROC available", roc_is_available))
toolchains = []
try:
libhlc.HLC()
toolchains.append("librocmlite library")
except:
pass
try:
cmd = hlc.CmdLine().check_tooling()
toolchains.append("ROC command line tools")
except:
pass
# if no ROC try and report why
if not roc_is_available:
from numba.roc.hsadrv.driver import hsa
try:
hsa.is_available
except BaseException as e:
msg = str(e)
else:
msg = "No ROC toolchains found."
print(fmt % ("Error initialising ROC due to", msg))
if toolchains:
print(fmt % ("Available Toolchains", ", ".join(toolchains)))
try:
# ROC might not be available due to lack of tool chain, but HSA
# agents may be listed
from numba.roc.hsadrv.driver import hsa, dgpu_count
decode = lambda x: x.decode("utf-8") if isinstance(x, bytes) else x
print("\nFound %s HSA Agents:" % len(hsa.agents))
for i, agent in enumerate(hsa.agents):
print("Agent id : %s" % i)
print(" vendor: %s" % decode(agent.vendor_name))
print(" name: %s" % decode(agent.name))
print(" type: %s" % agent.device)
print("")
_dgpus = []
for a in hsa.agents:
if a.is_component and a.device == "GPU":
_dgpus.append(decode(a.name))
print(fmt % ("Found %s discrete GPU(s)" % dgpu_count(), ", ".join(_dgpus)))
except Exception as e:
print("No HSA Agents found, encountered exception when searching:")
print(e)
print("")
print("__SVML Information__")
# replicate some SVML detection logic from numba.__init__ here.
# if SVML load fails in numba.__init__ the splitting of the logic
# here will help diagnosis of the underlying issue
have_svml_library = True
try:
if sys.platform.startswith("linux"):
llvmbind.load_library_permanently("libsvml.so")
elif sys.platform.startswith("darwin"):
llvmbind.load_library_permanently("libsvml.dylib")
elif sys.platform.startswith("win"):
llvmbind.load_library_permanently("svml_dispmd")
else:
have_svml_library = False
except:
have_svml_library = False
func = getattr(llvmbind.targets, "has_svml", None)
llvm_svml_patched = func() if func is not None else False
svml_operational = config.USING_SVML and llvm_svml_patched and have_svml_library
print(fmt % ("SVML state, config.USING_SVML", config.USING_SVML))
print(fmt % ("SVML library found and loaded", have_svml_library))
print(fmt % ("llvmlite using SVML patched LLVM", llvm_svml_patched))
print(fmt % ("SVML operational", svml_operational))
# Check which threading backends are available.
print("")
print("__Threading Layer Information__")
def parse_error(e, backend):
# parses a linux based error message, this is to provide feedback
# and hide user paths etc
try:
path, problem, symbol = [x.strip() for x in e.msg.split(":")]
extn_dso = os.path.split(path)[1]
if backend in extn_dso:
return "%s: %s" % (problem, symbol)
except BaseException:
pass
return "Unknown import problem."
try:
from numba.npyufunc import tbbpool
print(fmt % ("TBB Threading layer available", True))
except ImportError as e:
# might be a missing symbol due to e.g. tbb libraries missing
print(fmt % ("TBB Threading layer available", False))
print(fmt % ("+--> Disabled due to", parse_error(e, "tbbpool")))
try:
from numba.npyufunc import omppool
print(fmt % ("OpenMP Threading layer available", True))
except ImportError as e:
print(fmt % ("OpenMP Threading layer available", False))
print(fmt % ("+--> Disabled due to", parse_error(e, "omppool")))
try:
from numba.npyufunc import workqueue
print(fmt % ("Workqueue Threading layer available", True))
except ImportError as e:
print(fmt % ("Workqueue Threading layer available", False))
print(fmt % ("+--> Disabled due to", parse_error(e, "workqueue")))
# look for numba env vars that are set
print("")
print("__Numba Environment Variable Information__")
_envvar_found = False
for k, v in os.environ.items():
if k.startswith("NUMBA_"):
print(fmt % (k, v))
_envvar_found = True
if not _envvar_found:
print("None set.")
# Look for conda and conda information
print("")
print("__Conda Information__")
cmd = ["conda", "info", "--json"]
try:
conda_out = check_output(cmd)
except Exception as e:
print("Conda not present/not working.\nError was %s\n" % e)
else:
data = "".join(conda_out.decode("utf-8").splitlines())
jsond = json.loads(data)
keys = [
"conda_build_version",
"conda_env_version",
"platform",
"python_version",
"root_writable",
]
for k in keys:
try:
print(fmt % (k, jsond[k]))
except KeyError:
pass
# get info about current environment
cmd = ["conda", "list"]
try:
conda_out = check_output(cmd)
except CalledProcessError as e:
print("Error: Conda command failed. Error was %s\n" % e.output)
else:
print("")
print("__Current Conda Env__")
data = conda_out.decode("utf-8").splitlines()
for k in data:
if k[0] != "#": # don't show where the env is, personal data
print(k)
print("-" * 80)
except Exception as e:
print("Error: The system reporting tool has failed unexpectedly.")
print("Exception was:")
print(e)
finally:
print(
"%s" % "If requested, please copy and paste the information between\n"
"the dashed (----) lines, or from a given specific section as\n"
"appropriate.\n\n"
"=============================================================\n"
"IMPORTANT: Please ensure that you are happy with sharing the\n"
"contents of the information present, any information that you\n"
"wish to keep private you should remove before sharing.\n"
"=============================================================\n"
)
|
https://github.com/numba/numba/issues/3974
|
(base) hfm-1804a:~ deil$ python
Python 3.7.3 (default, Mar 27 2019, 16:54:48)
[Clang 4.0.1 (tags/RELEASE_401/final)] :: Anaconda, Inc. on darwin
Type "help", "copyright", "credits" or "license" for more information.
import locale
locale.getlocale()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/deil/software/anaconda3/lib/python3.7/locale.py", line 587, in getlocale
return _parse_localename(localename)
File "/Users/deil/software/anaconda3/lib/python3.7/locale.py", line 495, in _parse_localename
raise ValueError('unknown locale: %s' % localename)
ValueError: unknown locale: UTF-8
|
ValueError
|
def stage_preserve_ir(self):
self.func_ir_original = copy.deepcopy(self.func_ir)
|
def stage_preserve_ir(self):
self.func_ir_original = self.func_ir.copy()
|
https://github.com/numba/numba/issues/3659
|
Traceback (most recent call last):
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\errors.py", line 617, in new_error_context
yield
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\lowering.py", line 259, in lower_block
self.lower_inst(inst)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\pylowering.py", line 96, in lower_inst
value = self.lower_assign(inst)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\pylowering.py", line 186, in lower_assign
val = self.loadvar(value.name)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\pylowering.py", line 555, in loadvar
assert name in self._live_vars, name
AssertionError: array.22
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:/Users/yukoba/.IntelliJIdea2018.3/config/scratches/scratch_6.py", line 10, in <module>
print(main())
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\dispatcher.py", line 367, in _compile_for_args
raise e
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\dispatcher.py", line 324, in _compile_for_args
return self.compile(tuple(argtypes))
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\dispatcher.py", line 655, in compile
cres = self._compiler.compile(args, return_type)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\dispatcher.py", line 82, in compile
pipeline_class=self.pipeline_class)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\compiler.py", line 926, in compile_extra
return pipeline.compile_extra(func)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\compiler.py", line 374, in compile_extra
return self._compile_bytecode()
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\compiler.py", line 857, in _compile_bytecode
return self._compile_core()
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\compiler.py", line 844, in _compile_core
res = pm.run(self.status)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\compiler.py", line 255, in run
raise patched_exception
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\compiler.py", line 246, in run
stage()
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\compiler.py", line 691, in stage_objectmode_backend
self._backend(lowerfn, objectmode=True)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\compiler.py", line 666, in _backend
lowered = lowerfn()
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\compiler.py", line 638, in backend_object_mode
self.flags)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\compiler.py", line 1075, in py_lowering_stage
lower.lower()
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\lowering.py", line 178, in lower
self.lower_normal_function(self.fndesc)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\lowering.py", line 219, in lower_normal_function
entry_block_tail = self.lower_function_body()
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\lowering.py", line 244, in lower_function_body
self.lower_block(block)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\lowering.py", line 259, in lower_block
self.lower_inst(inst)
File "C:\Users\yukoba\Anaconda3\lib\contextlib.py", line 130, in __exit__
self.gen.throw(type, value, traceback)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\errors.py", line 625, in new_error_context
six.reraise(type(newerr), newerr, tb)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\six.py", line 659, in reraise
raise value
numba.errors.LoweringError: Failed in object mode pipeline (step: object mode backend)
array.22
File "scratch_6.py", line 7:
def main():
<source elided>
a = np.array(((1, 2), (3, 4)))
return np.array([x for x in a])
^
[1] During: lowering "$0.13 = array.22" at C:/Users/yukoba/.IntelliJIdea2018.3/config/scratches/scratch_6.py (7)
|
AssertionError
|
def stage_inline_pass(self):
"""
Inline calls to locally defined closures.
"""
# Ensure we have an IR and type information.
assert self.func_ir
# if the return type is a pyobject, there's no type info available and
# no ability to resolve certain typed function calls in the array
# inlining code, use this variable to indicate
typed_pass = not isinstance(self.return_type, types.misc.PyObject)
inline_pass = InlineClosureCallPass(
self.func_ir,
self.flags.auto_parallel,
self.parfor_diagnostics.replaced_fns,
typed_pass,
)
inline_pass.run()
# Remove all Dels, and re-run postproc
post_proc = postproc.PostProcessor(self.func_ir)
post_proc.run()
if config.DEBUG or config.DUMP_IR:
name = self.func_ir.func_id.func_qualname
print(("IR DUMP: %s" % name).center(80, "-"))
self.func_ir.dump()
|
def stage_inline_pass(self):
"""
Inline calls to locally defined closures.
"""
# Ensure we have an IR and type information.
assert self.func_ir
inline_pass = InlineClosureCallPass(
self.func_ir, self.flags.auto_parallel, self.parfor_diagnostics.replaced_fns
)
inline_pass.run()
# Remove all Dels, and re-run postproc
post_proc = postproc.PostProcessor(self.func_ir)
post_proc.run()
if config.DEBUG or config.DUMP_IR:
name = self.func_ir.func_id.func_qualname
print(("IR DUMP: %s" % name).center(80, "-"))
self.func_ir.dump()
|
https://github.com/numba/numba/issues/3659
|
Traceback (most recent call last):
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\errors.py", line 617, in new_error_context
yield
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\lowering.py", line 259, in lower_block
self.lower_inst(inst)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\pylowering.py", line 96, in lower_inst
value = self.lower_assign(inst)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\pylowering.py", line 186, in lower_assign
val = self.loadvar(value.name)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\pylowering.py", line 555, in loadvar
assert name in self._live_vars, name
AssertionError: array.22
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:/Users/yukoba/.IntelliJIdea2018.3/config/scratches/scratch_6.py", line 10, in <module>
print(main())
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\dispatcher.py", line 367, in _compile_for_args
raise e
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\dispatcher.py", line 324, in _compile_for_args
return self.compile(tuple(argtypes))
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\dispatcher.py", line 655, in compile
cres = self._compiler.compile(args, return_type)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\dispatcher.py", line 82, in compile
pipeline_class=self.pipeline_class)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\compiler.py", line 926, in compile_extra
return pipeline.compile_extra(func)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\compiler.py", line 374, in compile_extra
return self._compile_bytecode()
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\compiler.py", line 857, in _compile_bytecode
return self._compile_core()
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\compiler.py", line 844, in _compile_core
res = pm.run(self.status)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\compiler.py", line 255, in run
raise patched_exception
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\compiler.py", line 246, in run
stage()
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\compiler.py", line 691, in stage_objectmode_backend
self._backend(lowerfn, objectmode=True)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\compiler.py", line 666, in _backend
lowered = lowerfn()
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\compiler.py", line 638, in backend_object_mode
self.flags)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\compiler.py", line 1075, in py_lowering_stage
lower.lower()
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\lowering.py", line 178, in lower
self.lower_normal_function(self.fndesc)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\lowering.py", line 219, in lower_normal_function
entry_block_tail = self.lower_function_body()
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\lowering.py", line 244, in lower_function_body
self.lower_block(block)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\lowering.py", line 259, in lower_block
self.lower_inst(inst)
File "C:\Users\yukoba\Anaconda3\lib\contextlib.py", line 130, in __exit__
self.gen.throw(type, value, traceback)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\errors.py", line 625, in new_error_context
six.reraise(type(newerr), newerr, tb)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\six.py", line 659, in reraise
raise value
numba.errors.LoweringError: Failed in object mode pipeline (step: object mode backend)
array.22
File "scratch_6.py", line 7:
def main():
<source elided>
a = np.array(((1, 2), (3, 4)))
return np.array([x for x in a])
^
[1] During: lowering "$0.13 = array.22" at C:/Users/yukoba/.IntelliJIdea2018.3/config/scratches/scratch_6.py (7)
|
AssertionError
|
def __init__(self, func_ir, parallel_options, swapped={}, typed=False):
self.func_ir = func_ir
self.parallel_options = parallel_options
self.swapped = swapped
self._processed_stencils = []
self.typed = typed
|
def __init__(self, func_ir, parallel_options, swapped={}):
self.func_ir = func_ir
self.parallel_options = parallel_options
self.swapped = swapped
self._processed_stencils = []
|
https://github.com/numba/numba/issues/3659
|
Traceback (most recent call last):
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\errors.py", line 617, in new_error_context
yield
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\lowering.py", line 259, in lower_block
self.lower_inst(inst)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\pylowering.py", line 96, in lower_inst
value = self.lower_assign(inst)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\pylowering.py", line 186, in lower_assign
val = self.loadvar(value.name)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\pylowering.py", line 555, in loadvar
assert name in self._live_vars, name
AssertionError: array.22
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:/Users/yukoba/.IntelliJIdea2018.3/config/scratches/scratch_6.py", line 10, in <module>
print(main())
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\dispatcher.py", line 367, in _compile_for_args
raise e
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\dispatcher.py", line 324, in _compile_for_args
return self.compile(tuple(argtypes))
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\dispatcher.py", line 655, in compile
cres = self._compiler.compile(args, return_type)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\dispatcher.py", line 82, in compile
pipeline_class=self.pipeline_class)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\compiler.py", line 926, in compile_extra
return pipeline.compile_extra(func)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\compiler.py", line 374, in compile_extra
return self._compile_bytecode()
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\compiler.py", line 857, in _compile_bytecode
return self._compile_core()
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\compiler.py", line 844, in _compile_core
res = pm.run(self.status)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\compiler.py", line 255, in run
raise patched_exception
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\compiler.py", line 246, in run
stage()
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\compiler.py", line 691, in stage_objectmode_backend
self._backend(lowerfn, objectmode=True)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\compiler.py", line 666, in _backend
lowered = lowerfn()
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\compiler.py", line 638, in backend_object_mode
self.flags)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\compiler.py", line 1075, in py_lowering_stage
lower.lower()
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\lowering.py", line 178, in lower
self.lower_normal_function(self.fndesc)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\lowering.py", line 219, in lower_normal_function
entry_block_tail = self.lower_function_body()
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\lowering.py", line 244, in lower_function_body
self.lower_block(block)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\lowering.py", line 259, in lower_block
self.lower_inst(inst)
File "C:\Users\yukoba\Anaconda3\lib\contextlib.py", line 130, in __exit__
self.gen.throw(type, value, traceback)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\errors.py", line 625, in new_error_context
six.reraise(type(newerr), newerr, tb)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\six.py", line 659, in reraise
raise value
numba.errors.LoweringError: Failed in object mode pipeline (step: object mode backend)
array.22
File "scratch_6.py", line 7:
def main():
<source elided>
a = np.array(((1, 2), (3, 4)))
return np.array([x for x in a])
^
[1] During: lowering "$0.13 = array.22" at C:/Users/yukoba/.IntelliJIdea2018.3/config/scratches/scratch_6.py (7)
|
AssertionError
|
def run(self):
"""Run inline closure call pass."""
modified = False
work_list = list(self.func_ir.blocks.items())
debug_print = _make_debug_print("InlineClosureCallPass")
debug_print("START")
while work_list:
label, block = work_list.pop()
for i, instr in enumerate(block.body):
if isinstance(instr, ir.Assign):
lhs = instr.target
expr = instr.value
if isinstance(expr, ir.Expr) and expr.op == "call":
call_name = guard(find_callname, self.func_ir, expr)
func_def = guard(get_definition, self.func_ir, expr.func)
if guard(
self._inline_reduction, work_list, block, i, expr, call_name
):
modified = True
break # because block structure changed
if guard(self._inline_closure, work_list, block, i, func_def):
modified = True
break # because block structure changed
if guard(self._inline_stencil, instr, call_name, func_def):
modified = True
if enable_inline_arraycall:
# Identify loop structure
if modified:
# Need to do some cleanups if closure inlining kicked in
merge_adjacent_blocks(self.func_ir.blocks)
cfg = compute_cfg_from_blocks(self.func_ir.blocks)
debug_print("start inline arraycall")
_debug_dump(cfg)
loops = cfg.loops()
sized_loops = [(k, len(loops[k].body)) for k in loops.keys()]
visited = []
# We go over all loops, bigger loops first (outer first)
for k, s in sorted(sized_loops, key=lambda tup: tup[1], reverse=True):
visited.append(k)
if guard(
_inline_arraycall,
self.func_ir,
cfg,
visited,
loops[k],
self.swapped,
self.parallel_options.comprehension,
self.typed,
):
modified = True
if modified:
_fix_nested_array(self.func_ir)
if modified:
remove_dels(self.func_ir.blocks)
# repeat dead code elimintation until nothing can be further
# removed
while remove_dead(self.func_ir.blocks, self.func_ir.arg_names, self.func_ir):
pass
self.func_ir.blocks = rename_labels(self.func_ir.blocks)
debug_print("END")
|
def run(self):
"""Run inline closure call pass."""
modified = False
work_list = list(self.func_ir.blocks.items())
debug_print = _make_debug_print("InlineClosureCallPass")
debug_print("START")
while work_list:
label, block = work_list.pop()
for i, instr in enumerate(block.body):
if isinstance(instr, ir.Assign):
lhs = instr.target
expr = instr.value
if isinstance(expr, ir.Expr) and expr.op == "call":
call_name = guard(find_callname, self.func_ir, expr)
func_def = guard(get_definition, self.func_ir, expr.func)
if guard(
self._inline_reduction, work_list, block, i, expr, call_name
):
modified = True
break # because block structure changed
if guard(self._inline_closure, work_list, block, i, func_def):
modified = True
break # because block structure changed
if guard(self._inline_stencil, instr, call_name, func_def):
modified = True
if enable_inline_arraycall:
# Identify loop structure
if modified:
# Need to do some cleanups if closure inlining kicked in
merge_adjacent_blocks(self.func_ir.blocks)
cfg = compute_cfg_from_blocks(self.func_ir.blocks)
debug_print("start inline arraycall")
_debug_dump(cfg)
loops = cfg.loops()
sized_loops = [(k, len(loops[k].body)) for k in loops.keys()]
visited = []
# We go over all loops, bigger loops first (outer first)
for k, s in sorted(sized_loops, key=lambda tup: tup[1], reverse=True):
visited.append(k)
if guard(
_inline_arraycall,
self.func_ir,
cfg,
visited,
loops[k],
self.swapped,
self.parallel_options.comprehension,
):
modified = True
if modified:
_fix_nested_array(self.func_ir)
if modified:
remove_dels(self.func_ir.blocks)
# repeat dead code elimintation until nothing can be further
# removed
while remove_dead(self.func_ir.blocks, self.func_ir.arg_names, self.func_ir):
pass
self.func_ir.blocks = rename_labels(self.func_ir.blocks)
debug_print("END")
|
https://github.com/numba/numba/issues/3659
|
Traceback (most recent call last):
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\errors.py", line 617, in new_error_context
yield
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\lowering.py", line 259, in lower_block
self.lower_inst(inst)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\pylowering.py", line 96, in lower_inst
value = self.lower_assign(inst)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\pylowering.py", line 186, in lower_assign
val = self.loadvar(value.name)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\pylowering.py", line 555, in loadvar
assert name in self._live_vars, name
AssertionError: array.22
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:/Users/yukoba/.IntelliJIdea2018.3/config/scratches/scratch_6.py", line 10, in <module>
print(main())
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\dispatcher.py", line 367, in _compile_for_args
raise e
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\dispatcher.py", line 324, in _compile_for_args
return self.compile(tuple(argtypes))
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\dispatcher.py", line 655, in compile
cres = self._compiler.compile(args, return_type)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\dispatcher.py", line 82, in compile
pipeline_class=self.pipeline_class)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\compiler.py", line 926, in compile_extra
return pipeline.compile_extra(func)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\compiler.py", line 374, in compile_extra
return self._compile_bytecode()
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\compiler.py", line 857, in _compile_bytecode
return self._compile_core()
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\compiler.py", line 844, in _compile_core
res = pm.run(self.status)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\compiler.py", line 255, in run
raise patched_exception
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\compiler.py", line 246, in run
stage()
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\compiler.py", line 691, in stage_objectmode_backend
self._backend(lowerfn, objectmode=True)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\compiler.py", line 666, in _backend
lowered = lowerfn()
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\compiler.py", line 638, in backend_object_mode
self.flags)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\compiler.py", line 1075, in py_lowering_stage
lower.lower()
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\lowering.py", line 178, in lower
self.lower_normal_function(self.fndesc)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\lowering.py", line 219, in lower_normal_function
entry_block_tail = self.lower_function_body()
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\lowering.py", line 244, in lower_function_body
self.lower_block(block)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\lowering.py", line 259, in lower_block
self.lower_inst(inst)
File "C:\Users\yukoba\Anaconda3\lib\contextlib.py", line 130, in __exit__
self.gen.throw(type, value, traceback)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\errors.py", line 625, in new_error_context
six.reraise(type(newerr), newerr, tb)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\six.py", line 659, in reraise
raise value
numba.errors.LoweringError: Failed in object mode pipeline (step: object mode backend)
array.22
File "scratch_6.py", line 7:
def main():
<source elided>
a = np.array(((1, 2), (3, 4)))
return np.array([x for x in a])
^
[1] During: lowering "$0.13 = array.22" at C:/Users/yukoba/.IntelliJIdea2018.3/config/scratches/scratch_6.py (7)
|
AssertionError
|
def _inline_arraycall(
func_ir, cfg, visited, loop, swapped, enable_prange=False, typed=False
):
"""Look for array(list) call in the exit block of a given loop, and turn list operations into
array operations in the loop if the following conditions are met:
1. The exit block contains an array call on the list;
2. The list variable is no longer live after array call;
3. The list is created in the loop entry block;
4. The loop is created from an range iterator whose length is known prior to the loop;
5. There is only one list_append operation on the list variable in the loop body;
6. The block that contains list_append dominates the loop head, which ensures list
length is the same as loop length;
If any condition check fails, no modification will be made to the incoming IR.
"""
debug_print = _make_debug_print("inline_arraycall")
# There should only be one loop exit
require(len(loop.exits) == 1)
exit_block = next(iter(loop.exits))
list_var, array_call_index, array_kws = _find_arraycall(
func_ir, func_ir.blocks[exit_block]
)
# check if dtype is present in array call
dtype_def = None
dtype_mod_def = None
if "dtype" in array_kws:
require(isinstance(array_kws["dtype"], ir.Var))
# We require that dtype argument to be a constant of getattr Expr, and we'll
# remember its definition for later use.
dtype_def = get_definition(func_ir, array_kws["dtype"])
require(isinstance(dtype_def, ir.Expr) and dtype_def.op == "getattr")
dtype_mod_def = get_definition(func_ir, dtype_def.value)
list_var_def = get_definition(func_ir, list_var)
debug_print("list_var = ", list_var, " def = ", list_var_def)
if isinstance(list_var_def, ir.Expr) and list_var_def.op == "cast":
list_var_def = get_definition(func_ir, list_var_def.value)
# Check if the definition is a build_list
require(isinstance(list_var_def, ir.Expr) and list_var_def.op == "build_list")
# Look for list_append in "last" block in loop body, which should be a block that is
# a post-dominator of the loop header.
list_append_stmts = []
for label in loop.body:
# We have to consider blocks of this loop, but not sub-loops.
# To achieve this, we require the set of "in_loops" of "label" to be visited loops.
in_visited_loops = [l.header in visited for l in cfg.in_loops(label)]
if not all(in_visited_loops):
continue
block = func_ir.blocks[label]
debug_print("check loop body block ", label)
for stmt in block.find_insts(ir.Assign):
lhs = stmt.target
expr = stmt.value
if isinstance(expr, ir.Expr) and expr.op == "call":
func_def = get_definition(func_ir, expr.func)
if (
isinstance(func_def, ir.Expr)
and func_def.op == "getattr"
and func_def.attr == "append"
):
list_def = get_definition(func_ir, func_def.value)
debug_print("list_def = ", list_def, list_def == list_var_def)
if list_def == list_var_def:
# found matching append call
list_append_stmts.append((label, block, stmt))
# Require only one list_append, otherwise we won't know the indices
require(len(list_append_stmts) == 1)
append_block_label, append_block, append_stmt = list_append_stmts[0]
# Check if append_block (besides loop entry) dominates loop header.
# Since CFG doesn't give us this info without loop entry, we approximate
# by checking if the predecessor set of the header block is the same
# as loop_entries plus append_block, which is certainly more restrictive
# than necessary, and can be relaxed if needed.
preds = set(l for l, b in cfg.predecessors(loop.header))
debug_print("preds = ", preds, (loop.entries | set([append_block_label])))
require(preds == (loop.entries | set([append_block_label])))
# Find iterator in loop header
iter_vars = []
iter_first_vars = []
loop_header = func_ir.blocks[loop.header]
for stmt in loop_header.find_insts(ir.Assign):
expr = stmt.value
if isinstance(expr, ir.Expr):
if expr.op == "iternext":
iter_def = get_definition(func_ir, expr.value)
debug_print("iter_def = ", iter_def)
iter_vars.append(expr.value)
elif expr.op == "pair_first":
iter_first_vars.append(stmt.target)
# Require only one iterator in loop header
require(len(iter_vars) == 1 and len(iter_first_vars) == 1)
iter_var = iter_vars[0] # variable that holds the iterator object
iter_first_var = iter_first_vars[0] # variable that holds the value out of iterator
# Final requirement: only one loop entry, and we're going to modify it by:
# 1. replacing the list definition with an array definition;
# 2. adding a counter for the array iteration.
require(len(loop.entries) == 1)
loop_entry = func_ir.blocks[next(iter(loop.entries))]
terminator = loop_entry.terminator
scope = loop_entry.scope
loc = loop_entry.loc
stmts = []
removed = []
def is_removed(val, removed):
if isinstance(val, ir.Var):
for x in removed:
if x.name == val.name:
return True
return False
# Skip list construction and skip terminator, add the rest to stmts
for i in range(len(loop_entry.body) - 1):
stmt = loop_entry.body[i]
if isinstance(stmt, ir.Assign) and (
stmt.value == list_def or is_removed(stmt.value, removed)
):
removed.append(stmt.target)
else:
stmts.append(stmt)
debug_print("removed variables: ", removed)
# Define an index_var to index the array.
# If the range happens to be single step ranges like range(n), or range(m, n),
# then the index_var correlates to iterator index; otherwise we'll have to
# define a new counter.
range_def = guard(_find_iter_range, func_ir, iter_var, swapped)
index_var = ir.Var(scope, mk_unique_var("index"), loc)
if range_def and range_def[0] == 0:
# iterator starts with 0, index_var can just be iter_first_var
index_var = iter_first_var
else:
# index_var = -1 # starting the index with -1 since it will incremented in loop header
stmts.append(
_new_definition(func_ir, index_var, ir.Const(value=-1, loc=loc), loc)
)
# Insert statement to get the size of the loop iterator
size_var = ir.Var(scope, mk_unique_var("size"), loc)
if range_def:
start, stop, range_func_def = range_def
if start == 0:
size_val = stop
else:
size_val = ir.Expr.binop(fn=operator.sub, lhs=stop, rhs=start, loc=loc)
# we can parallelize this loop if enable_prange = True, by changing
# range function from range, to prange.
if enable_prange and isinstance(range_func_def, ir.Global):
range_func_def.name = "internal_prange"
range_func_def.value = internal_prange
else:
# this doesn't work in objmode as it's effectively untyped
if typed:
len_func_var = ir.Var(scope, mk_unique_var("len_func"), loc)
stmts.append(
_new_definition(
func_ir,
len_func_var,
ir.Global("range_iter_len", range_iter_len, loc=loc),
loc,
)
)
size_val = ir.Expr.call(len_func_var, (iter_var,), (), loc=loc)
else:
raise GuardException
stmts.append(_new_definition(func_ir, size_var, size_val, loc))
size_tuple_var = ir.Var(scope, mk_unique_var("size_tuple"), loc)
stmts.append(
_new_definition(
func_ir, size_tuple_var, ir.Expr.build_tuple(items=[size_var], loc=loc), loc
)
)
# Insert array allocation
array_var = ir.Var(scope, mk_unique_var("array"), loc)
empty_func = ir.Var(scope, mk_unique_var("empty_func"), loc)
if dtype_def and dtype_mod_def:
# when dtype is present, we'll call emtpy with dtype
dtype_mod_var = ir.Var(scope, mk_unique_var("dtype_mod"), loc)
dtype_var = ir.Var(scope, mk_unique_var("dtype"), loc)
stmts.append(_new_definition(func_ir, dtype_mod_var, dtype_mod_def, loc))
stmts.append(
_new_definition(
func_ir,
dtype_var,
ir.Expr.getattr(dtype_mod_var, dtype_def.attr, loc),
loc,
)
)
stmts.append(
_new_definition(
func_ir, empty_func, ir.Global("empty", np.empty, loc=loc), loc
)
)
array_kws = [("dtype", dtype_var)]
else:
# this doesn't work in objmode as it's effectively untyped
if typed:
# otherwise we'll call unsafe_empty_inferred
stmts.append(
_new_definition(
func_ir,
empty_func,
ir.Global("unsafe_empty_inferred", unsafe_empty_inferred, loc=loc),
loc,
)
)
array_kws = []
else:
raise GuardException
# array_var = empty_func(size_tuple_var)
stmts.append(
_new_definition(
func_ir,
array_var,
ir.Expr.call(empty_func, (size_tuple_var,), list(array_kws), loc=loc),
loc,
)
)
# Add back removed just in case they are used by something else
for var in removed:
stmts.append(_new_definition(func_ir, var, array_var, loc))
# Add back terminator
stmts.append(terminator)
# Modify loop_entry
loop_entry.body = stmts
if range_def:
if range_def[0] != 0:
# when range doesn't start from 0, index_var becomes loop index
# (iter_first_var) minus an offset (range_def[0])
terminator = loop_header.terminator
assert isinstance(terminator, ir.Branch)
# find the block in the loop body that header jumps to
block_id = terminator.truebr
blk = func_ir.blocks[block_id]
loc = blk.loc
blk.body.insert(
0,
_new_definition(
func_ir,
index_var,
ir.Expr.binop(
fn=operator.sub, lhs=iter_first_var, rhs=range_def[0], loc=loc
),
loc,
),
)
else:
# Insert index_var increment to the end of loop header
loc = loop_header.loc
terminator = loop_header.terminator
stmts = loop_header.body[0:-1]
next_index_var = ir.Var(scope, mk_unique_var("next_index"), loc)
one = ir.Var(scope, mk_unique_var("one"), loc)
# one = 1
stmts.append(_new_definition(func_ir, one, ir.Const(value=1, loc=loc), loc))
# next_index_var = index_var + 1
stmts.append(
_new_definition(
func_ir,
next_index_var,
ir.Expr.binop(fn=operator.add, lhs=index_var, rhs=one, loc=loc),
loc,
)
)
# index_var = next_index_var
stmts.append(_new_definition(func_ir, index_var, next_index_var, loc))
stmts.append(terminator)
loop_header.body = stmts
# In append_block, change list_append into array assign
for i in range(len(append_block.body)):
if append_block.body[i] == append_stmt:
debug_print("Replace append with SetItem")
append_block.body[i] = ir.SetItem(
target=array_var,
index=index_var,
value=append_stmt.value.args[0],
loc=append_stmt.loc,
)
# replace array call, by changing "a = array(b)" to "a = b"
stmt = func_ir.blocks[exit_block].body[array_call_index]
# stmt can be either array call or SetItem, we only replace array call
if isinstance(stmt, ir.Assign) and isinstance(stmt.value, ir.Expr):
stmt.value = array_var
func_ir._definitions[stmt.target.name] = [stmt.value]
return True
|
def _inline_arraycall(func_ir, cfg, visited, loop, swapped, enable_prange=False):
"""Look for array(list) call in the exit block of a given loop, and turn list operations into
array operations in the loop if the following conditions are met:
1. The exit block contains an array call on the list;
2. The list variable is no longer live after array call;
3. The list is created in the loop entry block;
4. The loop is created from an range iterator whose length is known prior to the loop;
5. There is only one list_append operation on the list variable in the loop body;
6. The block that contains list_append dominates the loop head, which ensures list
length is the same as loop length;
If any condition check fails, no modification will be made to the incoming IR.
"""
debug_print = _make_debug_print("inline_arraycall")
# There should only be one loop exit
require(len(loop.exits) == 1)
exit_block = next(iter(loop.exits))
list_var, array_call_index, array_kws = _find_arraycall(
func_ir, func_ir.blocks[exit_block]
)
# check if dtype is present in array call
dtype_def = None
dtype_mod_def = None
if "dtype" in array_kws:
require(isinstance(array_kws["dtype"], ir.Var))
# We require that dtype argument to be a constant of getattr Expr, and we'll
# remember its definition for later use.
dtype_def = get_definition(func_ir, array_kws["dtype"])
require(isinstance(dtype_def, ir.Expr) and dtype_def.op == "getattr")
dtype_mod_def = get_definition(func_ir, dtype_def.value)
list_var_def = get_definition(func_ir, list_var)
debug_print("list_var = ", list_var, " def = ", list_var_def)
if isinstance(list_var_def, ir.Expr) and list_var_def.op == "cast":
list_var_def = get_definition(func_ir, list_var_def.value)
# Check if the definition is a build_list
require(isinstance(list_var_def, ir.Expr) and list_var_def.op == "build_list")
# Look for list_append in "last" block in loop body, which should be a block that is
# a post-dominator of the loop header.
list_append_stmts = []
for label in loop.body:
# We have to consider blocks of this loop, but not sub-loops.
# To achieve this, we require the set of "in_loops" of "label" to be visited loops.
in_visited_loops = [l.header in visited for l in cfg.in_loops(label)]
if not all(in_visited_loops):
continue
block = func_ir.blocks[label]
debug_print("check loop body block ", label)
for stmt in block.find_insts(ir.Assign):
lhs = stmt.target
expr = stmt.value
if isinstance(expr, ir.Expr) and expr.op == "call":
func_def = get_definition(func_ir, expr.func)
if (
isinstance(func_def, ir.Expr)
and func_def.op == "getattr"
and func_def.attr == "append"
):
list_def = get_definition(func_ir, func_def.value)
debug_print("list_def = ", list_def, list_def == list_var_def)
if list_def == list_var_def:
# found matching append call
list_append_stmts.append((label, block, stmt))
# Require only one list_append, otherwise we won't know the indices
require(len(list_append_stmts) == 1)
append_block_label, append_block, append_stmt = list_append_stmts[0]
# Check if append_block (besides loop entry) dominates loop header.
# Since CFG doesn't give us this info without loop entry, we approximate
# by checking if the predecessor set of the header block is the same
# as loop_entries plus append_block, which is certainly more restrictive
# than necessary, and can be relaxed if needed.
preds = set(l for l, b in cfg.predecessors(loop.header))
debug_print("preds = ", preds, (loop.entries | set([append_block_label])))
require(preds == (loop.entries | set([append_block_label])))
# Find iterator in loop header
iter_vars = []
iter_first_vars = []
loop_header = func_ir.blocks[loop.header]
for stmt in loop_header.find_insts(ir.Assign):
expr = stmt.value
if isinstance(expr, ir.Expr):
if expr.op == "iternext":
iter_def = get_definition(func_ir, expr.value)
debug_print("iter_def = ", iter_def)
iter_vars.append(expr.value)
elif expr.op == "pair_first":
iter_first_vars.append(stmt.target)
# Require only one iterator in loop header
require(len(iter_vars) == 1 and len(iter_first_vars) == 1)
iter_var = iter_vars[0] # variable that holds the iterator object
iter_first_var = iter_first_vars[0] # variable that holds the value out of iterator
# Final requirement: only one loop entry, and we're going to modify it by:
# 1. replacing the list definition with an array definition;
# 2. adding a counter for the array iteration.
require(len(loop.entries) == 1)
loop_entry = func_ir.blocks[next(iter(loop.entries))]
terminator = loop_entry.terminator
scope = loop_entry.scope
loc = loop_entry.loc
stmts = []
removed = []
def is_removed(val, removed):
if isinstance(val, ir.Var):
for x in removed:
if x.name == val.name:
return True
return False
# Skip list construction and skip terminator, add the rest to stmts
for i in range(len(loop_entry.body) - 1):
stmt = loop_entry.body[i]
if isinstance(stmt, ir.Assign) and (
stmt.value == list_def or is_removed(stmt.value, removed)
):
removed.append(stmt.target)
else:
stmts.append(stmt)
debug_print("removed variables: ", removed)
# Define an index_var to index the array.
# If the range happens to be single step ranges like range(n), or range(m, n),
# then the index_var correlates to iterator index; otherwise we'll have to
# define a new counter.
range_def = guard(_find_iter_range, func_ir, iter_var, swapped)
index_var = ir.Var(scope, mk_unique_var("index"), loc)
if range_def and range_def[0] == 0:
# iterator starts with 0, index_var can just be iter_first_var
index_var = iter_first_var
else:
# index_var = -1 # starting the index with -1 since it will incremented in loop header
stmts.append(
_new_definition(func_ir, index_var, ir.Const(value=-1, loc=loc), loc)
)
# Insert statement to get the size of the loop iterator
size_var = ir.Var(scope, mk_unique_var("size"), loc)
if range_def:
start, stop, range_func_def = range_def
if start == 0:
size_val = stop
else:
size_val = ir.Expr.binop(fn=operator.sub, lhs=stop, rhs=start, loc=loc)
# we can parallelize this loop if enable_prange = True, by changing
# range function from range, to prange.
if enable_prange and isinstance(range_func_def, ir.Global):
range_func_def.name = "internal_prange"
range_func_def.value = internal_prange
else:
len_func_var = ir.Var(scope, mk_unique_var("len_func"), loc)
stmts.append(
_new_definition(
func_ir,
len_func_var,
ir.Global("range_iter_len", range_iter_len, loc=loc),
loc,
)
)
size_val = ir.Expr.call(len_func_var, (iter_var,), (), loc=loc)
stmts.append(_new_definition(func_ir, size_var, size_val, loc))
size_tuple_var = ir.Var(scope, mk_unique_var("size_tuple"), loc)
stmts.append(
_new_definition(
func_ir, size_tuple_var, ir.Expr.build_tuple(items=[size_var], loc=loc), loc
)
)
# Insert array allocation
array_var = ir.Var(scope, mk_unique_var("array"), loc)
empty_func = ir.Var(scope, mk_unique_var("empty_func"), loc)
if dtype_def and dtype_mod_def:
# when dtype is present, we'll call emtpy with dtype
dtype_mod_var = ir.Var(scope, mk_unique_var("dtype_mod"), loc)
dtype_var = ir.Var(scope, mk_unique_var("dtype"), loc)
stmts.append(_new_definition(func_ir, dtype_mod_var, dtype_mod_def, loc))
stmts.append(
_new_definition(
func_ir,
dtype_var,
ir.Expr.getattr(dtype_mod_var, dtype_def.attr, loc),
loc,
)
)
stmts.append(
_new_definition(
func_ir, empty_func, ir.Global("empty", np.empty, loc=loc), loc
)
)
array_kws = [("dtype", dtype_var)]
else:
# otherwise we'll call unsafe_empty_inferred
stmts.append(
_new_definition(
func_ir,
empty_func,
ir.Global("unsafe_empty_inferred", unsafe_empty_inferred, loc=loc),
loc,
)
)
array_kws = []
# array_var = empty_func(size_tuple_var)
stmts.append(
_new_definition(
func_ir,
array_var,
ir.Expr.call(empty_func, (size_tuple_var,), list(array_kws), loc=loc),
loc,
)
)
# Add back removed just in case they are used by something else
for var in removed:
stmts.append(_new_definition(func_ir, var, array_var, loc))
# Add back terminator
stmts.append(terminator)
# Modify loop_entry
loop_entry.body = stmts
if range_def:
if range_def[0] != 0:
# when range doesn't start from 0, index_var becomes loop index
# (iter_first_var) minus an offset (range_def[0])
terminator = loop_header.terminator
assert isinstance(terminator, ir.Branch)
# find the block in the loop body that header jumps to
block_id = terminator.truebr
blk = func_ir.blocks[block_id]
loc = blk.loc
blk.body.insert(
0,
_new_definition(
func_ir,
index_var,
ir.Expr.binop(
fn=operator.sub, lhs=iter_first_var, rhs=range_def[0], loc=loc
),
loc,
),
)
else:
# Insert index_var increment to the end of loop header
loc = loop_header.loc
terminator = loop_header.terminator
stmts = loop_header.body[0:-1]
next_index_var = ir.Var(scope, mk_unique_var("next_index"), loc)
one = ir.Var(scope, mk_unique_var("one"), loc)
# one = 1
stmts.append(_new_definition(func_ir, one, ir.Const(value=1, loc=loc), loc))
# next_index_var = index_var + 1
stmts.append(
_new_definition(
func_ir,
next_index_var,
ir.Expr.binop(fn=operator.add, lhs=index_var, rhs=one, loc=loc),
loc,
)
)
# index_var = next_index_var
stmts.append(_new_definition(func_ir, index_var, next_index_var, loc))
stmts.append(terminator)
loop_header.body = stmts
# In append_block, change list_append into array assign
for i in range(len(append_block.body)):
if append_block.body[i] == append_stmt:
debug_print("Replace append with SetItem")
append_block.body[i] = ir.SetItem(
target=array_var,
index=index_var,
value=append_stmt.value.args[0],
loc=append_stmt.loc,
)
# replace array call, by changing "a = array(b)" to "a = b"
stmt = func_ir.blocks[exit_block].body[array_call_index]
# stmt can be either array call or SetItem, we only replace array call
if isinstance(stmt, ir.Assign) and isinstance(stmt.value, ir.Expr):
stmt.value = array_var
func_ir._definitions[stmt.target.name] = [stmt.value]
return True
|
https://github.com/numba/numba/issues/3659
|
Traceback (most recent call last):
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\errors.py", line 617, in new_error_context
yield
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\lowering.py", line 259, in lower_block
self.lower_inst(inst)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\pylowering.py", line 96, in lower_inst
value = self.lower_assign(inst)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\pylowering.py", line 186, in lower_assign
val = self.loadvar(value.name)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\pylowering.py", line 555, in loadvar
assert name in self._live_vars, name
AssertionError: array.22
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:/Users/yukoba/.IntelliJIdea2018.3/config/scratches/scratch_6.py", line 10, in <module>
print(main())
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\dispatcher.py", line 367, in _compile_for_args
raise e
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\dispatcher.py", line 324, in _compile_for_args
return self.compile(tuple(argtypes))
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\dispatcher.py", line 655, in compile
cres = self._compiler.compile(args, return_type)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\dispatcher.py", line 82, in compile
pipeline_class=self.pipeline_class)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\compiler.py", line 926, in compile_extra
return pipeline.compile_extra(func)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\compiler.py", line 374, in compile_extra
return self._compile_bytecode()
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\compiler.py", line 857, in _compile_bytecode
return self._compile_core()
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\compiler.py", line 844, in _compile_core
res = pm.run(self.status)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\compiler.py", line 255, in run
raise patched_exception
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\compiler.py", line 246, in run
stage()
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\compiler.py", line 691, in stage_objectmode_backend
self._backend(lowerfn, objectmode=True)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\compiler.py", line 666, in _backend
lowered = lowerfn()
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\compiler.py", line 638, in backend_object_mode
self.flags)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\compiler.py", line 1075, in py_lowering_stage
lower.lower()
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\lowering.py", line 178, in lower
self.lower_normal_function(self.fndesc)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\lowering.py", line 219, in lower_normal_function
entry_block_tail = self.lower_function_body()
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\lowering.py", line 244, in lower_function_body
self.lower_block(block)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\lowering.py", line 259, in lower_block
self.lower_inst(inst)
File "C:\Users\yukoba\Anaconda3\lib\contextlib.py", line 130, in __exit__
self.gen.throw(type, value, traceback)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\errors.py", line 625, in new_error_context
six.reraise(type(newerr), newerr, tb)
File "C:\Users\yukoba\Anaconda3\lib\site-packages\numba\six.py", line 659, in reraise
raise value
numba.errors.LoweringError: Failed in object mode pipeline (step: object mode backend)
array.22
File "scratch_6.py", line 7:
def main():
<source elided>
a = np.array(((1, 2), (3, 4)))
return np.array([x for x in a])
^
[1] During: lowering "$0.13 = array.22" at C:/Users/yukoba/.IntelliJIdea2018.3/config/scratches/scratch_6.py (7)
|
AssertionError
|
def has_no_side_effect(rhs, lives, call_table):
"""Returns True if this expression has no side effects that
would prevent re-ordering.
"""
if isinstance(rhs, ir.Expr) and rhs.op == "call":
func_name = rhs.func.name
if func_name not in call_table or call_table[func_name] == []:
return False
call_list = call_table[func_name]
if (
call_list == ["empty", numpy]
or call_list == [slice]
or call_list == ["stencil", numba]
or call_list == ["log", numpy]
or call_list == ["dtype", numpy]
or call_list == [numba.array_analysis.wrap_index]
):
return True
elif isinstance(call_list[0], numba.extending._Intrinsic) and (
call_list[0]._name == "empty_inferred"
or call_list[0]._name == "unsafe_empty_inferred"
):
return True
from numba.targets.registry import CPUDispatcher
from numba.targets.linalg import dot_3_mv_check_args
if isinstance(call_list[0], CPUDispatcher):
py_func = call_list[0].py_func
if py_func == dot_3_mv_check_args:
return True
for f in remove_call_handlers:
if f(rhs, lives, call_list):
return True
return False
if isinstance(rhs, ir.Expr) and rhs.op == "inplace_binop":
return rhs.lhs.name not in lives
if isinstance(rhs, ir.Yield):
return False
if isinstance(rhs, ir.Expr) and rhs.op == "pair_first":
# don't remove pair_first since prange looks for it
return False
return True
|
def has_no_side_effect(rhs, lives, call_table):
"""Returns True if this expression has no side effects that
would prevent re-ordering.
"""
if isinstance(rhs, ir.Expr) and rhs.op == "call":
func_name = rhs.func.name
if func_name not in call_table or call_table[func_name] == []:
return False
call_list = call_table[func_name]
if (
call_list == ["empty", numpy]
or call_list == [slice]
or call_list == ["stencil", numba]
or call_list == ["log", numpy]
or call_list == [numba.array_analysis.wrap_index]
):
return True
elif isinstance(call_list[0], numba.extending._Intrinsic) and (
call_list[0]._name == "empty_inferred"
or call_list[0]._name == "unsafe_empty_inferred"
):
return True
from numba.targets.registry import CPUDispatcher
from numba.targets.linalg import dot_3_mv_check_args
if isinstance(call_list[0], CPUDispatcher):
py_func = call_list[0].py_func
if py_func == dot_3_mv_check_args:
return True
for f in remove_call_handlers:
if f(rhs, lives, call_list):
return True
return False
if isinstance(rhs, ir.Expr) and rhs.op == "inplace_binop":
return rhs.lhs.name not in lives
if isinstance(rhs, ir.Yield):
return False
if isinstance(rhs, ir.Expr) and rhs.op == "pair_first":
# don't remove pair_first since prange looks for it
return False
return True
|
https://github.com/numba/numba/issues/3066
|
Traceback (most recent call last):
File "./prange.py", line 10, in <module>
func(np.ones(10))
File "/home/bmerry/work/sdp/env3/lib/python3.5/site-packages/numba/dispatcher.py", line 344, in _compile_for_args
reraise(type(e), e, None)
File "/home/bmerry/work/sdp/env3/lib/python3.5/site-packages/numba/six.py", line 658, in reraise
raise value.with_traceback(tb)
numba.errors.TypingError: Failed at nopython (nopython mode backend)
Failed at nopython (nopython frontend)
Unknown attribute 'type' of type class(float64)
File "prange.py", line 8:
def func(a):
<source elided>
for i in numba.prange(a.shape[0]):
a[i] = a.dtype.type(0)
^
[1] During: typing of get attribute at ./prange.py (8)
File "prange.py", line 8:
def func(a):
<source elided>
for i in numba.prange(a.shape[0]):
a[i] = a.dtype.type(0)
^
[1] During: lowering "id=0[LoopNest(index_variable = parfor_index.5, range = (0, $3.6, 1))]{26: <ir.Block at ./prange.py (7)>}Var(parfor_index.5, ./prange.py (7))" at ./prange.py (7)
|
numba.errors.TypingError
|
def _replace_parallel_functions(self, blocks):
"""
Replace functions with their parallel implemntation in
replace_functions_map if available.
The implementation code is inlined to enable more optimization.
"""
from numba.inline_closurecall import inline_closure_call
work_list = list(blocks.items())
while work_list:
label, block = work_list.pop()
for i, instr in enumerate(block.body):
if isinstance(instr, ir.Assign):
lhs = instr.target
lhs_typ = self.typemap[lhs.name]
expr = instr.value
if isinstance(expr, ir.Expr) and expr.op == "call":
# Try inline known calls with their parallel implementations
def replace_func():
func_def = get_definition(self.func_ir, expr.func)
callname = find_callname(self.func_ir, expr)
repl_func = replace_functions_map.get(callname, None)
require(repl_func != None)
typs = tuple(self.typemap[x.name] for x in expr.args)
try:
new_func = repl_func(lhs_typ, *typs)
except:
new_func = None
require(new_func != None)
g = copy.copy(self.func_ir.func_id.func.__globals__)
g["numba"] = numba
g["np"] = numpy
g["math"] = math
# inline the parallel implementation
inline_closure_call(
self.func_ir,
g,
block,
i,
new_func,
self.typingctx,
typs,
self.typemap,
self.calltypes,
work_list,
)
return True
if guard(replace_func):
break
elif (
isinstance(expr, ir.Expr)
and expr.op == "getattr"
and expr.attr == "dtype"
):
# Replace getattr call "A.dtype" with numpy.dtype(<actual type>).
# This helps remove superfluous dependencies from parfor.
typ = self.typemap[expr.value.name]
if isinstance(typ, types.npytypes.Array):
# Convert A.dtype to four statements.
# 1) Get numpy global.
# 2) Create var for known type of array, e.g., numpy.float64
# 3) Get dtype function from numpy module.
# 4) Create var for numpy.dtype(var from #2).
# Create var for numpy module.
dtype = typ.dtype
scope = block.scope
loc = instr.loc
g_np_var = ir.Var(scope, mk_unique_var("$np_g_var"), loc)
self.typemap[g_np_var.name] = types.misc.Module(numpy)
g_np = ir.Global("np", numpy, loc)
g_np_assign = ir.Assign(g_np, g_np_var, loc)
# Create var for type infered type of the array, e.g., numpy.float64.
typ_var = ir.Var(scope, mk_unique_var("$np_typ_var"), loc)
self.typemap[typ_var.name] = types.functions.NumberClass(dtype)
dtype_str = str(dtype)
if dtype_str == "bool":
dtype_str = "bool_"
np_typ_getattr = ir.Expr.getattr(g_np_var, dtype_str, loc)
typ_var_assign = ir.Assign(np_typ_getattr, typ_var, loc)
# Get the dtype function from the numpy module.
dtype_attr_var = ir.Var(
scope, mk_unique_var("$dtype_attr_var"), loc
)
temp = find_template(numpy.dtype)
tfunc = numba.types.Function(temp)
tfunc.get_call_type(
self.typingctx, (self.typemap[typ_var.name],), {}
)
self.typemap[dtype_attr_var.name] = types.functions.Function(
temp
)
dtype_attr_getattr = ir.Expr.getattr(g_np_var, "dtype", loc)
dtype_attr_assign = ir.Assign(
dtype_attr_getattr, dtype_attr_var, loc
)
# Call numpy.dtype on the statically coded type two steps above.
dtype_var = ir.Var(scope, mk_unique_var("$dtype_var"), loc)
self.typemap[dtype_var.name] = types.npytypes.DType(dtype)
dtype_getattr = ir.Expr.call(dtype_attr_var, [typ_var], (), loc)
dtype_assign = ir.Assign(dtype_getattr, dtype_var, loc)
self.calltypes[dtype_getattr] = signature(
self.typemap[dtype_var.name], self.typemap[typ_var.name]
)
# The original A.dtype rhs is replaced with result of this call.
instr.value = dtype_var
# Add statements to body of the code.
block.body.insert(0, dtype_assign)
block.body.insert(0, dtype_attr_assign)
block.body.insert(0, typ_var_assign)
block.body.insert(0, g_np_assign)
break
|
def _replace_parallel_functions(self, blocks):
"""
Replace functions with their parallel implemntation in
replace_functions_map if available.
The implementation code is inlined to enable more optimization.
"""
from numba.inline_closurecall import inline_closure_call
work_list = list(blocks.items())
while work_list:
label, block = work_list.pop()
for i, instr in enumerate(block.body):
if isinstance(instr, ir.Assign):
lhs = instr.target
lhs_typ = self.typemap[lhs.name]
expr = instr.value
if isinstance(expr, ir.Expr) and expr.op == "call":
# Try inline known calls with their parallel implementations
def replace_func():
func_def = get_definition(self.func_ir, expr.func)
callname = find_callname(self.func_ir, expr)
repl_func = replace_functions_map.get(callname, None)
require(repl_func != None)
typs = tuple(self.typemap[x.name] for x in expr.args)
try:
new_func = repl_func(lhs_typ, *typs)
except:
new_func = None
require(new_func != None)
g = copy.copy(self.func_ir.func_id.func.__globals__)
g["numba"] = numba
g["np"] = numpy
g["math"] = math
# inline the parallel implementation
inline_closure_call(
self.func_ir,
g,
block,
i,
new_func,
self.typingctx,
typs,
self.typemap,
self.calltypes,
work_list,
)
return True
if guard(replace_func):
break
elif (
isinstance(expr, ir.Expr)
and expr.op == "getattr"
and expr.attr == "dtype"
):
# Replace getattr call "A.dtype" with the actual type itself.
# This helps remove superfulous dependencies from parfor.
typ = self.typemap[expr.value.name]
if isinstance(typ, types.npytypes.Array):
dtype = typ.dtype
scope = block.scope
loc = instr.loc
g_np_var = ir.Var(scope, mk_unique_var("$np_g_var"), loc)
self.typemap[g_np_var.name] = types.misc.Module(numpy)
g_np = ir.Global("np", numpy, loc)
g_np_assign = ir.Assign(g_np, g_np_var, loc)
typ_var = ir.Var(scope, mk_unique_var("$np_typ_var"), loc)
self.typemap[typ_var.name] = types.DType(dtype)
dtype_str = str(dtype)
if dtype_str == "bool":
dtype_str = "bool_"
np_typ_getattr = ir.Expr.getattr(g_np_var, dtype_str, loc)
typ_var_assign = ir.Assign(np_typ_getattr, typ_var, loc)
instr.value = typ_var
block.body.insert(0, typ_var_assign)
block.body.insert(0, g_np_assign)
break
|
https://github.com/numba/numba/issues/3066
|
Traceback (most recent call last):
File "./prange.py", line 10, in <module>
func(np.ones(10))
File "/home/bmerry/work/sdp/env3/lib/python3.5/site-packages/numba/dispatcher.py", line 344, in _compile_for_args
reraise(type(e), e, None)
File "/home/bmerry/work/sdp/env3/lib/python3.5/site-packages/numba/six.py", line 658, in reraise
raise value.with_traceback(tb)
numba.errors.TypingError: Failed at nopython (nopython mode backend)
Failed at nopython (nopython frontend)
Unknown attribute 'type' of type class(float64)
File "prange.py", line 8:
def func(a):
<source elided>
for i in numba.prange(a.shape[0]):
a[i] = a.dtype.type(0)
^
[1] During: typing of get attribute at ./prange.py (8)
File "prange.py", line 8:
def func(a):
<source elided>
for i in numba.prange(a.shape[0]):
a[i] = a.dtype.type(0)
^
[1] During: lowering "id=0[LoopNest(index_variable = parfor_index.5, range = (0, $3.6, 1))]{26: <ir.Block at ./prange.py (7)>}Var(parfor_index.5, ./prange.py (7))" at ./prange.py (7)
|
numba.errors.TypingError
|
def find_file(pat, libdir=None):
if libdir is None:
libdirs = get_lib_dirs()
elif isinstance(libdir, str):
libdirs = [
libdir,
]
else:
libdirs = list(libdir)
files = []
for ldir in libdirs:
entries = os.listdir(ldir)
candidates = [os.path.join(ldir, ent) for ent in entries if pat.match(ent)]
files.extend([c for c in candidates if os.path.isfile(c)])
return files
|
def find_file(pat, libdir=None):
if libdir is None:
libdirs = get_lib_dirs()
else:
libdirs = list(libdir)
files = []
for ldir in libdirs:
entries = os.listdir(ldir)
candidates = [os.path.join(ldir, ent) for ent in entries if pat.match(ent)]
files.extend([c for c in candidates if os.path.isfile(c)])
return files
|
https://github.com/numba/numba/issues/3831
|
Traceback (most recent call last):
File "cuda_issue.py", line 4, in <module>
@cuda.jit('(int64, float32)')
File "numba/numba/cuda/decorators.py", line 98, in kernel_jit
kernel.bind()
File "numba/numba/cuda/compiler.py", line 503, in bind
self._func.get()
File "numba/numba/cuda/compiler.py", line 381, in get
ptx = self.ptx.get()
File "numba/numba/cuda/compiler.py", line 352, in get
**self._extra_options)
File "numba/numba/cuda/cudadrv/nvvm.py", line 475, in llvm_to_ptx
libdevice = LibDevice(arch=opts.get('arch', 'compute_20'))
File "numba/numba/cuda/cudadrv/nvvm.py", line 350, in __init__
if get_libdevice(arch) is None:
File "numba/numba/cuda/cudadrv/libs.py", line 22, in get_libdevice
candidates = find_file(re.compile(pat), libdir)
File "numba/numba/findlib.py", line 45, in find_file
entries = os.listdir(ldir)
FileNotFoundError: [Errno 2] No such file or directory: 'u'
|
FileNotFoundError
|
def get_generator_type(self, typdict, retty):
gi = self.generator_info
arg_types = [None] * len(self.arg_names)
for index, name in self.arg_names.items():
arg_types[index] = typdict[name]
state_types = [typdict[var_name] for var_name in gi.state_vars]
yield_types = [typdict[y.inst.value.name] for y in gi.get_yield_points()]
if not yield_types:
msg = "Cannot type generator: it does not yield any value"
raise TypingError(msg)
yield_type = self.context.unify_types(*yield_types)
if yield_type is None or isinstance(yield_type, types.Optional):
msg = "Cannot type generator: cannot unify yielded types %s"
yp_highlights = []
for y in gi.get_yield_points():
msg = _termcolor.errmsg("Yield of: IR '%s', type '%s', location: %s")
yp_highlights.append(
msg % (str(y.inst), typdict[y.inst.value.name], y.inst.loc.strformat())
)
explain_ty = set()
for ty in yield_types:
if isinstance(ty, types.Optional):
explain_ty.add(ty.type)
explain_ty.add(types.NoneType("none"))
else:
explain_ty.add(ty)
raise TypingError(
"Can't unify yield type from the "
"following types: %s"
% ", ".join(sorted(map(str, explain_ty)))
+ "\n\n"
+ "\n".join(yp_highlights)
)
return types.Generator(
self.func_id.func, yield_type, arg_types, state_types, has_finalizer=True
)
|
def get_generator_type(self, typdict, retty):
gi = self.generator_info
arg_types = [None] * len(self.arg_names)
for index, name in self.arg_names.items():
arg_types[index] = typdict[name]
state_types = [typdict[var_name] for var_name in gi.state_vars]
yield_types = [typdict[y.inst.value.name] for y in gi.get_yield_points()]
if not yield_types:
msg = "Cannot type generator: it does not yield any value"
raise TypingError(msg)
yield_type = self.context.unify_types(*yield_types)
if yield_type is None:
msg = "Cannot type generator: cannot unify yielded types %s"
raise TypingError(msg % (yield_types,))
return types.Generator(
self.func_id.func, yield_type, arg_types, state_types, has_finalizer=True
)
|
https://github.com/numba/numba/issues/3779
|
Traceback (most recent call last):
File "/Users/ndv/Library/Python/3.6/lib/python/site-packages/numba/errors.py", line 617, in new_error_context
yield
File "/Users/ndv/Library/Python/3.6/lib/python/site-packages/numba/lowering.py", line 259, in lower_block
self.lower_inst(inst)
File "/Users/ndv/Library/Python/3.6/lib/python/site-packages/numba/lowering.py", line 308, in lower_inst
val = self.lower_assign(ty, inst)
File "/Users/ndv/Library/Python/3.6/lib/python/site-packages/numba/lowering.py", line 482, in lower_assign
res = self.lower_yield(ty, value)
File "/Users/ndv/Library/Python/3.6/lib/python/site-packages/numba/lowering.py", line 505, in lower_yield
self.call_conv.return_value(self.builder, retval)
File "/Users/ndv/Library/Python/3.6/lib/python/site-packages/numba/targets/callconv.py", line 351, in return_value
(str(retval.type), str(retptr.type.pointee))
AssertionError: ('{i64, i1}', 'i64')
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "minrep.py", line 8, in <module>
g = test()
File "/Users/ndv/Library/Python/3.6/lib/python/site-packages/numba/dispatcher.py", line 367, in _compile_for_args
raise e
File "/Users/ndv/Library/Python/3.6/lib/python/site-packages/numba/dispatcher.py", line 324, in _compile_for_args
return self.compile(tuple(argtypes))
File "/Users/ndv/Library/Python/3.6/lib/python/site-packages/numba/compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "/Users/ndv/Library/Python/3.6/lib/python/site-packages/numba/dispatcher.py", line 655, in compile
cres = self._compiler.compile(args, return_type)
File "/Users/ndv/Library/Python/3.6/lib/python/site-packages/numba/dispatcher.py", line 82, in compile
pipeline_class=self.pipeline_class)
File "/Users/ndv/Library/Python/3.6/lib/python/site-packages/numba/compiler.py", line 926, in compile_extra
return pipeline.compile_extra(func)
File "/Users/ndv/Library/Python/3.6/lib/python/site-packages/numba/compiler.py", line 374, in compile_extra
return self._compile_bytecode()
File "/Users/ndv/Library/Python/3.6/lib/python/site-packages/numba/compiler.py", line 857, in _compile_bytecode
return self._compile_core()
File "/Users/ndv/Library/Python/3.6/lib/python/site-packages/numba/compiler.py", line 844, in _compile_core
res = pm.run(self.status)
File "/Users/ndv/Library/Python/3.6/lib/python/site-packages/numba/compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "/Users/ndv/Library/Python/3.6/lib/python/site-packages/numba/compiler.py", line 255, in run
raise patched_exception
File "/Users/ndv/Library/Python/3.6/lib/python/site-packages/numba/compiler.py", line 246, in run
stage()
File "/Users/ndv/Library/Python/3.6/lib/python/site-packages/numba/compiler.py", line 717, in stage_nopython_backend
self._backend(lowerfn, objectmode=False)
File "/Users/ndv/Library/Python/3.6/lib/python/site-packages/numba/compiler.py", line 666, in _backend
lowered = lowerfn()
File "/Users/ndv/Library/Python/3.6/lib/python/site-packages/numba/compiler.py", line 653, in backend_nopython_mode
self.metadata)
File "/Users/ndv/Library/Python/3.6/lib/python/site-packages/numba/compiler.py", line 1048, in native_lowering_stage
lower.lower()
File "/Users/ndv/Library/Python/3.6/lib/python/site-packages/numba/lowering.py", line 184, in lower
self.genlower.lower_next_func(self)
File "/Users/ndv/Library/Python/3.6/lib/python/site-packages/numba/generators.py", line 153, in lower_next_func
entry_block_tail = lower.lower_function_body()
File "/Users/ndv/Library/Python/3.6/lib/python/site-packages/numba/lowering.py", line 244, in lower_function_body
self.lower_block(block)
File "/Users/ndv/Library/Python/3.6/lib/python/site-packages/numba/lowering.py", line 259, in lower_block
self.lower_inst(inst)
File "/opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/contextlib.py", line 99, in __exit__
self.gen.throw(type, value, traceback)
File "/Users/ndv/Library/Python/3.6/lib/python/site-packages/numba/errors.py", line 625, in new_error_context
six.reraise(type(newerr), newerr, tb)
File "/Users/ndv/Library/Python/3.6/lib/python/site-packages/numba/six.py", line 659, in reraise
raise value
numba.errors.LoweringError: Failed in nopython mode pipeline (step: nopython mode backend)
('{i64, i1}', 'i64')
File "minrep.py", line 5:
def test():
yield None
^
[1] During: lowering "$0.2 = yield $const0.1" at minrep.py (5)
-------------------------------------------------------------------------------
This should not have happened, a problem has occurred in Numba's internals.
Please report the error message and traceback, along with a minimal reproducer
at: https://github.com/numba/numba/issues/new
If more help is needed please feel free to speak to the Numba core developers
directly at: https://gitter.im/numba/numba
Thanks in advance for your help in improving Numba!
|
AssertionError
|
def device_memory_size(devmem):
"""Check the memory size of the device memory.
The result is cached in the device memory object.
It may query the driver for the memory size of the device memory allocation.
"""
sz = getattr(devmem, "_cuda_memsize_", None)
if sz is None:
s, e = device_extents(devmem)
sz = e - s
devmem._cuda_memsize_ = sz
assert sz >= 0, "{} length array".format(sz)
return sz
|
def device_memory_size(devmem):
"""Check the memory size of the device memory.
The result is cached in the device memory object.
It may query the driver for the memory size of the device memory allocation.
"""
sz = getattr(devmem, "_cuda_memsize_", None)
if sz is None:
s, e = device_extents(devmem)
sz = e - s
devmem._cuda_memsize_ = sz
assert sz > 0, "zero length array"
return sz
|
https://github.com/numba/numba/issues/3705
|
Traceback (most recent call last):
File "pyarrow/tests/numba_bug.py", line 9, in <module>
d_arr[::-1].copy_to_host()
File "/home/pearu/miniconda3/envs/pyarrow-dev/lib/python3.7/site-packages/numba/cuda/cudadrv/devices.py", line 212, in _require_cuda_context
return fn(*args, **kws)
File "/home/pearu/miniconda3/envs/pyarrow-dev/lib/python3.7/site-packages/numba/cuda/cudadrv/devicearray.py", line 240, in copy_to_host
strides=self.strides, buffer=hostary)
ValueError: strides is incompatible with shape of requested array and size of buffer
|
ValueError
|
def __init__(self, start, stop, size, stride, single):
self.start = start
self.stop = stop
self.size = size
self.stride = stride
self.single = single
assert not single or size == 1
|
def __init__(self, start, stop, size, stride, single):
if stop < start:
raise ValueError("end offset is before start offset")
self.start = start
self.stop = stop
self.size = size
self.stride = stride
self.single = single
assert not single or size == 1
|
https://github.com/numba/numba/issues/3705
|
Traceback (most recent call last):
File "pyarrow/tests/numba_bug.py", line 9, in <module>
d_arr[::-1].copy_to_host()
File "/home/pearu/miniconda3/envs/pyarrow-dev/lib/python3.7/site-packages/numba/cuda/cudadrv/devices.py", line 212, in _require_cuda_context
return fn(*args, **kws)
File "/home/pearu/miniconda3/envs/pyarrow-dev/lib/python3.7/site-packages/numba/cuda/cudadrv/devicearray.py", line 240, in copy_to_host
strides=self.strides, buffer=hostary)
ValueError: strides is incompatible with shape of requested array and size of buffer
|
ValueError
|
def __getitem__(self, item):
if isinstance(item, slice):
start, stop, step = item.indices(self.size)
stride = step * self.stride
start = self.start + start * abs(self.stride)
stop = self.start + stop * abs(self.stride)
if stride == 0:
size = 1
else:
size = (stop - start + (stride - 1)) // stride
if size < 0:
size = 0
ret = Dim(start=start, stop=stop, size=size, stride=stride, single=False)
return ret
else:
sliced = self[item : item + 1]
return Dim(
start=sliced.start,
stop=sliced.stop,
size=sliced.size,
stride=sliced.stride,
single=True,
)
|
def __getitem__(self, item):
if isinstance(item, slice):
start, stop, step = item.start, item.stop, item.step
single = False
else:
single = True
start = item
stop = start + 1
step = None
# Default values
# Start value is default to zero
if start is None:
start = 0
# Stop value is default to self.size
if stop is None:
stop = self.size
# Step is default to 1
if step is None:
step = 1
stride = step * self.stride
# Compute start in bytes
if start >= 0:
start = self.start + start * self.stride
else:
start = self.stop + start * self.stride
start = max(start, self.start)
# Compute stop in bytes
if stop >= 0:
stop = self.start + stop * self.stride
else:
stop = self.stop + stop * self.stride
stop = min(stop, self.stop)
# Clip stop
if (stop - start) > self.size * self.stride:
stop = start + self.size * stride
if stop < start:
start = stop
size = 0
elif stride == 0:
size = 1 if single else ((stop - start) // step)
else:
size = (stop - start + (stride - 1)) // stride
return Dim(start, stop, size, stride, single)
|
https://github.com/numba/numba/issues/3705
|
Traceback (most recent call last):
File "pyarrow/tests/numba_bug.py", line 9, in <module>
d_arr[::-1].copy_to_host()
File "/home/pearu/miniconda3/envs/pyarrow-dev/lib/python3.7/site-packages/numba/cuda/cudadrv/devices.py", line 212, in _require_cuda_context
return fn(*args, **kws)
File "/home/pearu/miniconda3/envs/pyarrow-dev/lib/python3.7/site-packages/numba/cuda/cudadrv/devicearray.py", line 240, in copy_to_host
strides=self.strides, buffer=hostary)
ValueError: strides is incompatible with shape of requested array and size of buffer
|
ValueError
|
def copy_to_host(self, ary=None, stream=0):
"""Copy ``self`` to ``ary`` or create a new Numpy ndarray
if ``ary`` is ``None``.
If a CUDA ``stream`` is given, then the transfer will be made
asynchronously as part as the given stream. Otherwise, the transfer is
synchronous: the function returns after the copy is finished.
Always returns the host array.
Example::
import numpy as np
from numba import cuda
arr = np.arange(1000)
d_arr = cuda.to_device(arr)
my_kernel[100, 100](d_arr)
result_array = d_arr.copy_to_host()
"""
if any(s < 0 for s in self.strides):
msg = "D->H copy not implemented for negative strides: {}"
raise NotImplementedError(msg.format(self.strides))
assert self.alloc_size >= 0, "Negative memory size"
stream = self._default_stream(stream)
if ary is None:
hostary = np.empty(shape=self.alloc_size, dtype=np.byte)
else:
check_array_compatibility(self, ary)
hostary = ary
if self.alloc_size != 0:
_driver.device_to_host(hostary, self, self.alloc_size, stream=stream)
if ary is None:
if self.size == 0:
hostary = np.ndarray(shape=self.shape, dtype=self.dtype, buffer=hostary)
else:
hostary = np.ndarray(
shape=self.shape, dtype=self.dtype, strides=self.strides, buffer=hostary
)
return hostary
|
def copy_to_host(self, ary=None, stream=0):
"""Copy ``self`` to ``ary`` or create a new Numpy ndarray
if ``ary`` is ``None``.
If a CUDA ``stream`` is given, then the transfer will be made
asynchronously as part as the given stream. Otherwise, the transfer is
synchronous: the function returns after the copy is finished.
Always returns the host array.
Example::
import numpy as np
from numba import cuda
arr = np.arange(1000)
d_arr = cuda.to_device(arr)
my_kernel[100, 100](d_arr)
result_array = d_arr.copy_to_host()
"""
stream = self._default_stream(stream)
if ary is None:
hostary = np.empty(shape=self.alloc_size, dtype=np.byte)
else:
check_array_compatibility(self, ary)
hostary = ary
assert self.alloc_size >= 0, "Negative memory size"
if self.alloc_size != 0:
_driver.device_to_host(hostary, self, self.alloc_size, stream=stream)
if ary is None:
if self.size == 0:
hostary = np.ndarray(shape=self.shape, dtype=self.dtype, buffer=hostary)
else:
hostary = np.ndarray(
shape=self.shape, dtype=self.dtype, strides=self.strides, buffer=hostary
)
return hostary
|
https://github.com/numba/numba/issues/3705
|
Traceback (most recent call last):
File "pyarrow/tests/numba_bug.py", line 9, in <module>
d_arr[::-1].copy_to_host()
File "/home/pearu/miniconda3/envs/pyarrow-dev/lib/python3.7/site-packages/numba/cuda/cudadrv/devices.py", line 212, in _require_cuda_context
return fn(*args, **kws)
File "/home/pearu/miniconda3/envs/pyarrow-dev/lib/python3.7/site-packages/numba/cuda/cudadrv/devicearray.py", line 240, in copy_to_host
strides=self.strides, buffer=hostary)
ValueError: strides is incompatible with shape of requested array and size of buffer
|
ValueError
|
def infer_constant(self, name, loc=None):
"""
Infer a constant value for the given variable *name*.
If no value can be inferred, numba.errors.ConstantInferenceError
is raised.
"""
if name not in self._cache:
try:
self._cache[name] = (True, self._do_infer(name))
except ConstantInferenceError as exc:
# Store the exception args only, to avoid keeping
# a whole traceback alive.
self._cache[name] = (False, (exc.__class__, exc.args))
success, val = self._cache[name]
if success:
return val
else:
exc, args = val
if issubclass(exc, NumbaError):
raise exc(*args, loc=loc)
else:
raise exc(*args)
|
def infer_constant(self, name):
"""
Infer a constant value for the given variable *name*.
If no value can be inferred, numba.errors.ConstantInferenceError
is raised.
"""
if name not in self._cache:
try:
self._cache[name] = (True, self._do_infer(name))
except ConstantInferenceError as exc:
# Store the exception args only, to avoid keeping
# a whole traceback alive.
self._cache[name] = (False, (exc.__class__, exc.args))
success, val = self._cache[name]
if success:
return val
else:
exc, args = val
raise exc(*args)
|
https://github.com/numba/numba/issues/3717
|
---------------------------------------------------------------------------
ConstantInferenceError Traceback (most recent call last)
<ipython-input-29-41c708083646> in <module>
5 return a*b
6
----> 7 problem(1,2)
~/.virtualenvs/dl4cv/lib/python3.7/site-packages/numba/dispatcher.py in _compile_for_args(self, *args, **kws)
358 # this is from trying to infer something as constant when it isn't
359 # or isn't supported as a constant
--> 360 error_rewrite(e, 'constant_inference')
361 except Exception as e:
362 if config.SHOW_HELP:
~/.virtualenvs/dl4cv/lib/python3.7/site-packages/numba/dispatcher.py in error_rewrite(e, issue_type)
313 raise e
314 else:
--> 315 reraise(type(e), e, None)
316
317 argtypes = []
~/.virtualenvs/dl4cv/lib/python3.7/site-packages/numba/six.py in reraise(tp, value, tb)
656 value = tp()
657 if value.__traceback__ is not tb:
--> 658 raise value.with_traceback(tb)
659 raise value
660
ConstantInferenceError: Failed in nopython mode pipeline (step: nopython rewrites)
Cannot make a constant from: Cannot make a constant from: Cannot make a constant from: constant inference not possible for arg(0, name=a)
Numba could not make a constant out of something that it decided should be
a constant. This could well be a current limitation in Numba's internals,
please either raise a bug report along with a minimal reproducer at:
https://github.com/numba/numba/issues/new
|
ConstantInferenceError
|
def _fail(self, val):
# The location here is set to None because `val` is the ir.Var name
# and not the actual offending use of the var. When this is raised it is
# caught in the flow control of `infer_constant` and the class and args
# (the message) are captured and then raised again but with the location
# set to the expression that caused the constant inference error.
raise ConstantInferenceError(
"Constant inference not possible for: %s" % (val,), loc=None
)
|
def _fail(self, val):
raise ConstantInferenceError("constant inference not possible for %s" % (val,))
|
https://github.com/numba/numba/issues/3717
|
---------------------------------------------------------------------------
ConstantInferenceError Traceback (most recent call last)
<ipython-input-29-41c708083646> in <module>
5 return a*b
6
----> 7 problem(1,2)
~/.virtualenvs/dl4cv/lib/python3.7/site-packages/numba/dispatcher.py in _compile_for_args(self, *args, **kws)
358 # this is from trying to infer something as constant when it isn't
359 # or isn't supported as a constant
--> 360 error_rewrite(e, 'constant_inference')
361 except Exception as e:
362 if config.SHOW_HELP:
~/.virtualenvs/dl4cv/lib/python3.7/site-packages/numba/dispatcher.py in error_rewrite(e, issue_type)
313 raise e
314 else:
--> 315 reraise(type(e), e, None)
316
317 argtypes = []
~/.virtualenvs/dl4cv/lib/python3.7/site-packages/numba/six.py in reraise(tp, value, tb)
656 value = tp()
657 if value.__traceback__ is not tb:
--> 658 raise value.with_traceback(tb)
659 raise value
660
ConstantInferenceError: Failed in nopython mode pipeline (step: nopython rewrites)
Cannot make a constant from: Cannot make a constant from: Cannot make a constant from: constant inference not possible for arg(0, name=a)
Numba could not make a constant out of something that it decided should be
a constant. This could well be a current limitation in Numba's internals,
please either raise a bug report along with a minimal reproducer at:
https://github.com/numba/numba/issues/new
|
ConstantInferenceError
|
def _infer_expr(self, expr):
# Infer an expression: handle supported cases
if expr.op == "call":
func = self.infer_constant(expr.func.name, loc=expr.loc)
return self._infer_call(func, expr)
elif expr.op == "getattr":
value = self.infer_constant(expr.value.name, loc=expr.loc)
return self._infer_getattr(value, expr)
elif expr.op == "build_list":
return [self.infer_constant(i.name, loc=expr.loc) for i in expr.items]
elif expr.op == "build_tuple":
return tuple(self.infer_constant(i.name, loc=expr.loc) for i in expr.items)
self._fail(expr)
|
def _infer_expr(self, expr):
# Infer an expression: handle supported cases
if expr.op == "call":
func = self.infer_constant(expr.func.name)
return self._infer_call(func, expr)
elif expr.op == "getattr":
value = self.infer_constant(expr.value.name)
return self._infer_getattr(value, expr)
elif expr.op == "build_list":
return [self.infer_constant(i.name) for i in expr.items]
elif expr.op == "build_tuple":
return tuple(self.infer_constant(i.name) for i in expr.items)
self._fail(expr)
|
https://github.com/numba/numba/issues/3717
|
---------------------------------------------------------------------------
ConstantInferenceError Traceback (most recent call last)
<ipython-input-29-41c708083646> in <module>
5 return a*b
6
----> 7 problem(1,2)
~/.virtualenvs/dl4cv/lib/python3.7/site-packages/numba/dispatcher.py in _compile_for_args(self, *args, **kws)
358 # this is from trying to infer something as constant when it isn't
359 # or isn't supported as a constant
--> 360 error_rewrite(e, 'constant_inference')
361 except Exception as e:
362 if config.SHOW_HELP:
~/.virtualenvs/dl4cv/lib/python3.7/site-packages/numba/dispatcher.py in error_rewrite(e, issue_type)
313 raise e
314 else:
--> 315 reraise(type(e), e, None)
316
317 argtypes = []
~/.virtualenvs/dl4cv/lib/python3.7/site-packages/numba/six.py in reraise(tp, value, tb)
656 value = tp()
657 if value.__traceback__ is not tb:
--> 658 raise value.with_traceback(tb)
659 raise value
660
ConstantInferenceError: Failed in nopython mode pipeline (step: nopython rewrites)
Cannot make a constant from: Cannot make a constant from: Cannot make a constant from: constant inference not possible for arg(0, name=a)
Numba could not make a constant out of something that it decided should be
a constant. This could well be a current limitation in Numba's internals,
please either raise a bug report along with a minimal reproducer at:
https://github.com/numba/numba/issues/new
|
ConstantInferenceError
|
def _infer_call(self, func, expr):
if expr.kws or expr.vararg:
self._fail(expr)
# Check supported callables
_slice = func in (slice,)
_exc = isinstance(func, type) and issubclass(func, BaseException)
if _slice or _exc:
args = [self.infer_constant(a.name, loc=expr.loc) for a in expr.args]
if _slice:
return func(*args)
elif _exc:
# If the exception class is user defined it may implement a ctor
# that does not pass the args to the super. Therefore return the
# raw class and the args so this can be instantiated at the call
# site in the way the user source expects it to be.
return func, args
else:
assert 0, "Unreachable"
self._fail(expr)
|
def _infer_call(self, func, expr):
if expr.kws or expr.vararg:
self._fail(expr)
# Check supported callables
_slice = func in (slice,)
_exc = isinstance(func, type) and issubclass(func, BaseException)
if _slice or _exc:
args = [self.infer_constant(a.name) for a in expr.args]
if _slice:
return func(*args)
elif _exc:
# If the exception class is user defined it may implement a ctor
# that does not pass the args to the super. Therefore return the
# raw class and the args so this can be instantiated at the call
# site in the way the user source expects it to be.
return func, args
else:
assert 0, "Unreachable"
self._fail(expr)
|
https://github.com/numba/numba/issues/3717
|
---------------------------------------------------------------------------
ConstantInferenceError Traceback (most recent call last)
<ipython-input-29-41c708083646> in <module>
5 return a*b
6
----> 7 problem(1,2)
~/.virtualenvs/dl4cv/lib/python3.7/site-packages/numba/dispatcher.py in _compile_for_args(self, *args, **kws)
358 # this is from trying to infer something as constant when it isn't
359 # or isn't supported as a constant
--> 360 error_rewrite(e, 'constant_inference')
361 except Exception as e:
362 if config.SHOW_HELP:
~/.virtualenvs/dl4cv/lib/python3.7/site-packages/numba/dispatcher.py in error_rewrite(e, issue_type)
313 raise e
314 else:
--> 315 reraise(type(e), e, None)
316
317 argtypes = []
~/.virtualenvs/dl4cv/lib/python3.7/site-packages/numba/six.py in reraise(tp, value, tb)
656 value = tp()
657 if value.__traceback__ is not tb:
--> 658 raise value.with_traceback(tb)
659 raise value
660
ConstantInferenceError: Failed in nopython mode pipeline (step: nopython rewrites)
Cannot make a constant from: Cannot make a constant from: Cannot make a constant from: constant inference not possible for arg(0, name=a)
Numba could not make a constant out of something that it decided should be
a constant. This could well be a current limitation in Numba's internals,
please either raise a bug report along with a minimal reproducer at:
https://github.com/numba/numba/issues/new
|
ConstantInferenceError
|
def __init__(self, value, loc=None):
super(ConstantInferenceError, self).__init__(value, loc=loc)
|
def __init__(self, value, loc=None):
self.value = value
msg = "Cannot make a constant from: %s" % value
super(ConstantInferenceError, self).__init__(msg, loc=loc)
|
https://github.com/numba/numba/issues/3717
|
---------------------------------------------------------------------------
ConstantInferenceError Traceback (most recent call last)
<ipython-input-29-41c708083646> in <module>
5 return a*b
6
----> 7 problem(1,2)
~/.virtualenvs/dl4cv/lib/python3.7/site-packages/numba/dispatcher.py in _compile_for_args(self, *args, **kws)
358 # this is from trying to infer something as constant when it isn't
359 # or isn't supported as a constant
--> 360 error_rewrite(e, 'constant_inference')
361 except Exception as e:
362 if config.SHOW_HELP:
~/.virtualenvs/dl4cv/lib/python3.7/site-packages/numba/dispatcher.py in error_rewrite(e, issue_type)
313 raise e
314 else:
--> 315 reraise(type(e), e, None)
316
317 argtypes = []
~/.virtualenvs/dl4cv/lib/python3.7/site-packages/numba/six.py in reraise(tp, value, tb)
656 value = tp()
657 if value.__traceback__ is not tb:
--> 658 raise value.with_traceback(tb)
659 raise value
660
ConstantInferenceError: Failed in nopython mode pipeline (step: nopython rewrites)
Cannot make a constant from: Cannot make a constant from: Cannot make a constant from: constant inference not possible for arg(0, name=a)
Numba could not make a constant out of something that it decided should be
a constant. This could well be a current limitation in Numba's internals,
please either raise a bug report along with a minimal reproducer at:
https://github.com/numba/numba/issues/new
|
ConstantInferenceError
|
def as_method(self):
"""
Convert this signature to a bound method signature.
"""
if self.recvr is not None:
return self
sig = signature(self.return_type, *self.args[1:], recvr=self.args[0])
# Adjust the python signature
params = list(self.pysig.parameters.values())[1:]
sig.pysig = utils.pySignature(
parameters=params,
return_annotation=self.pysig.return_annotation,
)
return sig
|
def as_method(self):
"""
Convert this signature to a bound method signature.
"""
if self.recvr is not None:
return self
sig = signature(self.return_type, *self.args[1:], recvr=self.args[0])
return sig
|
https://github.com/numba/numba/issues/3489
|
Traceback (most recent call last):
File "<path>/numba/numba/errors.py", line 601, in new_error_context
yield
File "<path>/numba/numba/lowering.py", line 257, in lower_block
self.lower_inst(inst)
File "<path>/numba/numba/lowering.py", line 306, in lower_inst
val = self.lower_assign(ty, inst)
File "<path>/numba/numba/lowering.py", line 452, in lower_assign
return self.lower_expr(ty, value)
File "<path>/numba/numba/lowering.py", line 875, in lower_expr
res = self.lower_call(resty, expr)
File "<path>/numba/numba/lowering.py", line 841, in lower_call
res = impl(self.builder, argvals, self.loc)
File "<path>/numba/numba/targets/base.py", line 1116, in __call__
return self._imp(self._context, builder, self._sig, args)
File "<path>/numba/numba/typing/templates.py", line 532, in method_impl
return call(builder, args)
File "<path>/numba/numba/targets/base.py", line 1116, in __call__
return self._imp(self._context, builder, self._sig, args)
File "<path>/numba/numba/targets/imputils.py", line 192, in imp
builder, func, fndesc.restype, fndesc.argtypes, args)
File "<path>/numba/numba/targets/callconv.py", line 479, in call_function
args = list(arginfo.as_arguments(builder, args))
File "<path>/numba/numba/datamodel/packer.py", line 94, in as_arguments
% (self._nargs, len(values)))
TypeError: invalid number of args: expected 2, got 1
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "test_pr3468.py", line 17, in <module>
bar(Z)
File "<path>/numba/numba/dispatcher.py", line 367, in _compile_for_args
raise e
File "<path>/numba/numba/dispatcher.py", line 324, in _compile_for_args
return self.compile(tuple(argtypes))
File "<path>/numba/numba/compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "<path>/numba/numba/dispatcher.py", line 655, in compile
cres = self._compiler.compile(args, return_type)
File "<path>/numba/numba/dispatcher.py", line 82, in compile
pipeline_class=self.pipeline_class)
File "<path>/numba/numba/compiler.py", line 905, in compile_extra
return pipeline.compile_extra(func)
File "<path>/numba/numba/compiler.py", line 367, in compile_extra
return self._compile_bytecode()
File "<path>/numba/numba/compiler.py", line 836, in _compile_bytecode
return self._compile_core()
File "<path>/numba/numba/compiler.py", line 823, in _compile_core
res = pm.run(self.status)
File "<path>/numba/numba/compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "<path>/numba/numba/compiler.py", line 253, in run
raise patched_exception
File "<path>/numba/numba/compiler.py", line 244, in run
stage()
File "<path>/numba/numba/compiler.py", line 697, in stage_nopython_backend
self._backend(lowerfn, objectmode=False)
File "<path>/numba/numba/compiler.py", line 647, in _backend
lowered = lowerfn()
File "<path>/numba/numba/compiler.py", line 634, in backend_nopython_mode
self.flags)
File "<path>/numba/numba/compiler.py", line 1026, in native_lowering_stage
lower.lower()
File "<path>/numba/numba/lowering.py", line 176, in lower
self.lower_normal_function(self.fndesc)
File "<path>/numba/numba/lowering.py", line 217, in lower_normal_function
entry_block_tail = self.lower_function_body()
File "<path>/numba/numba/lowering.py", line 242, in lower_function_body
self.lower_block(block)
File "<path>/numba/numba/lowering.py", line 257, in lower_block
self.lower_inst(inst)
File "<env>/lib/python3.7/contextlib.py", line 130, in __exit__
self.gen.throw(type, value, traceback)
File "<path>/numba/numba/errors.py", line 609, in new_error_context
six.reraise(type(newerr), newerr, tb)
File "<path>/numba/numba/six.py", line 659, in reraise
raise value
numba.errors.LoweringError: Failed in nopython mode pipeline (step: nopython mode backend)
invalid number of args: expected 2, got 1
|
TypeError
|
def _get_dispatcher(cls, context, typ, attr, sig_args, sig_kws):
"""
Get the compiled dispatcher implementing the attribute for
the given formal signature.
"""
cache_key = context, typ, attr, tuple(sig_args), tuple(sig_kws.items())
try:
disp = cls._impl_cache[cache_key]
except KeyError:
# Get the overload implementation for the given type
pyfunc = cls._overload_func(*sig_args, **sig_kws)
if pyfunc is None:
# No implementation => fail typing
cls._impl_cache[cache_key] = None
return
from numba import jit
disp = cls._impl_cache[cache_key] = jit(nopython=True)(pyfunc)
return disp
|
def _get_dispatcher(cls, context, typ, attr, sig_args, sig_kws):
"""
Get the compiled dispatcher implementing the attribute for
the given formal signature.
"""
cache_key = context, typ, attr
try:
disp = cls._impl_cache[cache_key]
except KeyError:
# Get the overload implementation for the given type
pyfunc = cls._overload_func(*sig_args, **sig_kws)
if pyfunc is None:
# No implementation => fail typing
cls._impl_cache[cache_key] = None
return
from numba import jit
disp = cls._impl_cache[cache_key] = jit(nopython=True)(pyfunc)
return disp
|
https://github.com/numba/numba/issues/3489
|
Traceback (most recent call last):
File "<path>/numba/numba/errors.py", line 601, in new_error_context
yield
File "<path>/numba/numba/lowering.py", line 257, in lower_block
self.lower_inst(inst)
File "<path>/numba/numba/lowering.py", line 306, in lower_inst
val = self.lower_assign(ty, inst)
File "<path>/numba/numba/lowering.py", line 452, in lower_assign
return self.lower_expr(ty, value)
File "<path>/numba/numba/lowering.py", line 875, in lower_expr
res = self.lower_call(resty, expr)
File "<path>/numba/numba/lowering.py", line 841, in lower_call
res = impl(self.builder, argvals, self.loc)
File "<path>/numba/numba/targets/base.py", line 1116, in __call__
return self._imp(self._context, builder, self._sig, args)
File "<path>/numba/numba/typing/templates.py", line 532, in method_impl
return call(builder, args)
File "<path>/numba/numba/targets/base.py", line 1116, in __call__
return self._imp(self._context, builder, self._sig, args)
File "<path>/numba/numba/targets/imputils.py", line 192, in imp
builder, func, fndesc.restype, fndesc.argtypes, args)
File "<path>/numba/numba/targets/callconv.py", line 479, in call_function
args = list(arginfo.as_arguments(builder, args))
File "<path>/numba/numba/datamodel/packer.py", line 94, in as_arguments
% (self._nargs, len(values)))
TypeError: invalid number of args: expected 2, got 1
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "test_pr3468.py", line 17, in <module>
bar(Z)
File "<path>/numba/numba/dispatcher.py", line 367, in _compile_for_args
raise e
File "<path>/numba/numba/dispatcher.py", line 324, in _compile_for_args
return self.compile(tuple(argtypes))
File "<path>/numba/numba/compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "<path>/numba/numba/dispatcher.py", line 655, in compile
cres = self._compiler.compile(args, return_type)
File "<path>/numba/numba/dispatcher.py", line 82, in compile
pipeline_class=self.pipeline_class)
File "<path>/numba/numba/compiler.py", line 905, in compile_extra
return pipeline.compile_extra(func)
File "<path>/numba/numba/compiler.py", line 367, in compile_extra
return self._compile_bytecode()
File "<path>/numba/numba/compiler.py", line 836, in _compile_bytecode
return self._compile_core()
File "<path>/numba/numba/compiler.py", line 823, in _compile_core
res = pm.run(self.status)
File "<path>/numba/numba/compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "<path>/numba/numba/compiler.py", line 253, in run
raise patched_exception
File "<path>/numba/numba/compiler.py", line 244, in run
stage()
File "<path>/numba/numba/compiler.py", line 697, in stage_nopython_backend
self._backend(lowerfn, objectmode=False)
File "<path>/numba/numba/compiler.py", line 647, in _backend
lowered = lowerfn()
File "<path>/numba/numba/compiler.py", line 634, in backend_nopython_mode
self.flags)
File "<path>/numba/numba/compiler.py", line 1026, in native_lowering_stage
lower.lower()
File "<path>/numba/numba/lowering.py", line 176, in lower
self.lower_normal_function(self.fndesc)
File "<path>/numba/numba/lowering.py", line 217, in lower_normal_function
entry_block_tail = self.lower_function_body()
File "<path>/numba/numba/lowering.py", line 242, in lower_function_body
self.lower_block(block)
File "<path>/numba/numba/lowering.py", line 257, in lower_block
self.lower_inst(inst)
File "<env>/lib/python3.7/contextlib.py", line 130, in __exit__
self.gen.throw(type, value, traceback)
File "<path>/numba/numba/errors.py", line 609, in new_error_context
six.reraise(type(newerr), newerr, tb)
File "<path>/numba/numba/six.py", line 659, in reraise
raise value
numba.errors.LoweringError: Failed in nopython mode pipeline (step: nopython mode backend)
invalid number of args: expected 2, got 1
|
TypeError
|
def do_class_init(cls):
"""
Register generic method implementation.
"""
from numba.targets.imputils import lower_builtin
attr = cls._attr
@lower_builtin((cls.key, attr), cls.key, types.VarArg(types.Any))
def method_impl(context, builder, sig, args):
typ = sig.args[0]
typing_context = context.typing_context
disp = cls._get_dispatcher(typing_context, typ, attr, sig.args, {})
disp_type = types.Dispatcher(disp)
sig = disp_type.get_call_type(typing_context, sig.args, {})
call = context.get_function(disp_type, sig)
# Link dependent library
context.add_linking_libs(getattr(call, "libs", ()))
return call(builder, _adjust_omitted_args(sig.args, args))
|
def do_class_init(cls):
"""
Register generic method implementation.
"""
from numba.targets.imputils import lower_builtin
attr = cls._attr
@lower_builtin((cls.key, attr), cls.key, types.VarArg(types.Any))
def method_impl(context, builder, sig, args):
typ = sig.args[0]
typing_context = context.typing_context
disp = cls._get_dispatcher(typing_context, typ, attr, sig.args, {})
disp_type = types.Dispatcher(disp)
sig = disp_type.get_call_type(typing_context, sig.args, {})
call = context.get_function(disp_type, sig)
# Link dependent library
context.add_linking_libs(getattr(call, "libs", ()))
return call(builder, args)
|
https://github.com/numba/numba/issues/3489
|
Traceback (most recent call last):
File "<path>/numba/numba/errors.py", line 601, in new_error_context
yield
File "<path>/numba/numba/lowering.py", line 257, in lower_block
self.lower_inst(inst)
File "<path>/numba/numba/lowering.py", line 306, in lower_inst
val = self.lower_assign(ty, inst)
File "<path>/numba/numba/lowering.py", line 452, in lower_assign
return self.lower_expr(ty, value)
File "<path>/numba/numba/lowering.py", line 875, in lower_expr
res = self.lower_call(resty, expr)
File "<path>/numba/numba/lowering.py", line 841, in lower_call
res = impl(self.builder, argvals, self.loc)
File "<path>/numba/numba/targets/base.py", line 1116, in __call__
return self._imp(self._context, builder, self._sig, args)
File "<path>/numba/numba/typing/templates.py", line 532, in method_impl
return call(builder, args)
File "<path>/numba/numba/targets/base.py", line 1116, in __call__
return self._imp(self._context, builder, self._sig, args)
File "<path>/numba/numba/targets/imputils.py", line 192, in imp
builder, func, fndesc.restype, fndesc.argtypes, args)
File "<path>/numba/numba/targets/callconv.py", line 479, in call_function
args = list(arginfo.as_arguments(builder, args))
File "<path>/numba/numba/datamodel/packer.py", line 94, in as_arguments
% (self._nargs, len(values)))
TypeError: invalid number of args: expected 2, got 1
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "test_pr3468.py", line 17, in <module>
bar(Z)
File "<path>/numba/numba/dispatcher.py", line 367, in _compile_for_args
raise e
File "<path>/numba/numba/dispatcher.py", line 324, in _compile_for_args
return self.compile(tuple(argtypes))
File "<path>/numba/numba/compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "<path>/numba/numba/dispatcher.py", line 655, in compile
cres = self._compiler.compile(args, return_type)
File "<path>/numba/numba/dispatcher.py", line 82, in compile
pipeline_class=self.pipeline_class)
File "<path>/numba/numba/compiler.py", line 905, in compile_extra
return pipeline.compile_extra(func)
File "<path>/numba/numba/compiler.py", line 367, in compile_extra
return self._compile_bytecode()
File "<path>/numba/numba/compiler.py", line 836, in _compile_bytecode
return self._compile_core()
File "<path>/numba/numba/compiler.py", line 823, in _compile_core
res = pm.run(self.status)
File "<path>/numba/numba/compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "<path>/numba/numba/compiler.py", line 253, in run
raise patched_exception
File "<path>/numba/numba/compiler.py", line 244, in run
stage()
File "<path>/numba/numba/compiler.py", line 697, in stage_nopython_backend
self._backend(lowerfn, objectmode=False)
File "<path>/numba/numba/compiler.py", line 647, in _backend
lowered = lowerfn()
File "<path>/numba/numba/compiler.py", line 634, in backend_nopython_mode
self.flags)
File "<path>/numba/numba/compiler.py", line 1026, in native_lowering_stage
lower.lower()
File "<path>/numba/numba/lowering.py", line 176, in lower
self.lower_normal_function(self.fndesc)
File "<path>/numba/numba/lowering.py", line 217, in lower_normal_function
entry_block_tail = self.lower_function_body()
File "<path>/numba/numba/lowering.py", line 242, in lower_function_body
self.lower_block(block)
File "<path>/numba/numba/lowering.py", line 257, in lower_block
self.lower_inst(inst)
File "<env>/lib/python3.7/contextlib.py", line 130, in __exit__
self.gen.throw(type, value, traceback)
File "<path>/numba/numba/errors.py", line 609, in new_error_context
six.reraise(type(newerr), newerr, tb)
File "<path>/numba/numba/six.py", line 659, in reraise
raise value
numba.errors.LoweringError: Failed in nopython mode pipeline (step: nopython mode backend)
invalid number of args: expected 2, got 1
|
TypeError
|
def method_impl(context, builder, sig, args):
typ = sig.args[0]
typing_context = context.typing_context
disp = cls._get_dispatcher(typing_context, typ, attr, sig.args, {})
disp_type = types.Dispatcher(disp)
sig = disp_type.get_call_type(typing_context, sig.args, {})
call = context.get_function(disp_type, sig)
# Link dependent library
context.add_linking_libs(getattr(call, "libs", ()))
return call(builder, _adjust_omitted_args(sig.args, args))
|
def method_impl(context, builder, sig, args):
typ = sig.args[0]
typing_context = context.typing_context
disp = cls._get_dispatcher(typing_context, typ, attr, sig.args, {})
disp_type = types.Dispatcher(disp)
sig = disp_type.get_call_type(typing_context, sig.args, {})
call = context.get_function(disp_type, sig)
# Link dependent library
context.add_linking_libs(getattr(call, "libs", ()))
return call(builder, args)
|
https://github.com/numba/numba/issues/3489
|
Traceback (most recent call last):
File "<path>/numba/numba/errors.py", line 601, in new_error_context
yield
File "<path>/numba/numba/lowering.py", line 257, in lower_block
self.lower_inst(inst)
File "<path>/numba/numba/lowering.py", line 306, in lower_inst
val = self.lower_assign(ty, inst)
File "<path>/numba/numba/lowering.py", line 452, in lower_assign
return self.lower_expr(ty, value)
File "<path>/numba/numba/lowering.py", line 875, in lower_expr
res = self.lower_call(resty, expr)
File "<path>/numba/numba/lowering.py", line 841, in lower_call
res = impl(self.builder, argvals, self.loc)
File "<path>/numba/numba/targets/base.py", line 1116, in __call__
return self._imp(self._context, builder, self._sig, args)
File "<path>/numba/numba/typing/templates.py", line 532, in method_impl
return call(builder, args)
File "<path>/numba/numba/targets/base.py", line 1116, in __call__
return self._imp(self._context, builder, self._sig, args)
File "<path>/numba/numba/targets/imputils.py", line 192, in imp
builder, func, fndesc.restype, fndesc.argtypes, args)
File "<path>/numba/numba/targets/callconv.py", line 479, in call_function
args = list(arginfo.as_arguments(builder, args))
File "<path>/numba/numba/datamodel/packer.py", line 94, in as_arguments
% (self._nargs, len(values)))
TypeError: invalid number of args: expected 2, got 1
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "test_pr3468.py", line 17, in <module>
bar(Z)
File "<path>/numba/numba/dispatcher.py", line 367, in _compile_for_args
raise e
File "<path>/numba/numba/dispatcher.py", line 324, in _compile_for_args
return self.compile(tuple(argtypes))
File "<path>/numba/numba/compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "<path>/numba/numba/dispatcher.py", line 655, in compile
cres = self._compiler.compile(args, return_type)
File "<path>/numba/numba/dispatcher.py", line 82, in compile
pipeline_class=self.pipeline_class)
File "<path>/numba/numba/compiler.py", line 905, in compile_extra
return pipeline.compile_extra(func)
File "<path>/numba/numba/compiler.py", line 367, in compile_extra
return self._compile_bytecode()
File "<path>/numba/numba/compiler.py", line 836, in _compile_bytecode
return self._compile_core()
File "<path>/numba/numba/compiler.py", line 823, in _compile_core
res = pm.run(self.status)
File "<path>/numba/numba/compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "<path>/numba/numba/compiler.py", line 253, in run
raise patched_exception
File "<path>/numba/numba/compiler.py", line 244, in run
stage()
File "<path>/numba/numba/compiler.py", line 697, in stage_nopython_backend
self._backend(lowerfn, objectmode=False)
File "<path>/numba/numba/compiler.py", line 647, in _backend
lowered = lowerfn()
File "<path>/numba/numba/compiler.py", line 634, in backend_nopython_mode
self.flags)
File "<path>/numba/numba/compiler.py", line 1026, in native_lowering_stage
lower.lower()
File "<path>/numba/numba/lowering.py", line 176, in lower
self.lower_normal_function(self.fndesc)
File "<path>/numba/numba/lowering.py", line 217, in lower_normal_function
entry_block_tail = self.lower_function_body()
File "<path>/numba/numba/lowering.py", line 242, in lower_function_body
self.lower_block(block)
File "<path>/numba/numba/lowering.py", line 257, in lower_block
self.lower_inst(inst)
File "<env>/lib/python3.7/contextlib.py", line 130, in __exit__
self.gen.throw(type, value, traceback)
File "<path>/numba/numba/errors.py", line 609, in new_error_context
six.reraise(type(newerr), newerr, tb)
File "<path>/numba/numba/six.py", line 659, in reraise
raise value
numba.errors.LoweringError: Failed in nopython mode pipeline (step: nopython mode backend)
invalid number of args: expected 2, got 1
|
TypeError
|
def from_types(cls, tys, pyclass=None):
"""
Instantiate the right tuple type for the given element types.
"""
if pyclass is not None and pyclass is not tuple:
# A subclass => is it a namedtuple?
assert issubclass(pyclass, tuple)
if hasattr(pyclass, "_asdict"):
tys = tuple(map(unliteral, tys))
homogeneous = is_homogeneous(*tys)
if homogeneous:
return NamedUniTuple(tys[0], len(tys), pyclass)
else:
return NamedTuple(tys, pyclass)
else:
# non-named tuple
homogeneous = is_homogeneous(*tys)
if homogeneous:
return UniTuple(tys[0], len(tys))
else:
return Tuple(tys)
|
def from_types(cls, tys, pyclass=None):
"""
Instantiate the right tuple type for the given element types.
"""
homogeneous = False
if tys:
first = tys[0]
for ty in tys[1:]:
if ty != first:
break
else:
homogeneous = True
if pyclass is not None and pyclass is not tuple:
# A subclass => is it a namedtuple?
assert issubclass(pyclass, tuple)
if hasattr(pyclass, "_asdict"):
if homogeneous:
return NamedUniTuple(first, len(tys), pyclass)
else:
return NamedTuple(tys, pyclass)
if homogeneous:
return UniTuple(first, len(tys))
else:
return Tuple(tys)
|
https://github.com/numba/numba/issues/3565
|
---------------------------------------------------------------------------
TypingError Traceback (most recent call last)
<ipython-input-60-b047fa49e45c> in <module>()
----> 1 call(1, 1)
/opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/numba/dispatcher.py in _compile_for_args(self, *args, **kws)
346 e.patch_message(msg)
347
--> 348 error_rewrite(e, 'typing')
349 except errors.UnsupportedError as e:
350 # Something unsupported is present in the user code, add help info
/opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/numba/dispatcher.py in error_rewrite(e, issue_type)
313 raise e
314 else:
--> 315 reraise(type(e), e, None)
316
317 argtypes = []
/opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/numba/six.py in reraise(tp, value, tb)
656 value = tp()
657 if value.__traceback__ is not tb:
--> 658 raise value.with_traceback(tb)
659 raise value
660
TypingError: Failed in nopython mode pipeline (step: nopython frontend)
Cannot unify Data2(int64, Literal[int](0)) and Data2(int64 x 2) for 'result', defined at <ipython-input-59-68b6396882a7> (14)
File "<ipython-input-59-68b6396882a7>", line 14:
def call(i, j):
<source elided>
if j == 0:
result = Data1(i)
^
[1] During: typing of assignment at <ipython-input-59-68b6396882a7> (16)
File "<ipython-input-59-68b6396882a7>", line 16:
def call(i, j):
<source elided>
else:
result = Data2(i, j)
^
|
TypingError
|
def unify(self):
"""
Run the final unification pass over all inferred types, and
catch imprecise types.
"""
typdict = utils.UniqueDict()
def find_offender(name, exhaustive=False):
# finds the offending variable definition by name
# if exhaustive is set it will try and trace through temporary
# variables to find a concrete offending definition.
offender = None
for block in self.func_ir.blocks.values():
offender = block.find_variable_assignment(name)
if offender is not None:
if not exhaustive:
break
try: # simple assignment
hasattr(offender.value, "name")
offender_value = offender.value.name
except (AttributeError, KeyError):
break
orig_offender = offender
if offender_value.startswith("$"):
offender = find_offender(offender_value, exhaustive=exhaustive)
if offender is None:
offender = orig_offender
break
return offender
def diagnose_imprecision(offender):
# helper for diagnosing imprecise types
list_msg = """\n
For Numba to be able to compile a list, the list must have a known and
precise type that can be inferred from the other variables. Whilst sometimes
the type of empty lists can be inferred, this is not always the case, see this
documentation for help:
http://numba.pydata.org/numba-doc/latest/user/troubleshoot.html#my-code-has-an-untyped-list-problem
"""
if offender is not None:
# This block deals with imprecise lists
if hasattr(offender, "value"):
if hasattr(offender.value, "op"):
# might be `foo = []`
if offender.value.op == "build_list":
return list_msg
# or might be `foo = list()`
elif offender.value.op == "call":
try: # assignment involving a call
call_name = offender.value.func.name
# find the offender based on the call name
offender = find_offender(call_name)
if isinstance(offender.value, ir.Global):
if offender.value.name == "list":
return list_msg
except (AttributeError, KeyError):
pass
return "" # no help possible
def check_var(name):
tv = self.typevars[name]
if not tv.defined:
offender = find_offender(name)
val = getattr(offender, "value", "unknown operation")
loc = getattr(offender, "loc", ir.unknown_loc)
msg = "Undefined variable '%s', operation: %s, location: %s"
raise TypingError(msg % (var, val, loc), loc)
tp = tv.getone()
if not tp.is_precise():
offender = find_offender(name, exhaustive=True)
msg = (
"Cannot infer the type of variable '%s'%s, have imprecise type: %s. %s"
)
istmp = " (temporary variable)" if var.startswith("$") else ""
loc = getattr(offender, "loc", ir.unknown_loc)
# is this an untyped list? try and provide help
extra_msg = diagnose_imprecision(offender)
raise TypingError(msg % (var, istmp, tp, extra_msg), loc)
else: # type is precise, hold it
typdict[var] = tp
# For better error display, check first user-visible vars, then
# temporaries
temps = set(k for k in self.typevars if not k[0].isalpha())
others = set(self.typevars) - temps
for var in sorted(others):
check_var(var)
for var in sorted(temps):
check_var(var)
retty = self.get_return_type(typdict)
fntys = self.get_function_types(typdict)
if self.generator_info:
retty = self.get_generator_type(typdict, retty)
self.debug.unify_finished(typdict, retty, fntys)
return typdict, retty, fntys
|
def unify(self):
"""
Run the final unification pass over all inferred types, and
catch imprecise types.
"""
typdict = utils.UniqueDict()
def find_offender(name, exhaustive=False):
# finds the offending variable definition by name
# if exhaustive is set it will try and trace through temporary
# variables to find a concrete offending definition.
offender = None
for block in self.func_ir.blocks.values():
offender = block.find_variable_assignment(name)
if offender is not None:
if not exhaustive:
break
try: # simple assignment
hasattr(offender.value, "name")
offender_value = offender.value.name
except (AttributeError, KeyError):
break
orig_offender = offender
if offender_value.startswith("$"):
offender = find_offender(offender_value, exhaustive=exhaustive)
if offender is None:
offender = orig_offender
break
return offender
def diagnose_imprecision(offender):
# helper for diagnosing imprecise types
list_msg = """\n
For Numba to be able to compile a list, the list must have a known and
precise type that can be inferred from the other variables. Whilst sometimes
the type of empty lists can be inferred, this is not always the case, see this
documentation for help:
http://numba.pydata.org/numba-doc/latest/user/troubleshoot.html#my-code-has-an-untyped-list-problem
"""
if offender is not None:
# This block deals with imprecise lists
if hasattr(offender, "value"):
if hasattr(offender.value, "op"):
# might be `foo = []`
if offender.value.op == "build_list":
return list_msg
# or might be `foo = list()`
elif offender.value.op == "call":
try: # assignment involving a call
call_name = offender.value.func.name
# find the offender based on the call name
offender = find_offender(call_name)
if isinstance(offender.value, ir.Global):
if offender.value.name == "list":
return list_msg
except (AttributeError, KeyError):
pass
return "" # no help possible
def check_var(name):
tv = self.typevars[name]
if not tv.defined:
offender = find_offender(name)
val = getattr(offender, "value", "unknown operation")
loc = getattr(offender, "loc", "unknown location")
msg = "Undefined variable '%s', operation: %s, location: %s"
raise TypingError(msg % (var, val, loc), loc)
tp = tv.getone()
if not tp.is_precise():
offender = find_offender(name, exhaustive=True)
msg = (
"Cannot infer the type of variable '%s'%s, have imprecise type: %s. %s"
)
istmp = " (temporary variable)" if var.startswith("$") else ""
loc = getattr(offender, "loc", "unknown location")
# is this an untyped list? try and provide help
extra_msg = diagnose_imprecision(offender)
raise TypingError(msg % (var, istmp, tp, extra_msg), loc)
else: # type is precise, hold it
typdict[var] = tp
# For better error display, check first user-visible vars, then
# temporaries
temps = set(k for k in self.typevars if not k[0].isalpha())
others = set(self.typevars) - temps
for var in sorted(others):
check_var(var)
for var in sorted(temps):
check_var(var)
retty = self.get_return_type(typdict)
fntys = self.get_function_types(typdict)
if self.generator_info:
retty = self.get_generator_type(typdict, retty)
self.debug.unify_finished(typdict, retty, fntys)
return typdict, retty, fntys
|
https://github.com/numba/numba/issues/3389
|
Traceback (most recent call last):
File "examples/hello.py", line 25, in <module>
main()
File "examples/hello.py", line 19, in main
print(hpat(2))
File "numba/numba/dispatcher.py", line 367, in _compile_for_args
raise e
File "numba/numba/dispatcher.py", line 324, in _compile_for_args
return self.compile(tuple(argtypes))
File "numba/numba/compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "numba/numba/dispatcher.py", line 655, in compile
cres = self._compiler.compile(args, return_type)
File "numba/numba/dispatcher.py", line 82, in compile
pipeline_class=self.pipeline_class)
File "numba/numba/compiler.py", line 905, in compile_extra
return pipeline.compile_extra(func)
File "numba/numba/compiler.py", line 367, in compile_extra
return self._compile_bytecode()
File "numba/numba/compiler.py", line 836, in _compile_bytecode
return self._compile_core()
File "numba/numba/compiler.py", line 823, in _compile_core
res = pm.run(self.status)
File "numba/numba/compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "numba/numba/compiler.py", line 253, in run
raise patched_exception
File "numba/numba/compiler.py", line 244, in run
stage()
File "numba/numba/compiler.py", line 478, in stage_nopython_frontend
self.locals)
File "numba/numba/compiler.py", line 1009, in type_inference_stage
typemap, restype, calltypes = infer.unify()
File "numba/numba/typeinfer.py", line 924, in unify
check_var(var)
File "numba/numba/typeinfer.py", line 910, in check_var
raise TypingError(msg % (var, val, loc), loc)
File "numba/numba/errors.py", line 406, in __init__
highlight("%s\n%s\n" % (msg, loc.strformat())))
AttributeError: Failed in hpat mode pipeline (step: nopython frontend)
'str' object has no attribute 'strformat'
|
AttributeError
|
def check_var(name):
tv = self.typevars[name]
if not tv.defined:
offender = find_offender(name)
val = getattr(offender, "value", "unknown operation")
loc = getattr(offender, "loc", ir.unknown_loc)
msg = "Undefined variable '%s', operation: %s, location: %s"
raise TypingError(msg % (var, val, loc), loc)
tp = tv.getone()
if not tp.is_precise():
offender = find_offender(name, exhaustive=True)
msg = "Cannot infer the type of variable '%s'%s, have imprecise type: %s. %s"
istmp = " (temporary variable)" if var.startswith("$") else ""
loc = getattr(offender, "loc", ir.unknown_loc)
# is this an untyped list? try and provide help
extra_msg = diagnose_imprecision(offender)
raise TypingError(msg % (var, istmp, tp, extra_msg), loc)
else: # type is precise, hold it
typdict[var] = tp
|
def check_var(name):
tv = self.typevars[name]
if not tv.defined:
offender = find_offender(name)
val = getattr(offender, "value", "unknown operation")
loc = getattr(offender, "loc", "unknown location")
msg = "Undefined variable '%s', operation: %s, location: %s"
raise TypingError(msg % (var, val, loc), loc)
tp = tv.getone()
if not tp.is_precise():
offender = find_offender(name, exhaustive=True)
msg = "Cannot infer the type of variable '%s'%s, have imprecise type: %s. %s"
istmp = " (temporary variable)" if var.startswith("$") else ""
loc = getattr(offender, "loc", "unknown location")
# is this an untyped list? try and provide help
extra_msg = diagnose_imprecision(offender)
raise TypingError(msg % (var, istmp, tp, extra_msg), loc)
else: # type is precise, hold it
typdict[var] = tp
|
https://github.com/numba/numba/issues/3389
|
Traceback (most recent call last):
File "examples/hello.py", line 25, in <module>
main()
File "examples/hello.py", line 19, in main
print(hpat(2))
File "numba/numba/dispatcher.py", line 367, in _compile_for_args
raise e
File "numba/numba/dispatcher.py", line 324, in _compile_for_args
return self.compile(tuple(argtypes))
File "numba/numba/compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "numba/numba/dispatcher.py", line 655, in compile
cres = self._compiler.compile(args, return_type)
File "numba/numba/dispatcher.py", line 82, in compile
pipeline_class=self.pipeline_class)
File "numba/numba/compiler.py", line 905, in compile_extra
return pipeline.compile_extra(func)
File "numba/numba/compiler.py", line 367, in compile_extra
return self._compile_bytecode()
File "numba/numba/compiler.py", line 836, in _compile_bytecode
return self._compile_core()
File "numba/numba/compiler.py", line 823, in _compile_core
res = pm.run(self.status)
File "numba/numba/compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "numba/numba/compiler.py", line 253, in run
raise patched_exception
File "numba/numba/compiler.py", line 244, in run
stage()
File "numba/numba/compiler.py", line 478, in stage_nopython_frontend
self.locals)
File "numba/numba/compiler.py", line 1009, in type_inference_stage
typemap, restype, calltypes = infer.unify()
File "numba/numba/typeinfer.py", line 924, in unify
check_var(var)
File "numba/numba/typeinfer.py", line 910, in check_var
raise TypingError(msg % (var, val, loc), loc)
File "numba/numba/errors.py", line 406, in __init__
highlight("%s\n%s\n" % (msg, loc.strformat())))
AttributeError: Failed in hpat mode pipeline (step: nopython frontend)
'str' object has no attribute 'strformat'
|
AttributeError
|
def _unify_return_types(self, rettypes):
if rettypes:
unified = self.context.unify_types(*rettypes)
if unified is None or not unified.is_precise():
def check_type(atype):
lst = []
for k, v in self.typevars.items():
if atype == v.type:
lst.append(k)
returns = {}
for x in reversed(lst):
for block in self.func_ir.blocks.values():
for instr in block.find_insts(ir.Return):
value = instr.value
if isinstance(value, ir.Var):
name = value.name
else:
pass
if x == name:
returns[x] = instr
break
for name, offender in returns.items():
loc = getattr(offender, "loc", ir.unknown_loc)
msg = "Return of: IR name '%s', type '%s', location: %s"
interped = msg % (name, atype, loc.strformat())
return interped
problem_str = []
for xtype in rettypes:
problem_str.append(_termcolor.errmsg(check_type(xtype)))
raise TypingError(
"Can't unify return type from the "
"following types: %s"
% ", ".join(sorted(map(str, rettypes)))
+ "\n"
+ "\n".join(problem_str)
)
return unified
else:
# Function without a successful return path
return types.none
|
def _unify_return_types(self, rettypes):
if rettypes:
unified = self.context.unify_types(*rettypes)
if unified is None or not unified.is_precise():
def check_type(atype):
lst = []
for k, v in self.typevars.items():
if atype == v.type:
lst.append(k)
returns = {}
for x in reversed(lst):
for block in self.func_ir.blocks.values():
for instr in block.find_insts(ir.Return):
value = instr.value
if isinstance(value, ir.Var):
name = value.name
else:
pass
if x == name:
returns[x] = instr
break
for name, offender in returns.items():
loc = getattr(offender, "loc", "unknown location")
msg = "Return of: IR name '%s', type '%s', location: %s"
interped = msg % (name, atype, loc.strformat())
return interped
problem_str = []
for xtype in rettypes:
problem_str.append(_termcolor.errmsg(check_type(xtype)))
raise TypingError(
"Can't unify return type from the "
"following types: %s"
% ", ".join(sorted(map(str, rettypes)))
+ "\n"
+ "\n".join(problem_str)
)
return unified
else:
# Function without a successful return path
return types.none
|
https://github.com/numba/numba/issues/3389
|
Traceback (most recent call last):
File "examples/hello.py", line 25, in <module>
main()
File "examples/hello.py", line 19, in main
print(hpat(2))
File "numba/numba/dispatcher.py", line 367, in _compile_for_args
raise e
File "numba/numba/dispatcher.py", line 324, in _compile_for_args
return self.compile(tuple(argtypes))
File "numba/numba/compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "numba/numba/dispatcher.py", line 655, in compile
cres = self._compiler.compile(args, return_type)
File "numba/numba/dispatcher.py", line 82, in compile
pipeline_class=self.pipeline_class)
File "numba/numba/compiler.py", line 905, in compile_extra
return pipeline.compile_extra(func)
File "numba/numba/compiler.py", line 367, in compile_extra
return self._compile_bytecode()
File "numba/numba/compiler.py", line 836, in _compile_bytecode
return self._compile_core()
File "numba/numba/compiler.py", line 823, in _compile_core
res = pm.run(self.status)
File "numba/numba/compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "numba/numba/compiler.py", line 253, in run
raise patched_exception
File "numba/numba/compiler.py", line 244, in run
stage()
File "numba/numba/compiler.py", line 478, in stage_nopython_frontend
self.locals)
File "numba/numba/compiler.py", line 1009, in type_inference_stage
typemap, restype, calltypes = infer.unify()
File "numba/numba/typeinfer.py", line 924, in unify
check_var(var)
File "numba/numba/typeinfer.py", line 910, in check_var
raise TypingError(msg % (var, val, loc), loc)
File "numba/numba/errors.py", line 406, in __init__
highlight("%s\n%s\n" % (msg, loc.strformat())))
AttributeError: Failed in hpat mode pipeline (step: nopython frontend)
'str' object has no attribute 'strformat'
|
AttributeError
|
def check_type(atype):
lst = []
for k, v in self.typevars.items():
if atype == v.type:
lst.append(k)
returns = {}
for x in reversed(lst):
for block in self.func_ir.blocks.values():
for instr in block.find_insts(ir.Return):
value = instr.value
if isinstance(value, ir.Var):
name = value.name
else:
pass
if x == name:
returns[x] = instr
break
for name, offender in returns.items():
loc = getattr(offender, "loc", ir.unknown_loc)
msg = "Return of: IR name '%s', type '%s', location: %s"
interped = msg % (name, atype, loc.strformat())
return interped
|
def check_type(atype):
lst = []
for k, v in self.typevars.items():
if atype == v.type:
lst.append(k)
returns = {}
for x in reversed(lst):
for block in self.func_ir.blocks.values():
for instr in block.find_insts(ir.Return):
value = instr.value
if isinstance(value, ir.Var):
name = value.name
else:
pass
if x == name:
returns[x] = instr
break
for name, offender in returns.items():
loc = getattr(offender, "loc", "unknown location")
msg = "Return of: IR name '%s', type '%s', location: %s"
interped = msg % (name, atype, loc.strformat())
return interped
|
https://github.com/numba/numba/issues/3389
|
Traceback (most recent call last):
File "examples/hello.py", line 25, in <module>
main()
File "examples/hello.py", line 19, in main
print(hpat(2))
File "numba/numba/dispatcher.py", line 367, in _compile_for_args
raise e
File "numba/numba/dispatcher.py", line 324, in _compile_for_args
return self.compile(tuple(argtypes))
File "numba/numba/compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "numba/numba/dispatcher.py", line 655, in compile
cres = self._compiler.compile(args, return_type)
File "numba/numba/dispatcher.py", line 82, in compile
pipeline_class=self.pipeline_class)
File "numba/numba/compiler.py", line 905, in compile_extra
return pipeline.compile_extra(func)
File "numba/numba/compiler.py", line 367, in compile_extra
return self._compile_bytecode()
File "numba/numba/compiler.py", line 836, in _compile_bytecode
return self._compile_core()
File "numba/numba/compiler.py", line 823, in _compile_core
res = pm.run(self.status)
File "numba/numba/compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "numba/numba/compiler.py", line 253, in run
raise patched_exception
File "numba/numba/compiler.py", line 244, in run
stage()
File "numba/numba/compiler.py", line 478, in stage_nopython_frontend
self.locals)
File "numba/numba/compiler.py", line 1009, in type_inference_stage
typemap, restype, calltypes = infer.unify()
File "numba/numba/typeinfer.py", line 924, in unify
check_var(var)
File "numba/numba/typeinfer.py", line 910, in check_var
raise TypingError(msg % (var, val, loc), loc)
File "numba/numba/errors.py", line 406, in __init__
highlight("%s\n%s\n" % (msg, loc.strformat())))
AttributeError: Failed in hpat mode pipeline (step: nopython frontend)
'str' object has no attribute 'strformat'
|
AttributeError
|
def _stencil_wrapper(self, result, sigret, return_type, typemap, calltypes, *args):
# Overall approach:
# 1) Construct a string containing a function definition for the stencil function
# that will execute the stencil kernel. This function definition includes a
# unique stencil function name, the parameters to the stencil kernel, loop
# nests across the dimenions of the input array. Those loop nests use the
# computed stencil kernel size so as not to try to compute elements where
# elements outside the bounds of the input array would be needed.
# 2) The but of the loop nest in this new function is a special sentinel
# assignment.
# 3) Get the IR of this new function.
# 4) Split the block containing the sentinel assignment and remove the sentinel
# assignment. Insert the stencil kernel IR into the stencil function IR
# after label and variable renaming of the stencil kernel IR to prevent
# conflicts with the stencil function IR.
# 5) Compile the combined stencil function IR + stencil kernel IR into existence.
# Copy the kernel so that our changes for this callsite
# won't effect other callsites.
(kernel_copy, copy_calltypes) = self.copy_ir_with_calltypes(
self.kernel_ir, calltypes
)
# The stencil kernel body becomes the body of a loop, for which args aren't needed.
ir_utils.remove_args(kernel_copy.blocks)
first_arg = kernel_copy.arg_names[0]
in_cps, out_cps = ir_utils.copy_propagate(kernel_copy.blocks, typemap)
name_var_table = ir_utils.get_name_var_table(kernel_copy.blocks)
ir_utils.apply_copy_propagate(
kernel_copy.blocks, in_cps, name_var_table, typemap, copy_calltypes
)
if "out" in name_var_table:
raise ValueError("Cannot use the reserved word 'out' in stencil kernels.")
sentinel_name = ir_utils.get_unused_var_name("__sentinel__", name_var_table)
if config.DEBUG_ARRAY_OPT == 1:
print("name_var_table", name_var_table, sentinel_name)
the_array = args[0]
if config.DEBUG_ARRAY_OPT == 1:
print(
"_stencil_wrapper",
return_type,
return_type.dtype,
type(return_type.dtype),
args,
)
ir_utils.dump_blocks(kernel_copy.blocks)
# We generate a Numba function to execute this stencil and here
# create the unique name of this function.
stencil_func_name = "__numba_stencil_%s_%s" % (
hex(id(the_array)).replace("-", "_"),
self.id,
)
# We will put a loop nest in the generated function for each
# dimension in the input array. Here we create the name for
# the index variable for each dimension. index0, index1, ...
index_vars = []
for i in range(the_array.ndim):
index_var_name = ir_utils.get_unused_var_name("index" + str(i), name_var_table)
index_vars += [index_var_name]
# Create extra signature for out and neighborhood.
out_name = ir_utils.get_unused_var_name("out", name_var_table)
neighborhood_name = ir_utils.get_unused_var_name("neighborhood", name_var_table)
sig_extra = ""
if result is not None:
sig_extra += ", {}=None".format(out_name)
if "neighborhood" in dict(self.kws):
sig_extra += ", {}=None".format(neighborhood_name)
# Get a list of the standard indexed array names.
standard_indexed = self.options.get("standard_indexing", [])
if first_arg in standard_indexed:
raise ValueError(
"The first argument to a stencil kernel must "
"use relative indexing, not standard indexing."
)
if len(set(standard_indexed) - set(kernel_copy.arg_names)) != 0:
raise ValueError(
"Standard indexing requested for an array name "
"not present in the stencil kernel definition."
)
# Add index variables to getitems in the IR to transition the accesses
# in the kernel from relative to regular Python indexing. Returns the
# computed size of the stencil kernel and a list of the relatively indexed
# arrays.
kernel_size, relatively_indexed = self.add_indices_to_kernel(
kernel_copy, index_vars, the_array.ndim, self.neighborhood, standard_indexed
)
if self.neighborhood is None:
self.neighborhood = kernel_size
if config.DEBUG_ARRAY_OPT == 1:
print("After add_indices_to_kernel")
ir_utils.dump_blocks(kernel_copy.blocks)
# The return in the stencil kernel becomes a setitem for that
# particular point in the iteration space.
ret_blocks = self.replace_return_with_setitem(
kernel_copy.blocks, index_vars, out_name
)
if config.DEBUG_ARRAY_OPT == 1:
print("After replace_return_with_setitem", ret_blocks)
ir_utils.dump_blocks(kernel_copy.blocks)
# Start to form the new function to execute the stencil kernel.
func_text = "def {}({}{}):\n".format(
stencil_func_name, ",".join(kernel_copy.arg_names), sig_extra
)
# Get loop ranges for each dimension, which could be either int
# or variable. In the latter case we'll use the extra neighborhood
# argument to the function.
ranges = []
for i in range(the_array.ndim):
if isinstance(kernel_size[i][0], int):
lo = kernel_size[i][0]
hi = kernel_size[i][1]
else:
lo = "{}[{}][0]".format(neighborhood_name, i)
hi = "{}[{}][1]".format(neighborhood_name, i)
ranges.append((lo, hi))
# If there are more than one relatively indexed arrays, add a call to
# a function that will raise an error if any of the relatively indexed
# arrays are of different size than the first input array.
if len(relatively_indexed) > 1:
func_text += " raise_if_incompatible_array_sizes(" + first_arg
for other_array in relatively_indexed:
if other_array != first_arg:
func_text += "," + other_array
func_text += ")\n"
# Get the shape of the first input array.
shape_name = ir_utils.get_unused_var_name("full_shape", name_var_table)
func_text += " {} = {}.shape\n".format(shape_name, first_arg)
# If we have to allocate the output array (the out argument was not used)
# then us numpy.full if the user specified a cval stencil decorator option
# or np.zeros if they didn't to allocate the array.
if result is None:
return_type_name = numpy_support.as_dtype(return_type.dtype).type.__name__
if "cval" in self.options:
cval = self.options["cval"]
if return_type.dtype != typing.typeof.typeof(cval):
raise ValueError("cval type does not match stencil return type.")
out_init = "{} = np.full({}, {}, dtype=np.{})\n".format(
out_name, shape_name, cval, return_type_name
)
else:
out_init = "{} = np.zeros({}, dtype=np.{})\n".format(
out_name, shape_name, return_type_name
)
func_text += " " + out_init
offset = 1
# Add the loop nests to the new function.
for i in range(the_array.ndim):
for j in range(offset):
func_text += " "
# ranges[i][0] is the minimum index used in the i'th dimension
# but minimum's greater than 0 don't preclude any entry in the array.
# So, take the minimum of 0 and the minimum index found in the kernel
# and this will be a negative number (potentially -0). Then, we do
# unary - on that to get the positive offset in this dimension whose
# use is precluded.
# ranges[i][1] is the maximum of 0 and the observed maximum index
# in this dimension because negative maximums would not cause us to
# preclude any entry in the array from being used.
func_text += ("for {} in range(-min(0,{}),{}[{}]-max(0,{})):\n").format(
index_vars[i], ranges[i][0], shape_name, i, ranges[i][1]
)
offset += 1
for j in range(offset):
func_text += " "
# Put a sentinel in the code so we can locate it in the IR. We will
# remove this sentinel assignment and replace it with the IR for the
# stencil kernel body.
func_text += "{} = 0\n".format(sentinel_name)
func_text += " return {}\n".format(out_name)
if config.DEBUG_ARRAY_OPT == 1:
print("new stencil func text")
print(func_text)
# Force the new stencil function into existence.
exec_(func_text) in globals(), locals()
stencil_func = eval(stencil_func_name)
if sigret is not None:
pysig = utils.pysignature(stencil_func)
sigret.pysig = pysig
# Get the IR for the newly created stencil function.
stencil_ir = compiler.run_frontend(stencil_func)
ir_utils.remove_dels(stencil_ir.blocks)
# rename all variables in stencil_ir afresh
var_table = ir_utils.get_name_var_table(stencil_ir.blocks)
new_var_dict = {}
reserved_names = (
[sentinel_name, out_name, neighborhood_name, shape_name]
+ kernel_copy.arg_names
+ index_vars
)
for name, var in var_table.items():
if not name in reserved_names:
new_var_dict[name] = ir_utils.mk_unique_var(name)
ir_utils.replace_var_names(stencil_ir.blocks, new_var_dict)
stencil_stub_last_label = max(stencil_ir.blocks.keys()) + 1
# Shift lables in the kernel copy so they are guaranteed unique
# and don't conflict with any labels in the stencil_ir.
kernel_copy.blocks = ir_utils.add_offset_to_labels(
kernel_copy.blocks, stencil_stub_last_label
)
new_label = max(kernel_copy.blocks.keys()) + 1
# Adjust ret_blocks to account for addition of the offset.
ret_blocks = [x + stencil_stub_last_label for x in ret_blocks]
if config.DEBUG_ARRAY_OPT == 1:
print("ret_blocks w/ offsets", ret_blocks, stencil_stub_last_label)
print("before replace sentinel stencil_ir")
ir_utils.dump_blocks(stencil_ir.blocks)
print("before replace sentinel kernel_copy")
ir_utils.dump_blocks(kernel_copy.blocks)
# Search all the block in the stencil outline for the sentinel.
for label, block in stencil_ir.blocks.items():
for i, inst in enumerate(block.body):
if isinstance(inst, ir.Assign) and inst.target.name == sentinel_name:
# We found the sentinel assignment.
loc = inst.loc
scope = block.scope
# split block across __sentinel__
# A new block is allocated for the statements prior to the
# sentinel but the new block maintains the current block
# label.
prev_block = ir.Block(scope, loc)
prev_block.body = block.body[:i]
# The current block is used for statements after sentinel.
block.body = block.body[i + 1 :]
# But the current block gets a new label.
body_first_label = min(kernel_copy.blocks.keys())
# The previous block jumps to the minimum labelled block of
# the parfor body.
prev_block.append(ir.Jump(body_first_label, loc))
# Add all the parfor loop body blocks to the gufunc
# function's IR.
for l, b in kernel_copy.blocks.items():
stencil_ir.blocks[l] = b
stencil_ir.blocks[new_label] = block
stencil_ir.blocks[label] = prev_block
# Add a jump from all the blocks that previously contained
# a return in the stencil kernel to the block
# containing statements after the sentinel.
for ret_block in ret_blocks:
stencil_ir.blocks[ret_block].append(ir.Jump(new_label, loc))
break
else:
continue
break
stencil_ir.blocks = ir_utils.rename_labels(stencil_ir.blocks)
ir_utils.remove_dels(stencil_ir.blocks)
assert isinstance(the_array, types.Type)
array_types = args
new_stencil_param_types = list(array_types)
if config.DEBUG_ARRAY_OPT == 1:
print("new_stencil_param_types", new_stencil_param_types)
ir_utils.dump_blocks(stencil_ir.blocks)
# Compile the combined stencil function with the replaced loop
# body in it.
new_func = compiler.compile_ir(
self._typingctx,
self._targetctx,
stencil_ir,
new_stencil_param_types,
None,
compiler.DEFAULT_FLAGS,
{},
)
return new_func
|
def _stencil_wrapper(self, result, sigret, return_type, typemap, calltypes, *args):
# Overall approach:
# 1) Construct a string containing a function definition for the stencil function
# that will execute the stencil kernel. This function definition includes a
# unique stencil function name, the parameters to the stencil kernel, loop
# nests across the dimenions of the input array. Those loop nests use the
# computed stencil kernel size so as not to try to compute elements where
# elements outside the bounds of the input array would be needed.
# 2) The but of the loop nest in this new function is a special sentinel
# assignment.
# 3) Get the IR of this new function.
# 4) Split the block containing the sentinel assignment and remove the sentinel
# assignment. Insert the stencil kernel IR into the stencil function IR
# after label and variable renaming of the stencil kernel IR to prevent
# conflicts with the stencil function IR.
# 5) Compile the combined stencil function IR + stencil kernel IR into existence.
# Copy the kernel so that our changes for this callsite
# won't effect other callsites.
(kernel_copy, copy_calltypes) = self.copy_ir_with_calltypes(
self.kernel_ir, calltypes
)
# The stencil kernel body becomes the body of a loop, for which args aren't needed.
ir_utils.remove_args(kernel_copy.blocks)
first_arg = kernel_copy.arg_names[0]
in_cps, out_cps = ir_utils.copy_propagate(kernel_copy.blocks, typemap)
name_var_table = ir_utils.get_name_var_table(kernel_copy.blocks)
ir_utils.apply_copy_propagate(
kernel_copy.blocks, in_cps, name_var_table, typemap, copy_calltypes
)
if "out" in name_var_table:
raise ValueError("Cannot use the reserved word 'out' in stencil kernels.")
sentinel_name = ir_utils.get_unused_var_name("__sentinel__", name_var_table)
if config.DEBUG_ARRAY_OPT == 1:
print("name_var_table", name_var_table, sentinel_name)
the_array = args[0]
if config.DEBUG_ARRAY_OPT == 1:
print(
"_stencil_wrapper",
return_type,
return_type.dtype,
type(return_type.dtype),
args,
)
ir_utils.dump_blocks(kernel_copy.blocks)
# We generate a Numba function to execute this stencil and here
# create the unique name of this function.
stencil_func_name = "__numba_stencil_%s_%s" % (
hex(id(the_array)).replace("-", "_"),
self.id,
)
# We will put a loop nest in the generated function for each
# dimension in the input array. Here we create the name for
# the index variable for each dimension. index0, index1, ...
index_vars = []
for i in range(the_array.ndim):
index_var_name = ir_utils.get_unused_var_name("index" + str(i), name_var_table)
index_vars += [index_var_name]
# Create extra signature for out and neighborhood.
out_name = ir_utils.get_unused_var_name("out", name_var_table)
neighborhood_name = ir_utils.get_unused_var_name("neighborhood", name_var_table)
sig_extra = ""
if result is not None:
sig_extra += ", {}=None".format(out_name)
if "neighborhood" in dict(self.kws):
sig_extra += ", {}=None".format(neighborhood_name)
# Get a list of the standard indexed array names.
standard_indexed = self.options.get("standard_indexing", [])
if first_arg in standard_indexed:
raise ValueError(
"The first argument to a stencil kernel must "
"use relative indexing, not standard indexing."
)
if len(set(standard_indexed) - set(kernel_copy.arg_names)) != 0:
raise ValueError(
"Standard indexing requested for an array name "
"not present in the stencil kernel definition."
)
# Add index variables to getitems in the IR to transition the accesses
# in the kernel from relative to regular Python indexing. Returns the
# computed size of the stencil kernel and a list of the relatively indexed
# arrays.
kernel_size, relatively_indexed = self.add_indices_to_kernel(
kernel_copy, index_vars, the_array.ndim, self.neighborhood, standard_indexed
)
if self.neighborhood is None:
self.neighborhood = kernel_size
if config.DEBUG_ARRAY_OPT == 1:
print("After add_indices_to_kernel")
ir_utils.dump_blocks(kernel_copy.blocks)
# The return in the stencil kernel becomes a setitem for that
# particular point in the iteration space.
ret_blocks = self.replace_return_with_setitem(
kernel_copy.blocks, index_vars, out_name
)
if config.DEBUG_ARRAY_OPT == 1:
print("After replace_return_with_setitem", ret_blocks)
ir_utils.dump_blocks(kernel_copy.blocks)
# Start to form the new function to execute the stencil kernel.
func_text = "def {}({}{}):\n".format(
stencil_func_name, ",".join(kernel_copy.arg_names), sig_extra
)
# Get loop ranges for each dimension, which could be either int
# or variable. In the latter case we'll use the extra neighborhood
# argument to the function.
ranges = []
for i in range(the_array.ndim):
if isinstance(kernel_size[i][0], int):
lo = kernel_size[i][0]
hi = kernel_size[i][1]
else:
lo = "{}[{}][0]".format(neighborhood_name, i)
hi = "{}[{}][1]".format(neighborhood_name, i)
ranges.append((lo, hi))
# If there are more than one relatively indexed arrays, add a call to
# a function that will raise an error if any of the relatively indexed
# arrays are of different size than the first input array.
if len(relatively_indexed) > 1:
func_text += " raise_if_incompatible_array_sizes(" + first_arg
for other_array in relatively_indexed:
if other_array != first_arg:
func_text += "," + other_array
func_text += ")\n"
# Get the shape of the first input array.
shape_name = ir_utils.get_unused_var_name("full_shape", name_var_table)
func_text += " {} = {}.shape\n".format(shape_name, first_arg)
# If we have to allocate the output array (the out argument was not used)
# then us numpy.full if the user specified a cval stencil decorator option
# or np.zeros if they didn't to allocate the array.
if result is None:
if "cval" in self.options:
cval = self.options["cval"]
if return_type.dtype != typing.typeof.typeof(cval):
raise ValueError("cval type does not match stencil return type.")
out_init = "{} = np.full({}, {}, dtype=np.{})\n".format(
out_name, shape_name, cval, return_type.dtype
)
else:
out_init = "{} = np.zeros({}, dtype=np.{})\n".format(
out_name, shape_name, return_type.dtype
)
func_text += " " + out_init
offset = 1
# Add the loop nests to the new function.
for i in range(the_array.ndim):
for j in range(offset):
func_text += " "
# ranges[i][0] is the minimum index used in the i'th dimension
# but minimum's greater than 0 don't preclude any entry in the array.
# So, take the minimum of 0 and the minimum index found in the kernel
# and this will be a negative number (potentially -0). Then, we do
# unary - on that to get the positive offset in this dimension whose
# use is precluded.
# ranges[i][1] is the maximum of 0 and the observed maximum index
# in this dimension because negative maximums would not cause us to
# preclude any entry in the array from being used.
func_text += ("for {} in range(-min(0,{}),{}[{}]-max(0,{})):\n").format(
index_vars[i], ranges[i][0], shape_name, i, ranges[i][1]
)
offset += 1
for j in range(offset):
func_text += " "
# Put a sentinel in the code so we can locate it in the IR. We will
# remove this sentinel assignment and replace it with the IR for the
# stencil kernel body.
func_text += "{} = 0\n".format(sentinel_name)
func_text += " return {}\n".format(out_name)
if config.DEBUG_ARRAY_OPT == 1:
print("new stencil func text")
print(func_text)
# Force the new stencil function into existence.
exec_(func_text) in globals(), locals()
stencil_func = eval(stencil_func_name)
if sigret is not None:
pysig = utils.pysignature(stencil_func)
sigret.pysig = pysig
# Get the IR for the newly created stencil function.
stencil_ir = compiler.run_frontend(stencil_func)
ir_utils.remove_dels(stencil_ir.blocks)
# rename all variables in stencil_ir afresh
var_table = ir_utils.get_name_var_table(stencil_ir.blocks)
new_var_dict = {}
reserved_names = (
[sentinel_name, out_name, neighborhood_name, shape_name]
+ kernel_copy.arg_names
+ index_vars
)
for name, var in var_table.items():
if not name in reserved_names:
new_var_dict[name] = ir_utils.mk_unique_var(name)
ir_utils.replace_var_names(stencil_ir.blocks, new_var_dict)
stencil_stub_last_label = max(stencil_ir.blocks.keys()) + 1
# Shift lables in the kernel copy so they are guaranteed unique
# and don't conflict with any labels in the stencil_ir.
kernel_copy.blocks = ir_utils.add_offset_to_labels(
kernel_copy.blocks, stencil_stub_last_label
)
new_label = max(kernel_copy.blocks.keys()) + 1
# Adjust ret_blocks to account for addition of the offset.
ret_blocks = [x + stencil_stub_last_label for x in ret_blocks]
if config.DEBUG_ARRAY_OPT == 1:
print("ret_blocks w/ offsets", ret_blocks, stencil_stub_last_label)
print("before replace sentinel stencil_ir")
ir_utils.dump_blocks(stencil_ir.blocks)
print("before replace sentinel kernel_copy")
ir_utils.dump_blocks(kernel_copy.blocks)
# Search all the block in the stencil outline for the sentinel.
for label, block in stencil_ir.blocks.items():
for i, inst in enumerate(block.body):
if isinstance(inst, ir.Assign) and inst.target.name == sentinel_name:
# We found the sentinel assignment.
loc = inst.loc
scope = block.scope
# split block across __sentinel__
# A new block is allocated for the statements prior to the
# sentinel but the new block maintains the current block
# label.
prev_block = ir.Block(scope, loc)
prev_block.body = block.body[:i]
# The current block is used for statements after sentinel.
block.body = block.body[i + 1 :]
# But the current block gets a new label.
body_first_label = min(kernel_copy.blocks.keys())
# The previous block jumps to the minimum labelled block of
# the parfor body.
prev_block.append(ir.Jump(body_first_label, loc))
# Add all the parfor loop body blocks to the gufunc
# function's IR.
for l, b in kernel_copy.blocks.items():
stencil_ir.blocks[l] = b
stencil_ir.blocks[new_label] = block
stencil_ir.blocks[label] = prev_block
# Add a jump from all the blocks that previously contained
# a return in the stencil kernel to the block
# containing statements after the sentinel.
for ret_block in ret_blocks:
stencil_ir.blocks[ret_block].append(ir.Jump(new_label, loc))
break
else:
continue
break
stencil_ir.blocks = ir_utils.rename_labels(stencil_ir.blocks)
ir_utils.remove_dels(stencil_ir.blocks)
assert isinstance(the_array, types.Type)
array_types = args
new_stencil_param_types = list(array_types)
if config.DEBUG_ARRAY_OPT == 1:
print("new_stencil_param_types", new_stencil_param_types)
ir_utils.dump_blocks(stencil_ir.blocks)
# Compile the combined stencil function with the replaced loop
# body in it.
new_func = compiler.compile_ir(
self._typingctx,
self._targetctx,
stencil_ir,
new_stencil_param_types,
None,
compiler.DEFAULT_FLAGS,
{},
)
return new_func
|
https://github.com/numba/numba/issues/3497
|
$ python ml1.py
[[False False False False False]
[False False True False False]
[False True False True False]
[False False False False False]]
Traceback (most recent call last):
File "ml1.py", line 16, in <module>
print(njit(foo)(A))
File "<path>/numba/numba/dispatcher.py", line 348, in _compile_for_args
error_rewrite(e, 'typing')
File "<path>/numba/numba/dispatcher.py", line 315, in error_rewrite
reraise(type(e), e, None)
File "<path>/numba/numba/six.py", line 658, in reraise
raise value.with_traceback(tb)
numba.errors.TypingError: Failed in nopython mode pipeline (step: nopython mode backend)
Failed in nopython mode pipeline (step: nopython frontend)
Invalid use of Function(<built-in function zeros>) with argument(s) of type(s): (tuple(int64 x 2), dtype=Function(<class 'bool'>))
* parameterized
In definition 0:
All templates rejected
This error is usually caused by passing an argument of a type that is unsupported by the named function.
[1] During: resolving callee type: Function(<built-in function zeros>)
[2] During: typing of call at <string> (3)
File "<string>", line 3:
<source missing, REPL/exec in use?>
[1] During: lowering "$0.3 = call $0.1(array, func=$0.1, args=[Var(array, ml1.py (13))], kws=(), vararg=None)" at ml1.py (13)
|
numba.errors.TypingError
|
def __init__(
self,
native,
modname,
qualname,
unique_name,
doc,
typemap,
restype,
calltypes,
args,
kws,
mangler=None,
argtypes=None,
inline=False,
noalias=False,
env_name=None,
global_dict=None,
):
self.native = native
self.modname = modname
self.global_dict = global_dict
self.qualname = qualname
self.unique_name = unique_name
self.doc = doc
# XXX typemap and calltypes should be on the compile result,
# not the FunctionDescriptor
self.typemap = typemap
self.calltypes = calltypes
self.args = args
self.kws = kws
self.restype = restype
# Argument types
if argtypes is not None:
assert isinstance(argtypes, tuple), argtypes
self.argtypes = argtypes
else:
# Get argument types from the type inference result
# (note the "arg.FOO" convention as used in typeinfer
self.argtypes = tuple(self.typemap["arg." + a] for a in args)
mangler = default_mangler if mangler is None else mangler
# The mangled name *must* be unique, else the wrong function can
# be chosen at link time.
qualprefix = qualifying_prefix(self.modname, self.unique_name)
self.mangled_name = mangler(qualprefix, self.argtypes)
if env_name is None:
env_name = mangler(".NumbaEnv.{}".format(qualprefix), self.argtypes)
self.env_name = env_name
self.inline = inline
self.noalias = noalias
|
def __init__(
self,
native,
modname,
qualname,
unique_name,
doc,
typemap,
restype,
calltypes,
args,
kws,
mangler=None,
argtypes=None,
inline=False,
noalias=False,
env_name=None,
):
self.native = native
self.modname = modname
self.qualname = qualname
self.unique_name = unique_name
self.doc = doc
# XXX typemap and calltypes should be on the compile result,
# not the FunctionDescriptor
self.typemap = typemap
self.calltypes = calltypes
self.args = args
self.kws = kws
self.restype = restype
# Argument types
if argtypes is not None:
assert isinstance(argtypes, tuple), argtypes
self.argtypes = argtypes
else:
# Get argument types from the type inference result
# (note the "arg.FOO" convention as used in typeinfer
self.argtypes = tuple(self.typemap["arg." + a] for a in args)
mangler = default_mangler if mangler is None else mangler
# The mangled name *must* be unique, else the wrong function can
# be chosen at link time.
qualprefix = qualifying_prefix(self.modname, self.unique_name)
self.mangled_name = mangler(qualprefix, self.argtypes)
if env_name is None:
env_name = mangler(".NumbaEnv.{}".format(qualprefix), self.argtypes)
self.env_name = env_name
self.inline = inline
self.noalias = noalias
|
https://github.com/numba/numba/issues/3355
|
Traceback (most recent call last):
File "tmp_tests/obj_gen_test.py", line 14, in <module>
g()
NameError: global name 'np' is not defined
|
NameError
|
def _get_function_info(cls, func_ir):
"""
Returns
-------
qualname, unique_name, modname, doc, args, kws, globals
``unique_name`` must be a unique name.
"""
func = func_ir.func_id.func
qualname = func_ir.func_id.func_qualname
# XXX to func_id
modname = func.__module__
doc = func.__doc__ or ""
args = tuple(func_ir.arg_names)
kws = () # TODO
global_dict = None
if modname is None:
# Dynamically generated function.
modname = _dynamic_modname
# Retain a reference to the dictionary of the function.
# This disables caching, serialization and pickling.
global_dict = func_ir.func_id.func.__globals__
unique_name = func_ir.func_id.unique_name
return qualname, unique_name, modname, doc, args, kws, global_dict
|
def _get_function_info(cls, func_ir):
"""
Returns
-------
qualname, unique_name, modname, doc, args, kws, globals
``unique_name`` must be a unique name.
"""
func = func_ir.func_id.func
qualname = func_ir.func_id.func_qualname
# XXX to func_id
modname = func.__module__
doc = func.__doc__ or ""
args = tuple(func_ir.arg_names)
kws = () # TODO
if modname is None:
# Dynamically generated function.
modname = _dynamic_modname
unique_name = func_ir.func_id.unique_name
return qualname, unique_name, modname, doc, args, kws
|
https://github.com/numba/numba/issues/3355
|
Traceback (most recent call last):
File "tmp_tests/obj_gen_test.py", line 14, in <module>
g()
NameError: global name 'np' is not defined
|
NameError
|
def _from_python_function(
cls,
func_ir,
typemap,
restype,
calltypes,
native,
mangler=None,
inline=False,
noalias=False,
):
(
qualname,
unique_name,
modname,
doc,
args,
kws,
global_dict,
) = cls._get_function_info(func_ir)
self = cls(
native,
modname,
qualname,
unique_name,
doc,
typemap,
restype,
calltypes,
args,
kws,
mangler=mangler,
inline=inline,
noalias=noalias,
global_dict=global_dict,
)
return self
|
def _from_python_function(
cls,
func_ir,
typemap,
restype,
calltypes,
native,
mangler=None,
inline=False,
noalias=False,
):
(
qualname,
unique_name,
modname,
doc,
args,
kws,
) = cls._get_function_info(func_ir)
self = cls(
native,
modname,
qualname,
unique_name,
doc,
typemap,
restype,
calltypes,
args,
kws,
mangler=mangler,
inline=inline,
noalias=noalias,
)
return self
|
https://github.com/numba/numba/issues/3355
|
Traceback (most recent call last):
File "tmp_tests/obj_gen_test.py", line 14, in <module>
g()
NameError: global name 'np' is not defined
|
NameError
|
def from_fndesc(cls, fndesc):
try:
# Avoid creating new Env
return cls._memo[fndesc.env_name]
except KeyError:
inst = cls(fndesc.lookup_globals())
inst.env_name = fndesc.env_name
cls._memo[fndesc.env_name] = inst
return inst
|
def from_fndesc(cls, fndesc):
mod = fndesc.lookup_module()
try:
# Avoid creating new Env
return cls._memo[fndesc.env_name]
except KeyError:
inst = cls(mod.__dict__)
inst.env_name = fndesc.env_name
cls._memo[fndesc.env_name] = inst
return inst
|
https://github.com/numba/numba/issues/3355
|
Traceback (most recent call last):
File "tmp_tests/obj_gen_test.py", line 14, in <module>
g()
NameError: global name 'np' is not defined
|
NameError
|
def from_cuda_array_interface(desc, owner=None):
"""Create a DeviceNDArray from a cuda-array-interface description.
The *owner* is the owner of the underlying memory.
The resulting DeviceNDArray will acquire a reference from it.
"""
shape = desc["shape"]
strides = desc.get("strides")
dtype = np.dtype(desc["typestr"])
shape, strides, dtype = _prepare_shape_strides_dtype(
shape, strides, dtype, order="C"
)
size = driver.memory_size_from_info(shape, strides, dtype.itemsize)
devptr = driver.get_devptr_for_active_ctx(desc["data"][0])
data = driver.MemoryPointer(current_context(), devptr, size=size, owner=owner)
da = devicearray.DeviceNDArray(
shape=shape, strides=strides, dtype=dtype, gpu_data=data
)
return da
|
def from_cuda_array_interface(desc, owner=None):
"""Create a DeviceNDArray from a cuda-array-interface description.
The *owner* is the owner of the underlying memory.
The resulting DeviceNDArray will acquire a reference from it.
"""
shape = desc["shape"]
strides = desc.get("strides")
dtype = np.dtype(desc["typestr"])
shape, strides, dtype = _prepare_shape_strides_dtype(
shape, strides, dtype, order="C"
)
devptr = driver.get_devptr_for_active_ctx(desc["data"][0])
data = driver.MemoryPointer(
current_context(), devptr, size=np.prod(shape) * dtype.itemsize, owner=owner
)
da = devicearray.DeviceNDArray(
shape=shape, strides=strides, dtype=dtype, gpu_data=data
)
return da
|
https://github.com/numba/numba/issues/3333
|
$ python repro_indexing.py
Traceback (most recent call last):
File "repro_indexing.py", line 8, in <module>
ext_arrslice = ext_arr[:5]
File "/scratch/USERS/fordas/workspace/numba_min/.conda/lib/python3.7/site-packages/numba/cuda/cudadrv/devices.py", line 212, in _require_cuda_context
return fn(*args, **kws)
File "/scratch/USERS/fordas/workspace/numba_min/.conda/lib/python3.7/site-packages/numba/cuda/cudadrv/devicearray.py", line 468, in __getitem__
return self._do_getitem(item)
File "/scratch/USERS/fordas/workspace/numba_min/.conda/lib/python3.7/site-packages/numba/cuda/cudadrv/devicearray.py", line 482, in _do_getitem
newdata = self.gpu_data.view(*extents[0])
File "/scratch/USERS/fordas/workspace/numba_min/.conda/lib/python3.7/site-packages/numba/cuda/cudadrv/driver.py", line 1214, in view
return OwnedPointer(weakref.proxy(self.owner), view)
File "/scratch/USERS/fordas/workspace/numba_min/.conda/lib/python3.7/site-packages/numba/cuda/cudadrv/driver.py", line 1298, in __init__
self._mem.refct += 1
AttributeError: 'DeviceNDArray' object has no attribute 'refct'
|
AttributeError
|
def view(self, start, stop=None):
if stop is None:
size = self.size - start
else:
size = stop - start
# Handle NULL/empty memory buffer
if self.device_pointer.value is None:
if size != 0:
raise RuntimeError("non-empty slice into empty slice")
view = self # new view is just a reference to self
# Handle normal case
else:
base = self.device_pointer.value + start
if size < 0:
raise RuntimeError("size cannot be negative")
pointer = drvapi.cu_device_ptr(base)
view = MemoryPointer(self.context, pointer, size, owner=self.owner)
if isinstance(self.owner, (MemoryPointer, OwnedPointer)):
# Owned by a numba-managed memory segment, take an owned reference
return OwnedPointer(weakref.proxy(self.owner), view)
else:
# Owned by external alloc, return view with same external owner
return view
|
def view(self, start, stop=None):
if stop is None:
size = self.size - start
else:
size = stop - start
# Handle NULL/empty memory buffer
if self.device_pointer.value is None:
if size != 0:
raise RuntimeError("non-empty slice into empty slice")
view = self # new view is just a reference to self
# Handle normal case
else:
base = self.device_pointer.value + start
if size < 0:
raise RuntimeError("size cannot be negative")
pointer = drvapi.cu_device_ptr(base)
view = MemoryPointer(self.context, pointer, size, owner=self.owner)
return OwnedPointer(weakref.proxy(self.owner), view)
|
https://github.com/numba/numba/issues/3333
|
$ python repro_indexing.py
Traceback (most recent call last):
File "repro_indexing.py", line 8, in <module>
ext_arrslice = ext_arr[:5]
File "/scratch/USERS/fordas/workspace/numba_min/.conda/lib/python3.7/site-packages/numba/cuda/cudadrv/devices.py", line 212, in _require_cuda_context
return fn(*args, **kws)
File "/scratch/USERS/fordas/workspace/numba_min/.conda/lib/python3.7/site-packages/numba/cuda/cudadrv/devicearray.py", line 468, in __getitem__
return self._do_getitem(item)
File "/scratch/USERS/fordas/workspace/numba_min/.conda/lib/python3.7/site-packages/numba/cuda/cudadrv/devicearray.py", line 482, in _do_getitem
newdata = self.gpu_data.view(*extents[0])
File "/scratch/USERS/fordas/workspace/numba_min/.conda/lib/python3.7/site-packages/numba/cuda/cudadrv/driver.py", line 1214, in view
return OwnedPointer(weakref.proxy(self.owner), view)
File "/scratch/USERS/fordas/workspace/numba_min/.conda/lib/python3.7/site-packages/numba/cuda/cudadrv/driver.py", line 1298, in __init__
self._mem.refct += 1
AttributeError: 'DeviceNDArray' object has no attribute 'refct'
|
AttributeError
|
def min_parallel_impl(return_type, arg):
# XXX: use prange for 1D arrays since pndindex returns a 1-tuple instead of
# integer. This causes type and fusion issues.
if arg.ndim == 1:
def min_1(in_arr):
numba.parfor.init_prange()
min_checker(len(in_arr))
val = numba.targets.builtins.get_type_max_value(in_arr.dtype)
for i in numba.parfor.internal_prange(len(in_arr)):
val = min(val, in_arr[i])
return val
else:
def min_1(in_arr):
numba.parfor.init_prange()
min_checker(len(in_arr))
val = numba.targets.builtins.get_type_max_value(in_arr.dtype)
for i in numba.pndindex(in_arr.shape):
val = min(val, in_arr[i])
return val
return min_1
|
def min_parallel_impl(return_type, arg):
# XXX: use prange for 1D arrays since pndindex returns a 1-tuple instead of
# integer. This causes type and fusion issues.
if arg.ndim == 1:
def min_1(in_arr):
numba.parfor.init_prange()
val = numba.targets.builtins.get_type_max_value(in_arr.dtype)
for i in numba.parfor.internal_prange(len(in_arr)):
val = min(val, in_arr[i])
return val
else:
def min_1(in_arr):
numba.parfor.init_prange()
val = numba.targets.builtins.get_type_max_value(in_arr.dtype)
for i in numba.pndindex(in_arr.shape):
val = min(val, in_arr[i])
return val
return min_1
|
https://github.com/numba/numba/issues/3119
|
In [324]: def test_0d_min(X):
...: return np.min(X)
...:
...:
In [325]: test_0d_min(np.array([])) # correct behavior
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
... (omitted)
ValueError: zero-size array to reduction operation minimum which has no identity
In [326]: nb.jit(test_0d_min)(np.array([])) # incorrect
Out[326]: 0.0
In [328]: nb.jit(test_0d_min, parallel=True)(np.array([])) # incorrect
Out[328]: 1.7976931348623157e+308
|
ValueError
|
def min_1(in_arr):
numba.parfor.init_prange()
min_checker(len(in_arr))
val = numba.targets.builtins.get_type_max_value(in_arr.dtype)
for i in numba.pndindex(in_arr.shape):
val = min(val, in_arr[i])
return val
|
def min_1(in_arr):
numba.parfor.init_prange()
val = numba.targets.builtins.get_type_max_value(in_arr.dtype)
for i in numba.pndindex(in_arr.shape):
val = min(val, in_arr[i])
return val
|
https://github.com/numba/numba/issues/3119
|
In [324]: def test_0d_min(X):
...: return np.min(X)
...:
...:
In [325]: test_0d_min(np.array([])) # correct behavior
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
... (omitted)
ValueError: zero-size array to reduction operation minimum which has no identity
In [326]: nb.jit(test_0d_min)(np.array([])) # incorrect
Out[326]: 0.0
In [328]: nb.jit(test_0d_min, parallel=True)(np.array([])) # incorrect
Out[328]: 1.7976931348623157e+308
|
ValueError
|
def max_parallel_impl(return_type, arg):
if arg.ndim == 1:
def max_1(in_arr):
numba.parfor.init_prange()
max_checker(len(in_arr))
val = numba.targets.builtins.get_type_min_value(in_arr.dtype)
for i in numba.parfor.internal_prange(len(in_arr)):
val = max(val, in_arr[i])
return val
else:
def max_1(in_arr):
numba.parfor.init_prange()
max_checker(len(in_arr))
val = numba.targets.builtins.get_type_min_value(in_arr.dtype)
for i in numba.pndindex(in_arr.shape):
val = max(val, in_arr[i])
return val
return max_1
|
def max_parallel_impl(return_type, arg):
if arg.ndim == 1:
def max_1(in_arr):
numba.parfor.init_prange()
val = numba.targets.builtins.get_type_min_value(in_arr.dtype)
for i in numba.parfor.internal_prange(len(in_arr)):
val = max(val, in_arr[i])
return val
else:
def max_1(in_arr):
numba.parfor.init_prange()
val = numba.targets.builtins.get_type_min_value(in_arr.dtype)
for i in numba.pndindex(in_arr.shape):
val = max(val, in_arr[i])
return val
return max_1
|
https://github.com/numba/numba/issues/3119
|
In [324]: def test_0d_min(X):
...: return np.min(X)
...:
...:
In [325]: test_0d_min(np.array([])) # correct behavior
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
... (omitted)
ValueError: zero-size array to reduction operation minimum which has no identity
In [326]: nb.jit(test_0d_min)(np.array([])) # incorrect
Out[326]: 0.0
In [328]: nb.jit(test_0d_min, parallel=True)(np.array([])) # incorrect
Out[328]: 1.7976931348623157e+308
|
ValueError
|
def max_1(in_arr):
numba.parfor.init_prange()
max_checker(len(in_arr))
val = numba.targets.builtins.get_type_min_value(in_arr.dtype)
for i in numba.pndindex(in_arr.shape):
val = max(val, in_arr[i])
return val
|
def max_1(in_arr):
numba.parfor.init_prange()
val = numba.targets.builtins.get_type_min_value(in_arr.dtype)
for i in numba.pndindex(in_arr.shape):
val = max(val, in_arr[i])
return val
|
https://github.com/numba/numba/issues/3119
|
In [324]: def test_0d_min(X):
...: return np.min(X)
...:
...:
In [325]: test_0d_min(np.array([])) # correct behavior
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
... (omitted)
ValueError: zero-size array to reduction operation minimum which has no identity
In [326]: nb.jit(test_0d_min)(np.array([])) # incorrect
Out[326]: 0.0
In [328]: nb.jit(test_0d_min, parallel=True)(np.array([])) # incorrect
Out[328]: 1.7976931348623157e+308
|
ValueError
|
def argmin_parallel_impl(in_arr):
numba.parfor.init_prange()
argmin_checker(len(in_arr))
A = in_arr.ravel()
init_val = numba.targets.builtins.get_type_max_value(A.dtype)
ival = numba.typing.builtins.IndexValue(0, init_val)
for i in numba.parfor.internal_prange(len(A)):
curr_ival = numba.typing.builtins.IndexValue(i, A[i])
ival = min(ival, curr_ival)
return ival.index
|
def argmin_parallel_impl(in_arr):
numba.parfor.init_prange()
A = in_arr.ravel()
init_val = numba.targets.builtins.get_type_max_value(A.dtype)
ival = numba.typing.builtins.IndexValue(0, init_val)
for i in numba.parfor.internal_prange(len(A)):
curr_ival = numba.typing.builtins.IndexValue(i, A[i])
ival = min(ival, curr_ival)
return ival.index
|
https://github.com/numba/numba/issues/3119
|
In [324]: def test_0d_min(X):
...: return np.min(X)
...:
...:
In [325]: test_0d_min(np.array([])) # correct behavior
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
... (omitted)
ValueError: zero-size array to reduction operation minimum which has no identity
In [326]: nb.jit(test_0d_min)(np.array([])) # incorrect
Out[326]: 0.0
In [328]: nb.jit(test_0d_min, parallel=True)(np.array([])) # incorrect
Out[328]: 1.7976931348623157e+308
|
ValueError
|
def argmax_parallel_impl(in_arr):
numba.parfor.init_prange()
argmax_checker(len(in_arr))
A = in_arr.ravel()
init_val = numba.targets.builtins.get_type_min_value(A.dtype)
ival = numba.typing.builtins.IndexValue(0, init_val)
for i in numba.parfor.internal_prange(len(A)):
curr_ival = numba.typing.builtins.IndexValue(i, A[i])
ival = max(ival, curr_ival)
return ival.index
|
def argmax_parallel_impl(in_arr):
numba.parfor.init_prange()
A = in_arr.ravel()
init_val = numba.targets.builtins.get_type_min_value(A.dtype)
ival = numba.typing.builtins.IndexValue(0, init_val)
for i in numba.parfor.internal_prange(len(A)):
curr_ival = numba.typing.builtins.IndexValue(i, A[i])
ival = max(ival, curr_ival)
return ival.index
|
https://github.com/numba/numba/issues/3119
|
In [324]: def test_0d_min(X):
...: return np.min(X)
...:
...:
In [325]: test_0d_min(np.array([])) # correct behavior
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
... (omitted)
ValueError: zero-size array to reduction operation minimum which has no identity
In [326]: nb.jit(test_0d_min)(np.array([])) # incorrect
Out[326]: 0.0
In [328]: nb.jit(test_0d_min, parallel=True)(np.array([])) # incorrect
Out[328]: 1.7976931348623157e+308
|
ValueError
|
def _replace_parallel_functions(self, blocks):
"""
Replace functions with their parallel implemntation in
replace_functions_map if available.
The implementation code is inlined to enable more optimization.
"""
from numba.inline_closurecall import inline_closure_call
work_list = list(blocks.items())
while work_list:
label, block = work_list.pop()
for i, instr in enumerate(block.body):
if isinstance(instr, ir.Assign):
lhs = instr.target
lhs_typ = self.typemap[lhs.name]
expr = instr.value
if isinstance(expr, ir.Expr) and expr.op == "call":
# Try inline known calls with their parallel implementations
def replace_func():
func_def = get_definition(self.func_ir, expr.func)
callname = find_callname(self.func_ir, expr)
repl_func = replace_functions_map.get(callname, None)
require(repl_func != None)
typs = tuple(self.typemap[x.name] for x in expr.args)
try:
new_func = repl_func(lhs_typ, *typs)
except:
new_func = None
require(new_func != None)
g = copy.copy(self.func_ir.func_id.func.__globals__)
g["numba"] = numba
g["np"] = numpy
g["math"] = math
# if the function being inlined has a function
# checking the inputs, find it and add it to globals
check = replace_functions_checkers_map.get(callname, None)
if check is not None:
g[check.name] = check.func
# inline the parallel implementation
inline_closure_call(
self.func_ir,
g,
block,
i,
new_func,
self.typingctx,
typs,
self.typemap,
self.calltypes,
work_list,
)
return True
if guard(replace_func):
break
elif (
isinstance(expr, ir.Expr)
and expr.op == "getattr"
and expr.attr == "dtype"
):
# Replace getattr call "A.dtype" with numpy.dtype(<actual type>).
# This helps remove superfluous dependencies from parfor.
typ = self.typemap[expr.value.name]
if isinstance(typ, types.npytypes.Array):
# Convert A.dtype to four statements.
# 1) Get numpy global.
# 2) Create var for known type of array, e.g., numpy.float64
# 3) Get dtype function from numpy module.
# 4) Create var for numpy.dtype(var from #2).
# Create var for numpy module.
dtype = typ.dtype
scope = block.scope
loc = instr.loc
g_np_var = ir.Var(scope, mk_unique_var("$np_g_var"), loc)
self.typemap[g_np_var.name] = types.misc.Module(numpy)
g_np = ir.Global("np", numpy, loc)
g_np_assign = ir.Assign(g_np, g_np_var, loc)
# Create var for type infered type of the array, e.g., numpy.float64.
typ_var = ir.Var(scope, mk_unique_var("$np_typ_var"), loc)
self.typemap[typ_var.name] = types.functions.NumberClass(dtype)
dtype_str = str(dtype)
if dtype_str == "bool":
dtype_str = "bool_"
np_typ_getattr = ir.Expr.getattr(g_np_var, dtype_str, loc)
typ_var_assign = ir.Assign(np_typ_getattr, typ_var, loc)
# Get the dtype function from the numpy module.
dtype_attr_var = ir.Var(
scope, mk_unique_var("$dtype_attr_var"), loc
)
temp = find_template(numpy.dtype)
tfunc = numba.types.Function(temp)
tfunc.get_call_type(
self.typingctx, (self.typemap[typ_var.name],), {}
)
self.typemap[dtype_attr_var.name] = types.functions.Function(
temp
)
dtype_attr_getattr = ir.Expr.getattr(g_np_var, "dtype", loc)
dtype_attr_assign = ir.Assign(
dtype_attr_getattr, dtype_attr_var, loc
)
# Call numpy.dtype on the statically coded type two steps above.
dtype_var = ir.Var(scope, mk_unique_var("$dtype_var"), loc)
self.typemap[dtype_var.name] = types.npytypes.DType(dtype)
dtype_getattr = ir.Expr.call(dtype_attr_var, [typ_var], (), loc)
dtype_assign = ir.Assign(dtype_getattr, dtype_var, loc)
self.calltypes[dtype_getattr] = signature(
self.typemap[dtype_var.name], self.typemap[typ_var.name]
)
# The original A.dtype rhs is replaced with result of this call.
instr.value = dtype_var
# Add statements to body of the code.
block.body.insert(0, dtype_assign)
block.body.insert(0, dtype_attr_assign)
block.body.insert(0, typ_var_assign)
block.body.insert(0, g_np_assign)
break
|
def _replace_parallel_functions(self, blocks):
"""
Replace functions with their parallel implemntation in
replace_functions_map if available.
The implementation code is inlined to enable more optimization.
"""
from numba.inline_closurecall import inline_closure_call
work_list = list(blocks.items())
while work_list:
label, block = work_list.pop()
for i, instr in enumerate(block.body):
if isinstance(instr, ir.Assign):
lhs = instr.target
lhs_typ = self.typemap[lhs.name]
expr = instr.value
if isinstance(expr, ir.Expr) and expr.op == "call":
# Try inline known calls with their parallel implementations
def replace_func():
func_def = get_definition(self.func_ir, expr.func)
callname = find_callname(self.func_ir, expr)
repl_func = replace_functions_map.get(callname, None)
require(repl_func != None)
typs = tuple(self.typemap[x.name] for x in expr.args)
try:
new_func = repl_func(lhs_typ, *typs)
except:
new_func = None
require(new_func != None)
g = copy.copy(self.func_ir.func_id.func.__globals__)
g["numba"] = numba
g["np"] = numpy
g["math"] = math
# inline the parallel implementation
inline_closure_call(
self.func_ir,
g,
block,
i,
new_func,
self.typingctx,
typs,
self.typemap,
self.calltypes,
work_list,
)
return True
if guard(replace_func):
break
elif (
isinstance(expr, ir.Expr)
and expr.op == "getattr"
and expr.attr == "dtype"
):
# Replace getattr call "A.dtype" with numpy.dtype(<actual type>).
# This helps remove superfluous dependencies from parfor.
typ = self.typemap[expr.value.name]
if isinstance(typ, types.npytypes.Array):
# Convert A.dtype to four statements.
# 1) Get numpy global.
# 2) Create var for known type of array, e.g., numpy.float64
# 3) Get dtype function from numpy module.
# 4) Create var for numpy.dtype(var from #2).
# Create var for numpy module.
dtype = typ.dtype
scope = block.scope
loc = instr.loc
g_np_var = ir.Var(scope, mk_unique_var("$np_g_var"), loc)
self.typemap[g_np_var.name] = types.misc.Module(numpy)
g_np = ir.Global("np", numpy, loc)
g_np_assign = ir.Assign(g_np, g_np_var, loc)
# Create var for type infered type of the array, e.g., numpy.float64.
typ_var = ir.Var(scope, mk_unique_var("$np_typ_var"), loc)
self.typemap[typ_var.name] = types.functions.NumberClass(dtype)
dtype_str = str(dtype)
if dtype_str == "bool":
dtype_str = "bool_"
np_typ_getattr = ir.Expr.getattr(g_np_var, dtype_str, loc)
typ_var_assign = ir.Assign(np_typ_getattr, typ_var, loc)
# Get the dtype function from the numpy module.
dtype_attr_var = ir.Var(
scope, mk_unique_var("$dtype_attr_var"), loc
)
temp = find_template(numpy.dtype)
tfunc = numba.types.Function(temp)
tfunc.get_call_type(
self.typingctx, (self.typemap[typ_var.name],), {}
)
self.typemap[dtype_attr_var.name] = types.functions.Function(
temp
)
dtype_attr_getattr = ir.Expr.getattr(g_np_var, "dtype", loc)
dtype_attr_assign = ir.Assign(
dtype_attr_getattr, dtype_attr_var, loc
)
# Call numpy.dtype on the statically coded type two steps above.
dtype_var = ir.Var(scope, mk_unique_var("$dtype_var"), loc)
self.typemap[dtype_var.name] = types.npytypes.DType(dtype)
dtype_getattr = ir.Expr.call(dtype_attr_var, [typ_var], (), loc)
dtype_assign = ir.Assign(dtype_getattr, dtype_var, loc)
self.calltypes[dtype_getattr] = signature(
self.typemap[dtype_var.name], self.typemap[typ_var.name]
)
# The original A.dtype rhs is replaced with result of this call.
instr.value = dtype_var
# Add statements to body of the code.
block.body.insert(0, dtype_assign)
block.body.insert(0, dtype_attr_assign)
block.body.insert(0, typ_var_assign)
block.body.insert(0, g_np_assign)
break
|
https://github.com/numba/numba/issues/3119
|
In [324]: def test_0d_min(X):
...: return np.min(X)
...:
...:
In [325]: test_0d_min(np.array([])) # correct behavior
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
... (omitted)
ValueError: zero-size array to reduction operation minimum which has no identity
In [326]: nb.jit(test_0d_min)(np.array([])) # incorrect
Out[326]: 0.0
In [328]: nb.jit(test_0d_min, parallel=True)(np.array([])) # incorrect
Out[328]: 1.7976931348623157e+308
|
ValueError
|
def replace_func():
func_def = get_definition(self.func_ir, expr.func)
callname = find_callname(self.func_ir, expr)
repl_func = replace_functions_map.get(callname, None)
require(repl_func != None)
typs = tuple(self.typemap[x.name] for x in expr.args)
try:
new_func = repl_func(lhs_typ, *typs)
except:
new_func = None
require(new_func != None)
g = copy.copy(self.func_ir.func_id.func.__globals__)
g["numba"] = numba
g["np"] = numpy
g["math"] = math
# if the function being inlined has a function
# checking the inputs, find it and add it to globals
check = replace_functions_checkers_map.get(callname, None)
if check is not None:
g[check.name] = check.func
# inline the parallel implementation
inline_closure_call(
self.func_ir,
g,
block,
i,
new_func,
self.typingctx,
typs,
self.typemap,
self.calltypes,
work_list,
)
return True
|
def replace_func():
func_def = get_definition(self.func_ir, expr.func)
callname = find_callname(self.func_ir, expr)
repl_func = replace_functions_map.get(callname, None)
require(repl_func != None)
typs = tuple(self.typemap[x.name] for x in expr.args)
try:
new_func = repl_func(lhs_typ, *typs)
except:
new_func = None
require(new_func != None)
g = copy.copy(self.func_ir.func_id.func.__globals__)
g["numba"] = numba
g["np"] = numpy
g["math"] = math
# inline the parallel implementation
inline_closure_call(
self.func_ir,
g,
block,
i,
new_func,
self.typingctx,
typs,
self.typemap,
self.calltypes,
work_list,
)
return True
|
https://github.com/numba/numba/issues/3119
|
In [324]: def test_0d_min(X):
...: return np.min(X)
...:
...:
In [325]: test_0d_min(np.array([])) # correct behavior
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
... (omitted)
ValueError: zero-size array to reduction operation minimum which has no identity
In [326]: nb.jit(test_0d_min)(np.array([])) # incorrect
Out[326]: 0.0
In [328]: nb.jit(test_0d_min, parallel=True)(np.array([])) # incorrect
Out[328]: 1.7976931348623157e+308
|
ValueError
|
def array_min(context, builder, sig, args):
ty = sig.args[0].dtype
if isinstance(ty, (types.NPDatetime, types.NPTimedelta)):
# NaT is smaller than every other value, but it is
# ignored as far as min() is concerned.
nat = ty("NaT")
def array_min_impl(arry):
if arry.size == 0:
raise ValueError(
(
"zero-size array to reduction operation "
"minimum which has no identity"
)
)
min_value = nat
it = np.nditer(arry)
for view in it:
v = view.item()
if v != nat:
min_value = v
break
for view in it:
v = view.item()
if v != nat and v < min_value:
min_value = v
return min_value
else:
def array_min_impl(arry):
if arry.size == 0:
raise ValueError(
(
"zero-size array to reduction operation "
"minimum which has no identity"
)
)
it = np.nditer(arry)
for view in it:
min_value = view.item()
break
for view in it:
v = view.item()
if v < min_value:
min_value = v
return min_value
res = context.compile_internal(builder, array_min_impl, sig, args)
return impl_ret_borrowed(context, builder, sig.return_type, res)
|
def array_min(context, builder, sig, args):
ty = sig.args[0].dtype
if isinstance(ty, (types.NPDatetime, types.NPTimedelta)):
# NaT is smaller than every other value, but it is
# ignored as far as min() is concerned.
nat = ty("NaT")
def array_min_impl(arry):
min_value = nat
it = np.nditer(arry)
for view in it:
v = view.item()
if v != nat:
min_value = v
break
for view in it:
v = view.item()
if v != nat and v < min_value:
min_value = v
return min_value
else:
def array_min_impl(arry):
it = np.nditer(arry)
for view in it:
min_value = view.item()
break
for view in it:
v = view.item()
if v < min_value:
min_value = v
return min_value
res = context.compile_internal(builder, array_min_impl, sig, args)
return impl_ret_borrowed(context, builder, sig.return_type, res)
|
https://github.com/numba/numba/issues/3119
|
In [324]: def test_0d_min(X):
...: return np.min(X)
...:
...:
In [325]: test_0d_min(np.array([])) # correct behavior
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
... (omitted)
ValueError: zero-size array to reduction operation minimum which has no identity
In [326]: nb.jit(test_0d_min)(np.array([])) # incorrect
Out[326]: 0.0
In [328]: nb.jit(test_0d_min, parallel=True)(np.array([])) # incorrect
Out[328]: 1.7976931348623157e+308
|
ValueError
|
def array_min_impl(arry):
if arry.size == 0:
raise ValueError(
("zero-size array to reduction operation minimum which has no identity")
)
it = np.nditer(arry)
for view in it:
min_value = view.item()
break
for view in it:
v = view.item()
if v < min_value:
min_value = v
return min_value
|
def array_min_impl(arry):
it = np.nditer(arry)
for view in it:
min_value = view.item()
break
for view in it:
v = view.item()
if v < min_value:
min_value = v
return min_value
|
https://github.com/numba/numba/issues/3119
|
In [324]: def test_0d_min(X):
...: return np.min(X)
...:
...:
In [325]: test_0d_min(np.array([])) # correct behavior
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
... (omitted)
ValueError: zero-size array to reduction operation minimum which has no identity
In [326]: nb.jit(test_0d_min)(np.array([])) # incorrect
Out[326]: 0.0
In [328]: nb.jit(test_0d_min, parallel=True)(np.array([])) # incorrect
Out[328]: 1.7976931348623157e+308
|
ValueError
|
def array_max(context, builder, sig, args):
def array_max_impl(arry):
if arry.size == 0:
raise ValueError(
("zero-size array to reduction operation maximum which has no identity")
)
it = np.nditer(arry)
for view in it:
max_value = view.item()
break
for view in it:
v = view.item()
if v > max_value:
max_value = v
return max_value
res = context.compile_internal(builder, array_max_impl, sig, args)
return impl_ret_borrowed(context, builder, sig.return_type, res)
|
def array_max(context, builder, sig, args):
def array_max_impl(arry):
it = np.nditer(arry)
for view in it:
max_value = view.item()
break
for view in it:
v = view.item()
if v > max_value:
max_value = v
return max_value
res = context.compile_internal(builder, array_max_impl, sig, args)
return impl_ret_borrowed(context, builder, sig.return_type, res)
|
https://github.com/numba/numba/issues/3119
|
In [324]: def test_0d_min(X):
...: return np.min(X)
...:
...:
In [325]: test_0d_min(np.array([])) # correct behavior
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
... (omitted)
ValueError: zero-size array to reduction operation minimum which has no identity
In [326]: nb.jit(test_0d_min)(np.array([])) # incorrect
Out[326]: 0.0
In [328]: nb.jit(test_0d_min, parallel=True)(np.array([])) # incorrect
Out[328]: 1.7976931348623157e+308
|
ValueError
|
def array_max_impl(arry):
if arry.size == 0:
raise ValueError(
("zero-size array to reduction operation maximum which has no identity")
)
it = np.nditer(arry)
for view in it:
max_value = view.item()
break
for view in it:
v = view.item()
if v > max_value:
max_value = v
return max_value
|
def array_max_impl(arry):
it = np.nditer(arry)
for view in it:
max_value = view.item()
break
for view in it:
v = view.item()
if v > max_value:
max_value = v
return max_value
|
https://github.com/numba/numba/issues/3119
|
In [324]: def test_0d_min(X):
...: return np.min(X)
...:
...:
In [325]: test_0d_min(np.array([])) # correct behavior
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
... (omitted)
ValueError: zero-size array to reduction operation minimum which has no identity
In [326]: nb.jit(test_0d_min)(np.array([])) # incorrect
Out[326]: 0.0
In [328]: nb.jit(test_0d_min, parallel=True)(np.array([])) # incorrect
Out[328]: 1.7976931348623157e+308
|
ValueError
|
def array_argmin(context, builder, sig, args):
ty = sig.args[0].dtype
# NOTE: Under Numpy < 1.10, argmin() is inconsistent with min() on NaT values:
# https://github.com/numpy/numpy/issues/6030
if numpy_version >= (1, 10) and isinstance(
ty, (types.NPDatetime, types.NPTimedelta)
):
# NaT is smaller than every other value, but it is
# ignored as far as argmin() is concerned.
nat = ty("NaT")
def array_argmin_impl(arry):
if arry.size == 0:
raise ValueError("attempt to get argmin of an empty sequence")
min_value = nat
min_idx = 0
it = arry.flat
idx = 0
for v in it:
if v != nat:
min_value = v
min_idx = idx
idx += 1
break
idx += 1
for v in it:
if v != nat and v < min_value:
min_value = v
min_idx = idx
idx += 1
return min_idx
else:
def array_argmin_impl(arry):
if arry.size == 0:
raise ValueError("attempt to get argmin of an empty sequence")
for v in arry.flat:
min_value = v
min_idx = 0
break
idx = 0
for v in arry.flat:
if v < min_value:
min_value = v
min_idx = idx
idx += 1
return min_idx
res = context.compile_internal(builder, array_argmin_impl, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
|
def array_argmin(context, builder, sig, args):
ty = sig.args[0].dtype
# NOTE: Under Numpy < 1.10, argmin() is inconsistent with min() on NaT values:
# https://github.com/numpy/numpy/issues/6030
if numpy_version >= (1, 10) and isinstance(
ty, (types.NPDatetime, types.NPTimedelta)
):
# NaT is smaller than every other value, but it is
# ignored as far as argmin() is concerned.
nat = ty("NaT")
def array_argmin_impl(arry):
min_value = nat
min_idx = 0
it = arry.flat
idx = 0
for v in it:
if v != nat:
min_value = v
min_idx = idx
idx += 1
break
idx += 1
for v in it:
if v != nat and v < min_value:
min_value = v
min_idx = idx
idx += 1
return min_idx
else:
def array_argmin_impl(arry):
for v in arry.flat:
min_value = v
min_idx = 0
break
idx = 0
for v in arry.flat:
if v < min_value:
min_value = v
min_idx = idx
idx += 1
return min_idx
res = context.compile_internal(builder, array_argmin_impl, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
|
https://github.com/numba/numba/issues/3119
|
In [324]: def test_0d_min(X):
...: return np.min(X)
...:
...:
In [325]: test_0d_min(np.array([])) # correct behavior
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
... (omitted)
ValueError: zero-size array to reduction operation minimum which has no identity
In [326]: nb.jit(test_0d_min)(np.array([])) # incorrect
Out[326]: 0.0
In [328]: nb.jit(test_0d_min, parallel=True)(np.array([])) # incorrect
Out[328]: 1.7976931348623157e+308
|
ValueError
|
def array_argmin_impl(arry):
if arry.size == 0:
raise ValueError("attempt to get argmin of an empty sequence")
for v in arry.flat:
min_value = v
min_idx = 0
break
idx = 0
for v in arry.flat:
if v < min_value:
min_value = v
min_idx = idx
idx += 1
return min_idx
|
def array_argmin_impl(arry):
for v in arry.flat:
min_value = v
min_idx = 0
break
idx = 0
for v in arry.flat:
if v < min_value:
min_value = v
min_idx = idx
idx += 1
return min_idx
|
https://github.com/numba/numba/issues/3119
|
In [324]: def test_0d_min(X):
...: return np.min(X)
...:
...:
In [325]: test_0d_min(np.array([])) # correct behavior
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
... (omitted)
ValueError: zero-size array to reduction operation minimum which has no identity
In [326]: nb.jit(test_0d_min)(np.array([])) # incorrect
Out[326]: 0.0
In [328]: nb.jit(test_0d_min, parallel=True)(np.array([])) # incorrect
Out[328]: 1.7976931348623157e+308
|
ValueError
|
def array_argmax(context, builder, sig, args):
def array_argmax_impl(arry):
if arry.size == 0:
raise ValueError("attempt to get argmax of an empty sequence")
for v in arry.flat:
max_value = v
max_idx = 0
break
idx = 0
for v in arry.flat:
if v > max_value:
max_value = v
max_idx = idx
idx += 1
return max_idx
res = context.compile_internal(builder, array_argmax_impl, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
|
def array_argmax(context, builder, sig, args):
def array_argmax_impl(arry):
for v in arry.flat:
max_value = v
max_idx = 0
break
idx = 0
for v in arry.flat:
if v > max_value:
max_value = v
max_idx = idx
idx += 1
return max_idx
res = context.compile_internal(builder, array_argmax_impl, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
|
https://github.com/numba/numba/issues/3119
|
In [324]: def test_0d_min(X):
...: return np.min(X)
...:
...:
In [325]: test_0d_min(np.array([])) # correct behavior
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
... (omitted)
ValueError: zero-size array to reduction operation minimum which has no identity
In [326]: nb.jit(test_0d_min)(np.array([])) # incorrect
Out[326]: 0.0
In [328]: nb.jit(test_0d_min, parallel=True)(np.array([])) # incorrect
Out[328]: 1.7976931348623157e+308
|
ValueError
|
def array_argmax_impl(arry):
if arry.size == 0:
raise ValueError("attempt to get argmax of an empty sequence")
for v in arry.flat:
max_value = v
max_idx = 0
break
idx = 0
for v in arry.flat:
if v > max_value:
max_value = v
max_idx = idx
idx += 1
return max_idx
|
def array_argmax_impl(arry):
for v in arry.flat:
max_value = v
max_idx = 0
break
idx = 0
for v in arry.flat:
if v > max_value:
max_value = v
max_idx = idx
idx += 1
return max_idx
|
https://github.com/numba/numba/issues/3119
|
In [324]: def test_0d_min(X):
...: return np.min(X)
...:
...:
In [325]: test_0d_min(np.array([])) # correct behavior
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
... (omitted)
ValueError: zero-size array to reduction operation minimum which has no identity
In [326]: nb.jit(test_0d_min)(np.array([])) # incorrect
Out[326]: 0.0
In [328]: nb.jit(test_0d_min, parallel=True)(np.array([])) # incorrect
Out[328]: 1.7976931348623157e+308
|
ValueError
|
def call_parallel_gufunc(
lowerer,
cres,
gu_signature,
outer_sig,
expr_args,
expr_arg_types,
loop_ranges,
redvars,
reddict,
redarrdict,
init_block,
index_var_typ,
races,
):
"""
Adds the call to the gufunc function from the main function.
"""
context = lowerer.context
builder = lowerer.builder
library = lowerer.library
from .parallel import (
ParallelGUFuncBuilder,
build_gufunc_wrapper,
get_thread_count,
_launch_threads,
_init,
)
if config.DEBUG_ARRAY_OPT:
print("make_parallel_loop")
print("args = ", expr_args)
print(
"outer_sig = ",
outer_sig.args,
outer_sig.return_type,
outer_sig.recvr,
outer_sig.pysig,
)
print("loop_ranges = ", loop_ranges)
print("expr_args", expr_args)
print("expr_arg_types", expr_arg_types)
print("gu_signature", gu_signature)
# Build the wrapper for GUFunc
args, return_type = sigutils.normalize_signature(outer_sig)
llvm_func = cres.library.get_function(cres.fndesc.llvm_func_name)
sin, sout = gu_signature
# These are necessary for build_gufunc_wrapper to find external symbols
_launch_threads()
_init()
wrapper_ptr, env, wrapper_name = build_gufunc_wrapper(
llvm_func, cres, sin, sout, {}
)
cres.library._ensure_finalized()
if config.DEBUG_ARRAY_OPT:
print("parallel function = ", wrapper_name, cres)
# loadvars for loop_ranges
def load_range(v):
if isinstance(v, ir.Var):
return lowerer.loadvar(v.name)
else:
return context.get_constant(types.uintp, v)
num_dim = len(loop_ranges)
for i in range(num_dim):
start, stop, step = loop_ranges[i]
start = load_range(start)
stop = load_range(stop)
assert step == 1 # We do not support loop steps other than 1
step = load_range(step)
loop_ranges[i] = (start, stop, step)
if config.DEBUG_ARRAY_OPT:
print(
"call_parallel_gufunc loop_ranges[{}] = ".format(i), start, stop, step
)
cgutils.printf(
builder, "loop range[{}]: %d %d (%d)\n".format(i), start, stop, step
)
# Commonly used LLVM types and constants
byte_t = lc.Type.int(8)
byte_ptr_t = lc.Type.pointer(byte_t)
byte_ptr_ptr_t = lc.Type.pointer(byte_ptr_t)
intp_t = context.get_value_type(types.intp)
uintp_t = context.get_value_type(types.uintp)
intp_ptr_t = lc.Type.pointer(intp_t)
uintp_ptr_t = lc.Type.pointer(uintp_t)
zero = context.get_constant(types.uintp, 0)
one = context.get_constant(types.uintp, 1)
one_type = one.type
sizeof_intp = context.get_abi_sizeof(intp_t)
# Prepare sched, first pop it out of expr_args, outer_sig, and gu_signature
sched_name = expr_args.pop(0)
sched_typ = outer_sig.args[0]
sched_sig = sin.pop(0)
if config.DEBUG_ARRAY_OPT:
print("Parfor has potentially negative start", index_var_typ.signed)
if index_var_typ.signed:
sched_type = intp_t
sched_ptr_type = intp_ptr_t
else:
sched_type = uintp_t
sched_ptr_type = uintp_ptr_t
# Call do_scheduling with appropriate arguments
dim_starts = cgutils.alloca_once(
builder,
sched_type,
size=context.get_constant(types.uintp, num_dim),
name="dims",
)
dim_stops = cgutils.alloca_once(
builder,
sched_type,
size=context.get_constant(types.uintp, num_dim),
name="dims",
)
for i in range(num_dim):
start, stop, step = loop_ranges[i]
if start.type != one_type:
start = builder.sext(start, one_type)
if stop.type != one_type:
stop = builder.sext(stop, one_type)
if step.type != one_type:
step = builder.sext(step, one_type)
# substract 1 because do-scheduling takes inclusive ranges
stop = builder.sub(stop, one)
builder.store(
start, builder.gep(dim_starts, [context.get_constant(types.uintp, i)])
)
builder.store(
stop, builder.gep(dim_stops, [context.get_constant(types.uintp, i)])
)
sched_size = get_thread_count() * num_dim * 2
sched = cgutils.alloca_once(
builder,
sched_type,
size=context.get_constant(types.uintp, sched_size),
name="sched",
)
debug_flag = 1 if config.DEBUG_ARRAY_OPT else 0
scheduling_fnty = lc.Type.function(
intp_ptr_t,
[uintp_t, sched_ptr_type, sched_ptr_type, uintp_t, sched_ptr_type, intp_t],
)
if index_var_typ.signed:
do_scheduling = builder.module.get_or_insert_function(
scheduling_fnty, name="do_scheduling_signed"
)
else:
do_scheduling = builder.module.get_or_insert_function(
scheduling_fnty, name="do_scheduling_unsigned"
)
builder.call(
do_scheduling,
[
context.get_constant(types.uintp, num_dim),
dim_starts,
dim_stops,
context.get_constant(types.uintp, get_thread_count()),
sched,
context.get_constant(types.intp, debug_flag),
],
)
# Get the LLVM vars for the Numba IR reduction array vars.
redarrs = [lowerer.loadvar(redarrdict[x].name) for x in redvars]
nredvars = len(redvars)
ninouts = len(expr_args) - nredvars
if config.DEBUG_ARRAY_OPT:
for i in range(get_thread_count()):
cgutils.printf(builder, "sched[" + str(i) + "] = ")
for j in range(num_dim * 2):
cgutils.printf(
builder,
"%d ",
builder.load(
builder.gep(
sched,
[context.get_constant(types.intp, i * num_dim * 2 + j)],
)
),
)
cgutils.printf(builder, "\n")
# ----------------------------------------------------------------------------
# Prepare arguments: args, shapes, steps, data
all_args = [lowerer.loadvar(x) for x in expr_args[:ninouts]] + redarrs
num_args = len(all_args)
num_inps = len(sin) + 1
args = cgutils.alloca_once(
builder,
byte_ptr_t,
size=context.get_constant(types.intp, 1 + num_args),
name="pargs",
)
array_strides = []
# sched goes first
builder.store(builder.bitcast(sched, byte_ptr_t), args)
array_strides.append(context.get_constant(types.intp, sizeof_intp))
red_shapes = {}
rv_to_arg_dict = {}
# followed by other arguments
for i in range(num_args):
arg = all_args[i]
var = expr_args[i]
aty = expr_arg_types[i]
dst = builder.gep(args, [context.get_constant(types.intp, i + 1)])
if i >= ninouts: # reduction variables
ary = context.make_array(aty)(context, builder, arg)
strides = cgutils.unpack_tuple(builder, ary.strides, aty.ndim)
ary_shapes = cgutils.unpack_tuple(builder, ary.shape, aty.ndim)
# Start from 1 because we skip the first dimension of length num_threads just like sched.
for j in range(1, len(strides)):
array_strides.append(strides[j])
red_shapes[i] = ary_shapes[1:]
builder.store(builder.bitcast(ary.data, byte_ptr_t), dst)
elif isinstance(aty, types.ArrayCompatible):
if var in races:
typ = (
context.get_data_type(aty.dtype)
if aty.dtype != types.boolean
else lc.Type.int(1)
)
rv_arg = cgutils.alloca_once(builder, typ)
builder.store(arg, rv_arg)
builder.store(builder.bitcast(rv_arg, byte_ptr_t), dst)
rv_to_arg_dict[var] = (arg, rv_arg)
array_strides.append(
context.get_constant(types.intp, context.get_abi_sizeof(typ))
)
else:
ary = context.make_array(aty)(context, builder, arg)
strides = cgutils.unpack_tuple(builder, ary.strides, aty.ndim)
for j in range(len(strides)):
array_strides.append(strides[j])
builder.store(builder.bitcast(ary.data, byte_ptr_t), dst)
else:
if i < num_inps:
# Scalar input, need to store the value in an array of size 1
typ = (
context.get_data_type(aty)
if aty != types.boolean
else lc.Type.int(1)
)
ptr = cgutils.alloca_once(builder, typ)
builder.store(arg, ptr)
else:
# Scalar output, must allocate
typ = (
context.get_data_type(aty)
if aty != types.boolean
else lc.Type.int(1)
)
ptr = cgutils.alloca_once(builder, typ)
builder.store(builder.bitcast(ptr, byte_ptr_t), dst)
# ----------------------------------------------------------------------------
# Next, we prepare the individual dimension info recorded in gu_signature
sig_dim_dict = {}
occurances = []
occurances = [sched_sig[0]]
sig_dim_dict[sched_sig[0]] = context.get_constant(types.intp, 2 * num_dim)
assert len(expr_args) == len(all_args)
assert len(expr_args) == len(expr_arg_types)
assert len(expr_args) == len(sin + sout)
assert len(expr_args) == len(outer_sig.args[1:])
for var, arg, aty, gu_sig in zip(expr_args, all_args, expr_arg_types, sin + sout):
if isinstance(aty, types.npytypes.Array):
i = aty.ndim - len(gu_sig)
else:
i = 0
if config.DEBUG_ARRAY_OPT:
print("var =", var, "gu_sig =", gu_sig, "type =", aty, "i =", i)
for dim_sym in gu_sig:
if config.DEBUG_ARRAY_OPT:
print("var = ", var, " type = ", aty)
if var in races:
sig_dim_dict[dim_sym] = context.get_constant(types.intp, 1)
else:
ary = context.make_array(aty)(context, builder, arg)
shapes = cgutils.unpack_tuple(builder, ary.shape, aty.ndim)
sig_dim_dict[dim_sym] = shapes[i]
if not (dim_sym in occurances):
if config.DEBUG_ARRAY_OPT:
print("dim_sym = ", dim_sym, ", i = ", i)
cgutils.printf(builder, dim_sym + " = %d\n", sig_dim_dict[dim_sym])
occurances.append(dim_sym)
i = i + 1
# ----------------------------------------------------------------------------
# Prepare shapes, which is a single number (outer loop size), followed by
# the size of individual shape variables.
nshapes = len(sig_dim_dict) + 1
shapes = cgutils.alloca_once(builder, intp_t, size=nshapes, name="pshape")
# For now, outer loop size is the same as number of threads
builder.store(context.get_constant(types.intp, get_thread_count()), shapes)
# Individual shape variables go next
i = 1
for dim_sym in occurances:
if config.DEBUG_ARRAY_OPT:
cgutils.printf(builder, dim_sym + " = %d\n", sig_dim_dict[dim_sym])
builder.store(
sig_dim_dict[dim_sym],
builder.gep(shapes, [context.get_constant(types.intp, i)]),
)
i = i + 1
# ----------------------------------------------------------------------------
# Prepare steps for each argument. Note that all steps are counted in
# bytes.
num_steps = num_args + 1 + len(array_strides)
steps = cgutils.alloca_once(
builder, intp_t, size=context.get_constant(types.intp, num_steps), name="psteps"
)
# First goes the step size for sched, which is 2 * num_dim
builder.store(context.get_constant(types.intp, 2 * num_dim * sizeof_intp), steps)
# The steps for all others are 0, except for reduction results.
for i in range(num_args):
if i >= ninouts: # steps for reduction vars are abi_sizeof(typ)
j = i - ninouts
# Get the base dtype of the reduction array.
redtyp = lowerer.fndesc.typemap[redvars[j]]
red_stride = None
if isinstance(redtyp, types.npytypes.Array):
redtyp = redtyp.dtype
red_stride = red_shapes[i]
typ = context.get_value_type(redtyp)
sizeof = context.get_abi_sizeof(typ)
# Set stepsize to the size of that dtype.
stepsize = context.get_constant(types.intp, sizeof)
if red_stride != None:
for rs in red_stride:
stepsize = builder.mul(stepsize, rs)
else:
# steps are strides
stepsize = zero
dst = builder.gep(steps, [context.get_constant(types.intp, 1 + i)])
builder.store(stepsize, dst)
for j in range(len(array_strides)):
dst = builder.gep(steps, [context.get_constant(types.intp, 1 + num_args + j)])
builder.store(array_strides[j], dst)
# ----------------------------------------------------------------------------
# prepare data
data = builder.inttoptr(zero, byte_ptr_t)
fnty = lc.Type.function(
lc.Type.void(), [byte_ptr_ptr_t, intp_ptr_t, intp_ptr_t, byte_ptr_t]
)
fn = builder.module.get_or_insert_function(fnty, name=wrapper_name)
if config.DEBUG_ARRAY_OPT:
cgutils.printf(builder, "before calling kernel %p\n", fn)
result = builder.call(fn, [args, shapes, steps, data])
if config.DEBUG_ARRAY_OPT:
cgutils.printf(builder, "after calling kernel %p\n", fn)
for k, v in rv_to_arg_dict.items():
arg, rv_arg = v
only_elem_ptr = builder.gep(rv_arg, [context.get_constant(types.intp, 0)])
builder.store(builder.load(only_elem_ptr), lowerer.getvar(k))
scope = init_block.scope
loc = init_block.loc
|
def call_parallel_gufunc(
lowerer,
cres,
gu_signature,
outer_sig,
expr_args,
expr_arg_types,
loop_ranges,
redvars,
reddict,
redarrdict,
init_block,
index_var_typ,
races,
):
"""
Adds the call to the gufunc function from the main function.
"""
context = lowerer.context
builder = lowerer.builder
library = lowerer.library
from .parallel import (
ParallelGUFuncBuilder,
build_gufunc_wrapper,
get_thread_count,
_launch_threads,
_init,
)
if config.DEBUG_ARRAY_OPT:
print("make_parallel_loop")
print("args = ", expr_args)
print(
"outer_sig = ",
outer_sig.args,
outer_sig.return_type,
outer_sig.recvr,
outer_sig.pysig,
)
print("loop_ranges = ", loop_ranges)
print("expr_args", expr_args)
print("expr_arg_types", expr_arg_types)
print("gu_signature", gu_signature)
# Build the wrapper for GUFunc
args, return_type = sigutils.normalize_signature(outer_sig)
llvm_func = cres.library.get_function(cres.fndesc.llvm_func_name)
sin, sout = gu_signature
# These are necessary for build_gufunc_wrapper to find external symbols
_launch_threads()
_init()
wrapper_ptr, env, wrapper_name = build_gufunc_wrapper(
llvm_func, cres, sin, sout, {}
)
cres.library._ensure_finalized()
if config.DEBUG_ARRAY_OPT:
print("parallel function = ", wrapper_name, cres)
# loadvars for loop_ranges
def load_range(v):
if isinstance(v, ir.Var):
return lowerer.loadvar(v.name)
else:
return context.get_constant(types.uintp, v)
num_dim = len(loop_ranges)
for i in range(num_dim):
start, stop, step = loop_ranges[i]
start = load_range(start)
stop = load_range(stop)
assert step == 1 # We do not support loop steps other than 1
step = load_range(step)
loop_ranges[i] = (start, stop, step)
if config.DEBUG_ARRAY_OPT:
print(
"call_parallel_gufunc loop_ranges[{}] = ".format(i), start, stop, step
)
cgutils.printf(
builder, "loop range[{}]: %d %d (%d)\n".format(i), start, stop, step
)
# Commonly used LLVM types and constants
byte_t = lc.Type.int(8)
byte_ptr_t = lc.Type.pointer(byte_t)
byte_ptr_ptr_t = lc.Type.pointer(byte_ptr_t)
intp_t = context.get_value_type(types.intp)
uintp_t = context.get_value_type(types.uintp)
intp_ptr_t = lc.Type.pointer(intp_t)
uintp_ptr_t = lc.Type.pointer(uintp_t)
zero = context.get_constant(types.uintp, 0)
one = context.get_constant(types.uintp, 1)
one_type = one.type
sizeof_intp = context.get_abi_sizeof(intp_t)
# Prepare sched, first pop it out of expr_args, outer_sig, and gu_signature
sched_name = expr_args.pop(0)
sched_typ = outer_sig.args[0]
sched_sig = sin.pop(0)
if config.DEBUG_ARRAY_OPT:
print("Parfor has potentially negative start", index_var_typ.signed)
if index_var_typ.signed:
sched_type = intp_t
sched_ptr_type = intp_ptr_t
else:
sched_type = uintp_t
sched_ptr_type = uintp_ptr_t
# Call do_scheduling with appropriate arguments
dim_starts = cgutils.alloca_once(
builder,
sched_type,
size=context.get_constant(types.uintp, num_dim),
name="dims",
)
dim_stops = cgutils.alloca_once(
builder,
sched_type,
size=context.get_constant(types.uintp, num_dim),
name="dims",
)
for i in range(num_dim):
start, stop, step = loop_ranges[i]
if start.type != one_type:
start = builder.sext(start, one_type)
if stop.type != one_type:
stop = builder.sext(stop, one_type)
if step.type != one_type:
step = builder.sext(step, one_type)
# substract 1 because do-scheduling takes inclusive ranges
stop = builder.sub(stop, one)
builder.store(
start, builder.gep(dim_starts, [context.get_constant(types.uintp, i)])
)
builder.store(
stop, builder.gep(dim_stops, [context.get_constant(types.uintp, i)])
)
sched_size = get_thread_count() * num_dim * 2
sched = cgutils.alloca_once(
builder,
sched_type,
size=context.get_constant(types.uintp, sched_size),
name="sched",
)
debug_flag = 1 if config.DEBUG_ARRAY_OPT else 0
scheduling_fnty = lc.Type.function(
intp_ptr_t,
[uintp_t, sched_ptr_type, sched_ptr_type, uintp_t, sched_ptr_type, intp_t],
)
if index_var_typ.signed:
do_scheduling = builder.module.get_or_insert_function(
scheduling_fnty, name="do_scheduling_signed"
)
else:
do_scheduling = builder.module.get_or_insert_function(
scheduling_fnty, name="do_scheduling_unsigned"
)
builder.call(
do_scheduling,
[
context.get_constant(types.uintp, num_dim),
dim_starts,
dim_stops,
context.get_constant(types.uintp, get_thread_count()),
sched,
context.get_constant(types.intp, debug_flag),
],
)
# Get the LLVM vars for the Numba IR reduction array vars.
redarrs = [lowerer.loadvar(x.name) for x in list(redarrdict.values())]
nredvars = len(redvars)
ninouts = len(expr_args) - nredvars
if config.DEBUG_ARRAY_OPT:
for i in range(get_thread_count()):
cgutils.printf(builder, "sched[" + str(i) + "] = ")
for j in range(num_dim * 2):
cgutils.printf(
builder,
"%d ",
builder.load(
builder.gep(
sched,
[context.get_constant(types.intp, i * num_dim * 2 + j)],
)
),
)
cgutils.printf(builder, "\n")
# ----------------------------------------------------------------------------
# Prepare arguments: args, shapes, steps, data
all_args = [lowerer.loadvar(x) for x in expr_args[:ninouts]] + redarrs
num_args = len(all_args)
num_inps = len(sin) + 1
args = cgutils.alloca_once(
builder,
byte_ptr_t,
size=context.get_constant(types.intp, 1 + num_args),
name="pargs",
)
array_strides = []
# sched goes first
builder.store(builder.bitcast(sched, byte_ptr_t), args)
array_strides.append(context.get_constant(types.intp, sizeof_intp))
red_shapes = {}
rv_to_arg_dict = {}
# followed by other arguments
for i in range(num_args):
arg = all_args[i]
var = expr_args[i]
aty = expr_arg_types[i]
dst = builder.gep(args, [context.get_constant(types.intp, i + 1)])
if i >= ninouts: # reduction variables
ary = context.make_array(aty)(context, builder, arg)
strides = cgutils.unpack_tuple(builder, ary.strides, aty.ndim)
ary_shapes = cgutils.unpack_tuple(builder, ary.shape, aty.ndim)
# Start from 1 because we skip the first dimension of length num_threads just like sched.
for j in range(1, len(strides)):
array_strides.append(strides[j])
red_shapes[i] = ary_shapes[1:]
builder.store(builder.bitcast(ary.data, byte_ptr_t), dst)
elif isinstance(aty, types.ArrayCompatible):
if var in races:
typ = (
context.get_data_type(aty.dtype)
if aty.dtype != types.boolean
else lc.Type.int(1)
)
rv_arg = cgutils.alloca_once(builder, typ)
builder.store(arg, rv_arg)
builder.store(builder.bitcast(rv_arg, byte_ptr_t), dst)
rv_to_arg_dict[var] = (arg, rv_arg)
array_strides.append(
context.get_constant(types.intp, context.get_abi_sizeof(typ))
)
else:
ary = context.make_array(aty)(context, builder, arg)
strides = cgutils.unpack_tuple(builder, ary.strides, aty.ndim)
for j in range(len(strides)):
array_strides.append(strides[j])
builder.store(builder.bitcast(ary.data, byte_ptr_t), dst)
else:
if i < num_inps:
# Scalar input, need to store the value in an array of size 1
typ = (
context.get_data_type(aty)
if aty != types.boolean
else lc.Type.int(1)
)
ptr = cgutils.alloca_once(builder, typ)
builder.store(arg, ptr)
else:
# Scalar output, must allocate
typ = (
context.get_data_type(aty)
if aty != types.boolean
else lc.Type.int(1)
)
ptr = cgutils.alloca_once(builder, typ)
builder.store(builder.bitcast(ptr, byte_ptr_t), dst)
# ----------------------------------------------------------------------------
# Next, we prepare the individual dimension info recorded in gu_signature
sig_dim_dict = {}
occurances = []
occurances = [sched_sig[0]]
sig_dim_dict[sched_sig[0]] = context.get_constant(types.intp, 2 * num_dim)
assert len(expr_args) == len(all_args)
assert len(expr_args) == len(expr_arg_types)
assert len(expr_args) == len(sin + sout)
assert len(expr_args) == len(outer_sig.args[1:])
for var, arg, aty, gu_sig in zip(expr_args, all_args, expr_arg_types, sin + sout):
if isinstance(aty, types.npytypes.Array):
i = aty.ndim - len(gu_sig)
else:
i = 0
if config.DEBUG_ARRAY_OPT:
print("var =", var, "gu_sig =", gu_sig, "type =", aty, "i =", i)
for dim_sym in gu_sig:
if config.DEBUG_ARRAY_OPT:
print("var = ", var, " type = ", aty)
if var in races:
sig_dim_dict[dim_sym] = context.get_constant(types.intp, 1)
else:
ary = context.make_array(aty)(context, builder, arg)
shapes = cgutils.unpack_tuple(builder, ary.shape, aty.ndim)
sig_dim_dict[dim_sym] = shapes[i]
if not (dim_sym in occurances):
if config.DEBUG_ARRAY_OPT:
print("dim_sym = ", dim_sym, ", i = ", i)
cgutils.printf(builder, dim_sym + " = %d\n", sig_dim_dict[dim_sym])
occurances.append(dim_sym)
i = i + 1
# ----------------------------------------------------------------------------
# Prepare shapes, which is a single number (outer loop size), followed by
# the size of individual shape variables.
nshapes = len(sig_dim_dict) + 1
shapes = cgutils.alloca_once(builder, intp_t, size=nshapes, name="pshape")
# For now, outer loop size is the same as number of threads
builder.store(context.get_constant(types.intp, get_thread_count()), shapes)
# Individual shape variables go next
i = 1
for dim_sym in occurances:
if config.DEBUG_ARRAY_OPT:
cgutils.printf(builder, dim_sym + " = %d\n", sig_dim_dict[dim_sym])
builder.store(
sig_dim_dict[dim_sym],
builder.gep(shapes, [context.get_constant(types.intp, i)]),
)
i = i + 1
# ----------------------------------------------------------------------------
# Prepare steps for each argument. Note that all steps are counted in
# bytes.
num_steps = num_args + 1 + len(array_strides)
steps = cgutils.alloca_once(
builder, intp_t, size=context.get_constant(types.intp, num_steps), name="psteps"
)
# First goes the step size for sched, which is 2 * num_dim
builder.store(context.get_constant(types.intp, 2 * num_dim * sizeof_intp), steps)
# The steps for all others are 0, except for reduction results.
for i in range(num_args):
if i >= ninouts: # steps for reduction vars are abi_sizeof(typ)
j = i - ninouts
# Get the base dtype of the reduction array.
redtyp = lowerer.fndesc.typemap[redvars[j]]
red_stride = None
if isinstance(redtyp, types.npytypes.Array):
redtyp = redtyp.dtype
red_stride = red_shapes[i]
typ = context.get_value_type(redtyp)
sizeof = context.get_abi_sizeof(typ)
# Set stepsize to the size of that dtype.
stepsize = context.get_constant(types.intp, sizeof)
if red_stride != None:
for rs in red_stride:
stepsize = builder.mul(stepsize, rs)
else:
# steps are strides
stepsize = zero
dst = builder.gep(steps, [context.get_constant(types.intp, 1 + i)])
builder.store(stepsize, dst)
for j in range(len(array_strides)):
dst = builder.gep(steps, [context.get_constant(types.intp, 1 + num_args + j)])
builder.store(array_strides[j], dst)
# ----------------------------------------------------------------------------
# prepare data
data = builder.inttoptr(zero, byte_ptr_t)
fnty = lc.Type.function(
lc.Type.void(), [byte_ptr_ptr_t, intp_ptr_t, intp_ptr_t, byte_ptr_t]
)
fn = builder.module.get_or_insert_function(fnty, name=wrapper_name)
if config.DEBUG_ARRAY_OPT:
cgutils.printf(builder, "before calling kernel %p\n", fn)
result = builder.call(fn, [args, shapes, steps, data])
if config.DEBUG_ARRAY_OPT:
cgutils.printf(builder, "after calling kernel %p\n", fn)
for k, v in rv_to_arg_dict.items():
arg, rv_arg = v
only_elem_ptr = builder.gep(rv_arg, [context.get_constant(types.intp, 0)])
builder.store(builder.load(only_elem_ptr), lowerer.getvar(k))
scope = init_block.scope
loc = init_block.loc
|
https://github.com/numba/numba/issues/3279
|
ERROR: test_fuse_argmin (numba.tests.test_parfors.TestParfors)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/travis/build/numba/numba/numba/errors.py", line 601, in new_error_context
yield
File "/home/travis/build/numba/numba/numba/lowering.py", line 254, in lower_block
self.lower_inst(inst)
File "/home/travis/build/numba/numba/numba/lowering.py", line 403, in lower_inst
func(self, inst)
File "/home/travis/build/numba/numba/numba/npyufunc/parfor.py", line 276, in _lower_parfor_parallel
parfor.races)
File "/home/travis/build/numba/numba/numba/npyufunc/parfor.py", line 1206, in call_parallel_gufunc
ary = context.make_array(aty)(context, builder, arg)
File "/home/travis/build/numba/numba/numba/cgutils.py", line 117, in __init__
% (outer_ref.type.pointee, value.type))
AssertionError: bad value type: expected {i8*, i8*, i64, i64, {i64, double}*, [1 x i64], [1 x i64]}, got {i8*, i8*, i64, i64, double*, [1 x i64], [1 x i64]}
|
AssertionError
|
def get_array_index_type(ary, idx):
"""
Returns None or a tuple-3 for the types of the input array, index, and
resulting type of ``array[index]``.
Note: This is shared logic for ndarray getitem and setitem.
"""
if not isinstance(ary, types.Buffer):
return
ndim = ary.ndim
left_indices = []
right_indices = []
ellipsis_met = False
advanced = False
has_integer = False
if not isinstance(idx, types.BaseTuple):
idx = [idx]
# Walk indices
for ty in idx:
if ty is types.ellipsis:
if ellipsis_met:
raise TypeError(
"only one ellipsis allowed in array index (got %s)" % (idx,)
)
ellipsis_met = True
elif isinstance(ty, types.SliceType):
pass
elif isinstance(ty, types.Integer):
# Normalize integer index
ty = types.intp if ty.signed else types.uintp
# Integer indexing removes the given dimension
ndim -= 1
has_integer = True
elif (
isinstance(ty, types.Array)
and ty.ndim == 0
and isinstance(ty.dtype, types.Integer)
):
# 0-d array used as integer index
ndim -= 1
has_integer = True
elif (
isinstance(ty, types.Array)
and ty.ndim == 1
and isinstance(ty.dtype, (types.Integer, types.Boolean))
):
if advanced or has_integer:
# We don't support the complicated combination of
# advanced indices (and integers are considered part
# of them by Numpy).
raise NotImplementedError("only one advanced index supported")
advanced = True
else:
raise TypeError("unsupported array index type %s in %s" % (ty, idx))
(right_indices if ellipsis_met else left_indices).append(ty)
# Only Numpy arrays support advanced indexing
if advanced and not isinstance(ary, types.Array):
return
# Check indices and result dimensionality
all_indices = left_indices + right_indices
if ellipsis_met:
assert right_indices[0] is types.ellipsis
del right_indices[0]
n_indices = len(all_indices) - ellipsis_met
if n_indices > ary.ndim:
raise TypeError("cannot index %s with %d indices: %s" % (ary, n_indices, idx))
if n_indices == ary.ndim and ndim == 0 and not ellipsis_met:
# Full integer indexing => scalar result
# (note if ellipsis is present, a 0-d view is returned instead)
res = ary.dtype
elif advanced:
# Result is a copy
res = ary.copy(ndim=ndim, layout="C", readonly=False)
else:
# Result is a view
if ary.slice_is_copy:
# Avoid view semantics when the original type creates a copy
# when slicing.
return
# Infer layout
layout = ary.layout
def keeps_contiguity(ty, is_innermost):
# A slice can only keep an array contiguous if it is the
# innermost index and it is not strided
return (
ty is types.ellipsis
or isinstance(ty, types.Integer)
or (
is_innermost and isinstance(ty, types.SliceType) and not ty.has_step
)
)
def check_contiguity(outer_indices):
"""
Whether indexing with the given indices (from outer to inner in
physical layout order) can keep an array contiguous.
"""
for ty in outer_indices[:-1]:
if not keeps_contiguity(ty, False):
return False
if outer_indices and not keeps_contiguity(outer_indices[-1], True):
return False
return True
if layout == "C":
# Integer indexing on the left keeps the array C-contiguous
if n_indices == ary.ndim:
# If all indices are there, ellipsis's place is indifferent
left_indices = left_indices + right_indices
right_indices = []
if right_indices:
layout = "A"
elif not check_contiguity(left_indices):
layout = "A"
elif layout == "F":
# Integer indexing on the right keeps the array F-contiguous
if n_indices == ary.ndim:
# If all indices are there, ellipsis's place is indifferent
right_indices = left_indices + right_indices
left_indices = []
if left_indices:
layout = "A"
elif not check_contiguity(right_indices[::-1]):
layout = "A"
if ndim == 0:
# Implicitly convert to a scalar if the output ndim==0
res = ary.dtype
else:
res = ary.copy(ndim=ndim, layout=layout)
# Re-wrap indices
if isinstance(idx, types.BaseTuple):
idx = types.BaseTuple.from_types(all_indices)
else:
(idx,) = all_indices
return Indexing(idx, res, advanced)
|
def get_array_index_type(ary, idx):
"""
Returns None or a tuple-3 for the types of the input array, index, and
resulting type of ``array[index]``.
Note: This is shared logic for ndarray getitem and setitem.
"""
if not isinstance(ary, types.Buffer):
return
ndim = ary.ndim
left_indices = []
right_indices = []
ellipsis_met = False
advanced = False
has_integer = False
if not isinstance(idx, types.BaseTuple):
idx = [idx]
# Walk indices
for ty in idx:
if ty is types.ellipsis:
if ellipsis_met:
raise TypeError(
"only one ellipsis allowed in array index (got %s)" % (idx,)
)
ellipsis_met = True
elif isinstance(ty, types.SliceType):
pass
elif isinstance(ty, types.Integer):
# Normalize integer index
ty = types.intp if ty.signed else types.uintp
# Integer indexing removes the given dimension
ndim -= 1
has_integer = True
elif (
isinstance(ty, types.Array)
and ty.ndim == 0
and isinstance(ty.dtype, types.Integer)
):
# 0-d array used as integer index
ndim -= 1
has_integer = True
elif (
isinstance(ty, types.Array)
and ty.ndim == 1
and isinstance(ty.dtype, (types.Integer, types.Boolean))
):
if advanced or has_integer:
# We don't support the complicated combination of
# advanced indices (and integers are considered part
# of them by Numpy).
raise NotImplementedError("only one advanced index supported")
advanced = True
else:
raise TypeError("unsupported array index type %s in %s" % (ty, idx))
(right_indices if ellipsis_met else left_indices).append(ty)
# Only Numpy arrays support advanced indexing
if advanced and not isinstance(ary, types.Array):
return
# Check indices and result dimensionality
all_indices = left_indices + right_indices
if ellipsis_met:
assert right_indices[0] is types.ellipsis
del right_indices[0]
n_indices = len(all_indices) - ellipsis_met
if n_indices > ary.ndim:
raise TypeError("cannot index %s with %d indices: %s" % (ary, n_indices, idx))
if n_indices == ary.ndim and ndim == 0 and not ellipsis_met:
# Full integer indexing => scalar result
# (note if ellipsis is present, a 0-d view is returned instead)
res = ary.dtype
elif advanced:
# Result is a copy
res = ary.copy(ndim=ndim, layout="C", readonly=False)
else:
# Result is a view
if ary.slice_is_copy:
# Avoid view semantics when the original type creates a copy
# when slicing.
return
# Infer layout
layout = ary.layout
def keeps_contiguity(ty, is_innermost):
# A slice can only keep an array contiguous if it is the
# innermost index and it is not strided
return (
ty is types.ellipsis
or isinstance(ty, types.Integer)
or (
is_innermost and isinstance(ty, types.SliceType) and not ty.has_step
)
)
def check_contiguity(outer_indices):
"""
Whether indexing with the given indices (from outer to inner in
physical layout order) can keep an array contiguous.
"""
for ty in outer_indices[:-1]:
if not keeps_contiguity(ty, False):
return False
if outer_indices and not keeps_contiguity(outer_indices[-1], True):
return False
return True
if layout == "C":
# Integer indexing on the left keeps the array C-contiguous
if n_indices == ary.ndim:
# If all indices are there, ellipsis's place is indifferent
left_indices = left_indices + right_indices
right_indices = []
if right_indices:
layout = "A"
elif not check_contiguity(left_indices):
layout = "A"
elif layout == "F":
# Integer indexing on the right keeps the array F-contiguous
if n_indices == ary.ndim:
# If all indices are there, ellipsis's place is indifferent
right_indices = left_indices + right_indices
left_indices = []
if left_indices:
layout = "A"
elif not check_contiguity(right_indices[::-1]):
layout = "A"
res = ary.copy(ndim=ndim, layout=layout)
# Re-wrap indices
if isinstance(idx, types.BaseTuple):
idx = types.BaseTuple.from_types(all_indices)
else:
(idx,) = all_indices
return Indexing(idx, res, advanced)
|
https://github.com/numba/numba/issues/3225
|
---------------------------------------------------------------------------
NotImplementedError Traceback (most recent call last)
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/targets/base.py in cast(self, builder, val, fromty, toty)
674 try:
--> 675 impl = self._casts.find((fromty, toty))
676 return impl(self, builder, fromty, toty, val)
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/targets/base.py in find(self, sig)
47 if out is None:
---> 48 out = self._find(sig)
49 self._cache[sig] = out
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/targets/base.py in _find(self, sig)
56 else:
---> 57 raise NotImplementedError(self, sig)
58
NotImplementedError: (<numba.targets.base.OverloadSelector object at 0x1059269e8>, (array(float64, 0d, C), float64))
During handling of the above exception, another exception occurred:
NotImplementedError Traceback (most recent call last)
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/errors.py in new_error_context(fmt_, *args, **kwargs)
576 try:
--> 577 yield
578 except NumbaError as e:
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/lowering.py in lower_block(self, block)
253 loc=self.loc, errcls_=defaulterrcls):
--> 254 self.lower_inst(inst)
255
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/lowering.py in lower_inst(self, inst)
357 assert signature is not None
--> 358 return self.lower_setitem(inst.target, inst.index, inst.value, signature)
359
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/lowering.py in lower_setitem(self, target_var, index_var, value_var, signature)
429
--> 430 return impl(self.builder, (target, index, value))
431
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/targets/base.py in __call__(self, builder, args)
1078 def __call__(self, builder, args):
-> 1079 return self._imp(self._context, builder, self._sig, args)
1080
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/targets/arrayobj.py in setitem_array(context, builder, sig, args)
481 # Store source value the given location
--> 482 val = context.cast(builder, val, valty, aryty.dtype)
483 store_item(context, builder, aryty, val, dataptr)
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/targets/base.py in cast(self, builder, val, fromty, toty)
678 raise NotImplementedError(
--> 679 "Cannot cast %s to %s: %s" % (fromty, toty, val))
680
NotImplementedError: Cannot cast array(float64, 0d, C) to float64: %".417" = load {i8*, i8*, i64, i64, double*, [0 x i64], [0 x i64]}, {i8*, i8*, i64, i64, double*, [0 x i64], [0 x i64]}* %"$22.9"
During handling of the above exception, another exception occurred:
LoweringError Traceback (most recent call last)
<ipython-input-6-e6ce0775290a> in <module>()
----> 1 jitted(A[0], B[0], indices)
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/dispatcher.py in _compile_for_args(self, *args, **kws)
366 e.patch_message(''.join(e.args) + help_msg)
367 # ignore the FULL_TRACEBACKS config, this needs reporting!
--> 368 raise e
369
370 def inspect_llvm(self, signature=None):
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/dispatcher.py in _compile_for_args(self, *args, **kws)
323 argtypes.append(self.typeof_pyval(a))
324 try:
--> 325 return self.compile(tuple(argtypes))
326 except errors.TypingError as e:
327 # Intercept typing error that may be due to an argument
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/dispatcher.py in compile(self, sig)
651
652 self._cache_misses[sig] += 1
--> 653 cres = self._compiler.compile(args, return_type)
654 self.add_overload(cres)
655 self._cache.save_overload(sig, cres)
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/dispatcher.py in compile(self, args, return_type)
81 args=args, return_type=return_type,
82 flags=flags, locals=self.locals,
---> 83 pipeline_class=self.pipeline_class)
84 # Check typing error if object mode is used
85 if cres.typing_error is not None and not flags.enable_pyobject:
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/compiler.py in compile_extra(typingctx, targetctx, func, args, return_type, flags, locals, library, pipeline_class)
871 pipeline = pipeline_class(typingctx, targetctx, library,
872 args, return_type, flags, locals)
--> 873 return pipeline.compile_extra(func)
874
875
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/compiler.py in compile_extra(self, func)
365 self.lifted = ()
366 self.lifted_from = None
--> 367 return self._compile_bytecode()
368
369 def compile_ir(self, func_ir, lifted=(), lifted_from=None):
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/compiler.py in _compile_bytecode(self)
802 """
803 assert self.func_ir is None
--> 804 return self._compile_core()
805
806 def _compile_ir(self):
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/compiler.py in _compile_core(self)
789 self.define_pipelines(pm)
790 pm.finalize()
--> 791 res = pm.run(self.status)
792 if res is not None:
793 # Early pipeline completion
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/compiler.py in run(self, status)
251 # No more fallback pipelines?
252 if is_final_pipeline:
--> 253 raise patched_exception
254 # Go to next fallback pipeline
255 else:
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/compiler.py in run(self, status)
243 try:
244 event(stage_name)
--> 245 stage()
246 except _EarlyPipelineCompletion as e:
247 return e.result
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/compiler.py in stage_nopython_backend(self)
676 """
677 lowerfn = self.backend_nopython_mode
--> 678 self._backend(lowerfn, objectmode=False)
679
680 def stage_compile_interp_mode(self):
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/compiler.py in _backend(self, lowerfn, objectmode)
626 self.library.enable_object_caching()
627
--> 628 lowered = lowerfn()
629 signature = typing.signature(self.return_type, *self.args)
630 self.cr = compile_result(
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/compiler.py in backend_nopython_mode(self)
613 self.return_type,
614 self.calltypes,
--> 615 self.flags)
616
617 def _backend(self, lowerfn, objectmode):
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/compiler.py in native_lowering_stage(targetctx, library, interp, typemap, restype, calltypes, flags)
990
991 lower = lowering.Lower(targetctx, library, fndesc, interp)
--> 992 lower.lower()
993 if not flags.no_cpython_wrapper:
994 lower.create_cpython_wrapper(flags.release_gil)
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/lowering.py in lower(self)
171 if self.generator_info is None:
172 self.genlower = None
--> 173 self.lower_normal_function(self.fndesc)
174 else:
175 self.genlower = self.GeneratorLower(self)
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/lowering.py in lower_normal_function(self, fndesc)
212 # Init argument values
213 self.extract_function_arguments()
--> 214 entry_block_tail = self.lower_function_body()
215
216 # Close tail of entry block
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/lowering.py in lower_function_body(self)
237 bb = self.blkmap[offset]
238 self.builder.position_at_end(bb)
--> 239 self.lower_block(block)
240
241 self.post_lower()
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/lowering.py in lower_block(self, block)
252 with new_error_context('lowering "{inst}" at {loc}', inst=inst,
253 loc=self.loc, errcls_=defaulterrcls):
--> 254 self.lower_inst(inst)
255
256 def create_cpython_wrapper(self, release_gil=False):
~/miniconda3/envs/numba3/lib/python3.7/contextlib.py in __exit__(self, type, value, traceback)
128 value = type()
129 try:
--> 130 self.gen.throw(type, value, traceback)
131 except StopIteration as exc:
132 # Suppress StopIteration *unless* it's the same exception that
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/errors.py in new_error_context(fmt_, *args, **kwargs)
583 from numba import config
584 tb = sys.exc_info()[2] if config.FULL_TRACEBACKS else None
--> 585 six.reraise(type(newerr), newerr, tb)
586
587
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/six.py in reraise(tp, value, tb)
657 if value.__traceback__ is not tb:
658 raise value.with_traceback(tb)
--> 659 raise value
660
661 else:
LoweringError: Failed at nopython (nopython mode backend)
Cannot cast array(float64, 0d, C) to float64: %".417" = load {i8*, i8*, i64, i64, double*, [0 x i64], [0 x i64]}, {i8*, i8*, i64, i64, double*, [0 x i64], [0 x i64]}* %"$22.9"
File "<ipython-input-1-f6cc8d5fb861>", line 8:
def func(A, B, indices):
<source elided>
index = indices[i]
rv[..., index] = B[..., index]
^
[1] During: lowering "rv[$22.13] = $22.9" at <ipython-input-1-f6cc8d5fb861> (8)
-------------------------------------------------------------------------------
This should not have happened, a problem has occurred in Numba's internals.
Please report the error message and traceback, along with a minimal reproducer
at: https://github.com/numba/numba/issues/new
If more help is needed please feel free to speak to the Numba core developers
directly at: https://gitter.im/numba/numba
Thanks in advance for your help in improving Numba!
|
NotImplementedError
|
def _inline_const_arraycall(block, func_ir, context, typemap, calltypes):
"""Look for array(list) call where list is a constant list created by build_list,
and turn them into direct array creation and initialization, if the following
conditions are met:
1. The build_list call immediate preceeds the array call;
2. The list variable is no longer live after array call;
If any condition check fails, no modification will be made.
"""
debug_print = _make_debug_print("inline_const_arraycall")
scope = block.scope
def inline_array(array_var, expr, stmts, list_vars, dels):
"""Check to see if the given "array_var" is created from a list
of constants, and try to inline the list definition as array
initialization.
Extra statements produced with be appended to "stmts".
"""
callname = guard(find_callname, func_ir, expr)
require(callname and callname[1] == "numpy" and callname[0] == "array")
require(expr.args[0].name in list_vars)
ret_type = calltypes[expr].return_type
require(isinstance(ret_type, types.ArrayCompatible) and ret_type.ndim == 1)
loc = expr.loc
list_var = expr.args[0]
# Get the type of the array to be created.
array_typ = typemap[array_var.name]
debug_print("inline array_var = ", array_var, " list_var = ", list_var)
# Get the element type of the array to be created.
dtype = array_typ.dtype
# Get the sequence of operations to provide values to the new array.
seq, _ = find_build_sequence(func_ir, list_var)
size = len(seq)
# Create a tuple to pass to empty below to specify the new array size.
size_var = ir.Var(scope, mk_unique_var("size"), loc)
size_tuple_var = ir.Var(scope, mk_unique_var("size_tuple"), loc)
size_typ = types.intp
size_tuple_typ = types.UniTuple(size_typ, 1)
typemap[size_var.name] = size_typ
typemap[size_tuple_var.name] = size_tuple_typ
stmts.append(_new_definition(func_ir, size_var, ir.Const(size, loc=loc), loc))
stmts.append(
_new_definition(
func_ir,
size_tuple_var,
ir.Expr.build_tuple(items=[size_var], loc=loc),
loc,
)
)
# The general approach is to create an empty array and then fill
# the elements in one-by-one from their specificiation.
# Get the numpy type to pass to empty.
nptype = types.DType(dtype)
# Create a variable to hold the numpy empty function.
empty_func = ir.Var(scope, mk_unique_var("empty_func"), loc)
fnty = get_np_ufunc_typ(np.empty)
sig = context.resolve_function_type(fnty, (size_typ,), {"dtype": nptype})
typemap[empty_func.name] = fnty
stmts.append(
_new_definition(
func_ir, empty_func, ir.Global("empty", np.empty, loc=loc), loc
)
)
# We pass two arguments to empty, first the size tuple and second
# the dtype of the new array. Here, we created typ_var which is
# the dtype argument of the new array. typ_var in turn is created
# by getattr of the dtype string on the numpy module.
# Create var for numpy module.
g_np_var = ir.Var(scope, mk_unique_var("$np_g_var"), loc)
typemap[g_np_var.name] = types.misc.Module(np)
g_np = ir.Global("np", np, loc)
stmts.append(_new_definition(func_ir, g_np_var, g_np, loc))
# Create var for result of numpy.<dtype>.
typ_var = ir.Var(scope, mk_unique_var("$np_typ_var"), loc)
typemap[typ_var.name] = nptype
dtype_str = str(dtype)
if dtype_str == "bool":
dtype_str = "bool_"
# Get dtype attribute of numpy module.
np_typ_getattr = ir.Expr.getattr(g_np_var, dtype_str, loc)
stmts.append(_new_definition(func_ir, typ_var, np_typ_getattr, loc))
# Create the call to numpy.empty passing the size tuple and dtype var.
empty_call = ir.Expr.call(empty_func, [size_var, typ_var], {}, loc=loc)
calltypes[empty_call] = typing.signature(array_typ, size_typ, nptype)
stmts.append(_new_definition(func_ir, array_var, empty_call, loc))
# Fill in the new empty array one-by-one.
for i in range(size):
index_var = ir.Var(scope, mk_unique_var("index"), loc)
index_typ = types.intp
typemap[index_var.name] = index_typ
stmts.append(_new_definition(func_ir, index_var, ir.Const(i, loc), loc))
setitem = ir.SetItem(array_var, index_var, seq[i], loc)
calltypes[setitem] = typing.signature(
types.none, array_typ, index_typ, dtype
)
stmts.append(setitem)
stmts.extend(dels)
return True
# list_vars keep track of the variable created from the latest
# build_list instruction, as well as its synonyms.
list_vars = []
# dead_vars keep track of those in list_vars that are considered dead.
dead_vars = []
# list_items keep track of the elements used in build_list.
list_items = []
stmts = []
# dels keep track of the deletion of list_items, which will need to be
# moved after array initialization.
dels = []
modified = False
for inst in block.body:
if isinstance(inst, ir.Assign):
if isinstance(inst.value, ir.Var):
if inst.value.name in list_vars:
list_vars.append(inst.target.name)
stmts.append(inst)
continue
elif isinstance(inst.value, ir.Expr):
expr = inst.value
if expr.op == "build_list":
list_vars = [inst.target.name]
list_items = [x.name for x in expr.items]
stmts.append(inst)
continue
elif expr.op == "call" and expr in calltypes:
arr_var = inst.target
if guard(inline_array, inst.target, expr, stmts, list_vars, dels):
modified = True
continue
elif isinstance(inst, ir.Del):
removed_var = inst.value
if removed_var in list_items:
dels.append(inst)
continue
elif removed_var in list_vars:
# one of the list_vars is considered dead.
dead_vars.append(removed_var)
list_vars.remove(removed_var)
stmts.append(inst)
if list_vars == []:
# if all list_vars are considered dead, we need to filter
# them out from existing stmts to completely remove
# build_list.
# Note that if a translation didn't take place, dead_vars
# will also be empty when we reach this point.
body = []
for inst in stmts:
if (
isinstance(inst, ir.Assign)
and inst.target.name in dead_vars
) or (isinstance(inst, ir.Del) and inst.value in dead_vars):
continue
body.append(inst)
stmts = body
dead_vars = []
modified = True
continue
stmts.append(inst)
# If the list is used in any capacity between build_list and array
# call, then we must call off the translation for this list because
# it could be mutated and list_items would no longer be applicable.
list_var_used = any([x.name in list_vars for x in inst.list_vars()])
if list_var_used:
list_vars = []
dead_vars = []
list_items = []
dels = []
return stmts if modified else None
|
def _inline_const_arraycall(block, func_ir, context, typemap, calltypes):
"""Look for array(list) call where list is a constant list created by build_list,
and turn them into direct array creation and initialization, if the following
conditions are met:
1. The build_list call immediate preceeds the array call;
2. The list variable is no longer live after array call;
If any condition check fails, no modification will be made.
"""
debug_print = _make_debug_print("inline_const_arraycall")
scope = block.scope
def inline_array(array_var, expr, stmts, list_vars, dels):
"""Check to see if the given "array_var" is created from a list
of constants, and try to inline the list definition as array
initialization.
Extra statements produced with be appended to "stmts".
"""
callname = guard(find_callname, func_ir, expr)
require(callname and callname[1] == "numpy" and callname[0] == "array")
require(expr.args[0].name in list_vars)
ret_type = calltypes[expr].return_type
require(isinstance(ret_type, types.ArrayCompatible) and ret_type.ndim == 1)
loc = expr.loc
list_var = expr.args[0]
array_typ = typemap[array_var.name]
debug_print("inline array_var = ", array_var, " list_var = ", list_var)
dtype = array_typ.dtype
seq, op = find_build_sequence(func_ir, list_var)
size = len(seq)
size_var = ir.Var(scope, mk_unique_var("size"), loc)
size_tuple_var = ir.Var(scope, mk_unique_var("size_tuple"), loc)
size_typ = types.intp
size_tuple_typ = types.UniTuple(size_typ, 1)
typemap[size_var.name] = size_typ
typemap[size_tuple_var.name] = size_tuple_typ
stmts.append(_new_definition(func_ir, size_var, ir.Const(size, loc=loc), loc))
stmts.append(
_new_definition(
func_ir,
size_tuple_var,
ir.Expr.build_tuple(items=[size_var], loc=loc),
loc,
)
)
empty_func = ir.Var(scope, mk_unique_var("empty_func"), loc)
fnty = get_np_ufunc_typ(np.empty)
sig = context.resolve_function_type(fnty, (size_typ,), {})
typemap[empty_func.name] = fnty #
stmts.append(
_new_definition(
func_ir, empty_func, ir.Global("empty", np.empty, loc=loc), loc
)
)
empty_call = ir.Expr.call(empty_func, [size_var], {}, loc=loc)
calltypes[empty_call] = typing.signature(array_typ, size_typ)
stmts.append(_new_definition(func_ir, array_var, empty_call, loc))
for i in range(size):
index_var = ir.Var(scope, mk_unique_var("index"), loc)
index_typ = types.intp
typemap[index_var.name] = index_typ
stmts.append(_new_definition(func_ir, index_var, ir.Const(i, loc), loc))
setitem = ir.SetItem(array_var, index_var, seq[i], loc)
calltypes[setitem] = typing.signature(
types.none, array_typ, index_typ, dtype
)
stmts.append(setitem)
stmts.extend(dels)
return True
# list_vars keep track of the variable created from the latest
# build_list instruction, as well as its synonyms.
list_vars = []
# dead_vars keep track of those in list_vars that are considered dead.
dead_vars = []
# list_items keep track of the elements used in build_list.
list_items = []
stmts = []
# dels keep track of the deletion of list_items, which will need to be
# moved after array initialization.
dels = []
modified = False
for inst in block.body:
if isinstance(inst, ir.Assign):
if isinstance(inst.value, ir.Var):
if inst.value.name in list_vars:
list_vars.append(inst.target.name)
stmts.append(inst)
continue
elif isinstance(inst.value, ir.Expr):
expr = inst.value
if expr.op == "build_list":
list_vars = [inst.target.name]
list_items = [x.name for x in expr.items]
stmts.append(inst)
continue
elif expr.op == "call" and expr in calltypes:
arr_var = inst.target
if guard(inline_array, inst.target, expr, stmts, list_vars, dels):
modified = True
continue
elif isinstance(inst, ir.Del):
removed_var = inst.value
if removed_var in list_items:
dels.append(inst)
continue
elif removed_var in list_vars:
# one of the list_vars is considered dead.
dead_vars.append(removed_var)
list_vars.remove(removed_var)
stmts.append(inst)
if list_vars == []:
# if all list_vars are considered dead, we need to filter
# them out from existing stmts to completely remove
# build_list.
# Note that if a translation didn't take place, dead_vars
# will also be empty when we reach this point.
body = []
for inst in stmts:
if (
isinstance(inst, ir.Assign)
and inst.target.name in dead_vars
) or (isinstance(inst, ir.Del) and inst.value in dead_vars):
continue
body.append(inst)
stmts = body
dead_vars = []
modified = True
continue
stmts.append(inst)
# If the list is used in any capacity between build_list and array
# call, then we must call off the translation for this list because
# it could be mutated and list_items would no longer be applicable.
list_var_used = any([x.name in list_vars for x in inst.list_vars()])
if list_var_used:
list_vars = []
dead_vars = []
list_items = []
dels = []
return stmts if modified else None
|
https://github.com/numba/numba/issues/3069
|
Traceback (most recent call last):
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\errors.py", line 491, in new_error_context
yield
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\lowering.py", line 216, in lower_block
self.lower_inst(inst)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\lowering.py", line 365, in lower_inst
func(self, inst)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\npyufunc\parfor.py", line 83, in _lower_parfor_parallel
bool(alias_map), index_var_typ)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\npyufunc\parfor.py", line 357, in _create_gufunc_for_parfor_body
typemap[arr] = types.npytypes.Array(typemap[var], 1, "C")
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\types\abstract.py", line 60, in __call__
inst = type.__call__(cls, *args, **kwargs)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\types\npytypes.py", line 288, in __init__
super(Array, self).__init__(dtype, ndim, layout, name=name)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\types\common.py", line 51, in __init__
raise TypeError("Buffer dtype cannot be buffer")
TypeError: Buffer dtype cannot be buffer
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\dispatcher.py", line 360, in _compile_for_args
raise e
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\dispatcher.py", line 311, in _compile_for_args
return self.compile(tuple(argtypes))
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\dispatcher.py", line 618, in compile
cres = self._compiler.compile(args, return_type)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\dispatcher.py", line 83, in compile
pipeline_class=self.pipeline_class)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 871, in compile_extra
return pipeline.compile_extra(func)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 365, in compile_extra
return self._compile_bytecode()
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 802, in _compile_bytecode
return self._compile_core()
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 789, in _compile_core
res = pm.run(self.status)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 251, in run
raise patched_exception
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 243, in run
stage()
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 676, in stage_nopython_backend
self._backend(lowerfn, objectmode=False)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 626, in _backend
lowered = lowerfn()
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 613, in backend_nopython_mode
self.flags)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 990, in native_lowering_stage
lower.lower()
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\lowering.py", line 135, in lower
self.lower_normal_function(self.fndesc)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\lowering.py", line 176, in lower_normal_function
entry_block_tail = self.lower_function_body()
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\lowering.py", line 201, in lower_function_body
self.lower_block(block)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\lowering.py", line 216, in lower_block
self.lower_inst(inst)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\contextlib.py", line 99, in __exit__
self.gen.throw(type, value, traceback)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\errors.py", line 499, in new_error_context
six.reraise(type(newerr), newerr, tb)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\six.py", line 659, in reraise
raise value
numba.errors.LoweringError: Failed at nopython (nopython mode backend)
Buffer dtype cannot be buffer
File "<stdin>", line 5:
[1] During: lowering "id=6[LoopNest(index_variable = parfor_index.271, range = (0, $16.3, 1))]{28: <ir.Block at <stdin> (5)>}Var(parfor_index.271, <stdin> (5))" at <stdin> (5)
-------------------------------------------------------------------------------
This should not have happened, a problem has occurred in Numba's internals.
Please report the error message and traceback, along with a minimal reproducer
at: https://github.com/numba/numba/issues/new
If you need help writing a minimal reproducer please see:
http://matthewrocklin.com/blog/work/2018/02/28/minimal-bug-reports
If more help is needed please feel free to speak to the Numba core developers
directly at: https://gitter.im/numba/numba
Thanks in advance for your help in improving Numba!
|
TypeError
|
def inline_array(array_var, expr, stmts, list_vars, dels):
"""Check to see if the given "array_var" is created from a list
of constants, and try to inline the list definition as array
initialization.
Extra statements produced with be appended to "stmts".
"""
callname = guard(find_callname, func_ir, expr)
require(callname and callname[1] == "numpy" and callname[0] == "array")
require(expr.args[0].name in list_vars)
ret_type = calltypes[expr].return_type
require(isinstance(ret_type, types.ArrayCompatible) and ret_type.ndim == 1)
loc = expr.loc
list_var = expr.args[0]
# Get the type of the array to be created.
array_typ = typemap[array_var.name]
debug_print("inline array_var = ", array_var, " list_var = ", list_var)
# Get the element type of the array to be created.
dtype = array_typ.dtype
# Get the sequence of operations to provide values to the new array.
seq, _ = find_build_sequence(func_ir, list_var)
size = len(seq)
# Create a tuple to pass to empty below to specify the new array size.
size_var = ir.Var(scope, mk_unique_var("size"), loc)
size_tuple_var = ir.Var(scope, mk_unique_var("size_tuple"), loc)
size_typ = types.intp
size_tuple_typ = types.UniTuple(size_typ, 1)
typemap[size_var.name] = size_typ
typemap[size_tuple_var.name] = size_tuple_typ
stmts.append(_new_definition(func_ir, size_var, ir.Const(size, loc=loc), loc))
stmts.append(
_new_definition(
func_ir, size_tuple_var, ir.Expr.build_tuple(items=[size_var], loc=loc), loc
)
)
# The general approach is to create an empty array and then fill
# the elements in one-by-one from their specificiation.
# Get the numpy type to pass to empty.
nptype = types.DType(dtype)
# Create a variable to hold the numpy empty function.
empty_func = ir.Var(scope, mk_unique_var("empty_func"), loc)
fnty = get_np_ufunc_typ(np.empty)
sig = context.resolve_function_type(fnty, (size_typ,), {"dtype": nptype})
typemap[empty_func.name] = fnty
stmts.append(
_new_definition(func_ir, empty_func, ir.Global("empty", np.empty, loc=loc), loc)
)
# We pass two arguments to empty, first the size tuple and second
# the dtype of the new array. Here, we created typ_var which is
# the dtype argument of the new array. typ_var in turn is created
# by getattr of the dtype string on the numpy module.
# Create var for numpy module.
g_np_var = ir.Var(scope, mk_unique_var("$np_g_var"), loc)
typemap[g_np_var.name] = types.misc.Module(np)
g_np = ir.Global("np", np, loc)
stmts.append(_new_definition(func_ir, g_np_var, g_np, loc))
# Create var for result of numpy.<dtype>.
typ_var = ir.Var(scope, mk_unique_var("$np_typ_var"), loc)
typemap[typ_var.name] = nptype
dtype_str = str(dtype)
if dtype_str == "bool":
dtype_str = "bool_"
# Get dtype attribute of numpy module.
np_typ_getattr = ir.Expr.getattr(g_np_var, dtype_str, loc)
stmts.append(_new_definition(func_ir, typ_var, np_typ_getattr, loc))
# Create the call to numpy.empty passing the size tuple and dtype var.
empty_call = ir.Expr.call(empty_func, [size_var, typ_var], {}, loc=loc)
calltypes[empty_call] = typing.signature(array_typ, size_typ, nptype)
stmts.append(_new_definition(func_ir, array_var, empty_call, loc))
# Fill in the new empty array one-by-one.
for i in range(size):
index_var = ir.Var(scope, mk_unique_var("index"), loc)
index_typ = types.intp
typemap[index_var.name] = index_typ
stmts.append(_new_definition(func_ir, index_var, ir.Const(i, loc), loc))
setitem = ir.SetItem(array_var, index_var, seq[i], loc)
calltypes[setitem] = typing.signature(types.none, array_typ, index_typ, dtype)
stmts.append(setitem)
stmts.extend(dels)
return True
|
def inline_array(array_var, expr, stmts, list_vars, dels):
"""Check to see if the given "array_var" is created from a list
of constants, and try to inline the list definition as array
initialization.
Extra statements produced with be appended to "stmts".
"""
callname = guard(find_callname, func_ir, expr)
require(callname and callname[1] == "numpy" and callname[0] == "array")
require(expr.args[0].name in list_vars)
ret_type = calltypes[expr].return_type
require(isinstance(ret_type, types.ArrayCompatible) and ret_type.ndim == 1)
loc = expr.loc
list_var = expr.args[0]
array_typ = typemap[array_var.name]
debug_print("inline array_var = ", array_var, " list_var = ", list_var)
dtype = array_typ.dtype
seq, op = find_build_sequence(func_ir, list_var)
size = len(seq)
size_var = ir.Var(scope, mk_unique_var("size"), loc)
size_tuple_var = ir.Var(scope, mk_unique_var("size_tuple"), loc)
size_typ = types.intp
size_tuple_typ = types.UniTuple(size_typ, 1)
typemap[size_var.name] = size_typ
typemap[size_tuple_var.name] = size_tuple_typ
stmts.append(_new_definition(func_ir, size_var, ir.Const(size, loc=loc), loc))
stmts.append(
_new_definition(
func_ir, size_tuple_var, ir.Expr.build_tuple(items=[size_var], loc=loc), loc
)
)
empty_func = ir.Var(scope, mk_unique_var("empty_func"), loc)
fnty = get_np_ufunc_typ(np.empty)
sig = context.resolve_function_type(fnty, (size_typ,), {})
typemap[empty_func.name] = fnty #
stmts.append(
_new_definition(func_ir, empty_func, ir.Global("empty", np.empty, loc=loc), loc)
)
empty_call = ir.Expr.call(empty_func, [size_var], {}, loc=loc)
calltypes[empty_call] = typing.signature(array_typ, size_typ)
stmts.append(_new_definition(func_ir, array_var, empty_call, loc))
for i in range(size):
index_var = ir.Var(scope, mk_unique_var("index"), loc)
index_typ = types.intp
typemap[index_var.name] = index_typ
stmts.append(_new_definition(func_ir, index_var, ir.Const(i, loc), loc))
setitem = ir.SetItem(array_var, index_var, seq[i], loc)
calltypes[setitem] = typing.signature(types.none, array_typ, index_typ, dtype)
stmts.append(setitem)
stmts.extend(dels)
return True
|
https://github.com/numba/numba/issues/3069
|
Traceback (most recent call last):
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\errors.py", line 491, in new_error_context
yield
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\lowering.py", line 216, in lower_block
self.lower_inst(inst)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\lowering.py", line 365, in lower_inst
func(self, inst)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\npyufunc\parfor.py", line 83, in _lower_parfor_parallel
bool(alias_map), index_var_typ)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\npyufunc\parfor.py", line 357, in _create_gufunc_for_parfor_body
typemap[arr] = types.npytypes.Array(typemap[var], 1, "C")
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\types\abstract.py", line 60, in __call__
inst = type.__call__(cls, *args, **kwargs)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\types\npytypes.py", line 288, in __init__
super(Array, self).__init__(dtype, ndim, layout, name=name)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\types\common.py", line 51, in __init__
raise TypeError("Buffer dtype cannot be buffer")
TypeError: Buffer dtype cannot be buffer
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\dispatcher.py", line 360, in _compile_for_args
raise e
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\dispatcher.py", line 311, in _compile_for_args
return self.compile(tuple(argtypes))
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\dispatcher.py", line 618, in compile
cres = self._compiler.compile(args, return_type)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\dispatcher.py", line 83, in compile
pipeline_class=self.pipeline_class)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 871, in compile_extra
return pipeline.compile_extra(func)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 365, in compile_extra
return self._compile_bytecode()
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 802, in _compile_bytecode
return self._compile_core()
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 789, in _compile_core
res = pm.run(self.status)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 251, in run
raise patched_exception
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 243, in run
stage()
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 676, in stage_nopython_backend
self._backend(lowerfn, objectmode=False)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 626, in _backend
lowered = lowerfn()
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 613, in backend_nopython_mode
self.flags)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 990, in native_lowering_stage
lower.lower()
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\lowering.py", line 135, in lower
self.lower_normal_function(self.fndesc)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\lowering.py", line 176, in lower_normal_function
entry_block_tail = self.lower_function_body()
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\lowering.py", line 201, in lower_function_body
self.lower_block(block)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\lowering.py", line 216, in lower_block
self.lower_inst(inst)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\contextlib.py", line 99, in __exit__
self.gen.throw(type, value, traceback)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\errors.py", line 499, in new_error_context
six.reraise(type(newerr), newerr, tb)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\six.py", line 659, in reraise
raise value
numba.errors.LoweringError: Failed at nopython (nopython mode backend)
Buffer dtype cannot be buffer
File "<stdin>", line 5:
[1] During: lowering "id=6[LoopNest(index_variable = parfor_index.271, range = (0, $16.3, 1))]{28: <ir.Block at <stdin> (5)>}Var(parfor_index.271, <stdin> (5))" at <stdin> (5)
-------------------------------------------------------------------------------
This should not have happened, a problem has occurred in Numba's internals.
Please report the error message and traceback, along with a minimal reproducer
at: https://github.com/numba/numba/issues/new
If you need help writing a minimal reproducer please see:
http://matthewrocklin.com/blog/work/2018/02/28/minimal-bug-reports
If more help is needed please feel free to speak to the Numba core developers
directly at: https://gitter.im/numba/numba
Thanks in advance for your help in improving Numba!
|
TypeError
|
def _lower_parfor_parallel(lowerer, parfor):
from .parallel import get_thread_count
"""Lowerer that handles LLVM code generation for parfor.
This function lowers a parfor IR node to LLVM.
The general approach is as follows:
1) The code from the parfor's init block is lowered normally
in the context of the current function.
2) The body of the parfor is transformed into a gufunc function.
3) Code is inserted into the main function that calls do_scheduling
to divide the iteration space for each thread, allocates
reduction arrays, calls the gufunc function, and then invokes
the reduction function across the reduction arrays to produce
the final reduction values.
"""
typingctx = lowerer.context.typing_context
targetctx = lowerer.context
# We copy the typemap here because for race condition variable we'll
# update their type to array so they can be updated by the gufunc.
orig_typemap = lowerer.fndesc.typemap
# replace original typemap with copy and restore the original at the end.
lowerer.fndesc.typemap = copy.copy(orig_typemap)
typemap = lowerer.fndesc.typemap
varmap = lowerer.varmap
if config.DEBUG_ARRAY_OPT:
print("_lower_parfor_parallel")
parfor.dump()
loc = parfor.init_block.loc
scope = parfor.init_block.scope
# produce instructions for init_block
if config.DEBUG_ARRAY_OPT:
print("init_block = ", parfor.init_block, " ", type(parfor.init_block))
for instr in parfor.init_block.body:
if config.DEBUG_ARRAY_OPT:
print("lower init_block instr = ", instr)
lowerer.lower_inst(instr)
for racevar in parfor.races:
if racevar not in varmap:
rvtyp = typemap[racevar]
rv = ir.Var(scope, racevar, loc)
lowerer._alloca_var(rv.name, rvtyp)
alias_map = {}
arg_aliases = {}
numba.parfor.find_potential_aliases_parfor(
parfor, parfor.params, typemap, lowerer.func_ir, alias_map, arg_aliases
)
if config.DEBUG_ARRAY_OPT:
print("alias_map", alias_map)
print("arg_aliases", arg_aliases)
# run get_parfor_outputs() and get_parfor_reductions() before gufunc creation
# since Jumps are modified so CFG of loop_body dict will become invalid
assert parfor.params != None
parfor_output_arrays = numba.parfor.get_parfor_outputs(parfor, parfor.params)
parfor_redvars, parfor_reddict = numba.parfor.get_parfor_reductions(
parfor, parfor.params, lowerer.fndesc.calltypes
)
# init reduction array allocation here.
nredvars = len(parfor_redvars)
redarrs = {}
if nredvars > 0:
# reduction arrays outer dimension equal to thread count
thread_count = get_thread_count()
scope = parfor.init_block.scope
loc = parfor.init_block.loc
# For each reduction variable...
for i in range(nredvars):
redvar_typ = lowerer.fndesc.typemap[parfor_redvars[i]]
redvar = ir.Var(scope, parfor_redvars[i], loc)
redarrvar_typ = redtyp_to_redarraytype(redvar_typ)
reddtype = redarrvar_typ.dtype
if config.DEBUG_ARRAY_OPT:
print(
"redvar_typ",
redvar_typ,
redarrvar_typ,
reddtype,
types.DType(reddtype),
)
# If this is reduction over an array,
# the reduction array has just one added per-worker dimension.
if isinstance(redvar_typ, types.npytypes.Array):
redarrdim = redvar_typ.ndim + 1
else:
redarrdim = 1
# Reduction array is created and initialized to the initial reduction value.
# First create a var for the numpy empty ufunc.
empty_func = ir.Var(scope, mk_unique_var("empty_func"), loc)
ff_fnty = get_np_ufunc_typ(np.empty)
ff_sig = typingctx.resolve_function_type(
ff_fnty,
(types.UniTuple(types.intp, redarrdim), types.DType(reddtype)),
{},
)
typemap[empty_func.name] = ff_fnty
empty_assign = ir.Assign(
ir.Global("empty", np.empty, loc=loc), empty_func, loc
)
lowerer.lower_inst(empty_assign)
# Create var for outer dimension size of reduction array equal to number of threads.
num_threads_var = ir.Var(scope, mk_unique_var("num_threads"), loc)
num_threads_assign = ir.Assign(
ir.Const(thread_count, loc), num_threads_var, loc
)
typemap[num_threads_var.name] = types.intp
lowerer.lower_inst(num_threads_assign)
# Empty call takes tuple of sizes. Create here and fill in outer dimension (num threads).
size_var = ir.Var(scope, mk_unique_var("tuple_size_var"), loc)
typemap[size_var.name] = types.UniTuple(types.intp, redarrdim)
size_var_list = [num_threads_var]
# If this is a reduction over an array...
if isinstance(redvar_typ, types.npytypes.Array):
# Add code to get the shape of the array being reduced over.
redshape_var = ir.Var(scope, mk_unique_var("redarr_shape"), loc)
typemap[redshape_var.name] = types.UniTuple(types.intp, redvar_typ.ndim)
redshape_getattr = ir.Expr.getattr(redvar, "shape", loc)
redshape_assign = ir.Assign(redshape_getattr, redshape_var, loc)
lowerer.lower_inst(redshape_assign)
# Add the dimension sizes of the array being reduced over to the tuple of sizes pass to empty.
for j in range(redvar_typ.ndim):
onedimvar = ir.Var(scope, mk_unique_var("redshapeonedim"), loc)
onedimgetitem = ir.Expr.static_getitem(redshape_var, j, None, loc)
typemap[onedimvar.name] = types.intp
onedimassign = ir.Assign(onedimgetitem, onedimvar, loc)
lowerer.lower_inst(onedimassign)
size_var_list += [onedimvar]
size_call = ir.Expr.build_tuple(size_var_list, loc)
size_assign = ir.Assign(size_call, size_var, loc)
lowerer.lower_inst(size_assign)
# Add call to empty passing the size var tuple.
empty_call = ir.Expr.call(empty_func, [size_var], {}, loc=loc)
redarr_var = ir.Var(scope, mk_unique_var("redarr"), loc)
typemap[redarr_var.name] = redarrvar_typ
empty_call_assign = ir.Assign(empty_call, redarr_var, loc)
lowerer.fndesc.calltypes[empty_call] = ff_sig
lowerer.lower_inst(empty_call_assign)
# Remember mapping of original reduction array to the newly created per-worker reduction array.
redarrs[redvar.name] = redarr_var
init_val = parfor_reddict[parfor_redvars[i]][0]
if init_val != None:
if isinstance(redvar_typ, types.npytypes.Array):
# Create an array of identity values for the reduction.
# First, create a variable for np.full.
full_func = ir.Var(scope, mk_unique_var("full_func"), loc)
full_fnty = get_np_ufunc_typ(np.full)
full_sig = typingctx.resolve_function_type(
full_fnty,
(
types.UniTuple(types.intp, redvar_typ.ndim),
reddtype,
types.DType(reddtype),
),
{},
)
typemap[full_func.name] = full_fnty
full_assign = ir.Assign(
ir.Global("full", np.full, loc=loc), full_func, loc
)
lowerer.lower_inst(full_assign)
# Then create a var with the identify value.
init_val_var = ir.Var(scope, mk_unique_var("init_val"), loc)
init_val_assign = ir.Assign(
ir.Const(init_val, loc), init_val_var, loc
)
typemap[init_val_var.name] = reddtype
lowerer.lower_inst(init_val_assign)
# Then, call np.full with the shape of the reduction array and the identity value.
full_call = ir.Expr.call(
full_func, [redshape_var, init_val_var], {}, loc=loc
)
lowerer.fndesc.calltypes[full_call] = full_sig
redtoset = ir.Var(scope, mk_unique_var("redtoset"), loc)
redtoset_assign = ir.Assign(full_call, redtoset, loc)
typemap[redtoset.name] = redvar_typ
lowerer.lower_inst(redtoset_assign)
else:
redtoset = ir.Var(scope, mk_unique_var("redtoset"), loc)
redtoset_assign = ir.Assign(ir.Const(init_val, loc), redtoset, loc)
typemap[redtoset.name] = reddtype
lowerer.lower_inst(redtoset_assign)
else:
redtoset = redvar
# For each thread, initialize the per-worker reduction array to the current reduction array value.
for j in range(get_thread_count()):
index_var = ir.Var(scope, mk_unique_var("index_var"), loc)
index_var_assign = ir.Assign(ir.Const(j, loc), index_var, loc)
typemap[index_var.name] = types.uintp
lowerer.lower_inst(index_var_assign)
redsetitem = ir.SetItem(redarr_var, index_var, redtoset, loc)
lowerer.fndesc.calltypes[redsetitem] = signature(
types.none,
typemap[redarr_var.name],
typemap[index_var.name],
redvar_typ,
)
lowerer.lower_inst(redsetitem)
# compile parfor body as a separate function to be used with GUFuncWrapper
flags = copy.copy(parfor.flags)
flags.set("error_model", "numpy")
# Can't get here unless flags.set('auto_parallel', ParallelOptions(True))
index_var_typ = typemap[parfor.loop_nests[0].index_variable.name]
# index variables should have the same type, check rest of indices
for l in parfor.loop_nests[1:]:
assert typemap[l.index_variable.name] == index_var_typ
numba.parfor.sequential_parfor_lowering = True
func, func_args, func_sig, redargstartdim, func_arg_types = (
_create_gufunc_for_parfor_body(
lowerer,
parfor,
typemap,
typingctx,
targetctx,
flags,
{},
bool(alias_map),
index_var_typ,
parfor.races,
)
)
numba.parfor.sequential_parfor_lowering = False
# get the shape signature
func_args = ["sched"] + func_args
num_reductions = len(parfor_redvars)
num_inputs = len(func_args) - len(parfor_output_arrays) - num_reductions
if config.DEBUG_ARRAY_OPT:
print("func_args = ", func_args)
print("num_inputs = ", num_inputs)
print("parfor_outputs = ", parfor_output_arrays)
print("parfor_redvars = ", parfor_redvars)
print("num_reductions = ", num_reductions)
gu_signature = _create_shape_signature(
parfor.get_shape_classes,
num_inputs,
num_reductions,
func_args,
redargstartdim,
func_sig,
parfor.races,
typemap,
)
if config.DEBUG_ARRAY_OPT:
print("gu_signature = ", gu_signature)
# call the func in parallel by wrapping it with ParallelGUFuncBuilder
loop_ranges = [(l.start, l.stop, l.step) for l in parfor.loop_nests]
if config.DEBUG_ARRAY_OPT:
print("loop_nests = ", parfor.loop_nests)
print("loop_ranges = ", loop_ranges)
call_parallel_gufunc(
lowerer,
func,
gu_signature,
func_sig,
func_args,
func_arg_types,
loop_ranges,
parfor_redvars,
parfor_reddict,
redarrs,
parfor.init_block,
index_var_typ,
parfor.races,
)
if config.DEBUG_ARRAY_OPT:
sys.stdout.flush()
if nredvars > 0:
# Perform the final reduction across the reduction array created above.
thread_count = get_thread_count()
scope = parfor.init_block.scope
loc = parfor.init_block.loc
# For each reduction variable...
for i in range(nredvars):
name = parfor_redvars[i]
redarr = redarrs[name]
redvar_typ = lowerer.fndesc.typemap[name]
if config.DEBUG_ARRAY_OPT_RUNTIME:
res_print_str = "res_print"
strconsttyp = types.Const(res_print_str)
lhs = ir.Var(scope, mk_unique_var("str_const"), loc)
assign_lhs = ir.Assign(
value=ir.Const(value=res_print_str, loc=loc), target=lhs, loc=loc
)
typemap[lhs.name] = strconsttyp
lowerer.lower_inst(assign_lhs)
res_print = ir.Print(args=[lhs, redarr], vararg=None, loc=loc)
lowerer.fndesc.calltypes[res_print] = signature(
types.none, typemap[lhs.name], typemap[redarr.name]
)
print("res_print", res_print)
lowerer.lower_inst(res_print)
# For each element in the reduction array created above.
for j in range(get_thread_count()):
# Create index var to access that element.
index_var = ir.Var(scope, mk_unique_var("index_var"), loc)
index_var_assign = ir.Assign(ir.Const(j, loc), index_var, loc)
typemap[index_var.name] = types.uintp
lowerer.lower_inst(index_var_assign)
# Read that element from the array into oneelem.
oneelem = ir.Var(scope, mk_unique_var("redelem"), loc)
oneelemgetitem = ir.Expr.getitem(redarr, index_var, loc)
typemap[oneelem.name] = redvar_typ
lowerer.fndesc.calltypes[oneelemgetitem] = signature(
redvar_typ, typemap[redarr.name], typemap[index_var.name]
)
oneelemassign = ir.Assign(oneelemgetitem, oneelem, loc)
lowerer.lower_inst(oneelemassign)
init_var = ir.Var(scope, name + "#init", loc)
init_assign = ir.Assign(oneelem, init_var, loc)
if name + "#init" not in typemap:
typemap[init_var.name] = redvar_typ
lowerer.lower_inst(init_assign)
if config.DEBUG_ARRAY_OPT_RUNTIME:
res_print_str = "one_res_print"
strconsttyp = types.Const(res_print_str)
lhs = ir.Var(scope, mk_unique_var("str_const"), loc)
assign_lhs = ir.Assign(
value=ir.Const(value=res_print_str, loc=loc),
target=lhs,
loc=loc,
)
typemap[lhs.name] = strconsttyp
lowerer.lower_inst(assign_lhs)
res_print = ir.Print(
args=[lhs, index_var, oneelem, init_var], vararg=None, loc=loc
)
lowerer.fndesc.calltypes[res_print] = signature(
types.none,
typemap[lhs.name],
typemap[index_var.name],
typemap[oneelem.name],
typemap[init_var.name],
)
print("res_print", res_print)
lowerer.lower_inst(res_print)
# generate code for combining reduction variable with thread output
for inst in parfor_reddict[name][1]:
# If we have a case where a parfor body has an array reduction like A += B
# and A and B have different data types then the reduction in the parallel
# region will operate on those differeing types. However, here, after the
# parallel region, we are summing across the reduction array and that is
# guaranteed to have the same data type so we need to change the reduction
# nodes so that the right-hand sides have a type equal to the reduction-type
# and therefore the left-hand side.
if isinstance(inst, ir.Assign):
rhs = inst.value
# We probably need to generalize this since it only does substitutions in
# inplace_ginops.
if (
isinstance(rhs, ir.Expr)
and rhs.op == "inplace_binop"
and rhs.rhs.name == init_var.name
):
# Get calltype of rhs.
ct = lowerer.fndesc.calltypes[rhs]
assert len(ct.args) == 2
# Create new arg types replace the second arg type with the reduction var type.
ctargs = (ct.args[0], redvar_typ)
# Update the signature of the call.
ct = ct.replace(args=ctargs)
# Remove so we can re-insrt since calltypes is unique dict.
lowerer.fndesc.calltypes.pop(rhs)
# Add calltype back in for the expr with updated signature.
lowerer.fndesc.calltypes[rhs] = ct
lowerer.lower_inst(inst)
# Restore the original typemap of the function that was replaced temporarily at the
# Beginning of this function.
lowerer.fndesc.typemap = orig_typemap
|
def _lower_parfor_parallel(lowerer, parfor):
"""Lowerer that handles LLVM code generation for parfor.
This function lowers a parfor IR node to LLVM.
The general approach is as follows:
1) The code from the parfor's init block is lowered normally
in the context of the current function.
2) The body of the parfor is transformed into a gufunc function.
3) Code is inserted into the main function that calls do_scheduling
to divide the iteration space for each thread, allocates
reduction arrays, calls the gufunc function, and then invokes
the reduction function across the reduction arrays to produce
the final reduction values.
"""
typingctx = lowerer.context.typing_context
targetctx = lowerer.context
# We copy the typemap here because for race condition variable we'll
# update their type to array so they can be updated by the gufunc.
typemap = copy.copy(lowerer.fndesc.typemap)
varmap = lowerer.varmap
if config.DEBUG_ARRAY_OPT:
print("_lower_parfor_parallel")
parfor.dump()
loc = parfor.init_block.loc
scope = parfor.init_block.scope
# produce instructions for init_block
if config.DEBUG_ARRAY_OPT:
print("init_block = ", parfor.init_block, " ", type(parfor.init_block))
for instr in parfor.init_block.body:
if config.DEBUG_ARRAY_OPT:
print("lower init_block instr = ", instr)
lowerer.lower_inst(instr)
for racevar in parfor.races:
if racevar not in varmap:
rvtyp = typemap[racevar]
rv = ir.Var(scope, racevar, loc)
lowerer._alloca_var(rv.name, rvtyp)
alias_map = {}
arg_aliases = {}
numba.parfor.find_potential_aliases_parfor(
parfor, parfor.params, typemap, lowerer.func_ir, alias_map, arg_aliases
)
if config.DEBUG_ARRAY_OPT:
print("alias_map", alias_map)
print("arg_aliases", arg_aliases)
# run get_parfor_outputs() and get_parfor_reductions() before gufunc creation
# since Jumps are modified so CFG of loop_body dict will become invalid
assert parfor.params != None
parfor_output_arrays = numba.parfor.get_parfor_outputs(parfor, parfor.params)
parfor_redvars, parfor_reddict = numba.parfor.get_parfor_reductions(
parfor, parfor.params, lowerer.fndesc.calltypes
)
# compile parfor body as a separate function to be used with GUFuncWrapper
flags = copy.copy(parfor.flags)
flags.set("error_model", "numpy")
# Can't get here unless flags.set('auto_parallel', ParallelOptions(True))
index_var_typ = typemap[parfor.loop_nests[0].index_variable.name]
# index variables should have the same type, check rest of indices
for l in parfor.loop_nests[1:]:
assert typemap[l.index_variable.name] == index_var_typ
numba.parfor.sequential_parfor_lowering = True
func, func_args, func_sig = _create_gufunc_for_parfor_body(
lowerer,
parfor,
typemap,
typingctx,
targetctx,
flags,
{},
bool(alias_map),
index_var_typ,
parfor.races,
)
numba.parfor.sequential_parfor_lowering = False
# get the shape signature
func_args = ["sched"] + func_args
num_reductions = len(parfor_redvars)
num_inputs = len(func_args) - len(parfor_output_arrays) - num_reductions
if config.DEBUG_ARRAY_OPT:
print("num_inputs = ", num_inputs)
print("parfor_outputs = ", parfor_output_arrays)
print("parfor_redvars = ", parfor_redvars)
gu_signature = _create_shape_signature(
parfor.get_shape_classes,
num_inputs,
num_reductions,
func_args,
func_sig,
parfor.races,
)
if config.DEBUG_ARRAY_OPT:
print("gu_signature = ", gu_signature)
# call the func in parallel by wrapping it with ParallelGUFuncBuilder
loop_ranges = [(l.start, l.stop, l.step) for l in parfor.loop_nests]
if config.DEBUG_ARRAY_OPT:
print("loop_nests = ", parfor.loop_nests)
print("loop_ranges = ", loop_ranges)
call_parallel_gufunc(
lowerer,
func,
gu_signature,
func_sig,
func_args,
loop_ranges,
parfor_redvars,
parfor_reddict,
parfor.init_block,
index_var_typ,
parfor.races,
)
if config.DEBUG_ARRAY_OPT:
sys.stdout.flush()
|
https://github.com/numba/numba/issues/3069
|
Traceback (most recent call last):
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\errors.py", line 491, in new_error_context
yield
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\lowering.py", line 216, in lower_block
self.lower_inst(inst)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\lowering.py", line 365, in lower_inst
func(self, inst)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\npyufunc\parfor.py", line 83, in _lower_parfor_parallel
bool(alias_map), index_var_typ)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\npyufunc\parfor.py", line 357, in _create_gufunc_for_parfor_body
typemap[arr] = types.npytypes.Array(typemap[var], 1, "C")
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\types\abstract.py", line 60, in __call__
inst = type.__call__(cls, *args, **kwargs)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\types\npytypes.py", line 288, in __init__
super(Array, self).__init__(dtype, ndim, layout, name=name)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\types\common.py", line 51, in __init__
raise TypeError("Buffer dtype cannot be buffer")
TypeError: Buffer dtype cannot be buffer
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\dispatcher.py", line 360, in _compile_for_args
raise e
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\dispatcher.py", line 311, in _compile_for_args
return self.compile(tuple(argtypes))
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\dispatcher.py", line 618, in compile
cres = self._compiler.compile(args, return_type)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\dispatcher.py", line 83, in compile
pipeline_class=self.pipeline_class)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 871, in compile_extra
return pipeline.compile_extra(func)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 365, in compile_extra
return self._compile_bytecode()
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 802, in _compile_bytecode
return self._compile_core()
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 789, in _compile_core
res = pm.run(self.status)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 251, in run
raise patched_exception
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 243, in run
stage()
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 676, in stage_nopython_backend
self._backend(lowerfn, objectmode=False)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 626, in _backend
lowered = lowerfn()
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 613, in backend_nopython_mode
self.flags)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 990, in native_lowering_stage
lower.lower()
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\lowering.py", line 135, in lower
self.lower_normal_function(self.fndesc)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\lowering.py", line 176, in lower_normal_function
entry_block_tail = self.lower_function_body()
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\lowering.py", line 201, in lower_function_body
self.lower_block(block)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\lowering.py", line 216, in lower_block
self.lower_inst(inst)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\contextlib.py", line 99, in __exit__
self.gen.throw(type, value, traceback)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\errors.py", line 499, in new_error_context
six.reraise(type(newerr), newerr, tb)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\six.py", line 659, in reraise
raise value
numba.errors.LoweringError: Failed at nopython (nopython mode backend)
Buffer dtype cannot be buffer
File "<stdin>", line 5:
[1] During: lowering "id=6[LoopNest(index_variable = parfor_index.271, range = (0, $16.3, 1))]{28: <ir.Block at <stdin> (5)>}Var(parfor_index.271, <stdin> (5))" at <stdin> (5)
-------------------------------------------------------------------------------
This should not have happened, a problem has occurred in Numba's internals.
Please report the error message and traceback, along with a minimal reproducer
at: https://github.com/numba/numba/issues/new
If you need help writing a minimal reproducer please see:
http://matthewrocklin.com/blog/work/2018/02/28/minimal-bug-reports
If more help is needed please feel free to speak to the Numba core developers
directly at: https://gitter.im/numba/numba
Thanks in advance for your help in improving Numba!
|
TypeError
|
def _create_shape_signature(
get_shape_classes,
num_inputs,
num_reductions,
args,
redargstartdim,
func_sig,
races,
typemap,
):
"""Create shape signature for GUFunc"""
if config.DEBUG_ARRAY_OPT:
print(
"_create_shape_signature", num_inputs, num_reductions, args, redargstartdim
)
for i in args[1:]:
print("argument", i, type(i), get_shape_classes(i, typemap=typemap))
num_inouts = len(args) - num_reductions
# maximum class number for array shapes
classes = [
get_shape_classes(var, typemap=typemap) if var not in races else (-1,)
for var in args[1:]
]
class_set = set()
for _class in classes:
if _class:
for i in _class:
class_set.add(i)
max_class = max(class_set) + 1 if class_set else 0
classes.insert(0, (max_class,)) # force set the class of 'sched' argument
class_set.add(max_class)
class_map = {}
# TODO: use prefix + class number instead of single char
alphabet = ord("a")
for n in class_set:
if n >= 0:
class_map[n] = chr(alphabet)
alphabet += 1
alpha_dict = {"latest_alpha": alphabet}
def bump_alpha(c, class_map):
if c >= 0:
return class_map[c]
else:
alpha_dict["latest_alpha"] += 1
return chr(alpha_dict["latest_alpha"])
gu_sin = []
gu_sout = []
count = 0
syms_sin = ()
if config.DEBUG_ARRAY_OPT:
print("args", args)
print("classes", classes)
for cls, arg in zip(classes, args):
count = count + 1
if cls:
dim_syms = tuple(bump_alpha(c, class_map) for c in cls)
else:
dim_syms = ()
if count > num_inouts:
# Strip the first symbol corresponding to the number of workers
# so that guvectorize will parallelize across the reduction.
gu_sin.append(dim_syms[redargstartdim[arg] :])
else:
gu_sin.append(dim_syms)
syms_sin += dim_syms
return (gu_sin, gu_sout)
|
def _create_shape_signature(
get_shape_classes, num_inputs, num_reductions, args, func_sig, races
):
"""Create shape signature for GUFunc"""
if config.DEBUG_ARRAY_OPT:
print("_create_shape_signature", num_inputs, num_reductions, args, func_sig)
for i in args[1:]:
print("argument", i, type(i), get_shape_classes(i))
num_inouts = len(args) - num_reductions
# maximum class number for array shapes
classes = [
get_shape_classes(var) if var not in races else (-1,) for var in args[1:]
]
class_set = set()
for _class in classes:
if _class:
for i in _class:
class_set.add(i)
max_class = max(class_set) + 1 if class_set else 0
classes.insert(0, (max_class,)) # force set the class of 'sched' argument
class_set.add(max_class)
class_map = {}
# TODO: use prefix + class number instead of single char
alphabet = ord("a")
for n in class_set:
if n >= 0:
class_map[n] = chr(alphabet)
alphabet += 1
alpha_dict = {"latest_alpha": alphabet}
def bump_alpha(c, class_map):
if c >= 0:
return class_map[c]
else:
alpha_dict["latest_alpha"] += 1
return chr(alpha_dict["latest_alpha"])
gu_sin = []
gu_sout = []
count = 0
syms_sin = ()
for cls in classes:
# print("create_shape_signature: var = ", var, " typ = ", typ)
count = count + 1
if cls:
dim_syms = tuple(bump_alpha(c, class_map) for c in cls)
else:
dim_syms = ()
if count > num_inouts:
# assume all reduction vars are scalar
gu_sout.append(())
elif count > num_inputs and all([s in syms_sin for s in dim_syms]):
# only when dim_syms are found in gu_sin, we consider this as
# output
gu_sout.append(dim_syms)
else:
gu_sin.append(dim_syms)
syms_sin += dim_syms
return (gu_sin, gu_sout)
|
https://github.com/numba/numba/issues/3069
|
Traceback (most recent call last):
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\errors.py", line 491, in new_error_context
yield
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\lowering.py", line 216, in lower_block
self.lower_inst(inst)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\lowering.py", line 365, in lower_inst
func(self, inst)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\npyufunc\parfor.py", line 83, in _lower_parfor_parallel
bool(alias_map), index_var_typ)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\npyufunc\parfor.py", line 357, in _create_gufunc_for_parfor_body
typemap[arr] = types.npytypes.Array(typemap[var], 1, "C")
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\types\abstract.py", line 60, in __call__
inst = type.__call__(cls, *args, **kwargs)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\types\npytypes.py", line 288, in __init__
super(Array, self).__init__(dtype, ndim, layout, name=name)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\types\common.py", line 51, in __init__
raise TypeError("Buffer dtype cannot be buffer")
TypeError: Buffer dtype cannot be buffer
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\dispatcher.py", line 360, in _compile_for_args
raise e
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\dispatcher.py", line 311, in _compile_for_args
return self.compile(tuple(argtypes))
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\dispatcher.py", line 618, in compile
cres = self._compiler.compile(args, return_type)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\dispatcher.py", line 83, in compile
pipeline_class=self.pipeline_class)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 871, in compile_extra
return pipeline.compile_extra(func)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 365, in compile_extra
return self._compile_bytecode()
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 802, in _compile_bytecode
return self._compile_core()
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 789, in _compile_core
res = pm.run(self.status)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 251, in run
raise patched_exception
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 243, in run
stage()
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 676, in stage_nopython_backend
self._backend(lowerfn, objectmode=False)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 626, in _backend
lowered = lowerfn()
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 613, in backend_nopython_mode
self.flags)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 990, in native_lowering_stage
lower.lower()
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\lowering.py", line 135, in lower
self.lower_normal_function(self.fndesc)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\lowering.py", line 176, in lower_normal_function
entry_block_tail = self.lower_function_body()
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\lowering.py", line 201, in lower_function_body
self.lower_block(block)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\lowering.py", line 216, in lower_block
self.lower_inst(inst)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\contextlib.py", line 99, in __exit__
self.gen.throw(type, value, traceback)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\errors.py", line 499, in new_error_context
six.reraise(type(newerr), newerr, tb)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\six.py", line 659, in reraise
raise value
numba.errors.LoweringError: Failed at nopython (nopython mode backend)
Buffer dtype cannot be buffer
File "<stdin>", line 5:
[1] During: lowering "id=6[LoopNest(index_variable = parfor_index.271, range = (0, $16.3, 1))]{28: <ir.Block at <stdin> (5)>}Var(parfor_index.271, <stdin> (5))" at <stdin> (5)
-------------------------------------------------------------------------------
This should not have happened, a problem has occurred in Numba's internals.
Please report the error message and traceback, along with a minimal reproducer
at: https://github.com/numba/numba/issues/new
If you need help writing a minimal reproducer please see:
http://matthewrocklin.com/blog/work/2018/02/28/minimal-bug-reports
If more help is needed please feel free to speak to the Numba core developers
directly at: https://gitter.im/numba/numba
Thanks in advance for your help in improving Numba!
|
TypeError
|
def _create_gufunc_for_parfor_body(
lowerer,
parfor,
typemap,
typingctx,
targetctx,
flags,
locals,
has_aliases,
index_var_typ,
races,
):
"""
Takes a parfor and creates a gufunc function for its body.
There are two parts to this function.
1) Code to iterate across the iteration space as defined by the schedule.
2) The parfor body that does the work for a single point in the iteration space.
Part 1 is created as Python text for simplicity with a sentinel assignment to mark the point
in the IR where the parfor body should be added.
This Python text is 'exec'ed into existence and its IR retrieved with run_frontend.
The IR is scanned for the sentinel assignment where that basic block is split and the IR
for the parfor body inserted.
"""
loc = parfor.init_block.loc
# The parfor body and the main function body share ir.Var nodes.
# We have to do some replacements of Var names in the parfor body to make them
# legal parameter names. If we don't copy then the Vars in the main function also
# would incorrectly change their name.
loop_body = copy.copy(parfor.loop_body)
remove_dels(loop_body)
parfor_dim = len(parfor.loop_nests)
loop_indices = [l.index_variable.name for l in parfor.loop_nests]
# Get all the parfor params.
parfor_params = parfor.params
# Get just the outputs of the parfor.
parfor_outputs = numba.parfor.get_parfor_outputs(parfor, parfor_params)
# Get all parfor reduction vars, and operators.
typemap = lowerer.fndesc.typemap
parfor_redvars, parfor_reddict = numba.parfor.get_parfor_reductions(
parfor, parfor_params, lowerer.fndesc.calltypes
)
# Compute just the parfor inputs as a set difference.
parfor_inputs = sorted(
list(set(parfor_params) - set(parfor_outputs) - set(parfor_redvars))
)
races = races.difference(set(parfor_redvars))
for race in races:
warnings.warn_explicit(
"Variable %s used in parallel loop may be written "
"to simultaneously by multiple workers and may result "
"in non-deterministic or unintended results." % race,
ParallelSafetyWarning,
loc.filename,
loc.line,
)
replace_var_with_array(races, loop_body, typemap, lowerer.fndesc.calltypes)
if config.DEBUG_ARRAY_OPT == 1:
print("parfor_params = ", parfor_params, " ", type(parfor_params))
print("parfor_outputs = ", parfor_outputs, " ", type(parfor_outputs))
print("parfor_inputs = ", parfor_inputs, " ", type(parfor_inputs))
print("parfor_redvars = ", parfor_redvars, " ", type(parfor_redvars))
# Reduction variables are represented as arrays, so they go under
# different names.
parfor_redarrs = []
parfor_red_arg_types = []
for var in parfor_redvars:
arr = var + "_arr"
parfor_redarrs.append(arr)
redarraytype = redtyp_to_redarraytype(typemap[var])
typemap[arr] = redarraytype_to_sig(redarraytype)
parfor_red_arg_types.append(redarraytype)
# Reorder all the params so that inputs go first then outputs.
parfor_params = parfor_inputs + parfor_outputs + parfor_redarrs
if config.DEBUG_ARRAY_OPT == 1:
print("parfor_params = ", parfor_params, " ", type(parfor_params))
print("loop_indices = ", loop_indices, " ", type(loop_indices))
print("loop_body = ", loop_body, " ", type(loop_body))
_print_body(loop_body)
# Some Var are not legal parameter names so create a dict of potentially illegal
# param name to guaranteed legal name.
param_dict = legalize_names(parfor_params + parfor_redvars)
if config.DEBUG_ARRAY_OPT == 1:
print("param_dict = ", sorted(param_dict.items()), " ", type(param_dict))
# Some loop_indices are not legal parameter names so create a dict of potentially illegal
# loop index to guaranteed legal name.
ind_dict = legalize_names(loop_indices)
# Compute a new list of legal loop index names.
legal_loop_indices = [ind_dict[v] for v in loop_indices]
if config.DEBUG_ARRAY_OPT == 1:
print("ind_dict = ", sorted(ind_dict.items()), " ", type(ind_dict))
print(
"legal_loop_indices = ", legal_loop_indices, " ", type(legal_loop_indices)
)
for pd in parfor_params:
print("pd = ", pd)
print("pd type = ", typemap[pd], " ", type(typemap[pd]))
# Get the types of each parameter.
param_types = [typemap[v] for v in parfor_params]
# Calculate types of args passed to gufunc.
func_arg_types = [
typemap[v] for v in (parfor_inputs + parfor_outputs)
] + parfor_red_arg_types
# Replace illegal parameter names in the loop body with legal ones.
replace_var_names(loop_body, param_dict)
# remember the name before legalizing as the actual arguments
parfor_args = parfor_params
# Change parfor_params to be legal names.
parfor_params = [param_dict[v] for v in parfor_params]
parfor_params_orig = parfor_params
parfor_params = []
ascontig = False
for pindex in range(len(parfor_params_orig)):
if (
ascontig
and pindex < len(parfor_inputs)
and isinstance(param_types[pindex], types.npytypes.Array)
):
parfor_params.append(parfor_params_orig[pindex] + "param")
else:
parfor_params.append(parfor_params_orig[pindex])
# Change parfor body to replace illegal loop index vars with legal ones.
replace_var_names(loop_body, ind_dict)
loop_body_var_table = get_name_var_table(loop_body)
sentinel_name = get_unused_var_name("__sentinel__", loop_body_var_table)
if config.DEBUG_ARRAY_OPT == 1:
print("legal parfor_params = ", parfor_params, " ", type(parfor_params))
# Determine the unique names of the scheduling and gufunc functions.
# sched_func_name = "__numba_parfor_sched_%s" % (hex(hash(parfor)).replace("-", "_"))
gufunc_name = "__numba_parfor_gufunc_%s" % (hex(hash(parfor)).replace("-", "_"))
if config.DEBUG_ARRAY_OPT:
# print("sched_func_name ", type(sched_func_name), " ", sched_func_name)
print("gufunc_name ", type(gufunc_name), " ", gufunc_name)
gufunc_txt = ""
# Create the gufunc function.
gufunc_txt += (
"def " + gufunc_name + "(sched, " + (", ".join(parfor_params)) + "):\n"
)
for pindex in range(len(parfor_inputs)):
if ascontig and isinstance(param_types[pindex], types.npytypes.Array):
gufunc_txt += (
" "
+ parfor_params_orig[pindex]
+ " = np.ascontiguousarray("
+ parfor_params[pindex]
+ ")\n"
)
# Add initialization of reduction variables
for arr, var in zip(parfor_redarrs, parfor_redvars):
# If reduction variable is a scalar then save current value to
# temp and accumulate on that temp to prevent false sharing.
if redtyp_is_scalar(typemap[var]):
gufunc_txt += " " + param_dict[var] + "=" + param_dict[arr] + "[0]\n"
else:
# The reduction variable is an array so np.copy it to a temp.
gufunc_txt += (
" " + param_dict[var] + "=np.copy(" + param_dict[arr] + ")\n"
)
# For each dimension of the parfor, create a for loop in the generated gufunc function.
# Iterate across the proper values extracted from the schedule.
# The form of the schedule is start_dim0, start_dim1, ..., start_dimN, end_dim0,
# end_dim1, ..., end_dimN
for eachdim in range(parfor_dim):
for indent in range(eachdim + 1):
gufunc_txt += " "
sched_dim = eachdim
gufunc_txt += (
"for "
+ legal_loop_indices[eachdim]
+ " in range(sched["
+ str(sched_dim)
+ "], sched["
+ str(sched_dim + parfor_dim)
+ "] + np.uint8(1)):\n"
)
if config.DEBUG_ARRAY_OPT_RUNTIME:
for indent in range(parfor_dim + 1):
gufunc_txt += " "
gufunc_txt += "print("
for eachdim in range(parfor_dim):
gufunc_txt += (
'"'
+ legal_loop_indices[eachdim]
+ '",'
+ legal_loop_indices[eachdim]
+ ","
)
gufunc_txt += ")\n"
# Add the sentinel assignment so that we can find the loop body position
# in the IR.
for indent in range(parfor_dim + 1):
gufunc_txt += " "
gufunc_txt += sentinel_name + " = 0\n"
# Add assignments of reduction variables (for returning the value)
redargstartdim = {}
for arr, var in zip(parfor_redarrs, parfor_redvars):
# After the gufunc loops, copy the accumulated temp value back to reduction array.
if redtyp_is_scalar(typemap[var]):
gufunc_txt += " " + param_dict[arr] + "[0] = " + param_dict[var] + "\n"
redargstartdim[arr] = 1
else:
# After the gufunc loops, copy the accumulated temp array back to reduction array with ":"
gufunc_txt += (
" " + param_dict[arr] + "[:] = " + param_dict[var] + "[:]\n"
)
redargstartdim[arr] = 0
gufunc_txt += " return None\n"
if config.DEBUG_ARRAY_OPT:
print("gufunc_txt = ", type(gufunc_txt), "\n", gufunc_txt)
# Force gufunc outline into existence.
globls = {"np": np}
locls = {}
exec_(gufunc_txt, globls, locls)
gufunc_func = locls[gufunc_name]
if config.DEBUG_ARRAY_OPT:
print("gufunc_func = ", type(gufunc_func), "\n", gufunc_func)
# Get the IR for the gufunc outline.
gufunc_ir = compiler.run_frontend(gufunc_func)
if config.DEBUG_ARRAY_OPT:
print("gufunc_ir dump ", type(gufunc_ir))
gufunc_ir.dump()
print("loop_body dump ", type(loop_body))
_print_body(loop_body)
# rename all variables in gufunc_ir afresh
var_table = get_name_var_table(gufunc_ir.blocks)
new_var_dict = {}
reserved_names = [sentinel_name] + list(param_dict.values()) + legal_loop_indices
for name, var in var_table.items():
if not (name in reserved_names):
new_var_dict[name] = mk_unique_var(name)
replace_var_names(gufunc_ir.blocks, new_var_dict)
if config.DEBUG_ARRAY_OPT:
print("gufunc_ir dump after renaming ")
gufunc_ir.dump()
gufunc_param_types = [
numba.types.npytypes.Array(index_var_typ, 1, "C")
] + param_types
if config.DEBUG_ARRAY_OPT:
print(
"gufunc_param_types = ", type(gufunc_param_types), "\n", gufunc_param_types
)
gufunc_stub_last_label = max(gufunc_ir.blocks.keys()) + 1
# Add gufunc stub last label to each parfor.loop_body label to prevent
# label conflicts.
loop_body = add_offset_to_labels(loop_body, gufunc_stub_last_label)
# new label for splitting sentinel block
new_label = max(loop_body.keys()) + 1
# If enabled, add a print statement after every assignment.
if config.DEBUG_ARRAY_OPT_RUNTIME:
for label, block in loop_body.items():
new_block = block.copy()
new_block.clear()
loc = block.loc
scope = block.scope
for inst in block.body:
new_block.append(inst)
# Append print after assignment
if isinstance(inst, ir.Assign):
# Only apply to numbers
if typemap[inst.target.name] not in types.number_domain:
continue
# Make constant string
strval = "{} =".format(inst.target.name)
strconsttyp = types.Const(strval)
lhs = ir.Var(scope, mk_unique_var("str_const"), loc)
assign_lhs = ir.Assign(
value=ir.Const(value=strval, loc=loc), target=lhs, loc=loc
)
typemap[lhs.name] = strconsttyp
new_block.append(assign_lhs)
# Make print node
print_node = ir.Print(args=[lhs, inst.target], vararg=None, loc=loc)
new_block.append(print_node)
sig = numba.typing.signature(
types.none, typemap[lhs.name], typemap[inst.target.name]
)
lowerer.fndesc.calltypes[print_node] = sig
loop_body[label] = new_block
if config.DEBUG_ARRAY_OPT:
print("parfor loop body")
_print_body(loop_body)
wrapped_blocks = wrap_loop_body(loop_body)
hoisted = hoist(parfor_params, loop_body, typemap, wrapped_blocks)
start_block = gufunc_ir.blocks[min(gufunc_ir.blocks.keys())]
start_block.body = start_block.body[:-1] + hoisted + [start_block.body[-1]]
unwrap_loop_body(loop_body)
if config.DEBUG_ARRAY_OPT:
print("After hoisting")
_print_body(loop_body)
# Search all the block in the gufunc outline for the sentinel assignment.
for label, block in gufunc_ir.blocks.items():
for i, inst in enumerate(block.body):
if isinstance(inst, ir.Assign) and inst.target.name == sentinel_name:
# We found the sentinel assignment.
loc = inst.loc
scope = block.scope
# split block across __sentinel__
# A new block is allocated for the statements prior to the sentinel
# but the new block maintains the current block label.
prev_block = ir.Block(scope, loc)
prev_block.body = block.body[:i]
# The current block is used for statements after the sentinel.
block.body = block.body[i + 1 :]
# But the current block gets a new label.
body_first_label = min(loop_body.keys())
# The previous block jumps to the minimum labelled block of the
# parfor body.
prev_block.append(ir.Jump(body_first_label, loc))
# Add all the parfor loop body blocks to the gufunc function's
# IR.
for l, b in loop_body.items():
gufunc_ir.blocks[l] = b
body_last_label = max(loop_body.keys())
gufunc_ir.blocks[new_label] = block
gufunc_ir.blocks[label] = prev_block
# Add a jump from the last parfor body block to the block containing
# statements after the sentinel.
gufunc_ir.blocks[body_last_label].append(ir.Jump(new_label, loc))
break
else:
continue
break
if config.DEBUG_ARRAY_OPT:
print("gufunc_ir last dump before renaming")
gufunc_ir.dump()
gufunc_ir.blocks = rename_labels(gufunc_ir.blocks)
remove_dels(gufunc_ir.blocks)
if config.DEBUG_ARRAY_OPT:
print("gufunc_ir last dump")
gufunc_ir.dump()
print("flags", flags)
print("typemap", typemap)
old_alias = flags.noalias
if not has_aliases:
if config.DEBUG_ARRAY_OPT:
print("No aliases found so adding noalias flag.")
flags.noalias = True
kernel_func = compiler.compile_ir(
typingctx, targetctx, gufunc_ir, gufunc_param_types, types.none, flags, locals
)
flags.noalias = old_alias
kernel_sig = signature(types.none, *gufunc_param_types)
if config.DEBUG_ARRAY_OPT:
print("kernel_sig = ", kernel_sig)
return kernel_func, parfor_args, kernel_sig, redargstartdim, func_arg_types
|
def _create_gufunc_for_parfor_body(
lowerer,
parfor,
typemap,
typingctx,
targetctx,
flags,
locals,
has_aliases,
index_var_typ,
races,
):
"""
Takes a parfor and creates a gufunc function for its body.
There are two parts to this function.
1) Code to iterate across the iteration space as defined by the schedule.
2) The parfor body that does the work for a single point in the iteration space.
Part 1 is created as Python text for simplicity with a sentinel assignment to mark the point
in the IR where the parfor body should be added.
This Python text is 'exec'ed into existence and its IR retrieved with run_frontend.
The IR is scanned for the sentinel assignment where that basic block is split and the IR
for the parfor body inserted.
"""
loc = parfor.init_block.loc
# The parfor body and the main function body share ir.Var nodes.
# We have to do some replacements of Var names in the parfor body to make them
# legal parameter names. If we don't copy then the Vars in the main function also
# would incorrectly change their name.
loop_body = copy.copy(parfor.loop_body)
remove_dels(loop_body)
parfor_dim = len(parfor.loop_nests)
loop_indices = [l.index_variable.name for l in parfor.loop_nests]
# Get all the parfor params.
parfor_params = parfor.params
# Get just the outputs of the parfor.
parfor_outputs = numba.parfor.get_parfor_outputs(parfor, parfor_params)
# Get all parfor reduction vars, and operators.
parfor_redvars, parfor_reddict = numba.parfor.get_parfor_reductions(
parfor, parfor_params, lowerer.fndesc.calltypes
)
# Compute just the parfor inputs as a set difference.
parfor_inputs = sorted(
list(set(parfor_params) - set(parfor_outputs) - set(parfor_redvars))
)
races = races.difference(set(parfor_redvars))
for race in races:
warnings.warn_explicit(
"Variable %s used in parallel loop may be written "
"to simultaneously by multiple workers and may result "
"in non-deterministic or unintended results." % race,
ParallelSafetyWarning,
loc.filename,
loc.line,
)
replace_var_with_array(races, loop_body, typemap, lowerer.fndesc.calltypes)
if config.DEBUG_ARRAY_OPT == 1:
print("parfor_params = ", parfor_params, " ", type(parfor_params))
print("parfor_outputs = ", parfor_outputs, " ", type(parfor_outputs))
print("parfor_inputs = ", parfor_inputs, " ", type(parfor_inputs))
print("parfor_redvars = ", parfor_redvars, " ", type(parfor_redvars))
# Reduction variables are represented as arrays, so they go under
# different names.
parfor_redarrs = []
for var in parfor_redvars:
arr = var + "_arr"
parfor_redarrs.append(arr)
typemap[arr] = types.npytypes.Array(typemap[var], 1, "C")
# Reorder all the params so that inputs go first then outputs.
parfor_params = parfor_inputs + parfor_outputs + parfor_redarrs
if config.DEBUG_ARRAY_OPT == 1:
print("parfor_params = ", parfor_params, " ", type(parfor_params))
print("loop_indices = ", loop_indices, " ", type(loop_indices))
print("loop_body = ", loop_body, " ", type(loop_body))
_print_body(loop_body)
# Some Var are not legal parameter names so create a dict of potentially illegal
# param name to guaranteed legal name.
param_dict = legalize_names(parfor_params + parfor_redvars)
if config.DEBUG_ARRAY_OPT == 1:
print("param_dict = ", sorted(param_dict.items()), " ", type(param_dict))
# Some loop_indices are not legal parameter names so create a dict of potentially illegal
# loop index to guaranteed legal name.
ind_dict = legalize_names(loop_indices)
# Compute a new list of legal loop index names.
legal_loop_indices = [ind_dict[v] for v in loop_indices]
if config.DEBUG_ARRAY_OPT == 1:
print("ind_dict = ", sorted(ind_dict.items()), " ", type(ind_dict))
print(
"legal_loop_indices = ", legal_loop_indices, " ", type(legal_loop_indices)
)
for pd in parfor_params:
print("pd = ", pd)
print("pd type = ", typemap[pd], " ", type(typemap[pd]))
# Get the types of each parameter.
param_types = [typemap[v] for v in parfor_params]
# if config.DEBUG_ARRAY_OPT==1:
# param_types_dict = { v:typemap[v] for v in parfor_params }
# print("param_types_dict = ", param_types_dict, " ", type(param_types_dict))
# print("param_types = ", param_types, " ", type(param_types))
# Replace illegal parameter names in the loop body with legal ones.
replace_var_names(loop_body, param_dict)
# remember the name before legalizing as the actual arguments
parfor_args = parfor_params
# Change parfor_params to be legal names.
parfor_params = [param_dict[v] for v in parfor_params]
parfor_params_orig = parfor_params
parfor_params = []
ascontig = False
for pindex in range(len(parfor_params_orig)):
if (
ascontig
and pindex < len(parfor_inputs)
and isinstance(param_types[pindex], types.npytypes.Array)
):
parfor_params.append(parfor_params_orig[pindex] + "param")
else:
parfor_params.append(parfor_params_orig[pindex])
# Change parfor body to replace illegal loop index vars with legal ones.
replace_var_names(loop_body, ind_dict)
loop_body_var_table = get_name_var_table(loop_body)
sentinel_name = get_unused_var_name("__sentinel__", loop_body_var_table)
if config.DEBUG_ARRAY_OPT == 1:
print("legal parfor_params = ", parfor_params, " ", type(parfor_params))
# Determine the unique names of the scheduling and gufunc functions.
# sched_func_name = "__numba_parfor_sched_%s" % (hex(hash(parfor)).replace("-", "_"))
gufunc_name = "__numba_parfor_gufunc_%s" % (hex(hash(parfor)).replace("-", "_"))
if config.DEBUG_ARRAY_OPT:
# print("sched_func_name ", type(sched_func_name), " ", sched_func_name)
print("gufunc_name ", type(gufunc_name), " ", gufunc_name)
gufunc_txt = ""
# Create the gufunc function.
gufunc_txt += (
"def " + gufunc_name + "(sched, " + (", ".join(parfor_params)) + "):\n"
)
for pindex in range(len(parfor_inputs)):
if ascontig and isinstance(param_types[pindex], types.npytypes.Array):
gufunc_txt += (
" "
+ parfor_params_orig[pindex]
+ " = np.ascontiguousarray("
+ parfor_params[pindex]
+ ")\n"
)
# Add initialization of reduction variables
for arr, var in zip(parfor_redarrs, parfor_redvars):
gufunc_txt += " " + param_dict[var] + "=" + param_dict[arr] + "[0]\n"
# For each dimension of the parfor, create a for loop in the generated gufunc function.
# Iterate across the proper values extracted from the schedule.
# The form of the schedule is start_dim0, start_dim1, ..., start_dimN, end_dim0,
# end_dim1, ..., end_dimN
for eachdim in range(parfor_dim):
for indent in range(eachdim + 1):
gufunc_txt += " "
sched_dim = eachdim
gufunc_txt += (
"for "
+ legal_loop_indices[eachdim]
+ " in range(sched["
+ str(sched_dim)
+ "], sched["
+ str(sched_dim + parfor_dim)
+ "] + np.uint8(1)):\n"
)
if config.DEBUG_ARRAY_OPT_RUNTIME:
for indent in range(parfor_dim + 1):
gufunc_txt += " "
gufunc_txt += "print("
for eachdim in range(parfor_dim):
gufunc_txt += (
'"'
+ legal_loop_indices[eachdim]
+ '",'
+ legal_loop_indices[eachdim]
+ ","
)
gufunc_txt += ")\n"
# Add the sentinel assignment so that we can find the loop body position
# in the IR.
for indent in range(parfor_dim + 1):
gufunc_txt += " "
gufunc_txt += sentinel_name + " = 0\n"
# Add assignments of reduction variables (for returning the value)
for arr, var in zip(parfor_redarrs, parfor_redvars):
gufunc_txt += " " + param_dict[arr] + "[0] = " + param_dict[var] + "\n"
gufunc_txt += " return None\n"
if config.DEBUG_ARRAY_OPT:
print("gufunc_txt = ", type(gufunc_txt), "\n", gufunc_txt)
# Force gufunc outline into existence.
globls = {"np": np}
locls = {}
exec_(gufunc_txt, globls, locls)
gufunc_func = locls[gufunc_name]
if config.DEBUG_ARRAY_OPT:
print("gufunc_func = ", type(gufunc_func), "\n", gufunc_func)
# Get the IR for the gufunc outline.
gufunc_ir = compiler.run_frontend(gufunc_func)
if config.DEBUG_ARRAY_OPT:
print("gufunc_ir dump ", type(gufunc_ir))
gufunc_ir.dump()
print("loop_body dump ", type(loop_body))
_print_body(loop_body)
# rename all variables in gufunc_ir afresh
var_table = get_name_var_table(gufunc_ir.blocks)
new_var_dict = {}
reserved_names = [sentinel_name] + list(param_dict.values()) + legal_loop_indices
for name, var in var_table.items():
if not (name in reserved_names):
new_var_dict[name] = mk_unique_var(name)
replace_var_names(gufunc_ir.blocks, new_var_dict)
if config.DEBUG_ARRAY_OPT:
print("gufunc_ir dump after renaming ")
gufunc_ir.dump()
gufunc_param_types = [
numba.types.npytypes.Array(index_var_typ, 1, "C")
] + param_types
if config.DEBUG_ARRAY_OPT:
print(
"gufunc_param_types = ", type(gufunc_param_types), "\n", gufunc_param_types
)
gufunc_stub_last_label = max(gufunc_ir.blocks.keys()) + 1
# Add gufunc stub last label to each parfor.loop_body label to prevent
# label conflicts.
loop_body = add_offset_to_labels(loop_body, gufunc_stub_last_label)
# new label for splitting sentinel block
new_label = max(loop_body.keys()) + 1
# If enabled, add a print statement after every assignment.
if config.DEBUG_ARRAY_OPT_RUNTIME:
for label, block in loop_body.items():
new_block = block.copy()
new_block.clear()
loc = block.loc
scope = block.scope
for inst in block.body:
new_block.append(inst)
# Append print after assignment
if isinstance(inst, ir.Assign):
# Only apply to numbers
if typemap[inst.target.name] not in types.number_domain:
continue
# Make constant string
strval = "{} =".format(inst.target.name)
strconsttyp = types.Const(strval)
lhs = ir.Var(scope, mk_unique_var("str_const"), loc)
assign_lhs = ir.Assign(
value=ir.Const(value=strval, loc=loc), target=lhs, loc=loc
)
typemap[lhs.name] = strconsttyp
new_block.append(assign_lhs)
# Make print node
print_node = ir.Print(args=[lhs, inst.target], vararg=None, loc=loc)
new_block.append(print_node)
sig = numba.typing.signature(
types.none, typemap[lhs.name], typemap[inst.target.name]
)
lowerer.fndesc.calltypes[print_node] = sig
loop_body[label] = new_block
if config.DEBUG_ARRAY_OPT:
print("parfor loop body")
_print_body(loop_body)
wrapped_blocks = wrap_loop_body(loop_body)
hoisted = hoist(parfor_params, loop_body, typemap, wrapped_blocks)
start_block = gufunc_ir.blocks[min(gufunc_ir.blocks.keys())]
start_block.body = start_block.body[:-1] + hoisted + [start_block.body[-1]]
unwrap_loop_body(loop_body)
if config.DEBUG_ARRAY_OPT:
print("After hoisting")
_print_body(loop_body)
# Search all the block in the gufunc outline for the sentinel assignment.
for label, block in gufunc_ir.blocks.items():
for i, inst in enumerate(block.body):
if isinstance(inst, ir.Assign) and inst.target.name == sentinel_name:
# We found the sentinel assignment.
loc = inst.loc
scope = block.scope
# split block across __sentinel__
# A new block is allocated for the statements prior to the sentinel
# but the new block maintains the current block label.
prev_block = ir.Block(scope, loc)
prev_block.body = block.body[:i]
# The current block is used for statements after the sentinel.
block.body = block.body[i + 1 :]
# But the current block gets a new label.
body_first_label = min(loop_body.keys())
# The previous block jumps to the minimum labelled block of the
# parfor body.
prev_block.append(ir.Jump(body_first_label, loc))
# Add all the parfor loop body blocks to the gufunc function's
# IR.
for l, b in loop_body.items():
gufunc_ir.blocks[l] = b
body_last_label = max(loop_body.keys())
gufunc_ir.blocks[new_label] = block
gufunc_ir.blocks[label] = prev_block
# Add a jump from the last parfor body block to the block containing
# statements after the sentinel.
gufunc_ir.blocks[body_last_label].append(ir.Jump(new_label, loc))
break
else:
continue
break
if config.DEBUG_ARRAY_OPT:
print("gufunc_ir last dump before renaming")
gufunc_ir.dump()
gufunc_ir.blocks = rename_labels(gufunc_ir.blocks)
remove_dels(gufunc_ir.blocks)
if config.DEBUG_ARRAY_OPT:
print("gufunc_ir last dump")
gufunc_ir.dump()
print("flags", flags)
print("typemap", typemap)
old_alias = flags.noalias
if not has_aliases:
if config.DEBUG_ARRAY_OPT:
print("No aliases found so adding noalias flag.")
flags.noalias = True
kernel_func = compiler.compile_ir(
typingctx, targetctx, gufunc_ir, gufunc_param_types, types.none, flags, locals
)
flags.noalias = old_alias
kernel_sig = signature(types.none, *gufunc_param_types)
if config.DEBUG_ARRAY_OPT:
print("kernel_sig = ", kernel_sig)
return kernel_func, parfor_args, kernel_sig
|
https://github.com/numba/numba/issues/3069
|
Traceback (most recent call last):
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\errors.py", line 491, in new_error_context
yield
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\lowering.py", line 216, in lower_block
self.lower_inst(inst)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\lowering.py", line 365, in lower_inst
func(self, inst)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\npyufunc\parfor.py", line 83, in _lower_parfor_parallel
bool(alias_map), index_var_typ)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\npyufunc\parfor.py", line 357, in _create_gufunc_for_parfor_body
typemap[arr] = types.npytypes.Array(typemap[var], 1, "C")
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\types\abstract.py", line 60, in __call__
inst = type.__call__(cls, *args, **kwargs)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\types\npytypes.py", line 288, in __init__
super(Array, self).__init__(dtype, ndim, layout, name=name)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\types\common.py", line 51, in __init__
raise TypeError("Buffer dtype cannot be buffer")
TypeError: Buffer dtype cannot be buffer
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\dispatcher.py", line 360, in _compile_for_args
raise e
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\dispatcher.py", line 311, in _compile_for_args
return self.compile(tuple(argtypes))
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\dispatcher.py", line 618, in compile
cres = self._compiler.compile(args, return_type)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\dispatcher.py", line 83, in compile
pipeline_class=self.pipeline_class)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 871, in compile_extra
return pipeline.compile_extra(func)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 365, in compile_extra
return self._compile_bytecode()
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 802, in _compile_bytecode
return self._compile_core()
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 789, in _compile_core
res = pm.run(self.status)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 251, in run
raise patched_exception
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 243, in run
stage()
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 676, in stage_nopython_backend
self._backend(lowerfn, objectmode=False)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 626, in _backend
lowered = lowerfn()
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 613, in backend_nopython_mode
self.flags)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 990, in native_lowering_stage
lower.lower()
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\lowering.py", line 135, in lower
self.lower_normal_function(self.fndesc)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\lowering.py", line 176, in lower_normal_function
entry_block_tail = self.lower_function_body()
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\lowering.py", line 201, in lower_function_body
self.lower_block(block)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\lowering.py", line 216, in lower_block
self.lower_inst(inst)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\contextlib.py", line 99, in __exit__
self.gen.throw(type, value, traceback)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\errors.py", line 499, in new_error_context
six.reraise(type(newerr), newerr, tb)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\six.py", line 659, in reraise
raise value
numba.errors.LoweringError: Failed at nopython (nopython mode backend)
Buffer dtype cannot be buffer
File "<stdin>", line 5:
[1] During: lowering "id=6[LoopNest(index_variable = parfor_index.271, range = (0, $16.3, 1))]{28: <ir.Block at <stdin> (5)>}Var(parfor_index.271, <stdin> (5))" at <stdin> (5)
-------------------------------------------------------------------------------
This should not have happened, a problem has occurred in Numba's internals.
Please report the error message and traceback, along with a minimal reproducer
at: https://github.com/numba/numba/issues/new
If you need help writing a minimal reproducer please see:
http://matthewrocklin.com/blog/work/2018/02/28/minimal-bug-reports
If more help is needed please feel free to speak to the Numba core developers
directly at: https://gitter.im/numba/numba
Thanks in advance for your help in improving Numba!
|
TypeError
|
def call_parallel_gufunc(
lowerer,
cres,
gu_signature,
outer_sig,
expr_args,
expr_arg_types,
loop_ranges,
redvars,
reddict,
redarrdict,
init_block,
index_var_typ,
races,
):
"""
Adds the call to the gufunc function from the main function.
"""
context = lowerer.context
builder = lowerer.builder
library = lowerer.library
from .parallel import (
ParallelGUFuncBuilder,
build_gufunc_wrapper,
get_thread_count,
_launch_threads,
_init,
)
if config.DEBUG_ARRAY_OPT:
print("make_parallel_loop")
print("args = ", expr_args)
print(
"outer_sig = ",
outer_sig.args,
outer_sig.return_type,
outer_sig.recvr,
outer_sig.pysig,
)
print("loop_ranges = ", loop_ranges)
print("expr_args", expr_args)
print("expr_arg_types", expr_arg_types)
print("gu_signature", gu_signature)
# Build the wrapper for GUFunc
args, return_type = sigutils.normalize_signature(outer_sig)
llvm_func = cres.library.get_function(cres.fndesc.llvm_func_name)
sin, sout = gu_signature
# These are necessary for build_gufunc_wrapper to find external symbols
_launch_threads()
_init()
wrapper_ptr, env, wrapper_name = build_gufunc_wrapper(
llvm_func, cres, sin, sout, {}
)
cres.library._ensure_finalized()
if config.DEBUG_ARRAY_OPT:
print("parallel function = ", wrapper_name, cres)
# loadvars for loop_ranges
def load_range(v):
if isinstance(v, ir.Var):
return lowerer.loadvar(v.name)
else:
return context.get_constant(types.uintp, v)
num_dim = len(loop_ranges)
for i in range(num_dim):
start, stop, step = loop_ranges[i]
start = load_range(start)
stop = load_range(stop)
assert step == 1 # We do not support loop steps other than 1
step = load_range(step)
loop_ranges[i] = (start, stop, step)
if config.DEBUG_ARRAY_OPT:
print(
"call_parallel_gufunc loop_ranges[{}] = ".format(i), start, stop, step
)
cgutils.printf(
builder, "loop range[{}]: %d %d (%d)\n".format(i), start, stop, step
)
# Commonly used LLVM types and constants
byte_t = lc.Type.int(8)
byte_ptr_t = lc.Type.pointer(byte_t)
byte_ptr_ptr_t = lc.Type.pointer(byte_ptr_t)
intp_t = context.get_value_type(types.intp)
uintp_t = context.get_value_type(types.uintp)
intp_ptr_t = lc.Type.pointer(intp_t)
uintp_ptr_t = lc.Type.pointer(uintp_t)
zero = context.get_constant(types.uintp, 0)
one = context.get_constant(types.uintp, 1)
one_type = one.type
sizeof_intp = context.get_abi_sizeof(intp_t)
# Prepare sched, first pop it out of expr_args, outer_sig, and gu_signature
sched_name = expr_args.pop(0)
sched_typ = outer_sig.args[0]
sched_sig = sin.pop(0)
if config.DEBUG_ARRAY_OPT:
print("Parfor has potentially negative start", index_var_typ.signed)
if index_var_typ.signed:
sched_type = intp_t
sched_ptr_type = intp_ptr_t
else:
sched_type = uintp_t
sched_ptr_type = uintp_ptr_t
# Call do_scheduling with appropriate arguments
dim_starts = cgutils.alloca_once(
builder,
sched_type,
size=context.get_constant(types.uintp, num_dim),
name="dims",
)
dim_stops = cgutils.alloca_once(
builder,
sched_type,
size=context.get_constant(types.uintp, num_dim),
name="dims",
)
for i in range(num_dim):
start, stop, step = loop_ranges[i]
if start.type != one_type:
start = builder.sext(start, one_type)
if stop.type != one_type:
stop = builder.sext(stop, one_type)
if step.type != one_type:
step = builder.sext(step, one_type)
# substract 1 because do-scheduling takes inclusive ranges
stop = builder.sub(stop, one)
builder.store(
start, builder.gep(dim_starts, [context.get_constant(types.uintp, i)])
)
builder.store(
stop, builder.gep(dim_stops, [context.get_constant(types.uintp, i)])
)
sched_size = get_thread_count() * num_dim * 2
sched = cgutils.alloca_once(
builder,
sched_type,
size=context.get_constant(types.uintp, sched_size),
name="sched",
)
debug_flag = 1 if config.DEBUG_ARRAY_OPT else 0
scheduling_fnty = lc.Type.function(
intp_ptr_t,
[uintp_t, sched_ptr_type, sched_ptr_type, uintp_t, sched_ptr_type, intp_t],
)
if index_var_typ.signed:
do_scheduling = builder.module.get_or_insert_function(
scheduling_fnty, name="do_scheduling_signed"
)
else:
do_scheduling = builder.module.get_or_insert_function(
scheduling_fnty, name="do_scheduling_unsigned"
)
builder.call(
do_scheduling,
[
context.get_constant(types.uintp, num_dim),
dim_starts,
dim_stops,
context.get_constant(types.uintp, get_thread_count()),
sched,
context.get_constant(types.intp, debug_flag),
],
)
# Get the LLVM vars for the Numba IR reduction array vars.
redarrs = [lowerer.loadvar(x.name) for x in list(redarrdict.values())]
nredvars = len(redvars)
ninouts = len(expr_args) - nredvars
if config.DEBUG_ARRAY_OPT:
for i in range(get_thread_count()):
cgutils.printf(builder, "sched[" + str(i) + "] = ")
for j in range(num_dim * 2):
cgutils.printf(
builder,
"%d ",
builder.load(
builder.gep(
sched,
[context.get_constant(types.intp, i * num_dim * 2 + j)],
)
),
)
cgutils.printf(builder, "\n")
# ----------------------------------------------------------------------------
# Prepare arguments: args, shapes, steps, data
all_args = [lowerer.loadvar(x) for x in expr_args[:ninouts]] + redarrs
num_args = len(all_args)
num_inps = len(sin) + 1
args = cgutils.alloca_once(
builder,
byte_ptr_t,
size=context.get_constant(types.intp, 1 + num_args),
name="pargs",
)
array_strides = []
# sched goes first
builder.store(builder.bitcast(sched, byte_ptr_t), args)
array_strides.append(context.get_constant(types.intp, sizeof_intp))
red_shapes = {}
rv_to_arg_dict = {}
# followed by other arguments
for i in range(num_args):
arg = all_args[i]
var = expr_args[i]
aty = expr_arg_types[i]
dst = builder.gep(args, [context.get_constant(types.intp, i + 1)])
if i >= ninouts: # reduction variables
ary = context.make_array(aty)(context, builder, arg)
strides = cgutils.unpack_tuple(builder, ary.strides, aty.ndim)
ary_shapes = cgutils.unpack_tuple(builder, ary.shape, aty.ndim)
# Start from 1 because we skip the first dimension of length num_threads just like sched.
for j in range(1, len(strides)):
array_strides.append(strides[j])
red_shapes[i] = ary_shapes[1:]
builder.store(builder.bitcast(ary.data, byte_ptr_t), dst)
elif isinstance(aty, types.ArrayCompatible):
if var in races:
typ = (
context.get_data_type(aty.dtype)
if aty.dtype != types.boolean
else lc.Type.int(1)
)
rv_arg = cgutils.alloca_once(builder, typ)
builder.store(arg, rv_arg)
builder.store(builder.bitcast(rv_arg, byte_ptr_t), dst)
rv_to_arg_dict[var] = (arg, rv_arg)
array_strides.append(
context.get_constant(types.intp, context.get_abi_sizeof(typ))
)
else:
ary = context.make_array(aty)(context, builder, arg)
strides = cgutils.unpack_tuple(builder, ary.strides, aty.ndim)
for j in range(len(strides)):
array_strides.append(strides[j])
builder.store(builder.bitcast(ary.data, byte_ptr_t), dst)
else:
if i < num_inps:
# Scalar input, need to store the value in an array of size 1
typ = (
context.get_data_type(aty)
if aty != types.boolean
else lc.Type.int(1)
)
ptr = cgutils.alloca_once(builder, typ)
builder.store(arg, ptr)
else:
# Scalar output, must allocate
typ = (
context.get_data_type(aty)
if aty != types.boolean
else lc.Type.int(1)
)
ptr = cgutils.alloca_once(builder, typ)
builder.store(builder.bitcast(ptr, byte_ptr_t), dst)
# ----------------------------------------------------------------------------
# Next, we prepare the individual dimension info recorded in gu_signature
sig_dim_dict = {}
occurances = []
occurances = [sched_sig[0]]
sig_dim_dict[sched_sig[0]] = context.get_constant(types.intp, 2 * num_dim)
assert len(expr_args) == len(all_args)
assert len(expr_args) == len(expr_arg_types)
assert len(expr_args) == len(sin + sout)
assert len(expr_args) == len(outer_sig.args[1:])
for var, arg, aty, gu_sig in zip(expr_args, all_args, expr_arg_types, sin + sout):
if isinstance(aty, types.npytypes.Array):
i = aty.ndim - len(gu_sig)
else:
i = 0
if config.DEBUG_ARRAY_OPT:
print("var =", var, "gu_sig =", gu_sig, "type =", aty, "i =", i)
for dim_sym in gu_sig:
if config.DEBUG_ARRAY_OPT:
print("var = ", var, " type = ", aty)
if var in races:
sig_dim_dict[dim_sym] = context.get_constant(types.intp, 1)
else:
ary = context.make_array(aty)(context, builder, arg)
shapes = cgutils.unpack_tuple(builder, ary.shape, aty.ndim)
sig_dim_dict[dim_sym] = shapes[i]
if not (dim_sym in occurances):
if config.DEBUG_ARRAY_OPT:
print("dim_sym = ", dim_sym, ", i = ", i)
cgutils.printf(builder, dim_sym + " = %d\n", sig_dim_dict[dim_sym])
occurances.append(dim_sym)
i = i + 1
# ----------------------------------------------------------------------------
# Prepare shapes, which is a single number (outer loop size), followed by
# the size of individual shape variables.
nshapes = len(sig_dim_dict) + 1
shapes = cgutils.alloca_once(builder, intp_t, size=nshapes, name="pshape")
# For now, outer loop size is the same as number of threads
builder.store(context.get_constant(types.intp, get_thread_count()), shapes)
# Individual shape variables go next
i = 1
for dim_sym in occurances:
if config.DEBUG_ARRAY_OPT:
cgutils.printf(builder, dim_sym + " = %d\n", sig_dim_dict[dim_sym])
builder.store(
sig_dim_dict[dim_sym],
builder.gep(shapes, [context.get_constant(types.intp, i)]),
)
i = i + 1
# ----------------------------------------------------------------------------
# Prepare steps for each argument. Note that all steps are counted in
# bytes.
num_steps = num_args + 1 + len(array_strides)
steps = cgutils.alloca_once(
builder, intp_t, size=context.get_constant(types.intp, num_steps), name="psteps"
)
# First goes the step size for sched, which is 2 * num_dim
builder.store(context.get_constant(types.intp, 2 * num_dim * sizeof_intp), steps)
# The steps for all others are 0, except for reduction results.
for i in range(num_args):
if i >= ninouts: # steps for reduction vars are abi_sizeof(typ)
j = i - ninouts
# Get the base dtype of the reduction array.
redtyp = lowerer.fndesc.typemap[redvars[j]]
red_stride = None
if isinstance(redtyp, types.npytypes.Array):
redtyp = redtyp.dtype
red_stride = red_shapes[i]
typ = context.get_value_type(redtyp)
sizeof = context.get_abi_sizeof(typ)
# Set stepsize to the size of that dtype.
stepsize = context.get_constant(types.intp, sizeof)
if red_stride != None:
for rs in red_stride:
stepsize = builder.mul(stepsize, rs)
else:
# steps are strides
stepsize = zero
dst = builder.gep(steps, [context.get_constant(types.intp, 1 + i)])
builder.store(stepsize, dst)
for j in range(len(array_strides)):
dst = builder.gep(steps, [context.get_constant(types.intp, 1 + num_args + j)])
builder.store(array_strides[j], dst)
# ----------------------------------------------------------------------------
# prepare data
data = builder.inttoptr(zero, byte_ptr_t)
fnty = lc.Type.function(
lc.Type.void(), [byte_ptr_ptr_t, intp_ptr_t, intp_ptr_t, byte_ptr_t]
)
fn = builder.module.get_or_insert_function(fnty, name=wrapper_name)
if config.DEBUG_ARRAY_OPT:
cgutils.printf(builder, "before calling kernel %p\n", fn)
result = builder.call(fn, [args, shapes, steps, data])
if config.DEBUG_ARRAY_OPT:
cgutils.printf(builder, "after calling kernel %p\n", fn)
for k, v in rv_to_arg_dict.items():
arg, rv_arg = v
only_elem_ptr = builder.gep(rv_arg, [context.get_constant(types.intp, 0)])
builder.store(builder.load(only_elem_ptr), lowerer.getvar(k))
scope = init_block.scope
loc = init_block.loc
|
def call_parallel_gufunc(
lowerer,
cres,
gu_signature,
outer_sig,
expr_args,
loop_ranges,
redvars,
reddict,
init_block,
index_var_typ,
races,
):
"""
Adds the call to the gufunc function from the main function.
"""
context = lowerer.context
builder = lowerer.builder
library = lowerer.library
from .parallel import (
ParallelGUFuncBuilder,
build_gufunc_wrapper,
get_thread_count,
_launch_threads,
_init,
)
if config.DEBUG_ARRAY_OPT:
print("make_parallel_loop")
print("args = ", expr_args)
print(
"outer_sig = ",
outer_sig.args,
outer_sig.return_type,
outer_sig.recvr,
outer_sig.pysig,
)
print("loop_ranges = ", loop_ranges)
# Build the wrapper for GUFunc
args, return_type = sigutils.normalize_signature(outer_sig)
llvm_func = cres.library.get_function(cres.fndesc.llvm_func_name)
sin, sout = gu_signature
# These are necessary for build_gufunc_wrapper to find external symbols
_launch_threads()
_init()
wrapper_ptr, env, wrapper_name = build_gufunc_wrapper(
llvm_func, cres, sin, sout, {}
)
cres.library._ensure_finalized()
if config.DEBUG_ARRAY_OPT:
print("parallel function = ", wrapper_name, cres)
# loadvars for loop_ranges
def load_range(v):
if isinstance(v, ir.Var):
return lowerer.loadvar(v.name)
else:
return context.get_constant(types.uintp, v)
num_dim = len(loop_ranges)
for i in range(num_dim):
start, stop, step = loop_ranges[i]
start = load_range(start)
stop = load_range(stop)
assert step == 1 # We do not support loop steps other than 1
step = load_range(step)
loop_ranges[i] = (start, stop, step)
if config.DEBUG_ARRAY_OPT:
print(
"call_parallel_gufunc loop_ranges[{}] = ".format(i), start, stop, step
)
cgutils.printf(
builder, "loop range[{}]: %d %d (%d)\n".format(i), start, stop, step
)
# Commonly used LLVM types and constants
byte_t = lc.Type.int(8)
byte_ptr_t = lc.Type.pointer(byte_t)
byte_ptr_ptr_t = lc.Type.pointer(byte_ptr_t)
intp_t = context.get_value_type(types.intp)
uintp_t = context.get_value_type(types.uintp)
intp_ptr_t = lc.Type.pointer(intp_t)
uintp_ptr_t = lc.Type.pointer(uintp_t)
zero = context.get_constant(types.uintp, 0)
one = context.get_constant(types.uintp, 1)
one_type = one.type
sizeof_intp = context.get_abi_sizeof(intp_t)
# Prepare sched, first pop it out of expr_args, outer_sig, and gu_signature
sched_name = expr_args.pop(0)
sched_typ = outer_sig.args[0]
sched_sig = sin.pop(0)
if config.DEBUG_ARRAY_OPT:
print("Parfor has potentially negative start", index_var_typ.signed)
if index_var_typ.signed:
sched_type = intp_t
sched_ptr_type = intp_ptr_t
else:
sched_type = uintp_t
sched_ptr_type = uintp_ptr_t
# Call do_scheduling with appropriate arguments
dim_starts = cgutils.alloca_once(
builder,
sched_type,
size=context.get_constant(types.uintp, num_dim),
name="dims",
)
dim_stops = cgutils.alloca_once(
builder,
sched_type,
size=context.get_constant(types.uintp, num_dim),
name="dims",
)
for i in range(num_dim):
start, stop, step = loop_ranges[i]
if start.type != one_type:
start = builder.sext(start, one_type)
if stop.type != one_type:
stop = builder.sext(stop, one_type)
if step.type != one_type:
step = builder.sext(step, one_type)
# substract 1 because do-scheduling takes inclusive ranges
stop = builder.sub(stop, one)
builder.store(
start, builder.gep(dim_starts, [context.get_constant(types.uintp, i)])
)
builder.store(
stop, builder.gep(dim_stops, [context.get_constant(types.uintp, i)])
)
sched_size = get_thread_count() * num_dim * 2
sched = cgutils.alloca_once(
builder,
sched_type,
size=context.get_constant(types.uintp, sched_size),
name="sched",
)
debug_flag = 1 if config.DEBUG_ARRAY_OPT else 0
scheduling_fnty = lc.Type.function(
intp_ptr_t,
[uintp_t, sched_ptr_type, sched_ptr_type, uintp_t, sched_ptr_type, intp_t],
)
if index_var_typ.signed:
do_scheduling = builder.module.get_or_insert_function(
scheduling_fnty, name="do_scheduling_signed"
)
else:
do_scheduling = builder.module.get_or_insert_function(
scheduling_fnty, name="do_scheduling_unsigned"
)
builder.call(
do_scheduling,
[
context.get_constant(types.uintp, num_dim),
dim_starts,
dim_stops,
context.get_constant(types.uintp, get_thread_count()),
sched,
context.get_constant(types.intp, debug_flag),
],
)
# init reduction array allocation here.
nredvars = len(redvars)
ninouts = len(expr_args) - nredvars
redarrs = []
for i in range(nredvars):
redvar_typ = lowerer.fndesc.typemap[redvars[i]]
# we need to use the default initial value instead of existing value in
# redvar if available
init_val = reddict[redvars[i]][0]
if init_val != None:
val = context.get_constant(redvar_typ, init_val)
else:
val = lowerer.loadvar(redvars[i])
typ = context.get_value_type(redvar_typ)
size = get_thread_count()
arr = cgutils.alloca_once(
builder, typ, size=context.get_constant(types.uintp, size)
)
redarrs.append(arr)
for j in range(size):
dst = builder.gep(arr, [context.get_constant(types.uintp, j)])
builder.store(val, dst)
if config.DEBUG_ARRAY_OPT:
for i in range(get_thread_count()):
cgutils.printf(builder, "sched[" + str(i) + "] = ")
for j in range(num_dim * 2):
cgutils.printf(
builder,
"%d ",
builder.load(
builder.gep(
sched,
[context.get_constant(types.intp, i * num_dim * 2 + j)],
)
),
)
cgutils.printf(builder, "\n")
# Prepare arguments: args, shapes, steps, data
all_args = [lowerer.loadvar(x) for x in expr_args[:ninouts]] + redarrs
num_args = len(all_args)
num_inps = len(sin) + 1
args = cgutils.alloca_once(
builder,
byte_ptr_t,
size=context.get_constant(types.intp, 1 + num_args),
name="pargs",
)
array_strides = []
# sched goes first
builder.store(builder.bitcast(sched, byte_ptr_t), args)
array_strides.append(context.get_constant(types.intp, sizeof_intp))
rv_to_arg_dict = {}
# followed by other arguments
for i in range(num_args):
arg = all_args[i]
var = expr_args[i]
aty = outer_sig.args[i + 1] # skip first argument sched
dst = builder.gep(args, [context.get_constant(types.intp, i + 1)])
if i >= ninouts: # reduction variables
builder.store(builder.bitcast(arg, byte_ptr_t), dst)
elif isinstance(aty, types.ArrayCompatible):
if var in races:
typ = (
context.get_data_type(aty.dtype)
if aty.dtype != types.boolean
else lc.Type.int(1)
)
rv_arg = cgutils.alloca_once(builder, typ)
builder.store(arg, rv_arg)
builder.store(builder.bitcast(rv_arg, byte_ptr_t), dst)
rv_to_arg_dict[var] = (arg, rv_arg)
array_strides.append(
context.get_constant(types.intp, context.get_abi_sizeof(typ))
)
else:
ary = context.make_array(aty)(context, builder, arg)
strides = cgutils.unpack_tuple(builder, ary.strides, aty.ndim)
for j in range(len(strides)):
array_strides.append(strides[j])
builder.store(builder.bitcast(ary.data, byte_ptr_t), dst)
else:
if i < num_inps:
# Scalar input, need to store the value in an array of size 1
typ = (
context.get_data_type(aty)
if aty != types.boolean
else lc.Type.int(1)
)
ptr = cgutils.alloca_once(builder, typ)
builder.store(arg, ptr)
else:
# Scalar output, must allocate
typ = (
context.get_data_type(aty)
if aty != types.boolean
else lc.Type.int(1)
)
ptr = cgutils.alloca_once(builder, typ)
builder.store(builder.bitcast(ptr, byte_ptr_t), dst)
# Next, we prepare the individual dimension info recorded in gu_signature
sig_dim_dict = {}
occurances = []
occurances = [sched_sig[0]]
sig_dim_dict[sched_sig[0]] = context.get_constant(types.intp, 2 * num_dim)
for var, arg, aty, gu_sig in zip(
expr_args[:ninouts], all_args[:ninouts], outer_sig.args[1:], sin + sout
):
if config.DEBUG_ARRAY_OPT:
print("var = ", var, " gu_sig = ", gu_sig)
i = 0
for dim_sym in gu_sig:
if config.DEBUG_ARRAY_OPT:
print("var = ", var, " type = ", aty)
if var in races:
sig_dim_dict[dim_sym] = context.get_constant(types.intp, 1)
else:
ary = context.make_array(aty)(context, builder, arg)
shapes = cgutils.unpack_tuple(builder, ary.shape, aty.ndim)
sig_dim_dict[dim_sym] = shapes[i]
if not (dim_sym in occurances):
if config.DEBUG_ARRAY_OPT:
print("dim_sym = ", dim_sym, ", i = ", i)
cgutils.printf(builder, dim_sym + " = %d\n", sig_dim_dict[dim_sym])
occurances.append(dim_sym)
i = i + 1
# Prepare shapes, which is a single number (outer loop size), followed by
# the size of individual shape variables.
nshapes = len(sig_dim_dict) + 1
shapes = cgutils.alloca_once(builder, intp_t, size=nshapes, name="pshape")
# For now, outer loop size is the same as number of threads
builder.store(context.get_constant(types.intp, get_thread_count()), shapes)
# Individual shape variables go next
i = 1
for dim_sym in occurances:
if config.DEBUG_ARRAY_OPT:
cgutils.printf(builder, dim_sym + " = %d\n", sig_dim_dict[dim_sym])
builder.store(
sig_dim_dict[dim_sym],
builder.gep(shapes, [context.get_constant(types.intp, i)]),
)
i = i + 1
# Prepare steps for each argument. Note that all steps are counted in
# bytes.
num_steps = num_args + 1 + len(array_strides)
steps = cgutils.alloca_once(
builder, intp_t, size=context.get_constant(types.intp, num_steps), name="psteps"
)
# First goes the step size for sched, which is 2 * num_dim
builder.store(context.get_constant(types.intp, 2 * num_dim * sizeof_intp), steps)
# The steps for all others are 0, except for reduction results.
for i in range(num_args):
if i >= ninouts: # steps for reduction vars are abi_sizeof(typ)
j = i - ninouts
typ = context.get_value_type(lowerer.fndesc.typemap[redvars[j]])
sizeof = context.get_abi_sizeof(typ)
stepsize = context.get_constant(types.intp, sizeof)
else:
# steps are strides
stepsize = zero
dst = builder.gep(steps, [context.get_constant(types.intp, 1 + i)])
builder.store(stepsize, dst)
for j in range(len(array_strides)):
dst = builder.gep(steps, [context.get_constant(types.intp, 1 + num_args + j)])
builder.store(array_strides[j], dst)
# prepare data
data = builder.inttoptr(zero, byte_ptr_t)
fnty = lc.Type.function(
lc.Type.void(), [byte_ptr_ptr_t, intp_ptr_t, intp_ptr_t, byte_ptr_t]
)
fn = builder.module.get_or_insert_function(fnty, name=wrapper_name)
if config.DEBUG_ARRAY_OPT:
cgutils.printf(builder, "before calling kernel %p\n", fn)
result = builder.call(fn, [args, shapes, steps, data])
if config.DEBUG_ARRAY_OPT:
cgutils.printf(builder, "after calling kernel %p\n", fn)
for k, v in rv_to_arg_dict.items():
arg, rv_arg = v
only_elem_ptr = builder.gep(rv_arg, [context.get_constant(types.intp, 0)])
builder.store(builder.load(only_elem_ptr), lowerer.getvar(k))
scope = init_block.scope
loc = init_block.loc
calltypes = lowerer.fndesc.calltypes
# Accumulate all reduction arrays back to a single value
for i in range(get_thread_count()):
for name, arr in zip(redvars, redarrs):
tmpname = mk_unique_var(name)
src = builder.gep(arr, [context.get_constant(types.intp, i)])
val = builder.load(src)
vty = lowerer.fndesc.typemap[name]
lowerer.fndesc.typemap[tmpname] = vty
lowerer.storevar(val, tmpname)
tmpvar = ir.Var(scope, tmpname, loc)
tmp_assign = ir.Assign(tmpvar, ir.Var(scope, name + "#init", loc), loc)
if name + "#init" not in lowerer.fndesc.typemap:
lowerer.fndesc.typemap[name + "#init"] = vty
lowerer.lower_inst(tmp_assign)
# generate code for combining reduction variable with thread output
for inst in reddict[name][1]:
lowerer.lower_inst(inst)
# TODO: scalar output must be assigned back to corresponding output
# variables
return
|
https://github.com/numba/numba/issues/3069
|
Traceback (most recent call last):
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\errors.py", line 491, in new_error_context
yield
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\lowering.py", line 216, in lower_block
self.lower_inst(inst)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\lowering.py", line 365, in lower_inst
func(self, inst)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\npyufunc\parfor.py", line 83, in _lower_parfor_parallel
bool(alias_map), index_var_typ)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\npyufunc\parfor.py", line 357, in _create_gufunc_for_parfor_body
typemap[arr] = types.npytypes.Array(typemap[var], 1, "C")
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\types\abstract.py", line 60, in __call__
inst = type.__call__(cls, *args, **kwargs)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\types\npytypes.py", line 288, in __init__
super(Array, self).__init__(dtype, ndim, layout, name=name)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\types\common.py", line 51, in __init__
raise TypeError("Buffer dtype cannot be buffer")
TypeError: Buffer dtype cannot be buffer
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\dispatcher.py", line 360, in _compile_for_args
raise e
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\dispatcher.py", line 311, in _compile_for_args
return self.compile(tuple(argtypes))
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\dispatcher.py", line 618, in compile
cres = self._compiler.compile(args, return_type)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\dispatcher.py", line 83, in compile
pipeline_class=self.pipeline_class)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 871, in compile_extra
return pipeline.compile_extra(func)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 365, in compile_extra
return self._compile_bytecode()
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 802, in _compile_bytecode
return self._compile_core()
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 789, in _compile_core
res = pm.run(self.status)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 251, in run
raise patched_exception
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 243, in run
stage()
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 676, in stage_nopython_backend
self._backend(lowerfn, objectmode=False)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 626, in _backend
lowered = lowerfn()
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 613, in backend_nopython_mode
self.flags)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 990, in native_lowering_stage
lower.lower()
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\lowering.py", line 135, in lower
self.lower_normal_function(self.fndesc)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\lowering.py", line 176, in lower_normal_function
entry_block_tail = self.lower_function_body()
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\lowering.py", line 201, in lower_function_body
self.lower_block(block)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\lowering.py", line 216, in lower_block
self.lower_inst(inst)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\contextlib.py", line 99, in __exit__
self.gen.throw(type, value, traceback)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\errors.py", line 499, in new_error_context
six.reraise(type(newerr), newerr, tb)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\six.py", line 659, in reraise
raise value
numba.errors.LoweringError: Failed at nopython (nopython mode backend)
Buffer dtype cannot be buffer
File "<stdin>", line 5:
[1] During: lowering "id=6[LoopNest(index_variable = parfor_index.271, range = (0, $16.3, 1))]{28: <ir.Block at <stdin> (5)>}Var(parfor_index.271, <stdin> (5))" at <stdin> (5)
-------------------------------------------------------------------------------
This should not have happened, a problem has occurred in Numba's internals.
Please report the error message and traceback, along with a minimal reproducer
at: https://github.com/numba/numba/issues/new
If you need help writing a minimal reproducer please see:
http://matthewrocklin.com/blog/work/2018/02/28/minimal-bug-reports
If more help is needed please feel free to speak to the Numba core developers
directly at: https://gitter.im/numba/numba
Thanks in advance for your help in improving Numba!
|
TypeError
|
def get_shape_classes(self, var, typemap=None):
"""get the shape classes for a given variable.
If a typemap is specified then use it for type resolution
"""
# We get shape classes from the equivalence set but that
# keeps its own typemap at a time prior to lowering. So
# if something is added during lowering then we can pass
# in a type map to use. We temporarily replace the
# equivalence set typemap, do the work and then restore
# the original on the way out.
if typemap is not None:
save_typemap = self.equiv_set.typemap
self.equiv_set.typemap = typemap
res = self.equiv_set.get_shape_classes(var)
if typemap is not None:
self.equiv_set.typemap = save_typemap
return res
|
def get_shape_classes(self, var):
return self.equiv_set.get_shape_classes(var)
|
https://github.com/numba/numba/issues/3069
|
Traceback (most recent call last):
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\errors.py", line 491, in new_error_context
yield
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\lowering.py", line 216, in lower_block
self.lower_inst(inst)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\lowering.py", line 365, in lower_inst
func(self, inst)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\npyufunc\parfor.py", line 83, in _lower_parfor_parallel
bool(alias_map), index_var_typ)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\npyufunc\parfor.py", line 357, in _create_gufunc_for_parfor_body
typemap[arr] = types.npytypes.Array(typemap[var], 1, "C")
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\types\abstract.py", line 60, in __call__
inst = type.__call__(cls, *args, **kwargs)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\types\npytypes.py", line 288, in __init__
super(Array, self).__init__(dtype, ndim, layout, name=name)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\types\common.py", line 51, in __init__
raise TypeError("Buffer dtype cannot be buffer")
TypeError: Buffer dtype cannot be buffer
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\dispatcher.py", line 360, in _compile_for_args
raise e
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\dispatcher.py", line 311, in _compile_for_args
return self.compile(tuple(argtypes))
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\dispatcher.py", line 618, in compile
cres = self._compiler.compile(args, return_type)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\dispatcher.py", line 83, in compile
pipeline_class=self.pipeline_class)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 871, in compile_extra
return pipeline.compile_extra(func)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 365, in compile_extra
return self._compile_bytecode()
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 802, in _compile_bytecode
return self._compile_core()
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 789, in _compile_core
res = pm.run(self.status)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 251, in run
raise patched_exception
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 243, in run
stage()
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 676, in stage_nopython_backend
self._backend(lowerfn, objectmode=False)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 626, in _backend
lowered = lowerfn()
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 613, in backend_nopython_mode
self.flags)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\compiler.py", line 990, in native_lowering_stage
lower.lower()
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\lowering.py", line 135, in lower
self.lower_normal_function(self.fndesc)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\lowering.py", line 176, in lower_normal_function
entry_block_tail = self.lower_function_body()
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\lowering.py", line 201, in lower_function_body
self.lower_block(block)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\lowering.py", line 216, in lower_block
self.lower_inst(inst)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\contextlib.py", line 99, in __exit__
self.gen.throw(type, value, traceback)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\errors.py", line 499, in new_error_context
six.reraise(type(newerr), newerr, tb)
File "C:\Users\kittoku\Anaconda3\envs\test_env\lib\site-packages\numba\six.py", line 659, in reraise
raise value
numba.errors.LoweringError: Failed at nopython (nopython mode backend)
Buffer dtype cannot be buffer
File "<stdin>", line 5:
[1] During: lowering "id=6[LoopNest(index_variable = parfor_index.271, range = (0, $16.3, 1))]{28: <ir.Block at <stdin> (5)>}Var(parfor_index.271, <stdin> (5))" at <stdin> (5)
-------------------------------------------------------------------------------
This should not have happened, a problem has occurred in Numba's internals.
Please report the error message and traceback, along with a minimal reproducer
at: https://github.com/numba/numba/issues/new
If you need help writing a minimal reproducer please see:
http://matthewrocklin.com/blog/work/2018/02/28/minimal-bug-reports
If more help is needed please feel free to speak to the Numba core developers
directly at: https://gitter.im/numba/numba
Thanks in advance for your help in improving Numba!
|
TypeError
|
def strformat(self, nlines_up=2):
try:
# Try to get a relative path
# ipython/jupyter input just returns as self.filename
path = os.path.relpath(self.filename)
except ValueError:
# Fallback to absolute path if error occurred in getting the
# relative path.
# This may happen on windows if the drive is different
path = os.path.abspath(self.filename)
lines = linecache.getlines(path)
ret = [] # accumulates output
if lines and self.line:
def count_spaces(string):
spaces = 0
for x in itertools.takewhile(str.isspace, str(string)):
spaces += 1
return spaces
# A few places in the code still use no `loc` or default to line 1
# this is often in places where exceptions are used for the purposes
# of flow control. As a result max is in use to prevent slice from
# `[negative: positive]`
selected = lines[max(0, self.line - nlines_up) : self.line]
# see if selected contains a definition
def_found = False
for x in selected:
if "def " in x:
def_found = True
# no definition found, try and find one
if not def_found:
# try and find a def, go backwards from error line
fn_name = None
for x in reversed(lines[: self.line - 1]):
if "def " in x:
fn_name = x
break
if fn_name:
ret.append(fn_name)
spaces = count_spaces(x)
ret.append(" " * (4 + spaces) + "<source elided>\n")
if selected:
ret.extend(selected[:-1])
ret.append(_termcolor.highlight(selected[-1]))
# point at the problem with a caret
spaces = count_spaces(selected[-1])
ret.append(" " * (spaces) + _termcolor.indicate("^"))
# if in the REPL source may not be available
if not ret:
ret = "<source missing, REPL in use?>"
err = _termcolor.filename('\nFile "%s", line %d:') + "\n%s"
tmp = err % (path, self.line, _termcolor.code("".join(ret)))
return tmp
|
def strformat(self, nlines_up=2):
try:
# Try to get a relative path
# ipython/jupyter input just returns as self.filename
path = os.path.relpath(self.filename)
except ValueError:
# Fallback to absolute path if error occurred in getting the
# relative path.
# This may happen on windows if the drive is different
path = os.path.abspath(self.filename)
lines = linecache.getlines(path)
ret = [] # accumulates output
if lines and self.line:
def count_spaces(string):
spaces = 0
for x in itertools.takewhile(str.isspace, str(string)):
spaces += 1
return spaces
selected = lines[self.line - nlines_up : self.line]
# see if selected contains a definition
def_found = False
for x in selected:
if "def " in x:
def_found = True
# no definition found, try and find one
if not def_found:
# try and find a def, go backwards from error line
fn_name = None
for x in reversed(lines[: self.line - 1]):
if "def " in x:
fn_name = x
break
if fn_name:
ret.append(fn_name)
spaces = count_spaces(x)
ret.append(" " * (4 + spaces) + "<source elided>\n")
ret.extend(selected[:-1])
ret.append(_termcolor.highlight(selected[-1]))
# point at the problem with a caret
spaces = count_spaces(selected[-1])
ret.append(" " * (spaces) + _termcolor.indicate("^"))
# if in the REPL source may not be available
if not ret:
ret = "<source missing, REPL in use?>"
err = _termcolor.filename('\nFile "%s", line %d:') + "\n%s"
tmp = err % (path, self.line, _termcolor.code("".join(ret)))
return tmp
|
https://github.com/numba/numba/issues/3135
|
$ conda create -n dsdebug python=3.6
[...]
The following NEW packages will be INSTALLED:
ca-certificates: 2018.03.07-0
certifi: 2018.4.16-py36_0
libcxx: 4.0.1-h579ed51_0
libcxxabi: 4.0.1-hebd6815_0
libedit: 3.1.20170329-hb402a30_2
libffi: 3.2.1-h475c297_4
ncurses: 6.1-h0a44026_0
openssl: 1.0.2o-h26aff7b_0
pip: 10.0.1-py36_0
python: 3.6.6-hc167b69_0
readline: 7.0-hc1231fa_4
setuptools: 39.2.0-py36_0
sqlite: 3.24.0-ha441bb4_0
tk: 8.6.7-h35a86e2_3
wheel: 0.31.1-py36_0
xz: 5.2.4-h1de35cc_4
zlib: 1.2.11-hf3cbc9b_2
[...]
$ conda activate dsdebug
(dsdebug)$ conda install numba numpy
[...]
The following NEW packages will be INSTALLED:
blas: 1.0-mkl
intel-openmp: 2018.0.3-0
libgfortran: 3.0.1-h93005f0_2
llvmlite: 0.24.0-py36hc454e04_0
mkl: 2018.0.3-1
mkl_fft: 1.0.2-py36h6b9c3cc_0
mkl_random: 1.0.1-py36h5d10147_1
numba: 0.39.0-py36h6440ff4_0
numpy: 1.14.5-py36h648b28d_4
numpy-base: 1.14.5-py36ha9ae307_4
[...]
(dsdebug)$ python dstest.py
Traceback (most recent call last):
File "/Users/cball/anaconda3/envs/dsdebug/lib/python3.6/site-packages/numba/errors.py", line 577, in new_error_context
yield
File "/Users/cball/anaconda3/envs/dsdebug/lib/python3.6/site-packages/numba/lowering.py", line 254, in lower_block
self.lower_inst(inst)
File "/Users/cball/anaconda3/envs/dsdebug/lib/python3.6/site-packages/numba/lowering.py", line 303, in lower_inst
val = self.lower_assign(ty, inst)
File "/Users/cball/anaconda3/envs/dsdebug/lib/python3.6/site-packages/numba/lowering.py", line 449, in lower_assign
return self.lower_expr(ty, value)
File "/Users/cball/anaconda3/envs/dsdebug/lib/python3.6/site-packages/numba/lowering.py", line 940, in lower_expr
res = self.context.special_ops[expr.op](self, expr)
File "/Users/cball/anaconda3/envs/dsdebug/lib/python3.6/site-packages/numba/npyufunc/array_exprs.py", line 388, in _lower_array_expr
cres = context.compile_subroutine_no_cache(builder, impl, inner_sig, flags=flags)
File "/Users/cball/anaconda3/envs/dsdebug/lib/python3.6/site-packages/numba/targets/base.py", line 799, in compile_subroutine_no_cache
locals=locals)
File "/Users/cball/anaconda3/envs/dsdebug/lib/python3.6/site-packages/numba/compiler.py", line 897, in compile_internal
return pipeline.compile_extra(func)
File "/Users/cball/anaconda3/envs/dsdebug/lib/python3.6/site-packages/numba/compiler.py", line 367, in compile_extra
return self._compile_bytecode()
File "/Users/cball/anaconda3/envs/dsdebug/lib/python3.6/site-packages/numba/compiler.py", line 804, in _compile_bytecode
return self._compile_core()
File "/Users/cball/anaconda3/envs/dsdebug/lib/python3.6/site-packages/numba/compiler.py", line 791, in _compile_core
res = pm.run(self.status)
File "/Users/cball/anaconda3/envs/dsdebug/lib/python3.6/site-packages/numba/compiler.py", line 253, in run
raise patched_exception
File "/Users/cball/anaconda3/envs/dsdebug/lib/python3.6/site-packages/numba/compiler.py", line 245, in run
stage()
File "/Users/cball/anaconda3/envs/dsdebug/lib/python3.6/site-packages/numba/compiler.py", line 480, in stage_generic_rewrites
self, self.func_ir)
File "/Users/cball/anaconda3/envs/dsdebug/lib/python3.6/site-packages/numba/rewrites/registry.py", line 70, in apply
pipeline.calltypes)
File "/Users/cball/anaconda3/envs/dsdebug/lib/python3.6/site-packages/numba/rewrites/static_binop.py", line 23, in match
self.static_rhs[expr] = func_ir.infer_constant(expr.rhs)
File "/Users/cball/anaconda3/envs/dsdebug/lib/python3.6/site-packages/numba/ir.py", line 964, in infer_constant
return self._consts.infer_constant(name)
File "/Users/cball/anaconda3/envs/dsdebug/lib/python3.6/site-packages/numba/consts.py", line 34, in infer_constant
self._cache[name] = (True, self._do_infer(name))
File "/Users/cball/anaconda3/envs/dsdebug/lib/python3.6/site-packages/numba/consts.py", line 60, in _do_infer
const = defn.infer_constant()
File "/Users/cball/anaconda3/envs/dsdebug/lib/python3.6/site-packages/numba/ir.py", line 577, in infer_constant
raise ConstantInferenceError('%s' % self, loc=self.loc)
File "/Users/cball/anaconda3/envs/dsdebug/lib/python3.6/site-packages/numba/errors.py", line 528, in __init__
super(ConstantInferenceError, self).__init__(msg, loc=loc)
File "/Users/cball/anaconda3/envs/dsdebug/lib/python3.6/site-packages/numba/errors.py", line 388, in __init__
highlight("%s\n%s\n" % (msg, loc.strformat())))
File "/Users/cball/anaconda3/envs/dsdebug/lib/python3.6/site-packages/numba/ir.py", line 87, in strformat
ret.append(_termcolor.highlight(selected[-1]))
IndexError: Failed at nopython (nopython rewrites)
list index out of range
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "dstest.py", line 10, in <module>
print(fn(a))
File "/Users/cball/anaconda3/envs/dsdebug/lib/python3.6/site-packages/numba/dispatcher.py", line 368, in _compile_for_args
raise e
File "/Users/cball/anaconda3/envs/dsdebug/lib/python3.6/site-packages/numba/dispatcher.py", line 325, in _compile_for_args
return self.compile(tuple(argtypes))
File "/Users/cball/anaconda3/envs/dsdebug/lib/python3.6/site-packages/numba/dispatcher.py", line 653, in compile
cres = self._compiler.compile(args, return_type)
File "/Users/cball/anaconda3/envs/dsdebug/lib/python3.6/site-packages/numba/dispatcher.py", line 83, in compile
pipeline_class=self.pipeline_class)
File "/Users/cball/anaconda3/envs/dsdebug/lib/python3.6/site-packages/numba/compiler.py", line 873, in compile_extra
return pipeline.compile_extra(func)
File "/Users/cball/anaconda3/envs/dsdebug/lib/python3.6/site-packages/numba/compiler.py", line 367, in compile_extra
return self._compile_bytecode()
File "/Users/cball/anaconda3/envs/dsdebug/lib/python3.6/site-packages/numba/compiler.py", line 804, in _compile_bytecode
return self._compile_core()
File "/Users/cball/anaconda3/envs/dsdebug/lib/python3.6/site-packages/numba/compiler.py", line 791, in _compile_core
res = pm.run(self.status)
File "/Users/cball/anaconda3/envs/dsdebug/lib/python3.6/site-packages/numba/compiler.py", line 253, in run
raise patched_exception
File "/Users/cball/anaconda3/envs/dsdebug/lib/python3.6/site-packages/numba/compiler.py", line 245, in run
stage()
File "/Users/cball/anaconda3/envs/dsdebug/lib/python3.6/site-packages/numba/compiler.py", line 678, in stage_nopython_backend
self._backend(lowerfn, objectmode=False)
File "/Users/cball/anaconda3/envs/dsdebug/lib/python3.6/site-packages/numba/compiler.py", line 628, in _backend
lowered = lowerfn()
File "/Users/cball/anaconda3/envs/dsdebug/lib/python3.6/site-packages/numba/compiler.py", line 615, in backend_nopython_mode
self.flags)
File "/Users/cball/anaconda3/envs/dsdebug/lib/python3.6/site-packages/numba/compiler.py", line 992, in native_lowering_stage
lower.lower()
File "/Users/cball/anaconda3/envs/dsdebug/lib/python3.6/site-packages/numba/lowering.py", line 173, in lower
self.lower_normal_function(self.fndesc)
File "/Users/cball/anaconda3/envs/dsdebug/lib/python3.6/site-packages/numba/lowering.py", line 214, in lower_normal_function
entry_block_tail = self.lower_function_body()
File "/Users/cball/anaconda3/envs/dsdebug/lib/python3.6/site-packages/numba/lowering.py", line 239, in lower_function_body
self.lower_block(block)
File "/Users/cball/anaconda3/envs/dsdebug/lib/python3.6/site-packages/numba/lowering.py", line 254, in lower_block
self.lower_inst(inst)
File "/Users/cball/anaconda3/envs/dsdebug/lib/python3.6/contextlib.py", line 99, in __exit__
self.gen.throw(type, value, traceback)
File "/Users/cball/anaconda3/envs/dsdebug/lib/python3.6/site-packages/numba/errors.py", line 585, in new_error_context
six.reraise(type(newerr), newerr, tb)
File "/Users/cball/anaconda3/envs/dsdebug/lib/python3.6/site-packages/numba/six.py", line 659, in reraise
raise value
numba.errors.LoweringError: Failed at nopython (nopython mode backend)
Failed at nopython (nopython rewrites)
list index out of range
File "dstest.py", line 6:
def fn(x):
return 10**x
^
[1] During: lowering "$0.3 = arrayexpr(expr=('**', [const(int, 10), Var(x, dstest.py (6))]), ty=array(float64, 1d, C))" at dstest.py (6)
-------------------------------------------------------------------------------
This should not have happened, a problem has occurred in Numba's internals.
Please report the error message and traceback, along with a minimal reproducer
at: https://github.com/numba/numba/issues/new
If more help is needed please feel free to speak to the Numba core developers
directly at: https://gitter.im/numba/numba
Thanks in advance for your help in improving Numba!
(dsdebug)$ conda install "numba<0.39"
[...]
The following packages will be DOWNGRADED:
llvmlite: 0.24.0-py36hc454e04_0 --> 0.23.2-py36hc454e04_0
numba: 0.39.0-py36h6440ff4_0 --> 0.38.1-py36h6440ff4_0
[...]
$ python dstest.py
[10.]
|
IndexError
|
def run(self):
"""Run inline closure call pass."""
modified = False
work_list = list(self.func_ir.blocks.items())
debug_print = _make_debug_print("InlineClosureCallPass")
debug_print("START")
while work_list:
label, block = work_list.pop()
for i, instr in enumerate(block.body):
if isinstance(instr, ir.Assign):
lhs = instr.target
expr = instr.value
if isinstance(expr, ir.Expr) and expr.op == "call":
call_name = guard(find_callname, self.func_ir, expr)
func_def = guard(get_definition, self.func_ir, expr.func)
if guard(
self._inline_reduction, work_list, block, i, expr, call_name
):
modified = True
break # because block structure changed
if guard(self._inline_closure, work_list, block, i, func_def):
modified = True
break # because block structure changed
if guard(self._inline_stencil, instr, call_name, func_def):
modified = True
if enable_inline_arraycall:
# Identify loop structure
if modified:
# Need to do some cleanups if closure inlining kicked in
merge_adjacent_blocks(self.func_ir.blocks)
cfg = compute_cfg_from_blocks(self.func_ir.blocks)
debug_print("start inline arraycall")
_debug_dump(cfg)
loops = cfg.loops()
sized_loops = [(k, len(loops[k].body)) for k in loops.keys()]
visited = []
# We go over all loops, bigger loops first (outer first)
for k, s in sorted(sized_loops, key=lambda tup: tup[1], reverse=True):
visited.append(k)
if guard(
_inline_arraycall,
self.func_ir,
cfg,
visited,
loops[k],
self.parallel_options.comprehension,
):
modified = True
if modified:
_fix_nested_array(self.func_ir)
if modified:
remove_dels(self.func_ir.blocks)
# repeat dead code elimintation until nothing can be further
# removed
while remove_dead(self.func_ir.blocks, self.func_ir.arg_names, self.func_ir):
pass
self.func_ir.blocks = rename_labels(self.func_ir.blocks)
debug_print("END")
|
def run(self):
"""Run inline closure call pass."""
modified = False
work_list = list(self.func_ir.blocks.items())
debug_print = _make_debug_print("InlineClosureCallPass")
debug_print("START")
while work_list:
label, block = work_list.pop()
for i, instr in enumerate(block.body):
if isinstance(instr, ir.Assign):
lhs = instr.target
expr = instr.value
if isinstance(expr, ir.Expr) and expr.op == "call":
call_name = guard(find_callname, self.func_ir, expr)
func_def = guard(get_definition, self.func_ir, expr.func)
if guard(
self._inline_reduction, work_list, block, i, expr, call_name
):
modified = True
break # because block structure changed
if guard(self._inline_closure, work_list, block, i, func_def):
modified = True
break # because block structure changed
if guard(self._inline_stencil, instr, call_name, func_def):
modified = True
if enable_inline_arraycall:
# Identify loop structure
if modified:
# Need to do some cleanups if closure inlining kicked in
merge_adjacent_blocks(self.func_ir.blocks)
cfg = compute_cfg_from_blocks(self.func_ir.blocks)
debug_print("start inline arraycall")
_debug_dump(cfg)
loops = cfg.loops()
sized_loops = [(k, len(loops[k].body)) for k in loops.keys()]
visited = []
# We go over all loops, bigger loops first (outer first)
for k, s in sorted(sized_loops, key=lambda tup: tup[1], reverse=True):
visited.append(k)
if guard(
_inline_arraycall,
self.func_ir,
cfg,
visited,
loops[k],
self.parallel_options.comprehension,
):
modified = True
if modified:
_fix_nested_array(self.func_ir)
if modified:
remove_dels(self.func_ir.blocks)
# repeat dead code elimintation until nothing can be further
# removed
while remove_dead(self.func_ir.blocks, self.func_ir.arg_names):
pass
self.func_ir.blocks = rename_labels(self.func_ir.blocks)
debug_print("END")
|
https://github.com/numba/numba/issues/2954
|
Serial:
[ 0 1 2 3 4 15 16 17 18 19]
Parallel:
Parallel for-loop #0 is produced from pattern '('prange', 'user')' at <ipython-input-1-f9ca7dd40116> (18)
After fusion, function parallel has 1 parallel for-loop(s) #set([0]).
---------------------------------------------------------------------------
LoweringError Traceback (most recent call last)
<ipython-input-1-f9ca7dd40116> in <module>()
27 print('\nParallel:')
28 A = np.arange(10)
---> 29 parallel(A)
30 print(A)
/home/lgarrison/anaconda2/lib/python2.7/site-packages/numba/dispatcher.pyc in _compile_for_args(self, *args, **kws)
358 e.patch_message(''.join(e.args) + help_msg)
359 # ignore the FULL_TRACEBACKS config, this needs reporting!
--> 360 raise e
361
362 def inspect_llvm(self, signature=None):
LoweringError: Failed at nopython (nopython mode backend)
Failed at nopython (nopython mode backend)
expecting {{i8*, i8*, i64, i64, i64*, [1 x i64], [1 x i64]}, i64, i64*} but got i8*
File "<ipython-input-1-f9ca7dd40116>", line 18:
def parallel(A):
<source elided>
N = len(A)
for i in numba.prange(N):
^
[1] During: lowering "id=0[LoopNest(index_variable = parfor_index.2, range = (0, $N.4, 1))]{40: <ir.Block at <ipython-input-1-f9ca7dd40116> (18)>, 81: <ir.Block at <ipython-input-1-f9ca7dd40116> (18)>, 59: <ir.Block at <ipython-input-1-f9ca7dd40116> (19)>}Var(parfor_index.2, <ipython-input-1-f9ca7dd40116> (18))" at <ipython-input-1-f9ca7dd40116> (18)
-------------------------------------------------------------------------------
This should not have happened, a problem has occurred in Numba's internals.
Please report the error message and traceback, along with a minimal reproducer
at: https://github.com/numba/numba/issues/new
If you need help writing a minimal reproducer please see:
http://matthewrocklin.com/blog/work/2018/02/28/minimal-bug-reports
If more help is needed please feel free to speak to the Numba core developers
directly at: https://gitter.im/numba/numba
Thanks in advance for your help in improving Numba!
|
LoweringError
|
def remove_dead(blocks, args, func_ir, typemap=None, alias_map=None, arg_aliases=None):
"""dead code elimination using liveness and CFG info.
Returns True if something has been removed, or False if nothing is removed.
"""
cfg = compute_cfg_from_blocks(blocks)
usedefs = compute_use_defs(blocks)
live_map = compute_live_map(cfg, blocks, usedefs.usemap, usedefs.defmap)
call_table, _ = get_call_table(blocks)
if alias_map is None or arg_aliases is None:
alias_map, arg_aliases = find_potential_aliases(blocks, args, typemap, func_ir)
if config.DEBUG_ARRAY_OPT == 1:
print("alias map:", alias_map)
# keep set for easier search
alias_set = set(alias_map.keys())
removed = False
for label, block in blocks.items():
# find live variables at each statement to delete dead assignment
lives = {v.name for v in block.terminator.list_vars()}
# find live variables at the end of block
for out_blk, _data in cfg.successors(label):
lives |= live_map[out_blk]
removed |= remove_dead_block(
block,
lives,
call_table,
arg_aliases,
alias_map,
alias_set,
func_ir,
typemap,
)
return removed
|
def remove_dead(blocks, args, typemap=None, alias_map=None, arg_aliases=None):
"""dead code elimination using liveness and CFG info.
Returns True if something has been removed, or False if nothing is removed.
"""
cfg = compute_cfg_from_blocks(blocks)
usedefs = compute_use_defs(blocks)
live_map = compute_live_map(cfg, blocks, usedefs.usemap, usedefs.defmap)
if alias_map is None or arg_aliases is None:
alias_map, arg_aliases = find_potential_aliases(blocks, args, typemap)
if config.DEBUG_ARRAY_OPT == 1:
print("alias map:", alias_map)
# keep set for easier search
alias_set = set(alias_map.keys())
call_table, _ = get_call_table(blocks)
removed = False
for label, block in blocks.items():
# find live variables at each statement to delete dead assignment
lives = {v.name for v in block.terminator.list_vars()}
# find live variables at the end of block
for out_blk, _data in cfg.successors(label):
lives |= live_map[out_blk]
removed |= remove_dead_block(
block, lives, call_table, arg_aliases, alias_map, alias_set, typemap
)
return removed
|
https://github.com/numba/numba/issues/2954
|
Serial:
[ 0 1 2 3 4 15 16 17 18 19]
Parallel:
Parallel for-loop #0 is produced from pattern '('prange', 'user')' at <ipython-input-1-f9ca7dd40116> (18)
After fusion, function parallel has 1 parallel for-loop(s) #set([0]).
---------------------------------------------------------------------------
LoweringError Traceback (most recent call last)
<ipython-input-1-f9ca7dd40116> in <module>()
27 print('\nParallel:')
28 A = np.arange(10)
---> 29 parallel(A)
30 print(A)
/home/lgarrison/anaconda2/lib/python2.7/site-packages/numba/dispatcher.pyc in _compile_for_args(self, *args, **kws)
358 e.patch_message(''.join(e.args) + help_msg)
359 # ignore the FULL_TRACEBACKS config, this needs reporting!
--> 360 raise e
361
362 def inspect_llvm(self, signature=None):
LoweringError: Failed at nopython (nopython mode backend)
Failed at nopython (nopython mode backend)
expecting {{i8*, i8*, i64, i64, i64*, [1 x i64], [1 x i64]}, i64, i64*} but got i8*
File "<ipython-input-1-f9ca7dd40116>", line 18:
def parallel(A):
<source elided>
N = len(A)
for i in numba.prange(N):
^
[1] During: lowering "id=0[LoopNest(index_variable = parfor_index.2, range = (0, $N.4, 1))]{40: <ir.Block at <ipython-input-1-f9ca7dd40116> (18)>, 81: <ir.Block at <ipython-input-1-f9ca7dd40116> (18)>, 59: <ir.Block at <ipython-input-1-f9ca7dd40116> (19)>}Var(parfor_index.2, <ipython-input-1-f9ca7dd40116> (18))" at <ipython-input-1-f9ca7dd40116> (18)
-------------------------------------------------------------------------------
This should not have happened, a problem has occurred in Numba's internals.
Please report the error message and traceback, along with a minimal reproducer
at: https://github.com/numba/numba/issues/new
If you need help writing a minimal reproducer please see:
http://matthewrocklin.com/blog/work/2018/02/28/minimal-bug-reports
If more help is needed please feel free to speak to the Numba core developers
directly at: https://gitter.im/numba/numba
Thanks in advance for your help in improving Numba!
|
LoweringError
|
def remove_dead_block(
block, lives, call_table, arg_aliases, alias_map, alias_set, func_ir, typemap
):
"""remove dead code using liveness info.
Mutable arguments (e.g. arrays) that are not definitely assigned are live
after return of function.
"""
# TODO: find mutable args that are not definitely assigned instead of
# assuming all args are live after return
removed = False
# add statements in reverse order
new_body = [block.terminator]
# for each statement in reverse order, excluding terminator
for stmt in reversed(block.body[:-1]):
# aliases of lives are also live
alias_lives = set()
init_alias_lives = lives & alias_set
for v in init_alias_lives:
alias_lives |= alias_map[v]
lives_n_aliases = lives | alias_lives | arg_aliases
# let external calls handle stmt if type matches
if type(stmt) in remove_dead_extensions:
f = remove_dead_extensions[type(stmt)]
stmt = f(stmt, lives, arg_aliases, alias_map, func_ir, typemap)
if stmt is None:
removed = True
continue
# ignore assignments that their lhs is not live or lhs==rhs
if isinstance(stmt, ir.Assign):
lhs = stmt.target
rhs = stmt.value
if lhs.name not in lives and has_no_side_effect(
rhs, lives_n_aliases, call_table
):
removed = True
continue
if isinstance(rhs, ir.Var) and lhs.name == rhs.name:
removed = True
continue
# TODO: remove other nodes like SetItem etc.
if isinstance(stmt, ir.SetItem):
name = stmt.target.name
if name not in lives_n_aliases:
continue
if type(stmt) in analysis.ir_extension_usedefs:
def_func = analysis.ir_extension_usedefs[type(stmt)]
uses, defs = def_func(stmt)
lives -= defs
lives |= uses
else:
lives |= {v.name for v in stmt.list_vars()}
if isinstance(stmt, ir.Assign):
lives.remove(lhs.name)
new_body.append(stmt)
new_body.reverse()
block.body = new_body
return removed
|
def remove_dead_block(
block, lives, call_table, arg_aliases, alias_map, alias_set, typemap
):
"""remove dead code using liveness info.
Mutable arguments (e.g. arrays) that are not definitely assigned are live
after return of function.
"""
# TODO: find mutable args that are not definitely assigned instead of
# assuming all args are live after return
removed = False
# add statements in reverse order
new_body = [block.terminator]
# for each statement in reverse order, excluding terminator
for stmt in reversed(block.body[:-1]):
# aliases of lives are also live
alias_lives = set()
init_alias_lives = (lives | arg_aliases) & alias_set
for v in init_alias_lives:
alias_lives |= alias_map[v]
# let external calls handle stmt if type matches
if type(stmt) in remove_dead_extensions:
f = remove_dead_extensions[type(stmt)]
stmt = f(stmt, lives, arg_aliases, alias_map, typemap)
if stmt is None:
removed = True
continue
# ignore assignments that their lhs is not live or lhs==rhs
if isinstance(stmt, ir.Assign):
lhs = stmt.target
rhs = stmt.value
if lhs.name not in lives and has_no_side_effect(rhs, lives, call_table):
removed = True
continue
if isinstance(rhs, ir.Var) and lhs.name == rhs.name:
removed = True
continue
# TODO: remove other nodes like SetItem etc.
if isinstance(stmt, ir.SetItem):
name = stmt.target.name
if not (name in lives or name in alias_lives or name in arg_aliases):
continue
if type(stmt) in analysis.ir_extension_usedefs:
def_func = analysis.ir_extension_usedefs[type(stmt)]
uses, defs = def_func(stmt)
lives -= defs
lives |= uses
else:
lives |= {v.name for v in stmt.list_vars()}
if isinstance(stmt, ir.Assign):
lives.remove(lhs.name)
new_body.append(stmt)
new_body.reverse()
block.body = new_body
return removed
|
https://github.com/numba/numba/issues/2954
|
Serial:
[ 0 1 2 3 4 15 16 17 18 19]
Parallel:
Parallel for-loop #0 is produced from pattern '('prange', 'user')' at <ipython-input-1-f9ca7dd40116> (18)
After fusion, function parallel has 1 parallel for-loop(s) #set([0]).
---------------------------------------------------------------------------
LoweringError Traceback (most recent call last)
<ipython-input-1-f9ca7dd40116> in <module>()
27 print('\nParallel:')
28 A = np.arange(10)
---> 29 parallel(A)
30 print(A)
/home/lgarrison/anaconda2/lib/python2.7/site-packages/numba/dispatcher.pyc in _compile_for_args(self, *args, **kws)
358 e.patch_message(''.join(e.args) + help_msg)
359 # ignore the FULL_TRACEBACKS config, this needs reporting!
--> 360 raise e
361
362 def inspect_llvm(self, signature=None):
LoweringError: Failed at nopython (nopython mode backend)
Failed at nopython (nopython mode backend)
expecting {{i8*, i8*, i64, i64, i64*, [1 x i64], [1 x i64]}, i64, i64*} but got i8*
File "<ipython-input-1-f9ca7dd40116>", line 18:
def parallel(A):
<source elided>
N = len(A)
for i in numba.prange(N):
^
[1] During: lowering "id=0[LoopNest(index_variable = parfor_index.2, range = (0, $N.4, 1))]{40: <ir.Block at <ipython-input-1-f9ca7dd40116> (18)>, 81: <ir.Block at <ipython-input-1-f9ca7dd40116> (18)>, 59: <ir.Block at <ipython-input-1-f9ca7dd40116> (19)>}Var(parfor_index.2, <ipython-input-1-f9ca7dd40116> (18))" at <ipython-input-1-f9ca7dd40116> (18)
-------------------------------------------------------------------------------
This should not have happened, a problem has occurred in Numba's internals.
Please report the error message and traceback, along with a minimal reproducer
at: https://github.com/numba/numba/issues/new
If you need help writing a minimal reproducer please see:
http://matthewrocklin.com/blog/work/2018/02/28/minimal-bug-reports
If more help is needed please feel free to speak to the Numba core developers
directly at: https://gitter.im/numba/numba
Thanks in advance for your help in improving Numba!
|
LoweringError
|
def find_potential_aliases(
blocks, args, typemap, func_ir, alias_map=None, arg_aliases=None
):
"find all array aliases and argument aliases to avoid remove as dead"
if alias_map is None:
alias_map = {}
if arg_aliases is None:
arg_aliases = set(a for a in args if not is_immutable_type(a, typemap))
# update definitions since they are not guaranteed to be up-to-date
# FIXME keep definitions up-to-date to avoid the need for rebuilding
func_ir._definitions = build_definitions(func_ir.blocks)
np_alias_funcs = ["ravel", "transpose", "reshape"]
for bl in blocks.values():
for instr in bl.body:
if type(instr) in alias_analysis_extensions:
f = alias_analysis_extensions[type(instr)]
f(instr, args, typemap, func_ir, alias_map, arg_aliases)
if isinstance(instr, ir.Assign):
expr = instr.value
lhs = instr.target.name
# only mutable types can alias
if is_immutable_type(lhs, typemap):
continue
if isinstance(expr, ir.Var) and lhs != expr.name:
_add_alias(lhs, expr.name, alias_map, arg_aliases)
# subarrays like A = B[0] for 2D B
if isinstance(expr, ir.Expr) and (
expr.op == "cast" or expr.op in ["getitem", "static_getitem"]
):
_add_alias(lhs, expr.value.name, alias_map, arg_aliases)
# array attributes like A.T
if (
isinstance(expr, ir.Expr)
and expr.op == "getattr"
and expr.attr in ["T", "ctypes", "flat"]
):
_add_alias(lhs, expr.value.name, alias_map, arg_aliases)
# calls that can create aliases such as B = A.ravel()
if isinstance(expr, ir.Expr) and expr.op == "call":
fdef = guard(find_callname, func_ir, expr, typemap)
# TODO: sometimes gufunc backend creates duplicate code
# causing find_callname to fail. Example: test_argmax
# ignored here since those cases don't create aliases
# but should be fixed in general
if fdef is None:
continue
fname, fmod = fdef
if fmod == "numpy" and fname in np_alias_funcs:
_add_alias(lhs, expr.args[0].name, alias_map, arg_aliases)
if isinstance(fmod, ir.Var) and fname in np_alias_funcs:
_add_alias(lhs, fmod.name, alias_map, arg_aliases)
# copy to avoid changing size during iteration
old_alias_map = copy.deepcopy(alias_map)
# combine all aliases transitively
for v in old_alias_map:
for w in old_alias_map[v]:
alias_map[v] |= alias_map[w]
for w in old_alias_map[v]:
alias_map[w] = alias_map[v]
return alias_map, arg_aliases
|
def find_potential_aliases(blocks, args, typemap, alias_map=None, arg_aliases=None):
"find all array aliases and argument aliases to avoid remove as dead"
if alias_map is None:
alias_map = {}
if arg_aliases is None:
arg_aliases = set(a for a in args if not is_immutable_type(a, typemap))
for bl in blocks.values():
for instr in bl.body:
if type(instr) in alias_analysis_extensions:
f = alias_analysis_extensions[type(instr)]
f(instr, args, typemap, alias_map, arg_aliases)
if isinstance(instr, ir.Assign):
expr = instr.value
lhs = instr.target.name
# only mutable types can alias
if is_immutable_type(lhs, typemap):
continue
if isinstance(expr, ir.Var) and lhs != expr.name:
_add_alias(lhs, expr.name, alias_map, arg_aliases)
# subarrays like A = B[0] for 2D B
if isinstance(expr, ir.Expr) and (
expr.op == "cast" or expr.op in ["getitem", "static_getitem"]
):
_add_alias(lhs, expr.value.name, alias_map, arg_aliases)
# copy to avoid changing size during iteration
old_alias_map = copy.deepcopy(alias_map)
# combine all aliases transitively
for v in old_alias_map:
for w in old_alias_map[v]:
alias_map[v] |= alias_map[w]
for w in old_alias_map[v]:
alias_map[w] = alias_map[v]
return alias_map, arg_aliases
|
https://github.com/numba/numba/issues/2954
|
Serial:
[ 0 1 2 3 4 15 16 17 18 19]
Parallel:
Parallel for-loop #0 is produced from pattern '('prange', 'user')' at <ipython-input-1-f9ca7dd40116> (18)
After fusion, function parallel has 1 parallel for-loop(s) #set([0]).
---------------------------------------------------------------------------
LoweringError Traceback (most recent call last)
<ipython-input-1-f9ca7dd40116> in <module>()
27 print('\nParallel:')
28 A = np.arange(10)
---> 29 parallel(A)
30 print(A)
/home/lgarrison/anaconda2/lib/python2.7/site-packages/numba/dispatcher.pyc in _compile_for_args(self, *args, **kws)
358 e.patch_message(''.join(e.args) + help_msg)
359 # ignore the FULL_TRACEBACKS config, this needs reporting!
--> 360 raise e
361
362 def inspect_llvm(self, signature=None):
LoweringError: Failed at nopython (nopython mode backend)
Failed at nopython (nopython mode backend)
expecting {{i8*, i8*, i64, i64, i64*, [1 x i64], [1 x i64]}, i64, i64*} but got i8*
File "<ipython-input-1-f9ca7dd40116>", line 18:
def parallel(A):
<source elided>
N = len(A)
for i in numba.prange(N):
^
[1] During: lowering "id=0[LoopNest(index_variable = parfor_index.2, range = (0, $N.4, 1))]{40: <ir.Block at <ipython-input-1-f9ca7dd40116> (18)>, 81: <ir.Block at <ipython-input-1-f9ca7dd40116> (18)>, 59: <ir.Block at <ipython-input-1-f9ca7dd40116> (19)>}Var(parfor_index.2, <ipython-input-1-f9ca7dd40116> (18))" at <ipython-input-1-f9ca7dd40116> (18)
-------------------------------------------------------------------------------
This should not have happened, a problem has occurred in Numba's internals.
Please report the error message and traceback, along with a minimal reproducer
at: https://github.com/numba/numba/issues/new
If you need help writing a minimal reproducer please see:
http://matthewrocklin.com/blog/work/2018/02/28/minimal-bug-reports
If more help is needed please feel free to speak to the Numba core developers
directly at: https://gitter.im/numba/numba
Thanks in advance for your help in improving Numba!
|
LoweringError
|
def simplify(func_ir, typemap, calltypes):
remove_dels(func_ir.blocks)
# get copies in to blocks and out from blocks
in_cps, out_cps = copy_propagate(func_ir.blocks, typemap)
# table mapping variable names to ir.Var objects to help replacement
name_var_table = get_name_var_table(func_ir.blocks)
save_copies = apply_copy_propagate(
func_ir.blocks, in_cps, name_var_table, typemap, calltypes
)
restore_copy_var_names(func_ir.blocks, save_copies, typemap)
# remove dead code to enable fusion
remove_dead(func_ir.blocks, func_ir.arg_names, func_ir, typemap)
func_ir.blocks = simplify_CFG(func_ir.blocks)
if config.DEBUG_ARRAY_OPT == 1:
dprint_func_ir(func_ir, "after simplify")
|
def simplify(func_ir, typemap, calltypes):
remove_dels(func_ir.blocks)
# get copies in to blocks and out from blocks
in_cps, out_cps = copy_propagate(func_ir.blocks, typemap)
# table mapping variable names to ir.Var objects to help replacement
name_var_table = get_name_var_table(func_ir.blocks)
save_copies = apply_copy_propagate(
func_ir.blocks, in_cps, name_var_table, typemap, calltypes
)
restore_copy_var_names(func_ir.blocks, save_copies, typemap)
# remove dead code to enable fusion
remove_dead(func_ir.blocks, func_ir.arg_names, typemap)
func_ir.blocks = simplify_CFG(func_ir.blocks)
if config.DEBUG_ARRAY_OPT == 1:
dprint_func_ir(func_ir, "after simplify")
|
https://github.com/numba/numba/issues/2954
|
Serial:
[ 0 1 2 3 4 15 16 17 18 19]
Parallel:
Parallel for-loop #0 is produced from pattern '('prange', 'user')' at <ipython-input-1-f9ca7dd40116> (18)
After fusion, function parallel has 1 parallel for-loop(s) #set([0]).
---------------------------------------------------------------------------
LoweringError Traceback (most recent call last)
<ipython-input-1-f9ca7dd40116> in <module>()
27 print('\nParallel:')
28 A = np.arange(10)
---> 29 parallel(A)
30 print(A)
/home/lgarrison/anaconda2/lib/python2.7/site-packages/numba/dispatcher.pyc in _compile_for_args(self, *args, **kws)
358 e.patch_message(''.join(e.args) + help_msg)
359 # ignore the FULL_TRACEBACKS config, this needs reporting!
--> 360 raise e
361
362 def inspect_llvm(self, signature=None):
LoweringError: Failed at nopython (nopython mode backend)
Failed at nopython (nopython mode backend)
expecting {{i8*, i8*, i64, i64, i64*, [1 x i64], [1 x i64]}, i64, i64*} but got i8*
File "<ipython-input-1-f9ca7dd40116>", line 18:
def parallel(A):
<source elided>
N = len(A)
for i in numba.prange(N):
^
[1] During: lowering "id=0[LoopNest(index_variable = parfor_index.2, range = (0, $N.4, 1))]{40: <ir.Block at <ipython-input-1-f9ca7dd40116> (18)>, 81: <ir.Block at <ipython-input-1-f9ca7dd40116> (18)>, 59: <ir.Block at <ipython-input-1-f9ca7dd40116> (19)>}Var(parfor_index.2, <ipython-input-1-f9ca7dd40116> (18))" at <ipython-input-1-f9ca7dd40116> (18)
-------------------------------------------------------------------------------
This should not have happened, a problem has occurred in Numba's internals.
Please report the error message and traceback, along with a minimal reproducer
at: https://github.com/numba/numba/issues/new
If you need help writing a minimal reproducer please see:
http://matthewrocklin.com/blog/work/2018/02/28/minimal-bug-reports
If more help is needed please feel free to speak to the Numba core developers
directly at: https://gitter.im/numba/numba
Thanks in advance for your help in improving Numba!
|
LoweringError
|
def _lower_parfor_parallel(lowerer, parfor):
"""Lowerer that handles LLVM code generation for parfor.
This function lowers a parfor IR node to LLVM.
The general approach is as follows:
1) The code from the parfor's init block is lowered normally
in the context of the current function.
2) The body of the parfor is transformed into a gufunc function.
3) Code is inserted into the main function that calls do_scheduling
to divide the iteration space for each thread, allocates
reduction arrays, calls the gufunc function, and then invokes
the reduction function across the reduction arrays to produce
the final reduction values.
"""
typingctx = lowerer.context.typing_context
targetctx = lowerer.context
typemap = lowerer.fndesc.typemap
if config.DEBUG_ARRAY_OPT:
print("_lower_parfor_parallel")
parfor.dump()
# produce instructions for init_block
if config.DEBUG_ARRAY_OPT:
print("init_block = ", parfor.init_block, " ", type(parfor.init_block))
for instr in parfor.init_block.body:
if config.DEBUG_ARRAY_OPT:
print("lower init_block instr = ", instr)
lowerer.lower_inst(instr)
alias_map = {}
arg_aliases = {}
numba.parfor.find_potential_aliases_parfor(
parfor, parfor.params, typemap, lowerer.func_ir, alias_map, arg_aliases
)
if config.DEBUG_ARRAY_OPT:
print("alias_map", alias_map)
print("arg_aliases", arg_aliases)
# run get_parfor_outputs() and get_parfor_reductions() before gufunc creation
# since Jumps are modified so CFG of loop_body dict will become invalid
assert parfor.params != None
parfor_output_arrays = numba.parfor.get_parfor_outputs(parfor, parfor.params)
parfor_redvars, parfor_reddict = numba.parfor.get_parfor_reductions(
parfor, parfor.params, lowerer.fndesc.calltypes
)
# compile parfor body as a separate function to be used with GUFuncWrapper
flags = copy.copy(parfor.flags)
flags.set("error_model", "numpy")
# Can't get here unless flags.set('auto_parallel', ParallelOptions(True))
index_var_typ = typemap[parfor.loop_nests[0].index_variable.name]
# index variables should have the same type, check rest of indices
for l in parfor.loop_nests[1:]:
assert typemap[l.index_variable.name] == index_var_typ
numba.parfor.sequential_parfor_lowering = True
func, func_args, func_sig = _create_gufunc_for_parfor_body(
lowerer,
parfor,
typemap,
typingctx,
targetctx,
flags,
{},
bool(alias_map),
index_var_typ,
)
numba.parfor.sequential_parfor_lowering = False
# get the shape signature
get_shape_classes = parfor.get_shape_classes
func_args = ["sched"] + func_args
num_reductions = len(parfor_redvars)
num_inputs = len(func_args) - len(parfor_output_arrays) - num_reductions
if config.DEBUG_ARRAY_OPT:
print("num_inputs = ", num_inputs)
print("parfor_outputs = ", parfor_output_arrays)
print("parfor_redvars = ", parfor_redvars)
gu_signature = _create_shape_signature(
get_shape_classes, num_inputs, num_reductions, func_args, func_sig
)
if config.DEBUG_ARRAY_OPT:
print("gu_signature = ", gu_signature)
# call the func in parallel by wrapping it with ParallelGUFuncBuilder
loop_ranges = [(l.start, l.stop, l.step) for l in parfor.loop_nests]
if config.DEBUG_ARRAY_OPT:
print("loop_nests = ", parfor.loop_nests)
print("loop_ranges = ", loop_ranges)
call_parallel_gufunc(
lowerer,
func,
gu_signature,
func_sig,
func_args,
loop_ranges,
parfor_redvars,
parfor_reddict,
parfor.init_block,
index_var_typ,
)
if config.DEBUG_ARRAY_OPT:
sys.stdout.flush()
|
def _lower_parfor_parallel(lowerer, parfor):
"""Lowerer that handles LLVM code generation for parfor.
This function lowers a parfor IR node to LLVM.
The general approach is as follows:
1) The code from the parfor's init block is lowered normally
in the context of the current function.
2) The body of the parfor is transformed into a gufunc function.
3) Code is inserted into the main function that calls do_scheduling
to divide the iteration space for each thread, allocates
reduction arrays, calls the gufunc function, and then invokes
the reduction function across the reduction arrays to produce
the final reduction values.
"""
typingctx = lowerer.context.typing_context
targetctx = lowerer.context
typemap = lowerer.fndesc.typemap
if config.DEBUG_ARRAY_OPT:
print("_lower_parfor_parallel")
parfor.dump()
# produce instructions for init_block
if config.DEBUG_ARRAY_OPT:
print("init_block = ", parfor.init_block, " ", type(parfor.init_block))
for instr in parfor.init_block.body:
if config.DEBUG_ARRAY_OPT:
print("lower init_block instr = ", instr)
lowerer.lower_inst(instr)
alias_map = {}
arg_aliases = {}
numba.parfor.find_potential_aliases_parfor(
parfor, parfor.params, typemap, alias_map, arg_aliases
)
if config.DEBUG_ARRAY_OPT:
print("alias_map", alias_map)
print("arg_aliases", arg_aliases)
# run get_parfor_outputs() and get_parfor_reductions() before gufunc creation
# since Jumps are modified so CFG of loop_body dict will become invalid
assert parfor.params != None
parfor_output_arrays = numba.parfor.get_parfor_outputs(parfor, parfor.params)
parfor_redvars, parfor_reddict = numba.parfor.get_parfor_reductions(
parfor, parfor.params, lowerer.fndesc.calltypes
)
# compile parfor body as a separate function to be used with GUFuncWrapper
flags = copy.copy(parfor.flags)
flags.set("error_model", "numpy")
# Can't get here unless flags.set('auto_parallel', ParallelOptions(True))
index_var_typ = typemap[parfor.loop_nests[0].index_variable.name]
# index variables should have the same type, check rest of indices
for l in parfor.loop_nests[1:]:
assert typemap[l.index_variable.name] == index_var_typ
numba.parfor.sequential_parfor_lowering = True
func, func_args, func_sig = _create_gufunc_for_parfor_body(
lowerer,
parfor,
typemap,
typingctx,
targetctx,
flags,
{},
bool(alias_map),
index_var_typ,
)
numba.parfor.sequential_parfor_lowering = False
# get the shape signature
get_shape_classes = parfor.get_shape_classes
func_args = ["sched"] + func_args
num_reductions = len(parfor_redvars)
num_inputs = len(func_args) - len(parfor_output_arrays) - num_reductions
if config.DEBUG_ARRAY_OPT:
print("num_inputs = ", num_inputs)
print("parfor_outputs = ", parfor_output_arrays)
print("parfor_redvars = ", parfor_redvars)
gu_signature = _create_shape_signature(
get_shape_classes, num_inputs, num_reductions, func_args, func_sig
)
if config.DEBUG_ARRAY_OPT:
print("gu_signature = ", gu_signature)
# call the func in parallel by wrapping it with ParallelGUFuncBuilder
loop_ranges = [(l.start, l.stop, l.step) for l in parfor.loop_nests]
if config.DEBUG_ARRAY_OPT:
print("loop_nests = ", parfor.loop_nests)
print("loop_ranges = ", loop_ranges)
call_parallel_gufunc(
lowerer,
func,
gu_signature,
func_sig,
func_args,
loop_ranges,
parfor_redvars,
parfor_reddict,
parfor.init_block,
index_var_typ,
)
if config.DEBUG_ARRAY_OPT:
sys.stdout.flush()
|
https://github.com/numba/numba/issues/2954
|
Serial:
[ 0 1 2 3 4 15 16 17 18 19]
Parallel:
Parallel for-loop #0 is produced from pattern '('prange', 'user')' at <ipython-input-1-f9ca7dd40116> (18)
After fusion, function parallel has 1 parallel for-loop(s) #set([0]).
---------------------------------------------------------------------------
LoweringError Traceback (most recent call last)
<ipython-input-1-f9ca7dd40116> in <module>()
27 print('\nParallel:')
28 A = np.arange(10)
---> 29 parallel(A)
30 print(A)
/home/lgarrison/anaconda2/lib/python2.7/site-packages/numba/dispatcher.pyc in _compile_for_args(self, *args, **kws)
358 e.patch_message(''.join(e.args) + help_msg)
359 # ignore the FULL_TRACEBACKS config, this needs reporting!
--> 360 raise e
361
362 def inspect_llvm(self, signature=None):
LoweringError: Failed at nopython (nopython mode backend)
Failed at nopython (nopython mode backend)
expecting {{i8*, i8*, i64, i64, i64*, [1 x i64], [1 x i64]}, i64, i64*} but got i8*
File "<ipython-input-1-f9ca7dd40116>", line 18:
def parallel(A):
<source elided>
N = len(A)
for i in numba.prange(N):
^
[1] During: lowering "id=0[LoopNest(index_variable = parfor_index.2, range = (0, $N.4, 1))]{40: <ir.Block at <ipython-input-1-f9ca7dd40116> (18)>, 81: <ir.Block at <ipython-input-1-f9ca7dd40116> (18)>, 59: <ir.Block at <ipython-input-1-f9ca7dd40116> (19)>}Var(parfor_index.2, <ipython-input-1-f9ca7dd40116> (18))" at <ipython-input-1-f9ca7dd40116> (18)
-------------------------------------------------------------------------------
This should not have happened, a problem has occurred in Numba's internals.
Please report the error message and traceback, along with a minimal reproducer
at: https://github.com/numba/numba/issues/new
If you need help writing a minimal reproducer please see:
http://matthewrocklin.com/blog/work/2018/02/28/minimal-bug-reports
If more help is needed please feel free to speak to the Numba core developers
directly at: https://gitter.im/numba/numba
Thanks in advance for your help in improving Numba!
|
LoweringError
|
def remove_dead_parfor(parfor, lives, arg_aliases, alias_map, func_ir, typemap):
"""remove dead code inside parfor including get/sets"""
with dummy_return_in_loop_body(parfor.loop_body):
labels = find_topo_order(parfor.loop_body)
# get/setitem replacement should ideally use dataflow to propagate setitem
# saved values, but for simplicity we handle the common case of propagating
# setitems in the first block (which is dominant) if the array is not
# potentially changed in any way
first_label = labels[0]
first_block_saved_values = {}
_update_parfor_get_setitems(
parfor.loop_body[first_label].body,
parfor.index_var,
alias_map,
first_block_saved_values,
lives,
)
# remove saved first block setitems if array potentially changed later
saved_arrs = set(first_block_saved_values.keys())
for l in labels:
if l == first_label:
continue
for stmt in parfor.loop_body[l].body:
if (
isinstance(stmt, ir.Assign)
and isinstance(stmt.value, ir.Expr)
and stmt.value.op == "getitem"
and stmt.value.index.name == parfor.index_var.name
):
continue
varnames = set(v.name for v in stmt.list_vars())
rm_arrs = varnames & saved_arrs
for a in rm_arrs:
first_block_saved_values.pop(a, None)
# replace getitems with available value
# e.g. A[i] = v; ... s = A[i] -> s = v
for l in labels:
if l == first_label:
continue
block = parfor.loop_body[l]
saved_values = first_block_saved_values.copy()
_update_parfor_get_setitems(
block.body, parfor.index_var, alias_map, saved_values, lives
)
# after getitem replacement, remove extra setitems
blocks = parfor.loop_body.copy() # shallow copy is enough
last_label = max(blocks.keys())
return_label, tuple_var = _add_liveness_return_block(blocks, lives, typemap)
# jump to return label
jump = ir.Jump(return_label, ir.Loc("parfors_dummy", -1))
blocks[last_label].body.append(jump)
cfg = compute_cfg_from_blocks(blocks)
usedefs = compute_use_defs(blocks)
live_map = compute_live_map(cfg, blocks, usedefs.usemap, usedefs.defmap)
alias_set = set(alias_map.keys())
for label, block in blocks.items():
new_body = []
in_lives = {v.name for v in block.terminator.list_vars()}
# find live variables at the end of block
for out_blk, _data in cfg.successors(label):
in_lives |= live_map[out_blk]
for stmt in reversed(block.body):
# aliases of lives are also live for setitems
alias_lives = in_lives & alias_set
for v in alias_lives:
in_lives |= alias_map[v]
if (
isinstance(stmt, ir.SetItem)
and stmt.index.name == parfor.index_var.name
and stmt.target.name not in in_lives
and stmt.target.name not in arg_aliases
):
continue
in_lives |= {v.name for v in stmt.list_vars()}
new_body.append(stmt)
new_body.reverse()
block.body = new_body
typemap.pop(tuple_var.name) # remove dummy tuple type
blocks[last_label].body.pop() # remove jump
# process parfor body recursively
remove_dead_parfor_recursive(
parfor, lives, arg_aliases, alias_map, func_ir, typemap
)
# remove parfor if empty
is_empty = len(parfor.init_block.body) == 0
for block in parfor.loop_body.values():
is_empty &= len(block.body) == 0
if is_empty:
return None
return parfor
|
def remove_dead_parfor(parfor, lives, arg_aliases, alias_map, typemap):
"""remove dead code inside parfor including get/sets"""
with dummy_return_in_loop_body(parfor.loop_body):
labels = find_topo_order(parfor.loop_body)
# get/setitem replacement should ideally use dataflow to propagate setitem
# saved values, but for simplicity we handle the common case of propagating
# setitems in the first block (which is dominant) if the array is not
# potentially changed in any way
first_label = labels[0]
first_block_saved_values = {}
_update_parfor_get_setitems(
parfor.loop_body[first_label].body,
parfor.index_var,
alias_map,
first_block_saved_values,
lives,
)
# remove saved first block setitems if array potentially changed later
saved_arrs = set(first_block_saved_values.keys())
for l in labels:
if l == first_label:
continue
for stmt in parfor.loop_body[l].body:
if (
isinstance(stmt, ir.Assign)
and isinstance(stmt.value, ir.Expr)
and stmt.value.op == "getitem"
and stmt.value.index.name == parfor.index_var.name
):
continue
varnames = set(v.name for v in stmt.list_vars())
rm_arrs = varnames & saved_arrs
for a in rm_arrs:
first_block_saved_values.pop(a, None)
# replace getitems with available value
# e.g. A[i] = v; ... s = A[i] -> s = v
for l in labels:
if l == first_label:
continue
block = parfor.loop_body[l]
saved_values = first_block_saved_values.copy()
_update_parfor_get_setitems(
block.body, parfor.index_var, alias_map, saved_values, lives
)
# after getitem replacement, remove extra setitems
blocks = parfor.loop_body.copy() # shallow copy is enough
last_label = max(blocks.keys())
return_label, tuple_var = _add_liveness_return_block(blocks, lives, typemap)
# jump to return label
jump = ir.Jump(return_label, ir.Loc("parfors_dummy", -1))
blocks[last_label].body.append(jump)
cfg = compute_cfg_from_blocks(blocks)
usedefs = compute_use_defs(blocks)
live_map = compute_live_map(cfg, blocks, usedefs.usemap, usedefs.defmap)
alias_set = set(alias_map.keys())
for label, block in blocks.items():
new_body = []
in_lives = {v.name for v in block.terminator.list_vars()}
# find live variables at the end of block
for out_blk, _data in cfg.successors(label):
in_lives |= live_map[out_blk]
for stmt in reversed(block.body):
# aliases of lives are also live for setitems
alias_lives = in_lives & alias_set
for v in alias_lives:
in_lives |= alias_map[v]
if (
isinstance(stmt, ir.SetItem)
and stmt.index.name == parfor.index_var.name
and stmt.target.name not in in_lives
and stmt.target.name not in arg_aliases
):
continue
in_lives |= {v.name for v in stmt.list_vars()}
new_body.append(stmt)
new_body.reverse()
block.body = new_body
typemap.pop(tuple_var.name) # remove dummy tuple type
blocks[last_label].body.pop() # remove jump
# process parfor body recursively
remove_dead_parfor_recursive(parfor, lives, arg_aliases, alias_map, typemap)
# remove parfor if empty
is_empty = len(parfor.init_block.body) == 0
for block in parfor.loop_body.values():
is_empty &= len(block.body) == 0
if is_empty:
return None
return parfor
|
https://github.com/numba/numba/issues/2954
|
Serial:
[ 0 1 2 3 4 15 16 17 18 19]
Parallel:
Parallel for-loop #0 is produced from pattern '('prange', 'user')' at <ipython-input-1-f9ca7dd40116> (18)
After fusion, function parallel has 1 parallel for-loop(s) #set([0]).
---------------------------------------------------------------------------
LoweringError Traceback (most recent call last)
<ipython-input-1-f9ca7dd40116> in <module>()
27 print('\nParallel:')
28 A = np.arange(10)
---> 29 parallel(A)
30 print(A)
/home/lgarrison/anaconda2/lib/python2.7/site-packages/numba/dispatcher.pyc in _compile_for_args(self, *args, **kws)
358 e.patch_message(''.join(e.args) + help_msg)
359 # ignore the FULL_TRACEBACKS config, this needs reporting!
--> 360 raise e
361
362 def inspect_llvm(self, signature=None):
LoweringError: Failed at nopython (nopython mode backend)
Failed at nopython (nopython mode backend)
expecting {{i8*, i8*, i64, i64, i64*, [1 x i64], [1 x i64]}, i64, i64*} but got i8*
File "<ipython-input-1-f9ca7dd40116>", line 18:
def parallel(A):
<source elided>
N = len(A)
for i in numba.prange(N):
^
[1] During: lowering "id=0[LoopNest(index_variable = parfor_index.2, range = (0, $N.4, 1))]{40: <ir.Block at <ipython-input-1-f9ca7dd40116> (18)>, 81: <ir.Block at <ipython-input-1-f9ca7dd40116> (18)>, 59: <ir.Block at <ipython-input-1-f9ca7dd40116> (19)>}Var(parfor_index.2, <ipython-input-1-f9ca7dd40116> (18))" at <ipython-input-1-f9ca7dd40116> (18)
-------------------------------------------------------------------------------
This should not have happened, a problem has occurred in Numba's internals.
Please report the error message and traceback, along with a minimal reproducer
at: https://github.com/numba/numba/issues/new
If you need help writing a minimal reproducer please see:
http://matthewrocklin.com/blog/work/2018/02/28/minimal-bug-reports
If more help is needed please feel free to speak to the Numba core developers
directly at: https://gitter.im/numba/numba
Thanks in advance for your help in improving Numba!
|
LoweringError
|
def remove_dead_parfor_recursive(
parfor, lives, arg_aliases, alias_map, func_ir, typemap
):
"""create a dummy function from parfor and call remove dead recursively"""
blocks = parfor.loop_body.copy() # shallow copy is enough
first_body_block = min(blocks.keys())
assert first_body_block > 0 # we are using 0 for init block here
last_label = max(blocks.keys())
return_label, tuple_var = _add_liveness_return_block(blocks, lives, typemap)
# branch back to first body label to simulate loop
branch = ir.Branch(0, first_body_block, return_label, ir.Loc("parfors_dummy", -1))
blocks[last_label].body.append(branch)
# add dummy jump in init_block for CFG to work
blocks[0] = parfor.init_block
blocks[0].body.append(ir.Jump(first_body_block, ir.Loc("parfors_dummy", -1)))
# args var including aliases is ok
remove_dead(blocks, arg_aliases, func_ir, typemap, alias_map, arg_aliases)
typemap.pop(tuple_var.name) # remove dummy tuple type
blocks[0].body.pop() # remove dummy jump
blocks[last_label].body.pop() # remove branch
return
|
def remove_dead_parfor_recursive(parfor, lives, arg_aliases, alias_map, typemap):
"""create a dummy function from parfor and call remove dead recursively"""
blocks = parfor.loop_body.copy() # shallow copy is enough
first_body_block = min(blocks.keys())
assert first_body_block > 0 # we are using 0 for init block here
last_label = max(blocks.keys())
return_label, tuple_var = _add_liveness_return_block(blocks, lives, typemap)
# branch back to first body label to simulate loop
branch = ir.Branch(0, first_body_block, return_label, ir.Loc("parfors_dummy", -1))
blocks[last_label].body.append(branch)
# add dummy jump in init_block for CFG to work
blocks[0] = parfor.init_block
blocks[0].body.append(ir.Jump(first_body_block, ir.Loc("parfors_dummy", -1)))
# args var including aliases is ok
remove_dead(blocks, arg_aliases, typemap, alias_map, arg_aliases)
typemap.pop(tuple_var.name) # remove dummy tuple type
blocks[0].body.pop() # remove dummy jump
blocks[last_label].body.pop() # remove branch
return
|
https://github.com/numba/numba/issues/2954
|
Serial:
[ 0 1 2 3 4 15 16 17 18 19]
Parallel:
Parallel for-loop #0 is produced from pattern '('prange', 'user')' at <ipython-input-1-f9ca7dd40116> (18)
After fusion, function parallel has 1 parallel for-loop(s) #set([0]).
---------------------------------------------------------------------------
LoweringError Traceback (most recent call last)
<ipython-input-1-f9ca7dd40116> in <module>()
27 print('\nParallel:')
28 A = np.arange(10)
---> 29 parallel(A)
30 print(A)
/home/lgarrison/anaconda2/lib/python2.7/site-packages/numba/dispatcher.pyc in _compile_for_args(self, *args, **kws)
358 e.patch_message(''.join(e.args) + help_msg)
359 # ignore the FULL_TRACEBACKS config, this needs reporting!
--> 360 raise e
361
362 def inspect_llvm(self, signature=None):
LoweringError: Failed at nopython (nopython mode backend)
Failed at nopython (nopython mode backend)
expecting {{i8*, i8*, i64, i64, i64*, [1 x i64], [1 x i64]}, i64, i64*} but got i8*
File "<ipython-input-1-f9ca7dd40116>", line 18:
def parallel(A):
<source elided>
N = len(A)
for i in numba.prange(N):
^
[1] During: lowering "id=0[LoopNest(index_variable = parfor_index.2, range = (0, $N.4, 1))]{40: <ir.Block at <ipython-input-1-f9ca7dd40116> (18)>, 81: <ir.Block at <ipython-input-1-f9ca7dd40116> (18)>, 59: <ir.Block at <ipython-input-1-f9ca7dd40116> (19)>}Var(parfor_index.2, <ipython-input-1-f9ca7dd40116> (18))" at <ipython-input-1-f9ca7dd40116> (18)
-------------------------------------------------------------------------------
This should not have happened, a problem has occurred in Numba's internals.
Please report the error message and traceback, along with a minimal reproducer
at: https://github.com/numba/numba/issues/new
If you need help writing a minimal reproducer please see:
http://matthewrocklin.com/blog/work/2018/02/28/minimal-bug-reports
If more help is needed please feel free to speak to the Numba core developers
directly at: https://gitter.im/numba/numba
Thanks in advance for your help in improving Numba!
|
LoweringError
|
def find_potential_aliases_parfor(
parfor, args, typemap, func_ir, alias_map, arg_aliases
):
blocks = wrap_parfor_blocks(parfor)
ir_utils.find_potential_aliases(
blocks, args, typemap, func_ir, alias_map, arg_aliases
)
unwrap_parfor_blocks(parfor)
return
|
def find_potential_aliases_parfor(parfor, args, typemap, alias_map, arg_aliases):
blocks = wrap_parfor_blocks(parfor)
ir_utils.find_potential_aliases(blocks, args, typemap, alias_map, arg_aliases)
unwrap_parfor_blocks(parfor)
return
|
https://github.com/numba/numba/issues/2954
|
Serial:
[ 0 1 2 3 4 15 16 17 18 19]
Parallel:
Parallel for-loop #0 is produced from pattern '('prange', 'user')' at <ipython-input-1-f9ca7dd40116> (18)
After fusion, function parallel has 1 parallel for-loop(s) #set([0]).
---------------------------------------------------------------------------
LoweringError Traceback (most recent call last)
<ipython-input-1-f9ca7dd40116> in <module>()
27 print('\nParallel:')
28 A = np.arange(10)
---> 29 parallel(A)
30 print(A)
/home/lgarrison/anaconda2/lib/python2.7/site-packages/numba/dispatcher.pyc in _compile_for_args(self, *args, **kws)
358 e.patch_message(''.join(e.args) + help_msg)
359 # ignore the FULL_TRACEBACKS config, this needs reporting!
--> 360 raise e
361
362 def inspect_llvm(self, signature=None):
LoweringError: Failed at nopython (nopython mode backend)
Failed at nopython (nopython mode backend)
expecting {{i8*, i8*, i64, i64, i64*, [1 x i64], [1 x i64]}, i64, i64*} but got i8*
File "<ipython-input-1-f9ca7dd40116>", line 18:
def parallel(A):
<source elided>
N = len(A)
for i in numba.prange(N):
^
[1] During: lowering "id=0[LoopNest(index_variable = parfor_index.2, range = (0, $N.4, 1))]{40: <ir.Block at <ipython-input-1-f9ca7dd40116> (18)>, 81: <ir.Block at <ipython-input-1-f9ca7dd40116> (18)>, 59: <ir.Block at <ipython-input-1-f9ca7dd40116> (19)>}Var(parfor_index.2, <ipython-input-1-f9ca7dd40116> (18))" at <ipython-input-1-f9ca7dd40116> (18)
-------------------------------------------------------------------------------
This should not have happened, a problem has occurred in Numba's internals.
Please report the error message and traceback, along with a minimal reproducer
at: https://github.com/numba/numba/issues/new
If you need help writing a minimal reproducer please see:
http://matthewrocklin.com/blog/work/2018/02/28/minimal-bug-reports
If more help is needed please feel free to speak to the Numba core developers
directly at: https://gitter.im/numba/numba
Thanks in advance for your help in improving Numba!
|
LoweringError
|
def build_parfor_definitions(parfor, definitions=None):
"""get variable definition table for parfors"""
if definitions is None:
definitions = defaultdict(list)
# avoid wrap_parfor_blocks() since build_definitions is called inside
# find_potential_aliases_parfor where the parfor is already wrapped
build_definitions(parfor.loop_body, definitions)
build_definitions({0: parfor.init_block}, definitions)
return definitions
|
def build_parfor_definitions(parfor, definitions=None):
"""get variable definition table for parfors"""
if definitions is None:
definitions = dict()
blocks = wrap_parfor_blocks(parfor)
build_definitions(blocks, definitions)
unwrap_parfor_blocks(parfor)
return definitions
|
https://github.com/numba/numba/issues/2954
|
Serial:
[ 0 1 2 3 4 15 16 17 18 19]
Parallel:
Parallel for-loop #0 is produced from pattern '('prange', 'user')' at <ipython-input-1-f9ca7dd40116> (18)
After fusion, function parallel has 1 parallel for-loop(s) #set([0]).
---------------------------------------------------------------------------
LoweringError Traceback (most recent call last)
<ipython-input-1-f9ca7dd40116> in <module>()
27 print('\nParallel:')
28 A = np.arange(10)
---> 29 parallel(A)
30 print(A)
/home/lgarrison/anaconda2/lib/python2.7/site-packages/numba/dispatcher.pyc in _compile_for_args(self, *args, **kws)
358 e.patch_message(''.join(e.args) + help_msg)
359 # ignore the FULL_TRACEBACKS config, this needs reporting!
--> 360 raise e
361
362 def inspect_llvm(self, signature=None):
LoweringError: Failed at nopython (nopython mode backend)
Failed at nopython (nopython mode backend)
expecting {{i8*, i8*, i64, i64, i64*, [1 x i64], [1 x i64]}, i64, i64*} but got i8*
File "<ipython-input-1-f9ca7dd40116>", line 18:
def parallel(A):
<source elided>
N = len(A)
for i in numba.prange(N):
^
[1] During: lowering "id=0[LoopNest(index_variable = parfor_index.2, range = (0, $N.4, 1))]{40: <ir.Block at <ipython-input-1-f9ca7dd40116> (18)>, 81: <ir.Block at <ipython-input-1-f9ca7dd40116> (18)>, 59: <ir.Block at <ipython-input-1-f9ca7dd40116> (19)>}Var(parfor_index.2, <ipython-input-1-f9ca7dd40116> (18))" at <ipython-input-1-f9ca7dd40116> (18)
-------------------------------------------------------------------------------
This should not have happened, a problem has occurred in Numba's internals.
Please report the error message and traceback, along with a minimal reproducer
at: https://github.com/numba/numba/issues/new
If you need help writing a minimal reproducer please see:
http://matthewrocklin.com/blog/work/2018/02/28/minimal-bug-reports
If more help is needed please feel free to speak to the Numba core developers
directly at: https://gitter.im/numba/numba
Thanks in advance for your help in improving Numba!
|
LoweringError
|
def run(self):
"""Finds all calls to StencilFuncs in the IR and converts them to parfor."""
from numba.stencil import StencilFunc
# Get all the calls in the function IR.
call_table, _ = get_call_table(self.func_ir.blocks)
stencil_calls = []
stencil_dict = {}
for call_varname, call_list in call_table.items():
if isinstance(call_list[0], StencilFunc):
# Remember all calls to StencilFuncs.
stencil_calls.append(call_varname)
stencil_dict[call_varname] = call_list[0]
if not stencil_calls:
return # return early if no stencil calls found
# find and transform stencil calls
for label, block in self.func_ir.blocks.items():
for i, stmt in reversed(list(enumerate(block.body))):
# Found a call to a StencilFunc.
if (
isinstance(stmt, ir.Assign)
and isinstance(stmt.value, ir.Expr)
and stmt.value.op == "call"
and stmt.value.func.name in stencil_calls
):
kws = dict(stmt.value.kws)
# Create dictionary of input argument number to
# the argument itself.
input_dict = {
i: stmt.value.args[i] for i in range(len(stmt.value.args))
}
in_args = stmt.value.args
arg_typemap = tuple(self.typemap[i.name] for i in in_args)
for arg_type in arg_typemap:
if isinstance(arg_type, types.BaseTuple):
raise ValueError(
"Tuple parameters not supported "
"for stencil kernels in parallel=True mode."
)
out_arr = kws.get("out")
# Get the StencilFunc object corresponding to this call.
sf = stencil_dict[stmt.value.func.name]
stencil_ir, rt, arg_to_arr_dict = get_stencil_ir(
sf,
self.typingctx,
arg_typemap,
block.scope,
block.loc,
input_dict,
self.typemap,
self.calltypes,
)
index_offsets = sf.options.get("index_offsets", None)
gen_nodes = self._mk_stencil_parfor(
label,
in_args,
out_arr,
stencil_ir,
index_offsets,
stmt.target,
rt,
sf,
arg_to_arr_dict,
)
block.body = block.body[:i] + gen_nodes + block.body[i + 1 :]
# Found a call to a stencil via numba.stencil().
elif (
isinstance(stmt, ir.Assign)
and isinstance(stmt.value, ir.Expr)
and stmt.value.op == "call"
and guard(find_callname, self.func_ir, stmt.value)
== ("stencil", "numba")
):
# remove dummy stencil() call
stmt.value = ir.Const(0, stmt.loc)
|
def run(self):
"""Finds all calls to StencilFuncs in the IR and converts them to parfor."""
from numba.stencil import StencilFunc
# Get all the calls in the function IR.
call_table, _ = get_call_table(self.func_ir.blocks)
stencil_calls = []
stencil_dict = {}
for call_varname, call_list in call_table.items():
if isinstance(call_list[0], StencilFunc):
# Remember all calls to StencilFuncs.
stencil_calls.append(call_varname)
stencil_dict[call_varname] = call_list[0]
if not stencil_calls:
return # return early if no stencil calls found
# find and transform stencil calls
for label, block in self.func_ir.blocks.items():
for i, stmt in reversed(list(enumerate(block.body))):
# Found a call to a StencilFunc.
if (
isinstance(stmt, ir.Assign)
and isinstance(stmt.value, ir.Expr)
and stmt.value.op == "call"
and stmt.value.func.name in stencil_calls
):
kws = dict(stmt.value.kws)
# Create dictionary of input argument number to
# the argument itself.
input_dict = {
i: stmt.value.args[i] for i in range(len(stmt.value.args))
}
in_args = stmt.value.args
arg_typemap = tuple(self.typemap[i.name] for i in in_args)
for arg_type in arg_typemap:
if isinstance(arg_type, types.BaseTuple):
raise ValueError(
"Tuple parameters not supported "
"for stencil kernels in parallel=True mode."
)
out_arr = kws.get("out")
# Get the StencilFunc object corresponding to this call.
sf = stencil_dict[stmt.value.func.name]
stencil_blocks, rt, arg_to_arr_dict = get_stencil_blocks(
sf,
self.typingctx,
arg_typemap,
block.scope,
block.loc,
input_dict,
self.typemap,
self.calltypes,
)
index_offsets = sf.options.get("index_offsets", None)
gen_nodes = self._mk_stencil_parfor(
label,
in_args,
out_arr,
stencil_blocks,
index_offsets,
stmt.target,
rt,
sf,
arg_to_arr_dict,
)
block.body = block.body[:i] + gen_nodes + block.body[i + 1 :]
# Found a call to a stencil via numba.stencil().
elif (
isinstance(stmt, ir.Assign)
and isinstance(stmt.value, ir.Expr)
and stmt.value.op == "call"
and guard(find_callname, self.func_ir, stmt.value)
== ("stencil", "numba")
):
# remove dummy stencil() call
stmt.value = ir.Const(0, stmt.loc)
|
https://github.com/numba/numba/issues/2954
|
Serial:
[ 0 1 2 3 4 15 16 17 18 19]
Parallel:
Parallel for-loop #0 is produced from pattern '('prange', 'user')' at <ipython-input-1-f9ca7dd40116> (18)
After fusion, function parallel has 1 parallel for-loop(s) #set([0]).
---------------------------------------------------------------------------
LoweringError Traceback (most recent call last)
<ipython-input-1-f9ca7dd40116> in <module>()
27 print('\nParallel:')
28 A = np.arange(10)
---> 29 parallel(A)
30 print(A)
/home/lgarrison/anaconda2/lib/python2.7/site-packages/numba/dispatcher.pyc in _compile_for_args(self, *args, **kws)
358 e.patch_message(''.join(e.args) + help_msg)
359 # ignore the FULL_TRACEBACKS config, this needs reporting!
--> 360 raise e
361
362 def inspect_llvm(self, signature=None):
LoweringError: Failed at nopython (nopython mode backend)
Failed at nopython (nopython mode backend)
expecting {{i8*, i8*, i64, i64, i64*, [1 x i64], [1 x i64]}, i64, i64*} but got i8*
File "<ipython-input-1-f9ca7dd40116>", line 18:
def parallel(A):
<source elided>
N = len(A)
for i in numba.prange(N):
^
[1] During: lowering "id=0[LoopNest(index_variable = parfor_index.2, range = (0, $N.4, 1))]{40: <ir.Block at <ipython-input-1-f9ca7dd40116> (18)>, 81: <ir.Block at <ipython-input-1-f9ca7dd40116> (18)>, 59: <ir.Block at <ipython-input-1-f9ca7dd40116> (19)>}Var(parfor_index.2, <ipython-input-1-f9ca7dd40116> (18))" at <ipython-input-1-f9ca7dd40116> (18)
-------------------------------------------------------------------------------
This should not have happened, a problem has occurred in Numba's internals.
Please report the error message and traceback, along with a minimal reproducer
at: https://github.com/numba/numba/issues/new
If you need help writing a minimal reproducer please see:
http://matthewrocklin.com/blog/work/2018/02/28/minimal-bug-reports
If more help is needed please feel free to speak to the Numba core developers
directly at: https://gitter.im/numba/numba
Thanks in advance for your help in improving Numba!
|
LoweringError
|
def _mk_stencil_parfor(
self,
label,
in_args,
out_arr,
stencil_ir,
index_offsets,
target,
return_type,
stencil_func,
arg_to_arr_dict,
):
"""Converts a set of stencil kernel blocks to a parfor."""
gen_nodes = []
stencil_blocks = stencil_ir.blocks
if config.DEBUG_ARRAY_OPT == 1:
print(
"_mk_stencil_parfor",
label,
in_args,
out_arr,
index_offsets,
return_type,
stencil_func,
stencil_blocks,
)
ir_utils.dump_blocks(stencil_blocks)
in_arr = in_args[0]
# run copy propagate to replace in_args copies (e.g. a = A)
in_arr_typ = self.typemap[in_arr.name]
in_cps, out_cps = ir_utils.copy_propagate(stencil_blocks, self.typemap)
name_var_table = ir_utils.get_name_var_table(stencil_blocks)
ir_utils.apply_copy_propagate(
stencil_blocks, in_cps, name_var_table, self.typemap, self.calltypes
)
if config.DEBUG_ARRAY_OPT == 1:
print("stencil_blocks after copy_propagate")
ir_utils.dump_blocks(stencil_blocks)
ir_utils.remove_dead(
stencil_blocks, self.func_ir.arg_names, stencil_ir, self.typemap
)
if config.DEBUG_ARRAY_OPT == 1:
print("stencil_blocks after removing dead code")
ir_utils.dump_blocks(stencil_blocks)
# create parfor vars
ndims = self.typemap[in_arr.name].ndim
scope = in_arr.scope
loc = in_arr.loc
parfor_vars = []
for i in range(ndims):
parfor_var = ir.Var(scope, mk_unique_var("$parfor_index_var"), loc)
self.typemap[parfor_var.name] = types.intp
parfor_vars.append(parfor_var)
start_lengths, end_lengths = self._replace_stencil_accesses(
stencil_blocks,
parfor_vars,
in_args,
index_offsets,
stencil_func,
arg_to_arr_dict,
)
if config.DEBUG_ARRAY_OPT == 1:
print("stencil_blocks after replace stencil accesses")
ir_utils.dump_blocks(stencil_blocks)
# create parfor loop nests
loopnests = []
equiv_set = self.array_analysis.get_equiv_set(label)
in_arr_dim_sizes = equiv_set.get_shape(in_arr)
assert ndims == len(in_arr_dim_sizes)
for i in range(ndims):
last_ind = self._get_stencil_last_ind(
in_arr_dim_sizes[i], end_lengths[i], gen_nodes, scope, loc
)
start_ind = self._get_stencil_start_ind(start_lengths[i], gen_nodes, scope, loc)
# start from stencil size to avoid invalid array access
loopnests.append(numba.parfor.LoopNest(parfor_vars[i], start_ind, last_ind, 1))
# We have to guarantee that the exit block has maximum label and that
# there's only one exit block for the parfor body.
# So, all return statements will change to jump to the parfor exit block.
parfor_body_exit_label = max(stencil_blocks.keys()) + 1
stencil_blocks[parfor_body_exit_label] = ir.Block(scope, loc)
exit_value_var = ir.Var(scope, mk_unique_var("$parfor_exit_value"), loc)
self.typemap[exit_value_var.name] = return_type.dtype
# create parfor index var
for_replacing_ret = []
if ndims == 1:
parfor_ind_var = parfor_vars[0]
else:
parfor_ind_var = ir.Var(scope, mk_unique_var("$parfor_index_tuple_var"), loc)
self.typemap[parfor_ind_var.name] = types.containers.UniTuple(types.intp, ndims)
tuple_call = ir.Expr.build_tuple(parfor_vars, loc)
tuple_assign = ir.Assign(tuple_call, parfor_ind_var, loc)
for_replacing_ret.append(tuple_assign)
if config.DEBUG_ARRAY_OPT == 1:
print("stencil_blocks after creating parfor index var")
ir_utils.dump_blocks(stencil_blocks)
# empty init block
init_block = ir.Block(scope, loc)
if out_arr == None:
in_arr_typ = self.typemap[in_arr.name]
shape_name = ir_utils.mk_unique_var("in_arr_shape")
shape_var = ir.Var(scope, shape_name, loc)
shape_getattr = ir.Expr.getattr(in_arr, "shape", loc)
self.typemap[shape_name] = types.containers.UniTuple(
types.intp, in_arr_typ.ndim
)
init_block.body.extend([ir.Assign(shape_getattr, shape_var, loc)])
zero_name = ir_utils.mk_unique_var("zero_val")
zero_var = ir.Var(scope, zero_name, loc)
if "cval" in stencil_func.options:
cval = stencil_func.options["cval"]
# TODO: Loosen this restriction to adhere to casting rules.
if return_type.dtype != typing.typeof.typeof(cval):
raise ValueError("cval type does not match stencil return type.")
temp2 = return_type.dtype(cval)
else:
temp2 = return_type.dtype(0)
full_const = ir.Const(temp2, loc)
self.typemap[zero_name] = return_type.dtype
init_block.body.extend([ir.Assign(full_const, zero_var, loc)])
so_name = ir_utils.mk_unique_var("stencil_output")
out_arr = ir.Var(scope, so_name, loc)
self.typemap[out_arr.name] = numba.types.npytypes.Array(
return_type.dtype, in_arr_typ.ndim, in_arr_typ.layout
)
dtype_g_np_var = ir.Var(scope, mk_unique_var("$np_g_var"), loc)
self.typemap[dtype_g_np_var.name] = types.misc.Module(np)
dtype_g_np = ir.Global("np", np, loc)
dtype_g_np_assign = ir.Assign(dtype_g_np, dtype_g_np_var, loc)
init_block.body.append(dtype_g_np_assign)
dtype_np_attr_call = ir.Expr.getattr(
dtype_g_np_var, return_type.dtype.name, loc
)
dtype_attr_var = ir.Var(scope, mk_unique_var("$np_attr_attr"), loc)
self.typemap[dtype_attr_var.name] = types.functions.NumberClass(
return_type.dtype
)
dtype_attr_assign = ir.Assign(dtype_np_attr_call, dtype_attr_var, loc)
init_block.body.append(dtype_attr_assign)
stmts = ir_utils.gen_np_call(
"full",
np.full,
out_arr,
[shape_var, zero_var, dtype_attr_var],
self.typingctx,
self.typemap,
self.calltypes,
)
equiv_set.insert_equiv(out_arr, in_arr_dim_sizes)
init_block.body.extend(stmts)
self.replace_return_with_setitem(
stencil_blocks, exit_value_var, parfor_body_exit_label
)
if config.DEBUG_ARRAY_OPT == 1:
print("stencil_blocks after replacing return")
ir_utils.dump_blocks(stencil_blocks)
setitem_call = ir.SetItem(out_arr, parfor_ind_var, exit_value_var, loc)
self.calltypes[setitem_call] = signature(
types.none,
self.typemap[out_arr.name],
self.typemap[parfor_ind_var.name],
self.typemap[out_arr.name].dtype,
)
stencil_blocks[parfor_body_exit_label].body.extend(for_replacing_ret)
stencil_blocks[parfor_body_exit_label].body.append(setitem_call)
# simplify CFG of parfor body (exit block could be simplified often)
# add dummy return to enable CFG
stencil_blocks[parfor_body_exit_label].body.append(
ir.Return(0, ir.Loc("stencilparfor_dummy", -1))
)
stencil_blocks = ir_utils.simplify_CFG(stencil_blocks)
stencil_blocks[max(stencil_blocks.keys())].body.pop()
if config.DEBUG_ARRAY_OPT == 1:
print("stencil_blocks after adding SetItem")
ir_utils.dump_blocks(stencil_blocks)
pattern = ("stencil", [start_lengths, end_lengths])
parfor = numba.parfor.Parfor(
loopnests,
init_block,
stencil_blocks,
loc,
parfor_ind_var,
equiv_set,
pattern,
self.flags,
)
gen_nodes.append(parfor)
gen_nodes.append(ir.Assign(out_arr, target, loc))
return gen_nodes
|
def _mk_stencil_parfor(
self,
label,
in_args,
out_arr,
stencil_blocks,
index_offsets,
target,
return_type,
stencil_func,
arg_to_arr_dict,
):
"""Converts a set of stencil kernel blocks to a parfor."""
gen_nodes = []
if config.DEBUG_ARRAY_OPT == 1:
print(
"_mk_stencil_parfor",
label,
in_args,
out_arr,
index_offsets,
return_type,
stencil_func,
stencil_blocks,
)
ir_utils.dump_blocks(stencil_blocks)
in_arr = in_args[0]
# run copy propagate to replace in_args copies (e.g. a = A)
in_arr_typ = self.typemap[in_arr.name]
in_cps, out_cps = ir_utils.copy_propagate(stencil_blocks, self.typemap)
name_var_table = ir_utils.get_name_var_table(stencil_blocks)
ir_utils.apply_copy_propagate(
stencil_blocks, in_cps, name_var_table, self.typemap, self.calltypes
)
if config.DEBUG_ARRAY_OPT == 1:
print("stencil_blocks after copy_propagate")
ir_utils.dump_blocks(stencil_blocks)
ir_utils.remove_dead(stencil_blocks, self.func_ir.arg_names, self.typemap)
if config.DEBUG_ARRAY_OPT == 1:
print("stencil_blocks after removing dead code")
ir_utils.dump_blocks(stencil_blocks)
# create parfor vars
ndims = self.typemap[in_arr.name].ndim
scope = in_arr.scope
loc = in_arr.loc
parfor_vars = []
for i in range(ndims):
parfor_var = ir.Var(scope, mk_unique_var("$parfor_index_var"), loc)
self.typemap[parfor_var.name] = types.intp
parfor_vars.append(parfor_var)
start_lengths, end_lengths = self._replace_stencil_accesses(
stencil_blocks,
parfor_vars,
in_args,
index_offsets,
stencil_func,
arg_to_arr_dict,
)
if config.DEBUG_ARRAY_OPT == 1:
print("stencil_blocks after replace stencil accesses")
ir_utils.dump_blocks(stencil_blocks)
# create parfor loop nests
loopnests = []
equiv_set = self.array_analysis.get_equiv_set(label)
in_arr_dim_sizes = equiv_set.get_shape(in_arr)
assert ndims == len(in_arr_dim_sizes)
for i in range(ndims):
last_ind = self._get_stencil_last_ind(
in_arr_dim_sizes[i], end_lengths[i], gen_nodes, scope, loc
)
start_ind = self._get_stencil_start_ind(start_lengths[i], gen_nodes, scope, loc)
# start from stencil size to avoid invalid array access
loopnests.append(numba.parfor.LoopNest(parfor_vars[i], start_ind, last_ind, 1))
# We have to guarantee that the exit block has maximum label and that
# there's only one exit block for the parfor body.
# So, all return statements will change to jump to the parfor exit block.
parfor_body_exit_label = max(stencil_blocks.keys()) + 1
stencil_blocks[parfor_body_exit_label] = ir.Block(scope, loc)
exit_value_var = ir.Var(scope, mk_unique_var("$parfor_exit_value"), loc)
self.typemap[exit_value_var.name] = return_type.dtype
# create parfor index var
for_replacing_ret = []
if ndims == 1:
parfor_ind_var = parfor_vars[0]
else:
parfor_ind_var = ir.Var(scope, mk_unique_var("$parfor_index_tuple_var"), loc)
self.typemap[parfor_ind_var.name] = types.containers.UniTuple(types.intp, ndims)
tuple_call = ir.Expr.build_tuple(parfor_vars, loc)
tuple_assign = ir.Assign(tuple_call, parfor_ind_var, loc)
for_replacing_ret.append(tuple_assign)
if config.DEBUG_ARRAY_OPT == 1:
print("stencil_blocks after creating parfor index var")
ir_utils.dump_blocks(stencil_blocks)
# empty init block
init_block = ir.Block(scope, loc)
if out_arr == None:
in_arr_typ = self.typemap[in_arr.name]
shape_name = ir_utils.mk_unique_var("in_arr_shape")
shape_var = ir.Var(scope, shape_name, loc)
shape_getattr = ir.Expr.getattr(in_arr, "shape", loc)
self.typemap[shape_name] = types.containers.UniTuple(
types.intp, in_arr_typ.ndim
)
init_block.body.extend([ir.Assign(shape_getattr, shape_var, loc)])
zero_name = ir_utils.mk_unique_var("zero_val")
zero_var = ir.Var(scope, zero_name, loc)
if "cval" in stencil_func.options:
cval = stencil_func.options["cval"]
# TODO: Loosen this restriction to adhere to casting rules.
if return_type.dtype != typing.typeof.typeof(cval):
raise ValueError("cval type does not match stencil return type.")
temp2 = return_type.dtype(cval)
else:
temp2 = return_type.dtype(0)
full_const = ir.Const(temp2, loc)
self.typemap[zero_name] = return_type.dtype
init_block.body.extend([ir.Assign(full_const, zero_var, loc)])
so_name = ir_utils.mk_unique_var("stencil_output")
out_arr = ir.Var(scope, so_name, loc)
self.typemap[out_arr.name] = numba.types.npytypes.Array(
return_type.dtype, in_arr_typ.ndim, in_arr_typ.layout
)
dtype_g_np_var = ir.Var(scope, mk_unique_var("$np_g_var"), loc)
self.typemap[dtype_g_np_var.name] = types.misc.Module(np)
dtype_g_np = ir.Global("np", np, loc)
dtype_g_np_assign = ir.Assign(dtype_g_np, dtype_g_np_var, loc)
init_block.body.append(dtype_g_np_assign)
dtype_np_attr_call = ir.Expr.getattr(
dtype_g_np_var, return_type.dtype.name, loc
)
dtype_attr_var = ir.Var(scope, mk_unique_var("$np_attr_attr"), loc)
self.typemap[dtype_attr_var.name] = types.functions.NumberClass(
return_type.dtype
)
dtype_attr_assign = ir.Assign(dtype_np_attr_call, dtype_attr_var, loc)
init_block.body.append(dtype_attr_assign)
stmts = ir_utils.gen_np_call(
"full",
np.full,
out_arr,
[shape_var, zero_var, dtype_attr_var],
self.typingctx,
self.typemap,
self.calltypes,
)
equiv_set.insert_equiv(out_arr, in_arr_dim_sizes)
init_block.body.extend(stmts)
self.replace_return_with_setitem(
stencil_blocks, exit_value_var, parfor_body_exit_label
)
if config.DEBUG_ARRAY_OPT == 1:
print("stencil_blocks after replacing return")
ir_utils.dump_blocks(stencil_blocks)
setitem_call = ir.SetItem(out_arr, parfor_ind_var, exit_value_var, loc)
self.calltypes[setitem_call] = signature(
types.none,
self.typemap[out_arr.name],
self.typemap[parfor_ind_var.name],
self.typemap[out_arr.name].dtype,
)
stencil_blocks[parfor_body_exit_label].body.extend(for_replacing_ret)
stencil_blocks[parfor_body_exit_label].body.append(setitem_call)
# simplify CFG of parfor body (exit block could be simplified often)
# add dummy return to enable CFG
stencil_blocks[parfor_body_exit_label].body.append(
ir.Return(0, ir.Loc("stencilparfor_dummy", -1))
)
stencil_blocks = ir_utils.simplify_CFG(stencil_blocks)
stencil_blocks[max(stencil_blocks.keys())].body.pop()
if config.DEBUG_ARRAY_OPT == 1:
print("stencil_blocks after adding SetItem")
ir_utils.dump_blocks(stencil_blocks)
pattern = ("stencil", [start_lengths, end_lengths])
parfor = numba.parfor.Parfor(
loopnests,
init_block,
stencil_blocks,
loc,
parfor_ind_var,
equiv_set,
pattern,
self.flags,
)
gen_nodes.append(parfor)
gen_nodes.append(ir.Assign(out_arr, target, loc))
return gen_nodes
|
https://github.com/numba/numba/issues/2954
|
Serial:
[ 0 1 2 3 4 15 16 17 18 19]
Parallel:
Parallel for-loop #0 is produced from pattern '('prange', 'user')' at <ipython-input-1-f9ca7dd40116> (18)
After fusion, function parallel has 1 parallel for-loop(s) #set([0]).
---------------------------------------------------------------------------
LoweringError Traceback (most recent call last)
<ipython-input-1-f9ca7dd40116> in <module>()
27 print('\nParallel:')
28 A = np.arange(10)
---> 29 parallel(A)
30 print(A)
/home/lgarrison/anaconda2/lib/python2.7/site-packages/numba/dispatcher.pyc in _compile_for_args(self, *args, **kws)
358 e.patch_message(''.join(e.args) + help_msg)
359 # ignore the FULL_TRACEBACKS config, this needs reporting!
--> 360 raise e
361
362 def inspect_llvm(self, signature=None):
LoweringError: Failed at nopython (nopython mode backend)
Failed at nopython (nopython mode backend)
expecting {{i8*, i8*, i64, i64, i64*, [1 x i64], [1 x i64]}, i64, i64*} but got i8*
File "<ipython-input-1-f9ca7dd40116>", line 18:
def parallel(A):
<source elided>
N = len(A)
for i in numba.prange(N):
^
[1] During: lowering "id=0[LoopNest(index_variable = parfor_index.2, range = (0, $N.4, 1))]{40: <ir.Block at <ipython-input-1-f9ca7dd40116> (18)>, 81: <ir.Block at <ipython-input-1-f9ca7dd40116> (18)>, 59: <ir.Block at <ipython-input-1-f9ca7dd40116> (19)>}Var(parfor_index.2, <ipython-input-1-f9ca7dd40116> (18))" at <ipython-input-1-f9ca7dd40116> (18)
-------------------------------------------------------------------------------
This should not have happened, a problem has occurred in Numba's internals.
Please report the error message and traceback, along with a minimal reproducer
at: https://github.com/numba/numba/issues/new
If you need help writing a minimal reproducer please see:
http://matthewrocklin.com/blog/work/2018/02/28/minimal-bug-reports
If more help is needed please feel free to speak to the Numba core developers
directly at: https://gitter.im/numba/numba
Thanks in advance for your help in improving Numba!
|
LoweringError
|
def np_nanmedian(a):
if not isinstance(a, types.Array):
return
isnan = get_isnan(a.dtype)
def nanmedian_impl(arry):
# Create a temporary workspace with only non-NaN values
temp_arry = np.empty(arry.size, arry.dtype)
n = 0
for view in np.nditer(arry):
v = view.item()
if not isnan(v):
temp_arry[n] = v
n += 1
# all NaNs
if n == 0:
return np.nan
return _median_inner(temp_arry, n)
return nanmedian_impl
|
def np_nanmedian(a):
if not isinstance(a, types.Array):
return
isnan = get_isnan(a.dtype)
def nanmedian_impl(arry):
# Create a temporary workspace with only non-NaN values
temp_arry = np.empty(arry.size, arry.dtype)
n = 0
for view in np.nditer(arry):
v = view.item()
if not isnan(v):
temp_arry[n] = v
n += 1
return _median_inner(temp_arry, n)
return nanmedian_impl
|
https://github.com/numba/numba/issues/2617
|
---------------------------------------------------------------------------
AssertionError Traceback (most recent call last)
<ipython-input-15-cb312ec8285d> in <module>()
8 a = np.array([np.nan])
9 print(np.nanmedian(a))
---> 10 print(f(np.array([np.nan, np.nan])))
AssertionError:
|
AssertionError
|
def nanmedian_impl(arry):
# Create a temporary workspace with only non-NaN values
temp_arry = np.empty(arry.size, arry.dtype)
n = 0
for view in np.nditer(arry):
v = view.item()
if not isnan(v):
temp_arry[n] = v
n += 1
# all NaNs
if n == 0:
return np.nan
return _median_inner(temp_arry, n)
|
def nanmedian_impl(arry):
# Create a temporary workspace with only non-NaN values
temp_arry = np.empty(arry.size, arry.dtype)
n = 0
for view in np.nditer(arry):
v = view.item()
if not isnan(v):
temp_arry[n] = v
n += 1
return _median_inner(temp_arry, n)
|
https://github.com/numba/numba/issues/2617
|
---------------------------------------------------------------------------
AssertionError Traceback (most recent call last)
<ipython-input-15-cb312ec8285d> in <module>()
8 a = np.array([np.nan])
9 print(np.nanmedian(a))
---> 10 print(f(np.array([np.nan, np.nan])))
AssertionError:
|
AssertionError
|
def replace_return_with_setitem(self, blocks, index_vars, out_name):
"""
Find return statements in the IR and replace them with a SetItem
call of the value "returned" by the kernel into the result array.
Returns the block labels that contained return statements.
"""
ret_blocks = []
for label, block in blocks.items():
scope = block.scope
loc = block.loc
new_body = []
for stmt in block.body:
if isinstance(stmt, ir.Return):
ret_blocks.append(label)
# If 1D array then avoid the tuple construction.
if len(index_vars) == 1:
rvar = ir.Var(scope, out_name, loc)
ivar = ir.Var(scope, index_vars[0], loc)
new_body.append(ir.SetItem(rvar, ivar, stmt.value, loc))
else:
# Convert the string names of the index variables into
# ir.Var's.
var_index_vars = []
for one_var in index_vars:
index_var = ir.Var(scope, one_var, loc)
var_index_vars += [index_var]
s_index_name = ir_utils.mk_unique_var("stencil_index")
s_index_var = ir.Var(scope, s_index_name, loc)
# Build a tuple from the index ir.Var's.
tuple_call = ir.Expr.build_tuple(var_index_vars, loc)
new_body.append(ir.Assign(tuple_call, s_index_var, loc))
rvar = ir.Var(scope, out_name, loc)
# Write the return statements original value into
# the array using the tuple index.
si = ir.SetItem(rvar, s_index_var, stmt.value, loc)
new_body.append(si)
else:
new_body.append(stmt)
block.body = new_body
return ret_blocks
|
def replace_return_with_setitem(self, blocks, index_vars, out_name):
"""
Find return statements in the IR and replace them with a SetItem
call of the value "returned" by the kernel into the result array.
"""
for block in blocks.values():
scope = block.scope
loc = block.loc
new_body = []
for stmt in block.body:
if isinstance(stmt, ir.Return):
# If 1D array then avoid the tuple construction.
if len(index_vars) == 1:
rvar = ir.Var(scope, out_name, loc)
ivar = ir.Var(scope, index_vars[0], loc)
new_body.append(ir.SetItem(rvar, ivar, stmt.value, loc))
else:
# Convert the string names of the index variables into
# ir.Var's.
var_index_vars = []
for one_var in index_vars:
index_var = ir.Var(scope, one_var, loc)
var_index_vars += [index_var]
s_index_name = ir_utils.mk_unique_var("stencil_index")
s_index_var = ir.Var(scope, s_index_name, loc)
# Build a tuple from the index ir.Var's.
tuple_call = ir.Expr.build_tuple(var_index_vars, loc)
new_body.append(ir.Assign(tuple_call, s_index_var, loc))
rvar = ir.Var(scope, out_name, loc)
# Write the return statements original value into
# the array using the tuple index.
si = ir.SetItem(rvar, s_index_var, stmt.value, loc)
new_body.append(si)
else:
new_body.append(stmt)
block.body = new_body
|
https://github.com/numba/numba/issues/2603
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-24-95489a051be1> in <module>()
----> 1 kernel1(np.array([[1,2,3], [4,5,6], [7,8,9]]))
~/envs/py36/lib/python3.6/site-packages/numba/stencil.py in __call__(self, *args, **kwargs)
682 (real_ret, typemap, calltypes) = self.get_return_type(array_types)
683 new_func = self._stencil_wrapper(result, None, real_ret, typemap,
--> 684 calltypes, *array_types_full)
685
686 if result is None:
~/envs/py36/lib/python3.6/site-packages/numba/stencil.py in _stencil_wrapper(self, result, sigret, return_type, typemap, calltypes, *args)
632 break
633
--> 634 stencil_ir.blocks = ir_utils.rename_labels(stencil_ir.blocks)
635 ir_utils.remove_dels(stencil_ir.blocks)
636
~/envs/py36/lib/python3.6/site-packages/numba/ir_utils.py in rename_labels(blocks)
1032 lowering requires this order.
1033 """
-> 1034 topo_order = find_topo_order(blocks)
1035
1036 # make a block with return last if available (just for readability)
~/envs/py36/lib/python3.6/site-packages/numba/ir_utils.py in find_topo_order(blocks, cfg)
925 """
926 if cfg == None:
--> 927 cfg = compute_cfg_from_blocks(blocks)
928 post_order = []
929 seen = set()
~/envs/py36/lib/python3.6/site-packages/numba/analysis.py in compute_cfg_from_blocks(blocks)
210 for k, b in blocks.items():
211 term = b.terminator
--> 212 for target in term.get_targets():
213 cfg.add_edge(k, target)
214
|
AttributeError
|
def _stencil_wrapper(self, result, sigret, return_type, typemap, calltypes, *args):
# Overall approach:
# 1) Construct a string containing a function definition for the stencil function
# that will execute the stencil kernel. This function definition includes a
# unique stencil function name, the parameters to the stencil kernel, loop
# nests across the dimenions of the input array. Those loop nests use the
# computed stencil kernel size so as not to try to compute elements where
# elements outside the bounds of the input array would be needed.
# 2) The but of the loop nest in this new function is a special sentinel
# assignment.
# 3) Get the IR of this new function.
# 4) Split the block containing the sentinel assignment and remove the sentinel
# assignment. Insert the stencil kernel IR into the stencil function IR
# after label and variable renaming of the stencil kernel IR to prevent
# conflicts with the stencil function IR.
# 5) Compile the combined stencil function IR + stencil kernel IR into existence.
# Copy the kernel so that our changes for this callsite
# won't effect other callsites.
(kernel_copy, copy_calltypes) = self.copy_ir_with_calltypes(
self.kernel_ir, calltypes
)
# The stencil kernel body becomes the body of a loop, for which args aren't needed.
ir_utils.remove_args(kernel_copy.blocks)
first_arg = kernel_copy.arg_names[0]
in_cps, out_cps = ir_utils.copy_propagate(kernel_copy.blocks, typemap)
name_var_table = ir_utils.get_name_var_table(kernel_copy.blocks)
ir_utils.apply_copy_propagate(
kernel_copy.blocks, in_cps, name_var_table, typemap, copy_calltypes
)
if "out" in name_var_table:
raise ValueError("Cannot use the reserved word 'out' in stencil kernels.")
sentinel_name = ir_utils.get_unused_var_name("__sentinel__", name_var_table)
if config.DEBUG_ARRAY_OPT == 1:
print("name_var_table", name_var_table, sentinel_name)
the_array = args[0]
if config.DEBUG_ARRAY_OPT == 1:
print(
"_stencil_wrapper",
return_type,
return_type.dtype,
type(return_type.dtype),
args,
)
ir_utils.dump_blocks(kernel_copy.blocks)
# We generate a Numba function to execute this stencil and here
# create the unique name of this function.
stencil_func_name = "__numba_stencil_%s_%s" % (
hex(id(the_array)).replace("-", "_"),
self.id,
)
# We will put a loop nest in the generated function for each
# dimension in the input array. Here we create the name for
# the index variable for each dimension. index0, index1, ...
index_vars = []
for i in range(the_array.ndim):
index_var_name = ir_utils.get_unused_var_name("index" + str(i), name_var_table)
index_vars += [index_var_name]
# Create extra signature for out and neighborhood.
out_name = ir_utils.get_unused_var_name("out", name_var_table)
neighborhood_name = ir_utils.get_unused_var_name("neighborhood", name_var_table)
sig_extra = ""
if result is not None:
sig_extra += ", {}=None".format(out_name)
if "neighborhood" in dict(self.kws):
sig_extra += ", {}=None".format(neighborhood_name)
# Get a list of the standard indexed array names.
standard_indexed = self.options.get("standard_indexing", [])
if first_arg in standard_indexed:
raise ValueError(
"The first argument to a stencil kernel must "
"use relative indexing, not standard indexing."
)
if len(set(standard_indexed) - set(kernel_copy.arg_names)) != 0:
raise ValueError(
"Standard indexing requested for an array name "
"not present in the stencil kernel definition."
)
# Add index variables to getitems in the IR to transition the accesses
# in the kernel from relative to regular Python indexing. Returns the
# computed size of the stencil kernel and a list of the relatively indexed
# arrays.
kernel_size, relatively_indexed = self.add_indices_to_kernel(
kernel_copy, index_vars, the_array.ndim, self.neighborhood, standard_indexed
)
if self.neighborhood is None:
self.neighborhood = kernel_size
if config.DEBUG_ARRAY_OPT == 1:
print("After add_indices_to_kernel")
ir_utils.dump_blocks(kernel_copy.blocks)
# The return in the stencil kernel becomes a setitem for that
# particular point in the iteration space.
ret_blocks = self.replace_return_with_setitem(
kernel_copy.blocks, index_vars, out_name
)
if config.DEBUG_ARRAY_OPT == 1:
print("After replace_return_with_setitem", ret_blocks)
ir_utils.dump_blocks(kernel_copy.blocks)
# Start to form the new function to execute the stencil kernel.
func_text = "def {}({}{}):\n".format(
stencil_func_name, ",".join(kernel_copy.arg_names), sig_extra
)
# Get loop ranges for each dimension, which could be either int
# or variable. In the latter case we'll use the extra neighborhood
# argument to the function.
ranges = []
for i in range(the_array.ndim):
if isinstance(kernel_size[i][0], int):
lo = kernel_size[i][0]
hi = kernel_size[i][1]
else:
lo = "{}[{}][0]".format(neighborhood_name, i)
hi = "{}[{}][1]".format(neighborhood_name, i)
ranges.append((lo, hi))
# If there are more than one relatively indexed arrays, add a call to
# a function that will raise an error if any of the relatively indexed
# arrays are of different size than the first input array.
if len(relatively_indexed) > 1:
func_text += " raise_if_incompatible_array_sizes(" + first_arg
for other_array in relatively_indexed:
if other_array != first_arg:
func_text += "," + other_array
func_text += ")\n"
# Get the shape of the first input array.
shape_name = ir_utils.get_unused_var_name("full_shape", name_var_table)
func_text += " {} = {}.shape\n".format(shape_name, first_arg)
# If we have to allocate the output array (the out argument was not used)
# then us numpy.full if the user specified a cval stencil decorator option
# or np.zeros if they didn't to allocate the array.
if result is None:
if "cval" in self.options:
cval = self.options["cval"]
if return_type.dtype != typing.typeof.typeof(cval):
raise ValueError("cval type does not match stencil return type.")
out_init = "{} = np.full({}, {}, dtype=np.{})\n".format(
out_name, shape_name, cval, return_type.dtype
)
else:
out_init = "{} = np.zeros({}, dtype=np.{})\n".format(
out_name, shape_name, return_type.dtype
)
func_text += " " + out_init
offset = 1
# Add the loop nests to the new function.
for i in range(the_array.ndim):
for j in range(offset):
func_text += " "
# ranges[i][0] is the minimum index used in the i'th dimension
# but minimum's greater than 0 don't preclude any entry in the array.
# So, take the minimum of 0 and the minimum index found in the kernel
# and this will be a negative number (potentially -0). Then, we do
# unary - on that to get the positive offset in this dimension whose
# use is precluded.
# ranges[i][1] is the maximum of 0 and the observed maximum index
# in this dimension because negative maximums would not cause us to
# preclude any entry in the array from being used.
func_text += ("for {} in range(-min(0,{}),{}[{}]-max(0,{})):\n").format(
index_vars[i], ranges[i][0], shape_name, i, ranges[i][1]
)
offset += 1
for j in range(offset):
func_text += " "
# Put a sentinel in the code so we can locate it in the IR. We will
# remove this sentinel assignment and replace it with the IR for the
# stencil kernel body.
func_text += "{} = 0\n".format(sentinel_name)
func_text += " return {}\n".format(out_name)
if config.DEBUG_ARRAY_OPT == 1:
print("new stencil func text")
print(func_text)
# Force the new stencil function into existence.
exec(func_text) in globals(), locals()
stencil_func = eval(stencil_func_name)
if sigret is not None:
pysig = utils.pysignature(stencil_func)
sigret.pysig = pysig
# Get the IR for the newly created stencil function.
stencil_ir = compiler.run_frontend(stencil_func)
ir_utils.remove_dels(stencil_ir.blocks)
# rename all variables in stencil_ir afresh
var_table = ir_utils.get_name_var_table(stencil_ir.blocks)
new_var_dict = {}
reserved_names = (
[sentinel_name, out_name, neighborhood_name, shape_name]
+ kernel_copy.arg_names
+ index_vars
)
for name, var in var_table.items():
if not name in reserved_names:
new_var_dict[name] = ir_utils.mk_unique_var(name)
ir_utils.replace_var_names(stencil_ir.blocks, new_var_dict)
stencil_stub_last_label = max(stencil_ir.blocks.keys()) + 1
# Shift lables in the kernel copy so they are guaranteed unique
# and don't conflict with any labels in the stencil_ir.
kernel_copy.blocks = ir_utils.add_offset_to_labels(
kernel_copy.blocks, stencil_stub_last_label
)
new_label = max(kernel_copy.blocks.keys()) + 1
# Adjust ret_blocks to account for addition of the offset.
ret_blocks = [x + stencil_stub_last_label for x in ret_blocks]
if config.DEBUG_ARRAY_OPT == 1:
print("ret_blocks w/ offsets", ret_blocks, stencil_stub_last_label)
print("before replace sentinel stencil_ir")
ir_utils.dump_blocks(stencil_ir.blocks)
print("before replace sentinel kernel_copy")
ir_utils.dump_blocks(kernel_copy.blocks)
# Search all the block in the stencil outline for the sentinel.
for label, block in stencil_ir.blocks.items():
for i, inst in enumerate(block.body):
if isinstance(inst, ir.Assign) and inst.target.name == sentinel_name:
# We found the sentinel assignment.
loc = inst.loc
scope = block.scope
# split block across __sentinel__
# A new block is allocated for the statements prior to the
# sentinel but the new block maintains the current block
# label.
prev_block = ir.Block(scope, loc)
prev_block.body = block.body[:i]
# The current block is used for statements after sentinel.
block.body = block.body[i + 1 :]
# But the current block gets a new label.
body_first_label = min(kernel_copy.blocks.keys())
# The previous block jumps to the minimum labelled block of
# the parfor body.
prev_block.append(ir.Jump(body_first_label, loc))
# Add all the parfor loop body blocks to the gufunc
# function's IR.
for l, b in kernel_copy.blocks.items():
stencil_ir.blocks[l] = b
stencil_ir.blocks[new_label] = block
stencil_ir.blocks[label] = prev_block
# Add a jump from all the blocks that previously contained
# a return in the stencil kernel to the block
# containing statements after the sentinel.
for ret_block in ret_blocks:
stencil_ir.blocks[ret_block].append(ir.Jump(new_label, loc))
break
else:
continue
break
stencil_ir.blocks = ir_utils.rename_labels(stencil_ir.blocks)
ir_utils.remove_dels(stencil_ir.blocks)
assert isinstance(the_array, types.Type)
array_types = args
new_stencil_param_types = list(array_types)
if config.DEBUG_ARRAY_OPT == 1:
print("new_stencil_param_types", new_stencil_param_types)
ir_utils.dump_blocks(stencil_ir.blocks)
# Compile the combined stencil function with the replaced loop
# body in it.
new_func = compiler.compile_ir(
self._typingctx,
self._targetctx,
stencil_ir,
new_stencil_param_types,
None,
compiler.DEFAULT_FLAGS,
{},
)
return new_func
|
def _stencil_wrapper(self, result, sigret, return_type, typemap, calltypes, *args):
# Overall approach:
# 1) Construct a string containing a function definition for the stencil function
# that will execute the stencil kernel. This function definition includes a
# unique stencil function name, the parameters to the stencil kernel, loop
# nests across the dimenions of the input array. Those loop nests use the
# computed stencil kernel size so as not to try to compute elements where
# elements outside the bounds of the input array would be needed.
# 2) The but of the loop nest in this new function is a special sentinel
# assignment.
# 3) Get the IR of this new function.
# 4) Split the block containing the sentinel assignment and remove the sentinel
# assignment. Insert the stencil kernel IR into the stencil function IR
# after label and variable renaming of the stencil kernel IR to prevent
# conflicts with the stencil function IR.
# 5) Compile the combined stencil function IR + stencil kernel IR into existence.
# Copy the kernel so that our changes for this callsite
# won't effect other callsites.
(kernel_copy, copy_calltypes) = self.copy_ir_with_calltypes(
self.kernel_ir, calltypes
)
# The stencil kernel body becomes the body of a loop, for which args aren't needed.
ir_utils.remove_args(kernel_copy.blocks)
first_arg = kernel_copy.arg_names[0]
in_cps, out_cps = ir_utils.copy_propagate(kernel_copy.blocks, typemap)
name_var_table = ir_utils.get_name_var_table(kernel_copy.blocks)
ir_utils.apply_copy_propagate(
kernel_copy.blocks, in_cps, name_var_table, typemap, copy_calltypes
)
if "out" in name_var_table:
raise ValueError("Cannot use the reserved word 'out' in stencil kernels.")
sentinel_name = ir_utils.get_unused_var_name("__sentinel__", name_var_table)
if config.DEBUG_ARRAY_OPT == 1:
print("name_var_table", name_var_table, sentinel_name)
the_array = args[0]
if config.DEBUG_ARRAY_OPT == 1:
print(
"_stencil_wrapper",
return_type,
return_type.dtype,
type(return_type.dtype),
args,
)
ir_utils.dump_blocks(kernel_copy.blocks)
# We generate a Numba function to execute this stencil and here
# create the unique name of this function.
stencil_func_name = "__numba_stencil_%s_%s" % (
hex(id(the_array)).replace("-", "_"),
self.id,
)
# We will put a loop nest in the generated function for each
# dimension in the input array. Here we create the name for
# the index variable for each dimension. index0, index1, ...
index_vars = []
for i in range(the_array.ndim):
index_var_name = ir_utils.get_unused_var_name("index" + str(i), name_var_table)
index_vars += [index_var_name]
# Create extra signature for out and neighborhood.
out_name = ir_utils.get_unused_var_name("out", name_var_table)
neighborhood_name = ir_utils.get_unused_var_name("neighborhood", name_var_table)
sig_extra = ""
if result is not None:
sig_extra += ", {}=None".format(out_name)
if "neighborhood" in dict(self.kws):
sig_extra += ", {}=None".format(neighborhood_name)
# Get a list of the standard indexed array names.
standard_indexed = self.options.get("standard_indexing", [])
if first_arg in standard_indexed:
raise ValueError(
"The first argument to a stencil kernel must "
"use relative indexing, not standard indexing."
)
if len(set(standard_indexed) - set(kernel_copy.arg_names)) != 0:
raise ValueError(
"Standard indexing requested for an array name "
"not present in the stencil kernel definition."
)
# Add index variables to getitems in the IR to transition the accesses
# in the kernel from relative to regular Python indexing. Returns the
# computed size of the stencil kernel and a list of the relatively indexed
# arrays.
kernel_size, relatively_indexed = self.add_indices_to_kernel(
kernel_copy, index_vars, the_array.ndim, self.neighborhood, standard_indexed
)
if self.neighborhood is None:
self.neighborhood = kernel_size
if config.DEBUG_ARRAY_OPT == 1:
ir_utils.dump_blocks(kernel_copy.blocks)
# The return in the stencil kernel becomes a setitem for that
# particular point in the iteration space.
self.replace_return_with_setitem(kernel_copy.blocks, index_vars, out_name)
# Start to form the new function to execute the stencil kernel.
func_text = "def {}({}{}):\n".format(
stencil_func_name, ",".join(kernel_copy.arg_names), sig_extra
)
# Get loop ranges for each dimension, which could be either int
# or variable. In the latter case we'll use the extra neighborhood
# argument to the function.
ranges = []
for i in range(the_array.ndim):
if isinstance(kernel_size[i][0], int):
lo = kernel_size[i][0]
hi = kernel_size[i][1]
else:
lo = "{}[{}][0]".format(neighborhood_name, i)
hi = "{}[{}][1]".format(neighborhood_name, i)
ranges.append((lo, hi))
# If there are more than one relatively indexed arrays, add a call to
# a function that will raise an error if any of the relatively indexed
# arrays are of different size than the first input array.
if len(relatively_indexed) > 1:
func_text += " raise_if_incompatible_array_sizes(" + first_arg
for other_array in relatively_indexed:
if other_array != first_arg:
func_text += "," + other_array
func_text += ")\n"
# Get the shape of the first input array.
shape_name = ir_utils.get_unused_var_name("full_shape", name_var_table)
func_text += " {} = {}.shape\n".format(shape_name, first_arg)
# If we have to allocate the output array (the out argument was not used)
# then us numpy.full if the user specified a cval stencil decorator option
# or np.zeros if they didn't to allocate the array.
if result is None:
if "cval" in self.options:
cval = self.options["cval"]
if return_type.dtype != typing.typeof.typeof(cval):
raise ValueError("cval type does not match stencil return type.")
out_init = "{} = np.full({}, {}, dtype=np.{})\n".format(
out_name, shape_name, cval, return_type.dtype
)
else:
out_init = "{} = np.zeros({}, dtype=np.{})\n".format(
out_name, shape_name, return_type.dtype
)
func_text += " " + out_init
offset = 1
# Add the loop nests to the new function.
for i in range(the_array.ndim):
for j in range(offset):
func_text += " "
# ranges[i][0] is the minimum index used in the i'th dimension
# but minimum's greater than 0 don't preclude any entry in the array.
# So, take the minimum of 0 and the minimum index found in the kernel
# and this will be a negative number (potentially -0). Then, we do
# unary - on that to get the positive offset in this dimension whose
# use is precluded.
# ranges[i][1] is the maximum of 0 and the observed maximum index
# in this dimension because negative maximums would not cause us to
# preclude any entry in the array from being used.
func_text += ("for {} in range(-min(0,{}),{}[{}]-max(0,{})):\n").format(
index_vars[i], ranges[i][0], shape_name, i, ranges[i][1]
)
offset += 1
for j in range(offset):
func_text += " "
# Put a sentinel in the code so we can locate it in the IR. We will
# remove this sentinel assignment and replace it with the IR for the
# stencil kernel body.
func_text += "{} = 0\n".format(sentinel_name)
func_text += " return {}\n".format(out_name)
if config.DEBUG_ARRAY_OPT == 1:
print("new stencil func text")
print(func_text)
# Force the new stencil function into existence.
exec(func_text) in globals(), locals()
stencil_func = eval(stencil_func_name)
if sigret is not None:
pysig = utils.pysignature(stencil_func)
sigret.pysig = pysig
# Get the IR for the newly created stencil function.
stencil_ir = compiler.run_frontend(stencil_func)
ir_utils.remove_dels(stencil_ir.blocks)
# rename all variables in stencil_ir afresh
var_table = ir_utils.get_name_var_table(stencil_ir.blocks)
new_var_dict = {}
reserved_names = (
[sentinel_name, out_name, neighborhood_name, shape_name]
+ kernel_copy.arg_names
+ index_vars
)
for name, var in var_table.items():
if not name in reserved_names:
new_var_dict[name] = ir_utils.mk_unique_var(name)
ir_utils.replace_var_names(stencil_ir.blocks, new_var_dict)
stencil_stub_last_label = max(stencil_ir.blocks.keys()) + 1
# Shift lables in the kernel copy so they are guaranteed unique
# and don't conflict with any labels in the stencil_ir.
kernel_copy.blocks = ir_utils.add_offset_to_labels(
kernel_copy.blocks, stencil_stub_last_label
)
new_label = max(kernel_copy.blocks.keys()) + 1
if config.DEBUG_ARRAY_OPT == 1:
print("before replace sentinel stencil_ir")
ir_utils.dump_blocks(stencil_ir.blocks)
print("before replace sentinel kernel_copy")
ir_utils.dump_blocks(kernel_copy.blocks)
# Search all the block in the stencil outline for the sentinel.
for label, block in stencil_ir.blocks.items():
for i, inst in enumerate(block.body):
if isinstance(inst, ir.Assign) and inst.target.name == sentinel_name:
# We found the sentinel assignment.
loc = inst.loc
scope = block.scope
# split block across __sentinel__
# A new block is allocated for the statements prior to the
# sentinel but the new block maintains the current block
# label.
prev_block = ir.Block(scope, loc)
prev_block.body = block.body[:i]
# The current block is used for statements after sentinel.
block.body = block.body[i + 1 :]
# But the current block gets a new label.
body_first_label = min(kernel_copy.blocks.keys())
# The previous block jumps to the minimum labelled block of
# the parfor body.
prev_block.append(ir.Jump(body_first_label, loc))
# Add all the parfor loop body blocks to the gufunc
# function's IR.
for l, b in kernel_copy.blocks.items():
stencil_ir.blocks[l] = b
body_last_label = max(kernel_copy.blocks.keys())
stencil_ir.blocks[new_label] = block
stencil_ir.blocks[label] = prev_block
# Add a jump from the last parfor body block to the block
# containing statements after the sentinel.
stencil_ir.blocks[body_last_label].append(ir.Jump(new_label, loc))
break
else:
continue
break
stencil_ir.blocks = ir_utils.rename_labels(stencil_ir.blocks)
ir_utils.remove_dels(stencil_ir.blocks)
assert isinstance(the_array, types.Type)
array_types = args
new_stencil_param_types = list(array_types)
if config.DEBUG_ARRAY_OPT == 1:
print("new_stencil_param_types", new_stencil_param_types)
ir_utils.dump_blocks(stencil_ir.blocks)
# Compile the combined stencil function with the replaced loop
# body in it.
new_func = compiler.compile_ir(
self._typingctx,
self._targetctx,
stencil_ir,
new_stencil_param_types,
None,
compiler.DEFAULT_FLAGS,
{},
)
return new_func
|
https://github.com/numba/numba/issues/2603
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-24-95489a051be1> in <module>()
----> 1 kernel1(np.array([[1,2,3], [4,5,6], [7,8,9]]))
~/envs/py36/lib/python3.6/site-packages/numba/stencil.py in __call__(self, *args, **kwargs)
682 (real_ret, typemap, calltypes) = self.get_return_type(array_types)
683 new_func = self._stencil_wrapper(result, None, real_ret, typemap,
--> 684 calltypes, *array_types_full)
685
686 if result is None:
~/envs/py36/lib/python3.6/site-packages/numba/stencil.py in _stencil_wrapper(self, result, sigret, return_type, typemap, calltypes, *args)
632 break
633
--> 634 stencil_ir.blocks = ir_utils.rename_labels(stencil_ir.blocks)
635 ir_utils.remove_dels(stencil_ir.blocks)
636
~/envs/py36/lib/python3.6/site-packages/numba/ir_utils.py in rename_labels(blocks)
1032 lowering requires this order.
1033 """
-> 1034 topo_order = find_topo_order(blocks)
1035
1036 # make a block with return last if available (just for readability)
~/envs/py36/lib/python3.6/site-packages/numba/ir_utils.py in find_topo_order(blocks, cfg)
925 """
926 if cfg == None:
--> 927 cfg = compute_cfg_from_blocks(blocks)
928 post_order = []
929 seen = set()
~/envs/py36/lib/python3.6/site-packages/numba/analysis.py in compute_cfg_from_blocks(blocks)
210 for k, b in blocks.items():
211 term = b.terminator
--> 212 for target in term.get_targets():
213 cfg.add_edge(k, target)
214
|
AttributeError
|
def _mk_stencil_parfor(
self,
label,
in_args,
out_arr,
stencil_blocks,
index_offsets,
target,
return_type,
stencil_func,
arg_to_arr_dict,
):
"""Converts a set of stencil kernel blocks to a parfor."""
gen_nodes = []
if config.DEBUG_ARRAY_OPT == 1:
print(
"_mk_stencil_parfor",
label,
in_args,
out_arr,
index_offsets,
return_type,
stencil_func,
stencil_blocks,
)
ir_utils.dump_blocks(stencil_blocks)
in_arr = in_args[0]
# run copy propagate to replace in_args copies (e.g. a = A)
in_arr_typ = self.typemap[in_arr.name]
in_cps, out_cps = ir_utils.copy_propagate(stencil_blocks, self.typemap)
name_var_table = ir_utils.get_name_var_table(stencil_blocks)
ir_utils.apply_copy_propagate(
stencil_blocks, in_cps, name_var_table, self.typemap, self.calltypes
)
if config.DEBUG_ARRAY_OPT == 1:
print("stencil_blocks after copy_propagate")
ir_utils.dump_blocks(stencil_blocks)
ir_utils.remove_dead(stencil_blocks, self.func_ir.arg_names, self.typemap)
if config.DEBUG_ARRAY_OPT == 1:
print("stencil_blocks after removing dead code")
ir_utils.dump_blocks(stencil_blocks)
# create parfor vars
ndims = self.typemap[in_arr.name].ndim
scope = in_arr.scope
loc = in_arr.loc
parfor_vars = []
for i in range(ndims):
parfor_var = ir.Var(scope, mk_unique_var("$parfor_index_var"), loc)
self.typemap[parfor_var.name] = types.intp
parfor_vars.append(parfor_var)
start_lengths, end_lengths = self._replace_stencil_accesses(
stencil_blocks,
parfor_vars,
in_args,
index_offsets,
stencil_func,
arg_to_arr_dict,
)
if config.DEBUG_ARRAY_OPT == 1:
print("stencil_blocks after replace stencil accesses")
ir_utils.dump_blocks(stencil_blocks)
# create parfor loop nests
loopnests = []
equiv_set = self.array_analysis.get_equiv_set(label)
in_arr_dim_sizes = equiv_set.get_shape(in_arr.name)
assert ndims == len(in_arr_dim_sizes)
for i in range(ndims):
last_ind = self._get_stencil_last_ind(
in_arr_dim_sizes[i], end_lengths[i], gen_nodes, scope, loc
)
start_ind = self._get_stencil_start_ind(start_lengths[i], gen_nodes, scope, loc)
# start from stencil size to avoid invalid array access
loopnests.append(numba.parfor.LoopNest(parfor_vars[i], start_ind, last_ind, 1))
# We have to guarantee that the exit block has maximum label and that
# there's only one exit block for the parfor body.
# So, all return statements will change to jump to the parfor exit block.
parfor_body_exit_label = max(stencil_blocks.keys()) + 1
stencil_blocks[parfor_body_exit_label] = ir.Block(scope, loc)
exit_value_var = ir.Var(scope, mk_unique_var("$parfor_exit_value"), loc)
self.typemap[exit_value_var.name] = return_type.dtype
# create parfor index var
for_replacing_ret = []
if ndims == 1:
parfor_ind_var = parfor_vars[0]
else:
parfor_ind_var = ir.Var(scope, mk_unique_var("$parfor_index_tuple_var"), loc)
self.typemap[parfor_ind_var.name] = types.containers.UniTuple(types.intp, ndims)
tuple_call = ir.Expr.build_tuple(parfor_vars, loc)
tuple_assign = ir.Assign(tuple_call, parfor_ind_var, loc)
for_replacing_ret.append(tuple_assign)
if config.DEBUG_ARRAY_OPT == 1:
print("stencil_blocks after creating parfor index var")
ir_utils.dump_blocks(stencil_blocks)
# empty init block
init_block = ir.Block(scope, loc)
if out_arr == None:
in_arr_typ = self.typemap[in_arr.name]
shape_name = ir_utils.mk_unique_var("in_arr_shape")
shape_var = ir.Var(scope, shape_name, loc)
shape_getattr = ir.Expr.getattr(in_arr, "shape", loc)
self.typemap[shape_name] = types.containers.UniTuple(
types.intp, in_arr_typ.ndim
)
init_block.body.extend([ir.Assign(shape_getattr, shape_var, loc)])
zero_name = ir_utils.mk_unique_var("zero_val")
zero_var = ir.Var(scope, zero_name, loc)
if "cval" in stencil_func.options:
cval = stencil_func.options["cval"]
# TODO: Loosen this restriction to adhere to casting rules.
if return_type.dtype != typing.typeof.typeof(cval):
raise ValueError("cval type does not match stencil return type.")
temp2 = return_type.dtype(cval)
else:
temp2 = return_type.dtype(0)
full_const = ir.Const(temp2, loc)
self.typemap[zero_name] = return_type.dtype
init_block.body.extend([ir.Assign(full_const, zero_var, loc)])
so_name = ir_utils.mk_unique_var("stencil_output")
out_arr = ir.Var(scope, so_name, loc)
self.typemap[out_arr.name] = numba.types.npytypes.Array(
return_type.dtype, in_arr_typ.ndim, in_arr_typ.layout
)
dtype_g_np_var = ir.Var(scope, mk_unique_var("$np_g_var"), loc)
self.typemap[dtype_g_np_var.name] = types.misc.Module(np)
dtype_g_np = ir.Global("np", np, loc)
dtype_g_np_assign = ir.Assign(dtype_g_np, dtype_g_np_var, loc)
init_block.body.append(dtype_g_np_assign)
dtype_np_attr_call = ir.Expr.getattr(
dtype_g_np_var, return_type.dtype.name, loc
)
dtype_attr_var = ir.Var(scope, mk_unique_var("$np_attr_attr"), loc)
self.typemap[dtype_attr_var.name] = types.functions.NumberClass(
return_type.dtype
)
dtype_attr_assign = ir.Assign(dtype_np_attr_call, dtype_attr_var, loc)
init_block.body.append(dtype_attr_assign)
stmts = ir_utils.gen_np_call(
"full",
np.full,
out_arr,
[shape_var, zero_var, dtype_attr_var],
self.typingctx,
self.typemap,
self.calltypes,
)
equiv_set.insert_equiv(out_arr, in_arr_dim_sizes)
init_block.body.extend(stmts)
self.replace_return_with_setitem(
stencil_blocks, exit_value_var, parfor_body_exit_label
)
if config.DEBUG_ARRAY_OPT == 1:
print("stencil_blocks after replacing return")
ir_utils.dump_blocks(stencil_blocks)
setitem_call = ir.SetItem(out_arr, parfor_ind_var, exit_value_var, loc)
self.calltypes[setitem_call] = signature(
types.none,
self.typemap[out_arr.name],
self.typemap[parfor_ind_var.name],
self.typemap[out_arr.name].dtype,
)
stencil_blocks[parfor_body_exit_label].body.extend(for_replacing_ret)
stencil_blocks[parfor_body_exit_label].body.append(setitem_call)
if config.DEBUG_ARRAY_OPT == 1:
print("stencil_blocks after adding SetItem")
ir_utils.dump_blocks(stencil_blocks)
parfor = numba.parfor.Parfor(
loopnests, init_block, stencil_blocks, loc, parfor_ind_var, equiv_set
)
parfor.patterns = [("stencil", [start_lengths, end_lengths])]
gen_nodes.append(parfor)
gen_nodes.append(ir.Assign(out_arr, target, loc))
return gen_nodes
|
def _mk_stencil_parfor(
self,
label,
in_args,
out_arr,
stencil_blocks,
index_offsets,
target,
return_type,
stencil_func,
arg_to_arr_dict,
):
"""Converts a set of stencil kernel blocks to a parfor."""
gen_nodes = []
if config.DEBUG_ARRAY_OPT == 1:
print(
"_mk_stencil_parfor",
label,
in_args,
out_arr,
index_offsets,
return_type,
stencil_func,
stencil_blocks,
)
ir_utils.dump_blocks(stencil_blocks)
in_arr = in_args[0]
# run copy propagate to replace in_args copies (e.g. a = A)
in_arr_typ = self.typemap[in_arr.name]
in_cps, out_cps = ir_utils.copy_propagate(stencil_blocks, self.typemap)
name_var_table = ir_utils.get_name_var_table(stencil_blocks)
ir_utils.apply_copy_propagate(
stencil_blocks, in_cps, name_var_table, self.typemap, self.calltypes
)
if config.DEBUG_ARRAY_OPT == 1:
print("stencil_blocks after copy_propagate")
ir_utils.dump_blocks(stencil_blocks)
ir_utils.remove_dead(stencil_blocks, self.func_ir.arg_names, self.typemap)
if config.DEBUG_ARRAY_OPT == 1:
print("stencil_blocks after removing dead code")
ir_utils.dump_blocks(stencil_blocks)
# create parfor vars
ndims = self.typemap[in_arr.name].ndim
scope = in_arr.scope
loc = in_arr.loc
parfor_vars = []
for i in range(ndims):
parfor_var = ir.Var(scope, mk_unique_var("$parfor_index_var"), loc)
self.typemap[parfor_var.name] = types.intp
parfor_vars.append(parfor_var)
start_lengths, end_lengths = self._replace_stencil_accesses(
stencil_blocks,
parfor_vars,
in_args,
index_offsets,
stencil_func,
arg_to_arr_dict,
)
# create parfor loop nests
loopnests = []
equiv_set = self.array_analysis.get_equiv_set(label)
in_arr_dim_sizes = equiv_set.get_shape(in_arr.name)
assert ndims == len(in_arr_dim_sizes)
for i in range(ndims):
last_ind = self._get_stencil_last_ind(
in_arr_dim_sizes[i], end_lengths[i], gen_nodes, scope, loc
)
start_ind = self._get_stencil_start_ind(start_lengths[i], gen_nodes, scope, loc)
# start from stencil size to avoid invalid array access
loopnests.append(numba.parfor.LoopNest(parfor_vars[i], start_ind, last_ind, 1))
# replace return value to setitem to output array
return_node = stencil_blocks[max(stencil_blocks.keys())].body.pop()
assert isinstance(return_node, ir.Return)
last_node = stencil_blocks[max(stencil_blocks.keys())].body.pop()
while (
not isinstance(last_node, ir.Assign)
or not isinstance(last_node.value, ir.Expr)
or not last_node.value.op == "cast"
):
last_node = stencil_blocks[max(stencil_blocks.keys())].body.pop()
assert isinstance(last_node, ir.Assign)
assert isinstance(last_node.value, ir.Expr)
assert last_node.value.op == "cast"
return_val = last_node.value.value
# create parfor index var
if ndims == 1:
parfor_ind_var = parfor_vars[0]
else:
parfor_ind_var = ir.Var(scope, mk_unique_var("$parfor_index_tuple_var"), loc)
self.typemap[parfor_ind_var.name] = types.containers.UniTuple(types.intp, ndims)
tuple_call = ir.Expr.build_tuple(parfor_vars, loc)
tuple_assign = ir.Assign(tuple_call, parfor_ind_var, loc)
stencil_blocks[max(stencil_blocks.keys())].body.append(tuple_assign)
# empty init block
init_block = ir.Block(scope, loc)
if out_arr == None:
in_arr_typ = self.typemap[in_arr.name]
shape_name = ir_utils.mk_unique_var("in_arr_shape")
shape_var = ir.Var(scope, shape_name, loc)
shape_getattr = ir.Expr.getattr(in_arr, "shape", loc)
self.typemap[shape_name] = types.containers.UniTuple(
types.intp, in_arr_typ.ndim
)
init_block.body.extend([ir.Assign(shape_getattr, shape_var, loc)])
zero_name = ir_utils.mk_unique_var("zero_val")
zero_var = ir.Var(scope, zero_name, loc)
if "cval" in stencil_func.options:
cval = stencil_func.options["cval"]
# TODO: Loosen this restriction to adhere to casting rules.
if return_type.dtype != typing.typeof.typeof(cval):
raise ValueError("cval type does not match stencil return type.")
temp2 = return_type.dtype(cval)
else:
temp2 = return_type.dtype(0)
full_const = ir.Const(temp2, loc)
self.typemap[zero_name] = return_type.dtype
init_block.body.extend([ir.Assign(full_const, zero_var, loc)])
so_name = ir_utils.mk_unique_var("stencil_output")
out_arr = ir.Var(scope, so_name, loc)
self.typemap[out_arr.name] = numba.types.npytypes.Array(
return_type.dtype, in_arr_typ.ndim, in_arr_typ.layout
)
dtype_g_np_var = ir.Var(scope, mk_unique_var("$np_g_var"), loc)
self.typemap[dtype_g_np_var.name] = types.misc.Module(np)
dtype_g_np = ir.Global("np", np, loc)
dtype_g_np_assign = ir.Assign(dtype_g_np, dtype_g_np_var, loc)
init_block.body.append(dtype_g_np_assign)
dtype_np_attr_call = ir.Expr.getattr(
dtype_g_np_var, return_type.dtype.name, loc
)
dtype_attr_var = ir.Var(scope, mk_unique_var("$np_attr_attr"), loc)
self.typemap[dtype_attr_var.name] = types.functions.NumberClass(
return_type.dtype
)
dtype_attr_assign = ir.Assign(dtype_np_attr_call, dtype_attr_var, loc)
init_block.body.append(dtype_attr_assign)
stmts = ir_utils.gen_np_call(
"full",
np.full,
out_arr,
[shape_var, zero_var, dtype_attr_var],
self.typingctx,
self.typemap,
self.calltypes,
)
equiv_set.insert_equiv(out_arr, in_arr_dim_sizes)
init_block.body.extend(stmts)
setitem_call = ir.SetItem(out_arr, parfor_ind_var, return_val, loc)
self.calltypes[setitem_call] = signature(
types.none,
self.typemap[out_arr.name],
self.typemap[parfor_ind_var.name],
self.typemap[out_arr.name].dtype,
)
stencil_blocks[max(stencil_blocks.keys())].body.append(setitem_call)
parfor = numba.parfor.Parfor(
loopnests, init_block, stencil_blocks, loc, parfor_ind_var, equiv_set
)
parfor.patterns = [("stencil", [start_lengths, end_lengths])]
gen_nodes.append(parfor)
gen_nodes.append(ir.Assign(out_arr, target, loc))
return gen_nodes
|
https://github.com/numba/numba/issues/2603
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-24-95489a051be1> in <module>()
----> 1 kernel1(np.array([[1,2,3], [4,5,6], [7,8,9]]))
~/envs/py36/lib/python3.6/site-packages/numba/stencil.py in __call__(self, *args, **kwargs)
682 (real_ret, typemap, calltypes) = self.get_return_type(array_types)
683 new_func = self._stencil_wrapper(result, None, real_ret, typemap,
--> 684 calltypes, *array_types_full)
685
686 if result is None:
~/envs/py36/lib/python3.6/site-packages/numba/stencil.py in _stencil_wrapper(self, result, sigret, return_type, typemap, calltypes, *args)
632 break
633
--> 634 stencil_ir.blocks = ir_utils.rename_labels(stencil_ir.blocks)
635 ir_utils.remove_dels(stencil_ir.blocks)
636
~/envs/py36/lib/python3.6/site-packages/numba/ir_utils.py in rename_labels(blocks)
1032 lowering requires this order.
1033 """
-> 1034 topo_order = find_topo_order(blocks)
1035
1036 # make a block with return last if available (just for readability)
~/envs/py36/lib/python3.6/site-packages/numba/ir_utils.py in find_topo_order(blocks, cfg)
925 """
926 if cfg == None:
--> 927 cfg = compute_cfg_from_blocks(blocks)
928 post_order = []
929 seen = set()
~/envs/py36/lib/python3.6/site-packages/numba/analysis.py in compute_cfg_from_blocks(blocks)
210 for k, b in blocks.items():
211 term = b.terminator
--> 212 for target in term.get_targets():
213 cfg.add_edge(k, target)
214
|
AttributeError
|
def _extract_loop_lifting_candidates(cfg, blocks):
"""
Returns a list of loops that are candidate for loop lifting
"""
# check well-formed-ness of the loop
def same_exit_point(loop):
"all exits must point to the same location"
outedges = set()
for k in loop.exits:
succs = set(x for x, _ in cfg.successors(k))
if not succs:
# If the exit point has no successor, it contains an return
# statement, which is not handled by the looplifting code.
# Thus, this loop is not a candidate.
return False
outedges |= succs
return len(outedges) == 1
def one_entry(loop):
"there is one entry"
return len(loop.entries) == 1
def cannot_yield(loop):
"cannot have yield inside the loop"
insiders = set(loop.body) | set(loop.entries) | set(loop.exits)
for blk in map(blocks.__getitem__, insiders):
for inst in blk.body:
if isinstance(inst, ir.Assign):
if isinstance(inst.value, ir.Yield):
return False
return True
return [
loop
for loop in find_top_level_loops(cfg)
if same_exit_point(loop) and one_entry(loop) and cannot_yield(loop)
]
|
def _extract_loop_lifting_candidates(cfg, blocks):
"""
Returns a list of loops that are candidate for loop lifting
"""
# check well-formed-ness of the loop
def same_exit_point(loop):
"all exits must point to the same location"
outedges = set()
for k in loop.exits:
outedges |= set(x for x, _ in cfg.successors(k))
return len(outedges) == 1
def one_entry(loop):
"there is one entry"
return len(loop.entries) == 1
def cannot_yield(loop):
"cannot have yield inside the loop"
insiders = set(loop.body) | set(loop.entries) | set(loop.exits)
for blk in map(blocks.__getitem__, insiders):
for inst in blk.body:
if isinstance(inst, ir.Assign):
if isinstance(inst.value, ir.Yield):
return False
return True
return [
loop
for loop in find_top_level_loops(cfg)
if same_exit_point(loop) and one_entry(loop) and cannot_yield(loop)
]
|
https://github.com/numba/numba/issues/2561
|
Traceback (most recent call last):
File "numbabug.py", line 11, in <module>
numbafunc([])
File "/home/tuukka/.local/lib/python3.5/site-packages/numba/dispatcher.py", line 307, in _compile_for_args
return self.compile(tuple(argtypes))
File "/home/tuukka/.local/lib/python3.5/site-packages/numba/dispatcher.py", line 579, in compile
cres = self._compiler.compile(args, return_type)
File "/home/tuukka/.local/lib/python3.5/site-packages/numba/dispatcher.py", line 80, in compile
flags=flags, locals=self.locals)
File "/home/tuukka/.local/lib/python3.5/site-packages/numba/compiler.py", line 763, in compile_extra
return pipeline.compile_extra(func)
File "/home/tuukka/.local/lib/python3.5/site-packages/numba/compiler.py", line 360, in compile_extra
return self._compile_bytecode()
File "/home/tuukka/.local/lib/python3.5/site-packages/numba/compiler.py", line 722, in _compile_bytecode
return self._compile_core()
File "/home/tuukka/.local/lib/python3.5/site-packages/numba/compiler.py", line 709, in _compile_core
res = pm.run(self.status)
File "/home/tuukka/.local/lib/python3.5/site-packages/numba/compiler.py", line 246, in run
raise patched_exception
File "/home/tuukka/.local/lib/python3.5/site-packages/numba/compiler.py", line 238, in run
stage()
File "/home/tuukka/.local/lib/python3.5/site-packages/numba/compiler.py", line 431, in stage_objectmode_frontend
cres = self.frontend_looplift()
File "/home/tuukka/.local/lib/python3.5/site-packages/numba/compiler.py", line 411, in frontend_looplift
flags=loop_flags)
File "/home/tuukka/.local/lib/python3.5/site-packages/numba/transforms.py", line 191, in loop_lifting
func_ir.variable_lifetime.livemap)
File "/home/tuukka/.local/lib/python3.5/site-packages/numba/transforms.py", line 59, in _loop_lift_get_candidate_infos
[(returnto, _)] = cfg.successors(an_exit) # requirement checked earlier
ValueError: Failed at object (object mode frontend)
not enough values to unpack (expected 1, got 0)
|
ValueError
|
def same_exit_point(loop):
"all exits must point to the same location"
outedges = set()
for k in loop.exits:
succs = set(x for x, _ in cfg.successors(k))
if not succs:
# If the exit point has no successor, it contains an return
# statement, which is not handled by the looplifting code.
# Thus, this loop is not a candidate.
return False
outedges |= succs
return len(outedges) == 1
|
def same_exit_point(loop):
"all exits must point to the same location"
outedges = set()
for k in loop.exits:
outedges |= set(x for x, _ in cfg.successors(k))
return len(outedges) == 1
|
https://github.com/numba/numba/issues/2561
|
Traceback (most recent call last):
File "numbabug.py", line 11, in <module>
numbafunc([])
File "/home/tuukka/.local/lib/python3.5/site-packages/numba/dispatcher.py", line 307, in _compile_for_args
return self.compile(tuple(argtypes))
File "/home/tuukka/.local/lib/python3.5/site-packages/numba/dispatcher.py", line 579, in compile
cres = self._compiler.compile(args, return_type)
File "/home/tuukka/.local/lib/python3.5/site-packages/numba/dispatcher.py", line 80, in compile
flags=flags, locals=self.locals)
File "/home/tuukka/.local/lib/python3.5/site-packages/numba/compiler.py", line 763, in compile_extra
return pipeline.compile_extra(func)
File "/home/tuukka/.local/lib/python3.5/site-packages/numba/compiler.py", line 360, in compile_extra
return self._compile_bytecode()
File "/home/tuukka/.local/lib/python3.5/site-packages/numba/compiler.py", line 722, in _compile_bytecode
return self._compile_core()
File "/home/tuukka/.local/lib/python3.5/site-packages/numba/compiler.py", line 709, in _compile_core
res = pm.run(self.status)
File "/home/tuukka/.local/lib/python3.5/site-packages/numba/compiler.py", line 246, in run
raise patched_exception
File "/home/tuukka/.local/lib/python3.5/site-packages/numba/compiler.py", line 238, in run
stage()
File "/home/tuukka/.local/lib/python3.5/site-packages/numba/compiler.py", line 431, in stage_objectmode_frontend
cres = self.frontend_looplift()
File "/home/tuukka/.local/lib/python3.5/site-packages/numba/compiler.py", line 411, in frontend_looplift
flags=loop_flags)
File "/home/tuukka/.local/lib/python3.5/site-packages/numba/transforms.py", line 191, in loop_lifting
func_ir.variable_lifetime.livemap)
File "/home/tuukka/.local/lib/python3.5/site-packages/numba/transforms.py", line 59, in _loop_lift_get_candidate_infos
[(returnto, _)] = cfg.successors(an_exit) # requirement checked earlier
ValueError: Failed at object (object mode frontend)
not enough values to unpack (expected 1, got 0)
|
ValueError
|
def _loop_lift_get_candidate_infos(cfg, blocks, livemap):
"""
Returns information on looplifting candidates.
"""
loops = _extract_loop_lifting_candidates(cfg, blocks)
loopinfos = []
for loop in loops:
[callfrom] = loop.entries # requirement checked earlier
# This loop exists to handle missing cfg.successors, only *an* exit is
# needed. Walk in stable order, higher exits first.
for an_exit in iter(sorted(loop.exits)):
# requirement checked earlier
ret = [x for x in cfg.successors(an_exit)]
if ret:
break
else:
continue # drop this loop from being liftable
[(returnto, _)] = ret
# note: sorted for stable ordering
inputs = sorted(livemap[callfrom])
outputs = sorted(livemap[returnto])
lli = _loop_lift_info(
loop=loop,
inputs=inputs,
outputs=outputs,
callfrom=callfrom,
returnto=returnto,
)
loopinfos.append(lli)
return loopinfos
|
def _loop_lift_get_candidate_infos(cfg, blocks, livemap):
"""
Returns information on looplifting candidates.
"""
loops = _extract_loop_lifting_candidates(cfg, blocks)
loopinfos = []
for loop in loops:
[callfrom] = loop.entries # requirement checked earlier
an_exit = next(iter(loop.exits)) # anyone of the exit block
[(returnto, _)] = cfg.successors(an_exit) # requirement checked earlier
# note: sorted for stable ordering
inputs = sorted(livemap[callfrom])
outputs = sorted(livemap[returnto])
lli = _loop_lift_info(
loop=loop,
inputs=inputs,
outputs=outputs,
callfrom=callfrom,
returnto=returnto,
)
loopinfos.append(lli)
return loopinfos
|
https://github.com/numba/numba/issues/2561
|
Traceback (most recent call last):
File "numbabug.py", line 11, in <module>
numbafunc([])
File "/home/tuukka/.local/lib/python3.5/site-packages/numba/dispatcher.py", line 307, in _compile_for_args
return self.compile(tuple(argtypes))
File "/home/tuukka/.local/lib/python3.5/site-packages/numba/dispatcher.py", line 579, in compile
cres = self._compiler.compile(args, return_type)
File "/home/tuukka/.local/lib/python3.5/site-packages/numba/dispatcher.py", line 80, in compile
flags=flags, locals=self.locals)
File "/home/tuukka/.local/lib/python3.5/site-packages/numba/compiler.py", line 763, in compile_extra
return pipeline.compile_extra(func)
File "/home/tuukka/.local/lib/python3.5/site-packages/numba/compiler.py", line 360, in compile_extra
return self._compile_bytecode()
File "/home/tuukka/.local/lib/python3.5/site-packages/numba/compiler.py", line 722, in _compile_bytecode
return self._compile_core()
File "/home/tuukka/.local/lib/python3.5/site-packages/numba/compiler.py", line 709, in _compile_core
res = pm.run(self.status)
File "/home/tuukka/.local/lib/python3.5/site-packages/numba/compiler.py", line 246, in run
raise patched_exception
File "/home/tuukka/.local/lib/python3.5/site-packages/numba/compiler.py", line 238, in run
stage()
File "/home/tuukka/.local/lib/python3.5/site-packages/numba/compiler.py", line 431, in stage_objectmode_frontend
cres = self.frontend_looplift()
File "/home/tuukka/.local/lib/python3.5/site-packages/numba/compiler.py", line 411, in frontend_looplift
flags=loop_flags)
File "/home/tuukka/.local/lib/python3.5/site-packages/numba/transforms.py", line 191, in loop_lifting
func_ir.variable_lifetime.livemap)
File "/home/tuukka/.local/lib/python3.5/site-packages/numba/transforms.py", line 59, in _loop_lift_get_candidate_infos
[(returnto, _)] = cfg.successors(an_exit) # requirement checked earlier
ValueError: Failed at object (object mode frontend)
not enough values to unpack (expected 1, got 0)
|
ValueError
|
def __init__(self, typingctx, targetctx, library, args, return_type, flags, locals):
# Make sure the environment is reloaded
config.reload_config()
typingctx.refresh()
targetctx.refresh()
self.typingctx = typingctx
self.targetctx = _make_subtarget(targetctx, flags)
self.library = library
self.args = args
self.return_type = return_type
self.flags = flags
self.locals = locals
# Results of various steps of the compilation pipeline
self.bc = None
self.func_id = None
self.func_ir = None
self.func_ir_original = None # used for fallback
self.lifted = None
self.lifted_from = None
self.typemap = None
self.calltypes = None
self.type_annotation = None
self.status = _CompileStatus(
can_fallback=self.flags.enable_pyobject, can_giveup=config.COMPATIBILITY_MODE
)
|
def __init__(self, typingctx, targetctx, library, args, return_type, flags, locals):
# Make sure the environment is reloaded
config.reload_config()
typingctx.refresh()
targetctx.refresh()
self.typingctx = typingctx
self.targetctx = _make_subtarget(targetctx, flags)
self.library = library
self.args = args
self.return_type = return_type
self.flags = flags
self.locals = locals
# Results of various steps of the compilation pipeline
self.bc = None
self.func_id = None
self.func_ir = None
self.lifted = None
self.lifted_from = None
self.typemap = None
self.calltypes = None
self.type_annotation = None
self.status = _CompileStatus(
can_fallback=self.flags.enable_pyobject, can_giveup=config.COMPATIBILITY_MODE
)
|
https://github.com/numba/numba/issues/2169
|
---------------------------------------------------------------------------
NotImplementedError Traceback (most recent call last)
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/errors.py in new_error_context(fmt_, *args, **kwargs)
242 try:
--> 243 yield
244 except NumbaError as e:
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/lowering.py in lower_block(self, block)
200 loc=self.loc, errcls_=defaulterrcls):
--> 201 self.lower_inst(inst)
202
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/objmode.py in lower_inst(self, inst)
144 else:
--> 145 raise NotImplementedError(type(inst), inst)
146
NotImplementedError: (<class 'numba.ir.StaticSetItem'>, a2[0] = $const0.5)
During handling of the above exception, another exception occurred:
LoweringError Traceback (most recent call last)
<ipython-input-1-0cabfcdeff6e> in <module>()
12 return s
13
---> 14 test(np.array([3]), np.array([4]))
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/dispatcher.py in _compile_for_args(self, *args, **kws)
284 argtypes.append(self.typeof_pyval(a))
285 try:
--> 286 return self.compile(tuple(argtypes))
287 except errors.TypingError as e:
288 # Intercept typing error that may be due to an argument
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/dispatcher.py in compile(self, sig)
530
531 self._cache_misses[sig] += 1
--> 532 cres = self._compiler.compile(args, return_type)
533 self.add_overload(cres)
534 self._cache.save_overload(sig, cres)
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/dispatcher.py in compile(self, args, return_type)
79 impl,
80 args=args, return_type=return_type,
---> 81 flags=flags, locals=self.locals)
82 # Check typing error if object mode is used
83 if cres.typing_error is not None and not flags.enable_pyobject:
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in compile_extra(typingctx, targetctx, func, args, return_type, flags, locals, library)
682 pipeline = Pipeline(typingctx, targetctx, library,
683 args, return_type, flags, locals)
--> 684 return pipeline.compile_extra(func)
685
686
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in compile_extra(self, func)
346 self.lifted = ()
347 self.lifted_from = None
--> 348 return self._compile_bytecode()
349
350 def compile_ir(self, func_ir, lifted=(), lifted_from=None):
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in _compile_bytecode(self)
647 """
648 assert self.func_ir is None
--> 649 return self._compile_core()
650
651 def _compile_ir(self):
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in _compile_core(self)
634
635 pm.finalize()
--> 636 res = pm.run(self.status)
637 if res is not None:
638 # Early pipeline completion
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in run(self, status)
233 # No more fallback pipelines?
234 if is_final_pipeline:
--> 235 raise patched_exception
236 # Go to next fallback pipeline
237 else:
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in run(self, status)
225 try:
226 event(stage_name)
--> 227 stage()
228 except _EarlyPipelineCompletion as e:
229 return e.result
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in stage_objectmode_frontend(self)
413 if self.flags.enable_looplift:
414 assert not self.lifted
--> 415 cres = self.frontend_looplift()
416 if cres is not None:
417 raise _EarlyPipelineCompletion(cres)
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in frontend_looplift(self)
404 self.args, self.return_type,
405 outer_flags, self.locals,
--> 406 lifted=tuple(loops), lifted_from=None)
407 return cres
408
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in compile_ir(typingctx, targetctx, func_ir, args, return_type, flags, locals, lifted, lifted_from, library)
696 args, return_type, flags, locals)
697 return pipeline.compile_ir(func_ir=func_ir, lifted=lifted,
--> 698 lifted_from=lifted_from)
699
700
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in compile_ir(self, func_ir, lifted, lifted_from)
354
355 self._set_and_check_ir(func_ir)
--> 356 return self._compile_ir()
357
358 def stage_analyze_bytecode(self):
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in _compile_ir(self)
654 """
655 assert self.func_ir is not None
--> 656 return self._compile_core()
657
658
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in _compile_core(self)
634
635 pm.finalize()
--> 636 res = pm.run(self.status)
637 if res is not None:
638 # Early pipeline completion
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in run(self, status)
233 # No more fallback pipelines?
234 if is_final_pipeline:
--> 235 raise patched_exception
236 # Go to next fallback pipeline
237 else:
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in run(self, status)
225 try:
226 event(stage_name)
--> 227 stage()
228 except _EarlyPipelineCompletion as e:
229 return e.result
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in stage_objectmode_backend(self)
552 """
553 lowerfn = self.backend_object_mode
--> 554 self._backend(lowerfn, objectmode=True)
555
556 # Warn if compiled function in object mode and force_pyobject not set
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in _backend(self, lowerfn, objectmode)
529 self.library.enable_object_caching()
530
--> 531 lowered = lowerfn()
532 signature = typing.signature(self.return_type, *self.args)
533 self.cr = compile_result(typing_context=self.typingctx,
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in backend_object_mode(self)
503 self.library,
504 self.func_ir,
--> 505 self.flags)
506
507 def backend_nopython_mode(self):
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in py_lowering_stage(targetctx, library, interp, flags)
824 fndesc = funcdesc.PythonFunctionDescriptor.from_object_mode_function(interp)
825 lower = objmode.PyLower(targetctx, library, fndesc, interp)
--> 826 lower.lower()
827 if not flags.no_cpython_wrapper:
828 lower.create_cpython_wrapper()
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/lowering.py in lower(self)
124 if self.generator_info is None:
125 self.genlower = None
--> 126 self.lower_normal_function(self.fndesc)
127 else:
128 self.genlower = self.GeneratorLower(self)
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/lowering.py in lower_normal_function(self, fndesc)
159 # Init argument values
160 self.extract_function_arguments()
--> 161 entry_block_tail = self.lower_function_body()
162
163 # Close tail of entry block
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/lowering.py in lower_function_body(self)
184 bb = self.blkmap[offset]
185 self.builder.position_at_end(bb)
--> 186 self.lower_block(block)
187
188 self.post_lower()
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/lowering.py in lower_block(self, block)
199 with new_error_context('lowering "{inst}" at {loc}', inst=inst,
200 loc=self.loc, errcls_=defaulterrcls):
--> 201 self.lower_inst(inst)
202
203 def create_cpython_wrapper(self, release_gil=False):
/Applications/anaconda/envs/numba029/lib/python3.5/contextlib.py in __exit__(self, type, value, traceback)
75 value = type()
76 try:
---> 77 self.gen.throw(type, value, traceback)
78 raise RuntimeError("generator didn't stop after throw()")
79 except StopIteration as exc:
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/errors.py in new_error_context(fmt_, *args, **kwargs)
247 except Exception as e:
248 newerr = errcls(e).add_context(_format_msg(fmt_, args, kwargs))
--> 249 six.reraise(type(newerr), newerr, sys.exc_info()[2])
250
251
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/six.py in reraise(tp, value, tb)
656 value = tp()
657 if value.__traceback__ is not tb:
--> 658 raise value.with_traceback(tb)
659 raise value
660
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/errors.py in new_error_context(fmt_, *args, **kwargs)
241 errcls = kwargs.pop('errcls_', InternalError)
242 try:
--> 243 yield
244 except NumbaError as e:
245 e.add_context(_format_msg(fmt_, args, kwargs))
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/lowering.py in lower_block(self, block)
199 with new_error_context('lowering "{inst}" at {loc}', inst=inst,
200 loc=self.loc, errcls_=defaulterrcls):
--> 201 self.lower_inst(inst)
202
203 def create_cpython_wrapper(self, release_gil=False):
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/objmode.py in lower_inst(self, inst)
143
144 else:
--> 145 raise NotImplementedError(type(inst), inst)
146
147 def lower_assign(self, inst):
LoweringError: Failed at object (object mode frontend)
Failed at object (object mode backend)
(<class 'numba.ir.StaticSetItem'>, a2[0] = $const0.5)
File "<ipython-input-1-0cabfcdeff6e>", line 7
[1] During: lowering "a2[0] = $const0.5" at <ipython-input-1-0cabfcdeff6e> (7)
|
NotImplementedError
|
def stage_objectmode_frontend(self):
"""
Front-end: Analyze bytecode, generate Numba IR, infer types
"""
self.func_ir = self.func_ir_original or self.func_ir
if self.flags.enable_looplift:
assert not self.lifted
cres = self.frontend_looplift()
if cres is not None:
raise _EarlyPipelineCompletion(cres)
# Fallback typing: everything is a python object
self.typemap = defaultdict(lambda: types.pyobject)
self.calltypes = defaultdict(lambda: types.pyobject)
self.return_type = types.pyobject
|
def stage_objectmode_frontend(self):
"""
Front-end: Analyze bytecode, generate Numba IR, infer types
"""
if self.flags.enable_looplift:
assert not self.lifted
cres = self.frontend_looplift()
if cres is not None:
raise _EarlyPipelineCompletion(cres)
# Fallback typing: everything is a python object
self.typemap = defaultdict(lambda: types.pyobject)
self.calltypes = defaultdict(lambda: types.pyobject)
self.return_type = types.pyobject
|
https://github.com/numba/numba/issues/2169
|
---------------------------------------------------------------------------
NotImplementedError Traceback (most recent call last)
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/errors.py in new_error_context(fmt_, *args, **kwargs)
242 try:
--> 243 yield
244 except NumbaError as e:
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/lowering.py in lower_block(self, block)
200 loc=self.loc, errcls_=defaulterrcls):
--> 201 self.lower_inst(inst)
202
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/objmode.py in lower_inst(self, inst)
144 else:
--> 145 raise NotImplementedError(type(inst), inst)
146
NotImplementedError: (<class 'numba.ir.StaticSetItem'>, a2[0] = $const0.5)
During handling of the above exception, another exception occurred:
LoweringError Traceback (most recent call last)
<ipython-input-1-0cabfcdeff6e> in <module>()
12 return s
13
---> 14 test(np.array([3]), np.array([4]))
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/dispatcher.py in _compile_for_args(self, *args, **kws)
284 argtypes.append(self.typeof_pyval(a))
285 try:
--> 286 return self.compile(tuple(argtypes))
287 except errors.TypingError as e:
288 # Intercept typing error that may be due to an argument
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/dispatcher.py in compile(self, sig)
530
531 self._cache_misses[sig] += 1
--> 532 cres = self._compiler.compile(args, return_type)
533 self.add_overload(cres)
534 self._cache.save_overload(sig, cres)
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/dispatcher.py in compile(self, args, return_type)
79 impl,
80 args=args, return_type=return_type,
---> 81 flags=flags, locals=self.locals)
82 # Check typing error if object mode is used
83 if cres.typing_error is not None and not flags.enable_pyobject:
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in compile_extra(typingctx, targetctx, func, args, return_type, flags, locals, library)
682 pipeline = Pipeline(typingctx, targetctx, library,
683 args, return_type, flags, locals)
--> 684 return pipeline.compile_extra(func)
685
686
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in compile_extra(self, func)
346 self.lifted = ()
347 self.lifted_from = None
--> 348 return self._compile_bytecode()
349
350 def compile_ir(self, func_ir, lifted=(), lifted_from=None):
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in _compile_bytecode(self)
647 """
648 assert self.func_ir is None
--> 649 return self._compile_core()
650
651 def _compile_ir(self):
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in _compile_core(self)
634
635 pm.finalize()
--> 636 res = pm.run(self.status)
637 if res is not None:
638 # Early pipeline completion
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in run(self, status)
233 # No more fallback pipelines?
234 if is_final_pipeline:
--> 235 raise patched_exception
236 # Go to next fallback pipeline
237 else:
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in run(self, status)
225 try:
226 event(stage_name)
--> 227 stage()
228 except _EarlyPipelineCompletion as e:
229 return e.result
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in stage_objectmode_frontend(self)
413 if self.flags.enable_looplift:
414 assert not self.lifted
--> 415 cres = self.frontend_looplift()
416 if cres is not None:
417 raise _EarlyPipelineCompletion(cres)
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in frontend_looplift(self)
404 self.args, self.return_type,
405 outer_flags, self.locals,
--> 406 lifted=tuple(loops), lifted_from=None)
407 return cres
408
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in compile_ir(typingctx, targetctx, func_ir, args, return_type, flags, locals, lifted, lifted_from, library)
696 args, return_type, flags, locals)
697 return pipeline.compile_ir(func_ir=func_ir, lifted=lifted,
--> 698 lifted_from=lifted_from)
699
700
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in compile_ir(self, func_ir, lifted, lifted_from)
354
355 self._set_and_check_ir(func_ir)
--> 356 return self._compile_ir()
357
358 def stage_analyze_bytecode(self):
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in _compile_ir(self)
654 """
655 assert self.func_ir is not None
--> 656 return self._compile_core()
657
658
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in _compile_core(self)
634
635 pm.finalize()
--> 636 res = pm.run(self.status)
637 if res is not None:
638 # Early pipeline completion
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in run(self, status)
233 # No more fallback pipelines?
234 if is_final_pipeline:
--> 235 raise patched_exception
236 # Go to next fallback pipeline
237 else:
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in run(self, status)
225 try:
226 event(stage_name)
--> 227 stage()
228 except _EarlyPipelineCompletion as e:
229 return e.result
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in stage_objectmode_backend(self)
552 """
553 lowerfn = self.backend_object_mode
--> 554 self._backend(lowerfn, objectmode=True)
555
556 # Warn if compiled function in object mode and force_pyobject not set
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in _backend(self, lowerfn, objectmode)
529 self.library.enable_object_caching()
530
--> 531 lowered = lowerfn()
532 signature = typing.signature(self.return_type, *self.args)
533 self.cr = compile_result(typing_context=self.typingctx,
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in backend_object_mode(self)
503 self.library,
504 self.func_ir,
--> 505 self.flags)
506
507 def backend_nopython_mode(self):
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in py_lowering_stage(targetctx, library, interp, flags)
824 fndesc = funcdesc.PythonFunctionDescriptor.from_object_mode_function(interp)
825 lower = objmode.PyLower(targetctx, library, fndesc, interp)
--> 826 lower.lower()
827 if not flags.no_cpython_wrapper:
828 lower.create_cpython_wrapper()
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/lowering.py in lower(self)
124 if self.generator_info is None:
125 self.genlower = None
--> 126 self.lower_normal_function(self.fndesc)
127 else:
128 self.genlower = self.GeneratorLower(self)
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/lowering.py in lower_normal_function(self, fndesc)
159 # Init argument values
160 self.extract_function_arguments()
--> 161 entry_block_tail = self.lower_function_body()
162
163 # Close tail of entry block
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/lowering.py in lower_function_body(self)
184 bb = self.blkmap[offset]
185 self.builder.position_at_end(bb)
--> 186 self.lower_block(block)
187
188 self.post_lower()
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/lowering.py in lower_block(self, block)
199 with new_error_context('lowering "{inst}" at {loc}', inst=inst,
200 loc=self.loc, errcls_=defaulterrcls):
--> 201 self.lower_inst(inst)
202
203 def create_cpython_wrapper(self, release_gil=False):
/Applications/anaconda/envs/numba029/lib/python3.5/contextlib.py in __exit__(self, type, value, traceback)
75 value = type()
76 try:
---> 77 self.gen.throw(type, value, traceback)
78 raise RuntimeError("generator didn't stop after throw()")
79 except StopIteration as exc:
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/errors.py in new_error_context(fmt_, *args, **kwargs)
247 except Exception as e:
248 newerr = errcls(e).add_context(_format_msg(fmt_, args, kwargs))
--> 249 six.reraise(type(newerr), newerr, sys.exc_info()[2])
250
251
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/six.py in reraise(tp, value, tb)
656 value = tp()
657 if value.__traceback__ is not tb:
--> 658 raise value.with_traceback(tb)
659 raise value
660
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/errors.py in new_error_context(fmt_, *args, **kwargs)
241 errcls = kwargs.pop('errcls_', InternalError)
242 try:
--> 243 yield
244 except NumbaError as e:
245 e.add_context(_format_msg(fmt_, args, kwargs))
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/lowering.py in lower_block(self, block)
199 with new_error_context('lowering "{inst}" at {loc}', inst=inst,
200 loc=self.loc, errcls_=defaulterrcls):
--> 201 self.lower_inst(inst)
202
203 def create_cpython_wrapper(self, release_gil=False):
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/objmode.py in lower_inst(self, inst)
143
144 else:
--> 145 raise NotImplementedError(type(inst), inst)
146
147 def lower_assign(self, inst):
LoweringError: Failed at object (object mode frontend)
Failed at object (object mode backend)
(<class 'numba.ir.StaticSetItem'>, a2[0] = $const0.5)
File "<ipython-input-1-0cabfcdeff6e>", line 7
[1] During: lowering "a2[0] = $const0.5" at <ipython-input-1-0cabfcdeff6e> (7)
|
NotImplementedError
|
def _compile_core(self):
"""
Populate and run compiler pipeline
"""
pm = _PipelineManager()
if not self.flags.force_pyobject:
pm.create_pipeline("nopython")
if self.func_ir is None:
pm.add_stage(self.stage_analyze_bytecode, "analyzing bytecode")
pm.add_stage(self.stage_process_ir, "processing IR")
if not self.flags.no_rewrites:
if self.status.can_fallback:
pm.add_stage(self.stage_preserve_ir, "preserve IR for fallback")
pm.add_stage(self.stage_generic_rewrites, "nopython rewrites")
pm.add_stage(self.stage_nopython_frontend, "nopython frontend")
pm.add_stage(self.stage_annotate_type, "annotate type")
if not self.flags.no_rewrites:
pm.add_stage(self.stage_nopython_rewrites, "nopython rewrites")
pm.add_stage(self.stage_nopython_backend, "nopython mode backend")
pm.add_stage(self.stage_cleanup, "cleanup intermediate results")
if self.status.can_fallback or self.flags.force_pyobject:
pm.create_pipeline("object")
if self.func_ir is None:
pm.add_stage(self.stage_analyze_bytecode, "analyzing bytecode")
pm.add_stage(self.stage_process_ir, "processing IR")
pm.add_stage(self.stage_objectmode_frontend, "object mode frontend")
pm.add_stage(self.stage_annotate_type, "annotate type")
pm.add_stage(self.stage_objectmode_backend, "object mode backend")
pm.add_stage(self.stage_cleanup, "cleanup intermediate results")
if self.status.can_giveup:
pm.create_pipeline("interp")
pm.add_stage(self.stage_compile_interp_mode, "compiling with interpreter mode")
pm.add_stage(self.stage_cleanup, "cleanup intermediate results")
pm.finalize()
res = pm.run(self.status)
if res is not None:
# Early pipeline completion
return res
else:
assert self.cr is not None
return self.cr
|
def _compile_core(self):
"""
Populate and run compiler pipeline
"""
pm = _PipelineManager()
if not self.flags.force_pyobject:
pm.create_pipeline("nopython")
if self.func_ir is None:
pm.add_stage(self.stage_analyze_bytecode, "analyzing bytecode")
pm.add_stage(self.stage_process_ir, "processing IR")
if not self.flags.no_rewrites:
pm.add_stage(self.stage_generic_rewrites, "nopython rewrites")
pm.add_stage(self.stage_nopython_frontend, "nopython frontend")
pm.add_stage(self.stage_annotate_type, "annotate type")
if not self.flags.no_rewrites:
pm.add_stage(self.stage_nopython_rewrites, "nopython rewrites")
pm.add_stage(self.stage_nopython_backend, "nopython mode backend")
pm.add_stage(self.stage_cleanup, "cleanup intermediate results")
if self.status.can_fallback or self.flags.force_pyobject:
pm.create_pipeline("object")
if self.func_ir is None:
pm.add_stage(self.stage_analyze_bytecode, "analyzing bytecode")
pm.add_stage(self.stage_process_ir, "processing IR")
pm.add_stage(self.stage_objectmode_frontend, "object mode frontend")
pm.add_stage(self.stage_annotate_type, "annotate type")
pm.add_stage(self.stage_objectmode_backend, "object mode backend")
pm.add_stage(self.stage_cleanup, "cleanup intermediate results")
if self.status.can_giveup:
pm.create_pipeline("interp")
pm.add_stage(self.stage_compile_interp_mode, "compiling with interpreter mode")
pm.add_stage(self.stage_cleanup, "cleanup intermediate results")
pm.finalize()
res = pm.run(self.status)
if res is not None:
# Early pipeline completion
return res
else:
assert self.cr is not None
return self.cr
|
https://github.com/numba/numba/issues/2169
|
---------------------------------------------------------------------------
NotImplementedError Traceback (most recent call last)
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/errors.py in new_error_context(fmt_, *args, **kwargs)
242 try:
--> 243 yield
244 except NumbaError as e:
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/lowering.py in lower_block(self, block)
200 loc=self.loc, errcls_=defaulterrcls):
--> 201 self.lower_inst(inst)
202
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/objmode.py in lower_inst(self, inst)
144 else:
--> 145 raise NotImplementedError(type(inst), inst)
146
NotImplementedError: (<class 'numba.ir.StaticSetItem'>, a2[0] = $const0.5)
During handling of the above exception, another exception occurred:
LoweringError Traceback (most recent call last)
<ipython-input-1-0cabfcdeff6e> in <module>()
12 return s
13
---> 14 test(np.array([3]), np.array([4]))
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/dispatcher.py in _compile_for_args(self, *args, **kws)
284 argtypes.append(self.typeof_pyval(a))
285 try:
--> 286 return self.compile(tuple(argtypes))
287 except errors.TypingError as e:
288 # Intercept typing error that may be due to an argument
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/dispatcher.py in compile(self, sig)
530
531 self._cache_misses[sig] += 1
--> 532 cres = self._compiler.compile(args, return_type)
533 self.add_overload(cres)
534 self._cache.save_overload(sig, cres)
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/dispatcher.py in compile(self, args, return_type)
79 impl,
80 args=args, return_type=return_type,
---> 81 flags=flags, locals=self.locals)
82 # Check typing error if object mode is used
83 if cres.typing_error is not None and not flags.enable_pyobject:
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in compile_extra(typingctx, targetctx, func, args, return_type, flags, locals, library)
682 pipeline = Pipeline(typingctx, targetctx, library,
683 args, return_type, flags, locals)
--> 684 return pipeline.compile_extra(func)
685
686
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in compile_extra(self, func)
346 self.lifted = ()
347 self.lifted_from = None
--> 348 return self._compile_bytecode()
349
350 def compile_ir(self, func_ir, lifted=(), lifted_from=None):
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in _compile_bytecode(self)
647 """
648 assert self.func_ir is None
--> 649 return self._compile_core()
650
651 def _compile_ir(self):
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in _compile_core(self)
634
635 pm.finalize()
--> 636 res = pm.run(self.status)
637 if res is not None:
638 # Early pipeline completion
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in run(self, status)
233 # No more fallback pipelines?
234 if is_final_pipeline:
--> 235 raise patched_exception
236 # Go to next fallback pipeline
237 else:
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in run(self, status)
225 try:
226 event(stage_name)
--> 227 stage()
228 except _EarlyPipelineCompletion as e:
229 return e.result
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in stage_objectmode_frontend(self)
413 if self.flags.enable_looplift:
414 assert not self.lifted
--> 415 cres = self.frontend_looplift()
416 if cres is not None:
417 raise _EarlyPipelineCompletion(cres)
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in frontend_looplift(self)
404 self.args, self.return_type,
405 outer_flags, self.locals,
--> 406 lifted=tuple(loops), lifted_from=None)
407 return cres
408
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in compile_ir(typingctx, targetctx, func_ir, args, return_type, flags, locals, lifted, lifted_from, library)
696 args, return_type, flags, locals)
697 return pipeline.compile_ir(func_ir=func_ir, lifted=lifted,
--> 698 lifted_from=lifted_from)
699
700
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in compile_ir(self, func_ir, lifted, lifted_from)
354
355 self._set_and_check_ir(func_ir)
--> 356 return self._compile_ir()
357
358 def stage_analyze_bytecode(self):
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in _compile_ir(self)
654 """
655 assert self.func_ir is not None
--> 656 return self._compile_core()
657
658
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in _compile_core(self)
634
635 pm.finalize()
--> 636 res = pm.run(self.status)
637 if res is not None:
638 # Early pipeline completion
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in run(self, status)
233 # No more fallback pipelines?
234 if is_final_pipeline:
--> 235 raise patched_exception
236 # Go to next fallback pipeline
237 else:
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in run(self, status)
225 try:
226 event(stage_name)
--> 227 stage()
228 except _EarlyPipelineCompletion as e:
229 return e.result
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in stage_objectmode_backend(self)
552 """
553 lowerfn = self.backend_object_mode
--> 554 self._backend(lowerfn, objectmode=True)
555
556 # Warn if compiled function in object mode and force_pyobject not set
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in _backend(self, lowerfn, objectmode)
529 self.library.enable_object_caching()
530
--> 531 lowered = lowerfn()
532 signature = typing.signature(self.return_type, *self.args)
533 self.cr = compile_result(typing_context=self.typingctx,
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in backend_object_mode(self)
503 self.library,
504 self.func_ir,
--> 505 self.flags)
506
507 def backend_nopython_mode(self):
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in py_lowering_stage(targetctx, library, interp, flags)
824 fndesc = funcdesc.PythonFunctionDescriptor.from_object_mode_function(interp)
825 lower = objmode.PyLower(targetctx, library, fndesc, interp)
--> 826 lower.lower()
827 if not flags.no_cpython_wrapper:
828 lower.create_cpython_wrapper()
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/lowering.py in lower(self)
124 if self.generator_info is None:
125 self.genlower = None
--> 126 self.lower_normal_function(self.fndesc)
127 else:
128 self.genlower = self.GeneratorLower(self)
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/lowering.py in lower_normal_function(self, fndesc)
159 # Init argument values
160 self.extract_function_arguments()
--> 161 entry_block_tail = self.lower_function_body()
162
163 # Close tail of entry block
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/lowering.py in lower_function_body(self)
184 bb = self.blkmap[offset]
185 self.builder.position_at_end(bb)
--> 186 self.lower_block(block)
187
188 self.post_lower()
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/lowering.py in lower_block(self, block)
199 with new_error_context('lowering "{inst}" at {loc}', inst=inst,
200 loc=self.loc, errcls_=defaulterrcls):
--> 201 self.lower_inst(inst)
202
203 def create_cpython_wrapper(self, release_gil=False):
/Applications/anaconda/envs/numba029/lib/python3.5/contextlib.py in __exit__(self, type, value, traceback)
75 value = type()
76 try:
---> 77 self.gen.throw(type, value, traceback)
78 raise RuntimeError("generator didn't stop after throw()")
79 except StopIteration as exc:
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/errors.py in new_error_context(fmt_, *args, **kwargs)
247 except Exception as e:
248 newerr = errcls(e).add_context(_format_msg(fmt_, args, kwargs))
--> 249 six.reraise(type(newerr), newerr, sys.exc_info()[2])
250
251
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/six.py in reraise(tp, value, tb)
656 value = tp()
657 if value.__traceback__ is not tb:
--> 658 raise value.with_traceback(tb)
659 raise value
660
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/errors.py in new_error_context(fmt_, *args, **kwargs)
241 errcls = kwargs.pop('errcls_', InternalError)
242 try:
--> 243 yield
244 except NumbaError as e:
245 e.add_context(_format_msg(fmt_, args, kwargs))
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/lowering.py in lower_block(self, block)
199 with new_error_context('lowering "{inst}" at {loc}', inst=inst,
200 loc=self.loc, errcls_=defaulterrcls):
--> 201 self.lower_inst(inst)
202
203 def create_cpython_wrapper(self, release_gil=False):
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/objmode.py in lower_inst(self, inst)
143
144 else:
--> 145 raise NotImplementedError(type(inst), inst)
146
147 def lower_assign(self, inst):
LoweringError: Failed at object (object mode frontend)
Failed at object (object mode backend)
(<class 'numba.ir.StaticSetItem'>, a2[0] = $const0.5)
File "<ipython-input-1-0cabfcdeff6e>", line 7
[1] During: lowering "a2[0] = $const0.5" at <ipython-input-1-0cabfcdeff6e> (7)
|
NotImplementedError
|
def copy(self):
new_ir = copy.copy(self)
new_ir.blocks = self.blocks.copy()
return new_ir
|
def copy(self):
block = Block(self.scope, self.loc)
block.body = self.body[:]
return block
|
https://github.com/numba/numba/issues/2169
|
---------------------------------------------------------------------------
NotImplementedError Traceback (most recent call last)
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/errors.py in new_error_context(fmt_, *args, **kwargs)
242 try:
--> 243 yield
244 except NumbaError as e:
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/lowering.py in lower_block(self, block)
200 loc=self.loc, errcls_=defaulterrcls):
--> 201 self.lower_inst(inst)
202
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/objmode.py in lower_inst(self, inst)
144 else:
--> 145 raise NotImplementedError(type(inst), inst)
146
NotImplementedError: (<class 'numba.ir.StaticSetItem'>, a2[0] = $const0.5)
During handling of the above exception, another exception occurred:
LoweringError Traceback (most recent call last)
<ipython-input-1-0cabfcdeff6e> in <module>()
12 return s
13
---> 14 test(np.array([3]), np.array([4]))
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/dispatcher.py in _compile_for_args(self, *args, **kws)
284 argtypes.append(self.typeof_pyval(a))
285 try:
--> 286 return self.compile(tuple(argtypes))
287 except errors.TypingError as e:
288 # Intercept typing error that may be due to an argument
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/dispatcher.py in compile(self, sig)
530
531 self._cache_misses[sig] += 1
--> 532 cres = self._compiler.compile(args, return_type)
533 self.add_overload(cres)
534 self._cache.save_overload(sig, cres)
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/dispatcher.py in compile(self, args, return_type)
79 impl,
80 args=args, return_type=return_type,
---> 81 flags=flags, locals=self.locals)
82 # Check typing error if object mode is used
83 if cres.typing_error is not None and not flags.enable_pyobject:
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in compile_extra(typingctx, targetctx, func, args, return_type, flags, locals, library)
682 pipeline = Pipeline(typingctx, targetctx, library,
683 args, return_type, flags, locals)
--> 684 return pipeline.compile_extra(func)
685
686
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in compile_extra(self, func)
346 self.lifted = ()
347 self.lifted_from = None
--> 348 return self._compile_bytecode()
349
350 def compile_ir(self, func_ir, lifted=(), lifted_from=None):
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in _compile_bytecode(self)
647 """
648 assert self.func_ir is None
--> 649 return self._compile_core()
650
651 def _compile_ir(self):
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in _compile_core(self)
634
635 pm.finalize()
--> 636 res = pm.run(self.status)
637 if res is not None:
638 # Early pipeline completion
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in run(self, status)
233 # No more fallback pipelines?
234 if is_final_pipeline:
--> 235 raise patched_exception
236 # Go to next fallback pipeline
237 else:
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in run(self, status)
225 try:
226 event(stage_name)
--> 227 stage()
228 except _EarlyPipelineCompletion as e:
229 return e.result
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in stage_objectmode_frontend(self)
413 if self.flags.enable_looplift:
414 assert not self.lifted
--> 415 cres = self.frontend_looplift()
416 if cres is not None:
417 raise _EarlyPipelineCompletion(cres)
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in frontend_looplift(self)
404 self.args, self.return_type,
405 outer_flags, self.locals,
--> 406 lifted=tuple(loops), lifted_from=None)
407 return cres
408
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in compile_ir(typingctx, targetctx, func_ir, args, return_type, flags, locals, lifted, lifted_from, library)
696 args, return_type, flags, locals)
697 return pipeline.compile_ir(func_ir=func_ir, lifted=lifted,
--> 698 lifted_from=lifted_from)
699
700
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in compile_ir(self, func_ir, lifted, lifted_from)
354
355 self._set_and_check_ir(func_ir)
--> 356 return self._compile_ir()
357
358 def stage_analyze_bytecode(self):
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in _compile_ir(self)
654 """
655 assert self.func_ir is not None
--> 656 return self._compile_core()
657
658
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in _compile_core(self)
634
635 pm.finalize()
--> 636 res = pm.run(self.status)
637 if res is not None:
638 # Early pipeline completion
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in run(self, status)
233 # No more fallback pipelines?
234 if is_final_pipeline:
--> 235 raise patched_exception
236 # Go to next fallback pipeline
237 else:
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in run(self, status)
225 try:
226 event(stage_name)
--> 227 stage()
228 except _EarlyPipelineCompletion as e:
229 return e.result
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in stage_objectmode_backend(self)
552 """
553 lowerfn = self.backend_object_mode
--> 554 self._backend(lowerfn, objectmode=True)
555
556 # Warn if compiled function in object mode and force_pyobject not set
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in _backend(self, lowerfn, objectmode)
529 self.library.enable_object_caching()
530
--> 531 lowered = lowerfn()
532 signature = typing.signature(self.return_type, *self.args)
533 self.cr = compile_result(typing_context=self.typingctx,
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in backend_object_mode(self)
503 self.library,
504 self.func_ir,
--> 505 self.flags)
506
507 def backend_nopython_mode(self):
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in py_lowering_stage(targetctx, library, interp, flags)
824 fndesc = funcdesc.PythonFunctionDescriptor.from_object_mode_function(interp)
825 lower = objmode.PyLower(targetctx, library, fndesc, interp)
--> 826 lower.lower()
827 if not flags.no_cpython_wrapper:
828 lower.create_cpython_wrapper()
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/lowering.py in lower(self)
124 if self.generator_info is None:
125 self.genlower = None
--> 126 self.lower_normal_function(self.fndesc)
127 else:
128 self.genlower = self.GeneratorLower(self)
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/lowering.py in lower_normal_function(self, fndesc)
159 # Init argument values
160 self.extract_function_arguments()
--> 161 entry_block_tail = self.lower_function_body()
162
163 # Close tail of entry block
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/lowering.py in lower_function_body(self)
184 bb = self.blkmap[offset]
185 self.builder.position_at_end(bb)
--> 186 self.lower_block(block)
187
188 self.post_lower()
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/lowering.py in lower_block(self, block)
199 with new_error_context('lowering "{inst}" at {loc}', inst=inst,
200 loc=self.loc, errcls_=defaulterrcls):
--> 201 self.lower_inst(inst)
202
203 def create_cpython_wrapper(self, release_gil=False):
/Applications/anaconda/envs/numba029/lib/python3.5/contextlib.py in __exit__(self, type, value, traceback)
75 value = type()
76 try:
---> 77 self.gen.throw(type, value, traceback)
78 raise RuntimeError("generator didn't stop after throw()")
79 except StopIteration as exc:
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/errors.py in new_error_context(fmt_, *args, **kwargs)
247 except Exception as e:
248 newerr = errcls(e).add_context(_format_msg(fmt_, args, kwargs))
--> 249 six.reraise(type(newerr), newerr, sys.exc_info()[2])
250
251
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/six.py in reraise(tp, value, tb)
656 value = tp()
657 if value.__traceback__ is not tb:
--> 658 raise value.with_traceback(tb)
659 raise value
660
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/errors.py in new_error_context(fmt_, *args, **kwargs)
241 errcls = kwargs.pop('errcls_', InternalError)
242 try:
--> 243 yield
244 except NumbaError as e:
245 e.add_context(_format_msg(fmt_, args, kwargs))
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/lowering.py in lower_block(self, block)
199 with new_error_context('lowering "{inst}" at {loc}', inst=inst,
200 loc=self.loc, errcls_=defaulterrcls):
--> 201 self.lower_inst(inst)
202
203 def create_cpython_wrapper(self, release_gil=False):
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/objmode.py in lower_inst(self, inst)
143
144 else:
--> 145 raise NotImplementedError(type(inst), inst)
146
147 def lower_assign(self, inst):
LoweringError: Failed at object (object mode frontend)
Failed at object (object mode backend)
(<class 'numba.ir.StaticSetItem'>, a2[0] = $const0.5)
File "<ipython-input-1-0cabfcdeff6e>", line 7
[1] During: lowering "a2[0] = $const0.5" at <ipython-input-1-0cabfcdeff6e> (7)
|
NotImplementedError
|
def match(self, interp, block, typemap, calltypes):
self.getitems = getitems = {}
self.block = block
# Detect all getitem expressions and find which ones can be
# rewritten
for expr in block.find_exprs(op="getitem"):
if expr.op == "getitem":
try:
const = interp.infer_constant(expr.index)
except errors.ConstantInferenceError:
continue
getitems[expr] = const
return len(getitems) > 0
|
def match(self, interp, block, typemap, calltypes):
self.getitems = getitems = []
self.block = block
# Detect all getitem expressions and find which ones can be
# rewritten
for expr in block.find_exprs(op="getitem"):
if expr.op == "getitem":
try:
const = interp.infer_constant(expr.index)
except errors.ConstantInferenceError:
continue
getitems.append((expr, const))
return len(getitems) > 0
|
https://github.com/numba/numba/issues/2169
|
---------------------------------------------------------------------------
NotImplementedError Traceback (most recent call last)
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/errors.py in new_error_context(fmt_, *args, **kwargs)
242 try:
--> 243 yield
244 except NumbaError as e:
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/lowering.py in lower_block(self, block)
200 loc=self.loc, errcls_=defaulterrcls):
--> 201 self.lower_inst(inst)
202
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/objmode.py in lower_inst(self, inst)
144 else:
--> 145 raise NotImplementedError(type(inst), inst)
146
NotImplementedError: (<class 'numba.ir.StaticSetItem'>, a2[0] = $const0.5)
During handling of the above exception, another exception occurred:
LoweringError Traceback (most recent call last)
<ipython-input-1-0cabfcdeff6e> in <module>()
12 return s
13
---> 14 test(np.array([3]), np.array([4]))
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/dispatcher.py in _compile_for_args(self, *args, **kws)
284 argtypes.append(self.typeof_pyval(a))
285 try:
--> 286 return self.compile(tuple(argtypes))
287 except errors.TypingError as e:
288 # Intercept typing error that may be due to an argument
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/dispatcher.py in compile(self, sig)
530
531 self._cache_misses[sig] += 1
--> 532 cres = self._compiler.compile(args, return_type)
533 self.add_overload(cres)
534 self._cache.save_overload(sig, cres)
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/dispatcher.py in compile(self, args, return_type)
79 impl,
80 args=args, return_type=return_type,
---> 81 flags=flags, locals=self.locals)
82 # Check typing error if object mode is used
83 if cres.typing_error is not None and not flags.enable_pyobject:
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in compile_extra(typingctx, targetctx, func, args, return_type, flags, locals, library)
682 pipeline = Pipeline(typingctx, targetctx, library,
683 args, return_type, flags, locals)
--> 684 return pipeline.compile_extra(func)
685
686
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in compile_extra(self, func)
346 self.lifted = ()
347 self.lifted_from = None
--> 348 return self._compile_bytecode()
349
350 def compile_ir(self, func_ir, lifted=(), lifted_from=None):
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in _compile_bytecode(self)
647 """
648 assert self.func_ir is None
--> 649 return self._compile_core()
650
651 def _compile_ir(self):
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in _compile_core(self)
634
635 pm.finalize()
--> 636 res = pm.run(self.status)
637 if res is not None:
638 # Early pipeline completion
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in run(self, status)
233 # No more fallback pipelines?
234 if is_final_pipeline:
--> 235 raise patched_exception
236 # Go to next fallback pipeline
237 else:
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in run(self, status)
225 try:
226 event(stage_name)
--> 227 stage()
228 except _EarlyPipelineCompletion as e:
229 return e.result
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in stage_objectmode_frontend(self)
413 if self.flags.enable_looplift:
414 assert not self.lifted
--> 415 cres = self.frontend_looplift()
416 if cres is not None:
417 raise _EarlyPipelineCompletion(cres)
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in frontend_looplift(self)
404 self.args, self.return_type,
405 outer_flags, self.locals,
--> 406 lifted=tuple(loops), lifted_from=None)
407 return cres
408
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in compile_ir(typingctx, targetctx, func_ir, args, return_type, flags, locals, lifted, lifted_from, library)
696 args, return_type, flags, locals)
697 return pipeline.compile_ir(func_ir=func_ir, lifted=lifted,
--> 698 lifted_from=lifted_from)
699
700
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in compile_ir(self, func_ir, lifted, lifted_from)
354
355 self._set_and_check_ir(func_ir)
--> 356 return self._compile_ir()
357
358 def stage_analyze_bytecode(self):
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in _compile_ir(self)
654 """
655 assert self.func_ir is not None
--> 656 return self._compile_core()
657
658
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in _compile_core(self)
634
635 pm.finalize()
--> 636 res = pm.run(self.status)
637 if res is not None:
638 # Early pipeline completion
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in run(self, status)
233 # No more fallback pipelines?
234 if is_final_pipeline:
--> 235 raise patched_exception
236 # Go to next fallback pipeline
237 else:
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in run(self, status)
225 try:
226 event(stage_name)
--> 227 stage()
228 except _EarlyPipelineCompletion as e:
229 return e.result
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in stage_objectmode_backend(self)
552 """
553 lowerfn = self.backend_object_mode
--> 554 self._backend(lowerfn, objectmode=True)
555
556 # Warn if compiled function in object mode and force_pyobject not set
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in _backend(self, lowerfn, objectmode)
529 self.library.enable_object_caching()
530
--> 531 lowered = lowerfn()
532 signature = typing.signature(self.return_type, *self.args)
533 self.cr = compile_result(typing_context=self.typingctx,
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in backend_object_mode(self)
503 self.library,
504 self.func_ir,
--> 505 self.flags)
506
507 def backend_nopython_mode(self):
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in py_lowering_stage(targetctx, library, interp, flags)
824 fndesc = funcdesc.PythonFunctionDescriptor.from_object_mode_function(interp)
825 lower = objmode.PyLower(targetctx, library, fndesc, interp)
--> 826 lower.lower()
827 if not flags.no_cpython_wrapper:
828 lower.create_cpython_wrapper()
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/lowering.py in lower(self)
124 if self.generator_info is None:
125 self.genlower = None
--> 126 self.lower_normal_function(self.fndesc)
127 else:
128 self.genlower = self.GeneratorLower(self)
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/lowering.py in lower_normal_function(self, fndesc)
159 # Init argument values
160 self.extract_function_arguments()
--> 161 entry_block_tail = self.lower_function_body()
162
163 # Close tail of entry block
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/lowering.py in lower_function_body(self)
184 bb = self.blkmap[offset]
185 self.builder.position_at_end(bb)
--> 186 self.lower_block(block)
187
188 self.post_lower()
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/lowering.py in lower_block(self, block)
199 with new_error_context('lowering "{inst}" at {loc}', inst=inst,
200 loc=self.loc, errcls_=defaulterrcls):
--> 201 self.lower_inst(inst)
202
203 def create_cpython_wrapper(self, release_gil=False):
/Applications/anaconda/envs/numba029/lib/python3.5/contextlib.py in __exit__(self, type, value, traceback)
75 value = type()
76 try:
---> 77 self.gen.throw(type, value, traceback)
78 raise RuntimeError("generator didn't stop after throw()")
79 except StopIteration as exc:
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/errors.py in new_error_context(fmt_, *args, **kwargs)
247 except Exception as e:
248 newerr = errcls(e).add_context(_format_msg(fmt_, args, kwargs))
--> 249 six.reraise(type(newerr), newerr, sys.exc_info()[2])
250
251
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/six.py in reraise(tp, value, tb)
656 value = tp()
657 if value.__traceback__ is not tb:
--> 658 raise value.with_traceback(tb)
659 raise value
660
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/errors.py in new_error_context(fmt_, *args, **kwargs)
241 errcls = kwargs.pop('errcls_', InternalError)
242 try:
--> 243 yield
244 except NumbaError as e:
245 e.add_context(_format_msg(fmt_, args, kwargs))
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/lowering.py in lower_block(self, block)
199 with new_error_context('lowering "{inst}" at {loc}', inst=inst,
200 loc=self.loc, errcls_=defaulterrcls):
--> 201 self.lower_inst(inst)
202
203 def create_cpython_wrapper(self, release_gil=False):
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/objmode.py in lower_inst(self, inst)
143
144 else:
--> 145 raise NotImplementedError(type(inst), inst)
146
147 def lower_assign(self, inst):
LoweringError: Failed at object (object mode frontend)
Failed at object (object mode backend)
(<class 'numba.ir.StaticSetItem'>, a2[0] = $const0.5)
File "<ipython-input-1-0cabfcdeff6e>", line 7
[1] During: lowering "a2[0] = $const0.5" at <ipython-input-1-0cabfcdeff6e> (7)
|
NotImplementedError
|
def apply(self):
"""
Rewrite all matching getitems as static_getitems.
"""
new_block = self.block.copy()
new_block.clear()
for inst in self.block.body:
if isinstance(inst, ir.Assign):
expr = inst.value
if expr in self.getitems:
const = self.getitems[expr]
new_expr = ir.Expr.static_getitem(
value=expr.value, index=const, index_var=expr.index, loc=expr.loc
)
inst = ir.Assign(value=new_expr, target=inst.target, loc=inst.loc)
new_block.append(inst)
return new_block
|
def apply(self):
"""
Rewrite all matching getitems as static_getitems.
"""
for expr, const in self.getitems:
expr.op = "static_getitem"
expr.index_var = expr.index
expr.index = const
return self.block
|
https://github.com/numba/numba/issues/2169
|
---------------------------------------------------------------------------
NotImplementedError Traceback (most recent call last)
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/errors.py in new_error_context(fmt_, *args, **kwargs)
242 try:
--> 243 yield
244 except NumbaError as e:
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/lowering.py in lower_block(self, block)
200 loc=self.loc, errcls_=defaulterrcls):
--> 201 self.lower_inst(inst)
202
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/objmode.py in lower_inst(self, inst)
144 else:
--> 145 raise NotImplementedError(type(inst), inst)
146
NotImplementedError: (<class 'numba.ir.StaticSetItem'>, a2[0] = $const0.5)
During handling of the above exception, another exception occurred:
LoweringError Traceback (most recent call last)
<ipython-input-1-0cabfcdeff6e> in <module>()
12 return s
13
---> 14 test(np.array([3]), np.array([4]))
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/dispatcher.py in _compile_for_args(self, *args, **kws)
284 argtypes.append(self.typeof_pyval(a))
285 try:
--> 286 return self.compile(tuple(argtypes))
287 except errors.TypingError as e:
288 # Intercept typing error that may be due to an argument
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/dispatcher.py in compile(self, sig)
530
531 self._cache_misses[sig] += 1
--> 532 cres = self._compiler.compile(args, return_type)
533 self.add_overload(cres)
534 self._cache.save_overload(sig, cres)
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/dispatcher.py in compile(self, args, return_type)
79 impl,
80 args=args, return_type=return_type,
---> 81 flags=flags, locals=self.locals)
82 # Check typing error if object mode is used
83 if cres.typing_error is not None and not flags.enable_pyobject:
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in compile_extra(typingctx, targetctx, func, args, return_type, flags, locals, library)
682 pipeline = Pipeline(typingctx, targetctx, library,
683 args, return_type, flags, locals)
--> 684 return pipeline.compile_extra(func)
685
686
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in compile_extra(self, func)
346 self.lifted = ()
347 self.lifted_from = None
--> 348 return self._compile_bytecode()
349
350 def compile_ir(self, func_ir, lifted=(), lifted_from=None):
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in _compile_bytecode(self)
647 """
648 assert self.func_ir is None
--> 649 return self._compile_core()
650
651 def _compile_ir(self):
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in _compile_core(self)
634
635 pm.finalize()
--> 636 res = pm.run(self.status)
637 if res is not None:
638 # Early pipeline completion
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in run(self, status)
233 # No more fallback pipelines?
234 if is_final_pipeline:
--> 235 raise patched_exception
236 # Go to next fallback pipeline
237 else:
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in run(self, status)
225 try:
226 event(stage_name)
--> 227 stage()
228 except _EarlyPipelineCompletion as e:
229 return e.result
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in stage_objectmode_frontend(self)
413 if self.flags.enable_looplift:
414 assert not self.lifted
--> 415 cres = self.frontend_looplift()
416 if cres is not None:
417 raise _EarlyPipelineCompletion(cres)
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in frontend_looplift(self)
404 self.args, self.return_type,
405 outer_flags, self.locals,
--> 406 lifted=tuple(loops), lifted_from=None)
407 return cres
408
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in compile_ir(typingctx, targetctx, func_ir, args, return_type, flags, locals, lifted, lifted_from, library)
696 args, return_type, flags, locals)
697 return pipeline.compile_ir(func_ir=func_ir, lifted=lifted,
--> 698 lifted_from=lifted_from)
699
700
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in compile_ir(self, func_ir, lifted, lifted_from)
354
355 self._set_and_check_ir(func_ir)
--> 356 return self._compile_ir()
357
358 def stage_analyze_bytecode(self):
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in _compile_ir(self)
654 """
655 assert self.func_ir is not None
--> 656 return self._compile_core()
657
658
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in _compile_core(self)
634
635 pm.finalize()
--> 636 res = pm.run(self.status)
637 if res is not None:
638 # Early pipeline completion
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in run(self, status)
233 # No more fallback pipelines?
234 if is_final_pipeline:
--> 235 raise patched_exception
236 # Go to next fallback pipeline
237 else:
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in run(self, status)
225 try:
226 event(stage_name)
--> 227 stage()
228 except _EarlyPipelineCompletion as e:
229 return e.result
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in stage_objectmode_backend(self)
552 """
553 lowerfn = self.backend_object_mode
--> 554 self._backend(lowerfn, objectmode=True)
555
556 # Warn if compiled function in object mode and force_pyobject not set
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in _backend(self, lowerfn, objectmode)
529 self.library.enable_object_caching()
530
--> 531 lowered = lowerfn()
532 signature = typing.signature(self.return_type, *self.args)
533 self.cr = compile_result(typing_context=self.typingctx,
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in backend_object_mode(self)
503 self.library,
504 self.func_ir,
--> 505 self.flags)
506
507 def backend_nopython_mode(self):
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/compiler.py in py_lowering_stage(targetctx, library, interp, flags)
824 fndesc = funcdesc.PythonFunctionDescriptor.from_object_mode_function(interp)
825 lower = objmode.PyLower(targetctx, library, fndesc, interp)
--> 826 lower.lower()
827 if not flags.no_cpython_wrapper:
828 lower.create_cpython_wrapper()
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/lowering.py in lower(self)
124 if self.generator_info is None:
125 self.genlower = None
--> 126 self.lower_normal_function(self.fndesc)
127 else:
128 self.genlower = self.GeneratorLower(self)
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/lowering.py in lower_normal_function(self, fndesc)
159 # Init argument values
160 self.extract_function_arguments()
--> 161 entry_block_tail = self.lower_function_body()
162
163 # Close tail of entry block
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/lowering.py in lower_function_body(self)
184 bb = self.blkmap[offset]
185 self.builder.position_at_end(bb)
--> 186 self.lower_block(block)
187
188 self.post_lower()
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/lowering.py in lower_block(self, block)
199 with new_error_context('lowering "{inst}" at {loc}', inst=inst,
200 loc=self.loc, errcls_=defaulterrcls):
--> 201 self.lower_inst(inst)
202
203 def create_cpython_wrapper(self, release_gil=False):
/Applications/anaconda/envs/numba029/lib/python3.5/contextlib.py in __exit__(self, type, value, traceback)
75 value = type()
76 try:
---> 77 self.gen.throw(type, value, traceback)
78 raise RuntimeError("generator didn't stop after throw()")
79 except StopIteration as exc:
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/errors.py in new_error_context(fmt_, *args, **kwargs)
247 except Exception as e:
248 newerr = errcls(e).add_context(_format_msg(fmt_, args, kwargs))
--> 249 six.reraise(type(newerr), newerr, sys.exc_info()[2])
250
251
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/six.py in reraise(tp, value, tb)
656 value = tp()
657 if value.__traceback__ is not tb:
--> 658 raise value.with_traceback(tb)
659 raise value
660
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/errors.py in new_error_context(fmt_, *args, **kwargs)
241 errcls = kwargs.pop('errcls_', InternalError)
242 try:
--> 243 yield
244 except NumbaError as e:
245 e.add_context(_format_msg(fmt_, args, kwargs))
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/lowering.py in lower_block(self, block)
199 with new_error_context('lowering "{inst}" at {loc}', inst=inst,
200 loc=self.loc, errcls_=defaulterrcls):
--> 201 self.lower_inst(inst)
202
203 def create_cpython_wrapper(self, release_gil=False):
/Applications/anaconda/envs/numba029/lib/python3.5/site-packages/numba/objmode.py in lower_inst(self, inst)
143
144 else:
--> 145 raise NotImplementedError(type(inst), inst)
146
147 def lower_assign(self, inst):
LoweringError: Failed at object (object mode frontend)
Failed at object (object mode backend)
(<class 'numba.ir.StaticSetItem'>, a2[0] = $const0.5)
File "<ipython-input-1-0cabfcdeff6e>", line 7
[1] During: lowering "a2[0] = $const0.5" at <ipython-input-1-0cabfcdeff6e> (7)
|
NotImplementedError
|
def create_stream(self):
handle = drvapi.cu_stream()
driver.cuStreamCreate(byref(handle), 0)
return Stream(
weakref.proxy(self), handle, _stream_finalizer(self.deallocations, handle)
)
|
def create_stream(self):
handle = drvapi.cu_stream()
driver.cuStreamCreate(byref(handle), 0)
return Stream(
weakref.proxy(self), handle, _stream_finalizer(self.allocations, handle)
)
|
https://github.com/numba/numba/issues/2064
|
test_pinned (numba.cuda.tests.cudadrv.test_pinned.TestPinned) ... Exception ignored in: <finalize object at 0x7f75a9726260; dead>
Traceback (most recent call last):
File "/home/antoine/numba/numba/utils.py", line 593, in __call__
return info.func(*info.args, **(info.kwargs or {}))
File "/home/antoine/numba/numba/cuda/cudadrv/driver.py", line 847, in core
deallocs.add_item(driver.cuStreamDestroy, handle)
AttributeError: 'UniqueDict' object has no attribute 'add_item'
ok
test_unpinned (numba.cuda.tests.cudadrv.test_pinned.TestPinned) ... Exception ignored in: <finalize object at 0x7f75a97262a0; dead>
Traceback (most recent call last):
File "/home/antoine/numba/numba/utils.py", line 593, in __call__
return info.func(*info.args, **(info.kwargs or {}))
File "/home/antoine/numba/numba/cuda/cudadrv/driver.py", line 847, in core
deallocs.add_item(driver.cuStreamDestroy, handle)
AttributeError: 'UniqueDict' object has no attribute 'add_item'
ok
|
AttributeError
|
def _module_finalizer(context, handle):
trashing = context.trashing
modules = context.modules
def core():
def cleanup():
# All modules are owned by their parent Context.
# A Module is either released by a call to
# Context.unload_module, which clear the handle (pointer) mapping
# (checked by the following assertion), or, by Context.reset().
# Both releases the sole reference to the Module and trigger the
# finalizer for the Module instance. The actual call to
# cuModuleUnload is deferred to the trashing service to avoid
# further corruption of the CUDA context if a fatal error has
# occurred in the CUDA driver.
assert handle.value not in modules
driver.cuModuleUnload(handle)
trashing.add_trash(cleanup)
return core
|
def _module_finalizer(context, handle):
trashing = context.trashing
modules = context.modules
def core():
def cleanup():
if modules:
del modules[handle.value]
driver.cuModuleUnload(handle)
trashing.add_trash(cleanup)
return core
|
https://github.com/numba/numba/issues/1858
|
======================================================================
ERROR: test_cuda_driver_occupancy (numba.cuda.tests.cudadrv.test_cuda_driver.TestCudaDriver)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/antoine/numba/numba/cuda/tests/cudadrv/test_cuda_driver.py", line 117, in test_cuda_driver_occupancy
module = self.context.create_module_ptx(self.ptx)
File "/home/antoine/numba/numba/cuda/cudadrv/driver.py", line 646, in create_module_ptx
return self.create_module_image(image)
File "/home/antoine/numba/numba/cuda/cudadrv/driver.py", line 649, in create_module_image
self.trashing.service()
File "/home/antoine/numba/numba/servicelib/service.py", line 30, in service
next(self._task)
File "/home/antoine/numba/numba/cuda/cudadrv/driver.py", line 321, in process
cb()
File "/home/antoine/numba/numba/cuda/cudadrv/driver.py", line 779, in cleanup
del modules[handle.value]
KeyError: 47652304
|
KeyError
|
def core():
def cleanup():
# All modules are owned by their parent Context.
# A Module is either released by a call to
# Context.unload_module, which clear the handle (pointer) mapping
# (checked by the following assertion), or, by Context.reset().
# Both releases the sole reference to the Module and trigger the
# finalizer for the Module instance. The actual call to
# cuModuleUnload is deferred to the trashing service to avoid
# further corruption of the CUDA context if a fatal error has
# occurred in the CUDA driver.
assert handle.value not in modules
driver.cuModuleUnload(handle)
trashing.add_trash(cleanup)
|
def core():
def cleanup():
if modules:
del modules[handle.value]
driver.cuModuleUnload(handle)
trashing.add_trash(cleanup)
|
https://github.com/numba/numba/issues/1858
|
======================================================================
ERROR: test_cuda_driver_occupancy (numba.cuda.tests.cudadrv.test_cuda_driver.TestCudaDriver)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/antoine/numba/numba/cuda/tests/cudadrv/test_cuda_driver.py", line 117, in test_cuda_driver_occupancy
module = self.context.create_module_ptx(self.ptx)
File "/home/antoine/numba/numba/cuda/cudadrv/driver.py", line 646, in create_module_ptx
return self.create_module_image(image)
File "/home/antoine/numba/numba/cuda/cudadrv/driver.py", line 649, in create_module_image
self.trashing.service()
File "/home/antoine/numba/numba/servicelib/service.py", line 30, in service
next(self._task)
File "/home/antoine/numba/numba/cuda/cudadrv/driver.py", line 321, in process
cb()
File "/home/antoine/numba/numba/cuda/cudadrv/driver.py", line 779, in cleanup
del modules[handle.value]
KeyError: 47652304
|
KeyError
|
def cleanup():
# All modules are owned by their parent Context.
# A Module is either released by a call to
# Context.unload_module, which clear the handle (pointer) mapping
# (checked by the following assertion), or, by Context.reset().
# Both releases the sole reference to the Module and trigger the
# finalizer for the Module instance. The actual call to
# cuModuleUnload is deferred to the trashing service to avoid
# further corruption of the CUDA context if a fatal error has
# occurred in the CUDA driver.
assert handle.value not in modules
driver.cuModuleUnload(handle)
|
def cleanup():
if modules:
del modules[handle.value]
driver.cuModuleUnload(handle)
|
https://github.com/numba/numba/issues/1858
|
======================================================================
ERROR: test_cuda_driver_occupancy (numba.cuda.tests.cudadrv.test_cuda_driver.TestCudaDriver)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/antoine/numba/numba/cuda/tests/cudadrv/test_cuda_driver.py", line 117, in test_cuda_driver_occupancy
module = self.context.create_module_ptx(self.ptx)
File "/home/antoine/numba/numba/cuda/cudadrv/driver.py", line 646, in create_module_ptx
return self.create_module_image(image)
File "/home/antoine/numba/numba/cuda/cudadrv/driver.py", line 649, in create_module_image
self.trashing.service()
File "/home/antoine/numba/numba/servicelib/service.py", line 30, in service
next(self._task)
File "/home/antoine/numba/numba/cuda/cudadrv/driver.py", line 321, in process
cb()
File "/home/antoine/numba/numba/cuda/cudadrv/driver.py", line 779, in cleanup
del modules[handle.value]
KeyError: 47652304
|
KeyError
|
def __init__(self, context, handle, info_log, finalizer=None):
self.context = context
self.handle = handle
self.info_log = info_log
self.finalizer = finalizer
if self.finalizer is not None:
self._finalizer = utils.finalize(self, finalizer)
|
def __init__(self, context, handle, info_log, finalizer=None):
self.context = context
self.handle = handle
self.info_log = info_log
self.finalizer = finalizer
self.is_managed = self.finalizer is not None
|
https://github.com/numba/numba/issues/1858
|
======================================================================
ERROR: test_cuda_driver_occupancy (numba.cuda.tests.cudadrv.test_cuda_driver.TestCudaDriver)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/antoine/numba/numba/cuda/tests/cudadrv/test_cuda_driver.py", line 117, in test_cuda_driver_occupancy
module = self.context.create_module_ptx(self.ptx)
File "/home/antoine/numba/numba/cuda/cudadrv/driver.py", line 646, in create_module_ptx
return self.create_module_image(image)
File "/home/antoine/numba/numba/cuda/cudadrv/driver.py", line 649, in create_module_image
self.trashing.service()
File "/home/antoine/numba/numba/servicelib/service.py", line 30, in service
next(self._task)
File "/home/antoine/numba/numba/cuda/cudadrv/driver.py", line 321, in process
cb()
File "/home/antoine/numba/numba/cuda/cudadrv/driver.py", line 779, in cleanup
del modules[handle.value]
KeyError: 47652304
|
KeyError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.