after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def scalar_angle_kwarg(context, builder, sig, args):
deg_mult = sig.return_type(180 / numpy.pi)
def scalar_angle_impl(val, deg):
if deg:
return numpy.arctan2(val.imag, val.real) * deg_mult
else:
return numpy.arctan2(val.imag, val.real)
if len(args) == 1:
args = args + (cgutils.false_bit,)
sig = signature(sig.return_type, *(sig.args + (types.boolean,)))
res = context.compile_internal(builder, scalar_angle_impl, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
|
def scalar_angle_kwarg(context, builder, sig, args):
def scalar_angle_impl(val, deg=False):
if deg:
scal = 180 / numpy.pi
return numpy.arctan2(val.imag, val.real) * scal
else:
return numpy.arctan2(val.imag, val.real)
res = context.compile_internal(builder, scalar_angle_impl, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
|
https://github.com/numba/numba/issues/1667
|
from numba import jit
@jit(nopython=True)
...: def angle(x): return np.angle(x)
...:
angle(np.complex128([1+1j]))
Traceback (most recent call last):
File "<ipython-input-6-186e6f934ae3>", line 1, in <module>
angle(np.complex128([1+1j]))
File "/home/antoine/numba/numba/dispatcher.py", line 171, in _compile_for_args
return self.compile(sig)
File "/home/antoine/numba/numba/dispatcher.py", line 349, in compile
flags=flags, locals=self.locals)
File "/home/antoine/numba/numba/compiler.py", line 684, in compile_extra
return pipeline.compile_extra(func)
File "/home/antoine/numba/numba/compiler.py", line 372, in compile_extra
return self.compile_bytecode(bc, func_attr=self.func_attr)
File "/home/antoine/numba/numba/compiler.py", line 381, in compile_bytecode
return self._compile_bytecode()
File "/home/antoine/numba/numba/compiler.py", line 665, in _compile_bytecode
res = pm.run(self.status)
File "/home/antoine/numba/numba/compiler.py", line 251, in run
raise patched_exception
File "/home/antoine/numba/numba/compiler.py", line 243, in run
stage()
File "/home/antoine/numba/numba/compiler.py", line 469, in stage_nopython_frontend
self.locals)
File "/home/antoine/numba/numba/compiler.py", line 799, in type_inference_stage
infer.propagate()
File "/home/antoine/numba/numba/typeinfer.py", line 565, in propagate
raise errors[0]
TypingError: Internal error at <numba.typeinfer.CallConstraint object at 0x7fa910005470>:
--%<-----------------------------------------------------------------
Traceback (most recent call last):
File "/home/antoine/numba/numba/typeinfer.py", line 111, in propagate
constraint(typeinfer)
File "/home/antoine/numba/numba/typeinfer.py", line 284, in __call__
self.resolve(typeinfer, typevars, fnty)
File "/home/antoine/numba/numba/typeinfer.py", line 311, in resolve
sig = context.resolve_function_type(fnty, pos_args, kw_args)
File "/home/antoine/numba/numba/typing/context.py", line 123, in resolve_function_type
return func.get_call_type(self, args, kws)
File "/home/antoine/numba/numba/types.py", line 264, in get_call_type
sig = temp.apply(args, kws)
File "/home/antoine/numba/numba/typing/templates.py", line 229, in apply
sig = typer(*args, **kws)
File "/home/antoine/numba/numba/typing/npydecl.py", line 850, in typer
return ref.copy(dtype=ref.underlying_float)
AttributeError: 'Array' object has no attribute 'underlying_float'
--%<-----------------------------------------------------------------
|
TypingError
|
def scalar_angle_impl(val, deg):
if deg:
return numpy.arctan2(val.imag, val.real) * deg_mult
else:
return numpy.arctan2(val.imag, val.real)
|
def scalar_angle_impl(val, deg=False):
if deg:
scal = 180 / numpy.pi
return numpy.arctan2(val.imag, val.real) * scal
else:
return numpy.arctan2(val.imag, val.real)
|
https://github.com/numba/numba/issues/1667
|
from numba import jit
@jit(nopython=True)
...: def angle(x): return np.angle(x)
...:
angle(np.complex128([1+1j]))
Traceback (most recent call last):
File "<ipython-input-6-186e6f934ae3>", line 1, in <module>
angle(np.complex128([1+1j]))
File "/home/antoine/numba/numba/dispatcher.py", line 171, in _compile_for_args
return self.compile(sig)
File "/home/antoine/numba/numba/dispatcher.py", line 349, in compile
flags=flags, locals=self.locals)
File "/home/antoine/numba/numba/compiler.py", line 684, in compile_extra
return pipeline.compile_extra(func)
File "/home/antoine/numba/numba/compiler.py", line 372, in compile_extra
return self.compile_bytecode(bc, func_attr=self.func_attr)
File "/home/antoine/numba/numba/compiler.py", line 381, in compile_bytecode
return self._compile_bytecode()
File "/home/antoine/numba/numba/compiler.py", line 665, in _compile_bytecode
res = pm.run(self.status)
File "/home/antoine/numba/numba/compiler.py", line 251, in run
raise patched_exception
File "/home/antoine/numba/numba/compiler.py", line 243, in run
stage()
File "/home/antoine/numba/numba/compiler.py", line 469, in stage_nopython_frontend
self.locals)
File "/home/antoine/numba/numba/compiler.py", line 799, in type_inference_stage
infer.propagate()
File "/home/antoine/numba/numba/typeinfer.py", line 565, in propagate
raise errors[0]
TypingError: Internal error at <numba.typeinfer.CallConstraint object at 0x7fa910005470>:
--%<-----------------------------------------------------------------
Traceback (most recent call last):
File "/home/antoine/numba/numba/typeinfer.py", line 111, in propagate
constraint(typeinfer)
File "/home/antoine/numba/numba/typeinfer.py", line 284, in __call__
self.resolve(typeinfer, typevars, fnty)
File "/home/antoine/numba/numba/typeinfer.py", line 311, in resolve
sig = context.resolve_function_type(fnty, pos_args, kw_args)
File "/home/antoine/numba/numba/typing/context.py", line 123, in resolve_function_type
return func.get_call_type(self, args, kws)
File "/home/antoine/numba/numba/types.py", line 264, in get_call_type
sig = temp.apply(args, kws)
File "/home/antoine/numba/numba/typing/templates.py", line 229, in apply
sig = typer(*args, **kws)
File "/home/antoine/numba/numba/typing/npydecl.py", line 850, in typer
return ref.copy(dtype=ref.underlying_float)
AttributeError: 'Array' object has no attribute 'underlying_float'
--%<-----------------------------------------------------------------
|
TypingError
|
def array_angle_kwarg(context, builder, sig, args):
arg = sig.args[0]
ret_dtype = sig.return_type.dtype
def array_angle_impl(arr, deg):
out = numpy.zeros_like(arr, dtype=ret_dtype)
for index, val in numpy.ndenumerate(arr):
out[index] = numpy.angle(val, deg)
return out
if len(args) == 1:
args = args + (cgutils.false_bit,)
sig = signature(sig.return_type, *(sig.args + (types.boolean,)))
res = context.compile_internal(builder, array_angle_impl, sig, args)
return impl_ret_new_ref(context, builder, sig.return_type, res)
|
def array_angle_kwarg(context, builder, sig, args):
arg = sig.args[0]
if isinstance(arg.dtype, types.Complex):
retty = arg.dtype.underlying_float
else:
retty = arg.dtype
def array_angle_impl(arr, deg=False):
out = numpy.zeros_like(arr, dtype=retty)
for index, val in numpy.ndenumerate(arr):
out[index] = numpy.angle(val, deg)
return out
res = context.compile_internal(builder, array_angle_impl, sig, args)
return impl_ret_new_ref(context, builder, sig.return_type, res)
|
https://github.com/numba/numba/issues/1667
|
from numba import jit
@jit(nopython=True)
...: def angle(x): return np.angle(x)
...:
angle(np.complex128([1+1j]))
Traceback (most recent call last):
File "<ipython-input-6-186e6f934ae3>", line 1, in <module>
angle(np.complex128([1+1j]))
File "/home/antoine/numba/numba/dispatcher.py", line 171, in _compile_for_args
return self.compile(sig)
File "/home/antoine/numba/numba/dispatcher.py", line 349, in compile
flags=flags, locals=self.locals)
File "/home/antoine/numba/numba/compiler.py", line 684, in compile_extra
return pipeline.compile_extra(func)
File "/home/antoine/numba/numba/compiler.py", line 372, in compile_extra
return self.compile_bytecode(bc, func_attr=self.func_attr)
File "/home/antoine/numba/numba/compiler.py", line 381, in compile_bytecode
return self._compile_bytecode()
File "/home/antoine/numba/numba/compiler.py", line 665, in _compile_bytecode
res = pm.run(self.status)
File "/home/antoine/numba/numba/compiler.py", line 251, in run
raise patched_exception
File "/home/antoine/numba/numba/compiler.py", line 243, in run
stage()
File "/home/antoine/numba/numba/compiler.py", line 469, in stage_nopython_frontend
self.locals)
File "/home/antoine/numba/numba/compiler.py", line 799, in type_inference_stage
infer.propagate()
File "/home/antoine/numba/numba/typeinfer.py", line 565, in propagate
raise errors[0]
TypingError: Internal error at <numba.typeinfer.CallConstraint object at 0x7fa910005470>:
--%<-----------------------------------------------------------------
Traceback (most recent call last):
File "/home/antoine/numba/numba/typeinfer.py", line 111, in propagate
constraint(typeinfer)
File "/home/antoine/numba/numba/typeinfer.py", line 284, in __call__
self.resolve(typeinfer, typevars, fnty)
File "/home/antoine/numba/numba/typeinfer.py", line 311, in resolve
sig = context.resolve_function_type(fnty, pos_args, kw_args)
File "/home/antoine/numba/numba/typing/context.py", line 123, in resolve_function_type
return func.get_call_type(self, args, kws)
File "/home/antoine/numba/numba/types.py", line 264, in get_call_type
sig = temp.apply(args, kws)
File "/home/antoine/numba/numba/typing/templates.py", line 229, in apply
sig = typer(*args, **kws)
File "/home/antoine/numba/numba/typing/npydecl.py", line 850, in typer
return ref.copy(dtype=ref.underlying_float)
AttributeError: 'Array' object has no attribute 'underlying_float'
--%<-----------------------------------------------------------------
|
TypingError
|
def array_angle_impl(arr, deg):
out = numpy.zeros_like(arr, dtype=ret_dtype)
for index, val in numpy.ndenumerate(arr):
out[index] = numpy.angle(val, deg)
return out
|
def array_angle_impl(arr, deg=False):
out = numpy.zeros_like(arr, dtype=retty)
for index, val in numpy.ndenumerate(arr):
out[index] = numpy.angle(val, deg)
return out
|
https://github.com/numba/numba/issues/1667
|
from numba import jit
@jit(nopython=True)
...: def angle(x): return np.angle(x)
...:
angle(np.complex128([1+1j]))
Traceback (most recent call last):
File "<ipython-input-6-186e6f934ae3>", line 1, in <module>
angle(np.complex128([1+1j]))
File "/home/antoine/numba/numba/dispatcher.py", line 171, in _compile_for_args
return self.compile(sig)
File "/home/antoine/numba/numba/dispatcher.py", line 349, in compile
flags=flags, locals=self.locals)
File "/home/antoine/numba/numba/compiler.py", line 684, in compile_extra
return pipeline.compile_extra(func)
File "/home/antoine/numba/numba/compiler.py", line 372, in compile_extra
return self.compile_bytecode(bc, func_attr=self.func_attr)
File "/home/antoine/numba/numba/compiler.py", line 381, in compile_bytecode
return self._compile_bytecode()
File "/home/antoine/numba/numba/compiler.py", line 665, in _compile_bytecode
res = pm.run(self.status)
File "/home/antoine/numba/numba/compiler.py", line 251, in run
raise patched_exception
File "/home/antoine/numba/numba/compiler.py", line 243, in run
stage()
File "/home/antoine/numba/numba/compiler.py", line 469, in stage_nopython_frontend
self.locals)
File "/home/antoine/numba/numba/compiler.py", line 799, in type_inference_stage
infer.propagate()
File "/home/antoine/numba/numba/typeinfer.py", line 565, in propagate
raise errors[0]
TypingError: Internal error at <numba.typeinfer.CallConstraint object at 0x7fa910005470>:
--%<-----------------------------------------------------------------
Traceback (most recent call last):
File "/home/antoine/numba/numba/typeinfer.py", line 111, in propagate
constraint(typeinfer)
File "/home/antoine/numba/numba/typeinfer.py", line 284, in __call__
self.resolve(typeinfer, typevars, fnty)
File "/home/antoine/numba/numba/typeinfer.py", line 311, in resolve
sig = context.resolve_function_type(fnty, pos_args, kw_args)
File "/home/antoine/numba/numba/typing/context.py", line 123, in resolve_function_type
return func.get_call_type(self, args, kws)
File "/home/antoine/numba/numba/types.py", line 264, in get_call_type
sig = temp.apply(args, kws)
File "/home/antoine/numba/numba/typing/templates.py", line 229, in apply
sig = typer(*args, **kws)
File "/home/antoine/numba/numba/typing/npydecl.py", line 850, in typer
return ref.copy(dtype=ref.underlying_float)
AttributeError: 'Array' object has no attribute 'underlying_float'
--%<-----------------------------------------------------------------
|
TypingError
|
def generic(self):
def typer(z, deg=False):
if isinstance(z, types.Array):
dtype = z.dtype
else:
dtype = z
if isinstance(dtype, types.Complex):
ret_dtype = dtype.underlying_float
elif isinstance(dtype, types.Float):
ret_dtype = dtype
else:
return
if isinstance(z, types.Array):
return z.copy(dtype=ret_dtype)
else:
return ret_dtype
return typer
|
def generic(self):
def typer(ref, deg=False):
if isinstance(ref, types.Array):
return ref.copy(dtype=ref.underlying_float)
else:
return types.float64
return typer
|
https://github.com/numba/numba/issues/1667
|
from numba import jit
@jit(nopython=True)
...: def angle(x): return np.angle(x)
...:
angle(np.complex128([1+1j]))
Traceback (most recent call last):
File "<ipython-input-6-186e6f934ae3>", line 1, in <module>
angle(np.complex128([1+1j]))
File "/home/antoine/numba/numba/dispatcher.py", line 171, in _compile_for_args
return self.compile(sig)
File "/home/antoine/numba/numba/dispatcher.py", line 349, in compile
flags=flags, locals=self.locals)
File "/home/antoine/numba/numba/compiler.py", line 684, in compile_extra
return pipeline.compile_extra(func)
File "/home/antoine/numba/numba/compiler.py", line 372, in compile_extra
return self.compile_bytecode(bc, func_attr=self.func_attr)
File "/home/antoine/numba/numba/compiler.py", line 381, in compile_bytecode
return self._compile_bytecode()
File "/home/antoine/numba/numba/compiler.py", line 665, in _compile_bytecode
res = pm.run(self.status)
File "/home/antoine/numba/numba/compiler.py", line 251, in run
raise patched_exception
File "/home/antoine/numba/numba/compiler.py", line 243, in run
stage()
File "/home/antoine/numba/numba/compiler.py", line 469, in stage_nopython_frontend
self.locals)
File "/home/antoine/numba/numba/compiler.py", line 799, in type_inference_stage
infer.propagate()
File "/home/antoine/numba/numba/typeinfer.py", line 565, in propagate
raise errors[0]
TypingError: Internal error at <numba.typeinfer.CallConstraint object at 0x7fa910005470>:
--%<-----------------------------------------------------------------
Traceback (most recent call last):
File "/home/antoine/numba/numba/typeinfer.py", line 111, in propagate
constraint(typeinfer)
File "/home/antoine/numba/numba/typeinfer.py", line 284, in __call__
self.resolve(typeinfer, typevars, fnty)
File "/home/antoine/numba/numba/typeinfer.py", line 311, in resolve
sig = context.resolve_function_type(fnty, pos_args, kw_args)
File "/home/antoine/numba/numba/typing/context.py", line 123, in resolve_function_type
return func.get_call_type(self, args, kws)
File "/home/antoine/numba/numba/types.py", line 264, in get_call_type
sig = temp.apply(args, kws)
File "/home/antoine/numba/numba/typing/templates.py", line 229, in apply
sig = typer(*args, **kws)
File "/home/antoine/numba/numba/typing/npydecl.py", line 850, in typer
return ref.copy(dtype=ref.underlying_float)
AttributeError: 'Array' object has no attribute 'underlying_float'
--%<-----------------------------------------------------------------
|
TypingError
|
def typer(z, deg=False):
if isinstance(z, types.Array):
dtype = z.dtype
else:
dtype = z
if isinstance(dtype, types.Complex):
ret_dtype = dtype.underlying_float
elif isinstance(dtype, types.Float):
ret_dtype = dtype
else:
return
if isinstance(z, types.Array):
return z.copy(dtype=ret_dtype)
else:
return ret_dtype
|
def typer(ref, deg=False):
if isinstance(ref, types.Array):
return ref.copy(dtype=ref.underlying_float)
else:
return types.float64
|
https://github.com/numba/numba/issues/1667
|
from numba import jit
@jit(nopython=True)
...: def angle(x): return np.angle(x)
...:
angle(np.complex128([1+1j]))
Traceback (most recent call last):
File "<ipython-input-6-186e6f934ae3>", line 1, in <module>
angle(np.complex128([1+1j]))
File "/home/antoine/numba/numba/dispatcher.py", line 171, in _compile_for_args
return self.compile(sig)
File "/home/antoine/numba/numba/dispatcher.py", line 349, in compile
flags=flags, locals=self.locals)
File "/home/antoine/numba/numba/compiler.py", line 684, in compile_extra
return pipeline.compile_extra(func)
File "/home/antoine/numba/numba/compiler.py", line 372, in compile_extra
return self.compile_bytecode(bc, func_attr=self.func_attr)
File "/home/antoine/numba/numba/compiler.py", line 381, in compile_bytecode
return self._compile_bytecode()
File "/home/antoine/numba/numba/compiler.py", line 665, in _compile_bytecode
res = pm.run(self.status)
File "/home/antoine/numba/numba/compiler.py", line 251, in run
raise patched_exception
File "/home/antoine/numba/numba/compiler.py", line 243, in run
stage()
File "/home/antoine/numba/numba/compiler.py", line 469, in stage_nopython_frontend
self.locals)
File "/home/antoine/numba/numba/compiler.py", line 799, in type_inference_stage
infer.propagate()
File "/home/antoine/numba/numba/typeinfer.py", line 565, in propagate
raise errors[0]
TypingError: Internal error at <numba.typeinfer.CallConstraint object at 0x7fa910005470>:
--%<-----------------------------------------------------------------
Traceback (most recent call last):
File "/home/antoine/numba/numba/typeinfer.py", line 111, in propagate
constraint(typeinfer)
File "/home/antoine/numba/numba/typeinfer.py", line 284, in __call__
self.resolve(typeinfer, typevars, fnty)
File "/home/antoine/numba/numba/typeinfer.py", line 311, in resolve
sig = context.resolve_function_type(fnty, pos_args, kw_args)
File "/home/antoine/numba/numba/typing/context.py", line 123, in resolve_function_type
return func.get_call_type(self, args, kws)
File "/home/antoine/numba/numba/types.py", line 264, in get_call_type
sig = temp.apply(args, kws)
File "/home/antoine/numba/numba/typing/templates.py", line 229, in apply
sig = typer(*args, **kws)
File "/home/antoine/numba/numba/typing/npydecl.py", line 850, in typer
return ref.copy(dtype=ref.underlying_float)
AttributeError: 'Array' object has no attribute 'underlying_float'
--%<-----------------------------------------------------------------
|
TypingError
|
def generic(self):
def typer(ref, k=0):
if isinstance(ref, types.Array):
if ref.ndim == 1:
rdim = 2
elif ref.ndim == 2:
rdim = 1
else:
return None
return types.Array(ndim=rdim, dtype=ref.dtype, layout="C")
return typer
|
def generic(self):
def typer(ref, k=0):
if isinstance(ref, types.Array):
if ref.ndim == 1:
rdim = 2
elif ref.ndim == 2:
rdim = 1
else:
return None
return types.Array(ndim=rdim, dtype=ref.dtype, layout="C")
return typer
|
https://github.com/numba/numba/issues/1667
|
from numba import jit
@jit(nopython=True)
...: def angle(x): return np.angle(x)
...:
angle(np.complex128([1+1j]))
Traceback (most recent call last):
File "<ipython-input-6-186e6f934ae3>", line 1, in <module>
angle(np.complex128([1+1j]))
File "/home/antoine/numba/numba/dispatcher.py", line 171, in _compile_for_args
return self.compile(sig)
File "/home/antoine/numba/numba/dispatcher.py", line 349, in compile
flags=flags, locals=self.locals)
File "/home/antoine/numba/numba/compiler.py", line 684, in compile_extra
return pipeline.compile_extra(func)
File "/home/antoine/numba/numba/compiler.py", line 372, in compile_extra
return self.compile_bytecode(bc, func_attr=self.func_attr)
File "/home/antoine/numba/numba/compiler.py", line 381, in compile_bytecode
return self._compile_bytecode()
File "/home/antoine/numba/numba/compiler.py", line 665, in _compile_bytecode
res = pm.run(self.status)
File "/home/antoine/numba/numba/compiler.py", line 251, in run
raise patched_exception
File "/home/antoine/numba/numba/compiler.py", line 243, in run
stage()
File "/home/antoine/numba/numba/compiler.py", line 469, in stage_nopython_frontend
self.locals)
File "/home/antoine/numba/numba/compiler.py", line 799, in type_inference_stage
infer.propagate()
File "/home/antoine/numba/numba/typeinfer.py", line 565, in propagate
raise errors[0]
TypingError: Internal error at <numba.typeinfer.CallConstraint object at 0x7fa910005470>:
--%<-----------------------------------------------------------------
Traceback (most recent call last):
File "/home/antoine/numba/numba/typeinfer.py", line 111, in propagate
constraint(typeinfer)
File "/home/antoine/numba/numba/typeinfer.py", line 284, in __call__
self.resolve(typeinfer, typevars, fnty)
File "/home/antoine/numba/numba/typeinfer.py", line 311, in resolve
sig = context.resolve_function_type(fnty, pos_args, kw_args)
File "/home/antoine/numba/numba/typing/context.py", line 123, in resolve_function_type
return func.get_call_type(self, args, kws)
File "/home/antoine/numba/numba/types.py", line 264, in get_call_type
sig = temp.apply(args, kws)
File "/home/antoine/numba/numba/typing/templates.py", line 229, in apply
sig = typer(*args, **kws)
File "/home/antoine/numba/numba/typing/npydecl.py", line 850, in typer
return ref.copy(dtype=ref.underlying_float)
AttributeError: 'Array' object has no attribute 'underlying_float'
--%<-----------------------------------------------------------------
|
TypingError
|
def typer(ref, k=0):
if isinstance(ref, types.Array):
if ref.ndim == 1:
rdim = 2
elif ref.ndim == 2:
rdim = 1
else:
return None
return types.Array(ndim=rdim, dtype=ref.dtype, layout="C")
|
def typer(ref, k=0):
if isinstance(ref, types.Array):
if ref.ndim == 1:
rdim = 2
elif ref.ndim == 2:
rdim = 1
else:
return None
return types.Array(ndim=rdim, dtype=ref.dtype, layout="C")
|
https://github.com/numba/numba/issues/1667
|
from numba import jit
@jit(nopython=True)
...: def angle(x): return np.angle(x)
...:
angle(np.complex128([1+1j]))
Traceback (most recent call last):
File "<ipython-input-6-186e6f934ae3>", line 1, in <module>
angle(np.complex128([1+1j]))
File "/home/antoine/numba/numba/dispatcher.py", line 171, in _compile_for_args
return self.compile(sig)
File "/home/antoine/numba/numba/dispatcher.py", line 349, in compile
flags=flags, locals=self.locals)
File "/home/antoine/numba/numba/compiler.py", line 684, in compile_extra
return pipeline.compile_extra(func)
File "/home/antoine/numba/numba/compiler.py", line 372, in compile_extra
return self.compile_bytecode(bc, func_attr=self.func_attr)
File "/home/antoine/numba/numba/compiler.py", line 381, in compile_bytecode
return self._compile_bytecode()
File "/home/antoine/numba/numba/compiler.py", line 665, in _compile_bytecode
res = pm.run(self.status)
File "/home/antoine/numba/numba/compiler.py", line 251, in run
raise patched_exception
File "/home/antoine/numba/numba/compiler.py", line 243, in run
stage()
File "/home/antoine/numba/numba/compiler.py", line 469, in stage_nopython_frontend
self.locals)
File "/home/antoine/numba/numba/compiler.py", line 799, in type_inference_stage
infer.propagate()
File "/home/antoine/numba/numba/typeinfer.py", line 565, in propagate
raise errors[0]
TypingError: Internal error at <numba.typeinfer.CallConstraint object at 0x7fa910005470>:
--%<-----------------------------------------------------------------
Traceback (most recent call last):
File "/home/antoine/numba/numba/typeinfer.py", line 111, in propagate
constraint(typeinfer)
File "/home/antoine/numba/numba/typeinfer.py", line 284, in __call__
self.resolve(typeinfer, typevars, fnty)
File "/home/antoine/numba/numba/typeinfer.py", line 311, in resolve
sig = context.resolve_function_type(fnty, pos_args, kw_args)
File "/home/antoine/numba/numba/typing/context.py", line 123, in resolve_function_type
return func.get_call_type(self, args, kws)
File "/home/antoine/numba/numba/types.py", line 264, in get_call_type
sig = temp.apply(args, kws)
File "/home/antoine/numba/numba/typing/templates.py", line 229, in apply
sig = typer(*args, **kws)
File "/home/antoine/numba/numba/typing/npydecl.py", line 850, in typer
return ref.copy(dtype=ref.underlying_float)
AttributeError: 'Array' object has no attribute 'underlying_float'
--%<-----------------------------------------------------------------
|
TypingError
|
def _get_module_for_linking(self):
"""
Internal: get a LLVM module suitable for linking multiple times
into another library. Exported functions are made "linkonce_odr"
to allow for multiple definitions, inlining, and removal of
unused exports.
See discussion in https://github.com/numba/numba/pull/890
"""
self._ensure_finalized()
if self._shared_module is not None:
return self._shared_module
mod = self._final_module
to_fix = []
nfuncs = 0
for fn in mod.functions:
nfuncs += 1
if not fn.is_declaration and fn.linkage == ll.Linkage.external:
to_fix.append(fn.name)
if nfuncs == 0:
# This is an issue which can occur if loading a module
# from an object file and trying to link with it, so detect it
# here to make debugging easier.
raise RuntimeError(
"library unfit for linking: no available functions in %s" % (self,)
)
if to_fix:
mod = mod.clone()
for name in to_fix:
# NOTE: this will mark the symbol WEAK if serialized
# to an ELF file
mod.get_function(name).linkage = "linkonce_odr"
self._shared_module = mod
return mod
|
def _get_module_for_linking(self):
"""
Internal: get a LLVM module suitable for linking multiple times
into another library. Exported functions are made "linkonce_odr"
to allow for multiple definitions, inlining, and removal of
unused exports.
See discussion in https://github.com/numba/numba/pull/890
"""
if self._shared_module is not None:
return self._shared_module
mod = self._final_module
to_fix = []
nfuncs = 0
for fn in mod.functions:
nfuncs += 1
if not fn.is_declaration and fn.linkage == ll.Linkage.external:
to_fix.append(fn.name)
if nfuncs == 0:
# This is an issue which can occur if loading a module
# from an object file and trying to link with it, so detect it
# here to make debugging easier.
raise RuntimeError(
"library unfit for linking: no available functions in %s" % (self,)
)
if to_fix:
mod = mod.clone()
for name in to_fix:
# NOTE: this will mark the symbol WEAK if serialized
# to an ELF file
mod.get_function(name).linkage = "linkonce_odr"
self._shared_module = mod
return mod
|
https://github.com/numba/numba/issues/1603
|
$ python app.py
Traceback (most recent call last):
File "app.py", line 21, in <module>
run(N)
File "app.py", line 15, in run
res = fcalc(x)
File "/Users/userx/anaconda/lib/python2.7/site-packages/numba/dispatcher.py", line 172, in _compile_for_args
return self.compile(sig)
File "/Users/userx/anaconda/lib/python2.7/site-packages/numba/dispatcher.py", line 350, in compile
flags=flags, locals=self.locals)
File "/Users/userx/anaconda/lib/python2.7/site-packages/numba/compiler.py", line 644, in compile_extra
return pipeline.compile_extra(func)
File "/Users/userx/anaconda/lib/python2.7/site-packages/numba/compiler.py", line 361, in compile_extra
return self.compile_bytecode(bc, func_attr=self.func_attr)
File "/Users/userx/anaconda/lib/python2.7/site-packages/numba/compiler.py", line 370, in compile_bytecode
return self._compile_bytecode()
File "/Users/userx/anaconda/lib/python2.7/site-packages/numba/compiler.py", line 631, in _compile_bytecode
return pm.run(self.status)
File "/Users/userx/anaconda/lib/python2.7/site-packages/numba/compiler.py", line 251, in run
raise patched_exception
RuntimeError: Caused By:
Traceback (most recent call last):
File "/Users/userx/anaconda/lib/python2.7/site-packages/numba/compiler.py", line 243, in run
res = stage()
File "/Users/userx/anaconda/lib/python2.7/site-packages/numba/compiler.py", line 587, in stage_nopython_backend
return self._backend(lowerfn, objectmode=False)
File "/Users/userx/anaconda/lib/python2.7/site-packages/numba/compiler.py", line 540, in _backend
lowered = lowerfn()
File "/Users/userx/anaconda/lib/python2.7/site-packages/numba/compiler.py", line 527, in backend_nopython_mode
self.flags)
File "/Users/userx/anaconda/lib/python2.7/site-packages/numba/compiler.py", line 786, in native_lowering_stage
cfunc = targetctx.get_executable(library, fndesc, env)
File "/Users/userx/anaconda/lib/python2.7/site-packages/numba/targets/cpu.py", line 147, in get_executable
baseptr = library.get_pointer_to_function(fndesc.llvm_func_name)
File "/Users/userx/anaconda/lib/python2.7/site-packages/numba/targets/codegen.py", line 377, in get_pointer_to_function
self._ensure_finalized()
File "/Users/userx/anaconda/lib/python2.7/site-packages/numba/targets/codegen.py", line 67, in _ensure_finalized
self.finalize()
File "/Users/userx/anaconda/lib/python2.7/site-packages/numba/targets/codegen.py", line 170, in finalize
library._get_module_for_linking(), preserve=True)
File "/Users/userx/anaconda/lib/python2.7/site-packages/numba/targets/codegen.py", line 113, in _get_module_for_linking
% (self,))
RuntimeError: library unfit for linking: no available functions in <Library 'func' at 0x10b73ef10>
Failed at nopython (nopython mode backend)
library unfit for linking: no available functions in <Library 'func' at 0x10b73ef10>
|
RuntimeError
|
def serialize_using_object_code(self):
"""
Serialize this library using its object code as the cached
representation. We also include its bitcode for further inlining
with other libraries.
"""
self._ensure_finalized()
data = (self._get_compiled_object(), self._get_module_for_linking().as_bitcode())
return (self._name, "object", data)
|
def serialize_using_object_code(self):
"""
Serialize this library using its object code as the cached
representation.
"""
self._ensure_finalized()
ll_module = self._final_module
return (self._name, "object", self._get_compiled_object())
|
https://github.com/numba/numba/issues/1603
|
$ python app.py
Traceback (most recent call last):
File "app.py", line 21, in <module>
run(N)
File "app.py", line 15, in run
res = fcalc(x)
File "/Users/userx/anaconda/lib/python2.7/site-packages/numba/dispatcher.py", line 172, in _compile_for_args
return self.compile(sig)
File "/Users/userx/anaconda/lib/python2.7/site-packages/numba/dispatcher.py", line 350, in compile
flags=flags, locals=self.locals)
File "/Users/userx/anaconda/lib/python2.7/site-packages/numba/compiler.py", line 644, in compile_extra
return pipeline.compile_extra(func)
File "/Users/userx/anaconda/lib/python2.7/site-packages/numba/compiler.py", line 361, in compile_extra
return self.compile_bytecode(bc, func_attr=self.func_attr)
File "/Users/userx/anaconda/lib/python2.7/site-packages/numba/compiler.py", line 370, in compile_bytecode
return self._compile_bytecode()
File "/Users/userx/anaconda/lib/python2.7/site-packages/numba/compiler.py", line 631, in _compile_bytecode
return pm.run(self.status)
File "/Users/userx/anaconda/lib/python2.7/site-packages/numba/compiler.py", line 251, in run
raise patched_exception
RuntimeError: Caused By:
Traceback (most recent call last):
File "/Users/userx/anaconda/lib/python2.7/site-packages/numba/compiler.py", line 243, in run
res = stage()
File "/Users/userx/anaconda/lib/python2.7/site-packages/numba/compiler.py", line 587, in stage_nopython_backend
return self._backend(lowerfn, objectmode=False)
File "/Users/userx/anaconda/lib/python2.7/site-packages/numba/compiler.py", line 540, in _backend
lowered = lowerfn()
File "/Users/userx/anaconda/lib/python2.7/site-packages/numba/compiler.py", line 527, in backend_nopython_mode
self.flags)
File "/Users/userx/anaconda/lib/python2.7/site-packages/numba/compiler.py", line 786, in native_lowering_stage
cfunc = targetctx.get_executable(library, fndesc, env)
File "/Users/userx/anaconda/lib/python2.7/site-packages/numba/targets/cpu.py", line 147, in get_executable
baseptr = library.get_pointer_to_function(fndesc.llvm_func_name)
File "/Users/userx/anaconda/lib/python2.7/site-packages/numba/targets/codegen.py", line 377, in get_pointer_to_function
self._ensure_finalized()
File "/Users/userx/anaconda/lib/python2.7/site-packages/numba/targets/codegen.py", line 67, in _ensure_finalized
self.finalize()
File "/Users/userx/anaconda/lib/python2.7/site-packages/numba/targets/codegen.py", line 170, in finalize
library._get_module_for_linking(), preserve=True)
File "/Users/userx/anaconda/lib/python2.7/site-packages/numba/targets/codegen.py", line 113, in _get_module_for_linking
% (self,))
RuntimeError: library unfit for linking: no available functions in <Library 'func' at 0x10b73ef10>
Failed at nopython (nopython mode backend)
library unfit for linking: no available functions in <Library 'func' at 0x10b73ef10>
|
RuntimeError
|
def _unserialize(cls, codegen, state):
name, kind, data = state
self = codegen.create_library(name)
assert isinstance(self, cls)
if kind == "bitcode":
# No need to re-run optimizations, just make the module ready
self._final_module = ll.parse_bitcode(data)
self._finalize_final_module()
return self
elif kind == "object":
object_code, shared_bitcode = data
self.enable_object_caching()
self._set_compiled_object(object_code)
self._shared_module = ll.parse_bitcode(shared_bitcode)
self._finalize_final_module()
return self
else:
raise ValueError("unsupported serialization kind %r" % (kind,))
|
def _unserialize(cls, codegen, state):
name, kind, data = state
self = codegen.create_library(name)
assert isinstance(self, cls)
if kind == "bitcode":
# No need to re-run optimizations, just make the module ready
self._final_module = ll.parse_bitcode(data)
self._finalize_final_module()
return self
elif kind == "object":
self.enable_object_caching()
self._set_compiled_object(data)
self._finalize_final_module()
return self
else:
raise ValueError("unsupported serialization kind %r" % (kind,))
|
https://github.com/numba/numba/issues/1603
|
$ python app.py
Traceback (most recent call last):
File "app.py", line 21, in <module>
run(N)
File "app.py", line 15, in run
res = fcalc(x)
File "/Users/userx/anaconda/lib/python2.7/site-packages/numba/dispatcher.py", line 172, in _compile_for_args
return self.compile(sig)
File "/Users/userx/anaconda/lib/python2.7/site-packages/numba/dispatcher.py", line 350, in compile
flags=flags, locals=self.locals)
File "/Users/userx/anaconda/lib/python2.7/site-packages/numba/compiler.py", line 644, in compile_extra
return pipeline.compile_extra(func)
File "/Users/userx/anaconda/lib/python2.7/site-packages/numba/compiler.py", line 361, in compile_extra
return self.compile_bytecode(bc, func_attr=self.func_attr)
File "/Users/userx/anaconda/lib/python2.7/site-packages/numba/compiler.py", line 370, in compile_bytecode
return self._compile_bytecode()
File "/Users/userx/anaconda/lib/python2.7/site-packages/numba/compiler.py", line 631, in _compile_bytecode
return pm.run(self.status)
File "/Users/userx/anaconda/lib/python2.7/site-packages/numba/compiler.py", line 251, in run
raise patched_exception
RuntimeError: Caused By:
Traceback (most recent call last):
File "/Users/userx/anaconda/lib/python2.7/site-packages/numba/compiler.py", line 243, in run
res = stage()
File "/Users/userx/anaconda/lib/python2.7/site-packages/numba/compiler.py", line 587, in stage_nopython_backend
return self._backend(lowerfn, objectmode=False)
File "/Users/userx/anaconda/lib/python2.7/site-packages/numba/compiler.py", line 540, in _backend
lowered = lowerfn()
File "/Users/userx/anaconda/lib/python2.7/site-packages/numba/compiler.py", line 527, in backend_nopython_mode
self.flags)
File "/Users/userx/anaconda/lib/python2.7/site-packages/numba/compiler.py", line 786, in native_lowering_stage
cfunc = targetctx.get_executable(library, fndesc, env)
File "/Users/userx/anaconda/lib/python2.7/site-packages/numba/targets/cpu.py", line 147, in get_executable
baseptr = library.get_pointer_to_function(fndesc.llvm_func_name)
File "/Users/userx/anaconda/lib/python2.7/site-packages/numba/targets/codegen.py", line 377, in get_pointer_to_function
self._ensure_finalized()
File "/Users/userx/anaconda/lib/python2.7/site-packages/numba/targets/codegen.py", line 67, in _ensure_finalized
self.finalize()
File "/Users/userx/anaconda/lib/python2.7/site-packages/numba/targets/codegen.py", line 170, in finalize
library._get_module_for_linking(), preserve=True)
File "/Users/userx/anaconda/lib/python2.7/site-packages/numba/targets/codegen.py", line 113, in _get_module_for_linking
% (self,))
RuntimeError: library unfit for linking: no available functions in <Library 'func' at 0x10b73ef10>
Failed at nopython (nopython mode backend)
library unfit for linking: no available functions in <Library 'func' at 0x10b73ef10>
|
RuntimeError
|
def create_struct_proxy(fe_type, kind="value"):
"""
Returns a specialized StructProxy subclass for the given fe_type.
"""
cache_key = (fe_type, kind)
res = _struct_proxy_cache.get(cache_key)
if res is None:
base = {
"value": ValueStructProxy,
"data": DataStructProxy,
}[kind]
clsname = base.__name__ + "_" + str(fe_type)
bases = (base,)
clsmembers = dict(_fe_type=fe_type)
res = type(clsname, bases, clsmembers)
_struct_proxy_cache[cache_key] = res
return res
|
def create_struct_proxy(fe_type):
"""
Returns a specialized StructProxy subclass for the given fe_type.
"""
res = _struct_proxy_cache.get(fe_type)
if res is None:
clsname = StructProxy.__name__ + "_" + str(fe_type)
bases = (StructProxy,)
clsmembers = dict(_fe_type=fe_type)
res = type(clsname, bases, clsmembers)
_struct_proxy_cache[fe_type] = res
return res
|
https://github.com/numba/numba/issues/1373
|
def f(): return [True]
...
ff = jit(nopython=True)(f)
ff()
Traceback (most recent call last):
File "/home/antoine/numba/numba/lowering.py", line 173, in lower_block
self.lower_inst(inst)
File "/home/antoine/numba/numba/lowering.py", line 215, in lower_inst
val = self.lower_assign(ty, inst)
File "/home/antoine/numba/numba/lowering.py", line 371, in lower_assign
return self.lower_expr(ty, value)
File "/home/antoine/numba/numba/lowering.py", line 733, in lower_expr
return self.context.build_list(self.builder, resty, castvals)
File "/home/antoine/numba/numba/targets/cpu.py", line 111, in build_list
return listobj.build_list(self, builder, list_type, items)
File "/home/antoine/numba/numba/targets/listobj.py", line 301, in build_list
inst = ListInstance.allocate(context, builder, list_type, nitems)
File "/home/antoine/numba/numba/targets/listobj.py", line 190, in allocate
self._payload.allocated = nitems
File "/home/antoine/numba/numba/targets/listobj.py", line 154, in _payload
return get_list_payload(self._context, self._builder, self._ty, self._list)
File "/home/antoine/numba/numba/targets/listobj.py", line 44, in get_list_payload
return make_payload_cls(list_type)(context, builder, ref=payload)
File "/home/antoine/numba/numba/cgutils.py", line 95, in __init__
% (self._be_type.as_pointer(), ref.type))
AssertionError: bad ref type: expected {i64, i64, i1}*, got {i64, i64, i8}*
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/antoine/numba/numba/dispatcher.py", line 162, in _compile_for_args
return self.compile(sig)
File "/home/antoine/numba/numba/dispatcher.py", line 327, in compile
flags=flags, locals=self.locals)
File "/home/antoine/numba/numba/compiler.py", line 594, in compile_extra
return pipeline.compile_extra(func)
File "/home/antoine/numba/numba/compiler.py", line 317, in compile_extra
return self.compile_bytecode(bc, func_attr=self.func_attr)
File "/home/antoine/numba/numba/compiler.py", line 326, in compile_bytecode
return self._compile_bytecode()
File "/home/antoine/numba/numba/compiler.py", line 581, in _compile_bytecode
return pm.run(self.status)
File "/home/antoine/numba/numba/compiler.py", line 209, in run
raise patched_exception
File "/home/antoine/numba/numba/compiler.py", line 201, in run
res = stage()
File "/home/antoine/numba/numba/compiler.py", line 537, in stage_nopython_backend
return self._backend(lowerfn, objectmode=False)
File "/home/antoine/numba/numba/compiler.py", line 492, in _backend
lowered = lowerfn()
File "/home/antoine/numba/numba/compiler.py", line 483, in backend_nopython_mode
self.flags)
File "/home/antoine/numba/numba/compiler.py", line 723, in native_lowering_stage
lower.lower()
File "/home/antoine/numba/numba/lowering.py", line 100, in lower
self.lower_normal_function(self.fndesc)
File "/home/antoine/numba/numba/lowering.py", line 135, in lower_normal_function
entry_block_tail = self.lower_function_body()
File "/home/antoine/numba/numba/lowering.py", line 160, in lower_function_body
self.lower_block(block)
File "/home/antoine/numba/numba/lowering.py", line 178, in lower_block
raise LoweringError(msg, inst.loc)
numba.errors.LoweringError: Failed at nopython (nopython mode backend)
Internal error:
AssertionError: bad ref type: expected {i64, i64, i1}*, got {i64, i64, i8}*
File "<stdin>", line 1
|
AssertionError
|
def make_payload_cls(list_type):
"""
Return the Structure representation of the given *list_type*'s payload
(an instance of types.List).
"""
return cgutils.create_struct_proxy(types.ListPayload(list_type), kind="data")
|
def make_payload_cls(list_type):
"""
Return the Structure representation of the given *list_type*'s payload
(an instance of types.List).
"""
return cgutils.create_struct_proxy(types.ListPayload(list_type))
|
https://github.com/numba/numba/issues/1373
|
def f(): return [True]
...
ff = jit(nopython=True)(f)
ff()
Traceback (most recent call last):
File "/home/antoine/numba/numba/lowering.py", line 173, in lower_block
self.lower_inst(inst)
File "/home/antoine/numba/numba/lowering.py", line 215, in lower_inst
val = self.lower_assign(ty, inst)
File "/home/antoine/numba/numba/lowering.py", line 371, in lower_assign
return self.lower_expr(ty, value)
File "/home/antoine/numba/numba/lowering.py", line 733, in lower_expr
return self.context.build_list(self.builder, resty, castvals)
File "/home/antoine/numba/numba/targets/cpu.py", line 111, in build_list
return listobj.build_list(self, builder, list_type, items)
File "/home/antoine/numba/numba/targets/listobj.py", line 301, in build_list
inst = ListInstance.allocate(context, builder, list_type, nitems)
File "/home/antoine/numba/numba/targets/listobj.py", line 190, in allocate
self._payload.allocated = nitems
File "/home/antoine/numba/numba/targets/listobj.py", line 154, in _payload
return get_list_payload(self._context, self._builder, self._ty, self._list)
File "/home/antoine/numba/numba/targets/listobj.py", line 44, in get_list_payload
return make_payload_cls(list_type)(context, builder, ref=payload)
File "/home/antoine/numba/numba/cgutils.py", line 95, in __init__
% (self._be_type.as_pointer(), ref.type))
AssertionError: bad ref type: expected {i64, i64, i1}*, got {i64, i64, i8}*
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/antoine/numba/numba/dispatcher.py", line 162, in _compile_for_args
return self.compile(sig)
File "/home/antoine/numba/numba/dispatcher.py", line 327, in compile
flags=flags, locals=self.locals)
File "/home/antoine/numba/numba/compiler.py", line 594, in compile_extra
return pipeline.compile_extra(func)
File "/home/antoine/numba/numba/compiler.py", line 317, in compile_extra
return self.compile_bytecode(bc, func_attr=self.func_attr)
File "/home/antoine/numba/numba/compiler.py", line 326, in compile_bytecode
return self._compile_bytecode()
File "/home/antoine/numba/numba/compiler.py", line 581, in _compile_bytecode
return pm.run(self.status)
File "/home/antoine/numba/numba/compiler.py", line 209, in run
raise patched_exception
File "/home/antoine/numba/numba/compiler.py", line 201, in run
res = stage()
File "/home/antoine/numba/numba/compiler.py", line 537, in stage_nopython_backend
return self._backend(lowerfn, objectmode=False)
File "/home/antoine/numba/numba/compiler.py", line 492, in _backend
lowered = lowerfn()
File "/home/antoine/numba/numba/compiler.py", line 483, in backend_nopython_mode
self.flags)
File "/home/antoine/numba/numba/compiler.py", line 723, in native_lowering_stage
lower.lower()
File "/home/antoine/numba/numba/lowering.py", line 100, in lower
self.lower_normal_function(self.fndesc)
File "/home/antoine/numba/numba/lowering.py", line 135, in lower_normal_function
entry_block_tail = self.lower_function_body()
File "/home/antoine/numba/numba/lowering.py", line 160, in lower_function_body
self.lower_block(block)
File "/home/antoine/numba/numba/lowering.py", line 178, in lower_block
raise LoweringError(msg, inst.loc)
numba.errors.LoweringError: Failed at nopython (nopython mode backend)
Internal error:
AssertionError: bad ref type: expected {i64, i64, i1}*, got {i64, i64, i8}*
File "<stdin>", line 1
|
AssertionError
|
def getitem(self, idx):
ptr = self._gep(idx)
data_item = self._builder.load(ptr)
return self._datamodel.from_data(self._builder, data_item)
|
def getitem(self, idx):
ptr = self._gep(idx)
return self._builder.load(ptr)
|
https://github.com/numba/numba/issues/1373
|
def f(): return [True]
...
ff = jit(nopython=True)(f)
ff()
Traceback (most recent call last):
File "/home/antoine/numba/numba/lowering.py", line 173, in lower_block
self.lower_inst(inst)
File "/home/antoine/numba/numba/lowering.py", line 215, in lower_inst
val = self.lower_assign(ty, inst)
File "/home/antoine/numba/numba/lowering.py", line 371, in lower_assign
return self.lower_expr(ty, value)
File "/home/antoine/numba/numba/lowering.py", line 733, in lower_expr
return self.context.build_list(self.builder, resty, castvals)
File "/home/antoine/numba/numba/targets/cpu.py", line 111, in build_list
return listobj.build_list(self, builder, list_type, items)
File "/home/antoine/numba/numba/targets/listobj.py", line 301, in build_list
inst = ListInstance.allocate(context, builder, list_type, nitems)
File "/home/antoine/numba/numba/targets/listobj.py", line 190, in allocate
self._payload.allocated = nitems
File "/home/antoine/numba/numba/targets/listobj.py", line 154, in _payload
return get_list_payload(self._context, self._builder, self._ty, self._list)
File "/home/antoine/numba/numba/targets/listobj.py", line 44, in get_list_payload
return make_payload_cls(list_type)(context, builder, ref=payload)
File "/home/antoine/numba/numba/cgutils.py", line 95, in __init__
% (self._be_type.as_pointer(), ref.type))
AssertionError: bad ref type: expected {i64, i64, i1}*, got {i64, i64, i8}*
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/antoine/numba/numba/dispatcher.py", line 162, in _compile_for_args
return self.compile(sig)
File "/home/antoine/numba/numba/dispatcher.py", line 327, in compile
flags=flags, locals=self.locals)
File "/home/antoine/numba/numba/compiler.py", line 594, in compile_extra
return pipeline.compile_extra(func)
File "/home/antoine/numba/numba/compiler.py", line 317, in compile_extra
return self.compile_bytecode(bc, func_attr=self.func_attr)
File "/home/antoine/numba/numba/compiler.py", line 326, in compile_bytecode
return self._compile_bytecode()
File "/home/antoine/numba/numba/compiler.py", line 581, in _compile_bytecode
return pm.run(self.status)
File "/home/antoine/numba/numba/compiler.py", line 209, in run
raise patched_exception
File "/home/antoine/numba/numba/compiler.py", line 201, in run
res = stage()
File "/home/antoine/numba/numba/compiler.py", line 537, in stage_nopython_backend
return self._backend(lowerfn, objectmode=False)
File "/home/antoine/numba/numba/compiler.py", line 492, in _backend
lowered = lowerfn()
File "/home/antoine/numba/numba/compiler.py", line 483, in backend_nopython_mode
self.flags)
File "/home/antoine/numba/numba/compiler.py", line 723, in native_lowering_stage
lower.lower()
File "/home/antoine/numba/numba/lowering.py", line 100, in lower
self.lower_normal_function(self.fndesc)
File "/home/antoine/numba/numba/lowering.py", line 135, in lower_normal_function
entry_block_tail = self.lower_function_body()
File "/home/antoine/numba/numba/lowering.py", line 160, in lower_function_body
self.lower_block(block)
File "/home/antoine/numba/numba/lowering.py", line 178, in lower_block
raise LoweringError(msg, inst.loc)
numba.errors.LoweringError: Failed at nopython (nopython mode backend)
Internal error:
AssertionError: bad ref type: expected {i64, i64, i1}*, got {i64, i64, i8}*
File "<stdin>", line 1
|
AssertionError
|
def setitem(self, idx, val):
ptr = self._gep(idx)
data_item = self._datamodel.as_data(self._builder, val)
self._builder.store(data_item, ptr)
|
def setitem(self, idx, val):
ptr = self._gep(idx)
self._builder.store(val, ptr)
|
https://github.com/numba/numba/issues/1373
|
def f(): return [True]
...
ff = jit(nopython=True)(f)
ff()
Traceback (most recent call last):
File "/home/antoine/numba/numba/lowering.py", line 173, in lower_block
self.lower_inst(inst)
File "/home/antoine/numba/numba/lowering.py", line 215, in lower_inst
val = self.lower_assign(ty, inst)
File "/home/antoine/numba/numba/lowering.py", line 371, in lower_assign
return self.lower_expr(ty, value)
File "/home/antoine/numba/numba/lowering.py", line 733, in lower_expr
return self.context.build_list(self.builder, resty, castvals)
File "/home/antoine/numba/numba/targets/cpu.py", line 111, in build_list
return listobj.build_list(self, builder, list_type, items)
File "/home/antoine/numba/numba/targets/listobj.py", line 301, in build_list
inst = ListInstance.allocate(context, builder, list_type, nitems)
File "/home/antoine/numba/numba/targets/listobj.py", line 190, in allocate
self._payload.allocated = nitems
File "/home/antoine/numba/numba/targets/listobj.py", line 154, in _payload
return get_list_payload(self._context, self._builder, self._ty, self._list)
File "/home/antoine/numba/numba/targets/listobj.py", line 44, in get_list_payload
return make_payload_cls(list_type)(context, builder, ref=payload)
File "/home/antoine/numba/numba/cgutils.py", line 95, in __init__
% (self._be_type.as_pointer(), ref.type))
AssertionError: bad ref type: expected {i64, i64, i1}*, got {i64, i64, i8}*
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/antoine/numba/numba/dispatcher.py", line 162, in _compile_for_args
return self.compile(sig)
File "/home/antoine/numba/numba/dispatcher.py", line 327, in compile
flags=flags, locals=self.locals)
File "/home/antoine/numba/numba/compiler.py", line 594, in compile_extra
return pipeline.compile_extra(func)
File "/home/antoine/numba/numba/compiler.py", line 317, in compile_extra
return self.compile_bytecode(bc, func_attr=self.func_attr)
File "/home/antoine/numba/numba/compiler.py", line 326, in compile_bytecode
return self._compile_bytecode()
File "/home/antoine/numba/numba/compiler.py", line 581, in _compile_bytecode
return pm.run(self.status)
File "/home/antoine/numba/numba/compiler.py", line 209, in run
raise patched_exception
File "/home/antoine/numba/numba/compiler.py", line 201, in run
res = stage()
File "/home/antoine/numba/numba/compiler.py", line 537, in stage_nopython_backend
return self._backend(lowerfn, objectmode=False)
File "/home/antoine/numba/numba/compiler.py", line 492, in _backend
lowered = lowerfn()
File "/home/antoine/numba/numba/compiler.py", line 483, in backend_nopython_mode
self.flags)
File "/home/antoine/numba/numba/compiler.py", line 723, in native_lowering_stage
lower.lower()
File "/home/antoine/numba/numba/lowering.py", line 100, in lower
self.lower_normal_function(self.fndesc)
File "/home/antoine/numba/numba/lowering.py", line 135, in lower_normal_function
entry_block_tail = self.lower_function_body()
File "/home/antoine/numba/numba/lowering.py", line 160, in lower_function_body
self.lower_block(block)
File "/home/antoine/numba/numba/lowering.py", line 178, in lower_block
raise LoweringError(msg, inst.loc)
numba.errors.LoweringError: Failed at nopython (nopython mode backend)
Internal error:
AssertionError: bad ref type: expected {i64, i64, i1}*, got {i64, i64, i8}*
File "<stdin>", line 1
|
AssertionError
|
def inititem(self, idx, val):
ptr = self._gep(idx)
data_item = self._datamodel.as_data(self._builder, val)
self._builder.store(data_item, ptr)
|
def inititem(self, idx, val):
ptr = self._gep(idx)
self._builder.store(val, ptr)
|
https://github.com/numba/numba/issues/1373
|
def f(): return [True]
...
ff = jit(nopython=True)(f)
ff()
Traceback (most recent call last):
File "/home/antoine/numba/numba/lowering.py", line 173, in lower_block
self.lower_inst(inst)
File "/home/antoine/numba/numba/lowering.py", line 215, in lower_inst
val = self.lower_assign(ty, inst)
File "/home/antoine/numba/numba/lowering.py", line 371, in lower_assign
return self.lower_expr(ty, value)
File "/home/antoine/numba/numba/lowering.py", line 733, in lower_expr
return self.context.build_list(self.builder, resty, castvals)
File "/home/antoine/numba/numba/targets/cpu.py", line 111, in build_list
return listobj.build_list(self, builder, list_type, items)
File "/home/antoine/numba/numba/targets/listobj.py", line 301, in build_list
inst = ListInstance.allocate(context, builder, list_type, nitems)
File "/home/antoine/numba/numba/targets/listobj.py", line 190, in allocate
self._payload.allocated = nitems
File "/home/antoine/numba/numba/targets/listobj.py", line 154, in _payload
return get_list_payload(self._context, self._builder, self._ty, self._list)
File "/home/antoine/numba/numba/targets/listobj.py", line 44, in get_list_payload
return make_payload_cls(list_type)(context, builder, ref=payload)
File "/home/antoine/numba/numba/cgutils.py", line 95, in __init__
% (self._be_type.as_pointer(), ref.type))
AssertionError: bad ref type: expected {i64, i64, i1}*, got {i64, i64, i8}*
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/antoine/numba/numba/dispatcher.py", line 162, in _compile_for_args
return self.compile(sig)
File "/home/antoine/numba/numba/dispatcher.py", line 327, in compile
flags=flags, locals=self.locals)
File "/home/antoine/numba/numba/compiler.py", line 594, in compile_extra
return pipeline.compile_extra(func)
File "/home/antoine/numba/numba/compiler.py", line 317, in compile_extra
return self.compile_bytecode(bc, func_attr=self.func_attr)
File "/home/antoine/numba/numba/compiler.py", line 326, in compile_bytecode
return self._compile_bytecode()
File "/home/antoine/numba/numba/compiler.py", line 581, in _compile_bytecode
return pm.run(self.status)
File "/home/antoine/numba/numba/compiler.py", line 209, in run
raise patched_exception
File "/home/antoine/numba/numba/compiler.py", line 201, in run
res = stage()
File "/home/antoine/numba/numba/compiler.py", line 537, in stage_nopython_backend
return self._backend(lowerfn, objectmode=False)
File "/home/antoine/numba/numba/compiler.py", line 492, in _backend
lowered = lowerfn()
File "/home/antoine/numba/numba/compiler.py", line 483, in backend_nopython_mode
self.flags)
File "/home/antoine/numba/numba/compiler.py", line 723, in native_lowering_stage
lower.lower()
File "/home/antoine/numba/numba/lowering.py", line 100, in lower
self.lower_normal_function(self.fndesc)
File "/home/antoine/numba/numba/lowering.py", line 135, in lower_normal_function
entry_block_tail = self.lower_function_body()
File "/home/antoine/numba/numba/lowering.py", line 160, in lower_function_body
self.lower_block(block)
File "/home/antoine/numba/numba/lowering.py", line 178, in lower_block
raise LoweringError(msg, inst.loc)
numba.errors.LoweringError: Failed at nopython (nopython mode backend)
Internal error:
AssertionError: bad ref type: expected {i64, i64, i1}*, got {i64, i64, i8}*
File "<stdin>", line 1
|
AssertionError
|
def __init__(self, context, builder, list_type, list_val):
self._context = context
self._builder = builder
self._ty = list_type
self._list = make_list_cls(list_type)(context, builder, list_val)
self._itemsize = get_itemsize(context, list_type)
self._datamodel = context.data_model_manager[list_type.dtype]
|
def __init__(self, context, builder, list_type, list_val):
self._context = context
self._builder = builder
self._ty = list_type
self._list = make_list_cls(list_type)(context, builder, list_val)
self._itemsize = get_itemsize(context, list_type)
|
https://github.com/numba/numba/issues/1373
|
def f(): return [True]
...
ff = jit(nopython=True)(f)
ff()
Traceback (most recent call last):
File "/home/antoine/numba/numba/lowering.py", line 173, in lower_block
self.lower_inst(inst)
File "/home/antoine/numba/numba/lowering.py", line 215, in lower_inst
val = self.lower_assign(ty, inst)
File "/home/antoine/numba/numba/lowering.py", line 371, in lower_assign
return self.lower_expr(ty, value)
File "/home/antoine/numba/numba/lowering.py", line 733, in lower_expr
return self.context.build_list(self.builder, resty, castvals)
File "/home/antoine/numba/numba/targets/cpu.py", line 111, in build_list
return listobj.build_list(self, builder, list_type, items)
File "/home/antoine/numba/numba/targets/listobj.py", line 301, in build_list
inst = ListInstance.allocate(context, builder, list_type, nitems)
File "/home/antoine/numba/numba/targets/listobj.py", line 190, in allocate
self._payload.allocated = nitems
File "/home/antoine/numba/numba/targets/listobj.py", line 154, in _payload
return get_list_payload(self._context, self._builder, self._ty, self._list)
File "/home/antoine/numba/numba/targets/listobj.py", line 44, in get_list_payload
return make_payload_cls(list_type)(context, builder, ref=payload)
File "/home/antoine/numba/numba/cgutils.py", line 95, in __init__
% (self._be_type.as_pointer(), ref.type))
AssertionError: bad ref type: expected {i64, i64, i1}*, got {i64, i64, i8}*
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/antoine/numba/numba/dispatcher.py", line 162, in _compile_for_args
return self.compile(sig)
File "/home/antoine/numba/numba/dispatcher.py", line 327, in compile
flags=flags, locals=self.locals)
File "/home/antoine/numba/numba/compiler.py", line 594, in compile_extra
return pipeline.compile_extra(func)
File "/home/antoine/numba/numba/compiler.py", line 317, in compile_extra
return self.compile_bytecode(bc, func_attr=self.func_attr)
File "/home/antoine/numba/numba/compiler.py", line 326, in compile_bytecode
return self._compile_bytecode()
File "/home/antoine/numba/numba/compiler.py", line 581, in _compile_bytecode
return pm.run(self.status)
File "/home/antoine/numba/numba/compiler.py", line 209, in run
raise patched_exception
File "/home/antoine/numba/numba/compiler.py", line 201, in run
res = stage()
File "/home/antoine/numba/numba/compiler.py", line 537, in stage_nopython_backend
return self._backend(lowerfn, objectmode=False)
File "/home/antoine/numba/numba/compiler.py", line 492, in _backend
lowered = lowerfn()
File "/home/antoine/numba/numba/compiler.py", line 483, in backend_nopython_mode
self.flags)
File "/home/antoine/numba/numba/compiler.py", line 723, in native_lowering_stage
lower.lower()
File "/home/antoine/numba/numba/lowering.py", line 100, in lower
self.lower_normal_function(self.fndesc)
File "/home/antoine/numba/numba/lowering.py", line 135, in lower_normal_function
entry_block_tail = self.lower_function_body()
File "/home/antoine/numba/numba/lowering.py", line 160, in lower_function_body
self.lower_block(block)
File "/home/antoine/numba/numba/lowering.py", line 178, in lower_block
raise LoweringError(msg, inst.loc)
numba.errors.LoweringError: Failed at nopython (nopython mode backend)
Internal error:
AssertionError: bad ref type: expected {i64, i64, i1}*, got {i64, i64, i8}*
File "<stdin>", line 1
|
AssertionError
|
def __init__(self, context, builder, iter_type, iter_val):
self._context = context
self._builder = builder
self._ty = iter_type
self._iter = make_listiter_cls(iter_type)(context, builder, iter_val)
self._datamodel = context.data_model_manager[iter_type.yield_type]
|
def __init__(self, context, builder, iter_type, iter_val):
self._context = context
self._builder = builder
self._ty = iter_type
self._iter = make_listiter_cls(iter_type)(context, builder, iter_val)
|
https://github.com/numba/numba/issues/1373
|
def f(): return [True]
...
ff = jit(nopython=True)(f)
ff()
Traceback (most recent call last):
File "/home/antoine/numba/numba/lowering.py", line 173, in lower_block
self.lower_inst(inst)
File "/home/antoine/numba/numba/lowering.py", line 215, in lower_inst
val = self.lower_assign(ty, inst)
File "/home/antoine/numba/numba/lowering.py", line 371, in lower_assign
return self.lower_expr(ty, value)
File "/home/antoine/numba/numba/lowering.py", line 733, in lower_expr
return self.context.build_list(self.builder, resty, castvals)
File "/home/antoine/numba/numba/targets/cpu.py", line 111, in build_list
return listobj.build_list(self, builder, list_type, items)
File "/home/antoine/numba/numba/targets/listobj.py", line 301, in build_list
inst = ListInstance.allocate(context, builder, list_type, nitems)
File "/home/antoine/numba/numba/targets/listobj.py", line 190, in allocate
self._payload.allocated = nitems
File "/home/antoine/numba/numba/targets/listobj.py", line 154, in _payload
return get_list_payload(self._context, self._builder, self._ty, self._list)
File "/home/antoine/numba/numba/targets/listobj.py", line 44, in get_list_payload
return make_payload_cls(list_type)(context, builder, ref=payload)
File "/home/antoine/numba/numba/cgutils.py", line 95, in __init__
% (self._be_type.as_pointer(), ref.type))
AssertionError: bad ref type: expected {i64, i64, i1}*, got {i64, i64, i8}*
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/antoine/numba/numba/dispatcher.py", line 162, in _compile_for_args
return self.compile(sig)
File "/home/antoine/numba/numba/dispatcher.py", line 327, in compile
flags=flags, locals=self.locals)
File "/home/antoine/numba/numba/compiler.py", line 594, in compile_extra
return pipeline.compile_extra(func)
File "/home/antoine/numba/numba/compiler.py", line 317, in compile_extra
return self.compile_bytecode(bc, func_attr=self.func_attr)
File "/home/antoine/numba/numba/compiler.py", line 326, in compile_bytecode
return self._compile_bytecode()
File "/home/antoine/numba/numba/compiler.py", line 581, in _compile_bytecode
return pm.run(self.status)
File "/home/antoine/numba/numba/compiler.py", line 209, in run
raise patched_exception
File "/home/antoine/numba/numba/compiler.py", line 201, in run
res = stage()
File "/home/antoine/numba/numba/compiler.py", line 537, in stage_nopython_backend
return self._backend(lowerfn, objectmode=False)
File "/home/antoine/numba/numba/compiler.py", line 492, in _backend
lowered = lowerfn()
File "/home/antoine/numba/numba/compiler.py", line 483, in backend_nopython_mode
self.flags)
File "/home/antoine/numba/numba/compiler.py", line 723, in native_lowering_stage
lower.lower()
File "/home/antoine/numba/numba/lowering.py", line 100, in lower
self.lower_normal_function(self.fndesc)
File "/home/antoine/numba/numba/lowering.py", line 135, in lower_normal_function
entry_block_tail = self.lower_function_body()
File "/home/antoine/numba/numba/lowering.py", line 160, in lower_function_body
self.lower_block(block)
File "/home/antoine/numba/numba/lowering.py", line 178, in lower_block
raise LoweringError(msg, inst.loc)
numba.errors.LoweringError: Failed at nopython (nopython mode backend)
Internal error:
AssertionError: bad ref type: expected {i64, i64, i1}*, got {i64, i64, i8}*
File "<stdin>", line 1
|
AssertionError
|
def _init_casting_rules(tm):
tcr = TypeCastingRules(tm)
tcr.safe_unsafe(types.boolean, types.int8)
tcr.safe_unsafe(types.boolean, types.uint8)
tcr.promote_unsafe(types.int8, types.int16)
tcr.promote_unsafe(types.uint8, types.uint16)
tcr.promote_unsafe(types.int16, types.int32)
tcr.promote_unsafe(types.uint16, types.uint32)
tcr.promote_unsafe(types.int32, types.int64)
tcr.promote_unsafe(types.uint32, types.uint64)
tcr.safe_unsafe(types.uint8, types.int16)
tcr.safe_unsafe(types.uint16, types.int32)
tcr.safe_unsafe(types.uint32, types.int64)
tcr.safe_unsafe(types.int16, types.float32)
tcr.safe_unsafe(types.int32, types.float64)
tcr.unsafe_unsafe(types.int32, types.float32)
# XXX this is inconsistent with the above; but we want to prefer
# float64 over int64 when typing a heterogenous operation,
# e.g. `float64 + int64`. Perhaps we need more granularity in the
# conversion kinds.
tcr.safe_unsafe(types.int64, types.float64)
tcr.safe_unsafe(types.uint64, types.float64)
tcr.promote_unsafe(types.float32, types.float64)
tcr.safe(types.float32, types.complex64)
tcr.safe(types.float64, types.complex128)
tcr.promote_unsafe(types.complex64, types.complex128)
# Allow integers to cast ot void*
tcr.unsafe_unsafe(types.uintp, types.voidptr)
return tcr
|
def _init_casting_rules(tm):
tcr = TypeCastingRules(tm)
tcr.safe_unsafe(types.boolean, types.int8)
tcr.safe_unsafe(types.boolean, types.uint8)
tcr.promote_unsafe(types.int8, types.int16)
tcr.promote_unsafe(types.uint8, types.uint16)
tcr.promote_unsafe(types.int16, types.int32)
tcr.promote_unsafe(types.uint16, types.uint32)
tcr.promote_unsafe(types.int32, types.int64)
tcr.promote_unsafe(types.uint32, types.uint64)
tcr.safe_unsafe(types.uint8, types.int16)
tcr.safe_unsafe(types.uint16, types.int32)
tcr.safe_unsafe(types.uint32, types.int64)
tcr.safe_unsafe(types.int32, types.float64)
tcr.unsafe_unsafe(types.int32, types.float32)
# XXX this is inconsistent with the above; but we want to prefer
# float64 over int64 when typing a heterogenous operation,
# e.g. `float64 + int64`. Perhaps we need more granularity in the
# conversion kinds.
tcr.safe_unsafe(types.int64, types.float64)
tcr.safe_unsafe(types.uint64, types.float64)
tcr.promote_unsafe(types.float32, types.float64)
tcr.safe(types.float32, types.complex64)
tcr.safe(types.float64, types.complex128)
tcr.promote_unsafe(types.complex64, types.complex128)
# Allow integers to cast ot void*
tcr.unsafe_unsafe(types.uintp, types.voidptr)
return tcr
|
https://github.com/numba/numba/issues/1373
|
def f(): return [True]
...
ff = jit(nopython=True)(f)
ff()
Traceback (most recent call last):
File "/home/antoine/numba/numba/lowering.py", line 173, in lower_block
self.lower_inst(inst)
File "/home/antoine/numba/numba/lowering.py", line 215, in lower_inst
val = self.lower_assign(ty, inst)
File "/home/antoine/numba/numba/lowering.py", line 371, in lower_assign
return self.lower_expr(ty, value)
File "/home/antoine/numba/numba/lowering.py", line 733, in lower_expr
return self.context.build_list(self.builder, resty, castvals)
File "/home/antoine/numba/numba/targets/cpu.py", line 111, in build_list
return listobj.build_list(self, builder, list_type, items)
File "/home/antoine/numba/numba/targets/listobj.py", line 301, in build_list
inst = ListInstance.allocate(context, builder, list_type, nitems)
File "/home/antoine/numba/numba/targets/listobj.py", line 190, in allocate
self._payload.allocated = nitems
File "/home/antoine/numba/numba/targets/listobj.py", line 154, in _payload
return get_list_payload(self._context, self._builder, self._ty, self._list)
File "/home/antoine/numba/numba/targets/listobj.py", line 44, in get_list_payload
return make_payload_cls(list_type)(context, builder, ref=payload)
File "/home/antoine/numba/numba/cgutils.py", line 95, in __init__
% (self._be_type.as_pointer(), ref.type))
AssertionError: bad ref type: expected {i64, i64, i1}*, got {i64, i64, i8}*
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/antoine/numba/numba/dispatcher.py", line 162, in _compile_for_args
return self.compile(sig)
File "/home/antoine/numba/numba/dispatcher.py", line 327, in compile
flags=flags, locals=self.locals)
File "/home/antoine/numba/numba/compiler.py", line 594, in compile_extra
return pipeline.compile_extra(func)
File "/home/antoine/numba/numba/compiler.py", line 317, in compile_extra
return self.compile_bytecode(bc, func_attr=self.func_attr)
File "/home/antoine/numba/numba/compiler.py", line 326, in compile_bytecode
return self._compile_bytecode()
File "/home/antoine/numba/numba/compiler.py", line 581, in _compile_bytecode
return pm.run(self.status)
File "/home/antoine/numba/numba/compiler.py", line 209, in run
raise patched_exception
File "/home/antoine/numba/numba/compiler.py", line 201, in run
res = stage()
File "/home/antoine/numba/numba/compiler.py", line 537, in stage_nopython_backend
return self._backend(lowerfn, objectmode=False)
File "/home/antoine/numba/numba/compiler.py", line 492, in _backend
lowered = lowerfn()
File "/home/antoine/numba/numba/compiler.py", line 483, in backend_nopython_mode
self.flags)
File "/home/antoine/numba/numba/compiler.py", line 723, in native_lowering_stage
lower.lower()
File "/home/antoine/numba/numba/lowering.py", line 100, in lower
self.lower_normal_function(self.fndesc)
File "/home/antoine/numba/numba/lowering.py", line 135, in lower_normal_function
entry_block_tail = self.lower_function_body()
File "/home/antoine/numba/numba/lowering.py", line 160, in lower_function_body
self.lower_block(block)
File "/home/antoine/numba/numba/lowering.py", line 178, in lower_block
raise LoweringError(msg, inst.loc)
numba.errors.LoweringError: Failed at nopython (nopython mode backend)
Internal error:
AssertionError: bad ref type: expected {i64, i64, i1}*, got {i64, i64, i8}*
File "<stdin>", line 1
|
AssertionError
|
def unify_types(self, *typelist):
# Sort the type list according to bit width before doing
# pairwise unification (with thanks to aterrel).
def keyfunc(obj):
"""Uses bitwidth to order numeric-types.
Fallback to stable, deterministic sort.
"""
return getattr(obj, "bitwidth", 0)
typelist = sorted(typelist, key=keyfunc)
unified = typelist[0]
for tp in typelist[1:]:
unified = self.unify_pairs(unified, tp)
if unified is None:
break
return unified
|
def unify_types(self, *typelist):
# Sort the type list according to bit width before doing
# pairwise unification (with thanks to aterrel).
def keyfunc(obj):
"""Uses bitwidth to order numeric-types.
Fallback to hash() for arbitary ordering.
"""
return getattr(obj, "bitwidth", hash(obj))
typelist = sorted(typelist, key=keyfunc)
unified = typelist[0]
for tp in typelist[1:]:
unified = self.unify_pairs(unified, tp)
if unified is None:
break
return unified
|
https://github.com/numba/numba/issues/1373
|
def f(): return [True]
...
ff = jit(nopython=True)(f)
ff()
Traceback (most recent call last):
File "/home/antoine/numba/numba/lowering.py", line 173, in lower_block
self.lower_inst(inst)
File "/home/antoine/numba/numba/lowering.py", line 215, in lower_inst
val = self.lower_assign(ty, inst)
File "/home/antoine/numba/numba/lowering.py", line 371, in lower_assign
return self.lower_expr(ty, value)
File "/home/antoine/numba/numba/lowering.py", line 733, in lower_expr
return self.context.build_list(self.builder, resty, castvals)
File "/home/antoine/numba/numba/targets/cpu.py", line 111, in build_list
return listobj.build_list(self, builder, list_type, items)
File "/home/antoine/numba/numba/targets/listobj.py", line 301, in build_list
inst = ListInstance.allocate(context, builder, list_type, nitems)
File "/home/antoine/numba/numba/targets/listobj.py", line 190, in allocate
self._payload.allocated = nitems
File "/home/antoine/numba/numba/targets/listobj.py", line 154, in _payload
return get_list_payload(self._context, self._builder, self._ty, self._list)
File "/home/antoine/numba/numba/targets/listobj.py", line 44, in get_list_payload
return make_payload_cls(list_type)(context, builder, ref=payload)
File "/home/antoine/numba/numba/cgutils.py", line 95, in __init__
% (self._be_type.as_pointer(), ref.type))
AssertionError: bad ref type: expected {i64, i64, i1}*, got {i64, i64, i8}*
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/antoine/numba/numba/dispatcher.py", line 162, in _compile_for_args
return self.compile(sig)
File "/home/antoine/numba/numba/dispatcher.py", line 327, in compile
flags=flags, locals=self.locals)
File "/home/antoine/numba/numba/compiler.py", line 594, in compile_extra
return pipeline.compile_extra(func)
File "/home/antoine/numba/numba/compiler.py", line 317, in compile_extra
return self.compile_bytecode(bc, func_attr=self.func_attr)
File "/home/antoine/numba/numba/compiler.py", line 326, in compile_bytecode
return self._compile_bytecode()
File "/home/antoine/numba/numba/compiler.py", line 581, in _compile_bytecode
return pm.run(self.status)
File "/home/antoine/numba/numba/compiler.py", line 209, in run
raise patched_exception
File "/home/antoine/numba/numba/compiler.py", line 201, in run
res = stage()
File "/home/antoine/numba/numba/compiler.py", line 537, in stage_nopython_backend
return self._backend(lowerfn, objectmode=False)
File "/home/antoine/numba/numba/compiler.py", line 492, in _backend
lowered = lowerfn()
File "/home/antoine/numba/numba/compiler.py", line 483, in backend_nopython_mode
self.flags)
File "/home/antoine/numba/numba/compiler.py", line 723, in native_lowering_stage
lower.lower()
File "/home/antoine/numba/numba/lowering.py", line 100, in lower
self.lower_normal_function(self.fndesc)
File "/home/antoine/numba/numba/lowering.py", line 135, in lower_normal_function
entry_block_tail = self.lower_function_body()
File "/home/antoine/numba/numba/lowering.py", line 160, in lower_function_body
self.lower_block(block)
File "/home/antoine/numba/numba/lowering.py", line 178, in lower_block
raise LoweringError(msg, inst.loc)
numba.errors.LoweringError: Failed at nopython (nopython mode backend)
Internal error:
AssertionError: bad ref type: expected {i64, i64, i1}*, got {i64, i64, i8}*
File "<stdin>", line 1
|
AssertionError
|
def keyfunc(obj):
"""Uses bitwidth to order numeric-types.
Fallback to stable, deterministic sort.
"""
return getattr(obj, "bitwidth", 0)
|
def keyfunc(obj):
"""Uses bitwidth to order numeric-types.
Fallback to hash() for arbitary ordering.
"""
return getattr(obj, "bitwidth", hash(obj))
|
https://github.com/numba/numba/issues/1373
|
def f(): return [True]
...
ff = jit(nopython=True)(f)
ff()
Traceback (most recent call last):
File "/home/antoine/numba/numba/lowering.py", line 173, in lower_block
self.lower_inst(inst)
File "/home/antoine/numba/numba/lowering.py", line 215, in lower_inst
val = self.lower_assign(ty, inst)
File "/home/antoine/numba/numba/lowering.py", line 371, in lower_assign
return self.lower_expr(ty, value)
File "/home/antoine/numba/numba/lowering.py", line 733, in lower_expr
return self.context.build_list(self.builder, resty, castvals)
File "/home/antoine/numba/numba/targets/cpu.py", line 111, in build_list
return listobj.build_list(self, builder, list_type, items)
File "/home/antoine/numba/numba/targets/listobj.py", line 301, in build_list
inst = ListInstance.allocate(context, builder, list_type, nitems)
File "/home/antoine/numba/numba/targets/listobj.py", line 190, in allocate
self._payload.allocated = nitems
File "/home/antoine/numba/numba/targets/listobj.py", line 154, in _payload
return get_list_payload(self._context, self._builder, self._ty, self._list)
File "/home/antoine/numba/numba/targets/listobj.py", line 44, in get_list_payload
return make_payload_cls(list_type)(context, builder, ref=payload)
File "/home/antoine/numba/numba/cgutils.py", line 95, in __init__
% (self._be_type.as_pointer(), ref.type))
AssertionError: bad ref type: expected {i64, i64, i1}*, got {i64, i64, i8}*
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/antoine/numba/numba/dispatcher.py", line 162, in _compile_for_args
return self.compile(sig)
File "/home/antoine/numba/numba/dispatcher.py", line 327, in compile
flags=flags, locals=self.locals)
File "/home/antoine/numba/numba/compiler.py", line 594, in compile_extra
return pipeline.compile_extra(func)
File "/home/antoine/numba/numba/compiler.py", line 317, in compile_extra
return self.compile_bytecode(bc, func_attr=self.func_attr)
File "/home/antoine/numba/numba/compiler.py", line 326, in compile_bytecode
return self._compile_bytecode()
File "/home/antoine/numba/numba/compiler.py", line 581, in _compile_bytecode
return pm.run(self.status)
File "/home/antoine/numba/numba/compiler.py", line 209, in run
raise patched_exception
File "/home/antoine/numba/numba/compiler.py", line 201, in run
res = stage()
File "/home/antoine/numba/numba/compiler.py", line 537, in stage_nopython_backend
return self._backend(lowerfn, objectmode=False)
File "/home/antoine/numba/numba/compiler.py", line 492, in _backend
lowered = lowerfn()
File "/home/antoine/numba/numba/compiler.py", line 483, in backend_nopython_mode
self.flags)
File "/home/antoine/numba/numba/compiler.py", line 723, in native_lowering_stage
lower.lower()
File "/home/antoine/numba/numba/lowering.py", line 100, in lower
self.lower_normal_function(self.fndesc)
File "/home/antoine/numba/numba/lowering.py", line 135, in lower_normal_function
entry_block_tail = self.lower_function_body()
File "/home/antoine/numba/numba/lowering.py", line 160, in lower_function_body
self.lower_block(block)
File "/home/antoine/numba/numba/lowering.py", line 178, in lower_block
raise LoweringError(msg, inst.loc)
numba.errors.LoweringError: Failed at nopython (nopython mode backend)
Internal error:
AssertionError: bad ref type: expected {i64, i64, i1}*, got {i64, i64, i8}*
File "<stdin>", line 1
|
AssertionError
|
def unify_pairs(self, first, second):
"""
Try to unify the two given types. A third type is returned,
or pyobject in case of failure.
"""
if first == second:
return first
if first is types.undefined:
return second
elif second is types.undefined:
return first
# Types with special unification rules
unified = first.unify(self, second)
if unified is not None:
return unified
unified = second.unify(self, first)
if unified is not None:
return unified
# Other types with simple conversion rules
conv = self.can_convert(fromty=first, toty=second)
if conv is not None and conv <= Conversion.safe:
# Can convert from first to second
return second
conv = self.can_convert(fromty=second, toty=first)
if conv is not None and conv <= Conversion.safe:
# Can convert from second to first
return first
# Cannot unify
return types.pyobject
|
def unify_pairs(self, first, second):
"""
Try to unify the two given types. A third type is returned,
or None in case of failure.
"""
if first == second:
return first
if first is types.undefined:
return second
elif second is types.undefined:
return first
# Types with special unification rules
unified = first.unify(self, second)
if unified is not None:
return unified
unified = second.unify(self, first)
if unified is not None:
return unified
# Other types with simple conversion rules
conv = self.can_convert(fromty=first, toty=second)
if conv is not None and conv <= Conversion.safe:
return conv
return types.pyobject
|
https://github.com/numba/numba/issues/1373
|
def f(): return [True]
...
ff = jit(nopython=True)(f)
ff()
Traceback (most recent call last):
File "/home/antoine/numba/numba/lowering.py", line 173, in lower_block
self.lower_inst(inst)
File "/home/antoine/numba/numba/lowering.py", line 215, in lower_inst
val = self.lower_assign(ty, inst)
File "/home/antoine/numba/numba/lowering.py", line 371, in lower_assign
return self.lower_expr(ty, value)
File "/home/antoine/numba/numba/lowering.py", line 733, in lower_expr
return self.context.build_list(self.builder, resty, castvals)
File "/home/antoine/numba/numba/targets/cpu.py", line 111, in build_list
return listobj.build_list(self, builder, list_type, items)
File "/home/antoine/numba/numba/targets/listobj.py", line 301, in build_list
inst = ListInstance.allocate(context, builder, list_type, nitems)
File "/home/antoine/numba/numba/targets/listobj.py", line 190, in allocate
self._payload.allocated = nitems
File "/home/antoine/numba/numba/targets/listobj.py", line 154, in _payload
return get_list_payload(self._context, self._builder, self._ty, self._list)
File "/home/antoine/numba/numba/targets/listobj.py", line 44, in get_list_payload
return make_payload_cls(list_type)(context, builder, ref=payload)
File "/home/antoine/numba/numba/cgutils.py", line 95, in __init__
% (self._be_type.as_pointer(), ref.type))
AssertionError: bad ref type: expected {i64, i64, i1}*, got {i64, i64, i8}*
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/antoine/numba/numba/dispatcher.py", line 162, in _compile_for_args
return self.compile(sig)
File "/home/antoine/numba/numba/dispatcher.py", line 327, in compile
flags=flags, locals=self.locals)
File "/home/antoine/numba/numba/compiler.py", line 594, in compile_extra
return pipeline.compile_extra(func)
File "/home/antoine/numba/numba/compiler.py", line 317, in compile_extra
return self.compile_bytecode(bc, func_attr=self.func_attr)
File "/home/antoine/numba/numba/compiler.py", line 326, in compile_bytecode
return self._compile_bytecode()
File "/home/antoine/numba/numba/compiler.py", line 581, in _compile_bytecode
return pm.run(self.status)
File "/home/antoine/numba/numba/compiler.py", line 209, in run
raise patched_exception
File "/home/antoine/numba/numba/compiler.py", line 201, in run
res = stage()
File "/home/antoine/numba/numba/compiler.py", line 537, in stage_nopython_backend
return self._backend(lowerfn, objectmode=False)
File "/home/antoine/numba/numba/compiler.py", line 492, in _backend
lowered = lowerfn()
File "/home/antoine/numba/numba/compiler.py", line 483, in backend_nopython_mode
self.flags)
File "/home/antoine/numba/numba/compiler.py", line 723, in native_lowering_stage
lower.lower()
File "/home/antoine/numba/numba/lowering.py", line 100, in lower
self.lower_normal_function(self.fndesc)
File "/home/antoine/numba/numba/lowering.py", line 135, in lower_normal_function
entry_block_tail = self.lower_function_body()
File "/home/antoine/numba/numba/lowering.py", line 160, in lower_function_body
self.lower_block(block)
File "/home/antoine/numba/numba/lowering.py", line 178, in lower_block
raise LoweringError(msg, inst.loc)
numba.errors.LoweringError: Failed at nopython (nopython mode backend)
Internal error:
AssertionError: bad ref type: expected {i64, i64, i1}*, got {i64, i64, i8}*
File "<stdin>", line 1
|
AssertionError
|
def get_data_type(self, ty):
"""
Get a LLVM data representation of the Numba type *ty* that is safe
for storage. Record data are stored as byte array.
The return value is a llvmlite.ir.Type object, or None if the type
is an opaque pointer (???).
"""
return self.data_model_manager[ty].get_data_type()
|
def get_data_type(self, ty):
"""
Get a LLVM data representation of the Numba type *ty* that is safe
for storage. Record data are stored as byte array.
The return value is a llvmlite.ir.Type object, or None if the type
is an opaque pointer (???).
"""
try:
fac = type_registry.match(ty)
except KeyError:
pass
else:
return fac(self, ty)
return self.data_model_manager[ty].get_data_type()
|
https://github.com/numba/numba/issues/1373
|
def f(): return [True]
...
ff = jit(nopython=True)(f)
ff()
Traceback (most recent call last):
File "/home/antoine/numba/numba/lowering.py", line 173, in lower_block
self.lower_inst(inst)
File "/home/antoine/numba/numba/lowering.py", line 215, in lower_inst
val = self.lower_assign(ty, inst)
File "/home/antoine/numba/numba/lowering.py", line 371, in lower_assign
return self.lower_expr(ty, value)
File "/home/antoine/numba/numba/lowering.py", line 733, in lower_expr
return self.context.build_list(self.builder, resty, castvals)
File "/home/antoine/numba/numba/targets/cpu.py", line 111, in build_list
return listobj.build_list(self, builder, list_type, items)
File "/home/antoine/numba/numba/targets/listobj.py", line 301, in build_list
inst = ListInstance.allocate(context, builder, list_type, nitems)
File "/home/antoine/numba/numba/targets/listobj.py", line 190, in allocate
self._payload.allocated = nitems
File "/home/antoine/numba/numba/targets/listobj.py", line 154, in _payload
return get_list_payload(self._context, self._builder, self._ty, self._list)
File "/home/antoine/numba/numba/targets/listobj.py", line 44, in get_list_payload
return make_payload_cls(list_type)(context, builder, ref=payload)
File "/home/antoine/numba/numba/cgutils.py", line 95, in __init__
% (self._be_type.as_pointer(), ref.type))
AssertionError: bad ref type: expected {i64, i64, i1}*, got {i64, i64, i8}*
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/antoine/numba/numba/dispatcher.py", line 162, in _compile_for_args
return self.compile(sig)
File "/home/antoine/numba/numba/dispatcher.py", line 327, in compile
flags=flags, locals=self.locals)
File "/home/antoine/numba/numba/compiler.py", line 594, in compile_extra
return pipeline.compile_extra(func)
File "/home/antoine/numba/numba/compiler.py", line 317, in compile_extra
return self.compile_bytecode(bc, func_attr=self.func_attr)
File "/home/antoine/numba/numba/compiler.py", line 326, in compile_bytecode
return self._compile_bytecode()
File "/home/antoine/numba/numba/compiler.py", line 581, in _compile_bytecode
return pm.run(self.status)
File "/home/antoine/numba/numba/compiler.py", line 209, in run
raise patched_exception
File "/home/antoine/numba/numba/compiler.py", line 201, in run
res = stage()
File "/home/antoine/numba/numba/compiler.py", line 537, in stage_nopython_backend
return self._backend(lowerfn, objectmode=False)
File "/home/antoine/numba/numba/compiler.py", line 492, in _backend
lowered = lowerfn()
File "/home/antoine/numba/numba/compiler.py", line 483, in backend_nopython_mode
self.flags)
File "/home/antoine/numba/numba/compiler.py", line 723, in native_lowering_stage
lower.lower()
File "/home/antoine/numba/numba/lowering.py", line 100, in lower
self.lower_normal_function(self.fndesc)
File "/home/antoine/numba/numba/lowering.py", line 135, in lower_normal_function
entry_block_tail = self.lower_function_body()
File "/home/antoine/numba/numba/lowering.py", line 160, in lower_function_body
self.lower_block(block)
File "/home/antoine/numba/numba/lowering.py", line 178, in lower_block
raise LoweringError(msg, inst.loc)
numba.errors.LoweringError: Failed at nopython (nopython mode backend)
Internal error:
AssertionError: bad ref type: expected {i64, i64, i1}*, got {i64, i64, i8}*
File "<stdin>", line 1
|
AssertionError
|
def __init__(self):
self.functions = []
self.attributes = []
|
def __init__(self):
self.factories = {}
|
https://github.com/numba/numba/issues/1373
|
def f(): return [True]
...
ff = jit(nopython=True)(f)
ff()
Traceback (most recent call last):
File "/home/antoine/numba/numba/lowering.py", line 173, in lower_block
self.lower_inst(inst)
File "/home/antoine/numba/numba/lowering.py", line 215, in lower_inst
val = self.lower_assign(ty, inst)
File "/home/antoine/numba/numba/lowering.py", line 371, in lower_assign
return self.lower_expr(ty, value)
File "/home/antoine/numba/numba/lowering.py", line 733, in lower_expr
return self.context.build_list(self.builder, resty, castvals)
File "/home/antoine/numba/numba/targets/cpu.py", line 111, in build_list
return listobj.build_list(self, builder, list_type, items)
File "/home/antoine/numba/numba/targets/listobj.py", line 301, in build_list
inst = ListInstance.allocate(context, builder, list_type, nitems)
File "/home/antoine/numba/numba/targets/listobj.py", line 190, in allocate
self._payload.allocated = nitems
File "/home/antoine/numba/numba/targets/listobj.py", line 154, in _payload
return get_list_payload(self._context, self._builder, self._ty, self._list)
File "/home/antoine/numba/numba/targets/listobj.py", line 44, in get_list_payload
return make_payload_cls(list_type)(context, builder, ref=payload)
File "/home/antoine/numba/numba/cgutils.py", line 95, in __init__
% (self._be_type.as_pointer(), ref.type))
AssertionError: bad ref type: expected {i64, i64, i1}*, got {i64, i64, i8}*
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/antoine/numba/numba/dispatcher.py", line 162, in _compile_for_args
return self.compile(sig)
File "/home/antoine/numba/numba/dispatcher.py", line 327, in compile
flags=flags, locals=self.locals)
File "/home/antoine/numba/numba/compiler.py", line 594, in compile_extra
return pipeline.compile_extra(func)
File "/home/antoine/numba/numba/compiler.py", line 317, in compile_extra
return self.compile_bytecode(bc, func_attr=self.func_attr)
File "/home/antoine/numba/numba/compiler.py", line 326, in compile_bytecode
return self._compile_bytecode()
File "/home/antoine/numba/numba/compiler.py", line 581, in _compile_bytecode
return pm.run(self.status)
File "/home/antoine/numba/numba/compiler.py", line 209, in run
raise patched_exception
File "/home/antoine/numba/numba/compiler.py", line 201, in run
res = stage()
File "/home/antoine/numba/numba/compiler.py", line 537, in stage_nopython_backend
return self._backend(lowerfn, objectmode=False)
File "/home/antoine/numba/numba/compiler.py", line 492, in _backend
lowered = lowerfn()
File "/home/antoine/numba/numba/compiler.py", line 483, in backend_nopython_mode
self.flags)
File "/home/antoine/numba/numba/compiler.py", line 723, in native_lowering_stage
lower.lower()
File "/home/antoine/numba/numba/lowering.py", line 100, in lower
self.lower_normal_function(self.fndesc)
File "/home/antoine/numba/numba/lowering.py", line 135, in lower_normal_function
entry_block_tail = self.lower_function_body()
File "/home/antoine/numba/numba/lowering.py", line 160, in lower_function_body
self.lower_block(block)
File "/home/antoine/numba/numba/lowering.py", line 178, in lower_block
raise LoweringError(msg, inst.loc)
numba.errors.LoweringError: Failed at nopython (nopython mode backend)
Internal error:
AssertionError: bad ref type: expected {i64, i64, i1}*, got {i64, i64, i8}*
File "<stdin>", line 1
|
AssertionError
|
def register(self, impl):
sigs = impl.function_signatures
impl.function_signatures = []
self.functions.append((impl, sigs))
return impl
|
def register(self, type_class):
"""
Register a LLVM type factory function for the given *type_class*
(i.e. a subclass of numba.types.Type).
"""
assert issubclass(type_class, types.Type)
def decorator(func):
self.factories[type_class] = func
return func
return decorator
|
https://github.com/numba/numba/issues/1373
|
def f(): return [True]
...
ff = jit(nopython=True)(f)
ff()
Traceback (most recent call last):
File "/home/antoine/numba/numba/lowering.py", line 173, in lower_block
self.lower_inst(inst)
File "/home/antoine/numba/numba/lowering.py", line 215, in lower_inst
val = self.lower_assign(ty, inst)
File "/home/antoine/numba/numba/lowering.py", line 371, in lower_assign
return self.lower_expr(ty, value)
File "/home/antoine/numba/numba/lowering.py", line 733, in lower_expr
return self.context.build_list(self.builder, resty, castvals)
File "/home/antoine/numba/numba/targets/cpu.py", line 111, in build_list
return listobj.build_list(self, builder, list_type, items)
File "/home/antoine/numba/numba/targets/listobj.py", line 301, in build_list
inst = ListInstance.allocate(context, builder, list_type, nitems)
File "/home/antoine/numba/numba/targets/listobj.py", line 190, in allocate
self._payload.allocated = nitems
File "/home/antoine/numba/numba/targets/listobj.py", line 154, in _payload
return get_list_payload(self._context, self._builder, self._ty, self._list)
File "/home/antoine/numba/numba/targets/listobj.py", line 44, in get_list_payload
return make_payload_cls(list_type)(context, builder, ref=payload)
File "/home/antoine/numba/numba/cgutils.py", line 95, in __init__
% (self._be_type.as_pointer(), ref.type))
AssertionError: bad ref type: expected {i64, i64, i1}*, got {i64, i64, i8}*
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/antoine/numba/numba/dispatcher.py", line 162, in _compile_for_args
return self.compile(sig)
File "/home/antoine/numba/numba/dispatcher.py", line 327, in compile
flags=flags, locals=self.locals)
File "/home/antoine/numba/numba/compiler.py", line 594, in compile_extra
return pipeline.compile_extra(func)
File "/home/antoine/numba/numba/compiler.py", line 317, in compile_extra
return self.compile_bytecode(bc, func_attr=self.func_attr)
File "/home/antoine/numba/numba/compiler.py", line 326, in compile_bytecode
return self._compile_bytecode()
File "/home/antoine/numba/numba/compiler.py", line 581, in _compile_bytecode
return pm.run(self.status)
File "/home/antoine/numba/numba/compiler.py", line 209, in run
raise patched_exception
File "/home/antoine/numba/numba/compiler.py", line 201, in run
res = stage()
File "/home/antoine/numba/numba/compiler.py", line 537, in stage_nopython_backend
return self._backend(lowerfn, objectmode=False)
File "/home/antoine/numba/numba/compiler.py", line 492, in _backend
lowered = lowerfn()
File "/home/antoine/numba/numba/compiler.py", line 483, in backend_nopython_mode
self.flags)
File "/home/antoine/numba/numba/compiler.py", line 723, in native_lowering_stage
lower.lower()
File "/home/antoine/numba/numba/lowering.py", line 100, in lower
self.lower_normal_function(self.fndesc)
File "/home/antoine/numba/numba/lowering.py", line 135, in lower_normal_function
entry_block_tail = self.lower_function_body()
File "/home/antoine/numba/numba/lowering.py", line 160, in lower_function_body
self.lower_block(block)
File "/home/antoine/numba/numba/lowering.py", line 178, in lower_block
raise LoweringError(msg, inst.loc)
numba.errors.LoweringError: Failed at nopython (nopython mode backend)
Internal error:
AssertionError: bad ref type: expected {i64, i64, i1}*, got {i64, i64, i8}*
File "<stdin>", line 1
|
AssertionError
|
def __init__(self, device, handle, finalizer=None):
self.device = device
self.handle = handle
self.external_finalizer = finalizer
self.trashing = TrashService(
"cuda.device%d.context%x.trash" % (self.device.id, self.handle.value)
)
self.allocations = utils.UniqueDict()
self.modules = utils.UniqueDict()
self.finalizer = utils.finalize(self, self._make_finalizer())
# For storing context specific data
self.extras = {}
|
def __init__(self, device, handle, finalizer=None):
self.device = device
self.handle = handle
self.finalizer = finalizer
self.trashing = TrashService(
"cuda.device%d.context%x.trash" % (self.device.id, self.handle.value)
)
self.is_managed = finalizer is not None
self.allocations = utils.UniqueDict()
self.modules = utils.UniqueDict()
# For storing context specific data
self.extras = {}
|
https://github.com/numba/numba/issues/1164
|
Exception ignored in: <bound method Context.__del__ of <CUDA context c_void_p(36426160) of device 0>>
Traceback (most recent call last):
File "/home/antoine/numba/numba/cuda/cudadrv/driver.py", line 467, in __del__
AttributeError: 'NoneType' object has no attribute 'print_exc'
|
AttributeError
|
def lower_expr(self, resty, expr):
if expr.op == "binop":
return self.lower_binop(resty, expr)
elif expr.op == "inplace_binop":
lty = self.typeof(expr.lhs.name)
if not lty.mutable:
# inplace operators on non-mutable types reuse the same
# definition as the corresponding copying operators.
return self.lower_binop(resty, expr)
elif expr.op == "unary":
val = self.loadvar(expr.value.name)
typ = self.typeof(expr.value.name)
# Get function
signature = self.fndesc.calltypes[expr]
impl = self.context.get_function(expr.fn, signature)
# Convert argument to match
val = self.context.cast(self.builder, val, typ, signature.args[0])
res = impl(self.builder, [val])
return self.context.cast(self.builder, res, signature.return_type, resty)
elif expr.op == "call":
return self.lower_call(resty, expr)
elif expr.op == "pair_first":
val = self.loadvar(expr.value.name)
ty = self.typeof(expr.value.name)
return self.context.pair_first(self.builder, val, ty)
elif expr.op == "pair_second":
val = self.loadvar(expr.value.name)
ty = self.typeof(expr.value.name)
return self.context.pair_second(self.builder, val, ty)
elif expr.op in ("getiter", "iternext"):
val = self.loadvar(expr.value.name)
ty = self.typeof(expr.value.name)
signature = self.fndesc.calltypes[expr]
impl = self.context.get_function(expr.op, signature)
[fty] = signature.args
castval = self.context.cast(self.builder, val, ty, fty)
res = impl(self.builder, (castval,))
return self.context.cast(self.builder, res, signature.return_type, resty)
elif expr.op == "exhaust_iter":
val = self.loadvar(expr.value.name)
ty = self.typeof(expr.value.name)
# If we have a tuple, we needn't do anything
# (and we can't iterate over the heterogenous ones).
if isinstance(ty, types.BaseTuple):
return val
itemty = ty.iterator_type.yield_type
tup = self.context.get_constant_undef(resty)
pairty = types.Pair(itemty, types.boolean)
getiter_sig = typing.signature(ty.iterator_type, ty)
getiter_impl = self.context.get_function("getiter", getiter_sig)
iternext_sig = typing.signature(pairty, ty.iterator_type)
iternext_impl = self.context.get_function("iternext", iternext_sig)
iterobj = getiter_impl(self.builder, (val,))
# We call iternext() as many times as desired (`expr.count`).
for i in range(expr.count):
pair = iternext_impl(self.builder, (iterobj,))
is_valid = self.context.pair_second(self.builder, pair, pairty)
with cgutils.if_unlikely(self.builder, self.builder.not_(is_valid)):
self.return_exception(ValueError)
item = self.context.pair_first(self.builder, pair, pairty)
tup = self.builder.insert_value(tup, item, i)
# Call iternext() once more to check that the iterator
# is exhausted.
pair = iternext_impl(self.builder, (iterobj,))
is_valid = self.context.pair_second(self.builder, pair, pairty)
with cgutils.if_unlikely(self.builder, is_valid):
self.return_exception(ValueError)
return tup
elif expr.op == "getattr":
val = self.loadvar(expr.value.name)
ty = self.typeof(expr.value.name)
if isinstance(resty, types.BoundFunction):
# if we are getting out a method, assume we have typed this
# properly and just build a bound function object
res = self.context.get_bound_function(self.builder, val, ty)
else:
impl = self.context.get_attribute(val, ty, expr.attr)
if impl is None:
# ignore the attribute
res = self.context.get_dummy_value()
else:
res = impl(self.context, self.builder, ty, val, expr.attr)
return res
elif expr.op == "static_getitem":
baseval = self.loadvar(expr.value.name)
indexval = self.context.get_constant(types.intp, expr.index)
if cgutils.is_struct(baseval.type):
# Statically extract the given element from the structure
# (structures aren't dynamically indexable).
return self.builder.extract_value(baseval, expr.index)
else:
# Fall back on the generic getitem() implementation
# for this type.
signature = typing.signature(
resty, self.typeof(expr.value.name), types.intp
)
impl = self.context.get_function("getitem", signature)
argvals = (baseval, indexval)
res = impl(self.builder, argvals)
return self.context.cast(self.builder, res, signature.return_type, resty)
elif expr.op == "getitem":
baseval = self.loadvar(expr.value.name)
indexval = self.loadvar(expr.index.name)
signature = self.fndesc.calltypes[expr]
impl = self.context.get_function("getitem", signature)
argvals = (baseval, indexval)
argtyps = (self.typeof(expr.value.name), self.typeof(expr.index.name))
castvals = [
self.context.cast(self.builder, av, at, ft)
for av, at, ft in zip(argvals, argtyps, signature.args)
]
res = impl(self.builder, castvals)
return self.context.cast(self.builder, res, signature.return_type, resty)
elif expr.op == "build_tuple":
itemvals = [self.loadvar(i.name) for i in expr.items]
itemtys = [self.typeof(i.name) for i in expr.items]
castvals = [
self.context.cast(self.builder, val, fromty, toty)
for val, toty, fromty in zip(itemvals, resty, itemtys)
]
tup = self.context.get_constant_undef(resty)
for i in range(len(castvals)):
tup = self.builder.insert_value(tup, castvals[i], i)
return tup
elif expr.op == "cast":
val = self.loadvar(expr.value.name)
ty = self.typeof(expr.value.name)
castval = self.context.cast(self.builder, val, ty, resty)
return castval
raise NotImplementedError(expr)
|
def lower_expr(self, resty, expr):
if expr.op == "binop":
return self.lower_binop(resty, expr)
elif expr.op == "inplace_binop":
lty = self.typeof(expr.lhs.name)
if not lty.mutable:
# inplace operators on non-mutable types reuse the same
# definition as the corresponding copying operators.
return self.lower_binop(resty, expr)
elif expr.op == "unary":
val = self.loadvar(expr.value.name)
typ = self.typeof(expr.value.name)
# Get function
signature = self.fndesc.calltypes[expr]
impl = self.context.get_function(expr.fn, signature)
# Convert argument to match
val = self.context.cast(self.builder, val, typ, signature.args[0])
res = impl(self.builder, [val])
return self.context.cast(self.builder, res, signature.return_type, resty)
elif expr.op == "call":
return self.lower_call(resty, expr)
elif expr.op == "pair_first":
val = self.loadvar(expr.value.name)
ty = self.typeof(expr.value.name)
return self.context.pair_first(self.builder, val, ty)
elif expr.op == "pair_second":
val = self.loadvar(expr.value.name)
ty = self.typeof(expr.value.name)
return self.context.pair_second(self.builder, val, ty)
elif expr.op in ("getiter", "iternext"):
val = self.loadvar(expr.value.name)
ty = self.typeof(expr.value.name)
signature = self.fndesc.calltypes[expr]
impl = self.context.get_function(expr.op, signature)
[fty] = signature.args
castval = self.context.cast(self.builder, val, ty, fty)
res = impl(self.builder, (castval,))
return self.context.cast(self.builder, res, signature.return_type, resty)
elif expr.op == "exhaust_iter":
val = self.loadvar(expr.value.name)
ty = self.typeof(expr.value.name)
# If we have a heterogenous tuple, we needn't do anything,
# and we can't iterate over it anyway.
if isinstance(ty, types.Tuple):
return val
itemty = ty.iterator_type.yield_type
tup = self.context.get_constant_undef(resty)
pairty = types.Pair(itemty, types.boolean)
getiter_sig = typing.signature(ty.iterator_type, ty)
getiter_impl = self.context.get_function("getiter", getiter_sig)
iternext_sig = typing.signature(pairty, ty.iterator_type)
iternext_impl = self.context.get_function("iternext", iternext_sig)
iterobj = getiter_impl(self.builder, (val,))
# We call iternext() as many times as desired (`expr.count`).
for i in range(expr.count):
pair = iternext_impl(self.builder, (iterobj,))
is_valid = self.context.pair_second(self.builder, pair, pairty)
with cgutils.if_unlikely(self.builder, self.builder.not_(is_valid)):
self.return_exception(ValueError)
item = self.context.pair_first(self.builder, pair, pairty)
tup = self.builder.insert_value(tup, item, i)
# Call iternext() once more to check that the iterator
# is exhausted.
pair = iternext_impl(self.builder, (iterobj,))
is_valid = self.context.pair_second(self.builder, pair, pairty)
with cgutils.if_unlikely(self.builder, is_valid):
self.return_exception(ValueError)
return tup
elif expr.op == "getattr":
val = self.loadvar(expr.value.name)
ty = self.typeof(expr.value.name)
if isinstance(resty, types.BoundFunction):
# if we are getting out a method, assume we have typed this
# properly and just build a bound function object
res = self.context.get_bound_function(self.builder, val, ty)
else:
impl = self.context.get_attribute(val, ty, expr.attr)
if impl is None:
# ignore the attribute
res = self.context.get_dummy_value()
else:
res = impl(self.context, self.builder, ty, val, expr.attr)
return res
elif expr.op == "static_getitem":
baseval = self.loadvar(expr.value.name)
indexval = self.context.get_constant(types.intp, expr.index)
if cgutils.is_struct(baseval.type):
# Statically extract the given element from the structure
# (structures aren't dynamically indexable).
return self.builder.extract_value(baseval, expr.index)
else:
# Fall back on the generic getitem() implementation
# for this type.
signature = typing.signature(
resty, self.typeof(expr.value.name), types.intp
)
impl = self.context.get_function("getitem", signature)
argvals = (baseval, indexval)
res = impl(self.builder, argvals)
return self.context.cast(self.builder, res, signature.return_type, resty)
elif expr.op == "getitem":
baseval = self.loadvar(expr.value.name)
indexval = self.loadvar(expr.index.name)
signature = self.fndesc.calltypes[expr]
impl = self.context.get_function("getitem", signature)
argvals = (baseval, indexval)
argtyps = (self.typeof(expr.value.name), self.typeof(expr.index.name))
castvals = [
self.context.cast(self.builder, av, at, ft)
for av, at, ft in zip(argvals, argtyps, signature.args)
]
res = impl(self.builder, castvals)
return self.context.cast(self.builder, res, signature.return_type, resty)
elif expr.op == "build_tuple":
itemvals = [self.loadvar(i.name) for i in expr.items]
itemtys = [self.typeof(i.name) for i in expr.items]
castvals = [
self.context.cast(self.builder, val, fromty, toty)
for val, toty, fromty in zip(itemvals, resty, itemtys)
]
tup = self.context.get_constant_undef(resty)
for i in range(len(castvals)):
tup = self.builder.insert_value(tup, castvals[i], i)
return tup
elif expr.op == "cast":
val = self.loadvar(expr.value.name)
ty = self.typeof(expr.value.name)
castval = self.context.cast(self.builder, val, ty, resty)
return castval
raise NotImplementedError(expr)
|
https://github.com/numba/numba/issues/1151
|
ff = jit(f, nopython=True)
ff((4,5,6,7))
(4, 5, 6)
ff((4,5))
Traceback (most recent call last):
[...]
numba.lowering.LoweringError: Failed at nopython (nopython mode backend)
Internal error:
TypeError: Can't index at [2] in [2 x i32]
File "<stdin>", line 2
|
numba.lowering.LoweringError
|
def __call__(self, context, typevars):
oset = typevars[self.target]
for tp in typevars[self.iterator.name].get():
if isinstance(tp, types.BaseTuple):
if len(tp) == self.count:
oset.add_types(tp)
elif isinstance(tp, types.IterableType):
oset.add_types(
types.UniTuple(dtype=tp.iterator_type.yield_type, count=self.count)
)
|
def __call__(self, context, typevars):
oset = typevars[self.target]
for tp in typevars[self.iterator.name].get():
if isinstance(tp, types.IterableType):
oset.add_types(
types.UniTuple(dtype=tp.iterator_type.yield_type, count=self.count)
)
elif isinstance(tp, types.Tuple):
oset.add_types(tp)
|
https://github.com/numba/numba/issues/1151
|
ff = jit(f, nopython=True)
ff((4,5,6,7))
(4, 5, 6)
ff((4,5))
Traceback (most recent call last):
[...]
numba.lowering.LoweringError: Failed at nopython (nopython mode backend)
Internal error:
TypeError: Can't index at [2] in [2 x i32]
File "<stdin>", line 2
|
numba.lowering.LoweringError
|
def __init__(self, arg_count, py_func):
self.tm = default_type_manager
_dispatcher.Dispatcher.__init__(self, self.tm.get_pointer(), arg_count)
# A mapping of signatures to entry points
self.overloads = {}
# A mapping of signatures to compile results
self._compileinfos = {}
# A list of nopython signatures
self._npsigs = []
self.py_func = py_func
# other parts of Numba assume the old Python 2 name for code object
self.func_code = get_code_object(py_func)
# but newer python uses a different name
self.__code__ = self.func_code
self.doc = py_func.__doc__
self._compile_lock = utils.NonReentrantLock()
utils.finalize(self, self._make_finalizer())
|
def __init__(self, arg_count, py_func):
self.tm = default_type_manager
_dispatcher.Dispatcher.__init__(self, self.tm.get_pointer(), arg_count)
# A mapping of signatures to entry points
self.overloads = {}
# A mapping of signatures to compile results
self._compileinfos = {}
# A list of nopython signatures
self._npsigs = []
self.py_func = py_func
# other parts of Numba assume the old Python 2 name for code object
self.func_code = get_code_object(py_func)
# but newer python uses a different name
self.__code__ = self.func_code
self.doc = py_func.__doc__
self._compiling = False
utils.finalize(self, self._make_finalizer())
|
https://github.com/numba/numba/issues/908
|
Exception in thread Thread-14:
Traceback (most recent call last):
File "/home/antoine/34/lib/python3.4/threading.py", line 921, in _bootstrap_inner
self.run()
File "/home/antoine/34/lib/python3.4/threading.py", line 869, in run
self._target(*self._args, **self._kwargs)
File "/home/antoine/numba/numba/dispatcher.py", line 151, in _compile_for_args
return self.jit(sig)
File "/home/antoine/numba/numba/dispatcher.py", line 141, in jit
return self.compile(sig, **kws)
File "/home/antoine/numba/numba/dispatcher.py", line 312, in compile
with self._compile_lock():
File "/home/antoine/34/lib/python3.4/contextlib.py", line 59, in __enter__
return next(self.gen)
File "/home/antoine/numba/numba/dispatcher.py", line 127, in _compile_lock
raise RuntimeError("Compiler re-entrant")
RuntimeError: Compiler re-entrant
|
RuntimeError
|
def is_compiling(self):
"""
Whether a specialization is currently being compiled.
"""
return self._compile_lock.is_owned()
|
def is_compiling(self):
return self._compiling
|
https://github.com/numba/numba/issues/908
|
Exception in thread Thread-14:
Traceback (most recent call last):
File "/home/antoine/34/lib/python3.4/threading.py", line 921, in _bootstrap_inner
self.run()
File "/home/antoine/34/lib/python3.4/threading.py", line 869, in run
self._target(*self._args, **self._kwargs)
File "/home/antoine/numba/numba/dispatcher.py", line 151, in _compile_for_args
return self.jit(sig)
File "/home/antoine/numba/numba/dispatcher.py", line 141, in jit
return self.compile(sig, **kws)
File "/home/antoine/numba/numba/dispatcher.py", line 312, in compile
with self._compile_lock():
File "/home/antoine/34/lib/python3.4/contextlib.py", line 59, in __enter__
return next(self.gen)
File "/home/antoine/numba/numba/dispatcher.py", line 127, in _compile_lock
raise RuntimeError("Compiler re-entrant")
RuntimeError: Compiler re-entrant
|
RuntimeError
|
def compile(self, sig, locals={}, **targetoptions):
with self._compile_lock:
locs = self.locals.copy()
locs.update(locals)
topt = self.targetoptions.copy()
topt.update(targetoptions)
flags = compiler.Flags()
self.targetdescr.options.parse_as_flags(flags, topt)
args, return_type = sigutils.normalize_signature(sig)
# Don't recompile if signature already exist.
existing = self.overloads.get(tuple(args))
if existing is not None:
return existing
cres = compiler.compile_extra(
self.typingctx,
self.targetctx,
self.py_func,
args=args,
return_type=return_type,
flags=flags,
locals=locs,
)
# Check typing error if object mode is used
if cres.typing_error is not None and not flags.enable_pyobject:
raise cres.typing_error
self.add_overload(cres)
return cres.entry_point
|
def compile(self, sig, locals={}, **targetoptions):
with self._compile_lock():
locs = self.locals.copy()
locs.update(locals)
topt = self.targetoptions.copy()
topt.update(targetoptions)
flags = compiler.Flags()
self.targetdescr.options.parse_as_flags(flags, topt)
args, return_type = sigutils.normalize_signature(sig)
# Don't recompile if signature already exist.
existing = self.overloads.get(tuple(args))
if existing is not None:
return existing
cres = compiler.compile_extra(
self.typingctx,
self.targetctx,
self.py_func,
args=args,
return_type=return_type,
flags=flags,
locals=locs,
)
# Check typing error if object mode is used
if cres.typing_error is not None and not flags.enable_pyobject:
raise cres.typing_error
self.add_overload(cres)
return cres.entry_point
|
https://github.com/numba/numba/issues/908
|
Exception in thread Thread-14:
Traceback (most recent call last):
File "/home/antoine/34/lib/python3.4/threading.py", line 921, in _bootstrap_inner
self.run()
File "/home/antoine/34/lib/python3.4/threading.py", line 869, in run
self._target(*self._args, **self._kwargs)
File "/home/antoine/numba/numba/dispatcher.py", line 151, in _compile_for_args
return self.jit(sig)
File "/home/antoine/numba/numba/dispatcher.py", line 141, in jit
return self.compile(sig, **kws)
File "/home/antoine/numba/numba/dispatcher.py", line 312, in compile
with self._compile_lock():
File "/home/antoine/34/lib/python3.4/contextlib.py", line 59, in __enter__
return next(self.gen)
File "/home/antoine/numba/numba/dispatcher.py", line 127, in _compile_lock
raise RuntimeError("Compiler re-entrant")
RuntimeError: Compiler re-entrant
|
RuntimeError
|
def compile(self, sig):
with self._compile_lock:
# FIXME this is mostly duplicated from Overloaded
flags = self.flags
args, return_type = sigutils.normalize_signature(sig)
# Don't recompile if signature already exist.
existing = self.overloads.get(tuple(args))
if existing is not None:
return existing.entry_point
assert not flags.enable_looplift, "Enable looplift flags is on"
cres = compiler.compile_bytecode(
typingctx=self.typingctx,
targetctx=self.targetctx,
bc=self.bytecode,
args=args,
return_type=return_type,
flags=flags,
locals=self.locals,
)
# Check typing error if object mode is used
if cres.typing_error is not None and not flags.enable_pyobject:
raise cres.typing_error
self.add_overload(cres)
return cres.entry_point
|
def compile(self, sig):
with self._compile_lock():
# FIXME this is mostly duplicated from Overloaded
flags = self.flags
args, return_type = sigutils.normalize_signature(sig)
# Don't recompile if signature already exist.
existing = self.overloads.get(tuple(args))
if existing is not None:
return existing.entry_point
assert not flags.enable_looplift, "Enable looplift flags is on"
cres = compiler.compile_bytecode(
typingctx=self.typingctx,
targetctx=self.targetctx,
bc=self.bytecode,
args=args,
return_type=return_type,
flags=flags,
locals=self.locals,
)
# Check typing error if object mode is used
if cres.typing_error is not None and not flags.enable_pyobject:
raise cres.typing_error
self.add_overload(cres)
return cres.entry_point
|
https://github.com/numba/numba/issues/908
|
Exception in thread Thread-14:
Traceback (most recent call last):
File "/home/antoine/34/lib/python3.4/threading.py", line 921, in _bootstrap_inner
self.run()
File "/home/antoine/34/lib/python3.4/threading.py", line 869, in run
self._target(*self._args, **self._kwargs)
File "/home/antoine/numba/numba/dispatcher.py", line 151, in _compile_for_args
return self.jit(sig)
File "/home/antoine/numba/numba/dispatcher.py", line 141, in jit
return self.compile(sig, **kws)
File "/home/antoine/numba/numba/dispatcher.py", line 312, in compile
with self._compile_lock():
File "/home/antoine/34/lib/python3.4/contextlib.py", line 59, in __enter__
return next(self.gen)
File "/home/antoine/numba/numba/dispatcher.py", line 127, in _compile_lock
raise RuntimeError("Compiler re-entrant")
RuntimeError: Compiler re-entrant
|
RuntimeError
|
def _compile_for_args(self, *args, **kws):
"""
For internal use. Compile a specialized version of the function
for the given *args* and *kws*, and return the resulting callable.
"""
assert not kws
sig = tuple([self.typeof_pyval(a) for a in args])
return self.compile(sig)
|
def _compile_for_args(self, *args, **kws):
"""
For internal use. Compile a specialized version of the function
for the given *args* and *kws*, and return the resulting callable.
"""
assert not kws
sig = tuple([self.typeof_pyval(a) for a in args])
return self.jit(sig)
|
https://github.com/numba/numba/issues/908
|
Exception in thread Thread-14:
Traceback (most recent call last):
File "/home/antoine/34/lib/python3.4/threading.py", line 921, in _bootstrap_inner
self.run()
File "/home/antoine/34/lib/python3.4/threading.py", line 869, in run
self._target(*self._args, **self._kwargs)
File "/home/antoine/numba/numba/dispatcher.py", line 151, in _compile_for_args
return self.jit(sig)
File "/home/antoine/numba/numba/dispatcher.py", line 141, in jit
return self.compile(sig, **kws)
File "/home/antoine/numba/numba/dispatcher.py", line 312, in compile
with self._compile_lock():
File "/home/antoine/34/lib/python3.4/contextlib.py", line 59, in __enter__
return next(self.gen)
File "/home/antoine/numba/numba/dispatcher.py", line 127, in _compile_lock
raise RuntimeError("Compiler re-entrant")
RuntimeError: Compiler re-entrant
|
RuntimeError
|
def make_multithread(inner_func, numthreads):
"""
Run the given function inside *numthreads* threads, splitting its
arguments into equal-sized chunks.
"""
def func_mt(*args):
length = len(args[0])
result = np.empty(length, dtype=np.float64)
args = (result,) + args
chunklen = (length + 1) // numthreads
# Create argument tuples for each input chunk
chunks = [
[arg[i * chunklen : (i + 1) * chunklen] for arg in args]
for i in range(numthreads)
]
# Spawn one thread per chunk
threads = [threading.Thread(target=inner_func, args=chunk) for chunk in chunks]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
return result
return func_mt
|
def make_multithread(inner_func, numthreads):
"""
Run the given function inside *numthreads* threads, splitting its
arguments into equal-sized chunks.
"""
def func_mt(*args):
length = len(args[0])
result = np.empty(length, dtype=np.float64)
args = (result,) + args
chunklen = (length + 1) // numthreads
# Create argument tuples for each chunk
chunks = [
[arg[i * chunklen : (i + 1) * chunklen] for arg in args]
for i in range(numthreads)
]
# You should make sure inner_func is compiled at this point, because
# the compilation must happen in a single thread at a time. This is
# the case in this example because we use an explicit signature in jit().
threads = [threading.Thread(target=inner_func, args=chunk) for chunk in chunks]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
return result
return func_mt
|
https://github.com/numba/numba/issues/908
|
Exception in thread Thread-14:
Traceback (most recent call last):
File "/home/antoine/34/lib/python3.4/threading.py", line 921, in _bootstrap_inner
self.run()
File "/home/antoine/34/lib/python3.4/threading.py", line 869, in run
self._target(*self._args, **self._kwargs)
File "/home/antoine/numba/numba/dispatcher.py", line 151, in _compile_for_args
return self.jit(sig)
File "/home/antoine/numba/numba/dispatcher.py", line 141, in jit
return self.compile(sig, **kws)
File "/home/antoine/numba/numba/dispatcher.py", line 312, in compile
with self._compile_lock():
File "/home/antoine/34/lib/python3.4/contextlib.py", line 59, in __enter__
return next(self.gen)
File "/home/antoine/numba/numba/dispatcher.py", line 127, in _compile_lock
raise RuntimeError("Compiler re-entrant")
RuntimeError: Compiler re-entrant
|
RuntimeError
|
def func_mt(*args):
length = len(args[0])
result = np.empty(length, dtype=np.float64)
args = (result,) + args
chunklen = (length + 1) // numthreads
# Create argument tuples for each input chunk
chunks = [
[arg[i * chunklen : (i + 1) * chunklen] for arg in args]
for i in range(numthreads)
]
# Spawn one thread per chunk
threads = [threading.Thread(target=inner_func, args=chunk) for chunk in chunks]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
return result
|
def func_mt(*args):
length = len(args[0])
result = np.empty(length, dtype=np.float64)
args = (result,) + args
chunklen = (length + 1) // numthreads
# Create argument tuples for each chunk
chunks = [
[arg[i * chunklen : (i + 1) * chunklen] for arg in args]
for i in range(numthreads)
]
# You should make sure inner_func is compiled at this point, because
# the compilation must happen in a single thread at a time. This is
# the case in this example because we use an explicit signature in jit().
threads = [threading.Thread(target=inner_func, args=chunk) for chunk in chunks]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
return result
|
https://github.com/numba/numba/issues/908
|
Exception in thread Thread-14:
Traceback (most recent call last):
File "/home/antoine/34/lib/python3.4/threading.py", line 921, in _bootstrap_inner
self.run()
File "/home/antoine/34/lib/python3.4/threading.py", line 869, in run
self._target(*self._args, **self._kwargs)
File "/home/antoine/numba/numba/dispatcher.py", line 151, in _compile_for_args
return self.jit(sig)
File "/home/antoine/numba/numba/dispatcher.py", line 141, in jit
return self.compile(sig, **kws)
File "/home/antoine/numba/numba/dispatcher.py", line 312, in compile
with self._compile_lock():
File "/home/antoine/34/lib/python3.4/contextlib.py", line 59, in __enter__
return next(self.gen)
File "/home/antoine/numba/numba/dispatcher.py", line 127, in _compile_lock
raise RuntimeError("Compiler re-entrant")
RuntimeError: Compiler re-entrant
|
RuntimeError
|
def lower_expr(self, resty, expr):
if expr.op == "binop":
return self.lower_binop(resty, expr)
elif expr.op == "inplace_binop":
lty = self.typeof(expr.lhs.name)
if not lty.mutable:
# inplace operators on non-mutable types reuse the same
# definition as the corresponding copying operators.
return self.lower_binop(resty, expr)
elif expr.op == "unary":
val = self.loadvar(expr.value.name)
typ = self.typeof(expr.value.name)
# Get function
signature = self.fndesc.calltypes[expr]
impl = self.context.get_function(expr.fn, signature)
# Convert argument to match
val = self.context.cast(self.builder, val, typ, signature.args[0])
res = impl(self.builder, [val])
return self.context.cast(self.builder, res, signature.return_type, resty)
elif expr.op == "call":
argvals = [self.loadvar(a.name) for a in expr.args]
argtyps = [self.typeof(a.name) for a in expr.args]
signature = self.fndesc.calltypes[expr]
if isinstance(expr.func, ir.Intrinsic):
fnty = expr.func.name
castvals = expr.func.args
else:
assert not expr.kws, expr.kws
fnty = self.typeof(expr.func.name)
castvals = [
self.context.cast(self.builder, av, at, ft)
for av, at, ft in zip(argvals, argtyps, signature.args)
]
if isinstance(fnty, types.Method):
# Method of objects are handled differently
fnobj = self.loadvar(expr.func.name)
res = self.context.call_class_method(
self.builder, fnobj, signature, castvals
)
elif isinstance(fnty, types.FunctionPointer):
# Handle function pointer
pointer = fnty.funcptr
res = self.context.call_function_pointer(
self.builder, pointer, signature, castvals, fnty.cconv
)
elif isinstance(fnty, cffi_support.ExternCFunction):
# XXX unused?
fndesc = ExternalFunctionDescriptor(
fnty.symbol, fnty.restype, fnty.argtypes
)
func = self.context.declare_external_function(
cgutils.get_module(self.builder), fndesc
)
res = self.context.call_external_function(
self.builder, func, fndesc.argtypes, castvals
)
else:
# Normal function resolution
impl = self.context.get_function(fnty, signature)
if signature.recvr:
# The "self" object is passed as the function object
# for bounded function
the_self = self.loadvar(expr.func.name)
# Prepend the self reference
castvals = [the_self] + castvals
res = impl(self.builder, castvals)
libs = getattr(impl, "libs", ())
for lib in libs:
self.library.add_linking_library(lib)
return self.context.cast(self.builder, res, signature.return_type, resty)
elif expr.op == "pair_first":
val = self.loadvar(expr.value.name)
ty = self.typeof(expr.value.name)
item = self.context.pair_first(self.builder, val, ty)
return self.context.get_argument_value(self.builder, ty.first_type, item)
elif expr.op == "pair_second":
val = self.loadvar(expr.value.name)
ty = self.typeof(expr.value.name)
item = self.context.pair_second(self.builder, val, ty)
return self.context.get_argument_value(self.builder, ty.second_type, item)
elif expr.op in ("getiter", "iternext"):
val = self.loadvar(expr.value.name)
ty = self.typeof(expr.value.name)
signature = self.fndesc.calltypes[expr]
impl = self.context.get_function(expr.op, signature)
[fty] = signature.args
castval = self.context.cast(self.builder, val, ty, fty)
res = impl(self.builder, (castval,))
return self.context.cast(self.builder, res, signature.return_type, resty)
elif expr.op == "exhaust_iter":
val = self.loadvar(expr.value.name)
ty = self.typeof(expr.value.name)
# If we have a heterogenous tuple, we needn't do anything,
# and we can't iterate over it anyway.
if isinstance(ty, types.Tuple):
return val
itemty = ty.iterator_type.yield_type
tup = self.context.get_constant_undef(resty)
pairty = types.Pair(itemty, types.boolean)
getiter_sig = typing.signature(ty.iterator_type, ty)
getiter_impl = self.context.get_function("getiter", getiter_sig)
iternext_sig = typing.signature(pairty, ty.iterator_type)
iternext_impl = self.context.get_function("iternext", iternext_sig)
iterobj = getiter_impl(self.builder, (val,))
excid = self.add_exception(ValueError)
# We call iternext() as many times as desired (`expr.count`).
for i in range(expr.count):
pair = iternext_impl(self.builder, (iterobj,))
is_valid = self.context.pair_second(self.builder, pair, pairty)
with cgutils.if_unlikely(self.builder, self.builder.not_(is_valid)):
self.context.return_user_exc(self.builder, excid)
item = self.context.pair_first(self.builder, pair, pairty)
tup = self.builder.insert_value(tup, item, i)
# Call iternext() once more to check that the iterator
# is exhausted.
pair = iternext_impl(self.builder, (iterobj,))
is_valid = self.context.pair_second(self.builder, pair, pairty)
with cgutils.if_unlikely(self.builder, is_valid):
self.context.return_user_exc(self.builder, excid)
return tup
elif expr.op == "getattr":
val = self.loadvar(expr.value.name)
ty = self.typeof(expr.value.name)
if isinstance(resty, types.BoundFunction):
# if we are getting out a method, assume we have typed this
# properly and just build a bound function object
res = self.context.get_bound_function(self.builder, val, ty)
else:
impl = self.context.get_attribute(val, ty, expr.attr)
if impl is None:
# ignore the attribute
res = self.context.get_dummy_value()
else:
res = impl(self.context, self.builder, ty, val, expr.attr)
return res
elif expr.op == "static_getitem":
baseval = self.loadvar(expr.value.name)
indexval = self.context.get_constant(types.intp, expr.index)
if cgutils.is_struct(baseval.type):
# Statically extract the given element from the structure
# (structures aren't dynamically indexable).
return self.builder.extract_value(baseval, expr.index)
else:
# Fall back on the generic getitem() implementation
# for this type.
signature = typing.signature(
resty, self.typeof(expr.value.name), types.intp
)
impl = self.context.get_function("getitem", signature)
argvals = (baseval, indexval)
res = impl(self.builder, argvals)
return self.context.cast(self.builder, res, signature.return_type, resty)
elif expr.op == "getitem":
baseval = self.loadvar(expr.value.name)
indexval = self.loadvar(expr.index.name)
signature = self.fndesc.calltypes[expr]
impl = self.context.get_function("getitem", signature)
argvals = (baseval, indexval)
argtyps = (self.typeof(expr.value.name), self.typeof(expr.index.name))
castvals = [
self.context.cast(self.builder, av, at, ft)
for av, at, ft in zip(argvals, argtyps, signature.args)
]
res = impl(self.builder, castvals)
return self.context.cast(self.builder, res, signature.return_type, resty)
elif expr.op == "build_tuple":
itemvals = [self.loadvar(i.name) for i in expr.items]
itemtys = [self.typeof(i.name) for i in expr.items]
castvals = [
self.context.cast(self.builder, val, fromty, toty)
for val, toty, fromty in zip(itemvals, resty, itemtys)
]
tup = self.context.get_constant_undef(resty)
for i in range(len(castvals)):
tup = self.builder.insert_value(tup, castvals[i], i)
return tup
elif expr.op == "cast":
val = self.loadvar(expr.value.name)
ty = self.typeof(expr.value.name)
castval = self.context.cast(self.builder, val, ty, resty)
return castval
raise NotImplementedError(expr)
|
def lower_expr(self, resty, expr):
if expr.op == "binop":
return self.lower_binop(resty, expr)
elif expr.op == "inplace_binop":
lty = self.typeof(expr.lhs.name)
if not lty.mutable:
# inplace operators on non-mutable types reuse the same
# definition as the corresponding copying operators.
return self.lower_binop(resty, expr)
elif expr.op == "unary":
val = self.loadvar(expr.value.name)
typ = self.typeof(expr.value.name)
# Get function
signature = self.fndesc.calltypes[expr]
impl = self.context.get_function(expr.fn, signature)
# Convert argument to match
val = self.context.cast(self.builder, val, typ, signature.args[0])
res = impl(self.builder, [val])
return self.context.cast(self.builder, res, signature.return_type, resty)
elif expr.op == "call":
argvals = [self.loadvar(a.name) for a in expr.args]
argtyps = [self.typeof(a.name) for a in expr.args]
signature = self.fndesc.calltypes[expr]
if isinstance(expr.func, ir.Intrinsic):
fnty = expr.func.name
castvals = expr.func.args
else:
assert not expr.kws, expr.kws
fnty = self.typeof(expr.func.name)
castvals = [
self.context.cast(self.builder, av, at, ft)
for av, at, ft in zip(argvals, argtyps, signature.args)
]
if isinstance(fnty, types.Method):
# Method of objects are handled differently
fnobj = self.loadvar(expr.func.name)
res = self.context.call_class_method(
self.builder, fnobj, signature, castvals
)
elif isinstance(fnty, types.FunctionPointer):
# Handle function pointer
pointer = fnty.funcptr
res = self.context.call_function_pointer(
self.builder, pointer, signature, castvals
)
elif isinstance(fnty, cffi_support.ExternCFunction):
# XXX unused?
fndesc = ExternalFunctionDescriptor(
fnty.symbol, fnty.restype, fnty.argtypes
)
func = self.context.declare_external_function(
cgutils.get_module(self.builder), fndesc
)
res = self.context.call_external_function(
self.builder, func, fndesc.argtypes, castvals
)
else:
# Normal function resolution
impl = self.context.get_function(fnty, signature)
if signature.recvr:
# The "self" object is passed as the function object
# for bounded function
the_self = self.loadvar(expr.func.name)
# Prepend the self reference
castvals = [the_self] + castvals
res = impl(self.builder, castvals)
libs = getattr(impl, "libs", ())
for lib in libs:
self.library.add_linking_library(lib)
return self.context.cast(self.builder, res, signature.return_type, resty)
elif expr.op == "pair_first":
val = self.loadvar(expr.value.name)
ty = self.typeof(expr.value.name)
item = self.context.pair_first(self.builder, val, ty)
return self.context.get_argument_value(self.builder, ty.first_type, item)
elif expr.op == "pair_second":
val = self.loadvar(expr.value.name)
ty = self.typeof(expr.value.name)
item = self.context.pair_second(self.builder, val, ty)
return self.context.get_argument_value(self.builder, ty.second_type, item)
elif expr.op in ("getiter", "iternext"):
val = self.loadvar(expr.value.name)
ty = self.typeof(expr.value.name)
signature = self.fndesc.calltypes[expr]
impl = self.context.get_function(expr.op, signature)
[fty] = signature.args
castval = self.context.cast(self.builder, val, ty, fty)
res = impl(self.builder, (castval,))
return self.context.cast(self.builder, res, signature.return_type, resty)
elif expr.op == "exhaust_iter":
val = self.loadvar(expr.value.name)
ty = self.typeof(expr.value.name)
# If we have a heterogenous tuple, we needn't do anything,
# and we can't iterate over it anyway.
if isinstance(ty, types.Tuple):
return val
itemty = ty.iterator_type.yield_type
tup = self.context.get_constant_undef(resty)
pairty = types.Pair(itemty, types.boolean)
getiter_sig = typing.signature(ty.iterator_type, ty)
getiter_impl = self.context.get_function("getiter", getiter_sig)
iternext_sig = typing.signature(pairty, ty.iterator_type)
iternext_impl = self.context.get_function("iternext", iternext_sig)
iterobj = getiter_impl(self.builder, (val,))
excid = self.add_exception(ValueError)
# We call iternext() as many times as desired (`expr.count`).
for i in range(expr.count):
pair = iternext_impl(self.builder, (iterobj,))
is_valid = self.context.pair_second(self.builder, pair, pairty)
with cgutils.if_unlikely(self.builder, self.builder.not_(is_valid)):
self.context.return_user_exc(self.builder, excid)
item = self.context.pair_first(self.builder, pair, pairty)
tup = self.builder.insert_value(tup, item, i)
# Call iternext() once more to check that the iterator
# is exhausted.
pair = iternext_impl(self.builder, (iterobj,))
is_valid = self.context.pair_second(self.builder, pair, pairty)
with cgutils.if_unlikely(self.builder, is_valid):
self.context.return_user_exc(self.builder, excid)
return tup
elif expr.op == "getattr":
val = self.loadvar(expr.value.name)
ty = self.typeof(expr.value.name)
if isinstance(resty, types.BoundFunction):
# if we are getting out a method, assume we have typed this
# properly and just build a bound function object
res = self.context.get_bound_function(self.builder, val, ty)
else:
impl = self.context.get_attribute(val, ty, expr.attr)
if impl is None:
# ignore the attribute
res = self.context.get_dummy_value()
else:
res = impl(self.context, self.builder, ty, val, expr.attr)
return res
elif expr.op == "static_getitem":
baseval = self.loadvar(expr.value.name)
indexval = self.context.get_constant(types.intp, expr.index)
if cgutils.is_struct(baseval.type):
# Statically extract the given element from the structure
# (structures aren't dynamically indexable).
return self.builder.extract_value(baseval, expr.index)
else:
# Fall back on the generic getitem() implementation
# for this type.
signature = typing.signature(
resty, self.typeof(expr.value.name), types.intp
)
impl = self.context.get_function("getitem", signature)
argvals = (baseval, indexval)
res = impl(self.builder, argvals)
return self.context.cast(self.builder, res, signature.return_type, resty)
elif expr.op == "getitem":
baseval = self.loadvar(expr.value.name)
indexval = self.loadvar(expr.index.name)
signature = self.fndesc.calltypes[expr]
impl = self.context.get_function("getitem", signature)
argvals = (baseval, indexval)
argtyps = (self.typeof(expr.value.name), self.typeof(expr.index.name))
castvals = [
self.context.cast(self.builder, av, at, ft)
for av, at, ft in zip(argvals, argtyps, signature.args)
]
res = impl(self.builder, castvals)
return self.context.cast(self.builder, res, signature.return_type, resty)
elif expr.op == "build_tuple":
itemvals = [self.loadvar(i.name) for i in expr.items]
itemtys = [self.typeof(i.name) for i in expr.items]
castvals = [
self.context.cast(self.builder, val, fromty, toty)
for val, toty, fromty in zip(itemvals, resty, itemtys)
]
tup = self.context.get_constant_undef(resty)
for i in range(len(castvals)):
tup = self.builder.insert_value(tup, castvals[i], i)
return tup
elif expr.op == "cast":
val = self.loadvar(expr.value.name)
ty = self.typeof(expr.value.name)
castval = self.context.cast(self.builder, val, ty, resty)
return castval
raise NotImplementedError(expr)
|
https://github.com/numba/numba/issues/903
|
ctypes.windll.kernel32.Sleep(100)
0
ctypes.cdll.kernel32.Sleep(100)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: Procedure called with not enough arguments (4 bytes missing) or wron
g calling convention
|
ValueError
|
def call_function_pointer(self, builder, funcptr, signature, args, cconv=None):
retty = self.get_value_type(signature.return_type)
fnty = Type.function(retty, [a.type for a in args])
fnptrty = Type.pointer(fnty)
addr = self.get_constant(types.intp, funcptr)
ptr = builder.inttoptr(addr, fnptrty)
return builder.call(ptr, args, cconv=cconv)
|
def call_function_pointer(self, builder, funcptr, signature, args):
retty = self.get_value_type(signature.return_type)
fnty = Type.function(retty, [a.type for a in args])
fnptrty = Type.pointer(fnty)
addr = self.get_constant(types.intp, funcptr)
ptr = builder.inttoptr(addr, fnptrty)
return builder.call(ptr, args)
|
https://github.com/numba/numba/issues/903
|
ctypes.windll.kernel32.Sleep(100)
0
ctypes.cdll.kernel32.Sleep(100)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: Procedure called with not enough arguments (4 bytes missing) or wron
g calling convention
|
ValueError
|
def __init__(self, template, funcptr, cconv=None):
self.funcptr = funcptr
self.cconv = cconv
super(FunctionPointer, self).__init__(template)
|
def __init__(self, template, funcptr):
self.funcptr = funcptr
super(FunctionPointer, self).__init__(template)
|
https://github.com/numba/numba/issues/903
|
ctypes.windll.kernel32.Sleep(100)
0
ctypes.cdll.kernel32.Sleep(100)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: Procedure called with not enough arguments (4 bytes missing) or wron
g calling convention
|
ValueError
|
def make_function_type(cfnptr):
if cfnptr.argtypes is None:
raise TypeError(
"ctypes function %r doesn't define its argument types; "
"consider setting the `argtypes` attribute" % (cfnptr.__name__,)
)
cargs = [convert_ctypes(a) for a in cfnptr.argtypes]
cret = convert_ctypes(cfnptr.restype)
if sys.platform == "win32" and not cfnptr._flags_ & ctypes._FUNCFLAG_CDECL:
# 'stdcall' calling convention under Windows
cconv = "x86_stdcallcc"
else:
# Default C calling convention
cconv = None
cases = [templates.signature(cret, *cargs)]
template = templates.make_concrete_template("CFuncPtr", cfnptr, cases)
pointer = ctypes.cast(cfnptr, ctypes.c_void_p).value
return types.FunctionPointer(template, pointer, cconv=cconv)
|
def make_function_type(cfnptr):
cargs = [convert_ctypes(a) for a in cfnptr.argtypes]
cret = convert_ctypes(cfnptr.restype)
cases = [templates.signature(cret, *cargs)]
template = templates.make_concrete_template("CFuncPtr", cfnptr, cases)
pointer = ctypes.cast(cfnptr, ctypes.c_void_p).value
return types.FunctionPointer(template, pointer)
|
https://github.com/numba/numba/issues/903
|
ctypes.windll.kernel32.Sleep(100)
0
ctypes.cdll.kernel32.Sleep(100)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: Procedure called with not enough arguments (4 bytes missing) or wron
g calling convention
|
ValueError
|
def _explain_ambiguous(self, *args, **kws):
assert not kws, "kwargs not handled"
args = tuple([self.typeof_pyval(a) for a in args])
sigs = [cr.signature for cr in self._compileinfos.values()]
resolve_overload(self.typingctx, self.py_func, sigs, args, kws)
|
def _explain_ambiguous(self, *args, **kws):
assert not kws, "kwargs not handled"
args = tuple([self.typeof_pyval(a) for a in args])
resolve_overload(
self.typingctx, self.py_func, tuple(self.overloads.keys()), args, kws
)
|
https://github.com/numba/numba/issues/776
|
Traceback (most recent call last):
File "test_disp.py", line 15, in <module>
foo(1, 1)
File "/Users/sklam/dev/numba/numba/dispatcher.py", line 161, in _explain_ambiguous
tuple(self.overloads.keys()), args, kws)
File "/Users/sklam/dev/numba/numba/typing/templates.py", line 84, in resolve_overload
if len(args) == len(case.args):
AttributeError: 'tuple' object has no attribute 'args'
|
AttributeError
|
def __init__(self, variable, context, types, assignment=False, **kwds):
super(PromotionType, self).__init__(variable, **kwds)
self.context = context
self.types = oset.OrderedSet(types)
self.assignment = assignment
variable.type = self
self.add_parents(type for type in types if type.is_unresolved)
self.count = PromotionType.count
PromotionType.count += 1
|
def __init__(self, variable, context, types, assignment=False, **kwds):
super(PromotionType, self).__init__(variable, **kwds)
self.context = context
self.types = types
self.assignment = assignment
variable.type = self
self.add_parents(type for type in types if type.is_unresolved)
self.count = PromotionType.count
PromotionType.count += 1
|
https://github.com/numba/numba/issues/117
|
Traceback (most recent call last):
File "./test_typeinfer_bug.py", line 26, in <module>
test()
File "./test_typeinfer_bug.py", line 23, in test
jenks_matrices(data)
File "numbawrapper.pyx", line 93, in numba.numbawrapper.NumbaSpecializingWrapper.__call__ (numba/numbawrapper.c:2827)
File "/Users/sklam/dev/numba/numba/decorators.py", line 211, in compile_function
compiled_function = dec(f)
File "/Users/sklam/dev/numba/numba/decorators.py", line 299, in _jit2_decorator
**kwargs)
File "/Users/sklam/dev/numba/numba/functions.py", line 222, in compile_function
ctypes=ctypes, **kwds)
File "/Users/sklam/dev/numba/numba/pipeline.py", line 397, in compile
context, func, restype, argtypes, codegen=True, **kwds)
File "/Users/sklam/dev/numba/numba/pipeline.py", line 365, in _infer_types
return run_pipeline(context, func, ast, func_signature, **kwargs)
File "/Users/sklam/dev/numba/numba/pipeline.py", line 359, in run_pipeline
return pipeline, pipeline.run_pipeline()
File "/Users/sklam/dev/numba/numba/pipeline.py", line 188, in run_pipeline
ast = getattr(self, method_name)(ast)
File "/Users/sklam/dev/numba/numba/pipeline.py", line 255, in type_infer
type_inference.TypeInferer, ast, **self.kwargs)
File "/Users/sklam/dev/numba/numba/pipeline.py", line 149, in make_specializer
**kwds)
File "/Users/sklam/dev/numba/numba/type_inference/infer.py", line 170, in __init__
self.init_locals()
File "/Users/sklam/dev/numba/numba/type_inference/infer.py", line 249, in init_locals
self.resolve_variable_types()
File "/Users/sklam/dev/numba/numba/type_inference/infer.py", line 495, in resolve_variable_types
self.remove_resolved_type(start_point)
File "/Users/sklam/dev/numba/numba/type_inference/infer.py", line 393, in remove_resolved_type
assert not type.is_scc
AssertionError
|
AssertionError
|
def find_types(self, seen):
types = oset.OrderedSet([self])
seen.add(self)
seen.add(self.variable.deferred_type)
self.dfs(types, seen)
types.remove(self)
return types
|
def find_types(self, seen):
types = set([self])
seen.add(self)
seen.add(self.variable.deferred_type)
self.dfs(types, seen)
types.remove(self)
return types
|
https://github.com/numba/numba/issues/117
|
Traceback (most recent call last):
File "./test_typeinfer_bug.py", line 26, in <module>
test()
File "./test_typeinfer_bug.py", line 23, in test
jenks_matrices(data)
File "numbawrapper.pyx", line 93, in numba.numbawrapper.NumbaSpecializingWrapper.__call__ (numba/numbawrapper.c:2827)
File "/Users/sklam/dev/numba/numba/decorators.py", line 211, in compile_function
compiled_function = dec(f)
File "/Users/sklam/dev/numba/numba/decorators.py", line 299, in _jit2_decorator
**kwargs)
File "/Users/sklam/dev/numba/numba/functions.py", line 222, in compile_function
ctypes=ctypes, **kwds)
File "/Users/sklam/dev/numba/numba/pipeline.py", line 397, in compile
context, func, restype, argtypes, codegen=True, **kwds)
File "/Users/sklam/dev/numba/numba/pipeline.py", line 365, in _infer_types
return run_pipeline(context, func, ast, func_signature, **kwargs)
File "/Users/sklam/dev/numba/numba/pipeline.py", line 359, in run_pipeline
return pipeline, pipeline.run_pipeline()
File "/Users/sklam/dev/numba/numba/pipeline.py", line 188, in run_pipeline
ast = getattr(self, method_name)(ast)
File "/Users/sklam/dev/numba/numba/pipeline.py", line 255, in type_infer
type_inference.TypeInferer, ast, **self.kwargs)
File "/Users/sklam/dev/numba/numba/pipeline.py", line 149, in make_specializer
**kwds)
File "/Users/sklam/dev/numba/numba/type_inference/infer.py", line 170, in __init__
self.init_locals()
File "/Users/sklam/dev/numba/numba/type_inference/infer.py", line 249, in init_locals
self.resolve_variable_types()
File "/Users/sklam/dev/numba/numba/type_inference/infer.py", line 495, in resolve_variable_types
self.remove_resolved_type(start_point)
File "/Users/sklam/dev/numba/numba/type_inference/infer.py", line 393, in remove_resolved_type
assert not type.is_scc
AssertionError
|
AssertionError
|
def find_simple(self, seen):
types = oset.OrderedSet()
for type in self.types:
if type.is_promotion:
types.add(type.types)
else:
type.add(type)
return types
|
def find_simple(self, seen):
types = set()
for type in self.types:
if type.is_promotion:
types.add(type.types)
else:
type.add(type)
return types
|
https://github.com/numba/numba/issues/117
|
Traceback (most recent call last):
File "./test_typeinfer_bug.py", line 26, in <module>
test()
File "./test_typeinfer_bug.py", line 23, in test
jenks_matrices(data)
File "numbawrapper.pyx", line 93, in numba.numbawrapper.NumbaSpecializingWrapper.__call__ (numba/numbawrapper.c:2827)
File "/Users/sklam/dev/numba/numba/decorators.py", line 211, in compile_function
compiled_function = dec(f)
File "/Users/sklam/dev/numba/numba/decorators.py", line 299, in _jit2_decorator
**kwargs)
File "/Users/sklam/dev/numba/numba/functions.py", line 222, in compile_function
ctypes=ctypes, **kwds)
File "/Users/sklam/dev/numba/numba/pipeline.py", line 397, in compile
context, func, restype, argtypes, codegen=True, **kwds)
File "/Users/sklam/dev/numba/numba/pipeline.py", line 365, in _infer_types
return run_pipeline(context, func, ast, func_signature, **kwargs)
File "/Users/sklam/dev/numba/numba/pipeline.py", line 359, in run_pipeline
return pipeline, pipeline.run_pipeline()
File "/Users/sklam/dev/numba/numba/pipeline.py", line 188, in run_pipeline
ast = getattr(self, method_name)(ast)
File "/Users/sklam/dev/numba/numba/pipeline.py", line 255, in type_infer
type_inference.TypeInferer, ast, **self.kwargs)
File "/Users/sklam/dev/numba/numba/pipeline.py", line 149, in make_specializer
**kwds)
File "/Users/sklam/dev/numba/numba/type_inference/infer.py", line 170, in __init__
self.init_locals()
File "/Users/sklam/dev/numba/numba/type_inference/infer.py", line 249, in init_locals
self.resolve_variable_types()
File "/Users/sklam/dev/numba/numba/type_inference/infer.py", line 495, in resolve_variable_types
self.remove_resolved_type(start_point)
File "/Users/sklam/dev/numba/numba/type_inference/infer.py", line 393, in remove_resolved_type
assert not type.is_scc
AssertionError
|
AssertionError
|
def _simplify(self, seen=None):
"""
Simplify a promotion type tree:
promote(int_, float_)
-> float_
promote(deferred(x), promote(float_, double), int_, promote(<self>))
-> promote(deferred(x), double)
promote(deferred(x), deferred(y))
-> promote(deferred(x), deferred(y))
"""
if seen is None:
seen = set()
# Find all types in the type graph and eliminate nested promotion types
types = self.find_types(seen)
# types = self.find_simple(seen)
resolved_types = [type for type in types if not type.is_unresolved]
unresolved_types = [type for type in types if type.is_unresolved]
self.get_partial_types(unresolved_types)
self.variable.type = self
if not resolved_types:
# Everything is deferred
self.resolved_type = None
return False
else:
# Simplify as much as possible
if self.assignment:
result_type, unresolved_types = promote_for_assignment(
self.context, resolved_types, unresolved_types, self.variable.name
)
else:
result_type = promote_for_arithmetic(self.context, resolved_types)
self.resolved_type = result_type
if len(resolved_types) == len(types) or not unresolved_types:
self.variable.type = result_type
return True
else:
old_types = self.types
self.types = oset.OrderedSet([result_type] + unresolved_types)
return old_types != self.types
|
def _simplify(self, seen=None):
"""
Simplify a promotion type tree:
promote(int_, float_)
-> float_
promote(deferred(x), promote(float_, double), int_, promote(<self>))
-> promote(deferred(x), double)
promote(deferred(x), deferred(y))
-> promote(deferred(x), deferred(y))
"""
if seen is None:
seen = set()
# Find all types in the type graph and eliminate nested promotion types
types = self.find_types(seen)
# types = self.find_simple(seen)
resolved_types = [type for type in types if not type.is_unresolved]
unresolved_types = [type for type in types if type.is_unresolved]
self.get_partial_types(unresolved_types)
self.variable.type = self
if not resolved_types:
# Everything is deferred
self.resolved_type = None
return False
else:
# Simplify as much as possible
if self.assignment:
result_type, unresolved_types = promote_for_assignment(
self.context, resolved_types, unresolved_types, self.variable.name
)
else:
result_type = promote_for_arithmetic(self.context, resolved_types)
self.resolved_type = result_type
if len(resolved_types) == len(types) or not unresolved_types:
self.variable.type = result_type
return True
else:
old_types = self.types
self.types = set([result_type] + unresolved_types)
return old_types != self.types
|
https://github.com/numba/numba/issues/117
|
Traceback (most recent call last):
File "./test_typeinfer_bug.py", line 26, in <module>
test()
File "./test_typeinfer_bug.py", line 23, in test
jenks_matrices(data)
File "numbawrapper.pyx", line 93, in numba.numbawrapper.NumbaSpecializingWrapper.__call__ (numba/numbawrapper.c:2827)
File "/Users/sklam/dev/numba/numba/decorators.py", line 211, in compile_function
compiled_function = dec(f)
File "/Users/sklam/dev/numba/numba/decorators.py", line 299, in _jit2_decorator
**kwargs)
File "/Users/sklam/dev/numba/numba/functions.py", line 222, in compile_function
ctypes=ctypes, **kwds)
File "/Users/sklam/dev/numba/numba/pipeline.py", line 397, in compile
context, func, restype, argtypes, codegen=True, **kwds)
File "/Users/sklam/dev/numba/numba/pipeline.py", line 365, in _infer_types
return run_pipeline(context, func, ast, func_signature, **kwargs)
File "/Users/sklam/dev/numba/numba/pipeline.py", line 359, in run_pipeline
return pipeline, pipeline.run_pipeline()
File "/Users/sklam/dev/numba/numba/pipeline.py", line 188, in run_pipeline
ast = getattr(self, method_name)(ast)
File "/Users/sklam/dev/numba/numba/pipeline.py", line 255, in type_infer
type_inference.TypeInferer, ast, **self.kwargs)
File "/Users/sklam/dev/numba/numba/pipeline.py", line 149, in make_specializer
**kwds)
File "/Users/sklam/dev/numba/numba/type_inference/infer.py", line 170, in __init__
self.init_locals()
File "/Users/sklam/dev/numba/numba/type_inference/infer.py", line 249, in init_locals
self.resolve_variable_types()
File "/Users/sklam/dev/numba/numba/type_inference/infer.py", line 495, in resolve_variable_types
self.remove_resolved_type(start_point)
File "/Users/sklam/dev/numba/numba/type_inference/infer.py", line 393, in remove_resolved_type
assert not type.is_scc
AssertionError
|
AssertionError
|
def __init__(self, scc, **kwds):
super(StronglyConnectedCircularType, self).__init__(None, **kwds)
self.scc = scc
types = oset.OrderedSet(scc)
for type in scc:
self.add_children(type.children - types)
self.add_parents(type.parents - types)
self.types = scc
self.promotions = oset.OrderedSet(type for type in scc if type.is_promotion)
self.reanalyzeable = oset.OrderedSet(
type for type in scc if type.is_reanalyze_circular
)
|
def __init__(self, scc, **kwds):
super(StronglyConnectedCircularType, self).__init__(None, **kwds)
self.scc = scc
types = set(scc)
for type in scc:
self.add_children(type.children - types)
self.add_parents(type.parents - types)
self.types = scc
self.promotions = set(type for type in scc if type.is_promotion)
self.reanalyzeable = set(type for type in scc if type.is_reanalyze_circular)
|
https://github.com/numba/numba/issues/117
|
Traceback (most recent call last):
File "./test_typeinfer_bug.py", line 26, in <module>
test()
File "./test_typeinfer_bug.py", line 23, in test
jenks_matrices(data)
File "numbawrapper.pyx", line 93, in numba.numbawrapper.NumbaSpecializingWrapper.__call__ (numba/numbawrapper.c:2827)
File "/Users/sklam/dev/numba/numba/decorators.py", line 211, in compile_function
compiled_function = dec(f)
File "/Users/sklam/dev/numba/numba/decorators.py", line 299, in _jit2_decorator
**kwargs)
File "/Users/sklam/dev/numba/numba/functions.py", line 222, in compile_function
ctypes=ctypes, **kwds)
File "/Users/sklam/dev/numba/numba/pipeline.py", line 397, in compile
context, func, restype, argtypes, codegen=True, **kwds)
File "/Users/sklam/dev/numba/numba/pipeline.py", line 365, in _infer_types
return run_pipeline(context, func, ast, func_signature, **kwargs)
File "/Users/sklam/dev/numba/numba/pipeline.py", line 359, in run_pipeline
return pipeline, pipeline.run_pipeline()
File "/Users/sklam/dev/numba/numba/pipeline.py", line 188, in run_pipeline
ast = getattr(self, method_name)(ast)
File "/Users/sklam/dev/numba/numba/pipeline.py", line 255, in type_infer
type_inference.TypeInferer, ast, **self.kwargs)
File "/Users/sklam/dev/numba/numba/pipeline.py", line 149, in make_specializer
**kwds)
File "/Users/sklam/dev/numba/numba/type_inference/infer.py", line 170, in __init__
self.init_locals()
File "/Users/sklam/dev/numba/numba/type_inference/infer.py", line 249, in init_locals
self.resolve_variable_types()
File "/Users/sklam/dev/numba/numba/type_inference/infer.py", line 495, in resolve_variable_types
self.remove_resolved_type(start_point)
File "/Users/sklam/dev/numba/numba/type_inference/infer.py", line 393, in remove_resolved_type
assert not type.is_scc
AssertionError
|
AssertionError
|
def simplify(self):
if self.reanalyzeable:
self.retry_infer()
elif self.promotions:
self.resolve_promotion_cycles()
else:
# All dependencies are resolved, we are done
pass
self.is_resolved = True
|
def simplify(self):
if self.reanalyzeable:
self.retry_infer()
elif self.promotions:
self.resolve_promotion_cycles()
else:
assert False
self.is_resolved = True
|
https://github.com/numba/numba/issues/117
|
Traceback (most recent call last):
File "./test_typeinfer_bug.py", line 26, in <module>
test()
File "./test_typeinfer_bug.py", line 23, in test
jenks_matrices(data)
File "numbawrapper.pyx", line 93, in numba.numbawrapper.NumbaSpecializingWrapper.__call__ (numba/numbawrapper.c:2827)
File "/Users/sklam/dev/numba/numba/decorators.py", line 211, in compile_function
compiled_function = dec(f)
File "/Users/sklam/dev/numba/numba/decorators.py", line 299, in _jit2_decorator
**kwargs)
File "/Users/sklam/dev/numba/numba/functions.py", line 222, in compile_function
ctypes=ctypes, **kwds)
File "/Users/sklam/dev/numba/numba/pipeline.py", line 397, in compile
context, func, restype, argtypes, codegen=True, **kwds)
File "/Users/sklam/dev/numba/numba/pipeline.py", line 365, in _infer_types
return run_pipeline(context, func, ast, func_signature, **kwargs)
File "/Users/sklam/dev/numba/numba/pipeline.py", line 359, in run_pipeline
return pipeline, pipeline.run_pipeline()
File "/Users/sklam/dev/numba/numba/pipeline.py", line 188, in run_pipeline
ast = getattr(self, method_name)(ast)
File "/Users/sklam/dev/numba/numba/pipeline.py", line 255, in type_infer
type_inference.TypeInferer, ast, **self.kwargs)
File "/Users/sklam/dev/numba/numba/pipeline.py", line 149, in make_specializer
**kwds)
File "/Users/sklam/dev/numba/numba/type_inference/infer.py", line 170, in __init__
self.init_locals()
File "/Users/sklam/dev/numba/numba/type_inference/infer.py", line 249, in init_locals
self.resolve_variable_types()
File "/Users/sklam/dev/numba/numba/type_inference/infer.py", line 495, in resolve_variable_types
self.remove_resolved_type(start_point)
File "/Users/sklam/dev/numba/numba/type_inference/infer.py", line 393, in remove_resolved_type
assert not type.is_scc
AssertionError
|
AssertionError
|
def pprint(self, *args, **kws):
pprint.pprint(self.__dict__, *args, **kws)
|
def pprint(self, *args, **kws):
pprint.pprint(self.__dict__)
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def build_cfg(cls, code_obj, *args, **kws):
ret_val = cls(*args, **kws)
opmap = opcode.opname
ret_val.crnt_block = 0
ret_val.code_len = len(code_obj.co_code)
ret_val.add_block(0)
ret_val.blocks_writes[0] = set(range(code_obj.co_argcount))
last_was_jump = True # At start there is no prior basic block
# to link up with, so skip building a
# fallthrough edge.
for i, op, arg in itercode(code_obj.co_code):
if i in ret_val.blocks:
if not last_was_jump:
ret_val.add_edge(ret_val.crnt_block, i)
ret_val.crnt_block = i
last_was_jump = False
method_name = "op_" + opmap[op]
if hasattr(ret_val, method_name):
last_was_jump = getattr(ret_val, method_name)(i, op, arg)
ret_val.unlink_unreachables()
del ret_val.crnt_block, ret_val.code_len
return ret_val
|
def build_cfg(cls, code_obj, *args, **kws):
ret_val = cls(*args, **kws)
opmap = opcode.opname
ret_val.crnt_block = 0
ret_val.code_len = len(code_obj.co_code)
ret_val.add_block(0)
ret_val.blocks_writes[0] = set(range(code_obj.co_argcount))
last_was_jump = True # At start there is no prior basic block
# to link up with, so skip building a
# fallthrough edge.
for i, op, arg in itercode(code_obj.co_code):
if i in ret_val.blocks:
if not last_was_jump:
ret_val.add_edge(ret_val.crnt_block, i)
ret_val.crnt_block = i
last_was_jump = False
method_name = "op_" + opmap[op]
if hasattr(ret_val, method_name):
last_was_jump = getattr(ret_val, method_name)(i, op, arg)
del ret_val.crnt_block, ret_val.code_len
return ret_val
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def compute_use_defs(blocks):
"""
Find variable use/def per block.
"""
var_use_map = {} # { block offset -> set of vars }
var_def_map = {} # { block offset -> set of vars }
for offset, ir_block in blocks.items():
var_use_map[offset] = use_set = set()
var_def_map[offset] = def_set = set()
for stmt in ir_block.body:
if type(stmt) in ir_extension_usedefs:
func = ir_extension_usedefs[type(stmt)]
func(stmt, use_set, def_set)
continue
if isinstance(stmt, ir.Assign):
if isinstance(stmt.value, ir.Inst):
rhs_set = set(var.name for var in stmt.value.list_vars())
elif isinstance(stmt.value, ir.Var):
rhs_set = set([stmt.value.name])
elif isinstance(stmt.value, (ir.Arg, ir.Const, ir.Global, ir.FreeVar)):
rhs_set = ()
else:
raise AssertionError("unreachable", type(stmt.value))
# If lhs not in rhs of the assignment
if stmt.target.name not in rhs_set:
def_set.add(stmt.target.name)
for var in stmt.list_vars():
# do not include locally defined vars to use-map
if var.name not in def_set:
use_set.add(var.name)
return _use_defs_result(usemap=var_use_map, defmap=var_def_map)
|
def compute_use_defs(blocks):
"""
Find variable use/def per block.
"""
var_use_map = {} # { block offset -> set of vars }
var_def_map = {} # { block offset -> set of vars }
for offset, ir_block in blocks.items():
var_use_map[offset] = use_set = set()
var_def_map[offset] = def_set = set()
for stmt in ir_block.body:
for T, def_func in ir_extension_defs.items():
if isinstance(stmt, T):
def_set.update(def_func(stmt))
if isinstance(stmt, ir.Assign):
if isinstance(stmt.value, ir.Inst):
rhs_set = set(var.name for var in stmt.value.list_vars())
elif isinstance(stmt.value, ir.Var):
rhs_set = set([stmt.value.name])
elif isinstance(stmt.value, (ir.Arg, ir.Const, ir.Global, ir.FreeVar)):
rhs_set = ()
else:
raise AssertionError("unreachable", type(stmt.value))
# If lhs not in rhs of the assignment
if stmt.target.name not in rhs_set:
def_set.add(stmt.target.name)
for var in stmt.list_vars():
# do not include locally defined vars to use-map
if var.name not in def_set:
use_set.add(var.name)
return _use_defs_result(usemap=var_use_map, defmap=var_def_map)
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def _analyze_inst(self, inst):
if isinstance(inst, ir.Assign):
return self._analyze_assign(inst)
elif type(inst) in array_analysis_extensions:
# let external calls handle stmt if type matches
f = array_analysis_extensions[type(inst)]
return f(inst, self)
return []
|
def _analyze_inst(self, inst):
if isinstance(inst, ir.Assign):
return self._analyze_assign(inst)
return []
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def _analyze_assign(self, assign):
lhs = assign.target.name
rhs = assign.value
if isinstance(rhs, ir.Global):
for T in MAP_TYPES:
if isinstance(rhs.value, T):
self.map_calls.append(lhs)
if isinstance(rhs.value, pytypes.ModuleType) and rhs.value == numpy:
self.numpy_globals.append(lhs)
if isinstance(rhs, ir.Expr) and rhs.op == "getattr":
if rhs.value.name in self.numpy_globals:
self.numpy_calls[lhs] = rhs.attr
elif rhs.value.name in self.numpy_calls:
# numpy submodule call like np.random.ranf
# we keep random.ranf as call name
self.numpy_calls[lhs] = self.numpy_calls[rhs.value.name] + "." + rhs.attr
elif self._isarray(rhs.value.name):
self.array_attr_calls[lhs] = (rhs.attr, rhs.value)
if isinstance(rhs, ir.Expr) and rhs.op == "build_tuple":
self.tuple_table[lhs] = rhs.items
if isinstance(rhs, ir.Expr) and rhs.op == "build_list":
self.list_table[lhs] = rhs.items
if isinstance(rhs, ir.Const) and isinstance(rhs.value, tuple):
self.tuple_table[lhs] = rhs.value
if isinstance(rhs, ir.Const): # and np.isscalar(rhs.value):
self.constant_table[lhs] = rhs.value
# rhs_class_out = self._analyze_rhs_classes(rhs)
size_calls = []
if self._isarray(lhs):
analyze_out = self._analyze_rhs_classes(rhs)
if analyze_out is None:
rhs_corr = self._add_array_corr(lhs)
else:
rhs_corr = copy.copy(analyze_out)
if lhs in self.array_shape_classes:
# if shape already inferred in another basic block,
# make sure this new inference is compatible
if self.array_shape_classes[lhs] != rhs_corr:
self.array_shape_classes[lhs] = [-1] * self._get_ndims(lhs)
self.array_size_vars.pop(lhs, None)
if config.DEBUG_ARRAY_OPT == 1:
print("incompatible array shapes in control flow")
return []
self.array_shape_classes[lhs] = rhs_corr
self.array_size_vars[lhs] = [-1] * self._get_ndims(lhs)
# make sure output lhs array has size variables for each dimension
for i, corr in enumerate(rhs_corr):
# if corr unknown or new
if corr == -1 or corr not in self.class_sizes.keys():
# generate size call nodes for this dimension
nodes = self._gen_size_call(assign.target, i)
size_calls += nodes
assert isinstance(nodes[-1], ir.Assign)
size_var = nodes[-1].target
if corr != -1:
self.class_sizes[corr] = [size_var]
self.array_size_vars[lhs][i] = size_var
else:
# reuse a size variable from this correlation
# TODO: consider CFG?
self.array_size_vars[lhs][i] = self.class_sizes[corr][0]
else:
self._analyze_rhs_classes_no_lhs_array(rhs)
return size_calls
|
def _analyze_assign(self, assign):
lhs = assign.target.name
rhs = assign.value
if isinstance(rhs, ir.Global):
for T in MAP_TYPES:
if isinstance(rhs.value, T):
self.map_calls.append(lhs)
if isinstance(rhs.value, pytypes.ModuleType) and rhs.value == numpy:
self.numpy_globals.append(lhs)
if isinstance(rhs, ir.Expr) and rhs.op == "getattr":
if rhs.value.name in self.numpy_globals:
self.numpy_calls[lhs] = rhs.attr
elif rhs.value.name in self.numpy_calls:
# numpy submodule call like np.random.ranf
# we keep random.ranf as call name
self.numpy_calls[lhs] = self.numpy_calls[rhs.value.name] + "." + rhs.attr
elif self._isarray(rhs.value.name):
self.array_attr_calls[lhs] = (rhs.attr, rhs.value)
if isinstance(rhs, ir.Expr) and rhs.op == "build_tuple":
self.tuple_table[lhs] = rhs.items
if isinstance(rhs, ir.Expr) and rhs.op == "build_list":
self.list_table[lhs] = rhs.items
if isinstance(rhs, ir.Const) and isinstance(rhs.value, tuple):
self.tuple_table[lhs] = rhs.value
if isinstance(rhs, ir.Const): # and np.isscalar(rhs.value):
self.constant_table[lhs] = rhs.value
# rhs_class_out = self._analyze_rhs_classes(rhs)
size_calls = []
if self._isarray(lhs):
analyze_out = self._analyze_rhs_classes(rhs)
if analyze_out is None:
rhs_corr = self._add_array_corr(lhs)
else:
rhs_corr = copy.copy(analyze_out)
if lhs in self.array_shape_classes:
# if shape already inferred in another basic block,
# make sure this new inference is compatible
if self.array_shape_classes[lhs] != rhs_corr:
self.array_shape_classes[lhs] = [-1] * self._get_ndims(lhs)
self.array_size_vars.pop(lhs, None)
if config.DEBUG_ARRAY_OPT == 1:
print("incompatible array shapes in control flow")
return []
self.array_shape_classes[lhs] = rhs_corr
self.array_size_vars[lhs] = [-1] * self._get_ndims(lhs)
# make sure output lhs array has size variables for each dimension
for i, corr in enumerate(rhs_corr):
# if corr unknown or new
if corr == -1 or corr not in self.class_sizes.keys():
# generate size call nodes for this dimension
nodes = self._gen_size_call(assign.target, i)
size_calls += nodes
assert isinstance(nodes[-1], ir.Assign)
size_var = nodes[-1].target
if corr != -1:
self.class_sizes[corr] = [size_var]
self.array_size_vars[lhs][i] = size_var
else:
# reuse a size variable from this correlation
# TODO: consider CFG?
self.array_size_vars[lhs][i] = self.class_sizes[corr][0]
# print(self.array_shape_classes)
return size_calls
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def _analyze_rhs_classes(self, node):
"""analysis of rhs when lhs is array so rhs has to return array"""
if isinstance(node, ir.Arg):
return None
# can't assume node.name is valid variable
# assert self._isarray(node.name)
# return self._add_array_corr(node.name)
elif isinstance(node, ir.Var):
return copy.copy(self.array_shape_classes[node.name])
elif isinstance(node, (ir.Global, ir.FreeVar)):
# XXX: currently, global variables are frozen in Numba (can change)
if isinstance(node.value, numpy.ndarray):
shape = node.value.shape
out_eqs = []
for c in shape:
new_class = self._get_next_class_with_size(c)
out_eqs.append(new_class)
return out_eqs
elif isinstance(node, ir.Expr):
if node.op == "unary" and node.fn in UNARY_MAP_OP:
assert isinstance(node.value, ir.Var)
in_var = node.value.name
assert self._isarray(in_var)
return copy.copy(self.array_shape_classes[in_var])
elif node.op == "binop" and node.fn in BINARY_MAP_OP:
arg1 = node.lhs.name
arg2 = node.rhs.name
return self._broadcast_and_match_shapes([arg1, arg2])
elif node.op == "inplace_binop" and node.immutable_fn in BINARY_MAP_OP:
arg1 = node.lhs.name
arg2 = node.rhs.name
return self._broadcast_and_match_shapes([arg1, arg2])
elif node.op == "arrayexpr":
# set to remove duplicates
args = {v.name for v in node.list_vars()}
return self._broadcast_and_match_shapes(list(args))
elif node.op == "cast":
return copy.copy(self.array_shape_classes[node.value.name])
elif node.op == "call":
call_name = "NULL"
args = copy.copy(node.args)
if node.func.name in self.map_calls:
return copy.copy(self.array_shape_classes[args[0].name])
if node.func.name in self.numpy_calls.keys():
call_name = self.numpy_calls[node.func.name]
elif node.func.name in self.array_attr_calls.keys():
call_name, arr = self.array_attr_calls[node.func.name]
args.insert(0, arr)
if call_name is not "NULL":
return self._analyze_np_call(call_name, args, dict(node.kws))
else:
if config.DEBUG_ARRAY_OPT == 1:
# no need to raise since this is not a failure and
# analysis can continue (might limit optimization
# later)
print("can't find shape for unknown call:", node)
return None
elif node.op == "getattr" and self._isarray(node.value.name):
# numpy recarray, e.g. X.a
val = node.value.name
val_typ = self.typemap[val]
if (
isinstance(val_typ.dtype, types.npytypes.Record)
and node.attr in val_typ.dtype.fields
):
return copy.copy(self.array_shape_classes[val])
# matrix transpose
if node.attr == "T":
return self._analyze_np_call("transpose", [node.value], dict())
elif node.op == "getattr" and isinstance(
self.typemap[node.value.name], types.npytypes.Record
):
# nested arrays in numpy records
val = node.value.name
val_typ = self.typemap[val]
if node.attr in val_typ.fields and isinstance(
val_typ.fields[node.attr][0], types.npytypes.NestedArray
):
shape = val_typ.fields[node.attr][0].shape
return self._get_classes_from_const_shape(shape)
elif node.op == "getitem" or node.op == "static_getitem":
# getitem where output is array is possibly accessing elements
# of numpy records, e.g. X['a']
val = node.value.name
val_typ = self.typemap[val]
if (
self._isarray(val)
and isinstance(val_typ.dtype, types.npytypes.Record)
and node.index in val_typ.dtype.fields
):
return copy.copy(self.array_shape_classes[val])
else:
if config.DEBUG_ARRAY_OPT == 1:
# no need to raise since this is not a failure and
# analysis can continue (might limit optimization later)
print("can't find shape classes for expr", node, " of op", node.op)
if config.DEBUG_ARRAY_OPT == 1:
# no need to raise since this is not a failure and
# analysis can continue (might limit optimization later)
print("can't find shape classes for node", node, " of type ", type(node))
return None
|
def _analyze_rhs_classes(self, node):
if isinstance(node, ir.Arg):
assert self._isarray(node.name)
return self._add_array_corr(node.name)
elif isinstance(node, ir.Var):
return copy.copy(self.array_shape_classes[node.name])
elif isinstance(node, (ir.Global, ir.FreeVar)):
# XXX: currently, global variables are frozen in Numba (can change)
if isinstance(node.value, numpy.ndarray):
shape = node.value.shape
out_eqs = []
for c in shape:
new_class = self._get_next_class_with_size(c)
out_eqs.append(new_class)
return out_eqs
elif isinstance(node, ir.Expr):
if node.op == "unary" and node.fn in UNARY_MAP_OP:
assert isinstance(node.value, ir.Var)
in_var = node.value.name
assert self._isarray(in_var)
return copy.copy(self.array_shape_classes[in_var])
elif node.op == "binop" and node.fn in BINARY_MAP_OP:
arg1 = node.lhs.name
arg2 = node.rhs.name
return self._broadcast_and_match_shapes([arg1, arg2])
elif node.op == "inplace_binop" and node.immutable_fn in BINARY_MAP_OP:
arg1 = node.lhs.name
arg2 = node.rhs.name
return self._broadcast_and_match_shapes([arg1, arg2])
elif node.op == "arrayexpr":
# set to remove duplicates
args = {v.name for v in node.list_vars()}
return self._broadcast_and_match_shapes(list(args))
elif node.op == "cast":
return copy.copy(self.array_shape_classes[node.value.name])
elif node.op == "call":
call_name = "NULL"
args = copy.copy(node.args)
if node.func.name in self.map_calls:
return copy.copy(self.array_shape_classes[args[0].name])
if node.func.name in self.numpy_calls.keys():
call_name = self.numpy_calls[node.func.name]
elif node.func.name in self.array_attr_calls.keys():
call_name, arr = self.array_attr_calls[node.func.name]
args.insert(0, arr)
if call_name is not "NULL":
return self._analyze_np_call(call_name, args, dict(node.kws))
else:
if config.DEBUG_ARRAY_OPT == 1:
# no need to raise since this is not a failure and
# analysis can continue (might limit optimization later)
print("can't find shape for unknown call:", node)
return None
elif node.op == "getattr" and self._isarray(node.value.name):
# numpy recarray, e.g. X.a
val = node.value.name
val_typ = self.typemap[val]
if (
isinstance(val_typ.dtype, types.npytypes.Record)
and node.attr in val_typ.dtype.fields
):
return copy.copy(self.array_shape_classes[val])
# matrix transpose
if node.attr == "T":
return self._analyze_np_call("transpose", [node.value], dict())
elif node.op == "getattr" and isinstance(
self.typemap[node.value.name], types.npytypes.Record
):
# nested arrays in numpy records
val = node.value.name
val_typ = self.typemap[val]
if node.attr in val_typ.fields and isinstance(
val_typ.fields[node.attr][0], types.npytypes.NestedArray
):
shape = val_typ.fields[node.attr][0].shape
return self._get_classes_from_const_shape(shape)
elif node.op == "getitem" or node.op == "static_getitem":
# getitem where output is array is possibly accessing elements
# of numpy records, e.g. X['a']
val = node.value.name
val_typ = self.typemap[val]
if (
self._isarray(val)
and isinstance(val_typ.dtype, types.npytypes.Record)
and node.index in val_typ.dtype.fields
):
return copy.copy(self.array_shape_classes[val])
else:
if config.DEBUG_ARRAY_OPT == 1:
# no need to raise since this is not a failure and
# analysis can continue (might limit optimization later)
print("can't find shape classes for expr", node, " of op", node.op)
if config.DEBUG_ARRAY_OPT == 1:
# no need to raise since this is not a failure and
# analysis can continue (might limit optimization later)
print("can't find shape classes for node", node, " of type ", type(node))
return None
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def _analyze_np_call(self, call_name, args, kws):
# print("numpy call ",call_name,args)
if call_name == "transpose":
out_eqs = copy.copy(self.array_shape_classes[args[0].name])
out_eqs.reverse()
return out_eqs
elif call_name in array_creation:
# these calls (e.g. empty) have only a "shape" argument
shape_arg = None
if len(args) > 0:
shape_arg = args[0]
elif "shape" in kws:
shape_arg = kws["shape"]
else:
return None
return self._get_classes_from_shape(shape_arg)
elif call_name in random_1arg_size:
# these calls have only a "size" argument
size_arg = None
if len(args) > 0:
size_arg = args[0]
elif "size" in kws:
size_arg = kws["size"]
else:
return None
return self._get_classes_from_shape(size_arg)
elif call_name in random_int_args:
# e.g. random.rand
# arguments are integers (not a tuple as in previous calls)
return self._get_classes_from_dim_args(args)
elif call_name in random_3arg_sizelast:
# normal, uniform, ... have 3 args, last one is size
size_arg = None
if len(args) == 3:
size_arg = args[2]
elif "size" in kws:
size_arg = kws["size"]
else:
return None
return self._get_classes_from_shape(size_arg)
elif call_name in random_2arg_sizelast:
# have 2 args, last one is size
size_arg = None
if len(args) == 2:
size_arg = args[1]
elif "size" in kws:
size_arg = kws["size"]
else:
return None
return self._get_classes_from_shape(size_arg)
elif call_name == "random.randint":
# has 4 args, 3rd one is size
size_arg = None
if len(args) >= 3:
size_arg = args[2]
elif "size" in kws:
size_arg = kws["size"]
else:
return None
return self._get_classes_from_shape(size_arg)
elif call_name == "random.triangular":
# has 4 args, last one is size
size_arg = None
if len(args) == 4:
size_arg = args[3]
elif "size" in kws:
size_arg = kws["size"]
else:
return None
return self._get_classes_from_shape(size_arg)
elif call_name == "eye":
# if one input n, output is n*n
# two inputs n,m, output is n*m
# N is either positional or kw arg
if "N" in kws:
assert len(args) == 0
args.append(kws["N"])
if "M" in kws:
assert len(args) == 1
args.append(kws["M"])
new_class1 = self._get_next_class_with_size(args[0].name)
out_eqs = [new_class1]
if len(args) > 1:
new_class2 = self._get_next_class_with_size(args[1].name)
out_eqs.append(new_class2)
else:
out_eqs.append(new_class1)
return out_eqs
elif call_name == "identity":
# input n, output is n*n
new_class1 = self._get_next_class_with_size(args[0].name)
return [new_class1, new_class1]
elif call_name == "diag":
k = self._get_second_arg_or_kw(args, kws, "k")
# TODO: support k other than 0 (other diagonal smaller size than
# main)
if k == 0:
in_arr = args[0].name
in_class = self.array_shape_classes[in_arr][0]
# if 1D input v, create 2D output with v on diagonal
# if 2D input v, return v's diagonal
if self._get_ndims(in_arr) == 1:
return [in_class, in_class]
else:
self._get_ndims(in_arr) == 2
return [in_class]
elif call_name in [
"empty_like",
"zeros_like",
"ones_like",
"full_like",
"copy",
"asfortranarray",
]:
# shape same as input
if args[0].name in self.array_shape_classes:
out_corrs = copy.copy(self.array_shape_classes[args[0].name])
else:
# array scalars: constant input results in 0-dim array
assert not self._isarray(args[0].name)
# TODO: make sure arg is scalar
out_corrs = []
# asfortranarray converts 0-d to 1-d automatically
if out_corrs == [] and call_name == "asfortranarray":
out_corrs = [CONST_CLASS]
return out_corrs
elif call_name == "reshape":
# print("reshape args: ", args)
# TODO: infer shape from length of args[0] in case of -1 input
if len(args) == 2:
# shape is either Int or tuple of Int
return self._get_classes_from_shape(args[1])
else:
# a list integers for shape
return self._get_classes_from_shape_list(args[1:])
elif call_name == "array":
# only 1D list is supported, and not ndmin arg
if args[0].name in self.list_table:
l = self.list_table[args[0].name]
new_class1 = self._get_next_class_with_size(len(l))
return [new_class1]
elif call_name == "concatenate":
# all dimensions of output are same as inputs, except axis
axis = self._get_second_arg_or_kw(args, kws, "axis")
if axis == -1: # don't know shape if axis is not constant
return None
arr_args = self._get_sequence_arrs(args[0].name)
if len(arr_args) == 0:
return None
ndims = self._get_ndims(arr_args[0].name)
if ndims <= axis:
return None
out_eqs = [-1] * ndims
new_class1 = self._get_next_class()
# TODO: set size to sum of input array's size along axis
out_eqs[axis] = new_class1
for i in range(ndims):
if i == axis:
continue
c = self.array_shape_classes[arr_args[0].name][i]
for v in arr_args:
# all input arrays have equal dimensions, except on axis
c = self._merge_classes(c, self.array_shape_classes[v.name][i])
out_eqs[i] = c
return out_eqs
elif call_name == "stack":
# all dimensions of output are same as inputs, but extra on axis
axis = self._get_second_arg_or_kw(args, kws, "axis")
if axis == -1: # don't know shape if axis is not constant
return None
arr_args = self._get_sequence_arrs(args[0].name)
if len(arr_args) == 0:
return None
ndims = self._get_ndims(arr_args[0].name)
out_eqs = [-1] * ndims
# all input arrays have equal dimensions
for i in range(ndims):
c = self.array_shape_classes[arr_args[0].name][i]
for v in arr_args:
c = self._merge_classes(c, self.array_shape_classes[v.name][i])
out_eqs[i] = c
# output has one extra dimension
new_class1 = self._get_next_class_with_size(len(arr_args))
out_eqs.insert(axis, new_class1)
# TODO: set size to sum of input array's size along axis
return out_eqs
elif call_name == "hstack":
# hstack is same as concatenate with axis=1 for ndim>=2
dummy_one_var = ir.Var(args[0].scope, "__dummy_1", args[0].loc)
self.constant_table["__dummy_1"] = 1
args.append(dummy_one_var)
return self._analyze_np_call("concatenate", args, kws)
elif call_name == "dstack":
# dstack is same as concatenate with axis=2, atleast_3d args
args[0] = self.convert_seq_to_atleast_3d(args[0])
dummy_two_var = ir.Var(args[0].scope, "__dummy_2", args[0].loc)
self.constant_table["__dummy_2"] = 2
args.append(dummy_two_var)
return self._analyze_np_call("concatenate", args, kws)
elif call_name == "vstack":
# vstack is same as concatenate with axis=0 if 2D input dims or more
# TODO: set size to sum of input array's size for 1D
arr_args = self._get_sequence_arrs(args[0].name)
if len(arr_args) == 0:
return None
ndims = self._get_ndims(arr_args[0].name)
if ndims >= 2:
dummy_zero_var = ir.Var(args[0].scope, "__dummy_0", args[0].loc)
self.constant_table["__dummy_0"] = 0
args.append(dummy_zero_var)
return self._analyze_np_call("concatenate", args, kws)
elif call_name == "column_stack":
# 1D arrays turn into columns of 2D array
arr_args = self._get_sequence_arrs(args[0].name)
if len(arr_args) == 0:
return None
c = self.array_shape_classes[arr_args[0].name][0]
for v in arr_args:
c = self._merge_classes(c, self.array_shape_classes[v.name][0])
new_class = self._get_next_class_with_size(len(arr_args))
return [c, new_class]
elif call_name in ["cumsum", "cumprod"]:
in_arr = args[0].name
in_ndims = self._get_ndims(in_arr)
# for 1D, output has same size
# TODO: return flattened size for multi-dimensional input
if in_ndims == 1:
return copy.copy(self.array_shape_classes[in_arr])
elif call_name == "linspace":
# default is 50, arg3 is size
LINSPACE_DEFAULT_SIZE = 50
size = LINSPACE_DEFAULT_SIZE
if len(args) >= 3:
size = args[2].name
new_class = self._get_next_class_with_size(size)
return [new_class]
elif call_name == "dot":
# https://docs.scipy.org/doc/numpy/reference/generated/numpy.dot.html
# for multi-dimensional arrays, last dimension of arg1 and second
# to last dimension of arg2 should be equal since used in dot product.
# if arg2 is 1D, its only dimension is used for dot product and
# should be equal to second to last of arg1.
assert len(args) == 2 or len(args) == 3
in1 = args[0].name
in2 = args[1].name
ndims1 = self._get_ndims(in1)
ndims2 = self._get_ndims(in2)
c1 = self.array_shape_classes[in1][ndims1 - 1]
c2 = UNKNOWN_CLASS
if ndims2 == 1:
c2 = self.array_shape_classes[in2][0]
else:
c2 = self.array_shape_classes[in2][ndims2 - 2]
c_inner = self._merge_classes(c1, c2)
c_out = []
for i in range(ndims1 - 1):
c_out.append(self.array_shape_classes[in1][i])
for i in range(ndims2 - 2):
c_out.append(self.array_shape_classes[in2][i])
if ndims2 > 1:
c_out.append(self.array_shape_classes[in2][ndims2 - 1])
return c_out
elif call_name in UFUNC_MAP_OP:
return self._broadcast_and_match_shapes([a.name for a in args])
if config.DEBUG_ARRAY_OPT == 1:
print("unknown numpy call:", call_name, " ", args)
return None
|
def _analyze_np_call(self, call_name, args, kws):
# print("numpy call ",call_name,args)
if call_name == "transpose":
out_eqs = copy.copy(self.array_shape_classes[args[0].name])
out_eqs.reverse()
return out_eqs
elif call_name in [
"empty",
"zeros",
"ones",
"full",
"random.ranf",
"random.random_sample",
"random.sample",
]:
shape_arg = None
if len(args) > 0:
shape_arg = args[0]
elif "shape" in kws:
shape_arg = kws["shape"]
else:
return None
return self._get_classes_from_shape(shape_arg)
elif call_name in ["random.rand", "random.randn"]:
# arguments are integers, not a tuple
return self._get_classes_from_dim_args(args)
elif call_name == "eye":
# if one input n, output is n*n
# two inputs n,m, output is n*m
# N is either positional or kw arg
if "N" in kws:
assert len(args) == 0
args.append(kws["N"])
if "M" in kws:
assert len(args) == 1
args.append(kws["M"])
new_class1 = self._get_next_class_with_size(args[0].name)
out_eqs = [new_class1]
if len(args) > 1:
new_class2 = self._get_next_class_with_size(args[1].name)
out_eqs.append(new_class2)
else:
out_eqs.append(new_class1)
return out_eqs
elif call_name == "identity":
# input n, output is n*n
new_class1 = self._get_next_class_with_size(args[0].name)
return [new_class1, new_class1]
elif call_name == "diag":
k = self._get_second_arg_or_kw(args, kws, "k")
# TODO: support k other than 0 (other diagonal smaller size than main)
if k == 0:
in_arr = args[0].name
in_class = self.array_shape_classes[in_arr][0]
# if 1D input v, create 2D output with v on diagonal
# if 2D input v, return v's diagonal
if self._get_ndims(in_arr) == 1:
return [in_class, in_class]
else:
self._get_ndims(in_arr) == 2
return [in_class]
elif call_name in [
"empty_like",
"zeros_like",
"ones_like",
"full_like",
"copy",
"asfortranarray",
]:
# shape same as input
if args[0].name in self.array_shape_classes:
out_corrs = copy.copy(self.array_shape_classes[args[0].name])
else:
# array scalars: constant input results in 0-dim array
assert not self._isarray(args[0].name)
# TODO: make sure arg is scalar
out_corrs = []
# asfortranarray converts 0-d to 1-d automatically
if out_corrs == [] and call_name == "asfortranarray":
out_corrs = [CONST_CLASS]
return out_corrs
elif call_name == "reshape":
# print("reshape args: ", args)
# TODO: infer shape from length of args[0] in case of -1 input
if len(args) == 2:
# shape is either Int or tuple of Int
return self._get_classes_from_shape(args[1])
else:
# a list integers for shape
return self._get_classes_from_shape_list(args[1:])
elif call_name == "array":
# only 1D list is supported, and not ndmin arg
if args[0].name in self.list_table:
l = self.list_table[args[0].name]
new_class1 = self._get_next_class_with_size(len(l))
return [new_class1]
elif call_name == "concatenate":
# all dimensions of output are same as inputs, except axis
axis = self._get_second_arg_or_kw(args, kws, "axis")
if axis == -1: # don't know shape if axis is not constant
return None
arr_args = self._get_sequence_arrs(args[0].name)
if len(arr_args) == 0:
return None
ndims = self._get_ndims(arr_args[0].name)
if ndims <= axis:
return None
out_eqs = [-1] * ndims
new_class1 = self._get_next_class()
# TODO: set size to sum of input array's size along axis
out_eqs[axis] = new_class1
for i in range(ndims):
if i == axis:
continue
c = self.array_shape_classes[arr_args[0].name][i]
for v in arr_args:
# all input arrays have equal dimensions, except on axis
c = self._merge_classes(c, self.array_shape_classes[v.name][i])
out_eqs[i] = c
return out_eqs
elif call_name == "stack":
# all dimensions of output are same as inputs, but extra on axis
axis = self._get_second_arg_or_kw(args, kws, "axis")
if axis == -1: # don't know shape if axis is not constant
return None
arr_args = self._get_sequence_arrs(args[0].name)
if len(arr_args) == 0:
return None
ndims = self._get_ndims(arr_args[0].name)
out_eqs = [-1] * ndims
# all input arrays have equal dimensions
for i in range(ndims):
c = self.array_shape_classes[arr_args[0].name][i]
for v in arr_args:
c = self._merge_classes(c, self.array_shape_classes[v.name][i])
out_eqs[i] = c
# output has one extra dimension
new_class1 = self._get_next_class_with_size(len(arr_args))
out_eqs.insert(axis, new_class1)
# TODO: set size to sum of input array's size along axis
return out_eqs
elif call_name == "hstack":
# hstack is same as concatenate with axis=1 for ndim>=2
dummy_one_var = ir.Var(args[0].scope, "__dummy_1", args[0].loc)
self.constant_table["__dummy_1"] = 1
args.append(dummy_one_var)
return self._analyze_np_call("concatenate", args, kws)
elif call_name == "dstack":
# dstack is same as concatenate with axis=2, atleast_3d args
args[0] = self.convert_seq_to_atleast_3d(args[0])
dummy_two_var = ir.Var(args[0].scope, "__dummy_2", args[0].loc)
self.constant_table["__dummy_2"] = 2
args.append(dummy_two_var)
return self._analyze_np_call("concatenate", args, kws)
elif call_name == "vstack":
# vstack is same as concatenate with axis=0 if 2D input dims or more
# TODO: set size to sum of input array's size for 1D
arr_args = self._get_sequence_arrs(args[0].name)
if len(arr_args) == 0:
return None
ndims = self._get_ndims(arr_args[0].name)
if ndims >= 2:
dummy_zero_var = ir.Var(args[0].scope, "__dummy_0", args[0].loc)
self.constant_table["__dummy_0"] = 0
args.append(dummy_zero_var)
return self._analyze_np_call("concatenate", args, kws)
elif call_name == "column_stack":
# 1D arrays turn into columns of 2D array
arr_args = self._get_sequence_arrs(args[0].name)
if len(arr_args) == 0:
return None
c = self.array_shape_classes[arr_args[0].name][0]
for v in arr_args:
c = self._merge_classes(c, self.array_shape_classes[v.name][0])
new_class = self._get_next_class_with_size(len(arr_args))
return [c, new_class]
elif call_name in ["cumsum", "cumprod"]:
in_arr = args[0].name
in_ndims = self._get_ndims(in_arr)
# for 1D, output has same size
# TODO: return flattened size for multi-dimensional input
if in_ndims == 1:
return copy.copy(self.array_shape_classes[in_arr])
elif call_name == "linspace":
# default is 50, arg3 is size
LINSPACE_DEFAULT_SIZE = 50
size = LINSPACE_DEFAULT_SIZE
if len(args) >= 3:
size = args[2].name
new_class = self._get_next_class_with_size(size)
return [new_class]
elif call_name == "dot":
# https://docs.scipy.org/doc/numpy/reference/generated/numpy.dot.html
# for multi-dimensional arrays, last dimension of arg1 and second
# to last dimension of arg2 should be equal since used in dot product.
# if arg2 is 1D, its only dimension is used for dot product and
# should be equal to second to last of arg1.
assert len(args) == 2 or len(args) == 3
in1 = args[0].name
in2 = args[1].name
ndims1 = self._get_ndims(in1)
ndims2 = self._get_ndims(in2)
c1 = self.array_shape_classes[in1][ndims1 - 1]
c2 = UNKNOWN_CLASS
if ndims2 == 1:
c2 = self.array_shape_classes[in2][0]
else:
c2 = self.array_shape_classes[in2][ndims2 - 2]
c_inner = self._merge_classes(c1, c2)
c_out = []
for i in range(ndims1 - 1):
c_out.append(self.array_shape_classes[in1][i])
for i in range(ndims2 - 2):
c_out.append(self.array_shape_classes[in2][i])
if ndims2 > 1:
c_out.append(self.array_shape_classes[in2][ndims2 - 1])
return c_out
elif call_name in UFUNC_MAP_OP:
return self._broadcast_and_match_shapes([a.name for a in args])
if config.DEBUG_ARRAY_OPT == 1:
print("unknown numpy call:", call_name, " ", args)
return None
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def copy_propagate_update_analysis(stmt, var_dict, array_analysis):
"""update array analysis data during copy propagation.
If an array is in defs of a statement, we update its size variables.
"""
array_shape_classes = array_analysis.array_shape_classes
class_sizes = array_analysis.class_sizes
array_size_vars = array_analysis.array_size_vars
# find defs of stmt
def_set = set()
if isinstance(stmt, ir.Assign):
def_set.add(stmt.target.name)
for T, def_func in analysis.ir_extension_usedefs.items():
if isinstance(stmt, T):
_, def_set = def_func(stmt)
# update analysis for arrays in defs
for var in def_set:
if var in array_shape_classes:
if var in array_size_vars:
array_size_vars[var] = replace_vars_inner(
array_size_vars[var], var_dict
)
shape_corrs = array_shape_classes[var]
for c in shape_corrs:
if c != -1:
class_sizes[c] = replace_vars_inner(class_sizes[c], var_dict)
return
|
def copy_propagate_update_analysis(stmt, var_dict, array_analysis):
"""update array analysis data during copy propagation.
If an array is in defs of a statement, we update its size variables.
"""
array_shape_classes = array_analysis.array_shape_classes
class_sizes = array_analysis.class_sizes
array_size_vars = array_analysis.array_size_vars
# find defs of stmt
def_set = set()
if isinstance(stmt, ir.Assign):
def_set.add(stmt.target.name)
for T, def_func in analysis.ir_extension_defs.items():
if isinstance(stmt, T):
def_set = def_func(stmt)
# update analysis for arrays in defs
for var in def_set:
if var in array_shape_classes:
if var in array_size_vars:
array_size_vars[var] = replace_vars_inner(
array_size_vars[var], var_dict
)
shape_corrs = array_shape_classes[var]
for c in shape_corrs:
if c != -1:
class_sizes[c] = replace_vars_inner(class_sizes[c], var_dict)
return
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def stage_parfor_pass(self):
"""
Convert data-parallel computations into Parfor nodes
"""
# Ensure we have an IR and type information.
assert self.func_ir
parfor_pass = ParforPass(
self.func_ir,
self.type_annotation.typemap,
self.type_annotation.calltypes,
self.return_type,
self.typingctx,
)
parfor_pass.run()
|
def stage_parfor_pass(self):
"""
Convert data-parallel computations into Parfor nodes
"""
# Ensure we have an IR and type information.
assert self.func_ir
parfor_pass = ParforPass(
self.func_ir,
self.type_annotation.typemap,
self.type_annotation.calltypes,
self.return_type,
)
parfor_pass.run()
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def stage_inline_pass(self):
"""
Inline calls to locally defined closures.
"""
# Ensure we have an IR and type information.
assert self.func_ir
inline_pass = InlineClosureCallPass(self.func_ir, self.flags, run_frontend)
inline_pass.run()
# Remove all Dels, and re-run postproc
post_proc = postproc.PostProcessor(self.func_ir)
post_proc.run()
if config.DEBUG or config.DUMP_IR:
name = self.func_ir.func_id.func_qualname
print(("IR DUMP: %s" % name).center(80, "-"))
self.func_ir.dump()
|
def stage_inline_pass(self):
"""
Inline calls to locally defined closures.
"""
# Ensure we have an IR and type information.
assert self.func_ir
inline_pass = InlineClosureCallPass(self.func_ir, run_frontend)
inline_pass.run()
# Remove all Dels, and re-run postproc
post_proc = postproc.PostProcessor(self.func_ir)
post_proc.run()
if config.DEBUG or config.DUMP_IR:
name = self.func_ir.func_id.func_qualname
print(("IR DUMP: %s" % name).center(80, "-"))
self.func_ir.dump()
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def legalize_return_type(return_type, interp, targetctx):
"""
Only accept array return type iff it is passed into the function.
Reject function object return types if in nopython mode.
"""
if not targetctx.enable_nrt and isinstance(return_type, types.Array):
# Walk IR to discover all arguments and all return statements
retstmts = []
caststmts = {}
argvars = set()
for bid, blk in interp.blocks.items():
for inst in blk.body:
if isinstance(inst, ir.Return):
retstmts.append(inst.value.name)
elif isinstance(inst, ir.Assign):
if isinstance(inst.value, ir.Expr) and inst.value.op == "cast":
caststmts[inst.target.name] = inst.value
elif isinstance(inst.value, ir.Arg):
argvars.add(inst.target.name)
assert retstmts, "No return statements?"
for var in retstmts:
cast = caststmts.get(var)
if cast is None or cast.value.name not in argvars:
raise TypeError(
"Only accept returning of array passed into the "
"function as argument"
)
elif isinstance(return_type, types.Function) or isinstance(
return_type, types.Phantom
):
msg = "Can't return function object ({}) in nopython mode"
raise TypeError(msg.format(return_type))
|
def legalize_return_type(return_type, interp, targetctx):
"""
Only accept array return type iff it is passed into the function.
Reject function object return types if in nopython mode.
"""
if not targetctx.enable_nrt and isinstance(return_type, types.Array):
# Walk IR to discover all arguments and all return statements
retstmts = []
caststmts = {}
argvars = set()
for bid, blk in interp.blocks.items():
for inst in blk.body:
if isinstance(inst, ir.Return):
retstmts.append(inst.value.name)
elif isinstance(inst, ir.Assign):
if isinstance(inst.value, ir.Expr) and inst.value.op == "cast":
caststmts[inst.target.name] = inst.value
elif isinstance(inst.value, ir.Arg):
argvars.add(inst.target.name)
assert retstmts, "No return statements?"
for var in retstmts:
cast = caststmts.get(var)
if cast is None or cast.value.name not in argvars:
raise TypeError(
"Only accept returning of array passed into the "
"function as argument"
)
elif isinstance(return_type, types.Function) or isinstance(
return_type, types.Phantom
):
raise TypeError("Can't return function object in nopython mode")
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def __init__(self, shape, strides, dtype, stream=0, writeback=None, gpu_data=None):
"""
Args
----
shape
array shape.
strides
array strides.
dtype
data type as np.dtype.
stream
cuda stream.
writeback
Deprecated.
gpu_data
user provided device memory for the ndarray data buffer
"""
if isinstance(shape, (int, long)):
shape = (shape,)
if isinstance(strides, (int, long)):
strides = (strides,)
self.ndim = len(shape)
if len(strides) != self.ndim:
raise ValueError("strides not match ndim")
self._dummy = dummyarray.Array.from_desc(0, shape, strides, dtype.itemsize)
self.shape = tuple(shape)
self.strides = tuple(strides)
self.dtype = np.dtype(dtype)
self.size = int(np.prod(self.shape))
# prepare gpu memory
if self.size > 0:
if gpu_data is None:
self.alloc_size = _driver.memory_size_from_info(
self.shape, self.strides, self.dtype.itemsize
)
gpu_data = devices.get_context().memalloc(self.alloc_size)
else:
self.alloc_size = _driver.device_memory_size(gpu_data)
else:
# Make NULL pointer for empty allocation
gpu_data = _driver.MemoryPointer(
context=devices.get_context(), pointer=c_void_p(0), size=0
)
self.alloc_size = 0
self.gpu_data = gpu_data
self.__writeback = writeback # should deprecate the use of this
self.stream = 0
|
def __init__(self, shape, strides, dtype, stream=0, writeback=None, gpu_data=None):
"""
Args
----
shape
array shape.
strides
array strides.
dtype
data type as np.dtype.
stream
cuda stream.
writeback
Deprecated.
gpu_data
user provided device memory for the ndarray data buffer
"""
if isinstance(shape, (int, long)):
shape = (shape,)
if isinstance(strides, (int, long)):
strides = (strides,)
self.ndim = len(shape)
if len(strides) != self.ndim:
raise ValueError("strides not match ndim")
self._dummy = dummyarray.Array.from_desc(0, shape, strides, dtype.itemsize)
self.shape = tuple(shape)
self.strides = tuple(strides)
self.dtype = np.dtype(dtype)
self.size = int(np.prod(self.shape))
# prepare gpu memory
if self.size > 0:
if gpu_data is None:
self.alloc_size = _driver.memory_size_from_info(
self.shape, self.strides, self.dtype.itemsize
)
gpu_data = devices.get_context().memalloc(self.alloc_size)
else:
self.alloc_size = _driver.device_memory_size(gpu_data)
else:
gpu_data = None
self.alloc_size = 0
self.gpu_data = gpu_data
self.__writeback = writeback # should deprecate the use of this
self.stream = 0
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def view(self, start, stop=None):
if stop is None:
size = self.size - start
else:
size = stop - start
# Handle NULL/empty memory buffer
if self.device_pointer.value is None:
if size != 0:
raise RuntimeError("non-empty slice into empty slice")
view = self # new view is just a reference to self
# Handle normal case
else:
base = self.device_pointer.value + start
if size < 0:
raise RuntimeError("size cannot be negative")
pointer = drvapi.cu_device_ptr(base)
view = MemoryPointer(self.context, pointer, size, owner=self.owner)
return OwnedPointer(weakref.proxy(self.owner), view)
|
def view(self, start, stop=None):
base = self.device_pointer.value + start
if stop is None:
size = self.size - start
else:
size = stop - start
assert size > 0, "zero or negative memory size"
pointer = drvapi.cu_device_ptr(base)
view = MemoryPointer(self.context, pointer, size, owner=self.owner)
return OwnedPointer(weakref.proxy(self.owner), view)
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def jit(signature_or_function=None, locals={}, target="cpu", cache=False, **options):
"""
This decorator is used to compile a Python function into native code.
Args
-----
signature:
The (optional) signature or list of signatures to be compiled.
If not passed, required signatures will be compiled when the
decorated function is called, depending on the argument values.
As a convenience, you can directly pass the function to be compiled
instead.
locals: dict
Mapping of local variable names to Numba types. Used to override the
types deduced by Numba's type inference engine.
target: str
Specifies the target platform to compile for. Valid targets are cpu,
gpu, npyufunc, and cuda. Defaults to cpu.
options:
For a cpu target, valid options are:
nopython: bool
Set to True to disable the use of PyObjects and Python API
calls. The default behavior is to allow the use of PyObjects
and Python API. Default value is False.
forceobj: bool
Set to True to force the use of PyObjects for every value.
Default value is False.
looplift: bool
Set to True to enable jitting loops in nopython mode while
leaving surrounding code in object mode. This allows functions
to allocate NumPy arrays and use Python objects, while the
tight loops in the function can still be compiled in nopython
mode. Any arrays that the tight loop uses should be created
before the loop is entered. Default value is True.
error_model: str
The error-model affects divide-by-zero behavior.
Valid values are 'python' and 'numpy'. The 'python' model
raises exception. The 'numpy' model sets the result to
*+/-inf* or *nan*.
Returns
--------
A callable usable as a compiled function. Actual compiling will be
done lazily if no explicit signatures are passed.
Examples
--------
The function can be used in the following ways:
1) jit(signatures, target='cpu', **targetoptions) -> jit(function)
Equivalent to:
d = dispatcher(function, targetoptions)
for signature in signatures:
d.compile(signature)
Create a dispatcher object for a python function. Then, compile
the function with the given signature(s).
Example:
@jit("int32(int32, int32)")
def foo(x, y):
return x + y
@jit(["int32(int32, int32)", "float32(float32, float32)"])
def bar(x, y):
return x + y
2) jit(function, target='cpu', **targetoptions) -> dispatcher
Create a dispatcher function object that specializes at call site.
Examples:
@jit
def foo(x, y):
return x + y
@jit(target='cpu', nopython=True)
def bar(x, y):
return x + y
"""
if "argtypes" in options:
raise DeprecationError(_msg_deprecated_signature_arg.format("argtypes"))
if "restype" in options:
raise DeprecationError(_msg_deprecated_signature_arg.format("restype"))
if options.get("parallel"):
uns1 = sys.platform.startswith("win32") and sys.version_info[:2] == (2, 7)
uns2 = sys.maxsize <= 2**32
if uns1 or uns2:
msg = (
"The 'parallel' target is not currently supported on "
"Windows operating systems when using Python 2.7, or "
"on 32 bit hardware."
)
raise RuntimeError(msg)
if cache:
msg = (
"Caching is not available when the 'parallel' target is in "
"use. Caching is now being disabled to allow execution to "
"continue."
)
warnings.warn(msg, RuntimeWarning)
cache = False
# Handle signature
if signature_or_function is None:
# No signature, no function
pyfunc = None
sigs = None
elif isinstance(signature_or_function, list):
# A list of signatures is passed
pyfunc = None
sigs = signature_or_function
elif sigutils.is_signature(signature_or_function):
# A single signature is passed
pyfunc = None
sigs = [signature_or_function]
else:
# A function is passed
pyfunc = signature_or_function
sigs = None
wrapper = _jit(
sigs, locals=locals, target=target, cache=cache, targetoptions=options
)
if pyfunc is not None:
return wrapper(pyfunc)
else:
return wrapper
|
def jit(signature_or_function=None, locals={}, target="cpu", cache=False, **options):
"""
This decorator is used to compile a Python function into native code.
Args
-----
signature:
The (optional) signature or list of signatures to be compiled.
If not passed, required signatures will be compiled when the
decorated function is called, depending on the argument values.
As a convenience, you can directly pass the function to be compiled
instead.
locals: dict
Mapping of local variable names to Numba types. Used to override the
types deduced by Numba's type inference engine.
target: str
Specifies the target platform to compile for. Valid targets are cpu,
gpu, npyufunc, and cuda. Defaults to cpu.
options:
For a cpu target, valid options are:
nopython: bool
Set to True to disable the use of PyObjects and Python API
calls. The default behavior is to allow the use of PyObjects
and Python API. Default value is False.
forceobj: bool
Set to True to force the use of PyObjects for every value.
Default value is False.
looplift: bool
Set to True to enable jitting loops in nopython mode while
leaving surrounding code in object mode. This allows functions
to allocate NumPy arrays and use Python objects, while the
tight loops in the function can still be compiled in nopython
mode. Any arrays that the tight loop uses should be created
before the loop is entered. Default value is True.
error_model: str
The error-model affects divide-by-zero behavior.
Valid values are 'python' and 'numpy'. The 'python' model
raises exception. The 'numpy' model sets the result to
*+/-inf* or *nan*.
Returns
--------
A callable usable as a compiled function. Actual compiling will be
done lazily if no explicit signatures are passed.
Examples
--------
The function can be used in the following ways:
1) jit(signatures, target='cpu', **targetoptions) -> jit(function)
Equivalent to:
d = dispatcher(function, targetoptions)
for signature in signatures:
d.compile(signature)
Create a dispatcher object for a python function. Then, compile
the function with the given signature(s).
Example:
@jit("int32(int32, int32)")
def foo(x, y):
return x + y
@jit(["int32(int32, int32)", "float32(float32, float32)"])
def bar(x, y):
return x + y
2) jit(function, target='cpu', **targetoptions) -> dispatcher
Create a dispatcher function object that specializes at call site.
Examples:
@jit
def foo(x, y):
return x + y
@jit(target='cpu', nopython=True)
def bar(x, y):
return x + y
"""
if "argtypes" in options:
raise DeprecationError(_msg_deprecated_signature_arg.format("argtypes"))
if "restype" in options:
raise DeprecationError(_msg_deprecated_signature_arg.format("restype"))
# Handle signature
if signature_or_function is None:
# No signature, no function
pyfunc = None
sigs = None
elif isinstance(signature_or_function, list):
# A list of signatures is passed
pyfunc = None
sigs = signature_or_function
elif sigutils.is_signature(signature_or_function):
# A single signature is passed
pyfunc = None
sigs = [signature_or_function]
else:
# A function is passed
pyfunc = signature_or_function
sigs = None
wrapper = _jit(
sigs, locals=locals, target=target, cache=cache, targetoptions=options
)
if pyfunc is not None:
return wrapper(pyfunc)
else:
return wrapper
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def __getitem__(self, item):
if isinstance(item, slice):
start, stop, step = item.start, item.stop, item.step
single = False
else:
single = True
start = item
stop = start + 1
step = None
# Default values
# Start value is default to zero
if start is None:
start = 0
# Stop value is default to self.size
if stop is None:
stop = self.size
# Step is default to 1
if step is None:
step = 1
stride = step * self.stride
# Compute start in bytes
if start >= 0:
start = self.start + start * self.stride
else:
start = self.stop + start * self.stride
start = max(start, self.start)
# Compute stop in bytes
if stop >= 0:
stop = self.start + stop * self.stride
else:
stop = self.stop + stop * self.stride
stop = min(stop, self.stop)
# Clip stop
if (stop - start) > self.size * self.stride:
stop = start + self.size * stride
size = (stop - start + (stride - 1)) // stride
if stop < start:
start = stop
size = 0
return Dim(start, stop, size, stride, single)
|
def __getitem__(self, item):
if isinstance(item, slice):
start, stop, step = item.start, item.stop, item.step
single = False
else:
single = True
start = item
stop = start + 1
step = None
if start is None:
start = 0
if stop is None:
stop = self.size
if step is None:
step = 1
stride = step * self.stride
if start >= 0:
start = self.start + start * self.stride
else:
start = self.stop + start * self.stride
if stop >= 0:
stop = self.start + stop * self.stride
else:
stop = self.stop + stop * self.stride
size = (stop - start + (stride - 1)) // stride
if self.start >= start >= self.stop:
raise IndexError("start index out-of-bound")
if self.start >= stop >= self.stop:
raise IndexError("stop index out-of-bound")
if stop < start:
start = stop
size = 0
return Dim(start, stop, size, stride, single)
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def _compute_extent(self):
firstidx = [0] * self.ndim
lastidx = [s - 1 for s in self.shape]
start = compute_index(firstidx, self.dims)
stop = compute_index(lastidx, self.dims) + self.itemsize
stop = max(stop, start) # ensure postive extent
return Extent(start, stop)
|
def _compute_extent(self):
firstidx = [0] * self.ndim
lastidx = [s - 1 for s in self.shape]
start = compute_index(firstidx, self.dims)
stop = compute_index(lastidx, self.dims) + self.itemsize
return Extent(start, stop)
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def new_error_context(fmt_, *args, **kwargs):
"""
A contextmanager that prepend contextual information to any exception
raised within. If the exception type is not an instance of NumbaError,
it will be wrapped into a InternalError. The exception class can be
changed by providing a "errcls_" keyword argument with the exception
constructor.
The first argument is a message that describes the context. It can be a
format string. If there are additional arguments, it will be used as
``fmt_.format(*args, **kwargs)`` to produce the final message string.
"""
errcls = kwargs.pop("errcls_", InternalError)
loc = kwargs.get("loc", None)
if loc is not None and not loc.filename.startswith(_numba_path):
loc_info.update(kwargs)
try:
yield
except NumbaError as e:
e.add_context(_format_msg(fmt_, args, kwargs))
raise
except Exception as e:
newerr = errcls(e).add_context(_format_msg(fmt_, args, kwargs))
six.reraise(type(newerr), newerr, sys.exc_info()[2])
|
def new_error_context(fmt_, *args, **kwargs):
"""
A contextmanager that prepend contextual information to any exception
raised within. If the exception type is not an instance of NumbaError,
it will be wrapped into a InternalError. The exception class can be
changed by providing a "errcls_" keyword argument with the exception
constructor.
The first argument is a message that describes the context. It can be a
format string. If there are additional arguments, it will be used as
``fmt_.format(*args, **kwargs)`` to produce the final message string.
"""
errcls = kwargs.pop("errcls_", InternalError)
try:
yield
except NumbaError as e:
e.add_context(_format_msg(fmt_, args, kwargs))
raise
except Exception as e:
newerr = errcls(e).add_context(_format_msg(fmt_, args, kwargs))
six.reraise(type(newerr), newerr, sys.exc_info()[2])
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def __init__(self, func_ir, flags, run_frontend):
self.func_ir = func_ir
self.flags = flags
self.run_frontend = run_frontend
|
def __init__(self, func_ir, run_frontend):
self.func_ir = func_ir
self.run_frontend = run_frontend
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def run(self):
"""Run inline closure call pass."""
modified = False
work_list = list(self.func_ir.blocks.items())
debug_print = _make_debug_print("InlineClosureCallPass")
debug_print("START")
while work_list:
label, block = work_list.pop()
for i in range(len(block.body)):
instr = block.body[i]
if isinstance(instr, ir.Assign):
lhs = instr.target
expr = instr.value
if isinstance(expr, ir.Expr) and expr.op == "call":
func_def = guard(_get_definition, self.func_ir, expr.func)
debug_print("found call to ", expr.func, " def = ", func_def)
if isinstance(func_def, ir.Expr) and func_def.op == "make_function":
new_blocks = self.inline_closure_call(block, i, func_def)
for block in new_blocks:
work_list.append(block)
modified = True
# current block is modified, skip the rest
break
if enable_inline_arraycall:
# Identify loop structure
if modified:
# Need to do some cleanups if closure inlining kicked in
merge_adjacent_blocks(self.func_ir)
cfg = compute_cfg_from_blocks(self.func_ir.blocks)
debug_print("start inline arraycall")
_debug_dump(cfg)
loops = cfg.loops()
sized_loops = [(k, len(loops[k].body)) for k in loops.keys()]
visited = []
# We go over all loops, bigger loops first (outer first)
for k, s in sorted(sized_loops, key=lambda tup: tup[1], reverse=True):
visited.append(k)
if guard(
_inline_arraycall,
self.func_ir,
cfg,
visited,
loops[k],
self.flags.auto_parallel,
):
modified = True
if modified:
_fix_nested_array(self.func_ir)
if modified:
remove_dels(self.func_ir.blocks)
# repeat dead code elimintation until nothing can be further
# removed
while remove_dead(self.func_ir.blocks, self.func_ir.arg_names):
pass
self.func_ir.blocks = rename_labels(self.func_ir.blocks)
debug_print("END")
|
def run(self):
"""Run inline closure call pass."""
modified = False
work_list = list(self.func_ir.blocks.items())
_debug_print("START InlineClosureCall")
while work_list:
label, block = work_list.pop()
for i in range(len(block.body)):
instr = block.body[i]
if isinstance(instr, ir.Assign):
lhs = instr.target
expr = instr.value
if isinstance(expr, ir.Expr) and expr.op == "call":
try:
func_def = self.func_ir.get_definition(expr.func)
except KeyError:
func_def = None
_debug_print("found call to ", expr.func, " def = ", func_def)
if isinstance(func_def, ir.Expr) and func_def.op == "make_function":
new_blocks = self.inline_closure_call(block, i, func_def)
for block in new_blocks:
work_list.append(block)
modified = True
# current block is modified, skip the rest
break
if modified:
remove_dels(self.func_ir.blocks)
# repeat dead code elimintation until nothing can be further removed
while remove_dead(self.func_ir.blocks, self.func_ir.arg_names):
pass
self.func_ir.blocks = rename_labels(self.func_ir.blocks)
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def inline_closure_call(self, block, i, callee):
"""Inline the body of `callee` at its callsite (`i`-th instruction of `block`)"""
scope = block.scope
instr = block.body[i]
call_expr = instr.value
debug_print = _make_debug_print("inline_closure_call")
debug_print("Found closure call: ", instr, " with callee = ", callee)
func_ir = self.func_ir
# first, get the IR of the callee
callee_ir = self.get_ir_of_code(callee.code)
callee_blocks = callee_ir.blocks
# 1. relabel callee_ir by adding an offset
max_label = max(func_ir.blocks.keys())
callee_blocks = add_offset_to_labels(callee_blocks, max_label + 1)
callee_ir.blocks = callee_blocks
min_label = min(callee_blocks.keys())
max_label = max(callee_blocks.keys())
# reset globals in ir_utils before we use it
ir_utils._max_label = max_label
debug_print("After relabel")
_debug_dump(callee_ir)
# 2. rename all local variables in callee_ir with new locals created in func_ir
callee_scopes = _get_all_scopes(callee_blocks)
debug_print("callee_scopes = ", callee_scopes)
# one function should only have one local scope
assert len(callee_scopes) == 1
callee_scope = callee_scopes[0]
var_dict = {}
for var in callee_scope.localvars._con.values():
if not (var.name in callee.code.co_freevars):
new_var = scope.define(mk_unique_var(var.name), loc=var.loc)
var_dict[var.name] = new_var
debug_print("var_dict = ", var_dict)
replace_vars(callee_blocks, var_dict)
debug_print("After local var rename")
_debug_dump(callee_ir)
# 3. replace formal parameters with actual arguments
args = list(call_expr.args)
if callee.defaults:
debug_print("defaults = ", callee.defaults)
if isinstance(callee.defaults, tuple): # Python 3.5
args = args + list(callee.defaults)
elif isinstance(callee.defaults, ir.Var) or isinstance(callee.defaults, str):
defaults = func_ir.get_definition(callee.defaults)
assert isinstance(defaults, ir.Const)
loc = defaults.loc
args = args + [ir.Const(value=v, loc=loc) for v in defaults.value]
else:
raise NotImplementedError(
"Unsupported defaults to make_function: {}".format(defaults)
)
_replace_args_with(callee_blocks, args)
debug_print("After arguments rename: ")
_debug_dump(callee_ir)
# 4. replace freevar with actual closure var
if callee.closure:
closure = func_ir.get_definition(callee.closure)
assert isinstance(closure, ir.Expr) and closure.op == "build_tuple"
assert len(callee.code.co_freevars) == len(closure.items)
debug_print("callee's closure = ", closure)
_replace_freevars(callee_blocks, closure.items)
debug_print("After closure rename")
_debug_dump(callee_ir)
# 5. split caller blocks into two
new_blocks = []
new_block = ir.Block(scope, block.loc)
new_block.body = block.body[i + 1 :]
new_label = next_label()
func_ir.blocks[new_label] = new_block
new_blocks.append((new_label, new_block))
block.body = block.body[:i]
block.body.append(ir.Jump(min_label, instr.loc))
# 6. replace Return with assignment to LHS
topo_order = find_topo_order(callee_blocks)
_replace_returns(callee_blocks, instr.target, new_label)
# remove the old definition of instr.target too
if instr.target.name in func_ir._definitions:
func_ir._definitions[instr.target.name] = []
# 7. insert all new blocks, and add back definitions
for label in topo_order:
# block scope must point to parent's
block = callee_blocks[label]
block.scope = scope
_add_definitions(func_ir, block)
func_ir.blocks[label] = block
new_blocks.append((label, block))
debug_print("After merge in")
_debug_dump(func_ir)
return new_blocks
|
def inline_closure_call(self, block, i, callee):
"""Inline the body of `callee` at its callsite (`i`-th instruction of `block`)"""
scope = block.scope
instr = block.body[i]
call_expr = instr.value
_debug_print("Found closure call: ", instr, " with callee = ", callee)
func_ir = self.func_ir
# first, get the IR of the callee
from_ir = self.get_ir_of_code(callee.code)
from_blocks = from_ir.blocks
# 1. relabel from_ir by adding an offset
max_label = max(func_ir.blocks.keys())
from_blocks = add_offset_to_labels(from_blocks, max_label + 1)
from_ir.blocks = from_blocks
min_label = min(from_blocks.keys())
max_label = max(from_blocks.keys())
# reset globals in ir_utils before we use it
ir_utils._max_label = max_label
ir_utils.visit_vars_extensions = {}
# 2. rename all local variables in from_ir with new locals created in func_ir
from_scopes = _get_all_scopes(from_blocks)
_debug_print("obj_IR has scopes: ", from_scopes)
# one function should only have one local scope
assert len(from_scopes) == 1
from_scope = from_scopes[0]
var_dict = {}
for var in from_scope.localvars._con.values():
if not (var.name in callee.code.co_freevars):
var_dict[var.name] = scope.make_temp(var.loc)
_debug_print("Before local var rename: var_dict = ", var_dict)
_debug_dump(from_ir)
replace_vars(from_blocks, var_dict)
_debug_print("After local var rename: ")
_debug_dump(from_ir)
# 3. replace formal parameters with actual arguments
args = list(call_expr.args)
if callee.defaults:
_debug_print("defaults", callee.defaults)
if isinstance(callee.defaults, tuple): # Python 3.5
args = args + list(callee.defaults)
elif isinstance(callee.defaults, ir.Var) or isinstance(callee.defaults, str):
defaults = func_ir.get_definition(callee.defaults)
assert isinstance(defaults, ir.Const)
loc = defaults.loc
args = args + [ir.Const(value=v, loc=loc) for v in defaults.value]
else:
raise NotImplementedError(
"Unsupported defaults to make_function: {}".format(defaults)
)
_replace_args_with(from_blocks, args)
_debug_print("After arguments rename: ")
_debug_dump(from_ir)
# 4. replace freevar with actual closure var
if callee.closure:
closure = func_ir.get_definition(callee.closure)
assert isinstance(closure, ir.Expr) and closure.op == "build_tuple"
assert len(callee.code.co_freevars) == len(closure.items)
_debug_print("callee's closure = ", closure)
_replace_freevars(from_blocks, closure.items)
_debug_print("After closure rename: ")
_debug_dump(from_ir)
# 5. split caller blocks into two
new_blocks = []
new_block = ir.Block(scope, block.loc)
new_block.body = block.body[i + 1 :]
new_label = next_label()
func_ir.blocks[new_label] = new_block
new_blocks.append((new_label, new_block))
block.body = block.body[:i]
block.body.append(ir.Jump(min_label, instr.loc))
# 6. replace Return with assignment to LHS
_replace_returns(from_blocks, instr.target, new_label)
# 7. insert all new blocks, and add back definitions
for label, block in from_blocks.items():
# block scope must point to parent's
block.scope = scope
_add_definition(func_ir, block)
func_ir.blocks[label] = block
new_blocks.append((label, block))
_debug_print("After merge: ")
_debug_dump(func_ir)
return new_blocks
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def _replace_returns(blocks, target, return_label):
"""
Return return statement by assigning directly to target, and a jump.
"""
for label, block in blocks.items():
casts = []
for i in range(len(block.body)):
stmt = block.body[i]
if isinstance(stmt, ir.Return):
assert i + 1 == len(block.body)
block.body[i] = ir.Assign(stmt.value, target, stmt.loc)
block.body.append(ir.Jump(return_label, stmt.loc))
# remove cast of the returned value
for cast in casts:
if cast.target.name == stmt.value.name:
cast.value = cast.value.value
elif (
isinstance(stmt, ir.Assign)
and isinstance(stmt.value, ir.Expr)
and stmt.value.op == "cast"
):
casts.append(stmt)
|
def _replace_returns(blocks, target, return_label):
"""
Return return statement by assigning directly to target, and a jump.
"""
for label, block in blocks.items():
for i in range(len(block.body)):
stmt = block.body[i]
if isinstance(stmt, ir.Return):
assert i + 1 == len(block.body)
block.body[i] = ir.Assign(stmt.value, target, stmt.loc)
block.body.append(ir.Jump(return_label, stmt.loc))
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def get_definition(self, value, lhs_only=False):
"""
Get the definition site for the given variable name or instance.
A Expr instance is returned by default, but if lhs_only is set
to True, the left-hand-side variable is returned instead.
"""
lhs = value
while True:
if isinstance(value, Var):
lhs = value
name = value.name
elif isinstance(value, str):
lhs = value
name = value
else:
return lhs if lhs_only else value
defs = self._definitions[name]
if len(defs) == 0:
raise KeyError("no definition for %r" % (name,))
if len(defs) > 1:
raise KeyError("more than one definition for %r" % (name,))
value = defs[0]
|
def get_definition(self, value):
"""
Get the definition site for the given variable name or instance.
A Expr instance is returned.
"""
while True:
if isinstance(value, Var):
name = value.name
elif isinstance(value, str):
name = value
else:
return value
defs = self._definitions[name]
if len(defs) == 0:
raise KeyError("no definition for %r" % (name,))
if len(defs) > 1:
raise KeyError("more than one definition for %r" % (name,))
value = defs[0]
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def mk_alloc(typemap, calltypes, lhs, size_var, dtype, scope, loc):
"""generate an array allocation with np.empty() and return list of nodes.
size_var can be an int variable or tuple of int variables.
"""
out = []
ndims = 1
size_typ = types.intp
if isinstance(size_var, tuple):
if len(size_var) == 1:
size_var = size_var[0]
size_var = convert_size_to_var(size_var, typemap, scope, loc, out)
else:
# tuple_var = build_tuple([size_var...])
ndims = len(size_var)
tuple_var = ir.Var(scope, mk_unique_var("$tuple_var"), loc)
if typemap:
typemap[tuple_var.name] = types.containers.UniTuple(types.intp, ndims)
# constant sizes need to be assigned to vars
new_sizes = [
convert_size_to_var(s, typemap, scope, loc, out) for s in size_var
]
tuple_call = ir.Expr.build_tuple(new_sizes, loc)
tuple_assign = ir.Assign(tuple_call, tuple_var, loc)
out.append(tuple_assign)
size_var = tuple_var
size_typ = types.containers.UniTuple(types.intp, ndims)
# g_np_var = Global(numpy)
g_np_var = ir.Var(scope, mk_unique_var("$np_g_var"), loc)
if typemap:
typemap[g_np_var.name] = types.misc.Module(numpy)
g_np = ir.Global("np", numpy, loc)
g_np_assign = ir.Assign(g_np, g_np_var, loc)
# attr call: empty_attr = getattr(g_np_var, empty)
empty_attr_call = ir.Expr.getattr(g_np_var, "empty", loc)
attr_var = ir.Var(scope, mk_unique_var("$empty_attr_attr"), loc)
if typemap:
typemap[attr_var.name] = get_np_ufunc_typ(numpy.empty)
attr_assign = ir.Assign(empty_attr_call, attr_var, loc)
# alloc call: lhs = empty_attr(size_var, typ_var)
typ_var = ir.Var(scope, mk_unique_var("$np_typ_var"), loc)
if typemap:
typemap[typ_var.name] = types.functions.NumberClass(dtype)
# assuming str(dtype) returns valid np dtype string
np_typ_getattr = ir.Expr.getattr(g_np_var, str(dtype), loc)
typ_var_assign = ir.Assign(np_typ_getattr, typ_var, loc)
alloc_call = ir.Expr.call(attr_var, [size_var, typ_var], (), loc)
if calltypes:
calltypes[alloc_call] = typemap[attr_var.name].get_call_type(
typing.Context(), [size_typ, types.functions.NumberClass(dtype)], {}
)
# signature(
# types.npytypes.Array(dtype, ndims, 'C'), size_typ,
# types.functions.NumberClass(dtype))
alloc_assign = ir.Assign(alloc_call, lhs, loc)
out.extend([g_np_assign, attr_assign, typ_var_assign, alloc_assign])
return out
|
def mk_alloc(typemap, calltypes, lhs, size_var, dtype, scope, loc):
"""generate an array allocation with np.empty() and return list of nodes.
size_var can be an int variable or tuple of int variables.
"""
out = []
ndims = 1
size_typ = types.intp
if isinstance(size_var, tuple):
if len(size_var) == 1:
size_var = size_var[0]
else:
# tuple_var = build_tuple([size_var...])
ndims = len(size_var)
tuple_var = ir.Var(scope, mk_unique_var("$tuple_var"), loc)
if typemap:
typemap[tuple_var.name] = types.containers.UniTuple(types.intp, ndims)
# constant sizes need to be assigned to vars
new_sizes = []
for size in size_var:
if isinstance(size, ir.Var):
new_size = size
else:
assert isinstance(size, int)
new_size = ir.Var(scope, mk_unique_var("$alloc_size"), loc)
if typemap:
typemap[new_size.name] = types.intp
size_assign = ir.Assign(ir.Const(size, loc), new_size, loc)
out.append(size_assign)
new_sizes.append(new_size)
tuple_call = ir.Expr.build_tuple(new_sizes, loc)
tuple_assign = ir.Assign(tuple_call, tuple_var, loc)
out.append(tuple_assign)
size_var = tuple_var
size_typ = types.containers.UniTuple(types.intp, ndims)
# g_np_var = Global(numpy)
g_np_var = ir.Var(scope, mk_unique_var("$np_g_var"), loc)
if typemap:
typemap[g_np_var.name] = types.misc.Module(numpy)
g_np = ir.Global("np", numpy, loc)
g_np_assign = ir.Assign(g_np, g_np_var, loc)
# attr call: empty_attr = getattr(g_np_var, empty)
empty_attr_call = ir.Expr.getattr(g_np_var, "empty", loc)
attr_var = ir.Var(scope, mk_unique_var("$empty_attr_attr"), loc)
if typemap:
typemap[attr_var.name] = get_np_ufunc_typ(numpy.empty)
attr_assign = ir.Assign(empty_attr_call, attr_var, loc)
# alloc call: lhs = empty_attr(size_var, typ_var)
typ_var = ir.Var(scope, mk_unique_var("$np_typ_var"), loc)
if typemap:
typemap[typ_var.name] = types.functions.NumberClass(dtype)
# assuming str(dtype) returns valid np dtype string
np_typ_getattr = ir.Expr.getattr(g_np_var, str(dtype), loc)
typ_var_assign = ir.Assign(np_typ_getattr, typ_var, loc)
alloc_call = ir.Expr.call(attr_var, [size_var, typ_var], (), loc)
if calltypes:
calltypes[alloc_call] = typemap[attr_var.name].get_call_type(
typing.Context(), [size_typ, types.functions.NumberClass(dtype)], {}
)
# signature(
# types.npytypes.Array(dtype, ndims, 'C'), size_typ,
# types.functions.NumberClass(dtype))
alloc_assign = ir.Assign(alloc_call, lhs, loc)
out.extend([g_np_assign, attr_assign, typ_var_assign, alloc_assign])
return out
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def visit_vars_stmt(stmt, callback, cbdata):
# let external calls handle stmt if type matches
for t, f in visit_vars_extensions.items():
if isinstance(stmt, t):
f(stmt, callback, cbdata)
return
if isinstance(stmt, ir.Assign):
stmt.target = visit_vars_inner(stmt.target, callback, cbdata)
stmt.value = visit_vars_inner(stmt.value, callback, cbdata)
elif isinstance(stmt, ir.Arg):
stmt.name = visit_vars_inner(stmt.name, callback, cbdata)
elif isinstance(stmt, ir.Return):
stmt.value = visit_vars_inner(stmt.value, callback, cbdata)
elif isinstance(stmt, ir.Branch):
stmt.cond = visit_vars_inner(stmt.cond, callback, cbdata)
elif isinstance(stmt, ir.Jump):
stmt.target = visit_vars_inner(stmt.target, callback, cbdata)
elif isinstance(stmt, ir.Del):
# Because Del takes only a var name, we make up by
# constructing a temporary variable.
var = ir.Var(None, stmt.value, stmt.loc)
var = visit_vars_inner(var, callback, cbdata)
stmt.value = var.name
elif isinstance(stmt, ir.DelAttr):
stmt.target = visit_vars_inner(stmt.target, callback, cbdata)
stmt.attr = visit_vars_inner(stmt.attr, callback, cbdata)
elif isinstance(stmt, ir.SetAttr):
stmt.target = visit_vars_inner(stmt.target, callback, cbdata)
stmt.attr = visit_vars_inner(stmt.attr, callback, cbdata)
stmt.value = visit_vars_inner(stmt.value, callback, cbdata)
elif isinstance(stmt, ir.DelItem):
stmt.target = visit_vars_inner(stmt.target, callback, cbdata)
stmt.index = visit_vars_inner(stmt.index, callback, cbdata)
elif isinstance(stmt, ir.StaticSetItem):
stmt.target = visit_vars_inner(stmt.target, callback, cbdata)
stmt.index_var = visit_vars_inner(stmt.index_var, callback, cbdata)
stmt.value = visit_vars_inner(stmt.value, callback, cbdata)
elif isinstance(stmt, ir.SetItem):
stmt.target = visit_vars_inner(stmt.target, callback, cbdata)
stmt.index = visit_vars_inner(stmt.index, callback, cbdata)
stmt.value = visit_vars_inner(stmt.value, callback, cbdata)
else:
# TODO: raise NotImplementedError("no replacement for IR node: ", stmt)
pass
return
|
def visit_vars_stmt(stmt, callback, cbdata):
# let external calls handle stmt if type matches
for t, f in visit_vars_extensions.items():
if isinstance(stmt, t):
f(stmt, callback, cbdata)
return
if isinstance(stmt, ir.Assign):
stmt.target = visit_vars_inner(stmt.target, callback, cbdata)
stmt.value = visit_vars_inner(stmt.value, callback, cbdata)
elif isinstance(stmt, ir.Arg):
stmt.name = visit_vars_inner(stmt.name, callback, cbdata)
elif isinstance(stmt, ir.Return):
stmt.value = visit_vars_inner(stmt.value, callback, cbdata)
elif isinstance(stmt, ir.Branch):
stmt.cond = visit_vars_inner(stmt.cond, callback, cbdata)
elif isinstance(stmt, ir.Jump):
stmt.target = visit_vars_inner(stmt.target, callback, cbdata)
elif isinstance(stmt, ir.Del):
# Because Del takes only a var name, we make up by
# constructing a temporary variable.
var = ir.Var(None, stmt.value, stmt.loc)
var = visit_vars_inner(var, callback, cbdata)
stmt.value = var.name
elif isinstance(stmt, ir.DelAttr):
stmt.target = visit_vars_inner(stmt.target, callback, cbdata)
stmt.attr = visit_vars_inner(stmt.attr, callback, cbdata)
elif isinstance(stmt, ir.SetAttr):
stmt.target = visit_vars_inner(stmt.target, callback, cbdata)
stmt.attr = visit_vars_inner(stmt.attr, callback, cbdata)
stmt.value = visit_vars_inner(stmt.value, callback, cbdata)
elif isinstance(stmt, ir.DelItem):
stmt.target = visit_vars_inner(stmt.target, callback, cbdata)
stmt.index = visit_vars_inner(stmt.index, callback, cbdata)
elif isinstance(stmt, ir.StaticSetItem):
stmt.target = visit_vars_inner(stmt.target, callback, cbdata)
stmt.index_var = visit_vars_inner(stmt.index_var, callback, cbdata)
stmt.value = visit_vars_inner(stmt.value, callback, cbdata)
elif isinstance(stmt, ir.SetItem):
stmt.target = visit_vars_inner(stmt.target, callback, cbdata)
stmt.index = visit_vars_inner(stmt.index, callback, cbdata)
stmt.value = visit_vars_inner(stmt.value, callback, cbdata)
else:
pass # TODO: raise NotImplementedError("no replacement for IR node: ", stmt)
return
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def add_offset_to_labels(blocks, offset):
"""add an offset to all block labels and jump/branch targets"""
new_blocks = {}
for l, b in blocks.items():
# some parfor last blocks might be empty
term = None
if b.body:
term = b.body[-1]
for inst in b.body:
for T, f in add_offset_to_labels_extensions.items():
if isinstance(inst, T):
f_max = f(inst, offset)
if isinstance(term, ir.Jump):
term.target += offset
if isinstance(term, ir.Branch):
term.truebr += offset
term.falsebr += offset
new_blocks[l + offset] = b
return new_blocks
|
def add_offset_to_labels(blocks, offset):
"""add an offset to all block labels and jump/branch targets"""
new_blocks = {}
for l, b in blocks.items():
term = b.body[-1]
if isinstance(term, ir.Jump):
term.target += offset
if isinstance(term, ir.Branch):
term.truebr += offset
term.falsebr += offset
new_blocks[l + offset] = b
return new_blocks
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def remove_dead(blocks, args, typemap=None, alias_map=None, arg_aliases=None):
"""dead code elimination using liveness and CFG info.
Returns True if something has been removed, or False if nothing is removed.
"""
cfg = compute_cfg_from_blocks(blocks)
usedefs = compute_use_defs(blocks)
live_map = compute_live_map(cfg, blocks, usedefs.usemap, usedefs.defmap)
if alias_map is None or arg_aliases is None:
alias_map, arg_aliases = find_potential_aliases(blocks, args, typemap)
if config.DEBUG_ARRAY_OPT == 1:
print("alias map:", alias_map)
# keep set for easier search
alias_set = set(alias_map.keys())
call_table, _ = get_call_table(blocks)
removed = False
for label, block in blocks.items():
# find live variables at each statement to delete dead assignment
lives = {v.name for v in block.terminator.list_vars()}
# find live variables at the end of block
for out_blk, _data in cfg.successors(label):
lives |= live_map[out_blk]
lives |= arg_aliases
removed |= remove_dead_block(
block, lives, call_table, arg_aliases, alias_map, alias_set, typemap
)
return removed
|
def remove_dead(blocks, args):
"""dead code elimination using liveness and CFG info.
Returns True if something has been removed, or False if nothing is removed."""
cfg = compute_cfg_from_blocks(blocks)
usedefs = compute_use_defs(blocks)
live_map = compute_live_map(cfg, blocks, usedefs.usemap, usedefs.defmap)
arg_aliases = find_potential_aliases(blocks, args)
call_table, _ = get_call_table(blocks)
removed = False
for label, block in blocks.items():
# find live variables at each statement to delete dead assignment
lives = {v.name for v in block.terminator.list_vars()}
# find live variables at the end of block
for out_blk, _data in cfg.successors(label):
lives |= live_map[out_blk]
if label in cfg.exit_points():
lives |= arg_aliases
removed |= remove_dead_block(block, lives, call_table, arg_aliases)
return removed
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def remove_dead_block(
block, lives, call_table, arg_aliases, alias_map, alias_set, typemap
):
"""remove dead code using liveness info.
Mutable arguments (e.g. arrays) that are not definitely assigned are live
after return of function.
"""
# TODO: find mutable args that are not definitely assigned instead of
# assuming all args are live after return
removed = False
# add statements in reverse order
new_body = [block.terminator]
# for each statement in reverse order, excluding terminator
for stmt in reversed(block.body[:-1]):
# aliases of lives are also live
alias_lives = set()
init_alias_lives = lives & alias_set
for v in init_alias_lives:
alias_lives |= alias_map[v]
# let external calls handle stmt if type matches
for t, f in remove_dead_extensions.items():
if isinstance(stmt, t):
f(stmt, lives, arg_aliases, alias_map, typemap)
# ignore assignments that their lhs is not live or lhs==rhs
if isinstance(stmt, ir.Assign):
lhs = stmt.target
rhs = stmt.value
if lhs.name not in lives and has_no_side_effect(rhs, lives, call_table):
removed = True
continue
if isinstance(rhs, ir.Var) and lhs.name == rhs.name:
removed = True
continue
# TODO: remove other nodes like SetItem etc.
if isinstance(stmt, ir.SetItem):
if stmt.target.name not in lives and stmt.target.name not in alias_lives:
continue
if type(stmt) in analysis.ir_extension_usedefs:
def_func = analysis.ir_extension_usedefs[type(stmt)]
uses, defs = def_func(stmt)
lives -= defs
lives |= uses
else:
lives |= {v.name for v in stmt.list_vars()}
if isinstance(stmt, ir.Assign):
lives.remove(lhs.name)
new_body.append(stmt)
new_body.reverse()
block.body = new_body
return removed
|
def remove_dead_block(block, lives, call_table, args):
"""remove dead code using liveness info.
Mutable arguments (e.g. arrays) that are not definitely assigned are live
after return of function.
"""
# TODO: find mutable args that are not definitely assigned instead of
# assuming all args are live after return
removed = False
# add statements in reverse order
new_body = [block.terminator]
# for each statement in reverse order, excluding terminator
for stmt in reversed(block.body[:-1]):
# let external calls handle stmt if type matches
for t, f in remove_dead_extensions.items():
if isinstance(stmt, t):
f(stmt, lives, args)
# ignore assignments that their lhs is not live or lhs==rhs
if isinstance(stmt, ir.Assign):
lhs = stmt.target
rhs = stmt.value
if lhs.name not in lives and has_no_side_effect(rhs, lives, call_table):
removed = True
continue
if isinstance(rhs, ir.Var) and lhs.name == rhs.name:
removed = True
continue
# TODO: remove other nodes like SetItem etc.
if isinstance(stmt, ir.SetItem):
if stmt.target.name not in lives:
continue
lives |= {v.name for v in stmt.list_vars()}
if isinstance(stmt, ir.Assign):
lives.remove(lhs.name)
for T, def_func in analysis.ir_extension_defs.items():
if isinstance(stmt, T):
lives -= def_func(stmt)
new_body.append(stmt)
new_body.reverse()
block.body = new_body
return removed
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def has_no_side_effect(rhs, lives, call_table):
# TODO: find side-effect free calls like Numpy calls
if isinstance(rhs, ir.Expr) and rhs.op == "call":
func_name = rhs.func.name
if func_name not in call_table or call_table[func_name] == []:
return False
call_list = call_table[func_name]
if call_list == ["empty", numpy] or call_list == [slice]:
return True
from numba.targets.registry import CPUDispatcher
from numba.targets.linalg import dot_3_mv_check_args
if isinstance(call_list[0], CPUDispatcher):
py_func = call_list[0].py_func
if py_func == dot_3_mv_check_args:
return True
return False
if isinstance(rhs, ir.Expr) and rhs.op == "inplace_binop":
return rhs.lhs.name not in lives
if isinstance(rhs, ir.Yield):
return False
return True
|
def has_no_side_effect(rhs, lives, call_table):
# TODO: find side-effect free calls like Numpy calls
if isinstance(rhs, ir.Expr) and rhs.op == "call":
func_name = rhs.func.name
if func_name not in call_table:
return False
call_list = call_table[func_name]
if call_list == ["empty", numpy] or call_list == [slice]:
return True
return False
if isinstance(rhs, ir.Expr) and rhs.op == "inplace_binop":
return rhs.lhs.name not in lives
if isinstance(rhs, ir.Yield):
return False
return True
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def find_potential_aliases(blocks, args, typemap, alias_map=None, arg_aliases=None):
"find all array aliases and argument aliases to avoid remove as dead"
if alias_map is None:
alias_map = {}
if arg_aliases is None:
arg_aliases = set(a for a in args if not is_immutable_type(a, typemap))
for bl in blocks.values():
for instr in bl.body:
if type(instr) in alias_analysis_extensions:
f = alias_analysis_extensions[type(instr)]
f(instr, args, typemap, alias_map, arg_aliases)
if isinstance(instr, ir.Assign):
expr = instr.value
lhs = instr.target.name
# only mutable types can alias
if is_immutable_type(lhs, typemap):
continue
if isinstance(expr, ir.Var) and lhs != expr.name:
_add_alias(lhs, expr.name, alias_map, arg_aliases)
# subarrays like A = B[0] for 2D B
if isinstance(expr, ir.Expr) and expr.op in [
"getitem",
"static_getitem",
]:
_add_alias(lhs, expr.value.name, alias_map, arg_aliases)
# copy to avoid changing size during iteration
old_alias_map = copy.deepcopy(alias_map)
# combine all aliases transitively
for v in old_alias_map:
for w in old_alias_map[v]:
alias_map[v] |= alias_map[w]
for w in old_alias_map[v]:
alias_map[w] = alias_map[v]
return alias_map, arg_aliases
|
def find_potential_aliases(blocks, args):
aliases = set(args)
for bl in blocks.values():
for instr in bl.body:
if isinstance(instr, ir.Assign):
expr = instr.value
lhs = instr.target.name
if isinstance(expr, ir.Var) and expr.name in aliases:
aliases.add(lhs)
return aliases
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def get_block_copies(blocks, typemap):
"""get copies generated and killed by each block"""
block_copies = {}
extra_kill = {}
for label, block in blocks.items():
assign_dict = {}
extra_kill[label] = set()
# assignments as dict to replace with latest value
for stmt in block.body:
for T, f in copy_propagate_extensions.items():
if isinstance(stmt, T):
gen_set, kill_set = f(stmt, typemap)
for lhs, rhs in gen_set:
assign_dict[lhs] = rhs
# if a=b is in dict and b is killed, a is also killed
new_assign_dict = {}
for l, r in assign_dict.items():
if l not in kill_set and r not in kill_set:
new_assign_dict[l] = r
if r in kill_set:
extra_kill[label].add(l)
assign_dict = new_assign_dict
extra_kill[label] |= kill_set
if isinstance(stmt, ir.Assign):
lhs = stmt.target.name
if isinstance(stmt.value, ir.Var):
rhs = stmt.value.name
# copy is valid only if same type (see
# TestCFunc.test_locals)
if typemap[lhs] == typemap[rhs]:
assign_dict[lhs] = rhs
continue
if isinstance(stmt.value, ir.Expr) and stmt.value.op == "inplace_binop":
in1_var = stmt.value.lhs.name
in1_typ = typemap[in1_var]
# inplace_binop assigns first operand if mutable
if not (
isinstance(in1_typ, types.Number) or in1_typ == types.string
):
extra_kill[label].add(in1_var)
# if a=b is in dict and b is killed, a is also killed
new_assign_dict = {}
for l, r in assign_dict.items():
if l != in1_var and r != in1_var:
new_assign_dict[l] = r
if r == in1_var:
extra_kill[label].add(l)
assign_dict = new_assign_dict
extra_kill[label].add(lhs)
block_cps = set(assign_dict.items())
block_copies[label] = block_cps
return block_copies, extra_kill
|
def get_block_copies(blocks, typemap):
"""get copies generated and killed by each block"""
block_copies = {}
extra_kill = {}
for label, block in blocks.items():
assign_dict = {}
extra_kill[label] = set()
# assignments as dict to replace with latest value
for stmt in block.body:
for T, f in copy_propagate_extensions.items():
if isinstance(stmt, T):
gen_set, kill_set = f(stmt, typemap)
for lhs, rhs in gen_set:
assign_dict[lhs] = rhs
extra_kill[label] |= kill_set
if isinstance(stmt, ir.Assign):
lhs = stmt.target.name
if isinstance(stmt.value, ir.Var):
rhs = stmt.value.name
# copy is valid only if same type (see TestCFunc.test_locals)
if typemap[lhs] == typemap[rhs]:
assign_dict[lhs] = rhs
continue
extra_kill[label].add(lhs)
block_copies[label] = set(assign_dict.items())
return block_copies, extra_kill
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def apply_copy_propagate(
blocks, in_copies, name_var_table, ext_func, ext_data, typemap, calltypes
):
"""apply copy propagation to IR: replace variables when copies available"""
for label, block in blocks.items():
var_dict = {l: name_var_table[r] for l, r in in_copies[label]}
# assignments as dict to replace with latest value
for stmt in block.body:
ext_func(stmt, var_dict, ext_data)
if type(stmt) in apply_copy_propagate_extensions:
f = apply_copy_propagate_extensions[type(stmt)]
f(
stmt,
var_dict,
name_var_table,
ext_func,
ext_data,
typemap,
calltypes,
)
# only rhs of assignments should be replaced
# e.g. if x=y is available, x in x=z shouldn't be replaced
elif isinstance(stmt, ir.Assign):
stmt.value = replace_vars_inner(stmt.value, var_dict)
else:
replace_vars_stmt(stmt, var_dict)
fix_setitem_type(stmt, typemap, calltypes)
for T, f in copy_propagate_extensions.items():
if isinstance(stmt, T):
gen_set, kill_set = f(stmt, typemap)
for lhs, rhs in gen_set:
if rhs in name_var_table:
var_dict[lhs] = name_var_table[rhs]
for l, r in var_dict.copy().items():
if l in kill_set or r.name in kill_set:
var_dict.pop(l)
if isinstance(stmt, ir.Assign) and isinstance(stmt.value, ir.Var):
lhs = stmt.target.name
rhs = stmt.value.name
# rhs could be replaced with lhs from previous copies
if lhs != rhs:
# copy is valid only if same type (see
# TestCFunc.test_locals)
if typemap[lhs] == typemap[rhs] and rhs in name_var_table:
var_dict[lhs] = name_var_table[rhs]
else:
var_dict.pop(lhs, None)
# a=b kills previous t=a
lhs_kill = []
for k, v in var_dict.items():
if v.name == lhs:
lhs_kill.append(k)
for k in lhs_kill:
var_dict.pop(k, None)
return
|
def apply_copy_propagate(
blocks, in_copies, name_var_table, ext_func, ext_data, typemap, calltypes
):
"""apply copy propagation to IR: replace variables when copies available"""
for label, block in blocks.items():
var_dict = {l: name_var_table[r] for l, r in in_copies[label]}
# assignments as dict to replace with latest value
for stmt in block.body:
ext_func(stmt, var_dict, ext_data)
for T, f in apply_copy_propagate_extensions.items():
if isinstance(stmt, T):
f(
stmt,
var_dict,
name_var_table,
ext_func,
ext_data,
typemap,
calltypes,
)
# only rhs of assignments should be replaced
# e.g. if x=y is available, x in x=z shouldn't be replaced
if isinstance(stmt, ir.Assign):
stmt.value = replace_vars_inner(stmt.value, var_dict)
else:
replace_vars_stmt(stmt, var_dict)
fix_setitem_type(stmt, typemap, calltypes)
for T, f in copy_propagate_extensions.items():
if isinstance(stmt, T):
gen_set, kill_set = f(stmt, typemap)
for lhs, rhs in gen_set:
var_dict[lhs] = name_var_table[rhs]
for l, r in var_dict.copy().items():
if l in kill_set or r.name in kill_set:
var_dict.pop(l)
if isinstance(stmt, ir.Assign) and isinstance(stmt.value, ir.Var):
lhs = stmt.target.name
rhs = stmt.value.name
# rhs could be replaced with lhs from previous copies
if lhs != rhs:
# copy is valid only if same type (see TestCFunc.test_locals)
if typemap[lhs] == typemap[rhs]:
var_dict[lhs] = name_var_table[rhs]
else:
var_dict.pop(lhs, None)
# a=b kills previous t=a
lhs_kill = []
for k, v in var_dict.items():
if v.name == lhs:
lhs_kill.append(k)
for k in lhs_kill:
var_dict.pop(k, None)
return
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def get_call_table(blocks, call_table=None, reverse_call_table=None):
"""returns a dictionary of call variables and their references."""
# call_table example: c = np.zeros becomes c:["zeroes", np]
# reverse_call_table example: c = np.zeros becomes np_var:c
if call_table is None:
call_table = {}
if reverse_call_table is None:
reverse_call_table = {}
topo_order = find_topo_order(blocks)
for label in reversed(topo_order):
for inst in reversed(blocks[label].body):
if isinstance(inst, ir.Assign):
lhs = inst.target.name
rhs = inst.value
if isinstance(rhs, ir.Expr) and rhs.op == "call":
call_table[rhs.func.name] = []
if isinstance(rhs, ir.Expr) and rhs.op == "getattr":
if lhs in call_table:
call_table[lhs].append(rhs.attr)
reverse_call_table[rhs.value.name] = lhs
if lhs in reverse_call_table:
call_var = reverse_call_table[lhs]
call_table[call_var].append(rhs.attr)
reverse_call_table[rhs.value.name] = call_var
if isinstance(rhs, ir.Global):
if lhs in call_table:
call_table[lhs].append(rhs.value)
if lhs in reverse_call_table:
call_var = reverse_call_table[lhs]
call_table[call_var].append(rhs.value)
for T, f in call_table_extensions.items():
if isinstance(inst, T):
f(inst, call_table, reverse_call_table)
return call_table, reverse_call_table
|
def get_call_table(blocks, call_table={}, reverse_call_table={}):
"""returns a dictionary of call variables and their references."""
# call_table example: c = np.zeros becomes c:["zeroes", np]
# reverse_call_table example: c = np.zeros becomes np_var:c
topo_order = find_topo_order(blocks)
for label in reversed(topo_order):
for inst in reversed(blocks[label].body):
if isinstance(inst, ir.Assign):
lhs = inst.target.name
rhs = inst.value
if isinstance(rhs, ir.Expr) and rhs.op == "call":
call_table[rhs.func.name] = []
if isinstance(rhs, ir.Expr) and rhs.op == "getattr":
if lhs in call_table:
call_table[lhs].append(rhs.attr)
reverse_call_table[rhs.value.name] = lhs
if lhs in reverse_call_table:
call_var = reverse_call_table[lhs]
call_table[call_var].append(rhs.attr)
reverse_call_table[rhs.value.name] = call_var
if isinstance(rhs, ir.Global):
if lhs in call_table:
call_table[lhs].append(rhs.value)
if lhs in reverse_call_table:
call_var = reverse_call_table[lhs]
call_table[call_var].append(rhs.value)
for T, f in call_table_extensions.items():
if isinstance(inst, T):
f(inst, call_table, reverse_call_table)
return call_table, reverse_call_table
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def get_tuple_table(blocks, tuple_table=None):
"""returns a dictionary of tuple variables and their values."""
if tuple_table is None:
tuple_table = {}
for block in blocks.values():
for inst in block.body:
if isinstance(inst, ir.Assign):
lhs = inst.target.name
rhs = inst.value
if isinstance(rhs, ir.Expr) and rhs.op == "build_tuple":
tuple_table[lhs] = rhs.items
if isinstance(rhs, ir.Const) and isinstance(rhs.value, tuple):
tuple_table[lhs] = rhs.value
for T, f in tuple_table_extensions.items():
if isinstance(inst, T):
f(inst, tuple_table)
return tuple_table
|
def get_tuple_table(blocks, tuple_table={}):
"""returns a dictionary of tuple variables and their values."""
for block in blocks.values():
for inst in block.body:
if isinstance(inst, ir.Assign):
lhs = inst.target.name
rhs = inst.value
if isinstance(rhs, ir.Expr) and rhs.op == "build_tuple":
tuple_table[lhs] = rhs.items
if isinstance(rhs, ir.Const) and isinstance(rhs.value, tuple):
tuple_table[lhs] = rhs.value
for T, f in tuple_table_extensions.items():
if isinstance(inst, T):
f(inst, tuple_table)
return tuple_table
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def get_array_accesses(blocks, accesses=None):
"""returns a dictionary of arrays accessed and their indices."""
if accesses is None:
accesses = {}
for block in blocks.values():
for inst in block.body:
if isinstance(inst, ir.SetItem):
accesses[inst.target.name] = inst.index.name
if isinstance(inst, ir.StaticSetItem):
accesses[inst.target.name] = inst.index_var.name
if isinstance(inst, ir.Assign):
lhs = inst.target.name
rhs = inst.value
if isinstance(rhs, ir.Expr) and rhs.op == "getitem":
accesses[rhs.value.name] = rhs.index.name
if isinstance(rhs, ir.Expr) and rhs.op == "static_getitem":
accesses[rhs.value.name] = rhs.index_var.name
for T, f in array_accesses_extensions.items():
if isinstance(inst, T):
f(inst, accesses)
return accesses
|
def get_array_accesses(blocks, accesses={}):
"""returns a dictionary of arrays accessed and their indices."""
for block in blocks.values():
for inst in block.body:
if isinstance(inst, ir.SetItem):
accesses[inst.target.name] = inst.index.name
if isinstance(inst, ir.StaticSetItem):
accesses[inst.target.name] = inst.index_var.name
if isinstance(inst, ir.Assign):
lhs = inst.target.name
rhs = inst.value
if isinstance(rhs, ir.Expr) and rhs.op == "getitem":
accesses[rhs.value.name] = rhs.index.name
if isinstance(rhs, ir.Expr) and rhs.op == "static_getitem":
accesses[rhs.value.name] = rhs.index_var.name
for T, f in array_accesses_extensions.items():
if isinstance(inst, T):
f(inst, accesses)
return accesses
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def match(self, func_ir, block, typemap, calltypes):
"""
Using typing and a basic block, search the basic block for array
expressions.
Return True when one or more matches were found, False otherwise.
"""
# We can trivially reject everything if there are no
# calls in the type results.
if len(calltypes) == 0:
return False
self.crnt_block = block
self.typemap = typemap
# { variable name: IR assignment (of a function call or operator) }
self.array_assigns = OrderedDict()
# { variable name: IR assignment (of a constant) }
self.const_assigns = {}
assignments = block.find_insts(ir.Assign)
for instr in assignments:
target_name = instr.target.name
expr = instr.value
# Does it assign an expression to an array variable?
if isinstance(expr, ir.Expr) and isinstance(
typemap.get(target_name, None), types.Array
):
self._match_array_expr(instr, expr, target_name)
elif isinstance(expr, ir.Const):
# Track constants since we might need them for an
# array expression.
self.const_assigns[target_name] = expr
return len(self.array_assigns) > 0
|
def match(self, interp, block, typemap, calltypes):
"""
Using typing and a basic block, search the basic block for array
expressions.
Return True when one or more matches were found, False otherwise.
"""
# We can trivially reject everything if there are no
# calls in the type results.
if len(calltypes) == 0:
return False
self.crnt_block = block
self.typemap = typemap
# { variable name: IR assignment (of a function call or operator) }
self.array_assigns = OrderedDict()
# { variable name: IR assignment (of a constant) }
self.const_assigns = {}
assignments = block.find_insts(ir.Assign)
for instr in assignments:
target_name = instr.target.name
expr = instr.value
# Does it assign an expression to an array variable?
if isinstance(expr, ir.Expr) and isinstance(
typemap.get(target_name, None), types.Array
):
self._match_array_expr(instr, expr, target_name)
elif isinstance(expr, ir.Const):
# Track constants since we might need them for an
# array expression.
self.const_assigns[target_name] = expr
return len(self.array_assigns) > 0
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def _lower_parfor_parallel(lowerer, parfor):
"""Lowerer that handles LLVM code generation for parfor.
This function lowers a parfor IR node to LLVM.
The general approach is as follows:
1) The code from the parfor's init block is lowered normally
in the context of the current function.
2) The body of the parfor is transformed into a gufunc function.
3) Code is inserted into the main function that calls do_scheduling
to divide the iteration space for each thread, allocates
reduction arrays, calls the gufunc function, and then invokes
the reduction function across the reduction arrays to produce
the final reduction values.
"""
typingctx = lowerer.context.typing_context
targetctx = lowerer.context
typemap = lowerer.fndesc.typemap
if config.DEBUG_ARRAY_OPT:
print("_lower_parfor_parallel")
parfor.dump()
# produce instructions for init_block
if config.DEBUG_ARRAY_OPT:
print("init_block = ", parfor.init_block, " ", type(parfor.init_block))
for instr in parfor.init_block.body:
if config.DEBUG_ARRAY_OPT:
print("lower init_block instr = ", instr)
lowerer.lower_inst(instr)
# run get_parfor_outputs() and get_parfor_reductions() before gufunc creation
# since Jumps are modified so CFG of loop_body dict will become invalid
assert parfor.params
parfor_output_arrays = numba.parfor.get_parfor_outputs(parfor, parfor.params)
parfor_redvars, parfor_reddict = numba.parfor.get_parfor_reductions(
parfor, parfor.params
)
# compile parfor body as a separate function to be used with GUFuncWrapper
flags = compiler.Flags()
flags.set("error_model", "numpy")
flags.set("auto_parallel")
numba.parfor.sequential_parfor_lowering = True
func, func_args, func_sig = _create_gufunc_for_parfor_body(
lowerer, parfor, typemap, typingctx, targetctx, flags, {}
)
numba.parfor.sequential_parfor_lowering = False
# get the shape signature
array_shape_classes = parfor.array_analysis.array_shape_classes
func_args = ["sched"] + func_args
num_reductions = len(parfor_redvars)
num_inputs = len(func_args) - len(parfor_output_arrays) - num_reductions
if config.DEBUG_ARRAY_OPT:
print("num_inputs = ", num_inputs)
print("parfor_outputs = ", parfor_output_arrays)
print("parfor_redvars = ", parfor_redvars)
gu_signature = _create_shape_signature(
array_shape_classes, num_inputs, num_reductions, func_args, func_sig
)
if config.DEBUG_ARRAY_OPT:
print("gu_signature = ", gu_signature)
# call the func in parallel by wrapping it with ParallelGUFuncBuilder
loop_ranges = [(l.start, l.stop, l.step) for l in parfor.loop_nests]
if config.DEBUG_ARRAY_OPT:
print("loop_nests = ", parfor.loop_nests)
print("loop_ranges = ", loop_ranges)
call_parallel_gufunc(
lowerer,
func,
gu_signature,
func_sig,
func_args,
loop_ranges,
parfor_redvars,
parfor_reddict,
parfor.init_block,
)
if config.DEBUG_ARRAY_OPT:
sys.stdout.flush()
|
def _lower_parfor_parallel(lowerer, parfor):
"""Lowerer that handles LLVM code generation for parfor.
This function lowers a parfor IR node to LLVM.
The general approach is as follows:
1) The code from the parfor's init block is lowered normally
in the context of the current function.
2) The body of the parfor is transformed into a gufunc function.
3) Code is inserted into the main function that calls do_scheduling
to divide the iteration space for each thread, allocates
reduction arrays, calls the gufunc function, and then invokes
the reduction function across the reduction arrays to produce
the final reduction values.
"""
typingctx = lowerer.context.typing_context
targetctx = lowerer.context
typemap = lowerer.fndesc.typemap
if config.DEBUG_ARRAY_OPT:
print("_lower_parfor_parallel")
parfor.dump()
# produce instructions for init_block
if config.DEBUG_ARRAY_OPT:
print("init_block = ", parfor.init_block, " ", type(parfor.init_block))
for instr in parfor.init_block.body:
if config.DEBUG_ARRAY_OPT:
print("lower init_block instr = ", instr)
lowerer.lower_inst(instr)
# run get_parfor_outputs() and get_parfor_reductions() before gufunc creation
# since Jumps are modified so CFG of loop_body dict will become invalid
parfor_output_arrays = numba.parfor.get_parfor_outputs(parfor)
parfor_redvars, parfor_reddict = numba.parfor.get_parfor_reductions(parfor)
# compile parfor body as a separate function to be used with GUFuncWrapper
flags = compiler.Flags()
flags.set("error_model", "numpy")
func, func_args, func_sig = _create_gufunc_for_parfor_body(
lowerer, parfor, typemap, typingctx, targetctx, flags, {}
)
# get the shape signature
array_shape_classes = parfor.array_analysis.array_shape_classes
func_args = ["sched"] + func_args
num_reductions = len(parfor_redvars)
num_inputs = len(func_args) - len(parfor_output_arrays) - num_reductions
if config.DEBUG_ARRAY_OPT:
print("num_inputs = ", num_inputs)
print("parfor_outputs = ", parfor_output_arrays)
print("parfor_redvars = ", parfor_redvars)
gu_signature = _create_shape_signature(
array_shape_classes, num_inputs, num_reductions, func_args, func_sig
)
if config.DEBUG_ARRAY_OPT:
print("gu_signature = ", gu_signature)
# call the func in parallel by wrapping it with ParallelGUFuncBuilder
loop_ranges = [(l.start, l.stop, l.step) for l in parfor.loop_nests]
if config.DEBUG_ARRAY_OPT:
print("loop_nests = ", parfor.loop_nests)
print("loop_ranges = ", loop_ranges)
array_size_vars = parfor.array_analysis.array_size_vars
if config.DEBUG_ARRAY_OPT:
print("array_size_vars = ", sorted(array_size_vars.items()))
call_parallel_gufunc(
lowerer,
func,
gu_signature,
func_sig,
func_args,
loop_ranges,
array_size_vars,
parfor_redvars,
parfor_reddict,
parfor.init_block,
)
if config.DEBUG_ARRAY_OPT:
sys.stdout.flush()
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def _create_shape_signature(classes, num_inputs, num_reductions, args, func_sig):
"""Create shape signature for GUFunc"""
num_inouts = len(args) - num_reductions
# maximum class number for array shapes
max_shape_num = max(sum([list(x) for x in classes.values()], [1]))
if config.DEBUG_ARRAY_OPT:
print("create_shape_signature = ", max_shape_num)
gu_sin = []
gu_sout = []
count = 0
syms_sin = ()
for var, typ in zip(args, func_sig.args):
# print("create_shape_signature: var = ", var, " typ = ", typ)
count = count + 1
if isinstance(typ, types.Array):
if var in classes:
var_shape = classes[var]
assert len(var_shape) == typ.ndim
else:
var_shape = []
for i in range(typ.ndim):
max_shape_num = max_shape_num + 1
var_shape.append(max_shape_num)
# TODO: use prefix + class number instead of single char
dim_syms = tuple([chr(97 + i) for i in var_shape]) # chr(97) = 'a'
else:
dim_syms = ()
if count > num_inouts:
# assume all reduction vars are scalar
gu_sout.append(())
elif count > num_inputs and all([s in syms_sin for s in dim_syms]):
# only when dim_syms are found in gu_sin, we consider this as
# output
gu_sout.append(dim_syms)
else:
gu_sin.append(dim_syms)
syms_sin += dim_syms
return (gu_sin, gu_sout)
|
def _create_shape_signature(classes, num_inputs, num_reductions, args, func_sig):
"""Create shape signature for GUFunc"""
num_inouts = len(args) - num_reductions
# maximum class number for array shapes
max_shape_num = max(sum([list(x) for x in classes.values()], []))
if config.DEBUG_ARRAY_OPT:
print("create_shape_signature = ", max_shape_num)
gu_sin = []
gu_sout = []
count = 0
syms_sin = ()
for var, typ in zip(args, func_sig.args):
# print("create_shape_signature: var = ", var, " typ = ", typ)
count = count + 1
if isinstance(typ, types.Array):
if var in classes:
var_shape = classes[var]
assert len(var_shape) == typ.ndim
else:
var_shape = []
for i in range(typ.ndim):
max_shape_num = max_shape_num + 1
var_shape.append(max_shape_num)
# TODO: use prefix + class number instead of single char
dim_syms = tuple([chr(97 + i) for i in var_shape]) # chr(97) = 'a'
else:
dim_syms = ()
if count > num_inouts:
# assume all reduction vars are scalar
gu_sout.append(())
elif count > num_inputs and all([s in syms_sin for s in dim_syms]):
# only when dim_syms are found in gu_sin, we consider this as output
gu_sout.append(dim_syms)
else:
gu_sin.append(dim_syms)
syms_sin += dim_syms
return (gu_sin, gu_sout)
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def _create_gufunc_for_parfor_body(
lowerer, parfor, typemap, typingctx, targetctx, flags, locals
):
"""
Takes a parfor and creates a gufunc function for its body.
There are two parts to this function.
1) Code to iterate across the iteration space as defined by the schedule.
2) The parfor body that does the work for a single point in the iteration space.
Part 1 is created as Python text for simplicity with a sentinel assignment to mark the point
in the IR where the parfor body should be added.
This Python text is 'exec'ed into existence and its IR retrieved with run_frontend.
The IR is scanned for the sentinel assignment where that basic block is split and the IR
for the parfor body inserted.
"""
# TODO: need copy?
# The parfor body and the main function body share ir.Var nodes.
# We have to do some replacements of Var names in the parfor body to make them
# legal parameter names. If we don't copy then the Vars in the main function also
# would incorrectly change their name.
loop_body = copy.copy(parfor.loop_body)
parfor_dim = len(parfor.loop_nests)
loop_indices = [l.index_variable.name for l in parfor.loop_nests]
# Get all the parfor params.
parfor_params = parfor.params
# Get just the outputs of the parfor.
parfor_outputs = numba.parfor.get_parfor_outputs(parfor, parfor_params)
# Get all parfor reduction vars, and operators.
parfor_redvars, parfor_reddict = numba.parfor.get_parfor_reductions(
parfor, parfor_params
)
# Compute just the parfor inputs as a set difference.
parfor_inputs = sorted(
list(set(parfor_params) - set(parfor_outputs) - set(parfor_redvars))
)
if config.DEBUG_ARRAY_OPT == 1:
print("parfor_params = ", parfor_params, " ", type(parfor_params))
print("parfor_outputs = ", parfor_outputs, " ", type(parfor_outputs))
print("parfor_inputs = ", parfor_inputs, " ", type(parfor_inputs))
print("parfor_redvars = ", parfor_redvars, " ", type(parfor_redvars))
# Reduction variables are represented as arrays, so they go under
# different names.
parfor_redarrs = []
for var in parfor_redvars:
arr = var + "_arr"
parfor_redarrs.append(arr)
typemap[arr] = types.npytypes.Array(typemap[var], 1, "C")
# Reorder all the params so that inputs go first then outputs.
parfor_params = parfor_inputs + parfor_outputs + parfor_redarrs
if config.DEBUG_ARRAY_OPT == 1:
print("parfor_params = ", parfor_params, " ", type(parfor_params))
# print("loop_ranges = ", loop_ranges, " ", type(loop_ranges))
print("loop_indices = ", loop_indices, " ", type(loop_indices))
print("loop_body = ", loop_body, " ", type(loop_body))
_print_body(loop_body)
# Some Var are not legal parameter names so create a dict of potentially illegal
# param name to guaranteed legal name.
param_dict = legalize_names(parfor_params + parfor_redvars)
if config.DEBUG_ARRAY_OPT == 1:
print("param_dict = ", sorted(param_dict.items()), " ", type(param_dict))
# Some loop_indices are not legal parameter names so create a dict of potentially illegal
# loop index to guaranteed legal name.
ind_dict = legalize_names(loop_indices)
# Compute a new list of legal loop index names.
legal_loop_indices = [ind_dict[v] for v in loop_indices]
if config.DEBUG_ARRAY_OPT == 1:
print("ind_dict = ", sorted(ind_dict.items()), " ", type(ind_dict))
print(
"legal_loop_indices = ", legal_loop_indices, " ", type(legal_loop_indices)
)
for pd in parfor_params:
print("pd = ", pd)
print("pd type = ", typemap[pd], " ", type(typemap[pd]))
# Get the types of each parameter.
param_types = [typemap[v] for v in parfor_params]
# if config.DEBUG_ARRAY_OPT==1:
# param_types_dict = { v:typemap[v] for v in parfor_params }
# print("param_types_dict = ", param_types_dict, " ", type(param_types_dict))
# print("param_types = ", param_types, " ", type(param_types))
# Replace illegal parameter names in the loop body with legal ones.
replace_var_names(loop_body, param_dict)
# remember the name before legalizing as the actual arguments
parfor_args = parfor_params
# Change parfor_params to be legal names.
parfor_params = [param_dict[v] for v in parfor_params]
# Change parfor body to replace illegal loop index vars with legal ones.
replace_var_names(loop_body, ind_dict)
if config.DEBUG_ARRAY_OPT == 1:
print("legal parfor_params = ", parfor_params, " ", type(parfor_params))
# Determine the unique names of the scheduling and gufunc functions.
# sched_func_name = "__numba_parfor_sched_%s" % (hex(hash(parfor)).replace("-", "_"))
gufunc_name = "__numba_parfor_gufunc_%s" % (hex(hash(parfor)).replace("-", "_"))
if config.DEBUG_ARRAY_OPT:
# print("sched_func_name ", type(sched_func_name), " ", sched_func_name)
print("gufunc_name ", type(gufunc_name), " ", gufunc_name)
# Create the gufunc function.
gufunc_txt = "def " + gufunc_name + "(sched, " + (", ".join(parfor_params)) + "):\n"
# Add initialization of reduction variables
for arr, var in zip(parfor_redarrs, parfor_redvars):
gufunc_txt += " " + param_dict[var] + "=" + param_dict[arr] + "[0]\n"
# For each dimension of the parfor, create a for loop in the generated gufunc function.
# Iterate across the proper values extracted from the schedule.
# The form of the schedule is start_dim0, start_dim1, ..., start_dimN, end_dim0,
# end_dim1, ..., end_dimN
for eachdim in range(parfor_dim):
for indent in range(eachdim + 1):
gufunc_txt += " "
sched_dim = eachdim
gufunc_txt += (
"for "
+ legal_loop_indices[eachdim]
+ " in range(sched["
+ str(sched_dim)
+ "], sched["
+ str(sched_dim + parfor_dim)
+ "] + 1):\n"
)
# Add the sentinel assignment so that we can find the loop body position
# in the IR.
for indent in range(parfor_dim + 1):
gufunc_txt += " "
gufunc_txt += "__sentinel__ = 0\n"
# Add assignments of reduction variables (for returning the value)
for arr, var in zip(parfor_redarrs, parfor_redvars):
gufunc_txt += " " + param_dict[arr] + "[0] = " + param_dict[var] + "\n"
gufunc_txt += " return None\n"
if config.DEBUG_ARRAY_OPT:
print("gufunc_txt = ", type(gufunc_txt), "\n", gufunc_txt)
# Force gufunc outline into existence.
exec(gufunc_txt)
gufunc_func = eval(gufunc_name)
if config.DEBUG_ARRAY_OPT:
print("gufunc_func = ", type(gufunc_func), "\n", gufunc_func)
# Get the IR for the gufunc outline.
gufunc_ir = compiler.run_frontend(gufunc_func)
if config.DEBUG_ARRAY_OPT:
print("gufunc_ir dump ", type(gufunc_ir))
gufunc_ir.dump()
print("loop_body dump ", type(loop_body))
_print_body(loop_body)
# rename all variables in gufunc_ir afresh
var_table = get_name_var_table(gufunc_ir.blocks)
new_var_dict = {}
reserved_names = ["__sentinel__"] + list(param_dict.values()) + legal_loop_indices
for name, var in var_table.items():
if not (name in reserved_names):
new_var_dict[name] = mk_unique_var(name)
replace_var_names(gufunc_ir.blocks, new_var_dict)
if config.DEBUG_ARRAY_OPT:
print("gufunc_ir dump after renaming ")
gufunc_ir.dump()
gufunc_param_types = [numba.types.npytypes.Array(numba.intp, 1, "C")] + param_types
if config.DEBUG_ARRAY_OPT:
print(
"gufunc_param_types = ", type(gufunc_param_types), "\n", gufunc_param_types
)
gufunc_stub_last_label = max(gufunc_ir.blocks.keys())
# Add gufunc stub last label to each parfor.loop_body label to prevent
# label conflicts.
loop_body = add_offset_to_labels(loop_body, gufunc_stub_last_label)
# new label for splitting sentinel block
new_label = max(loop_body.keys()) + 1
if config.DEBUG_ARRAY_OPT:
_print_body(loop_body)
# Search all the block in the gufunc outline for the sentinel assignment.
for label, block in gufunc_ir.blocks.items():
for i, inst in enumerate(block.body):
if isinstance(inst, ir.Assign) and inst.target.name == "__sentinel__":
# We found the sentinel assignment.
loc = inst.loc
scope = block.scope
# split block across __sentinel__
# A new block is allocated for the statements prior to the sentinel
# but the new block maintains the current block label.
prev_block = ir.Block(scope, loc)
prev_block.body = block.body[:i]
# The current block is used for statements after the sentinel.
block.body = block.body[i + 1 :]
# But the current block gets a new label.
body_first_label = min(loop_body.keys())
# The previous block jumps to the minimum labelled block of the
# parfor body.
prev_block.append(ir.Jump(body_first_label, loc))
# Add all the parfor loop body blocks to the gufunc function's
# IR.
for l, b in loop_body.items():
gufunc_ir.blocks[l] = b
body_last_label = max(loop_body.keys())
gufunc_ir.blocks[new_label] = block
gufunc_ir.blocks[label] = prev_block
# Add a jump from the last parfor body block to the block containing
# statements after the sentinel.
gufunc_ir.blocks[body_last_label].append(ir.Jump(new_label, loc))
break
else:
continue
break
if config.DEBUG_ARRAY_OPT:
print("gufunc_ir last dump before renaming")
gufunc_ir.dump()
gufunc_ir.blocks = rename_labels(gufunc_ir.blocks)
remove_dels(gufunc_ir.blocks)
if config.DEBUG_ARRAY_OPT:
print("gufunc_ir last dump")
gufunc_ir.dump()
kernel_func = compiler.compile_ir(
typingctx, targetctx, gufunc_ir, gufunc_param_types, types.none, flags, locals
)
kernel_sig = signature(types.none, *gufunc_param_types)
if config.DEBUG_ARRAY_OPT:
print("kernel_sig = ", kernel_sig)
return kernel_func, parfor_args, kernel_sig
|
def _create_gufunc_for_parfor_body(
lowerer, parfor, typemap, typingctx, targetctx, flags, locals
):
"""
Takes a parfor and creates a gufunc function for its body.
There are two parts to this function.
1) Code to iterate across the iteration space as defined by the schedule.
2) The parfor body that does the work for a single point in the iteration space.
Part 1 is created as Python text for simplicity with a sentinel assignment to mark the point
in the IR where the parfor body should be added.
This Python text is 'exec'ed into existence and its IR retrieved with run_frontend.
The IR is scanned for the sentinel assignment where that basic block is split and the IR
for the parfor body inserted.
"""
# TODO: need copy?
# The parfor body and the main function body share ir.Var nodes.
# We have to do some replacements of Var names in the parfor body to make them
# legal parameter names. If we don't copy then the Vars in the main function also
# would incorrectly change their name.
loop_body = copy.copy(parfor.loop_body)
parfor_dim = len(parfor.loop_nests)
loop_indices = [l.index_variable.name for l in parfor.loop_nests]
# Get all the parfor params.
parfor_params = numba.parfor.get_parfor_params(parfor)
# Get just the outputs of the parfor.
parfor_outputs = numba.parfor.get_parfor_outputs(parfor)
# Get all parfor reduction vars, and operators.
parfor_redvars, parfor_reddict = numba.parfor.get_parfor_reductions(parfor)
# Compute just the parfor inputs as a set difference.
parfor_inputs = sorted(
list(set(parfor_params) - set(parfor_outputs) - set(parfor_redvars))
)
if config.DEBUG_ARRAY_OPT == 1:
print("parfor_params = ", parfor_params, " ", type(parfor_params))
print("parfor_outputs = ", parfor_outputs, " ", type(parfor_outputs))
print("parfor_inputs = ", parfor_inputs, " ", type(parfor_inputs))
print("parfor_redvars = ", parfor_redvars, " ", type(parfor_redvars))
# Reduction variables are represented as arrays, so they go under different names.
parfor_redarrs = []
for var in parfor_redvars:
arr = var + "_arr"
parfor_redarrs.append(arr)
typemap[arr] = types.npytypes.Array(typemap[var], 1, "C")
# Reorder all the params so that inputs go first then outputs.
parfor_params = parfor_inputs + parfor_outputs + parfor_redarrs
if config.DEBUG_ARRAY_OPT == 1:
print("parfor_params = ", parfor_params, " ", type(parfor_params))
# print("loop_ranges = ", loop_ranges, " ", type(loop_ranges))
print("loop_indices = ", loop_indices, " ", type(loop_indices))
print("loop_body = ", loop_body, " ", type(loop_body))
_print_body(loop_body)
# Some Var are not legal parameter names so create a dict of potentially illegal
# param name to guaranteed legal name.
param_dict = legalize_names(parfor_params + parfor_redvars)
if config.DEBUG_ARRAY_OPT == 1:
print("param_dict = ", sorted(param_dict.items()), " ", type(param_dict))
# Some loop_indices are not legal parameter names so create a dict of potentially illegal
# loop index to guaranteed legal name.
ind_dict = legalize_names(loop_indices)
# Compute a new list of legal loop index names.
legal_loop_indices = [ind_dict[v] for v in loop_indices]
if config.DEBUG_ARRAY_OPT == 1:
print("ind_dict = ", sorted(ind_dict.items()), " ", type(ind_dict))
print(
"legal_loop_indices = ", legal_loop_indices, " ", type(legal_loop_indices)
)
for pd in parfor_params:
print("pd = ", pd)
print("pd type = ", typemap[pd], " ", type(typemap[pd]))
# Get the types of each parameter.
param_types = [typemap[v] for v in parfor_params]
# if config.DEBUG_ARRAY_OPT==1:
# param_types_dict = { v:typemap[v] for v in parfor_params }
# print("param_types_dict = ", param_types_dict, " ", type(param_types_dict))
# print("param_types = ", param_types, " ", type(param_types))
# Replace illegal parameter names in the loop body with legal ones.
replace_var_names(loop_body, param_dict)
parfor_args = (
parfor_params # remember the name before legalizing as the actual arguments
)
# Change parfor_params to be legal names.
parfor_params = [param_dict[v] for v in parfor_params]
# Change parfor body to replace illegal loop index vars with legal ones.
replace_var_names(loop_body, ind_dict)
if config.DEBUG_ARRAY_OPT == 1:
print("legal parfor_params = ", parfor_params, " ", type(parfor_params))
# Determine the unique names of the scheduling and gufunc functions.
# sched_func_name = "__numba_parfor_sched_%s" % (hex(hash(parfor)).replace("-", "_"))
gufunc_name = "__numba_parfor_gufunc_%s" % (hex(hash(parfor)).replace("-", "_"))
if config.DEBUG_ARRAY_OPT:
# print("sched_func_name ", type(sched_func_name), " ", sched_func_name)
print("gufunc_name ", type(gufunc_name), " ", gufunc_name)
# Create the gufunc function.
gufunc_txt = "def " + gufunc_name + "(sched, " + (", ".join(parfor_params)) + "):\n"
# Add initialization of reduction variables
for arr, var in zip(parfor_redarrs, parfor_redvars):
gufunc_txt += " " + param_dict[var] + "=" + param_dict[arr] + "[0]\n"
# For each dimension of the parfor, create a for loop in the generated gufunc function.
# Iterate across the proper values extracted from the schedule.
# The form of the schedule is start_dim0, start_dim1, ..., start_dimN, end_dim0,
# end_dim1, ..., end_dimN
for eachdim in range(parfor_dim):
for indent in range(eachdim + 1):
gufunc_txt += " "
sched_dim = eachdim
gufunc_txt += (
"for "
+ legal_loop_indices[eachdim]
+ " in range(sched["
+ str(sched_dim)
+ "], sched["
+ str(sched_dim + parfor_dim)
+ "] + 1):\n"
)
# Add the sentinel assignment so that we can find the loop body position in the IR.
for indent in range(parfor_dim + 1):
gufunc_txt += " "
gufunc_txt += "__sentinel__ = 0\n"
# Add assignments of reduction variables (for returning the value)
for arr, var in zip(parfor_redarrs, parfor_redvars):
gufunc_txt += " " + param_dict[arr] + "[0] = " + param_dict[var] + "\n"
gufunc_txt += " return None\n"
if config.DEBUG_ARRAY_OPT:
print("gufunc_txt = ", type(gufunc_txt), "\n", gufunc_txt)
# Force gufunc outline into existence.
exec(gufunc_txt)
gufunc_func = eval(gufunc_name)
if config.DEBUG_ARRAY_OPT:
print("gufunc_func = ", type(gufunc_func), "\n", gufunc_func)
# Get the IR for the gufunc outline.
gufunc_ir = compiler.run_frontend(gufunc_func)
if config.DEBUG_ARRAY_OPT:
print("gufunc_ir dump ", type(gufunc_ir))
gufunc_ir.dump()
print("loop_body dump ", type(loop_body))
_print_body(loop_body)
# rename all variables in gufunc_ir afresh
var_table = get_name_var_table(gufunc_ir.blocks)
new_var_dict = {}
reserved_names = ["__sentinel__"] + list(param_dict.values()) + legal_loop_indices
for name, var in var_table.items():
if not (name in reserved_names):
new_var_dict[name] = mk_unique_var(name)
replace_var_names(gufunc_ir.blocks, new_var_dict)
if config.DEBUG_ARRAY_OPT:
print("gufunc_ir dump after renaming ")
gufunc_ir.dump()
gufunc_param_types = [numba.types.npytypes.Array(numba.intp, 1, "C")] + param_types
if config.DEBUG_ARRAY_OPT:
print(
"gufunc_param_types = ", type(gufunc_param_types), "\n", gufunc_param_types
)
gufunc_stub_last_label = max(gufunc_ir.blocks.keys())
# Add gufunc stub last label to each parfor.loop_body label to prevent label conflicts.
loop_body = add_offset_to_labels(loop_body, gufunc_stub_last_label)
# new label for splitting sentinel block
new_label = max(loop_body.keys()) + 1
if config.DEBUG_ARRAY_OPT:
_print_body(loop_body)
# Search all the block in the gufunc outline for the sentinel assignment.
for label, block in gufunc_ir.blocks.items():
for i, inst in enumerate(block.body):
if isinstance(inst, ir.Assign) and inst.target.name == "__sentinel__":
# We found the sentinel assignment.
loc = inst.loc
scope = block.scope
# split block across __sentinel__
# A new block is allocated for the statements prior to the sentinel
# but the new block maintains the current block label.
prev_block = ir.Block(scope, loc)
prev_block.body = block.body[:i]
# The current block is used for statements after the sentinel.
block.body = block.body[i + 1 :]
# But the current block gets a new label.
body_first_label = min(loop_body.keys())
# The previous block jumps to the minimum labelled block of the
# parfor body.
prev_block.append(ir.Jump(body_first_label, loc))
# Add all the parfor loop body blocks to the gufunc function's IR.
for l, b in loop_body.items():
gufunc_ir.blocks[l] = b
body_last_label = max(loop_body.keys())
gufunc_ir.blocks[new_label] = block
gufunc_ir.blocks[label] = prev_block
# Add a jump from the last parfor body block to the block containing
# statements after the sentinel.
gufunc_ir.blocks[body_last_label].append(ir.Jump(new_label, loc))
break
else:
continue
break
gufunc_ir.blocks = rename_labels(gufunc_ir.blocks)
remove_dels(gufunc_ir.blocks)
if config.DEBUG_ARRAY_OPT:
print("gufunc_ir last dump")
gufunc_ir.dump()
kernel_func = compiler.compile_ir(
typingctx, targetctx, gufunc_ir, gufunc_param_types, types.none, flags, locals
)
kernel_sig = signature(types.none, *gufunc_param_types)
if config.DEBUG_ARRAY_OPT:
print("kernel_sig = ", kernel_sig)
return kernel_func, parfor_args, kernel_sig
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def call_parallel_gufunc(
lowerer,
cres,
gu_signature,
outer_sig,
expr_args,
loop_ranges,
redvars,
reddict,
init_block,
):
"""
Adds the call to the gufunc function from the main function.
"""
context = lowerer.context
builder = lowerer.builder
library = lowerer.library
from .parallel import (
ParallelGUFuncBuilder,
build_gufunc_wrapper,
get_thread_count,
_launch_threads,
_init,
)
if config.DEBUG_ARRAY_OPT:
print("make_parallel_loop")
print("args = ", expr_args)
print(
"outer_sig = ",
outer_sig.args,
outer_sig.return_type,
outer_sig.recvr,
outer_sig.pysig,
)
print("loop_ranges = ", loop_ranges)
# Build the wrapper for GUFunc
args, return_type = sigutils.normalize_signature(outer_sig)
llvm_func = cres.library.get_function(cres.fndesc.llvm_func_name)
sin, sout = gu_signature
# These are necessary for build_gufunc_wrapper to find external symbols
_launch_threads()
_init()
wrapper_ptr, env, wrapper_name = build_gufunc_wrapper(
llvm_func, cres, sin, sout, {}
)
cres.library._ensure_finalized()
if config.DEBUG_ARRAY_OPT:
print("parallel function = ", wrapper_name, cres)
# loadvars for loop_ranges
def load_range(v):
if isinstance(v, ir.Var):
return lowerer.loadvar(v.name)
else:
return context.get_constant(types.intp, v)
num_dim = len(loop_ranges)
for i in range(num_dim):
start, stop, step = loop_ranges[i]
start = load_range(start)
stop = load_range(stop)
assert step == 1 # We do not support loop steps other than 1
step = load_range(step)
loop_ranges[i] = (start, stop, step)
if config.DEBUG_ARRAY_OPT:
print(
"call_parallel_gufunc loop_ranges[{}] = ".format(i), start, stop, step
)
cgutils.printf(
builder, "loop range[{}]: %d %d (%d)\n".format(i), start, stop, step
)
# Commonly used LLVM types and constants
byte_t = lc.Type.int(8)
byte_ptr_t = lc.Type.pointer(byte_t)
byte_ptr_ptr_t = lc.Type.pointer(byte_ptr_t)
intp_t = context.get_value_type(types.intp)
uintp_t = context.get_value_type(types.uintp)
intp_ptr_t = lc.Type.pointer(intp_t)
zero = context.get_constant(types.intp, 0)
one = context.get_constant(types.intp, 1)
one_type = one.type
sizeof_intp = context.get_abi_sizeof(intp_t)
# Prepare sched, first pop it out of expr_args, outer_sig, and gu_signature
sched_name = expr_args.pop(0)
sched_typ = outer_sig.args[0]
sched_sig = sin.pop(0)
# Call do_scheduling with appropriate arguments
dim_starts = cgutils.alloca_once(
builder, intp_t, size=context.get_constant(types.intp, num_dim), name="dims"
)
dim_stops = cgutils.alloca_once(
builder, intp_t, size=context.get_constant(types.intp, num_dim), name="dims"
)
for i in range(num_dim):
start, stop, step = loop_ranges[i]
if start.type != one_type:
start = builder.sext(start, one_type)
if stop.type != one_type:
stop = builder.sext(stop, one_type)
if step.type != one_type:
step = builder.sext(step, one_type)
# substract 1 because do-scheduling takes inclusive ranges
stop = builder.sub(stop, one)
builder.store(
start, builder.gep(dim_starts, [context.get_constant(types.intp, i)])
)
builder.store(
stop, builder.gep(dim_stops, [context.get_constant(types.intp, i)])
)
sched_size = get_thread_count() * num_dim * 2
sched = cgutils.alloca_once(
builder, intp_t, size=context.get_constant(types.intp, sched_size), name="sched"
)
debug_flag = 1 if config.DEBUG_ARRAY_OPT else 0
scheduling_fnty = lc.Type.function(
intp_ptr_t, [intp_t, intp_ptr_t, intp_ptr_t, uintp_t, intp_ptr_t, intp_t]
)
do_scheduling = builder.module.get_or_insert_function(
scheduling_fnty, name="do_scheduling"
)
builder.call(
do_scheduling,
[
context.get_constant(types.intp, num_dim),
dim_starts,
dim_stops,
context.get_constant(types.uintp, get_thread_count()),
sched,
context.get_constant(types.intp, debug_flag),
],
)
# init reduction array allocation here.
nredvars = len(redvars)
ninouts = len(expr_args) - nredvars
redarrs = []
for i in range(nredvars):
redvar_typ = lowerer.fndesc.typemap[redvars[i]]
# we need to use the default initial value instead of existing value in
# redvar
op, imop, init_val = reddict[redvars[i]]
val = context.get_constant(redvar_typ, init_val)
typ = context.get_value_type(redvar_typ)
size = get_thread_count()
arr = cgutils.alloca_once(
builder, typ, size=context.get_constant(types.intp, size)
)
redarrs.append(arr)
for j in range(size):
dst = builder.gep(arr, [context.get_constant(types.intp, j)])
builder.store(val, dst)
if config.DEBUG_ARRAY_OPT:
for i in range(get_thread_count()):
cgutils.printf(builder, "sched[" + str(i) + "] = ")
for j in range(num_dim * 2):
cgutils.printf(
builder,
"%d ",
builder.load(
builder.gep(
sched,
[context.get_constant(types.intp, i * num_dim * 2 + j)],
)
),
)
cgutils.printf(builder, "\n")
# Prepare arguments: args, shapes, steps, data
all_args = [lowerer.loadvar(x) for x in expr_args[:ninouts]] + redarrs
num_args = len(all_args)
num_inps = len(sin) + 1
args = cgutils.alloca_once(
builder,
byte_ptr_t,
size=context.get_constant(types.intp, 1 + num_args),
name="pargs",
)
array_strides = []
# sched goes first
builder.store(builder.bitcast(sched, byte_ptr_t), args)
array_strides.append(context.get_constant(types.intp, sizeof_intp))
# followed by other arguments
for i in range(num_args):
arg = all_args[i]
aty = outer_sig.args[i + 1] # skip first argument sched
dst = builder.gep(args, [context.get_constant(types.intp, i + 1)])
if i >= ninouts: # reduction variables
builder.store(builder.bitcast(arg, byte_ptr_t), dst)
elif isinstance(aty, types.ArrayCompatible):
ary = context.make_array(aty)(context, builder, arg)
strides = cgutils.unpack_tuple(builder, ary.strides, aty.ndim)
for j in range(len(strides)):
array_strides.append(strides[j])
builder.store(builder.bitcast(ary.data, byte_ptr_t), dst)
else:
if i < num_inps:
# Scalar input, need to store the value in an array of size 1
typ = (
context.get_data_type(aty)
if aty != types.boolean
else lc.Type.int(1)
)
ptr = cgutils.alloca_once(builder, typ)
builder.store(arg, ptr)
else:
# Scalar output, must allocate
typ = (
context.get_data_type(aty)
if aty != types.boolean
else lc.Type.int(1)
)
ptr = cgutils.alloca_once(builder, typ)
builder.store(builder.bitcast(ptr, byte_ptr_t), dst)
# Next, we prepare the individual dimension info recorded in gu_signature
sig_dim_dict = {}
occurances = []
occurances = [sched_sig[0]]
sig_dim_dict[sched_sig[0]] = context.get_constant(types.intp, 2 * num_dim)
for var, arg, aty, gu_sig in zip(
expr_args[:ninouts], all_args[:ninouts], outer_sig.args[1:], sin + sout
):
if config.DEBUG_ARRAY_OPT:
print("var = ", var, " gu_sig = ", gu_sig)
i = 0
for dim_sym in gu_sig:
if config.DEBUG_ARRAY_OPT:
print("var = ", var, " type = ", aty)
ary = context.make_array(aty)(context, builder, arg)
shapes = cgutils.unpack_tuple(builder, ary.shape, aty.ndim)
sig_dim_dict[dim_sym] = shapes[i]
if not (dim_sym in occurances):
if config.DEBUG_ARRAY_OPT:
print("dim_sym = ", dim_sym, ", i = ", i)
cgutils.printf(builder, dim_sym + " = %d\n", shapes[i])
occurances.append(dim_sym)
i = i + 1
# Prepare shapes, which is a single number (outer loop size), followed by
# the size of individual shape variables.
nshapes = len(sig_dim_dict) + 1
shapes = cgutils.alloca_once(builder, intp_t, size=nshapes, name="pshape")
# For now, outer loop size is the same as number of threads
builder.store(context.get_constant(types.intp, get_thread_count()), shapes)
# Individual shape variables go next
i = 1
for dim_sym in occurances:
if config.DEBUG_ARRAY_OPT:
cgutils.printf(builder, dim_sym + " = %d\n", sig_dim_dict[dim_sym])
builder.store(
sig_dim_dict[dim_sym],
builder.gep(shapes, [context.get_constant(types.intp, i)]),
)
i = i + 1
# Prepare steps for each argument. Note that all steps are counted in
# bytes.
num_steps = num_args + 1 + len(array_strides)
steps = cgutils.alloca_once(
builder, intp_t, size=context.get_constant(types.intp, num_steps), name="psteps"
)
# First goes the step size for sched, which is 2 * num_dim
builder.store(context.get_constant(types.intp, 2 * num_dim * sizeof_intp), steps)
# The steps for all others are 0. (TODO: except reduction results)
for i in range(num_args):
if i >= ninouts: # steps for reduction vars are abi_sizeof(typ)
j = i - ninouts
typ = context.get_value_type(lowerer.fndesc.typemap[redvars[j]])
sizeof = context.get_abi_sizeof(typ)
stepsize = context.get_constant(types.intp, sizeof)
else:
# steps are strides
stepsize = zero
dst = builder.gep(steps, [context.get_constant(types.intp, 1 + i)])
builder.store(stepsize, dst)
for j in range(len(array_strides)):
dst = builder.gep(steps, [context.get_constant(types.intp, 1 + num_args + j)])
builder.store(array_strides[j], dst)
# prepare data
data = builder.inttoptr(zero, byte_ptr_t)
fnty = lc.Type.function(
lc.Type.void(), [byte_ptr_ptr_t, intp_ptr_t, intp_ptr_t, byte_ptr_t]
)
fn = builder.module.get_or_insert_function(fnty, name=wrapper_name)
if config.DEBUG_ARRAY_OPT:
cgutils.printf(builder, "before calling kernel %p\n", fn)
result = builder.call(fn, [args, shapes, steps, data])
if config.DEBUG_ARRAY_OPT:
cgutils.printf(builder, "after calling kernel %p\n", fn)
scope = init_block.scope
loc = init_block.loc
calltypes = lowerer.fndesc.calltypes
# Accumulate all reduction arrays back to a single value
for i in range(get_thread_count()):
for name, arr in zip(redvars, redarrs):
tmpname = mk_unique_var(name)
op, imop, init_val = reddict[name]
src = builder.gep(arr, [context.get_constant(types.intp, i)])
val = builder.load(src)
vty = lowerer.fndesc.typemap[name]
lowerer.fndesc.typemap[tmpname] = vty
lowerer.storevar(val, tmpname)
accvar = ir.Var(scope, name, loc)
tmpvar = ir.Var(scope, tmpname, loc)
acc_call = ir.Expr.inplace_binop(op, imop, accvar, tmpvar, loc)
calltypes[acc_call] = signature(vty, vty, vty)
inst = ir.Assign(acc_call, accvar, loc)
lowerer.lower_inst(inst)
# TODO: scalar output must be assigned back to corresponding output
# variables
return
|
def call_parallel_gufunc(
lowerer,
cres,
gu_signature,
outer_sig,
expr_args,
loop_ranges,
array_size_vars,
redvars,
reddict,
init_block,
):
"""
Adds the call to the gufunc function from the main function.
"""
context = lowerer.context
builder = lowerer.builder
library = lowerer.library
from .parallel import (
ParallelGUFuncBuilder,
build_gufunc_wrapper,
get_thread_count,
_launch_threads,
_init,
)
if config.DEBUG_ARRAY_OPT:
print("make_parallel_loop")
print("args = ", expr_args)
print(
"outer_sig = ",
outer_sig.args,
outer_sig.return_type,
outer_sig.recvr,
outer_sig.pysig,
)
print("loop_ranges = ", loop_ranges)
# Build the wrapper for GUFunc
args, return_type = sigutils.normalize_signature(outer_sig)
llvm_func = cres.library.get_function(cres.fndesc.llvm_func_name)
sin, sout = gu_signature
# These are necessary for build_gufunc_wrapper to find external symbols
_launch_threads()
_init()
wrapper_ptr, env, wrapper_name = build_gufunc_wrapper(
llvm_func, cres, sin, sout, {}
)
cres.library._ensure_finalized()
if config.DEBUG_ARRAY_OPT:
print("parallel function = ", wrapper_name, cres)
# loadvars for loop_ranges
def load_range(v):
if isinstance(v, ir.Var):
return lowerer.loadvar(v.name)
else:
return context.get_constant(types.intp, v)
num_dim = len(loop_ranges)
for i in range(num_dim):
start, stop, step = loop_ranges[i]
start = load_range(start)
stop = load_range(stop)
assert step == 1 # We do not support loop steps other than 1
step = load_range(step)
loop_ranges[i] = (start, stop, step)
if config.DEBUG_ARRAY_OPT:
print(
"call_parallel_gufunc loop_ranges[{}] = ".format(i), start, stop, step
)
cgutils.printf(
builder, "loop range[{}]: %d %d (%d)\n".format(i), start, stop, step
)
# Commonly used LLVM types and constants
byte_t = lc.Type.int(8)
byte_ptr_t = lc.Type.pointer(byte_t)
byte_ptr_ptr_t = lc.Type.pointer(byte_ptr_t)
intp_t = context.get_value_type(types.intp)
uintp_t = context.get_value_type(types.uintp)
intp_ptr_t = lc.Type.pointer(intp_t)
zero = context.get_constant(types.intp, 0)
one = context.get_constant(types.intp, 1)
sizeof_intp = context.get_abi_sizeof(intp_t)
# Prepare sched, first pop it out of expr_args, outer_sig, and gu_signature
sched_name = expr_args.pop(0)
sched_typ = outer_sig.args[0]
sched_sig = sin.pop(0)
# Call do_scheduling with appropriate arguments
dim_starts = cgutils.alloca_once(
builder, intp_t, size=context.get_constant(types.intp, num_dim), name="dims"
)
dim_stops = cgutils.alloca_once(
builder, intp_t, size=context.get_constant(types.intp, num_dim), name="dims"
)
for i in range(num_dim):
start, stop, step = loop_ranges[i]
# substract 1 because do-scheduling takes inclusive ranges
stop = builder.sub(stop, one)
builder.store(
start, builder.gep(dim_starts, [context.get_constant(types.intp, i)])
)
builder.store(
stop, builder.gep(dim_stops, [context.get_constant(types.intp, i)])
)
sched_size = get_thread_count() * num_dim * 2
sched = cgutils.alloca_once(
builder, intp_t, size=context.get_constant(types.intp, sched_size), name="sched"
)
debug_flag = 1 if config.DEBUG_ARRAY_OPT else 0
scheduling_fnty = lc.Type.function(
intp_ptr_t, [intp_t, intp_ptr_t, intp_ptr_t, uintp_t, intp_ptr_t, intp_t]
)
do_scheduling = builder.module.get_or_insert_function(
scheduling_fnty, name="do_scheduling"
)
builder.call(
do_scheduling,
[
context.get_constant(types.intp, num_dim),
dim_starts,
dim_stops,
context.get_constant(types.uintp, get_thread_count()),
sched,
context.get_constant(types.intp, debug_flag),
],
)
# init reduction array allocation here.
nredvars = len(redvars)
ninouts = len(expr_args) - nredvars
redarrs = []
for i in range(nredvars):
# arr = expr_args[-(nredvars - i)]
val = lowerer.loadvar(redvars[i])
# cgutils.printf(builder, "nredvar(" + redvars[i] + ") = %d\n", val)
typ = context.get_value_type(lowerer.fndesc.typemap[redvars[i]])
size = get_thread_count()
arr = cgutils.alloca_once(
builder, typ, size=context.get_constant(types.intp, size)
)
redarrs.append(arr)
for j in range(size):
dst = builder.gep(arr, [context.get_constant(types.intp, j)])
builder.store(val, dst)
if config.DEBUG_ARRAY_OPT:
for i in range(get_thread_count()):
cgutils.printf(builder, "sched[" + str(i) + "] = ")
for j in range(num_dim * 2):
cgutils.printf(
builder,
"%d ",
builder.load(
builder.gep(
sched,
[context.get_constant(types.intp, i * num_dim * 2 + j)],
)
),
)
cgutils.printf(builder, "\n")
# Prepare arguments: args, shapes, steps, data
all_args = [lowerer.loadvar(x) for x in expr_args[:ninouts]] + redarrs
num_args = len(all_args)
num_inps = len(sin) + 1
args = cgutils.alloca_once(
builder,
byte_ptr_t,
size=context.get_constant(types.intp, 1 + num_args),
name="pargs",
)
array_strides = []
# sched goes first
builder.store(builder.bitcast(sched, byte_ptr_t), args)
array_strides.append(context.get_constant(types.intp, sizeof_intp))
# followed by other arguments
for i in range(num_args):
arg = all_args[i]
aty = outer_sig.args[i + 1] # skip first argument sched
dst = builder.gep(args, [context.get_constant(types.intp, i + 1)])
if i >= ninouts: # reduction variables
builder.store(builder.bitcast(arg, byte_ptr_t), dst)
elif isinstance(aty, types.ArrayCompatible):
ary = context.make_array(aty)(context, builder, arg)
strides = cgutils.unpack_tuple(builder, ary.strides, aty.ndim)
for j in range(len(strides)):
array_strides.append(strides[j])
builder.store(builder.bitcast(ary.data, byte_ptr_t), dst)
else:
if i < num_inps:
# Scalar input, need to store the value in an array of size 1
typ = (
context.get_data_type(aty)
if aty != types.boolean
else lc.Type.int(1)
)
ptr = cgutils.alloca_once(builder, typ)
builder.store(arg, ptr)
else:
# Scalar output, must allocate
typ = (
context.get_data_type(aty)
if aty != types.boolean
else lc.Type.int(1)
)
ptr = cgutils.alloca_once(builder, typ)
builder.store(builder.bitcast(ptr, byte_ptr_t), dst)
# Next, we prepare the individual dimension info recorded in gu_signature
sig_dim_dict = {}
occurances = []
occurances = [sched_sig[0]]
sig_dim_dict[sched_sig[0]] = context.get_constant(types.intp, 2 * num_dim)
for var, arg, aty, gu_sig in zip(
expr_args[:ninouts], all_args[:ninouts], outer_sig.args[1:], sin + sout
):
if config.DEBUG_ARRAY_OPT:
print("var = ", var, " gu_sig = ", gu_sig)
i = 0
for dim_sym in gu_sig:
dim = array_size_vars[var][i]
if isinstance(dim, ir.Var):
sig_dim_dict[dim_sym] = lowerer.loadvar(dim.name)
elif isinstance(dim, int):
sig_dim_dict[dim_sym] = context.get_constant(types.intp, dim)
else:
# raise NotImplementedError("wrong dimension value encoutered: ", dim)
if config.DEBUG_ARRAY_OPT:
print("var = ", var, " type = ", aty)
ary = context.make_array(aty)(context, builder, arg)
shapes = cgutils.unpack_tuple(builder, ary.strides, aty.ndim)
sig_dim_dict[dim_sym] = shapes[i]
if not (dim_sym in occurances):
if config.DEBUG_ARRAY_OPT:
print("dim_sym = ", dim_sym, ", size = ", array_size_vars[var][i])
occurances.append(dim_sym)
i = i + 1
# Prepare shapes, which is a single number (outer loop size), followed by the size of individual shape variables.
nshapes = len(sig_dim_dict) + 1
shapes = cgutils.alloca_once(builder, intp_t, size=nshapes, name="pshape")
# For now, outer loop size is the same as number of threads
builder.store(context.get_constant(types.intp, get_thread_count()), shapes)
# Individual shape variables go next
i = 1
for dim_sym in occurances:
if config.DEBUG_ARRAY_OPT:
cgutils.printf(builder, dim_sym + " = %d\n", sig_dim_dict[dim_sym])
builder.store(
sig_dim_dict[dim_sym],
builder.gep(shapes, [context.get_constant(types.intp, i)]),
)
i = i + 1
# Prepare steps for each argument. Note that all steps are counted in bytes.
num_steps = num_args + 1 + len(array_strides)
steps = cgutils.alloca_once(
builder, intp_t, size=context.get_constant(types.intp, num_steps), name="psteps"
)
# First goes the step size for sched, which is 2 * num_dim
builder.store(context.get_constant(types.intp, 2 * num_dim * sizeof_intp), steps)
# The steps for all others are 0. (TODO: except reduction results)
for i in range(num_args):
if i >= ninouts: # steps for reduction vars are abi_sizeof(typ)
j = i - ninouts
typ = context.get_value_type(lowerer.fndesc.typemap[redvars[j]])
sizeof = context.get_abi_sizeof(typ)
stepsize = context.get_constant(types.intp, sizeof)
else:
# steps are strides
stepsize = zero
dst = builder.gep(steps, [context.get_constant(types.intp, 1 + i)])
builder.store(stepsize, dst)
for j in range(len(array_strides)):
dst = builder.gep(steps, [context.get_constant(types.intp, 1 + num_args + j)])
builder.store(array_strides[j], dst)
# prepare data
data = builder.inttoptr(zero, byte_ptr_t)
fnty = lc.Type.function(
lc.Type.void(), [byte_ptr_ptr_t, intp_ptr_t, intp_ptr_t, byte_ptr_t]
)
fn = builder.module.get_or_insert_function(fnty, name=wrapper_name)
if config.DEBUG_ARRAY_OPT:
cgutils.printf(builder, "before calling kernel %p\n", fn)
result = builder.call(fn, [args, shapes, steps, data])
if config.DEBUG_ARRAY_OPT:
cgutils.printf(builder, "after calling kernel %p\n", fn)
scope = init_block.scope
loc = init_block.loc
calltypes = lowerer.fndesc.calltypes
# Accumulate all reduction arrays back to a single value
for i in range(get_thread_count()):
for name, arr in zip(redvars, redarrs):
tmpname = mk_unique_var(name)
op, imop = reddict[name]
src = builder.gep(arr, [context.get_constant(types.intp, i)])
val = builder.load(src)
vty = lowerer.fndesc.typemap[name]
lowerer.fndesc.typemap[tmpname] = vty
lowerer.storevar(val, tmpname)
accvar = ir.Var(scope, name, loc)
tmpvar = ir.Var(scope, tmpname, loc)
acc_call = ir.Expr.inplace_binop(op, imop, accvar, tmpvar, loc)
calltypes[acc_call] = signature(vty, vty, vty)
inst = ir.Assign(acc_call, accvar, loc)
lowerer.lower_inst(inst)
# TODO: scalar output must be assigned back to corresponding output variables
return
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def _build_element_wise_ufunc_wrapper(cres, signature):
"""Build a wrapper for the ufunc loop entry point given by the
compilation result object, using the element-wise signature.
"""
ctx = cres.target_context
library = cres.library
fname = cres.fndesc.llvm_func_name
env = cres.environment
envptr = env.as_pointer(ctx)
with compiler.lock_compiler:
ptr = build_ufunc_wrapper(
library, ctx, fname, signature, cres.objectmode, envptr, env
)
# Get dtypes
dtypenums = [as_dtype(a).num for a in signature.args]
dtypenums.append(as_dtype(signature.return_type).num)
return dtypenums, ptr, env
|
def _build_element_wise_ufunc_wrapper(cres, signature):
"""Build a wrapper for the ufunc loop entry point given by the
compilation result object, using the element-wise signature.
"""
ctx = cres.target_context
library = cres.library
fname = cres.fndesc.llvm_func_name
env = cres.environment
envptr = env.as_pointer(ctx)
ptr = build_ufunc_wrapper(
library, ctx, fname, signature, cres.objectmode, envptr, env
)
# Get dtypes
dtypenums = [as_dtype(a).num for a in signature.args]
dtypenums.append(as_dtype(signature.return_type).num)
return dtypenums, ptr, env
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def build(self, cres):
"""
Returns (dtype numbers, function ptr, EnvironmentObject)
"""
# Buider wrapper for ufunc entry point
signature = cres.signature
with compiler.lock_compiler:
ptr, env, wrapper_name = build_gufunc_wrapper(
self.py_func, cres, self.sin, self.sout, cache=self.cache
)
# Get dtypes
dtypenums = []
for a in signature.args:
if isinstance(a, types.Array):
ty = a.dtype
else:
ty = a
dtypenums.append(as_dtype(ty).num)
return dtypenums, ptr, env
|
def build(self, cres):
"""
Returns (dtype numbers, function ptr, EnvironmentObject)
"""
# Buider wrapper for ufunc entry point
signature = cres.signature
ptr, env, wrapper_name = build_gufunc_wrapper(
self.py_func, cres, self.sin, self.sout, cache=self.cache
)
# Get dtypes
dtypenums = []
for a in signature.args:
if isinstance(a, types.Array):
ty = a.dtype
else:
ty = a
dtypenums.append(as_dtype(ty).num)
return dtypenums, ptr, env
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def as_dtype(nbtype):
"""
Return a numpy dtype instance corresponding to the given Numba type.
NotImplementedError is if no correspondence is known.
"""
if isinstance(nbtype, (types.Complex, types.Integer, types.Float)):
return np.dtype(str(nbtype))
if nbtype is types.bool_:
return np.dtype("?")
if isinstance(nbtype, (types.NPDatetime, types.NPTimedelta)):
letter = _as_dtype_letters[type(nbtype)]
if nbtype.unit:
return np.dtype("%s[%s]" % (letter, nbtype.unit))
else:
return np.dtype(letter)
if isinstance(nbtype, (types.CharSeq, types.UnicodeCharSeq)):
letter = _as_dtype_letters[type(nbtype)]
return np.dtype("%s%d" % (letter, nbtype.count))
if isinstance(nbtype, types.Record):
return nbtype.dtype
if isinstance(nbtype, types.EnumMember):
return as_dtype(nbtype.dtype)
raise NotImplementedError("%r cannot be represented as a Numpy dtype" % (nbtype,))
|
def as_dtype(nbtype):
"""
Return a numpy dtype instance corresponding to the given Numba type.
NotImplementedError is if no correspondence is known.
"""
if isinstance(nbtype, (types.Complex, types.Integer, types.Float)):
return np.dtype(str(nbtype))
if nbtype is types.bool_:
return np.dtype("?")
if isinstance(nbtype, (types.NPDatetime, types.NPTimedelta)):
letter = _as_dtype_letters[type(nbtype)]
if nbtype.unit:
return np.dtype("%s[%s]" % (letter, nbtype.unit))
else:
return np.dtype(letter)
if isinstance(nbtype, (types.CharSeq, types.UnicodeCharSeq)):
letter = _as_dtype_letters[type(nbtype)]
return np.dtype("%s%d" % (letter, nbtype.count))
if isinstance(nbtype, types.Record):
return nbtype.dtype
raise NotImplementedError("%r cannot be represented as a Numpy dtype" % (nbtype,))
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def __init__(self, loop_nests, init_block, loop_body, loc, array_analysis, index_var):
super(Parfor, self).__init__(op="parfor", loc=loc)
self.id = type(self).id_counter
type(self).id_counter += 1
# self.input_info = input_info
# self.output_info = output_info
self.loop_nests = loop_nests
self.init_block = init_block
self.loop_body = loop_body
self.array_analysis = array_analysis
self.index_var = index_var
self.params = None # filled right before parallel lowering
|
def __init__(self, loop_nests, init_block, loop_body, loc, array_analysis, index_var):
super(Parfor, self).__init__(op="parfor", loc=loc)
self.id = type(self).id_counter
type(self).id_counter = +1
# self.input_info = input_info
# self.output_info = output_info
self.loop_nests = loop_nests
self.init_block = init_block
self.loop_body = loop_body
self.array_analysis = array_analysis
self.index_var = index_var
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def __repr__(self):
return repr(self.loop_nests) + repr(self.loop_body) + repr(self.index_var)
|
def __repr__(self):
return repr(self.loop_nests) + repr(self.loop_body) + repr(self.index_var)
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def dump(self, file=None):
file = file or sys.stdout
print(("begin parfor {}".format(self.id)).center(20, "-"), file=file)
print("index_var = ", self.index_var)
for loopnest in self.loop_nests:
print(loopnest, file=file)
print("init block:", file=file)
self.init_block.dump()
for offset, block in sorted(self.loop_body.items()):
print("label %s:" % (offset,), file=file)
block.dump(file)
print(("end parfor {}".format(self.id)).center(20, "-"), file=file)
|
def dump(self, file=None):
file = file or sys.stdout
print(("begin parfor {}".format(self.id)).center(20, "-"), file=file)
print("index_var = ", self.index_var)
for loopnest in self.loop_nests:
print(loopnest, file=file)
print("init block:", file=file)
self.init_block.dump()
for offset, block in sorted(self.loop_body.items()):
print("label %s:" % (offset,), file=file)
block.dump(file)
print(("end parfor").center(20, "-"), file=file)
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def __init__(self, func_ir, typemap, calltypes, return_type, typingctx):
self.func_ir = func_ir
self.typemap = typemap
self.calltypes = calltypes
self.typingctx = typingctx
self.return_type = return_type
self.array_analysis = array_analysis.ArrayAnalysis(func_ir, typemap, calltypes)
ir_utils._max_label = max(func_ir.blocks.keys())
|
def __init__(self, func_ir, typemap, calltypes, return_type):
self.func_ir = func_ir
self.typemap = typemap
self.calltypes = calltypes
self.return_type = return_type
self.array_analysis = array_analysis.ArrayAnalysis(func_ir, typemap, calltypes)
ir_utils._max_label = max(func_ir.blocks.keys())
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def run(self):
"""run parfor conversion pass: replace Numpy calls
with Parfors when possible and optimize the IR."""
self.func_ir.blocks = simplify_CFG(self.func_ir.blocks)
# remove Del statements for easier optimization
remove_dels(self.func_ir.blocks)
# e.g. convert A.sum() to np.sum(A) for easier match and optimization
canonicalize_array_math(
self.func_ir.blocks, self.typemap, self.calltypes, self.typingctx
)
self.array_analysis.run()
self._convert_prange(self.func_ir.blocks)
self._convert_numpy(self.func_ir.blocks)
dprint_func_ir(self.func_ir, "after parfor pass")
simplify(
self.func_ir,
self.typemap,
self.array_analysis,
self.calltypes,
array_analysis.copy_propagate_update_analysis,
)
# dprint_func_ir(self.func_ir, "after remove_dead")
# reorder statements to maximize fusion
maximize_fusion(self.func_ir.blocks)
fuse_parfors(self.func_ir.blocks)
# remove dead code after fusion to remove extra arrays and variables
remove_dead(self.func_ir.blocks, self.func_ir.arg_names, self.typemap)
# dprint_func_ir(self.func_ir, "after second remove_dead")
# push function call variables inside parfors so gufunc function
# wouldn't need function variables as argument
push_call_vars(self.func_ir.blocks, {}, {})
remove_dead(self.func_ir.blocks, self.func_ir.arg_names, self.typemap)
# after optimization, some size variables are not available anymore
remove_dead_class_sizes(self.func_ir.blocks, self.array_analysis)
dprint_func_ir(self.func_ir, "after optimization")
if config.DEBUG_ARRAY_OPT == 1:
print("variable types: ", sorted(self.typemap.items()))
print("call types: ", self.calltypes)
# run post processor again to generate Del nodes
post_proc = postproc.PostProcessor(self.func_ir)
post_proc.run()
if self.func_ir.is_generator:
fix_generator_types(self.func_ir.generator_info, self.return_type, self.typemap)
if sequential_parfor_lowering:
lower_parfor_sequential(self.func_ir, self.typemap, self.calltypes)
else:
# prepare for parallel lowering
# add parfor params to parfors here since lowering is destructive
# changing the IR after this is not allowed
get_parfor_params(self.func_ir.blocks)
return
|
def run(self):
"""run parfor conversion pass: replace Numpy calls
with Parfors when possible and optimize the IR."""
self.array_analysis.run()
topo_order = find_topo_order(self.func_ir.blocks)
# variables available in the program so far (used for finding map
# functions in array_expr lowering)
avail_vars = []
for label in topo_order:
block = self.func_ir.blocks[label]
new_body = []
for instr in block.body:
if isinstance(instr, ir.Assign):
expr = instr.value
lhs = instr.target
# only translate C order since we can't allocate F
if self._has_known_shape(lhs) and self._is_C_order(lhs.name):
if self._is_supported_npycall(expr):
instr = self._numpy_to_parfor(lhs, expr)
elif isinstance(expr, ir.Expr) and expr.op == "arrayexpr":
instr = self._arrayexpr_to_parfor(lhs, expr, avail_vars)
elif self._is_supported_npyreduction(expr):
instr = self._reduction_to_parfor(lhs, expr)
avail_vars.append(lhs.name)
new_body.append(instr)
block.body = new_body
# remove Del statements for easier optimization
remove_dels(self.func_ir.blocks)
dprint_func_ir(self.func_ir, "after parfor pass")
# get copies in to blocks and out from blocks
in_cps, out_cps = copy_propagate(self.func_ir.blocks, self.typemap)
# table mapping variable names to ir.Var objects to help replacement
name_var_table = get_name_var_table(self.func_ir.blocks)
apply_copy_propagate(
self.func_ir.blocks,
in_cps,
name_var_table,
array_analysis.copy_propagate_update_analysis,
self.array_analysis,
self.typemap,
self.calltypes,
)
# remove dead code to enable fusion
remove_dead(self.func_ir.blocks, self.func_ir.arg_names)
# dprint_func_ir(self.func_ir, "after remove_dead")
# reorder statements to maximize fusion
maximize_fusion(self.func_ir.blocks)
fuse_parfors(self.func_ir.blocks)
# remove dead code after fusion to remove extra arrays and variables
remove_dead(self.func_ir.blocks, self.func_ir.arg_names)
# dprint_func_ir(self.func_ir, "after second remove_dead")
# push function call variables inside parfors so gufunc function
# wouldn't need function variables as argument
push_call_vars(self.func_ir.blocks, {}, {})
remove_dead(self.func_ir.blocks, self.func_ir.arg_names)
# after optimization, some size variables are not available anymore
remove_dead_class_sizes(self.func_ir.blocks, self.array_analysis)
dprint_func_ir(self.func_ir, "after optimization")
if config.DEBUG_ARRAY_OPT == 1:
print("variable types: ", sorted(self.typemap.items()))
print("call types: ", self.calltypes)
# run post processor again to generate Del nodes
post_proc = postproc.PostProcessor(self.func_ir)
post_proc.run()
if self.func_ir.is_generator:
fix_generator_types(self.func_ir.generator_info, self.return_type, self.typemap)
# lower_parfor_sequential(self.func_ir, self.typemap, self.calltypes)
return
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def _is_supported_npycall(self, expr):
"""check if we support parfor translation for
this Numpy call.
"""
# return False # turn off for now
if not (isinstance(expr, ir.Expr) and expr.op == "call"):
return False
if expr.func.name not in self.array_analysis.numpy_calls.keys():
return False
call_name = self.array_analysis.numpy_calls[expr.func.name]
supported_calls = ["zeros", "ones"] + random_calls
if call_name in supported_calls:
return True
# TODO: add more calls
if call_name == "dot":
# only translate matrix/vector and vector/vector multiply to parfor
# (don't translate matrix/matrix multiply)
if (
self._get_ndims(expr.args[0].name) <= 2
and self._get_ndims(expr.args[1].name) == 1
):
return True
return False
|
def _is_supported_npycall(self, expr):
"""check if we support parfor translation for
this Numpy call.
"""
# return False # turn off for now
if not (isinstance(expr, ir.Expr) and expr.op == "call"):
return False
if expr.func.name not in self.array_analysis.numpy_calls.keys():
return False
call_name = self.array_analysis.numpy_calls[expr.func.name]
if call_name in ["zeros", "ones", "random.ranf"]:
return True
# TODO: add more calls
if call_name == "dot":
# only translate matrix/vector and vector/vector multiply to parfor
# (don't translate matrix/matrix multiply)
if (
self._get_ndims(expr.args[0].name) <= 2
and self._get_ndims(expr.args[1].name) == 1
):
return True
return False
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def _numpy_to_parfor(self, lhs, expr):
assert isinstance(expr, ir.Expr) and expr.op == "call"
call_name = self.array_analysis.numpy_calls[expr.func.name]
args = expr.args
kws = dict(expr.kws)
if call_name in ["zeros", "ones"] or call_name.startswith("random."):
return self._numpy_map_to_parfor(call_name, lhs, args, kws, expr)
if call_name == "dot":
assert len(args) == 2 or len(args) == 3
# if 3 args, output is allocated already
out = None
if len(args) == 3:
out = args[2]
if "out" in kws:
out = kws["out"]
in1 = args[0]
in2 = args[1]
el_typ = self.typemap[lhs.name].dtype
assert self._get_ndims(in1.name) <= 2 and self._get_ndims(in2.name) == 1
# loop range correlation is same as first dimention of 1st input
corr = self.array_analysis.array_shape_classes[in1.name][0]
size_var = self.array_analysis.array_size_vars[in1.name][0]
scope = lhs.scope
loc = expr.loc
index_var = ir.Var(scope, mk_unique_var("parfor_index"), lhs.loc)
self.typemap[index_var.name] = types.intp
loopnests = [LoopNest(index_var, 0, size_var, 1, corr)]
init_block = ir.Block(scope, loc)
parfor = Parfor(loopnests, init_block, {}, loc, self.array_analysis, index_var)
if self._get_ndims(in1.name) == 2:
# for 2D input, there is an inner loop
# correlation of inner dimension
inner_size_var = self.array_analysis.array_size_vars[in1.name][1]
# loop structure: range block, header block, body
range_label = next_label()
header_label = next_label()
body_label = next_label()
out_label = next_label()
if out is None:
alloc_nodes = mk_alloc(
self.typemap, self.calltypes, lhs, size_var, el_typ, scope, loc
)
init_block.body = alloc_nodes
else:
out_assign = ir.Assign(out, lhs, loc)
init_block.body = [out_assign]
init_block.body.extend(
_gen_dotmv_check(
self.typemap, self.calltypes, in1, in2, lhs, scope, loc
)
)
# sum_var = 0
const_node = ir.Const(0, loc)
const_var = ir.Var(scope, mk_unique_var("$const"), loc)
self.typemap[const_var.name] = el_typ
const_assign = ir.Assign(const_node, const_var, loc)
sum_var = ir.Var(scope, mk_unique_var("$sum_var"), loc)
self.typemap[sum_var.name] = el_typ
sum_assign = ir.Assign(const_var, sum_var, loc)
range_block = mk_range_block(
self.typemap, 0, inner_size_var, 1, self.calltypes, scope, loc
)
range_block.body = [const_assign, sum_assign] + range_block.body
range_block.body[-1].target = header_label # fix jump target
phi_var = range_block.body[-2].target
header_block = mk_loop_header(
self.typemap, phi_var, self.calltypes, scope, loc
)
header_block.body[-1].truebr = body_label
header_block.body[-1].falsebr = out_label
phi_b_var = header_block.body[-2].target
body_block = _mk_mvdot_body(
self.typemap,
self.calltypes,
phi_b_var,
index_var,
in1,
in2,
sum_var,
scope,
loc,
el_typ,
)
body_block.body[-1].target = header_label
out_block = ir.Block(scope, loc)
# lhs[parfor_index] = sum_var
setitem_node = ir.SetItem(lhs, index_var, sum_var, loc)
self.calltypes[setitem_node] = signature(
types.none, self.typemap[lhs.name], types.intp, el_typ
)
out_block.body = [setitem_node]
parfor.loop_body = {
range_label: range_block,
header_label: header_block,
body_label: body_block,
out_label: out_block,
}
else: # self._get_ndims(in1.name)==1 (reduction)
NotImplementedError("no reduction for dot() " + expr)
if config.DEBUG_ARRAY_OPT == 1:
print("generated parfor for numpy call:")
parfor.dump()
return parfor
# return error if we couldn't handle it (avoid rewrite infinite loop)
raise NotImplementedError("parfor translation failed for ", expr)
|
def _numpy_to_parfor(self, lhs, expr):
assert isinstance(expr, ir.Expr) and expr.op == "call"
call_name = self.array_analysis.numpy_calls[expr.func.name]
args = expr.args
kws = dict(expr.kws)
if call_name in ["zeros", "ones", "random.ranf"]:
return self._numpy_map_to_parfor(call_name, lhs, args, kws, expr)
if call_name == "dot":
assert len(args) == 2 or len(args) == 3
# if 3 args, output is allocated already
out = None
if len(args) == 3:
out = args[2]
if "out" in kws:
out = kws["out"]
in1 = args[0]
in2 = args[1]
el_typ = self.typemap[lhs.name].dtype
assert self._get_ndims(in1.name) <= 2 and self._get_ndims(in2.name) == 1
# loop range correlation is same as first dimention of 1st input
corr = self.array_analysis.array_shape_classes[in1.name][0]
size_var = self.array_analysis.array_size_vars[in1.name][0]
scope = lhs.scope
loc = expr.loc
index_var = ir.Var(scope, mk_unique_var("parfor_index"), lhs.loc)
self.typemap[index_var.name] = types.intp
loopnests = [LoopNest(index_var, 0, size_var, 1, corr)]
init_block = ir.Block(scope, loc)
parfor = Parfor(loopnests, init_block, {}, loc, self.array_analysis, index_var)
if self._get_ndims(in1.name) == 2:
# for 2D input, there is an inner loop
# correlation of inner dimension
inner_size_var = self.array_analysis.array_size_vars[in1.name][1]
# loop structure: range block, header block, body
range_label = next_label()
header_label = next_label()
body_label = next_label()
out_label = next_label()
if out == None:
alloc_nodes = mk_alloc(
self.typemap, self.calltypes, lhs, size_var, el_typ, scope, loc
)
init_block.body = alloc_nodes
else:
out_assign = ir.Assign(out, lhs, loc)
init_block.body = [out_assign]
init_block.body.extend(
_gen_dotmv_check(
self.typemap, self.calltypes, in1, in2, lhs, scope, loc
)
)
# sum_var = 0
const_node = ir.Const(0, loc)
const_var = ir.Var(scope, mk_unique_var("$const"), loc)
self.typemap[const_var.name] = el_typ
const_assign = ir.Assign(const_node, const_var, loc)
sum_var = ir.Var(scope, mk_unique_var("$sum_var"), loc)
self.typemap[sum_var.name] = el_typ
sum_assign = ir.Assign(const_var, sum_var, loc)
range_block = mk_range_block(
self.typemap, 0, inner_size_var, 1, self.calltypes, scope, loc
)
range_block.body = [const_assign, sum_assign] + range_block.body
range_block.body[-1].target = header_label # fix jump target
phi_var = range_block.body[-2].target
header_block = mk_loop_header(
self.typemap, phi_var, self.calltypes, scope, loc
)
header_block.body[-1].truebr = body_label
header_block.body[-1].falsebr = out_label
phi_b_var = header_block.body[-2].target
body_block = _mk_mvdot_body(
self.typemap,
self.calltypes,
phi_b_var,
index_var,
in1,
in2,
sum_var,
scope,
loc,
el_typ,
)
body_block.body[-1].target = header_label
out_block = ir.Block(scope, loc)
# lhs[parfor_index] = sum_var
setitem_node = ir.SetItem(lhs, index_var, sum_var, loc)
self.calltypes[setitem_node] = signature(
types.none, self.typemap[lhs.name], types.intp, el_typ
)
out_block.body = [setitem_node]
parfor.loop_body = {
range_label: range_block,
header_label: header_block,
body_label: body_block,
out_label: out_block,
}
else: # self._get_ndims(in1.name)==1 (reduction)
NotImplementedError("no reduction for dot() " + expr)
if config.DEBUG_ARRAY_OPT == 1:
print("generated parfor for numpy call:")
parfor.dump()
return parfor
# return error if we couldn't handle it (avoid rewrite infinite loop)
raise NotImplementedError("parfor translation failed for ", expr)
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def _numpy_map_to_parfor(self, call_name, lhs, args, kws, expr):
"""generate parfor from Numpy calls that are maps."""
scope = lhs.scope
loc = lhs.loc
arr_typ = self.typemap[lhs.name]
el_typ = arr_typ.dtype
# generate loopnests and size variables from lhs correlations
loopnests = []
size_vars = []
index_vars = []
for this_dim in range(arr_typ.ndim):
corr = self.array_analysis.array_shape_classes[lhs.name][this_dim]
size_var = self.array_analysis.array_size_vars[lhs.name][this_dim]
size_vars.append(size_var)
index_var = ir.Var(scope, mk_unique_var("parfor_index"), loc)
index_vars.append(index_var)
self.typemap[index_var.name] = types.intp
loopnests.append(LoopNest(index_var, 0, size_var, 1, corr))
# generate init block and body
init_block = ir.Block(scope, loc)
init_block.body = mk_alloc(
self.typemap, self.calltypes, lhs, tuple(size_vars), el_typ, scope, loc
)
body_label = next_label()
body_block = ir.Block(scope, loc)
expr_out_var = ir.Var(scope, mk_unique_var("$expr_out_var"), loc)
self.typemap[expr_out_var.name] = el_typ
index_var, index_var_typ = self._make_index_var(scope, index_vars, body_block)
if call_name == "zeros":
value = ir.Const(0, loc)
elif call_name == "ones":
value = ir.Const(1, loc)
elif call_name.startswith("random."):
# remove size arg to reuse the call expr for single value
_remove_size_arg(call_name, expr)
# update expr type
new_arg_typs, new_kw_types = _get_call_arg_types(expr, self.typemap)
self.calltypes.pop(expr)
self.calltypes[expr] = self.typemap[expr.func.name].get_call_type(
typing.Context(), new_arg_typs, new_kw_types
)
value = expr
else:
NotImplementedError(
"Map of numpy.{} to parfor is not implemented".format(call_name)
)
value_assign = ir.Assign(value, expr_out_var, loc)
body_block.body.append(value_assign)
parfor = Parfor(loopnests, init_block, {}, loc, self.array_analysis, index_var)
setitem_node = ir.SetItem(lhs, index_var, expr_out_var, loc)
self.calltypes[setitem_node] = signature(
types.none, self.typemap[lhs.name], index_var_typ, el_typ
)
body_block.body.append(setitem_node)
parfor.loop_body = {body_label: body_block}
if config.DEBUG_ARRAY_OPT == 1:
print("generated parfor for numpy map:")
parfor.dump()
return parfor
|
def _numpy_map_to_parfor(self, call_name, lhs, args, kws, expr):
"""generate parfor from Numpy calls that are maps."""
scope = lhs.scope
loc = lhs.loc
arr_typ = self.typemap[lhs.name]
el_typ = arr_typ.dtype
# generate loopnests and size variables from lhs correlations
loopnests = []
size_vars = []
index_vars = []
for this_dim in range(arr_typ.ndim):
corr = self.array_analysis.array_shape_classes[lhs.name][this_dim]
size_var = self.array_analysis.array_size_vars[lhs.name][this_dim]
size_vars.append(size_var)
index_var = ir.Var(scope, mk_unique_var("parfor_index"), loc)
index_vars.append(index_var)
self.typemap[index_var.name] = types.intp
loopnests.append(LoopNest(index_var, 0, size_var, 1, corr))
# generate init block and body
init_block = ir.Block(scope, loc)
init_block.body = mk_alloc(
self.typemap, self.calltypes, lhs, tuple(size_vars), el_typ, scope, loc
)
body_label = next_label()
body_block = ir.Block(scope, loc)
expr_out_var = ir.Var(scope, mk_unique_var("$expr_out_var"), loc)
self.typemap[expr_out_var.name] = el_typ
index_var, index_var_typ = self._make_index_var(scope, index_vars, body_block)
if call_name == "zeros":
value = ir.Const(0, loc)
elif call_name == "ones":
value = ir.Const(1, loc)
elif call_name == "random.ranf":
# reuse the call expr for single value
expr.args = []
self.calltypes.pop(expr)
self.calltypes[expr] = self.typemap[expr.func.name].get_call_type(
typing.Context(), [], {}
)
value = expr
else:
NotImplementedError(
"Map of numpy.{} to parfor is not implemented".format(call_name)
)
value_assign = ir.Assign(value, expr_out_var, loc)
body_block.body.append(value_assign)
parfor = Parfor(loopnests, init_block, {}, loc, self.array_analysis, index_var)
setitem_node = ir.SetItem(lhs, index_var, expr_out_var, loc)
self.calltypes[setitem_node] = signature(
types.none, self.typemap[lhs.name], index_var_typ, el_typ
)
body_block.body.append(setitem_node)
parfor.loop_body = {body_label: body_block}
if config.DEBUG_ARRAY_OPT == 1:
print("generated parfor for numpy map:")
parfor.dump()
return parfor
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def _reduction_to_parfor(self, lhs, expr):
assert isinstance(expr, ir.Expr) and expr.op == "call"
call_name = self.array_analysis.numpy_calls[expr.func.name]
args = expr.args
kws = dict(expr.kws)
if call_name in _reduction_ops:
acc_op, im_op, init_val = _reduction_ops[call_name]
assert len(args) in [1, 2] # vector dot has 2 args
in1 = args[0]
arr_typ = self.typemap[in1.name]
in_typ = arr_typ.dtype
im_op_func_typ = find_op_typ(im_op, [in_typ, in_typ])
el_typ = im_op_func_typ.return_type
ndims = arr_typ.ndim
# For full reduction, loop range correlation is same as 1st input
corrs = self.array_analysis.array_shape_classes[in1.name]
sizes = self.array_analysis.array_size_vars[in1.name]
assert ndims == len(sizes) and ndims == len(corrs)
scope = lhs.scope
loc = expr.loc
loopnests = []
parfor_index = []
for i in range(ndims):
index_var = ir.Var(scope, mk_unique_var("$parfor_index" + str(i)), loc)
self.typemap[index_var.name] = types.intp
parfor_index.append(index_var)
loopnests.append(LoopNest(index_var, 0, sizes[i], 1, corrs[i]))
acc_var = lhs
# init value
init_const = ir.Const(el_typ(init_val), loc)
# init block has to init the reduction variable
init_block = ir.Block(scope, loc)
init_block.body.append(ir.Assign(init_const, acc_var, loc))
# loop body accumulates acc_var
acc_block = ir.Block(scope, loc)
tmp_var = ir.Var(scope, mk_unique_var("$val"), loc)
self.typemap[tmp_var.name] = in_typ
index_var, index_var_type = self._make_index_var(scope, parfor_index, acc_block)
getitem_call = ir.Expr.getitem(in1, index_var, loc)
self.calltypes[getitem_call] = signature(in_typ, arr_typ, index_var_type)
acc_block.body.append(ir.Assign(getitem_call, tmp_var, loc))
if call_name is "dot":
# dot has two inputs
tmp_var1 = tmp_var
in2 = args[1]
tmp_var2 = ir.Var(scope, mk_unique_var("$val"), loc)
self.typemap[tmp_var2.name] = in_typ
getitem_call2 = ir.Expr.getitem(in2, index_var, loc)
self.calltypes[getitem_call2] = signature(in_typ, arr_typ, index_var_type)
acc_block.body.append(ir.Assign(getitem_call2, tmp_var2, loc))
mult_call = ir.Expr.binop("*", tmp_var1, tmp_var2, loc)
mult_func_typ = find_op_typ("*", [in_typ, in_typ])
self.calltypes[mult_call] = mult_func_typ
tmp_var = ir.Var(scope, mk_unique_var("$val"), loc)
acc_block.body.append(ir.Assign(mult_call, tmp_var, loc))
acc_call = ir.Expr.inplace_binop(acc_op, im_op, acc_var, tmp_var, loc)
# for some reason, type template of += returns None,
# so type template of + should be used
self.calltypes[acc_call] = im_op_func_typ
# FIXME: we had to break assignment: acc += ... acc ...
# into two assignment: acc_tmp = ... acc ...; x = acc_tmp
# in order to avoid an issue in copy propagation.
acc_tmp_var = ir.Var(scope, mk_unique_var("$acc"), loc)
self.typemap[acc_tmp_var.name] = el_typ
acc_block.body.append(ir.Assign(acc_call, acc_tmp_var, loc))
acc_block.body.append(ir.Assign(acc_tmp_var, acc_var, loc))
loop_body = {next_label(): acc_block}
# parfor
parfor = Parfor(
loopnests, init_block, loop_body, loc, self.array_analysis, index_var
)
return parfor
# return error if we couldn't handle it (avoid rewrite infinite loop)
raise NotImplementedError("parfor translation failed for ", expr)
|
def _reduction_to_parfor(self, lhs, expr):
assert isinstance(expr, ir.Expr) and expr.op == "call"
call_name = self.array_analysis.numpy_calls[expr.func.name]
args = expr.args
kws = dict(expr.kws)
if call_name in _reduction_ops:
acc_op, im_op, init_val = _reduction_ops[call_name]
assert len(args) == 1
in1 = args[0]
arr_typ = self.typemap[in1.name]
in_typ = arr_typ.dtype
im_op_func_typ = find_op_typ(im_op, [in_typ, in_typ])
el_typ = im_op_func_typ.return_type
ndims = arr_typ.ndim
# For full reduction, loop range correlation is same as 1st input
corrs = self.array_analysis.array_shape_classes[in1.name]
sizes = self.array_analysis.array_size_vars[in1.name]
assert ndims == len(sizes) and ndims == len(corrs)
scope = lhs.scope
loc = expr.loc
loopnests = []
parfor_index = []
for i in range(ndims):
index_var = ir.Var(scope, mk_unique_var("$parfor_index" + str(i)), loc)
self.typemap[index_var.name] = types.intp
parfor_index.append(index_var)
loopnests.append(LoopNest(index_var, 0, sizes[i], 1, corrs[i]))
acc_var = lhs
# init value
init_const = ir.Const(el_typ(init_val), loc)
# init block has to init the reduction variable
init_block = ir.Block(scope, loc)
init_block.body.append(ir.Assign(init_const, acc_var, loc))
# loop body accumulates acc_var
acc_block = ir.Block(scope, loc)
tmp_var = ir.Var(scope, mk_unique_var("$val"), loc)
self.typemap[tmp_var.name] = in_typ
index_var, index_var_type = self._make_index_var(scope, parfor_index, acc_block)
getitem_call = ir.Expr.getitem(in1, index_var, loc)
self.calltypes[getitem_call] = signature(in_typ, arr_typ, index_var_type)
acc_block.body.append(ir.Assign(getitem_call, tmp_var, loc))
acc_call = ir.Expr.inplace_binop(acc_op, im_op, acc_var, tmp_var, loc)
# for some reason, type template of += returns None,
# so type template of + should be used
self.calltypes[acc_call] = im_op_func_typ
# FIXME: we had to break assignment: acc += ... acc ...
# into two assignment: acc_tmp = ... acc ...; x = acc_tmp
# in order to avoid an issue in copy propagation.
acc_tmp_var = ir.Var(scope, mk_unique_var("$acc"), loc)
self.typemap[acc_tmp_var.name] = el_typ
acc_block.body.append(ir.Assign(acc_call, acc_tmp_var, loc))
acc_block.body.append(ir.Assign(acc_tmp_var, acc_var, loc))
loop_body = {next_label(): acc_block}
# parfor
parfor = Parfor(
loopnests, init_block, loop_body, loc, self.array_analysis, index_var
)
return parfor
# return error if we couldn't handle it (avoid rewrite infinite loop)
raise NotImplementedError("parfor translation failed for ", expr)
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def lower_parfor_sequential(func_ir, typemap, calltypes):
ir_utils._max_label = ir_utils.find_max_label(func_ir.blocks) + 1
parfor_found = False
new_blocks = {}
for block_label, block in func_ir.blocks.items():
block_label, parfor_found = _lower_parfor_sequential_block(
block_label, block, new_blocks, typemap, calltypes, parfor_found
)
# old block stays either way
new_blocks[block_label] = block
func_ir.blocks = new_blocks
dprint_func_ir(func_ir, "before rename")
# rename only if parfor found and replaced (avoid test_flow_control error)
if parfor_found:
func_ir.blocks = rename_labels(func_ir.blocks)
dprint_func_ir(func_ir, "after parfor sequential lowering")
local_array_analysis = array_analysis.ArrayAnalysis(func_ir, typemap, calltypes)
simplify(
func_ir,
typemap,
local_array_analysis,
calltypes,
array_analysis.copy_propagate_update_analysis,
)
dprint_func_ir(func_ir, "after parfor sequential simplify")
return
|
def lower_parfor_sequential(func_ir, typemap, calltypes):
parfor_found = False
new_blocks = {}
for block_label, block in func_ir.blocks.items():
scope = block.scope
i = _find_first_parfor(block.body)
while i != -1:
parfor_found = True
inst = block.body[i]
loc = inst.init_block.loc
# split block across parfor
prev_block = ir.Block(scope, loc)
prev_block.body = block.body[:i]
block.body = block.body[i + 1 :]
# previous block jump to parfor init block
init_label = next_label()
prev_block.body.append(ir.Jump(init_label, loc))
new_blocks[init_label] = inst.init_block
new_blocks[block_label] = prev_block
block_label = next_label()
ndims = len(inst.loop_nests)
for i in range(ndims):
loopnest = inst.loop_nests[i]
# create range block for loop
range_label = next_label()
header_label = next_label()
range_block = mk_range_block(
typemap,
loopnest.start,
loopnest.stop,
loopnest.step,
calltypes,
scope,
loc,
)
range_block.body[-1].target = header_label # fix jump target
phi_var = range_block.body[-2].target
new_blocks[range_label] = range_block
header_block = mk_loop_header(typemap, phi_var, calltypes, scope, loc)
header_block.body[-2].target = loopnest.index_variable
new_blocks[header_label] = header_block
# jump to this new inner loop
if i == 0:
inst.init_block.body.append(ir.Jump(range_label, loc))
header_block.body[-1].falsebr = block_label
else:
new_blocks[prev_header_label].body[-1].truebr = range_label
header_block.body[-1].falsebr = prev_header_label
prev_header_label = header_label # to set truebr next loop
# last body block jump to inner most header
body_last_label = max(inst.loop_body.keys())
inst.loop_body[body_last_label].body.append(ir.Jump(header_label, loc))
# inner most header jumps to first body block
body_first_label = min(inst.loop_body.keys())
header_block.body[-1].truebr = body_first_label
# add parfor body to blocks
for l, b in inst.loop_body.items():
new_blocks[l] = b
i = _find_first_parfor(block.body)
# old block stays either way
new_blocks[block_label] = block
func_ir.blocks = new_blocks
# rename only if parfor found and replaced (avoid test_flow_control error)
if parfor_found:
func_ir.blocks = rename_labels(func_ir.blocks)
dprint_func_ir(func_ir, "after parfor sequential lowering")
return
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def get_parfor_params(blocks):
"""find variables used in body of parfors from outside and save them.
computed as live variables at entry of first block.
"""
# since parfor wrap creates a back-edge to first non-init basic block,
# live_map[first_non_init_block] contains variables defined in parfor body
# that could be undefined before. So we only consider variables that are
# actually defined before the parfor body in the program.
pre_defs = set()
_, all_defs = compute_use_defs(blocks)
topo_order = find_topo_order(blocks)
for label in topo_order:
block = blocks[label]
for i, parfor in _find_parfors(block.body):
# find variable defs before the parfor in the same block
dummy_block = ir.Block(block.scope, block.loc)
dummy_block.body = block.body[:i]
before_defs = compute_use_defs({0: dummy_block}).defmap[0]
pre_defs |= before_defs
parfor.params = get_parfor_params_inner(parfor, pre_defs)
pre_defs |= all_defs[label]
return
|
def get_parfor_params(parfor):
"""find variables used in body of parfor from outside.
computed as live variables at entry of first block.
"""
blocks = wrap_parfor_blocks(parfor)
cfg = compute_cfg_from_blocks(blocks)
usedefs = compute_use_defs(blocks)
live_map = compute_live_map(cfg, blocks, usedefs.usemap, usedefs.defmap)
unwrap_parfor_blocks(parfor)
keylist = sorted(live_map.keys())
first_non_init_block = keylist[1]
# remove parfor index variables since they are not input
for l in parfor.loop_nests:
live_map[first_non_init_block] -= {l.index_variable.name}
return sorted(live_map[first_non_init_block])
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def get_parfor_outputs(parfor, parfor_params):
"""get arrays that are written to inside the parfor and need to be passed
as parameters to gufunc.
"""
# FIXME: The following assumes the target of all SetItem are outputs,
# which is wrong!
last_label = max(parfor.loop_body.keys())
outputs = []
for blk in parfor.loop_body.values():
for stmt in blk.body:
if isinstance(stmt, ir.SetItem):
if stmt.index.name == parfor.index_var.name:
outputs.append(stmt.target.name)
# make sure these written arrays are in parfor parameters (live coming in)
outputs = list(set(outputs) & set(parfor_params))
return sorted(outputs)
|
def get_parfor_outputs(parfor):
"""get arrays that are written to inside the parfor and need to be passed
as parameters to gufunc.
"""
# FIXME: The following assumes the target of all SetItem are outputs, which is wrong!
last_label = max(parfor.loop_body.keys())
outputs = []
for blk in parfor.loop_body.values():
for stmt in blk.body:
if isinstance(stmt, ir.SetItem):
if stmt.index.name == parfor.index_var.name:
outputs.append(stmt.target.name)
parfor_params = get_parfor_params(parfor)
# make sure these written arrays are in parfor parameters (live coming in)
outputs = list(set(outputs) & set(parfor_params))
return sorted(outputs)
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.