after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def get_parfor_reductions(parfor, parfor_params, reductions=None, names=None):
"""get variables that are accumulated using inplace_binop inside the parfor
and need to be passed as reduction parameters to gufunc.
"""
if reductions is None:
reductions = {}
if names is None:
names = []
last_label = max(parfor.loop_body.keys())
for blk in parfor.loop_body.values():
for stmt in blk.body:
if (
isinstance(stmt, ir.Assign)
and isinstance(stmt.value, ir.Expr)
and stmt.value.op == "inplace_binop"
):
name = stmt.value.lhs.name
if name in parfor_params:
names.append(name)
red_info = None
for acc_op, imm_op, init_val in _reduction_ops.values():
if imm_op == stmt.value.immutable_fn:
red_info = (
stmt.value.fn,
stmt.value.immutable_fn,
init_val,
)
break
if red_info is None:
raise NotImplementedError(
"Reduction is not support for inplace operator %s"
% stmt.value.fn
)
reductions[name] = red_info
if isinstance(stmt, Parfor):
# recursive parfors can have reductions like test_prange8
get_parfor_reductions(stmt, parfor_params, reductions, names)
return names, reductions
|
def get_parfor_reductions(parfor):
"""get variables that are accumulated using inplace_binop inside the parfor
and need to be passed as reduction parameters to gufunc.
"""
last_label = max(parfor.loop_body.keys())
reductions = {}
names = []
parfor_params = get_parfor_params(parfor)
for blk in parfor.loop_body.values():
for stmt in blk.body:
if (
isinstance(stmt, ir.Assign)
and isinstance(stmt.value, ir.Expr)
and stmt.value.op == "inplace_binop"
):
name = stmt.value.lhs.name
if name in parfor_params:
names.append(name)
reductions[name] = (stmt.value.fn, stmt.value.immutable_fn)
return sorted(names), reductions
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def parfor_defs(parfor, use_set=None, def_set=None):
"""list variables written in this parfor by recursively
calling compute_use_defs() on body and combining block defs.
"""
if use_set is None:
use_set = set()
if def_set is None:
def_set = set()
blocks = wrap_parfor_blocks(parfor)
uses, defs = compute_use_defs(blocks)
cfg = compute_cfg_from_blocks(blocks)
last_label = max(blocks.keys())
unwrap_parfor_blocks(parfor)
# Conservatively, only add defs for blocks that are definitely executed
# Go through blocks in order, as if they are statements of the block that
# includes the parfor, and update uses/defs.
# no need for topo order of ir_utils
topo_order = cfg.topo_order()
# blocks that dominate last block are definitely executed
definitely_executed = cfg.dominators()[last_label]
# except loop bodies that might not execute
for loop in cfg.loops().values():
definitely_executed -= loop.body
for label in topo_order:
if label in definitely_executed:
# see compute_use_defs() in analysis.py
# variables defined in the block that includes the parfor are not
# uses of that block (are not potentially live in the beginning of
# the block)
use_set.update(uses[label] - def_set)
def_set.update(defs[label])
else:
use_set.update(uses[label] - def_set)
# treat loop variables and size variables as use
loop_vars = {l.start.name for l in parfor.loop_nests if isinstance(l.start, ir.Var)}
loop_vars |= {l.stop.name for l in parfor.loop_nests if isinstance(l.stop, ir.Var)}
loop_vars |= {l.step.name for l in parfor.loop_nests if isinstance(l.step, ir.Var)}
use_set.update(loop_vars)
return analysis._use_defs_result(usemap=use_set, defmap=def_set)
|
def parfor_defs(parfor):
"""list variables written in this parfor by recursively
calling compute_use_defs() on body and combining block defs.
"""
all_defs = set()
# index variables are sematically defined here
for l in parfor.loop_nests:
all_defs.add(l.index_variable.name)
# all defs of body blocks
for l, b in parfor.loop_body.items():
for stmt in b.body:
if isinstance(stmt, ir.Assign):
all_defs.add(stmt.target.name)
elif isinstance(stmt, Parfor):
all_defs.update(parfor_defs(stmt))
# all defs of init block
for stmt in parfor.init_block.body:
if isinstance(stmt, ir.Assign):
all_defs.add(stmt.target.name)
elif isinstance(stmt, Parfor):
all_defs.update(parfor_defs(stmt))
return all_defs
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def maximize_fusion(blocks):
call_table, _ = get_call_table(blocks)
for block in blocks.values():
order_changed = True
while order_changed:
order_changed = False
i = 0
while i < len(block.body) - 2:
stmt = block.body[i]
next_stmt = block.body[i + 1]
# swap only parfors with non-parfors
# don't reorder calls with side effects (e.g. file close)
# only read-read dependencies are OK
# make sure there is no write-write, write-read dependencies
if (
isinstance(stmt, Parfor)
and not isinstance(next_stmt, Parfor)
and (
not isinstance(next_stmt, ir.Assign)
or has_no_side_effect(next_stmt.value, set(), call_table)
)
):
stmt_accesses = {v.name for v in stmt.list_vars()}
stmt_writes = get_parfor_writes(stmt)
next_accesses = {v.name for v in next_stmt.list_vars()}
next_writes = get_stmt_writes(next_stmt)
if (
len(
(stmt_writes & next_accesses)
| (next_writes & stmt_accesses)
)
== 0
):
block.body[i] = next_stmt
block.body[i + 1] = stmt
order_changed = True
i += 1
return
|
def maximize_fusion(blocks):
for block in blocks.values():
order_changed = True
while order_changed:
order_changed = False
i = 0
while i < len(block.body) - 2:
stmt = block.body[i]
next_stmt = block.body[i + 1]
# swap only parfors with non-parfors
# only read-read dependencies are OK
# make sure there is no write-write, write-read dependencies
if isinstance(stmt, Parfor) and not isinstance(next_stmt, Parfor):
stmt_accesses = {v.name for v in stmt.list_vars()}
stmt_writes = get_parfor_writes(stmt)
next_accesses = {v.name for v in next_stmt.list_vars()}
next_writes = get_stmt_writes(next_stmt)
if (
len(
(stmt_writes & next_accesses)
| (next_writes & stmt_accesses)
)
== 0
):
block.body[i] = next_stmt
block.body[i + 1] = stmt
order_changed = True
i += 1
return
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def try_fuse(parfor1, parfor2):
"""try to fuse parfors and return a fused parfor, otherwise return None"""
dprint("try_fuse trying to fuse \n", parfor1, "\n", parfor2)
# fusion of parfors with different dimensions not supported yet
if len(parfor1.loop_nests) != len(parfor2.loop_nests):
dprint("try_fuse parfors number of dimensions mismatch")
return None
ndims = len(parfor1.loop_nests)
# all loops should be equal length
for i in range(ndims):
if parfor1.loop_nests[i].correlation != parfor2.loop_nests[i].correlation:
dprint("try_fuse parfor dimension correlation mismatch", i)
return None
# TODO: make sure parfor1's reduction output is not used in parfor2
# only data parallel loops
if has_cross_iter_dep(parfor1) or has_cross_iter_dep(parfor2):
dprint("try_fuse parfor cross iteration dependency found")
return None
# make sure parfor2's init block isn't using any output of parfor1
parfor1_body_usedefs = compute_use_defs(parfor1.loop_body)
parfor1_body_vardefs = set()
for defs in parfor1_body_usedefs.defmap.values():
parfor1_body_vardefs |= defs
init2_uses = compute_use_defs({0: parfor2.init_block}).usemap[0]
if not parfor1_body_vardefs.isdisjoint(init2_uses):
dprint("try_fuse parfor2 init block depends on parfor1 body")
return None
return fuse_parfors_inner(parfor1, parfor2)
|
def try_fuse(parfor1, parfor2):
"""try to fuse parfors and return a fused parfor, otherwise return None"""
dprint("try_fuse trying to fuse \n", parfor1, "\n", parfor2)
# fusion of parfors with different dimensions not supported yet
if len(parfor1.loop_nests) != len(parfor1.loop_nests):
dprint("try_fuse parfors number of dimensions mismatch")
return None
ndims = len(parfor1.loop_nests)
# all loops should be equal length
for i in range(ndims):
if parfor1.loop_nests[i].correlation != parfor2.loop_nests[i].correlation:
dprint("try_fuse parfor dimension correlation mismatch", i)
return None
# TODO: make sure parfor1's reduction output is not used in parfor2
# only data parallel loops
if has_cross_iter_dep(parfor1) or has_cross_iter_dep(parfor2):
dprint("try_fuse parfor cross iteration dependency found")
return None
# make sure parfor2's init block isn't using any output of parfor1
parfor1_body_usedefs = compute_use_defs(parfor1.loop_body)
parfor1_body_vardefs = set()
for defs in parfor1_body_usedefs.defmap.values():
parfor1_body_vardefs |= defs
init2_uses = compute_use_defs({0: parfor2.init_block}).usemap[0]
if not parfor1_body_vardefs.isdisjoint(init2_uses):
dprint("try_fuse parfor2 init block depends on parfor1 body")
return None
return fuse_parfors_inner(parfor1, parfor2)
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def remove_dead_parfor(parfor, lives, arg_aliases, alias_map, typemap):
# remove dead get/sets in last block
# FIXME: I think that "in the last block" is not sufficient in general. We might need to
# remove from any block.
last_label = max(parfor.loop_body.keys())
last_block = parfor.loop_body[last_label]
# save array values set to replace getitems
saved_values = {}
new_body = []
for stmt in last_block.body:
if (
isinstance(stmt, ir.SetItem)
and stmt.index.name == parfor.index_var.name
and stmt.target.name not in lives
):
saved_values[stmt.target.name] = stmt.value
if isinstance(stmt, ir.Assign) and isinstance(stmt.value, ir.Expr):
rhs = stmt.value
if rhs.op == "getitem" and rhs.index.name == parfor.index_var.name:
# replace getitem if value saved
stmt.value = saved_values.get(rhs.value.name, rhs)
new_body.append(stmt)
last_block.body = new_body
alias_set = set(alias_map.keys())
# after getitem replacement, remove extra setitems
new_body = []
in_lives = copy.copy(lives)
for stmt in reversed(last_block.body):
# aliases of lives are also live for setitems
alias_lives = in_lives & alias_set
for v in alias_lives:
in_lives |= alias_map[v]
if (
isinstance(stmt, ir.SetItem)
and stmt.index.name == parfor.index_var.name
and stmt.target.name not in in_lives
):
continue
in_lives |= {v.name for v in stmt.list_vars()}
new_body.append(stmt)
new_body.reverse()
last_block.body = new_body
# process parfor body recursively
remove_dead_parfor_recursive(parfor, lives, arg_aliases, alias_map, typemap)
return
|
def remove_dead_parfor(parfor, lives, args):
# remove dead get/sets in last block
# FIXME: I think that "in the last block" is not sufficient in general. We might need to
# remove from any block.
last_label = max(parfor.loop_body.keys())
last_block = parfor.loop_body[last_label]
# save array values set to replace getitems
saved_values = {}
new_body = []
for stmt in last_block.body:
if (
isinstance(stmt, ir.SetItem)
and stmt.index.name == parfor.index_var.name
and stmt.target.name not in lives
):
saved_values[stmt.target.name] = stmt.value
if isinstance(stmt, ir.Assign) and isinstance(stmt.value, ir.Expr):
rhs = stmt.value
if rhs.op == "getitem" and rhs.index.name == parfor.index_var.name:
# replace getitem if value saved
stmt.value = saved_values.get(rhs.value.name, rhs)
new_body.append(stmt)
last_block.body = new_body
# process parfor body recursively
remove_dead_parfor_recursive(parfor, lives, args)
return
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def remove_dead_parfor_recursive(parfor, lives, arg_aliases, alias_map, typemap):
"""create a dummy function from parfor and call remove dead recursively"""
blocks = parfor.loop_body.copy() # shallow copy is enough
first_body_block = min(blocks.keys())
assert first_body_block > 0 # we are using 0 for init block here
last_label = max(blocks.keys())
return_label = last_label + 1
loc = blocks[last_label].loc
scope = blocks[last_label].scope
blocks[return_label] = ir.Block(scope, loc)
# add dummy jump in init_block for CFG to work
blocks[0] = parfor.init_block
blocks[0].body.append(ir.Jump(first_body_block, loc))
# add lives in a dummpy return to last block to avoid their removal
tuple_var = ir.Var(scope, mk_unique_var("$tuple_var"), loc)
# dummy type for tuple_var
typemap[tuple_var.name] = types.containers.UniTuple(types.intp, 2)
live_vars = [ir.Var(scope, v, loc) for v in lives]
tuple_call = ir.Expr.build_tuple(live_vars, loc)
blocks[return_label].body.append(ir.Assign(tuple_call, tuple_var, loc))
blocks[return_label].body.append(ir.Return(tuple_var, loc))
branch = ir.Branch(0, first_body_block, return_label, loc)
blocks[last_label].body.append(branch)
# args var including aliases is ok
remove_dead(blocks, arg_aliases, typemap, alias_map, arg_aliases)
typemap.pop(tuple_var.name) # remove dummy tuple type
blocks[0].body.pop() # remove dummy jump
blocks[last_label].body.pop() # remove branch
return
|
def remove_dead_parfor_recursive(parfor, lives, args):
"""create a dummy function from parfor and call remove dead recursively"""
blocks = parfor.loop_body.copy() # shallow copy is enough
first_body_block = min(blocks.keys())
assert first_body_block > 0 # we are using 0 for init block here
last_label = max(blocks.keys())
if len(blocks[last_label].body) == 0:
return
loc = blocks[last_label].body[-1].loc
scope = blocks[last_label].scope
# add dummy jump in init_block for CFG to work
blocks[0] = parfor.init_block
blocks[0].body.append(ir.Jump(first_body_block, loc))
# add lives in a dummpy return to last block to avoid their removal
tuple_var = ir.Var(scope, mk_unique_var("$tuple_var"), loc)
live_vars = [ir.Var(scope, v, loc) for v in lives]
tuple_call = ir.Expr.build_tuple(live_vars, loc)
blocks[last_label].body.append(ir.Assign(tuple_call, tuple_var, loc))
blocks[last_label].body.append(ir.Return(tuple_var, loc))
remove_dead(blocks, args)
blocks[0].body.pop() # remove dummy jump
blocks[last_label].body.pop() # remove dummy return
blocks[last_label].body.pop() # remove dummy tupple
return
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def wrap_parfor_blocks(parfor):
"""wrap parfor blocks for analysis/optimization like CFG"""
blocks = parfor.loop_body.copy() # shallow copy is enough
first_body_block = min(blocks.keys())
assert first_body_block > 0 # we are using 0 for init block here
last_label = max(blocks.keys())
loc = blocks[last_label].loc
# add dummy jump in init_block for CFG to work
blocks[0] = parfor.init_block
blocks[0].body.append(ir.Jump(first_body_block, loc))
blocks[last_label].body.append(ir.Jump(first_body_block, loc))
return blocks
|
def wrap_parfor_blocks(parfor):
"""wrap parfor blocks for analysis/optimization like CFG"""
blocks = parfor.loop_body.copy() # shallow copy is enough
first_body_block = min(blocks.keys())
assert first_body_block > 0 # we are using 0 for init block here
last_label = max(blocks.keys())
loc = blocks[last_label].body[-1].loc
# add dummy jump in init_block for CFG to work
blocks[0] = parfor.init_block
blocks[0].body.append(ir.Jump(first_body_block, loc))
blocks[last_label].body.append(ir.Jump(first_body_block, loc))
return blocks
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def unwrap_parfor_blocks(parfor, blocks=None):
"""
unwrap parfor blocks after analysis/optimization.
Allows changes to the parfor loop.
"""
if blocks is not None:
# make sure init block isn't removed
assert 0 in blocks
# update loop body blocks
blocks.pop(0)
parfor.loop_body = blocks
# make sure dummy jump to loop body isn't altered
first_body_label = min(parfor.loop_body.keys())
assert isinstance(parfor.init_block.body[-1], ir.Jump)
assert parfor.init_block.body[-1].target == first_body_label
# remove dummy jump to loop body
parfor.init_block.body.pop()
# make sure dummy jump back to loop body isn't altered
last_label = max(parfor.loop_body.keys())
assert isinstance(parfor.loop_body[last_label].body[-1], ir.Jump)
assert parfor.loop_body[last_label].body[-1].target == first_body_label
# remove dummy jump back to loop
parfor.loop_body[last_label].body.pop()
return
|
def unwrap_parfor_blocks(parfor):
last_label = max(parfor.loop_body.keys())
parfor.init_block.body.pop() # remove dummy jump
parfor.loop_body[last_label].body.pop() # remove dummy return
return
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def get_copies_parfor(parfor, typemap):
"""find copies generated/killed by parfor"""
blocks = wrap_parfor_blocks(parfor)
in_copies_parfor, out_copies_parfor = copy_propagate(blocks, typemap)
in_gen_copies, in_extra_kill = get_block_copies(blocks, typemap)
unwrap_parfor_blocks(parfor)
# parfor's extra kill is kills of its init block,
# and all possible gens and kills of it's body loop.
# body doesn't gen and only kills since it may or may not run
# TODO: save copies that are repeated in parfor
kill_set = in_extra_kill[0]
for label in parfor.loop_body.keys():
kill_set |= {l for l, r in in_gen_copies[label]}
kill_set |= in_extra_kill[label]
# gen copies is copies generated by init that are not killed by body
last_label = max(parfor.loop_body.keys())
gens = out_copies_parfor[last_label] & in_gen_copies[0]
if config.DEBUG_ARRAY_OPT == 1:
print("copy propagate parfor gens:", gens, "kill_set", kill_set)
return gens, kill_set
|
def get_copies_parfor(parfor, typemap):
"""find copies generated/killed by parfor"""
blocks = wrap_parfor_blocks(parfor)
in_copies_parfor, out_copies_parfor = copy_propagate(blocks, typemap)
in_gen_copies, in_extra_kill = get_block_copies(blocks, typemap)
unwrap_parfor_blocks(parfor)
# parfor's extra kill is all possible gens and kills of it's loop
kill_set = in_extra_kill[0]
for label in parfor.loop_body.keys():
kill_set |= {l for l, r in in_gen_copies[label]}
last_label = max(parfor.loop_body.keys())
if config.DEBUG_ARRAY_OPT == 1:
print(
"copy propagate parfor out_copies:",
out_copies_parfor[last_label],
"kill_set",
kill_set,
)
return out_copies_parfor[last_label], kill_set
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def get_parfor_call_table(parfor, call_table=None, reverse_call_table=None):
if call_table is None:
call_table = {}
if reverse_call_table is None:
reverse_call_table = {}
blocks = wrap_parfor_blocks(parfor)
call_table, reverse_call_table = get_call_table(
blocks, call_table, reverse_call_table
)
unwrap_parfor_blocks(parfor)
return call_table, reverse_call_table
|
def get_parfor_call_table(parfor, call_table={}, reverse_call_table={}):
blocks = wrap_parfor_blocks(parfor)
call_table, reverse_call_table = get_call_table(
blocks, call_table, reverse_call_table
)
unwrap_parfor_blocks(parfor)
return call_table, reverse_call_table
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def get_parfor_tuple_table(parfor, tuple_table=None):
if tuple_table is None:
tuple_table = {}
blocks = wrap_parfor_blocks(parfor)
tuple_table = ir_utils.get_tuple_table(blocks, tuple_table)
unwrap_parfor_blocks(parfor)
return tuple_table
|
def get_parfor_tuple_table(parfor, tuple_table={}):
blocks = wrap_parfor_blocks(parfor)
tuple_table = ir_utils.get_tuple_table(blocks, tuple_table)
unwrap_parfor_blocks(parfor)
return tuple_table
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def _exec_command(command, use_shell=None, use_tee=None, **env):
"""
Internal workhorse for exec_command().
Code from https://github.com/numpy/numpy/pull/7862
"""
if use_shell is None:
use_shell = os.name == "posix"
if use_tee is None:
use_tee = os.name == "posix"
executable = None
if os.name == "posix" and use_shell:
# On POSIX, subprocess always uses /bin/sh, override
sh = os.environ.get("SHELL", "/bin/sh")
if _is_sequence(command):
command = [sh, "-c", " ".join(command)]
else:
command = [sh, "-c", command]
use_shell = False
elif os.name == "nt" and _is_sequence(command):
# On Windows, join the string for CreateProcess() ourselves as
# subprocess does it a bit differently
command = " ".join(_quote_arg(arg) for arg in command)
# Inherit environment by default
env = env or None
try:
proc = subprocess.Popen(
command,
shell=use_shell,
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
)
except EnvironmentError:
# Return 127, as os.spawn*() and /bin/sh do
return "", 127
text, err = proc.communicate()
# Only append stderr if the command failed, as otherwise
# the output may become garbled for parsing
if proc.returncode:
if text:
text += "\n"
text += err
# Another historical oddity
if text[-1:] == "\n":
text = text[:-1]
if use_tee:
print(text)
return proc.returncode, text
|
def _exec_command(command, use_shell=None, use_tee=None, **env):
"""
Internal workhorse for exec_command().
Code from https://github.com/numpy/numpy/pull/7862
"""
if use_shell is None:
use_shell = os.name == "posix"
if use_tee is None:
use_tee = os.name == "posix"
executable = None
if os.name == "posix" and use_shell:
# On POSIX, subprocess always uses /bin/sh, override
sh = os.environ.get("SHELL", "/bin/sh")
if is_sequence(command):
command = [sh, "-c", " ".join(command)]
else:
command = [sh, "-c", command]
use_shell = False
elif os.name == "nt" and _is_sequence(command):
# On Windows, join the string for CreateProcess() ourselves as
# subprocess does it a bit differently
command = " ".join(_quote_arg(arg) for arg in command)
# Inherit environment by default
env = env or None
try:
proc = subprocess.Popen(
command,
shell=use_shell,
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
)
except EnvironmentError:
# Return 127, as os.spawn*() and /bin/sh do
return "", 127
text, err = proc.communicate()
# Only append stderr if the command failed, as otherwise
# the output may become garbled for parsing
if proc.returncode:
if text:
text += "\n"
text += err
# Another historical oddity
if text[-1:] == "\n":
text = text[:-1]
if use_tee:
print(text)
return proc.returncode, text
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def match(self, func_ir, block, typemap, calltypes):
self.prints = prints = {}
self.block = block
# Find all assignments with a right-hand print() call
for inst in block.find_insts(ir.Assign):
if isinstance(inst.value, ir.Expr) and inst.value.op == "call":
expr = inst.value
if expr.kws:
# Only positional args are supported
continue
try:
callee = func_ir.infer_constant(expr.func)
except errors.ConstantInferenceError:
continue
if callee is print:
prints[inst] = expr
return len(prints) > 0
|
def match(self, interp, block, typemap, calltypes):
self.prints = prints = {}
self.block = block
# Find all assignments with a right-hand print() call
for inst in block.find_insts(ir.Assign):
if isinstance(inst.value, ir.Expr) and inst.value.op == "call":
expr = inst.value
if expr.kws:
# Only positional args are supported
continue
try:
callee = interp.infer_constant(expr.func)
except errors.ConstantInferenceError:
continue
if callee is print:
prints[inst] = expr
return len(prints) > 0
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def match(self, func_ir, block, typemap, calltypes):
self.consts = consts = {}
self.block = block
for inst in block.find_insts(ir.Print):
if inst.consts:
# Already rewritten
continue
for idx, var in enumerate(inst.args):
try:
const = func_ir.infer_constant(var)
except errors.ConstantInferenceError:
continue
consts.setdefault(inst, {})[idx] = const
return len(consts) > 0
|
def match(self, interp, block, typemap, calltypes):
self.consts = consts = {}
self.block = block
for inst in block.find_insts(ir.Print):
if inst.consts:
# Already rewritten
continue
for idx, var in enumerate(inst.args):
try:
const = interp.infer_constant(var)
except errors.ConstantInferenceError:
continue
consts.setdefault(inst, {})[idx] = const
return len(consts) > 0
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def match(self, func_ir, block, typemap, calltypes):
"""
Look for potential macros for expand and store their expansions.
"""
self.block = block
self.rewrites = rewrites = {}
for inst in block.body:
if isinstance(inst, ir.Assign):
rhs = inst.value
if (
isinstance(rhs, ir.Expr)
and rhs.op == "call"
and isinstance(rhs.func, ir.Var)
):
# Is it a callable macro?
try:
const = func_ir.infer_constant(rhs.func)
except errors.ConstantInferenceError:
continue
if isinstance(const, Macro):
assert const.callable
new_expr = self._expand_callable_macro(func_ir, rhs, const, rhs.loc)
rewrites[rhs] = new_expr
elif isinstance(rhs, ir.Expr) and rhs.op == "getattr":
# Is it a non-callable macro looked up as a constant attribute?
try:
const = func_ir.infer_constant(inst.target)
except errors.ConstantInferenceError:
continue
if isinstance(const, Macro) and not const.callable:
new_expr = self._expand_non_callable_macro(const, rhs.loc)
rewrites[rhs] = new_expr
return len(rewrites) > 0
|
def match(self, interp, block, typemap, calltypes):
"""
Look for potential macros for expand and store their expansions.
"""
self.block = block
self.rewrites = rewrites = {}
for inst in block.body:
if isinstance(inst, ir.Assign):
rhs = inst.value
if (
isinstance(rhs, ir.Expr)
and rhs.op == "call"
and isinstance(rhs.func, ir.Var)
):
# Is it a callable macro?
try:
const = interp.infer_constant(rhs.func)
except errors.ConstantInferenceError:
continue
if isinstance(const, Macro):
assert const.callable
new_expr = self._expand_callable_macro(interp, rhs, const, rhs.loc)
rewrites[rhs] = new_expr
elif isinstance(rhs, ir.Expr) and rhs.op == "getattr":
# Is it a non-callable macro looked up as a constant attribute?
try:
const = interp.infer_constant(inst.target)
except errors.ConstantInferenceError:
continue
if isinstance(const, Macro) and not const.callable:
new_expr = self._expand_non_callable_macro(const, rhs.loc)
rewrites[rhs] = new_expr
return len(rewrites) > 0
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def _expand_callable_macro(self, func_ir, call, macro, loc):
"""
Return the IR expression of expanding the macro call.
"""
assert macro.callable
# Resolve all macro arguments as constants, or fail
args = [func_ir.infer_constant(arg.name) for arg in call.args]
kws = {}
for k, v in call.kws:
try:
kws[k] = func_ir.infer_constant(v)
except errors.ConstantInferenceError:
msg = "Argument {name!r} must be a constant at {loc}".format(
name=k, loc=loc
)
raise ValueError(msg)
try:
result = macro.func(*args, **kws)
except Exception as e:
msg = str(e)
headfmt = "Macro expansion failed at {line}"
head = headfmt.format(line=loc)
newmsg = "{0}:\n{1}".format(head, msg)
raise errors.MacroError(newmsg)
assert result is not None
result.loc = call.loc
new_expr = ir.Expr.call(func=result, args=call.args, kws=call.kws, loc=loc)
return new_expr
|
def _expand_callable_macro(self, interp, call, macro, loc):
"""
Return the IR expression of expanding the macro call.
"""
assert macro.callable
# Resolve all macro arguments as constants, or fail
args = [interp.infer_constant(arg.name) for arg in call.args]
kws = {}
for k, v in call.kws:
try:
kws[k] = interp.infer_constant(v)
except errors.ConstantInferenceError:
msg = "Argument {name!r} must be a constant at {loc}".format(
name=k, loc=loc
)
raise ValueError(msg)
try:
result = macro.func(*args, **kws)
except Exception as e:
msg = str(e)
headfmt = "Macro expansion failed at {line}"
head = headfmt.format(line=loc)
newmsg = "{0}:\n{1}".format(head, msg)
raise errors.MacroError(newmsg)
assert result is not None
result.loc = call.loc
new_expr = ir.Expr.call(func=result, args=call.args, kws=call.kws, loc=loc)
return new_expr
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def match(self, func_ir, block, typemap, calltypes):
"""Overload this method to check an IR block for matching terms in the
rewrite.
"""
return False
|
def match(self, block, typemap, calltypes):
"""Overload this method to check an IR block for matching terms in the
rewrite.
"""
return False
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def register(self, kind):
"""
Decorator adding a subclass of Rewrite to the registry for
the given *kind*.
"""
if kind not in self._kinds:
raise KeyError("invalid kind %r" % (kind,))
def do_register(rewrite_cls):
if not issubclass(rewrite_cls, Rewrite):
raise TypeError("{0} is not a subclass of Rewrite".format(rewrite_cls))
self.rewrites[kind].append(rewrite_cls)
return rewrite_cls
return do_register
|
def register(self, kind):
"""
Decorator adding a subclass of Rewrite to the registry for
the given *kind*.
"""
if not kind in self._kinds:
raise KeyError("invalid kind %r" % (kind,))
def do_register(rewrite_cls):
if not issubclass(rewrite_cls, Rewrite):
raise TypeError("{0} is not a subclass of Rewrite".format(rewrite_cls))
self.rewrites[kind].append(rewrite_cls)
return rewrite_cls
return do_register
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def apply(self, kind, pipeline, func_ir):
"""Given a pipeline and a dictionary of basic blocks, exhaustively
attempt to apply all registered rewrites to all basic blocks.
"""
assert kind in self._kinds
blocks = func_ir.blocks
old_blocks = blocks.copy()
for rewrite_cls in self.rewrites[kind]:
# Exhaustively apply a rewrite until it stops matching.
rewrite = rewrite_cls(pipeline)
work_list = list(blocks.items())
while work_list:
key, block = work_list.pop()
matches = rewrite.match(
func_ir, block, pipeline.typemap, pipeline.calltypes
)
if matches:
if config.DEBUG or config.DUMP_IR:
print("_" * 70)
print("REWRITING (%s):" % rewrite_cls.__name__)
block.dump()
print("_" * 60)
new_block = rewrite.apply()
blocks[key] = new_block
work_list.append((key, new_block))
if config.DEBUG or config.DUMP_IR:
new_block.dump()
print("_" * 70)
# If any blocks were changed, perform a sanity check.
for key, block in blocks.items():
if block != old_blocks[key]:
block.verify()
|
def apply(self, kind, pipeline, interp):
"""Given a pipeline and a dictionary of basic blocks, exhaustively
attempt to apply all registered rewrites to all basic blocks.
"""
assert kind in self._kinds
blocks = interp.blocks
old_blocks = blocks.copy()
for rewrite_cls in self.rewrites[kind]:
# Exhaustively apply a rewrite until it stops matching.
rewrite = rewrite_cls(pipeline)
work_list = list(blocks.items())
while work_list:
key, block = work_list.pop()
matches = rewrite.match(interp, block, pipeline.typemap, pipeline.calltypes)
if matches:
if config.DEBUG or config.DUMP_IR:
print("_" * 70)
print("REWRITING (%s):" % rewrite_cls.__name__)
block.dump()
print("_" * 60)
new_block = rewrite.apply()
blocks[key] = new_block
work_list.append((key, new_block))
if config.DEBUG or config.DUMP_IR:
new_block.dump()
print("_" * 70)
# If any blocks were changed, perform a sanity check.
for key, block in blocks.items():
if block != old_blocks[key]:
block.verify()
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def match(self, func_ir, block, typemap, calltypes):
self.static_lhs = {}
self.static_rhs = {}
self.block = block
# Find binop expressions with a constant lhs or rhs
for expr in block.find_exprs(op="binop"):
try:
if expr.fn in self.rhs_operators and expr.static_rhs is ir.UNDEFINED:
self.static_rhs[expr] = func_ir.infer_constant(expr.rhs)
except errors.ConstantInferenceError:
continue
return len(self.static_lhs) > 0 or len(self.static_rhs) > 0
|
def match(self, interp, block, typemap, calltypes):
self.static_lhs = {}
self.static_rhs = {}
self.block = block
# Find binop expressions with a constant lhs or rhs
for expr in block.find_exprs(op="binop"):
try:
if expr.fn in self.rhs_operators and expr.static_rhs is ir.UNDEFINED:
self.static_rhs[expr] = interp.infer_constant(expr.rhs)
except errors.ConstantInferenceError:
continue
return len(self.static_lhs) > 0 or len(self.static_rhs) > 0
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def match(self, func_ir, block, typemap, calltypes):
self.getitems = getitems = {}
self.block = block
# Detect all getitem expressions and find which ones can be
# rewritten
for expr in block.find_exprs(op="getitem"):
if expr.op == "getitem":
try:
const = func_ir.infer_constant(expr.index)
except errors.ConstantInferenceError:
continue
getitems[expr] = const
return len(getitems) > 0
|
def match(self, interp, block, typemap, calltypes):
self.getitems = getitems = {}
self.block = block
# Detect all getitem expressions and find which ones can be
# rewritten
for expr in block.find_exprs(op="getitem"):
if expr.op == "getitem":
try:
const = interp.infer_constant(expr.index)
except errors.ConstantInferenceError:
continue
getitems[expr] = const
return len(getitems) > 0
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def match(self, func_ir, block, typemap, calltypes):
self.setitems = setitems = {}
self.block = block
# Detect all setitem statements and find which ones can be
# rewritten
for inst in block.find_insts(ir.SetItem):
try:
const = func_ir.infer_constant(inst.index)
except errors.ConstantInferenceError:
continue
setitems[inst] = const
return len(setitems) > 0
|
def match(self, interp, block, typemap, calltypes):
self.setitems = setitems = {}
self.block = block
# Detect all setitem statements and find which ones can be
# rewritten
for inst in block.find_insts(ir.SetItem):
try:
const = interp.infer_constant(inst.index)
except errors.ConstantInferenceError:
continue
setitems[inst] = const
return len(setitems) > 0
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def _break_constant(self, const):
"""
Break down constant exception.
"""
if isinstance(const, BaseException):
return const.__class__, const.args
elif self._is_exception_type(const):
return const, None
else:
raise NotImplementedError("unsupported exception constant %r" % (const,))
|
def _break_constant(self, interp, const):
"""
Break down constant exception.
"""
if isinstance(const, BaseException):
return const.__class__, const.args
elif self._is_exception_type(const):
return const, None
else:
raise NotImplementedError("unsupported exception constant %r" % (const,))
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def match(self, func_ir, block, typemap, calltypes):
self.raises = raises = {}
self.block = block
# Detect all raise statements and find which ones can be
# rewritten
for inst in block.find_insts(ir.Raise):
if inst.exception is None:
# re-reraise
exc_type, exc_args = None, None
else:
# raise <something> => find the definition site for <something>
const = func_ir.infer_constant(inst.exception)
exc_type, exc_args = self._break_constant(const)
raises[inst] = exc_type, exc_args
return len(raises) > 0
|
def match(self, interp, block, typemap, calltypes):
self.raises = raises = {}
self.block = block
# Detect all raise statements and find which ones can be
# rewritten
for inst in block.find_insts(ir.Raise):
if inst.exception is None:
# re-reraise
exc_type, exc_args = None, None
else:
# raise <something> => find the definition site for <something>
const = interp.infer_constant(inst.exception)
exc_type, exc_args = self._break_constant(interp, const)
raises[inst] = exc_type, exc_args
return len(raises) > 0
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def _partition(A, low, high):
mid = (low + high) >> 1
# NOTE: the pattern of swaps below for the pivot choice and the
# partitioning gives good results (i.e. regular O(n log n))
# on sorted, reverse-sorted, and uniform arrays. Subtle changes
# risk breaking this property.
# Use median of three {low, middle, high} as the pivot
if A[mid] < A[low]:
A[low], A[mid] = A[mid], A[low]
if A[high] < A[mid]:
A[high], A[mid] = A[mid], A[high]
if A[mid] < A[low]:
A[low], A[mid] = A[mid], A[low]
pivot = A[mid]
A[high], A[mid] = A[mid], A[high]
i = low
j = high - 1
while True:
while i < high and A[i] < pivot:
i += 1
while j >= low and pivot < A[j]:
j -= 1
if i >= j:
break
A[i], A[j] = A[j], A[i]
i += 1
j -= 1
# Put the pivot back in its final place (all items before `i`
# are smaller than the pivot, all items at/after `i` are larger)
A[i], A[high] = A[high], A[i]
return i
|
def _partition(A, low, high):
mid = (low + high) >> 1
# NOTE: the pattern of swaps below for the pivot choice and the
# partitioning gives good results (i.e. regular O(n log n))
# on sorted, reverse-sorted, and uniform arrays. Subtle changes
# risk breaking this property.
# Use median of three {low, middle, high} as the pivot
if A[mid] < A[low]:
A[low], A[mid] = A[mid], A[low]
if A[high] < A[mid]:
A[high], A[mid] = A[mid], A[high]
if A[mid] < A[low]:
A[low], A[mid] = A[mid], A[low]
pivot = A[mid]
A[high], A[mid] = A[mid], A[high]
i = low
for j in range(low, high):
if A[j] <= pivot:
A[i], A[j] = A[j], A[i]
i += 1
A[i], A[high] = A[high], A[i]
return i
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def enum_eq(context, builder, sig, args):
tu, tv = sig.args
u, v = args
res = context.generic_compare(builder, "==", (tu.dtype, tv.dtype), (u, v))
return impl_ret_untracked(context, builder, sig.return_type, res)
|
def enum_eq(context, builder, sig, args):
tu, tv = sig.args
u, v = args
res = context.generic_compare(builder, "!=", (tu.dtype, tv.dtype), (u, v))
return impl_ret_untracked(context, builder, sig.return_type, res)
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def _build_array(context, builder, array_ty, input_types, inputs):
"""Utility function to handle allocation of an implicit output array
given the target context, builder, output array type, and a list of
_ArrayHelper instances.
"""
intp_ty = context.get_value_type(types.intp)
def make_intp_const(val):
return context.get_constant(types.intp, val)
ZERO = make_intp_const(0)
ONE = make_intp_const(1)
src_shape = cgutils.alloca_once(builder, intp_ty, array_ty.ndim, "src_shape")
dest_ndim = make_intp_const(array_ty.ndim)
dest_shape = cgutils.alloca_once(builder, intp_ty, array_ty.ndim, "dest_shape")
dest_shape_addrs = tuple(
cgutils.gep_inbounds(builder, dest_shape, index)
for index in range(array_ty.ndim)
)
# Initialize the destination shape with all ones.
for dest_shape_addr in dest_shape_addrs:
builder.store(ONE, dest_shape_addr)
# For each argument, try to broadcast onto the destination shape,
# mutating along any axis where the argument shape is not one and
# the destination shape is one.
for arg_number, arg in enumerate(inputs):
if not hasattr(arg, "ndim"): # Skip scalar arguments
continue
arg_ndim = make_intp_const(arg.ndim)
for index in range(arg.ndim):
builder.store(
arg.shape[index], cgutils.gep_inbounds(builder, src_shape, index)
)
arg_result = context.compile_internal(
builder,
_broadcast_onto,
_broadcast_onto_sig,
[arg_ndim, src_shape, dest_ndim, dest_shape],
)
with cgutils.if_unlikely(builder, builder.icmp(lc.ICMP_SLT, arg_result, ONE)):
msg = "unable to broadcast argument %d to output array" % (arg_number,)
loc = errors.loc_info.get("loc", None)
if loc is not None:
msg += '\nFile "%s", line %d, ' % (loc.filename, loc.line)
context.call_conv.return_user_exc(builder, ValueError, (msg,))
real_array_ty = array_ty.as_array
dest_shape_tup = tuple(
builder.load(dest_shape_addr) for dest_shape_addr in dest_shape_addrs
)
array_val = arrayobj._empty_nd_impl(context, builder, real_array_ty, dest_shape_tup)
# Get the best argument to call __array_wrap__ on
array_wrapper_index = select_array_wrapper(input_types)
array_wrapper_ty = input_types[array_wrapper_index]
try:
# __array_wrap__(source wrapped array, out array) -> out wrapped array
array_wrap = context.get_function(
"__array_wrap__", array_ty(array_wrapper_ty, real_array_ty)
)
except NotImplementedError:
# If it's the same priority as a regular array, assume we
# should use the allocated array unchanged.
if array_wrapper_ty.array_priority != types.Array.array_priority:
raise
out_val = array_val._getvalue()
else:
wrap_args = (inputs[array_wrapper_index].return_val, array_val._getvalue())
out_val = array_wrap(builder, wrap_args)
ndim = array_ty.ndim
shape = cgutils.unpack_tuple(builder, array_val.shape, ndim)
strides = cgutils.unpack_tuple(builder, array_val.strides, ndim)
return _ArrayHelper(
context,
builder,
shape,
strides,
array_val.data,
array_ty.layout,
array_ty.dtype,
ndim,
out_val,
)
|
def _build_array(context, builder, array_ty, input_types, inputs):
"""Utility function to handle allocation of an implicit output array
given the target context, builder, output array type, and a list of
_ArrayHelper instances.
"""
intp_ty = context.get_value_type(types.intp)
def make_intp_const(val):
return context.get_constant(types.intp, val)
ZERO = make_intp_const(0)
ONE = make_intp_const(1)
src_shape = cgutils.alloca_once(builder, intp_ty, array_ty.ndim, "src_shape")
dest_ndim = make_intp_const(array_ty.ndim)
dest_shape = cgutils.alloca_once(builder, intp_ty, array_ty.ndim, "dest_shape")
dest_shape_addrs = tuple(
cgutils.gep_inbounds(builder, dest_shape, index)
for index in range(array_ty.ndim)
)
# Initialize the destination shape with all ones.
for dest_shape_addr in dest_shape_addrs:
builder.store(ONE, dest_shape_addr)
# For each argument, try to broadcast onto the destination shape,
# mutating along any axis where the argument shape is not one and
# the destination shape is one.
for arg_number, arg in enumerate(inputs):
if not hasattr(arg, "ndim"): # Skip scalar arguments
continue
arg_ndim = make_intp_const(arg.ndim)
for index in range(arg.ndim):
builder.store(
arg.shape[index], cgutils.gep_inbounds(builder, src_shape, index)
)
arg_result = context.compile_internal(
builder,
_broadcast_onto,
_broadcast_onto_sig,
[arg_ndim, src_shape, dest_ndim, dest_shape],
)
with cgutils.if_unlikely(builder, builder.icmp(lc.ICMP_SLT, arg_result, ONE)):
msg = "unable to broadcast argument %d to output array" % (arg_number,)
context.call_conv.return_user_exc(builder, ValueError, (msg,))
real_array_ty = array_ty.as_array
dest_shape_tup = tuple(
builder.load(dest_shape_addr) for dest_shape_addr in dest_shape_addrs
)
array_val = arrayobj._empty_nd_impl(context, builder, real_array_ty, dest_shape_tup)
# Get the best argument to call __array_wrap__ on
array_wrapper_index = select_array_wrapper(input_types)
array_wrapper_ty = input_types[array_wrapper_index]
try:
# __array_wrap__(source wrapped array, out array) -> out wrapped array
array_wrap = context.get_function(
"__array_wrap__", array_ty(array_wrapper_ty, real_array_ty)
)
except NotImplementedError:
# If it's the same priority as a regular array, assume we
# should use the allocated array unchanged.
if array_wrapper_ty.array_priority != types.Array.array_priority:
raise
out_val = array_val._getvalue()
else:
wrap_args = (inputs[array_wrapper_index].return_val, array_val._getvalue())
out_val = array_wrap(builder, wrap_args)
ndim = array_ty.ndim
shape = cgutils.unpack_tuple(builder, array_val.shape, ndim)
strides = cgutils.unpack_tuple(builder, array_val.strides, ndim)
return _ArrayHelper(
context,
builder,
shape,
strides,
array_val.data,
array_ty.layout,
array_ty.dtype,
ndim,
out_val,
)
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def print_item_impl(context, builder, sig, args):
"""
Print a single native value by boxing it in a Python object and
invoking the Python interpreter's print routine.
"""
(ty,) = sig.args
(val,) = args
pyapi = context.get_python_api(builder)
env_manager = context.get_env_manager(builder)
if context.enable_nrt:
context.nrt.incref(builder, ty, val)
obj = pyapi.from_native_value(ty, val, env_manager)
with builder.if_else(cgutils.is_not_null(builder, obj), likely=True) as (
if_ok,
if_error,
):
with if_ok:
pyapi.print_object(obj)
pyapi.decref(obj)
with if_error:
cstr = context.insert_const_string(builder.module, "the print() function")
strobj = pyapi.string_from_string(cstr)
pyapi.err_write_unraisable(strobj)
pyapi.decref(strobj)
res = context.get_dummy_value()
return impl_ret_untracked(context, builder, sig.return_type, res)
|
def print_item_impl(context, builder, sig, args):
"""
Print a single native value by boxing it in a Python object and
invoking the Python interpreter's print routine.
"""
(ty,) = sig.args
(val,) = args
pyapi = context.get_python_api(builder)
if context.enable_nrt:
context.nrt.incref(builder, ty, val)
# XXX unfortunately, we don't have access to the env manager from here
obj = pyapi.from_native_value(ty, val)
with builder.if_else(cgutils.is_not_null(builder, obj), likely=True) as (
if_ok,
if_error,
):
with if_ok:
pyapi.print_object(obj)
pyapi.decref(obj)
with if_error:
cstr = context.insert_const_string(builder.module, "the print() function")
strobj = pyapi.string_from_string(cstr)
pyapi.err_write_unraisable(strobj)
pyapi.decref(strobj)
res = context.get_dummy_value()
return impl_ret_untracked(context, builder, sig.return_type, res)
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def make_range_impl(int_type, range_state_type, range_iter_type):
RangeState = cgutils.create_struct_proxy(range_state_type)
@lower_builtin(range, int_type)
@lower_builtin(prange, int_type)
def range1_impl(context, builder, sig, args):
"""
range(stop: int) -> range object
"""
[stop] = args
state = RangeState(context, builder)
state.start = context.get_constant(int_type, 0)
state.stop = stop
state.step = context.get_constant(int_type, 1)
return impl_ret_untracked(context, builder, range_state_type, state._getvalue())
@lower_builtin(range, int_type, int_type)
@lower_builtin(prange, int_type, int_type)
def range2_impl(context, builder, sig, args):
"""
range(start: int, stop: int) -> range object
"""
start, stop = args
state = RangeState(context, builder)
state.start = start
state.stop = stop
state.step = context.get_constant(int_type, 1)
return impl_ret_untracked(context, builder, range_state_type, state._getvalue())
@lower_builtin(range, int_type, int_type, int_type)
@lower_builtin(prange, int_type, int_type, int_type)
def range3_impl(context, builder, sig, args):
"""
range(start: int, stop: int, step: int) -> range object
"""
[start, stop, step] = args
state = RangeState(context, builder)
state.start = start
state.stop = stop
state.step = step
return impl_ret_untracked(context, builder, range_state_type, state._getvalue())
@lower_builtin(len, range_state_type)
def range_len(context, builder, sig, args):
"""
len(range)
"""
(value,) = args
state = RangeState(context, builder, value)
res = RangeIter.from_range_state(context, builder, state)
return impl_ret_untracked(context, builder, int_type, builder.load(res.count))
@lower_builtin("getiter", range_state_type)
def getiter_range32_impl(context, builder, sig, args):
"""
range.__iter__
"""
(value,) = args
state = RangeState(context, builder, value)
res = RangeIter.from_range_state(context, builder, state)._getvalue()
return impl_ret_untracked(context, builder, range_iter_type, res)
@iterator_impl(range_state_type, range_iter_type)
class RangeIter(make_range_iterator(range_iter_type)):
@classmethod
def from_range_state(cls, context, builder, state):
"""
Create a RangeIter initialized from the given RangeState *state*.
"""
self = cls(context, builder)
start = state.start
stop = state.stop
step = state.step
startptr = cgutils.alloca_once(builder, start.type)
builder.store(start, startptr)
countptr = cgutils.alloca_once(builder, start.type)
self.iter = startptr
self.stop = stop
self.step = step
self.count = countptr
diff = builder.sub(stop, start)
zero = context.get_constant(int_type, 0)
one = context.get_constant(int_type, 1)
pos_diff = builder.icmp(lc.ICMP_SGT, diff, zero)
pos_step = builder.icmp(lc.ICMP_SGT, step, zero)
sign_differs = builder.xor(pos_diff, pos_step)
zero_step = builder.icmp(lc.ICMP_EQ, step, zero)
with cgutils.if_unlikely(builder, zero_step):
# step shouldn't be zero
context.call_conv.return_user_exc(
builder, ValueError, ("range() arg 3 must not be zero",)
)
with builder.if_else(sign_differs) as (then, orelse):
with then:
builder.store(zero, self.count)
with orelse:
rem = builder.srem(diff, step)
rem = builder.select(pos_diff, rem, builder.neg(rem))
uneven = builder.icmp(lc.ICMP_SGT, rem, zero)
newcount = builder.add(
builder.sdiv(diff, step), builder.select(uneven, one, zero)
)
builder.store(newcount, self.count)
return self
def iternext(self, context, builder, result):
zero = context.get_constant(int_type, 0)
countptr = self.count
count = builder.load(countptr)
is_valid = builder.icmp(lc.ICMP_SGT, count, zero)
result.set_valid(is_valid)
with builder.if_then(is_valid):
value = builder.load(self.iter)
result.yield_(value)
one = context.get_constant(int_type, 1)
builder.store(builder.sub(count, one, flags=["nsw"]), countptr)
builder.store(builder.add(value, self.step), self.iter)
|
def make_range_impl(int_type, range_state_type, range_iter_type):
RangeState = cgutils.create_struct_proxy(range_state_type)
@lower_builtin(range, int_type)
def range1_impl(context, builder, sig, args):
"""
range(stop: int) -> range object
"""
[stop] = args
state = RangeState(context, builder)
state.start = context.get_constant(int_type, 0)
state.stop = stop
state.step = context.get_constant(int_type, 1)
return impl_ret_untracked(context, builder, range_state_type, state._getvalue())
@lower_builtin(range, int_type, int_type)
def range2_impl(context, builder, sig, args):
"""
range(start: int, stop: int) -> range object
"""
start, stop = args
state = RangeState(context, builder)
state.start = start
state.stop = stop
state.step = context.get_constant(int_type, 1)
return impl_ret_untracked(context, builder, range_state_type, state._getvalue())
@lower_builtin(range, int_type, int_type, int_type)
def range3_impl(context, builder, sig, args):
"""
range(start: int, stop: int, step: int) -> range object
"""
[start, stop, step] = args
state = RangeState(context, builder)
state.start = start
state.stop = stop
state.step = step
return impl_ret_untracked(context, builder, range_state_type, state._getvalue())
@lower_builtin(len, range_state_type)
def range_len(context, builder, sig, args):
"""
len(range)
"""
(value,) = args
state = RangeState(context, builder, value)
res = RangeIter.from_range_state(context, builder, state)
return impl_ret_untracked(context, builder, int_type, builder.load(res.count))
@lower_builtin("getiter", range_state_type)
def getiter_range32_impl(context, builder, sig, args):
"""
range.__iter__
"""
(value,) = args
state = RangeState(context, builder, value)
res = RangeIter.from_range_state(context, builder, state)._getvalue()
return impl_ret_untracked(context, builder, range_iter_type, res)
@iterator_impl(range_state_type, range_iter_type)
class RangeIter(make_range_iterator(range_iter_type)):
@classmethod
def from_range_state(cls, context, builder, state):
"""
Create a RangeIter initialized from the given RangeState *state*.
"""
self = cls(context, builder)
start = state.start
stop = state.stop
step = state.step
startptr = cgutils.alloca_once(builder, start.type)
builder.store(start, startptr)
countptr = cgutils.alloca_once(builder, start.type)
self.iter = startptr
self.stop = stop
self.step = step
self.count = countptr
diff = builder.sub(stop, start)
zero = context.get_constant(int_type, 0)
one = context.get_constant(int_type, 1)
pos_diff = builder.icmp(lc.ICMP_SGT, diff, zero)
pos_step = builder.icmp(lc.ICMP_SGT, step, zero)
sign_differs = builder.xor(pos_diff, pos_step)
zero_step = builder.icmp(lc.ICMP_EQ, step, zero)
with cgutils.if_unlikely(builder, zero_step):
# step shouldn't be zero
context.call_conv.return_user_exc(
builder, ValueError, ("range() arg 3 must not be zero",)
)
with builder.if_else(sign_differs) as (then, orelse):
with then:
builder.store(zero, self.count)
with orelse:
rem = builder.srem(diff, step)
rem = builder.select(pos_diff, rem, builder.neg(rem))
uneven = builder.icmp(lc.ICMP_SGT, rem, zero)
newcount = builder.add(
builder.sdiv(diff, step), builder.select(uneven, one, zero)
)
builder.store(newcount, self.count)
return self
def iternext(self, context, builder, result):
zero = context.get_constant(int_type, 0)
countptr = self.count
count = builder.load(countptr)
is_valid = builder.icmp(lc.ICMP_SGT, count, zero)
result.set_valid(is_valid)
with builder.if_then(is_valid):
value = builder.load(self.iter)
result.yield_(value)
one = context.get_constant(int_type, 1)
builder.store(builder.sub(count, one, flags=["nsw"]), countptr)
builder.store(builder.add(value, self.step), self.iter)
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def range_iter_len(typingctx, val):
"""
An implementation of len(range_iter) for internal use.
"""
if isinstance(val, types.RangeIteratorType):
val_type = val.yield_type
def codegen(context, builder, sig, args):
(value,) = args
iter_type = range_impl_map[val_type][1]
iterobj = cgutils.create_struct_proxy(iter_type)(context, builder, value)
int_type = iterobj.count.type
return impl_ret_untracked(
context, builder, int_type, builder.load(iterobj.count)
)
return signature(val_type, val), codegen
elif isinstance(val, types.ListIter):
def codegen(context, builder, sig, args):
(value,) = args
intp_t = context.get_value_type(types.intp)
iterobj = ListIterInstance(context, builder, sig.args[0], value)
return impl_ret_untracked(context, builder, intp_t, iterobj.size)
return signature(types.intp, val), codegen
elif isinstance(val, types.ArrayIterator):
def codegen(context, builder, sig, args):
(iterty,) = sig.args
(value,) = args
intp_t = context.get_value_type(types.intp)
iterobj = context.make_helper(builder, iterty, value=value)
arrayty = iterty.array_type
ary = make_array(arrayty)(context, builder, value=iterobj.array)
shape = cgutils.unpack_tuple(builder, ary.shape)
# array iterates along the outer dimension
return impl_ret_untracked(context, builder, intp_t, shape[0])
return signature(types.intp, val), codegen
|
def range_iter_len(typingctx, val):
"""
An implementation of len(range_iter) for internal use.
"""
if isinstance(val, types.RangeIteratorType):
val_type = val.yield_type
def codegen(context, builder, sig, args):
(value,) = args
iter_type = range_impl_map[val_type][1]
state = cgutils.create_struct_proxy(iter_type)(context, builder, value)
int_type = state.count.type
return impl_ret_untracked(
context, builder, int_type, builder.load(state.count)
)
return signature(val_type, val), codegen
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def codegen(context, builder, sig, args):
(iterty,) = sig.args
(value,) = args
intp_t = context.get_value_type(types.intp)
iterobj = context.make_helper(builder, iterty, value=value)
arrayty = iterty.array_type
ary = make_array(arrayty)(context, builder, value=iterobj.array)
shape = cgutils.unpack_tuple(builder, ary.shape)
# array iterates along the outer dimension
return impl_ret_untracked(context, builder, intp_t, shape[0])
|
def codegen(context, builder, sig, args):
(value,) = args
iter_type = range_impl_map[val_type][1]
state = cgutils.create_struct_proxy(iter_type)(context, builder, value)
int_type = state.count.type
return impl_ret_untracked(context, builder, int_type, builder.load(state.count))
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def refine(self, typeinfer, target_type):
# Do not back-propagate to locked variables (e.g. constants)
assert target_type.is_precise()
typeinfer.add_type(self.src, target_type, unless_locked=True, loc=self.loc)
|
def refine(self, typeinfer, target_type):
# Do not back-propagate to locked variables (e.g. constants)
typeinfer.add_type(self.src, target_type, unless_locked=True, loc=self.loc)
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def __call__(self, typeinfer):
with new_error_context("typing of argument at {0}", self.loc):
typevars = typeinfer.typevars
src = typevars[self.src]
if not src.defined:
return
ty = src.getone()
if isinstance(ty, types.Omitted):
ty = typeinfer.context.resolve_value_type(ty.value)
assert ty.is_precise()
typeinfer.add_type(self.dst, ty, loc=self.loc)
|
def __call__(self, typeinfer):
with new_error_context("typing of argument at {0}", self.loc):
typevars = typeinfer.typevars
src = typevars[self.src]
if not src.defined:
return
ty = src.getone()
if isinstance(ty, types.Omitted):
ty = typeinfer.context.resolve_value_type(ty.value)
typeinfer.add_type(self.dst, ty, loc=self.loc)
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def __call__(self, typeinfer):
with new_error_context("typing of tuple at {0}", self.loc):
typevars = typeinfer.typevars
tsets = [typevars[i.name].get() for i in self.items]
oset = typevars[self.target]
for vals in itertools.product(*tsets):
if vals and all(vals[0] == v for v in vals):
tup = types.UniTuple(dtype=vals[0], count=len(vals))
else:
# empty tuples fall here as well
tup = types.Tuple(vals)
assert tup.is_precise()
typeinfer.add_type(self.target, tup, loc=self.loc)
|
def __call__(self, typeinfer):
with new_error_context("typing of tuple at {0}", self.loc):
typevars = typeinfer.typevars
tsets = [typevars[i.name].get() for i in self.items]
oset = typevars[self.target]
for vals in itertools.product(*tsets):
if vals and all(vals[0] == v for v in vals):
tup = types.UniTuple(dtype=vals[0], count=len(vals))
else:
# empty tuples fall here as well
tup = types.Tuple(vals)
typeinfer.add_type(self.target, tup, loc=self.loc)
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def __call__(self, typeinfer):
with new_error_context("typing of exhaust iter at {0}", self.loc):
typevars = typeinfer.typevars
oset = typevars[self.target]
for tp in typevars[self.iterator.name].get():
# unpack optional
tp = tp.type if isinstance(tp, types.Optional) else tp
if isinstance(tp, types.BaseTuple):
if len(tp) == self.count:
assert tp.is_precise()
typeinfer.add_type(self.target, tp, loc=self.loc)
break
else:
raise ValueError(
"wrong tuple length for %r: "
"expected %d, got %d"
% (self.iterator.name, self.count, len(tp))
)
elif isinstance(tp, types.IterableType):
tup = types.UniTuple(
dtype=tp.iterator_type.yield_type, count=self.count
)
assert tup.is_precise()
typeinfer.add_type(self.target, tup, loc=self.loc)
break
else:
raise TypingError("failed to unpack {}".format(tp), loc=self.loc)
|
def __call__(self, typeinfer):
with new_error_context("typing of exhaust iter at {0}", self.loc):
typevars = typeinfer.typevars
oset = typevars[self.target]
for tp in typevars[self.iterator.name].get():
# unpack optional
tp = tp.type if isinstance(tp, types.Optional) else tp
if isinstance(tp, types.BaseTuple):
if len(tp) == self.count:
typeinfer.add_type(self.target, tp, loc=self.loc)
break
else:
raise ValueError(
"wrong tuple length for %r: "
"expected %d, got %d"
% (self.iterator.name, self.count, len(tp))
)
elif isinstance(tp, types.IterableType):
tup = types.UniTuple(
dtype=tp.iterator_type.yield_type, count=self.count
)
typeinfer.add_type(self.target, tup, loc=self.loc)
break
else:
raise TypingError("failed to unpack {}".format(tp), loc=self.loc)
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def __call__(self, typeinfer):
with new_error_context("typing of pair-first at {0}", self.loc):
typevars = typeinfer.typevars
oset = typevars[self.target]
for tp in typevars[self.pair.name].get():
if not isinstance(tp, types.Pair):
# XXX is this an error?
continue
assert tp.first_type.is_precise()
typeinfer.add_type(self.target, tp.first_type, loc=self.loc)
|
def __call__(self, typeinfer):
with new_error_context("typing of pair-first at {0}", self.loc):
typevars = typeinfer.typevars
oset = typevars[self.target]
for tp in typevars[self.pair.name].get():
if not isinstance(tp, types.Pair):
# XXX is this an error?
continue
typeinfer.add_type(self.target, tp.first_type, loc=self.loc)
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def __call__(self, typeinfer):
with new_error_context("typing of pair-second at {0}", self.loc):
typevars = typeinfer.typevars
oset = typevars[self.target]
for tp in typevars[self.pair.name].get():
if not isinstance(tp, types.Pair):
# XXX is this an error?
continue
assert tp.second_type.is_precise()
typeinfer.add_type(self.target, tp.second_type, loc=self.loc)
|
def __call__(self, typeinfer):
with new_error_context("typing of pair-second at {0}", self.loc):
typevars = typeinfer.typevars
oset = typevars[self.target]
for tp in typevars[self.pair.name].get():
if not isinstance(tp, types.Pair):
# XXX is this an error?
continue
typeinfer.add_type(self.target, tp.second_type, loc=self.loc)
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def __call__(self, typeinfer):
with new_error_context("typing of static-get-item at {0}", self.loc):
typevars = typeinfer.typevars
oset = typevars[self.target]
for ty in typevars[self.value.name].get():
itemty = typeinfer.context.resolve_static_getitem(
value=ty, index=self.index
)
if itemty is not None:
assert itemty.is_precise()
typeinfer.add_type(self.target, itemty, loc=self.loc)
elif self.fallback is not None:
self.fallback(typeinfer)
|
def __call__(self, typeinfer):
with new_error_context("typing of static-get-item at {0}", self.loc):
typevars = typeinfer.typevars
oset = typevars[self.target]
for ty in typevars[self.value.name].get():
itemty = typeinfer.context.resolve_static_getitem(
value=ty, index=self.index
)
if itemty is not None:
typeinfer.add_type(self.target, itemty, loc=self.loc)
elif self.fallback is not None:
self.fallback(typeinfer)
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def resolve(self, typeinfer, typevars, fnty):
assert fnty
context = typeinfer.context
r = fold_arg_vars(typevars, self.args, self.vararg, self.kws)
if r is None:
# Cannot resolve call type until all argument types are known
return
pos_args, kw_args = r
# Check argument to be precise
for a in itertools.chain(pos_args, kw_args.values()):
if not a.is_precise():
# Getitem on non-precise array is allowed to
# support array-comprehension
if fnty == "getitem" and isinstance(pos_args[0], types.Array):
pass
# Otherwise, don't compute type yet
else:
return
# Resolve call type
sig = typeinfer.resolve_call(fnty, pos_args, kw_args)
if sig is None:
# Arguments are invalid => explain why
headtemp = "Invalid usage of {0} with parameters ({1})"
args = [str(a) for a in pos_args]
args += ["%s=%s" % (k, v) for k, v in sorted(kw_args.items())]
head = headtemp.format(fnty, ", ".join(map(str, args)))
desc = context.explain_function_type(fnty)
msg = "\n".join([head, desc])
raise TypingError(msg, loc=self.loc)
typeinfer.add_type(self.target, sig.return_type, loc=self.loc)
# If the function is a bound function and its receiver type
# was refined, propagate it.
if (
isinstance(fnty, types.BoundFunction)
and sig.recvr is not None
and sig.recvr != fnty.this
):
refined_this = context.unify_pairs(sig.recvr, fnty.this)
if refined_this is not None and refined_this.is_precise():
refined_fnty = fnty.copy(this=refined_this)
typeinfer.propagate_refined_type(self.func, refined_fnty)
# If the return type is imprecise but can be unified with the
# target variable's inferred type, use the latter.
# Useful for code such as::
# s = set()
# s.add(1)
# (the set() call must be typed as int64(), not undefined())
if not sig.return_type.is_precise():
target = typevars[self.target]
if target.defined:
targetty = target.getone()
if context.unify_pairs(targetty, sig.return_type) == targetty:
sig = sig.replace(return_type=targetty)
self.signature = sig
target_type = typevars[self.target].getone()
if isinstance(target_type, types.Array) and isinstance(
sig.return_type.dtype, types.Undefined
):
typeinfer.refine_map[self.target] = self
|
def resolve(self, typeinfer, typevars, fnty):
assert fnty
context = typeinfer.context
r = fold_arg_vars(typevars, self.args, self.vararg, self.kws)
if r is None:
# Cannot resolve call type until all argument types are known
return
pos_args, kw_args = r
# Resolve call type
sig = typeinfer.resolve_call(fnty, pos_args, kw_args)
if sig is None:
# Arguments are invalid => explain why
headtemp = "Invalid usage of {0} with parameters ({1})"
args = [str(a) for a in pos_args]
args += ["%s=%s" % (k, v) for k, v in sorted(kw_args.items())]
head = headtemp.format(fnty, ", ".join(map(str, args)))
desc = context.explain_function_type(fnty)
msg = "\n".join([head, desc])
raise TypingError(msg, loc=self.loc)
typeinfer.add_type(self.target, sig.return_type, loc=self.loc)
# If the function is a bound function and its receiver type
# was refined, propagate it.
if (
isinstance(fnty, types.BoundFunction)
and sig.recvr is not None
and sig.recvr != fnty.this
):
refined_this = context.unify_pairs(sig.recvr, fnty.this)
if refined_this is not None and refined_this.is_precise():
refined_fnty = fnty.copy(this=refined_this)
typeinfer.propagate_refined_type(self.func, refined_fnty)
# If the return type is imprecise but can be unified with the
# target variable's inferred type, use the latter.
# Useful for code such as::
# s = set()
# s.add(1)
# (the set() call must be typed as int64(), not undefined())
if not sig.return_type.is_precise():
target = typevars[self.target]
if target.defined:
targetty = target.getone()
if context.unify_pairs(targetty, sig.return_type) == targetty:
sig.return_type = targetty
self.signature = sig
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def __call__(self, typeinfer):
with new_error_context("typing of get attribute at {0}", self.loc):
typevars = typeinfer.typevars
valtys = typevars[self.value.name].get()
for ty in valtys:
attrty = typeinfer.context.resolve_getattr(ty, self.attr)
if attrty is None:
raise UntypedAttributeError(ty, self.attr, loc=self.inst.loc)
else:
assert attrty.is_precise()
typeinfer.add_type(self.target, attrty, loc=self.loc)
typeinfer.refine_map[self.target] = self
|
def __call__(self, typeinfer):
with new_error_context("typing of get attribute at {0}", self.loc):
typevars = typeinfer.typevars
valtys = typevars[self.value.name].get()
for ty in valtys:
attrty = typeinfer.context.resolve_getattr(ty, self.attr)
if attrty is None:
raise UntypedAttributeError(ty, self.attr, loc=self.inst.loc)
else:
typeinfer.add_type(self.target, attrty, loc=self.loc)
typeinfer.refine_map[self.target] = self
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def refine(self, typeinfer, target_type):
if isinstance(target_type, types.BoundFunction):
recvr = target_type.this
assert recvr.is_precise()
typeinfer.add_type(self.value.name, recvr, loc=self.loc)
source_constraint = typeinfer.refine_map.get(self.value.name)
if source_constraint is not None:
source_constraint.refine(typeinfer, recvr)
|
def refine(self, typeinfer, target_type):
if isinstance(target_type, types.BoundFunction):
recvr = target_type.this
typeinfer.add_type(self.value.name, recvr, loc=self.loc)
source_constraint = typeinfer.refine_map.get(self.value.name)
if source_constraint is not None:
source_constraint.refine(typeinfer, recvr)
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def __call__(self, typeinfer):
with new_error_context("typing of setitem at {0}", self.loc):
typevars = typeinfer.typevars
if not all(
typevars[var.name].defined for var in (self.target, self.index, self.value)
):
return
targetty = typevars[self.target.name].getone()
idxty = typevars[self.index.name].getone()
valty = typevars[self.value.name].getone()
sig = typeinfer.context.resolve_setitem(targetty, idxty, valty)
if sig is None:
raise TypingError(
"Cannot resolve setitem: %s[%s] = %s" % (targetty, idxty, valty),
loc=self.loc,
)
# For array setitem, refine imprecise array dtype
if _is_array_not_precise(targetty):
assert sig.args[0].is_precise()
typeinfer.add_type(self.target.name, sig.args[0], loc=self.loc)
self.signature = sig
|
def __call__(self, typeinfer):
with new_error_context("typing of setitem at {0}", self.loc):
typevars = typeinfer.typevars
if not all(
typevars[var.name].defined for var in (self.target, self.index, self.value)
):
return
targetty = typevars[self.target.name].getone()
idxty = typevars[self.index.name].getone()
valty = typevars[self.value.name].getone()
sig = typeinfer.context.resolve_setitem(targetty, idxty, valty)
if sig is None:
raise TypingError(
"Cannot resolve setitem: %s[%s] = %s" % (targetty, idxty, valty),
loc=self.loc,
)
self.signature = sig
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def __init__(self, context, func_ir, warnings):
self.context = context
# sort based on label, ensure iteration order!
self.blocks = OrderedDict()
for k in sorted(func_ir.blocks.keys()):
self.blocks[k] = func_ir.blocks[k]
self.generator_info = func_ir.generator_info
self.func_id = func_ir.func_id
self.func_ir = func_ir
self.typevars = TypeVarMap()
self.typevars.set_context(context)
self.constraints = ConstraintNetwork()
self.warnings = warnings
# { index: mangled name }
self.arg_names = {}
# self.return_type = None
# Set of assumed immutable globals
self.assumed_immutables = set()
# Track all calls and associated constraints
self.calls = []
# The inference result of the above calls
self.calltypes = utils.UniqueDict()
# Target var -> constraint with refine hook
self.refine_map = {}
if config.DEBUG or config.DEBUG_TYPEINFER:
self.debug = TypeInferDebug(self)
else:
self.debug = NullDebug()
self._skip_recursion = False
|
def __init__(self, context, func_ir, warnings):
self.context = context
self.blocks = func_ir.blocks
self.generator_info = func_ir.generator_info
self.func_id = func_ir.func_id
self.func_ir = func_ir
self.typevars = TypeVarMap()
self.typevars.set_context(context)
self.constraints = ConstraintNetwork()
self.warnings = warnings
# { index: mangled name }
self.arg_names = {}
# self.return_type = None
# Set of assumed immutable globals
self.assumed_immutables = set()
# Track all calls and associated constraints
self.calls = []
# The inference result of the above calls
self.calltypes = utils.UniqueDict()
# Target var -> constraint with refine hook
self.refine_map = {}
if config.DEBUG or config.DEBUG_TYPEINFER:
self.debug = TypeInferDebug(self)
else:
self.debug = NullDebug()
self._skip_recursion = False
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def constrain_statement(self, inst):
if isinstance(inst, ir.Assign):
self.typeof_assign(inst)
elif isinstance(inst, ir.SetItem):
self.typeof_setitem(inst)
elif isinstance(inst, ir.StaticSetItem):
self.typeof_static_setitem(inst)
elif isinstance(inst, ir.DelItem):
self.typeof_delitem(inst)
elif isinstance(inst, ir.SetAttr):
self.typeof_setattr(inst)
elif isinstance(inst, ir.Print):
self.typeof_print(inst)
elif isinstance(inst, (ir.Jump, ir.Branch, ir.Return, ir.Del)):
pass
elif isinstance(inst, ir.StaticRaise):
pass
elif type(inst) in typeinfer_extensions:
# let external calls handle stmt if type matches
f = typeinfer_extensions[type(inst)]
f(inst, self)
else:
raise NotImplementedError(inst)
|
def constrain_statement(self, inst):
if isinstance(inst, ir.Assign):
self.typeof_assign(inst)
elif isinstance(inst, ir.SetItem):
self.typeof_setitem(inst)
elif isinstance(inst, ir.StaticSetItem):
self.typeof_static_setitem(inst)
elif isinstance(inst, ir.DelItem):
self.typeof_delitem(inst)
elif isinstance(inst, ir.SetAttr):
self.typeof_setattr(inst)
elif isinstance(inst, ir.Print):
self.typeof_print(inst)
elif isinstance(inst, (ir.Jump, ir.Branch, ir.Return, ir.Del)):
pass
elif isinstance(inst, ir.StaticRaise):
pass
else:
raise NotImplementedError(inst)
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def unify(self, typingctx, other):
"""
Unify this with the *other* Array.
"""
# If other is array and the ndim matches
if isinstance(other, Array) and other.ndim == self.ndim:
# If dtype matches or other.dtype is undefined (inferred)
if other.dtype == self.dtype or not other.dtype.is_precise():
if self.layout == other.layout:
layout = self.layout
else:
layout = "A"
readonly = not (self.mutable and other.mutable)
aligned = self.aligned and other.aligned
return Array(
dtype=self.dtype,
ndim=self.ndim,
layout=layout,
readonly=readonly,
aligned=aligned,
)
|
def unify(self, typingctx, other):
"""
Unify this with the *other* Array.
"""
if (
isinstance(other, Array)
and other.ndim == self.ndim
and other.dtype == self.dtype
):
if self.layout == other.layout:
layout = self.layout
else:
layout = "A"
readonly = not (self.mutable and other.mutable)
aligned = self.aligned and other.aligned
return Array(
dtype=self.dtype,
ndim=self.ndim,
layout=layout,
readonly=readonly,
aligned=aligned,
)
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def generic(self, args, kws):
assert not kws
ary, idx, val = args
if not isinstance(ary, types.Buffer):
return
if not ary.mutable:
raise TypeError("Cannot modify value of type %s" % (ary,))
out = get_array_index_type(ary, idx)
if out is None:
return
idx = out.index
res = out.result
if isinstance(res, types.Array):
# Indexing produces an array
if isinstance(val, types.Array):
if not self.context.can_convert(val.dtype, res.dtype):
# DType conversion not possible
return
else:
res = val
elif isinstance(val, types.Sequence):
if res.ndim == 1 and self.context.can_convert(val.dtype, res.dtype):
# Allow assignement of sequence to 1d array
res = val
else:
# NOTE: sequence-to-array broadcasting is unsupported
return
else:
# Allow scalar broadcasting
if self.context.can_convert(val, res.dtype):
res = res.dtype
else:
# Incompatible scalar type
return
elif not isinstance(val, types.Array):
# Single item assignment
if not self.context.can_convert(val, res):
# if the array dtype is not yet defined
if not res.is_precise():
# set the array type to use the dtype of value (RHS)
newary = ary.copy(dtype=val)
return signature(types.none, newary, idx, res)
else:
return
res = val
else:
return
return signature(types.none, ary, idx, res)
|
def generic(self, args, kws):
assert not kws
ary, idx, val = args
if not isinstance(ary, types.Buffer):
return
if not ary.mutable:
raise TypeError("Cannot modify value of type %s" % (ary,))
out = get_array_index_type(ary, idx)
if out is None:
return
idx = out.index
res = out.result
if isinstance(res, types.Array):
# Indexing produces an array
if isinstance(val, types.Array):
if not self.context.can_convert(val.dtype, res.dtype):
# DType conversion not possible
return
else:
res = val
elif isinstance(val, types.Sequence):
if res.ndim == 1 and self.context.can_convert(val.dtype, res.dtype):
# Allow assignement of sequence to 1d array
res = val
else:
# NOTE: sequence-to-array broadcasting is unsupported
return
else:
# Allow scalar broadcasting
if self.context.can_convert(val, res.dtype):
res = res.dtype
else:
# Incompatible scalar type
return
elif not isinstance(val, types.Array):
# Single item assignment
res = val
else:
return
return signature(types.none, ary, idx, res)
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def generic_expand(self, args, kws):
assert not args
assert not kws
return signature(_expand_integer(self.this.dtype), recvr=self.this)
|
def generic_expand(self, args, kws):
return signature(_expand_integer(self.this.dtype), recvr=self.this)
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def generic_expand_cumulative(self, args, kws):
assert not args
assert not kws
assert isinstance(self.this, types.Array)
return_type = types.Array(
dtype=_expand_integer(self.this.dtype), ndim=1, layout="C"
)
return signature(return_type, recvr=self.this)
|
def generic_expand_cumulative(self, args, kws):
assert isinstance(self.this, types.Array)
return_type = types.Array(
dtype=_expand_integer(self.this.dtype), ndim=1, layout="C"
)
return signature(return_type, recvr=self.this)
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def generic(self, args, kws):
(arg,) = args
return signature(types.none, *args)
|
def generic(self, args, kws):
(arg,) = args
if self.is_accepted_type(arg):
return signature(types.none, *args)
|
https://github.com/numba/numba/issues/25
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-3b23f7331ceb> in <module>()
----> 1 @jit(arg_types=[numba.double], ret_type=numba.double)
2 def is_REALLY_five(some_value):
3 for i in range(5):
4 if some_value == 5.0:
5 return 1.0
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/decorators.pyc in _jit(func)
77 "garbage collected!" % (func,))
78 t = Translate(func, *args, **kws)
---> 79 t.translate()
80 __tr_map__[func] = t
81 return t.get_ctypes_func(llvm)
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in translate(self)
926 # Ensure we are playing with locals that might
927 # actually precede the next block.
--> 928 self.check_locals(i)
929
930 self.crnt_block = i
/Library/Frameworks/EPD64.framework/Versions/7.3/lib/python2.7/site-packages/numba/translate.pyc in check_locals(self, i)
1135 else:
1136 assert next_locals is not None, "Internal compiler error!"
-> 1137 self._locals = next_locals[:]
1138
1139 def get_ctypes_func(self, llvm=True):
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def run(self, fd, iovec, iovcnt):
if iovec.symbolic or iovcnt.symbolic:
raise angr.errors.SimPosixError("Can't handle symbolic arguments to readv")
iovcnt = self.state.solver.eval(iovcnt)
res = 0
for element in self.state.mem[iovec].struct.iovec.array(iovcnt).resolved:
tmpres = self.inline_call(read, fd, element.iov_base, element.iov_len).ret_expr
if self.state.solver.is_true(self.state.solver.SLT(tmpres, 0)):
return tmpres
return res
|
def run(self, fd, iovec, iovcnt):
if iovec.symbolic or iovcnt.symbolic:
raise angr.errors.SimPosixError("Can't handle symbolic arguments to readv")
iovcnt = self.state.solver.eval(iovcnt)
res = 0
for element in self.state.mem[iovec].struct.iovec.array(iovcnt).resolved:
tmpres = self.inline_call(read, fd, element.iov_base, element.iov_len)
if self.state.solver.is_true(self.state.solver.SLT(tmpres, 0)):
return tmpres
return res
|
https://github.com/angr/angr/issues/2447
|
Traceback (most recent call last):
File "try.py", line 5, in <module>
sm.explore()
File "/home/berlinm/.local/lib/python3.7/site-packages/angr/sim_manager.py", line 239, in explore
self.run(stash=stash, n=n, **kwargs)
File "/home/berlinm/.local/lib/python3.7/site-packages/angr/sim_manager.py", line 261, in run
self.step(stash=stash, **kwargs)
File "/home/berlinm/.local/lib/python3.7/site-packages/angr/misc/hookset.py", line 75, in __call__
result = current_hook(self.func.__self__, *args, **kwargs)
File "/home/berlinm/.local/lib/python3.7/site-packages/angr/exploration_techniques/explorer.py", line 96, in step
return simgr.step(stash=stash, extra_stop_points=base_extra_stop_points | self._extra_stop_points, **kwargs)
File "/home/berlinm/.local/lib/python3.7/site-packages/angr/misc/hookset.py", line 80, in __call__
return self.func(*args, **kwargs)
File "/home/berlinm/.local/lib/python3.7/site-packages/angr/sim_manager.py", line 346, in step
successors = self.step_state(state, successor_func=successor_func, **run_args)
File "/home/berlinm/.local/lib/python3.7/site-packages/angr/sim_manager.py", line 383, in step_state
successors = self.successors(state, successor_func=successor_func, **run_args)
File "/home/berlinm/.local/lib/python3.7/site-packages/angr/sim_manager.py", line 422, in successors
return self._project.factory.successors(state, **run_args)
File "/home/berlinm/.local/lib/python3.7/site-packages/angr/factory.py", line 60, in successors
return self.default_engine.process(*args, **kwargs)
File "/home/berlinm/.local/lib/python3.7/site-packages/angr/engines/vex/light/slicing.py", line 19, in process
return super().process(*args, **kwargs)
File "/home/berlinm/.local/lib/python3.7/site-packages/angr/engines/engine.py", line 149, in process
self.process_successors(self.successors, **kwargs)
File "/home/berlinm/.local/lib/python3.7/site-packages/angr/engines/failure.py", line 21, in process_successors
return super().process_successors(successors, **kwargs)
File "/home/berlinm/.local/lib/python3.7/site-packages/angr/engines/syscall.py", line 38, in process_successors
return self.process_procedure(state, successors, sys_procedure, **kwargs)
File "/home/berlinm/.local/lib/python3.7/site-packages/angr/engines/procedure.py", line 37, in process_procedure
inst = procedure.execute(state, successors, ret_to=ret_to, arguments=arguments)
File "/home/berlinm/.local/lib/python3.7/site-packages/angr/sim_procedure.py", line 230, in execute
r = getattr(inst, inst.run_func)(*sim_args, **inst.kwargs)
File "/home/berlinm/.local/lib/python3.7/site-packages/angr/procedures/linux_kernel/iovec.py", line 21, in run
if self.state.solver.is_true(self.state.solver.SLT(tmpres, 0)):
File "/home/berlinm/.local/lib/python3.7/site-packages/angr/state_plugins/solver.py", line 126, in concrete_shortcut_bool
return f(self, *args, **kwargs)
File "/home/berlinm/.local/lib/python3.7/site-packages/angr/state_plugins/sim_action_object.py", line 57, in ast_stripper
return f(*new_args, **new_kwargs)
File "/home/berlinm/.local/lib/python3.7/site-packages/angr/state_plugins/solver.py", line 89, in wrapped_f
return f(*args, **kwargs)
File "/home/berlinm/.local/lib/python3.7/site-packages/angr/state_plugins/solver.py", line 585, in is_true
return self._solver.is_true(e, extra_constraints=self._adjust_constraint_list(extra_constraints), exact=exact)
File "/home/berlinm/.local/lib/python3.7/site-packages/claripy/frontend_mixins/concrete_handler_mixin.py", line 53, in is_true
return super(ConcreteHandlerMixin, self).is_true(e, **kwargs)
File "/home/berlinm/.local/lib/python3.7/site-packages/claripy/frontend_mixins/constraint_filter_mixin.py", line 60, in is_true
return super(ConstraintFilterMixin, self).is_true(e, extra_constraints=ec, **kwargs)
File "/home/berlinm/.local/lib/python3.7/site-packages/claripy/frontends/composite_frontend.py", line 349, in is_true
r = ms.is_true(e, extra_constraints=extra_constraints, exact=exact)
File "/home/berlinm/.local/lib/python3.7/site-packages/claripy/frontends/full_frontend.py", line 184, in is_true
return e.is_true()
AttributeError: 'NotImplementedType' object has no attribute 'is_true'
|
AttributeError
|
def _indirect_jump_encountered(
self,
addr: int,
cfg_node: CFGNode,
irsb: pyvex.IRSB,
func_addr: int,
stmt_idx: Union[int, str] = DEFAULT_STATEMENT,
) -> Tuple[bool, List[int], Optional[IndirectJump]]:
"""
Called when we encounter an indirect jump. We will try to resolve this indirect jump using timeless (fast)
indirect jump resolvers. If it cannot be resolved, we will see if this indirect jump has been resolved before.
:param addr: Address of the block containing the indirect jump.
:param cfg_node: The CFGNode instance of the block that contains the indirect jump.
:param irsb: The IRSB instance of the block that contains the indirect jump. It must be lifted with
cross-instruction optimization disabled (cross_insn_opt=True when opt_level=1, or
opt_level=0).
:param func_addr: Address of the current function.
:param stmt_idx: ID of the source statement.
:return: A 3-tuple of (whether it is resolved or not, all resolved targets, an IndirectJump object
if there is one or None otherwise)
"""
jumpkind = irsb.jumpkind
l.debug("IRSB %#x has an indirect jump (%s) as its default exit.", addr, jumpkind)
# try resolving it fast
resolved, resolved_targets = self._resolve_indirect_jump_timelessly(
addr, irsb, func_addr, jumpkind
)
if resolved:
l.debug(
"Indirect jump at block %#x is resolved by a timeless indirect jump resolver. "
"%d targets found.",
addr,
len(resolved_targets),
)
return True, resolved_targets, None
l.debug(
"Indirect jump at block %#x cannot be resolved by a timeless indirect jump resolver.",
addr,
)
# Add it to our set. Will process it later if user allows.
# Create an IndirectJump instance
if addr not in self.indirect_jumps:
if self.project.arch.branch_delay_slot:
if len(cfg_node.instruction_addrs) < 2:
# sanity check
# decoding failed when decoding the second instruction (or even the first instruction)
return False, [], None
ins_addr = cfg_node.instruction_addrs[-2]
else:
ins_addr = cfg_node.instruction_addrs[-1]
ij = IndirectJump(
addr, ins_addr, func_addr, jumpkind, stmt_idx, resolved_targets=[]
)
self.indirect_jumps[addr] = ij
resolved = False
else:
ij = self.indirect_jumps[addr] # type: IndirectJump
resolved = len(ij.resolved_targets) > 0
return resolved, ij.resolved_targets, ij
|
def _indirect_jump_encountered(
self, addr, cfg_node, irsb, func_addr, stmt_idx=DEFAULT_STATEMENT
):
"""
Called when we encounter an indirect jump. We will try to resolve this indirect jump using timeless (fast)
indirect jump resolvers. If it cannot be resolved, we will see if this indirect jump has been resolved before.
:param int addr: Address of the block containing the indirect jump.
:param cfg_node: The CFGNode instance of the block that contains the indirect jump.
:param pyvex.IRSB irsb: The IRSB instance of the block that contains the indirect jump.
:param int func_addr: Address of the current function.
:param int or str stmt_idx: ID of the source statement.
:return: A 3-tuple of (whether it is resolved or not, all resolved targets, an IndirectJump object
if there is one or None otherwise)
:rtype: tuple
"""
jumpkind = irsb.jumpkind
l.debug("IRSB %#x has an indirect jump (%s) as its default exit.", addr, jumpkind)
# try resolving it fast
resolved, resolved_targets = self._resolve_indirect_jump_timelessly(
addr, irsb, func_addr, jumpkind
)
if resolved:
l.debug(
"Indirect jump at block %#x is resolved by a timeless indirect jump resolver. "
"%d targets found.",
addr,
len(resolved_targets),
)
return True, resolved_targets, None
l.debug(
"Indirect jump at block %#x cannot be resolved by a timeless indirect jump resolver.",
addr,
)
# Add it to our set. Will process it later if user allows.
# Create an IndirectJump instance
if addr not in self.indirect_jumps:
if self.project.arch.branch_delay_slot:
if len(cfg_node.instruction_addrs) < 2:
# sanity check
# decoding failed when decoding the second instruction (or even the first instruction)
return False, [], None
ins_addr = cfg_node.instruction_addrs[-2]
else:
ins_addr = cfg_node.instruction_addrs[-1]
ij = IndirectJump(
addr, ins_addr, func_addr, jumpkind, stmt_idx, resolved_targets=[]
)
self.indirect_jumps[addr] = ij
resolved = False
else:
ij = self.indirect_jumps[addr] # type: IndirectJump
resolved = len(ij.resolved_targets) > 0
return resolved, ij.resolved_targets, ij
|
https://github.com/angr/angr/issues/2448
|
import angr
p = angr.Project("get")
cfg = p.analyses.CFGEmulated()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/manuel/.virtualenvs/angr/lib/python3.8/site-packages/angr/analyses/analysis.py", line 115, in __call__
oself.__init__(*args, **kwargs)
File "/home/manuel/.virtualenvs/angr/lib/python3.8/site-packages/angr/analyses/cfg/cfg_emulated.py", line 306, in __init__
self._analyze()
File "/home/manuel/.virtualenvs/angr/lib/python3.8/site-packages/angr/analyses/forward_analysis/forward_analysis.py", line 216, in _analyze
self._analysis_core_baremetal()
File "/home/manuel/.virtualenvs/angr/lib/python3.8/site-packages/angr/analyses/forward_analysis/forward_analysis.py", line 368, in _analysis_core_baremetal
self._process_job_and_get_successors(job_info)
File "/home/manuel/.virtualenvs/angr/lib/python3.8/site-packages/angr/analyses/forward_analysis/forward_analysis.py", line 386, in _process_job_and_get_successors
successors = self._get_successors(job)
File "/home/manuel/.virtualenvs/angr/lib/python3.8/site-packages/angr/analyses/cfg/cfg_emulated.py", line 1295, in _get_successors
resolved, resolved_targets, ij = self._indirect_jump_encountered(addr, cfg_node, irsb, func_addr,
File "/home/manuel/.virtualenvs/angr/lib/python3.8/site-packages/angr/analyses/cfg/cfg_base.py", line 2331, in _indirect_jump_encountered
resolved, resolved_targets = self._resolve_indirect_jump_timelessly(addr, irsb, func_addr, jumpkind)
File "/home/manuel/.virtualenvs/angr/lib/python3.8/site-packages/angr/analyses/cfg/cfg_base.py", line 2274, in _resolve_indirect_jump_timelessly
r, resolved_targets = res.resolve(self, addr, func_addr, block, jumpkind)
File "/home/manuel/.virtualenvs/angr/lib/python3.8/site-packages/angr/analyses/cfg/indirect_jump_resolvers/arm_elf_fast.py", line 60, in resolve
stmt = block.statements[stmt_idx]
IndexError: list index out of range
|
IndexError
|
def _get_successors(self, job):
"""
Get a collection of successors out of the current job.
:param CFGJob job: The CFGJob instance.
:return: A collection of successors.
:rtype: list
"""
addr = job.addr
sim_successors = job.sim_successors
cfg_node = job.cfg_node
input_state = job.state
func_addr = job.func_addr
# check step limit
if self._max_steps is not None:
depth = cfg_node.depth
if depth >= self._max_steps:
return []
successors = []
is_indirect_jump = sim_successors.sort == "IRSB" and self._is_indirect_jump(
cfg_node, sim_successors
)
indirect_jump_resolved_by_resolvers = False
if is_indirect_jump and self._resolve_indirect_jumps:
# Try to resolve indirect jumps
irsb = input_state.block(cross_insn_opt=False).vex
resolved, resolved_targets, ij = self._indirect_jump_encountered(
addr, cfg_node, irsb, func_addr, stmt_idx=DEFAULT_STATEMENT
)
if resolved:
successors = self._convert_indirect_jump_targets_to_states(
job, resolved_targets
)
if ij:
self._indirect_jump_resolved(ij, ij.addr, None, resolved_targets)
else:
# Try to resolve this indirect jump using heavier approaches
resolved_targets = self._process_one_indirect_jump(ij)
successors = self._convert_indirect_jump_targets_to_states(
job, resolved_targets
)
if successors:
indirect_jump_resolved_by_resolvers = True
else:
# It's unresolved. Add it to the wait list (but apparently we don't have any better way to resolve it
# right now).
self._indirect_jumps_to_resolve.add(ij)
if not successors:
# Get all successors of this block
successors = (
(sim_successors.flat_successors + sim_successors.unsat_successors)
if addr not in self._avoid_runs
else []
)
# Post-process successors
successors, job.extra_info = self._post_process_successors(
input_state, sim_successors, successors
)
all_successors = successors + sim_successors.unconstrained_successors
# make sure FakeRets are at the last
all_successors = [
suc for suc in all_successors if suc.history.jumpkind != "Ijk_FakeRet"
] + [suc for suc in all_successors if suc.history.jumpkind == "Ijk_FakeRet"]
if self._keep_state:
cfg_node.final_states = all_successors[::]
if is_indirect_jump and not indirect_jump_resolved_by_resolvers:
# For indirect jumps, filter successors that do not make sense
successors = self._filter_insane_successors(successors)
successors = self._try_resolving_indirect_jumps(
sim_successors,
cfg_node,
func_addr,
successors,
job.exception_info,
self._block_artifacts,
)
# Remove all successors whose IP is symbolic
successors = [s for s in successors if not s.ip.symbolic]
# Add additional edges supplied by the user
successors = self._add_additional_edges(
input_state, sim_successors, cfg_node, successors
)
# if base graph is used, add successors implied from the graph
if self._base_graph:
basegraph_successor_addrs = set()
for src_, dst_ in self._base_graph.edges():
if src_.addr == addr:
basegraph_successor_addrs.add(dst_.addr)
successor_addrs = {s.solver.eval(s.ip) for s in successors}
extra_successor_addrs = basegraph_successor_addrs - successor_addrs
if all_successors: # make sure we have a base state to use
base_state = all_successors[
0
] # TODO: for calls, we want to use the fake_ret state
for s_addr in extra_successor_addrs:
# an extra target
successor_state = base_state.copy()
successor_state.ip = s_addr
successors.append(successor_state)
else:
if extra_successor_addrs:
l.error(
"CFGEmulated terminates at %#x although base graph provided more exits.",
addr,
)
if not successors:
# There is no way out :-(
# Log it first
self._push_unresolvable_run(addr)
if sim_successors.sort == "SimProcedure" and isinstance(
sim_successors.artifacts["procedure"],
SIM_PROCEDURES["stubs"]["PathTerminator"],
):
# If there is no valid exit in this branch and it's not
# intentional (e.g. caused by a SimProcedure that does not
# do_return) , we should make it return to its call-site. However,
# we don't want to use its state anymore as it might be corrupted.
# Just create an edge in the graph.
return_target = job.call_stack.current_return_target
if return_target is not None:
new_call_stack = job.call_stack_copy()
return_target_key = self._generate_block_id(
new_call_stack.stack_suffix(self.context_sensitivity_level),
return_target,
False,
) # You can never return to a syscall
if not cfg_node.instruction_addrs:
ret_ins_addr = None
else:
if self.project.arch.branch_delay_slot:
if len(cfg_node.instruction_addrs) > 1:
ret_ins_addr = cfg_node.instruction_addrs[-2]
else:
l.error(
"At %s: expecting more than one instruction. Only got one.",
cfg_node,
)
ret_ins_addr = None
else:
ret_ins_addr = cfg_node.instruction_addrs[-1]
# Things might be a bit difficult here. _graph_add_edge() requires both nodes to exist, but here
# the return target node may not exist yet. If that's the case, we will put it into a "delayed edge
# list", and add this edge later when the return target CFGNode is created.
if return_target_key in self._nodes:
self._graph_add_edge(
job.block_id,
return_target_key,
jumpkind="Ijk_Ret",
stmt_id=DEFAULT_STATEMENT,
ins_addr=ret_ins_addr,
)
else:
self._pending_edges[return_target_key].append(
(
job.block_id,
return_target_key,
{
"jumpkind": "Ijk_Ret",
"stmt_id": DEFAULT_STATEMENT,
"ins_addr": ret_ins_addr,
},
)
)
else:
# There are no successors, but we still want to update the function graph
artifacts = job.sim_successors.artifacts
if (
"irsb" in artifacts
and "insn_addrs" in artifacts
and artifacts["insn_addrs"]
):
the_irsb = artifacts["irsb"]
insn_addrs = artifacts["insn_addrs"]
self._handle_job_without_successors(job, the_irsb, insn_addrs)
# TODO: replace it with a DDG-based function IO analysis
# handle all actions
if successors:
self._handle_actions(
successors[0],
sim_successors,
job.current_function,
job.current_stack_pointer,
set(),
)
return successors
|
def _get_successors(self, job):
"""
Get a collection of successors out of the current job.
:param CFGJob job: The CFGJob instance.
:return: A collection of successors.
:rtype: list
"""
addr = job.addr
sim_successors = job.sim_successors
cfg_node = job.cfg_node
input_state = job.state
func_addr = job.func_addr
# check step limit
if self._max_steps is not None:
depth = cfg_node.depth
if depth >= self._max_steps:
return []
successors = []
is_indirect_jump = sim_successors.sort == "IRSB" and self._is_indirect_jump(
cfg_node, sim_successors
)
indirect_jump_resolved_by_resolvers = False
if is_indirect_jump and self._resolve_indirect_jumps:
# Try to resolve indirect jumps
irsb = input_state.block().vex
resolved, resolved_targets, ij = self._indirect_jump_encountered(
addr, cfg_node, irsb, func_addr, stmt_idx=DEFAULT_STATEMENT
)
if resolved:
successors = self._convert_indirect_jump_targets_to_states(
job, resolved_targets
)
if ij:
self._indirect_jump_resolved(ij, ij.addr, None, resolved_targets)
else:
# Try to resolve this indirect jump using heavier approaches
resolved_targets = self._process_one_indirect_jump(ij)
successors = self._convert_indirect_jump_targets_to_states(
job, resolved_targets
)
if successors:
indirect_jump_resolved_by_resolvers = True
else:
# It's unresolved. Add it to the wait list (but apparently we don't have any better way to resolve it
# right now).
self._indirect_jumps_to_resolve.add(ij)
if not successors:
# Get all successors of this block
successors = (
(sim_successors.flat_successors + sim_successors.unsat_successors)
if addr not in self._avoid_runs
else []
)
# Post-process successors
successors, job.extra_info = self._post_process_successors(
input_state, sim_successors, successors
)
all_successors = successors + sim_successors.unconstrained_successors
# make sure FakeRets are at the last
all_successors = [
suc for suc in all_successors if suc.history.jumpkind != "Ijk_FakeRet"
] + [suc for suc in all_successors if suc.history.jumpkind == "Ijk_FakeRet"]
if self._keep_state:
cfg_node.final_states = all_successors[::]
if is_indirect_jump and not indirect_jump_resolved_by_resolvers:
# For indirect jumps, filter successors that do not make sense
successors = self._filter_insane_successors(successors)
successors = self._try_resolving_indirect_jumps(
sim_successors,
cfg_node,
func_addr,
successors,
job.exception_info,
self._block_artifacts,
)
# Remove all successors whose IP is symbolic
successors = [s for s in successors if not s.ip.symbolic]
# Add additional edges supplied by the user
successors = self._add_additional_edges(
input_state, sim_successors, cfg_node, successors
)
# if base graph is used, add successors implied from the graph
if self._base_graph:
basegraph_successor_addrs = set()
for src_, dst_ in self._base_graph.edges():
if src_.addr == addr:
basegraph_successor_addrs.add(dst_.addr)
successor_addrs = {s.solver.eval(s.ip) for s in successors}
extra_successor_addrs = basegraph_successor_addrs - successor_addrs
if all_successors: # make sure we have a base state to use
base_state = all_successors[
0
] # TODO: for calls, we want to use the fake_ret state
for s_addr in extra_successor_addrs:
# an extra target
successor_state = base_state.copy()
successor_state.ip = s_addr
successors.append(successor_state)
else:
if extra_successor_addrs:
l.error(
"CFGEmulated terminates at %#x although base graph provided more exits.",
addr,
)
if not successors:
# There is no way out :-(
# Log it first
self._push_unresolvable_run(addr)
if sim_successors.sort == "SimProcedure" and isinstance(
sim_successors.artifacts["procedure"],
SIM_PROCEDURES["stubs"]["PathTerminator"],
):
# If there is no valid exit in this branch and it's not
# intentional (e.g. caused by a SimProcedure that does not
# do_return) , we should make it return to its call-site. However,
# we don't want to use its state anymore as it might be corrupted.
# Just create an edge in the graph.
return_target = job.call_stack.current_return_target
if return_target is not None:
new_call_stack = job.call_stack_copy()
return_target_key = self._generate_block_id(
new_call_stack.stack_suffix(self.context_sensitivity_level),
return_target,
False,
) # You can never return to a syscall
if not cfg_node.instruction_addrs:
ret_ins_addr = None
else:
if self.project.arch.branch_delay_slot:
if len(cfg_node.instruction_addrs) > 1:
ret_ins_addr = cfg_node.instruction_addrs[-2]
else:
l.error(
"At %s: expecting more than one instruction. Only got one.",
cfg_node,
)
ret_ins_addr = None
else:
ret_ins_addr = cfg_node.instruction_addrs[-1]
# Things might be a bit difficult here. _graph_add_edge() requires both nodes to exist, but here
# the return target node may not exist yet. If that's the case, we will put it into a "delayed edge
# list", and add this edge later when the return target CFGNode is created.
if return_target_key in self._nodes:
self._graph_add_edge(
job.block_id,
return_target_key,
jumpkind="Ijk_Ret",
stmt_id=DEFAULT_STATEMENT,
ins_addr=ret_ins_addr,
)
else:
self._pending_edges[return_target_key].append(
(
job.block_id,
return_target_key,
{
"jumpkind": "Ijk_Ret",
"stmt_id": DEFAULT_STATEMENT,
"ins_addr": ret_ins_addr,
},
)
)
else:
# There are no successors, but we still want to update the function graph
artifacts = job.sim_successors.artifacts
if (
"irsb" in artifacts
and "insn_addrs" in artifacts
and artifacts["insn_addrs"]
):
the_irsb = artifacts["irsb"]
insn_addrs = artifacts["insn_addrs"]
self._handle_job_without_successors(job, the_irsb, insn_addrs)
# TODO: replace it with a DDG-based function IO analysis
# handle all actions
if successors:
self._handle_actions(
successors[0],
sim_successors,
job.current_function,
job.current_stack_pointer,
set(),
)
return successors
|
https://github.com/angr/angr/issues/2448
|
import angr
p = angr.Project("get")
cfg = p.analyses.CFGEmulated()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/manuel/.virtualenvs/angr/lib/python3.8/site-packages/angr/analyses/analysis.py", line 115, in __call__
oself.__init__(*args, **kwargs)
File "/home/manuel/.virtualenvs/angr/lib/python3.8/site-packages/angr/analyses/cfg/cfg_emulated.py", line 306, in __init__
self._analyze()
File "/home/manuel/.virtualenvs/angr/lib/python3.8/site-packages/angr/analyses/forward_analysis/forward_analysis.py", line 216, in _analyze
self._analysis_core_baremetal()
File "/home/manuel/.virtualenvs/angr/lib/python3.8/site-packages/angr/analyses/forward_analysis/forward_analysis.py", line 368, in _analysis_core_baremetal
self._process_job_and_get_successors(job_info)
File "/home/manuel/.virtualenvs/angr/lib/python3.8/site-packages/angr/analyses/forward_analysis/forward_analysis.py", line 386, in _process_job_and_get_successors
successors = self._get_successors(job)
File "/home/manuel/.virtualenvs/angr/lib/python3.8/site-packages/angr/analyses/cfg/cfg_emulated.py", line 1295, in _get_successors
resolved, resolved_targets, ij = self._indirect_jump_encountered(addr, cfg_node, irsb, func_addr,
File "/home/manuel/.virtualenvs/angr/lib/python3.8/site-packages/angr/analyses/cfg/cfg_base.py", line 2331, in _indirect_jump_encountered
resolved, resolved_targets = self._resolve_indirect_jump_timelessly(addr, irsb, func_addr, jumpkind)
File "/home/manuel/.virtualenvs/angr/lib/python3.8/site-packages/angr/analyses/cfg/cfg_base.py", line 2274, in _resolve_indirect_jump_timelessly
r, resolved_targets = res.resolve(self, addr, func_addr, block, jumpkind)
File "/home/manuel/.virtualenvs/angr/lib/python3.8/site-packages/angr/analyses/cfg/indirect_jump_resolvers/arm_elf_fast.py", line 60, in resolve
stmt = block.statements[stmt_idx]
IndexError: list index out of range
|
IndexError
|
def _make_returns(self, ail_graph: networkx.DiGraph) -> networkx.DiGraph:
"""
Work on each return statement and fill in its return expressions.
"""
if self.function.calling_convention is None:
# unknown calling convention. cannot do much about return expressions.
return ail_graph
# Block walker
def _handle_Return(
stmt_idx: int, stmt: ailment.Stmt.Return, block: Optional[ailment.Block]
): # pylint:disable=unused-argument
if (
block is not None
and not stmt.ret_exprs
and self.function.calling_convention.ret_val is not None
):
new_stmt = stmt.copy()
ret_val = self.function.calling_convention.ret_val
if isinstance(ret_val, SimRegArg):
reg = self.project.arch.registers[ret_val.reg_name]
new_stmt.ret_exprs.append(
ailment.Expr.Register(
None, None, reg[0], reg[1] * self.project.arch.byte_width
)
)
else:
l.warning(
"Unsupported type of return expression %s.",
type(self.function.calling_convention.ret_val),
)
block.statements[stmt_idx] = new_stmt
def _handler(block):
walker = AILBlockWalker()
# we don't need to handle any statement besides Returns
walker.stmt_handlers.clear()
walker.expr_handlers.clear()
walker.stmt_handlers[ailment.Stmt.Return] = _handle_Return
walker.walk(block)
# Graph walker
AILGraphWalker(ail_graph, _handler, replace_nodes=True).walk()
return ail_graph
|
def _make_returns(self, ail_graph: networkx.DiGraph):
"""
Work on each return statement and fill in its return expressions.
"""
# Block walker
def _handle_Return(
stmt_idx: int, stmt: ailment.Stmt.Return, block: Optional[ailment.Block]
): # pylint:disable=unused-argument
if (
block is not None
and not stmt.ret_exprs
and self.function.calling_convention.ret_val is not None
):
new_stmt = stmt.copy()
ret_val = self.function.calling_convention.ret_val
if isinstance(ret_val, SimRegArg):
reg = self.project.arch.registers[ret_val.reg_name]
new_stmt.ret_exprs.append(
ailment.Expr.Register(
None, None, reg[0], reg[1] * self.project.arch.byte_width
)
)
else:
l.warning(
"Unsupported type of return expression %s.",
type(self.function.calling_convention.ret_val),
)
block.statements[stmt_idx] = new_stmt
def _handler(block):
walker = AILBlockWalker()
# we don't need to handle any statement besides Returns
walker.stmt_handlers.clear()
walker.expr_handlers.clear()
walker.stmt_handlers[ailment.Stmt.Return] = _handle_Return
walker.walk(block)
# Graph walker
AILGraphWalker(ail_graph, _handler, replace_nodes=True).walk()
return ail_graph
|
https://github.com/angr/angr/issues/2444
|
Traceback (most recent call last):
File "/home/dnivra/angr-dev/angr-management/angrmanagement/ui/main_window.py", line 496, in decompile_current_function
self.workspace.decompile_current_function()
File "/home/dnivra/angr-dev/angr-management/angrmanagement/ui/workspace.py", line 265, in decompile_current_function
view.decompile_current_function()
File "/home/dnivra/angr-dev/angr-management/angrmanagement/ui/views/disassembly_view.py", line 382, in decompile_current_function
self.workspace.decompile_function(self._current_function.am_obj)
File "/home/dnivra/angr-dev/angr-management/angrmanagement/ui/workspace.py", line 279, in decompile_function
view.function = func
File "/home/dnivra/angr-dev/angr-management/angrmanagement/ui/views/code_view.py", line 66, in function
self.decompile()
File "/home/dnivra/angr-dev/angr-management/angrmanagement/ui/views/code_view.py", line 43, in decompile
d = self.workspace.instance.project.analyses.Decompiler(
File "/home/dnivra/angr-dev/angr/angr/analyses/analysis.py", line 115, in __call__
oself.__init__(*args, **kwargs)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/decompiler.py", line 24, in __init__
self._decompile()
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/decompiler.py", line 41, in _decompile
clinic = self.project.analyses.Clinic(self.func,
File "/home/dnivra/angr-dev/angr/angr/analyses/analysis.py", line 115, in __call__
oself.__init__(*args, **kwargs)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/clinic.py", line 70, in __init__
self._analyze()
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/clinic.py", line 138, in _analyze
ail_graph = self._make_returns(ail_graph)
File "/home/dnivra/angr-dev/angr/angr/utils/timing.py", line 28, in timed_func
return func(*args, **kwargs)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/clinic.py", line 457, in _make_returns
AILGraphWalker(ail_graph, _handler, replace_nodes=True).walk()
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/ailgraph_walker.py", line 23, in walk
r = self.handler(node)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/clinic.py", line 453, in _handler
walker.walk(block)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/ailblock_walker.py", line 37, in walk
self._handle_stmt(i, stmt, block)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/ailblock_walker.py", line 50, in _handle_stmt
return handler(stmt_idx, stmt, block)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/clinic.py", line 434, in _handle_Return
and self.function.calling_convention.ret_val is not None:
AttributeError: 'NoneType' object has no attribute 'ret_val'
|
AttributeError
|
def _analyze_function(self) -> Optional[SimCC]:
"""
Go over the variable information in variable manager for this function, and return all uninitialized
register/stack variables.
"""
if self._function.is_simprocedure or self._function.is_plt:
# we do not analyze SimProcedures or PLT stubs
return None
if not self._variable_manager.has_function_manager:
l.warning(
"Please run variable recovery on %r before analyzing its calling convention.",
self._function,
)
return None
vm = self._variable_manager[self._function.addr]
input_variables = vm.input_variables()
input_args = self._args_from_vars(input_variables, vm)
# TODO: properly determine sp_delta
sp_delta = self.project.arch.bytes if self.project.arch.call_pushes_ret else 0
cc = SimCC.find_cc(self.project.arch, list(input_args), sp_delta)
if cc is None:
l.warning(
"_analyze_function(): Cannot find a calling convention for %r that fits the given arguments.",
self._function,
)
else:
# reorder args
args = self._reorder_args(input_args, cc)
cc.args = args
# set return value
cc.ret_val = cc.return_val
return cc
|
def _analyze_function(self) -> Optional[SimCC]:
"""
Go over the variable information in variable manager for this function, and return all uninitialized
register/stack variables.
"""
if self._function.is_simprocedure or self._function.is_plt:
# we do not analyze SimProcedures or PLT stubs
return None
if not self._variable_manager.has_function_manager:
l.warning(
"Please run variable recovery on %r before analyzing its calling convention.",
self._function,
)
return None
vm = self._variable_manager[self._function.addr]
input_variables = vm.input_variables()
input_args = self._args_from_vars(input_variables)
# TODO: properly determine sp_delta
sp_delta = self.project.arch.bytes if self.project.arch.call_pushes_ret else 0
cc = SimCC.find_cc(self.project.arch, list(input_args), sp_delta)
if cc is None:
l.warning(
"_analyze_function(): Cannot find a calling convention for %r that fits the given arguments.",
self._function,
)
else:
# reorder args
args = self._reorder_args(input_args, cc)
cc.args = args
# set return value
cc.ret_val = cc.return_val
return cc
|
https://github.com/angr/angr/issues/2444
|
Traceback (most recent call last):
File "/home/dnivra/angr-dev/angr-management/angrmanagement/ui/main_window.py", line 496, in decompile_current_function
self.workspace.decompile_current_function()
File "/home/dnivra/angr-dev/angr-management/angrmanagement/ui/workspace.py", line 265, in decompile_current_function
view.decompile_current_function()
File "/home/dnivra/angr-dev/angr-management/angrmanagement/ui/views/disassembly_view.py", line 382, in decompile_current_function
self.workspace.decompile_function(self._current_function.am_obj)
File "/home/dnivra/angr-dev/angr-management/angrmanagement/ui/workspace.py", line 279, in decompile_function
view.function = func
File "/home/dnivra/angr-dev/angr-management/angrmanagement/ui/views/code_view.py", line 66, in function
self.decompile()
File "/home/dnivra/angr-dev/angr-management/angrmanagement/ui/views/code_view.py", line 43, in decompile
d = self.workspace.instance.project.analyses.Decompiler(
File "/home/dnivra/angr-dev/angr/angr/analyses/analysis.py", line 115, in __call__
oself.__init__(*args, **kwargs)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/decompiler.py", line 24, in __init__
self._decompile()
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/decompiler.py", line 41, in _decompile
clinic = self.project.analyses.Clinic(self.func,
File "/home/dnivra/angr-dev/angr/angr/analyses/analysis.py", line 115, in __call__
oself.__init__(*args, **kwargs)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/clinic.py", line 70, in __init__
self._analyze()
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/clinic.py", line 138, in _analyze
ail_graph = self._make_returns(ail_graph)
File "/home/dnivra/angr-dev/angr/angr/utils/timing.py", line 28, in timed_func
return func(*args, **kwargs)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/clinic.py", line 457, in _make_returns
AILGraphWalker(ail_graph, _handler, replace_nodes=True).walk()
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/ailgraph_walker.py", line 23, in walk
r = self.handler(node)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/clinic.py", line 453, in _handler
walker.walk(block)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/ailblock_walker.py", line 37, in walk
self._handle_stmt(i, stmt, block)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/ailblock_walker.py", line 50, in _handle_stmt
return handler(stmt_idx, stmt, block)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/clinic.py", line 434, in _handle_Return
and self.function.calling_convention.ret_val is not None:
AttributeError: 'NoneType' object has no attribute 'ret_val'
|
AttributeError
|
def _args_from_vars(self, variables: List, var_manager):
"""
Derive function arguments from input variables.
:param variables:
:param var_manager: The variable manager of this function.
:return:
"""
args = set()
if not self.project.arch.call_pushes_ret:
ret_addr_offset = 0
else:
ret_addr_offset = self.project.arch.bytes
reg_vars_with_single_access: List[SimRegisterVariable] = []
for variable in variables:
if isinstance(variable, SimStackVariable):
# a stack variable. convert it to a stack argument.
# TODO: deal with the variable base
if variable.offset <= 0:
# skip the return address on the stack
# TODO: make sure it was the return address
continue
arg = SimStackArg(variable.offset - ret_addr_offset, variable.size)
args.add(arg)
elif isinstance(variable, SimRegisterVariable):
# a register variable, convert it to a register argument
if not self._is_sane_register_variable(variable):
continue
reg_name = self.project.arch.translate_register_name(
variable.reg, size=variable.size
)
arg = SimRegArg(reg_name, variable.size)
args.add(arg)
accesses = var_manager.get_variable_accesses(variable)
if len(accesses) == 1:
reg_vars_with_single_access.append(variable)
else:
l.error("Unsupported type of variable %s.", type(variable))
# the function might be saving registers at the beginning and restoring them at the end
# we should remove all registers that are strictly callee-saved and are not used anywhere in this function
end_blocks = [
(endpoint.addr, endpoint.size)
for endpoint in self._function.endpoints_with_type["return"]
]
restored_reg_vars: Set[SimRegArg] = set()
# is there any instruction that restores this register in any end blocks?
if reg_vars_with_single_access:
if self._function.returning is False:
# no restoring is required if this function does not return
for var_ in reg_vars_with_single_access:
reg_name = self.project.arch.translate_register_name(
var_.reg, size=var_.size
)
restored_reg_vars.add(SimRegArg(reg_name, var_.size))
else:
reg_offsets: Set[int] = set(r.reg for r in reg_vars_with_single_access)
for var_ in var_manager.get_variables(sort="reg"):
if var_.reg in reg_offsets:
# check if there is only a write to it
accesses = var_manager.get_variable_accesses(var_)
if len(accesses) == 1 and accesses[0].access_type == "write":
found = False
for end_block_addr, end_block_size in end_blocks:
if (
end_block_addr
<= accesses[0].location.ins_addr
< end_block_addr + end_block_size
):
found = True
break
if found:
reg_name = self.project.arch.translate_register_name(
var_.reg, size=var_.size
)
restored_reg_vars.add(SimRegArg(reg_name, var_.size))
return args.difference(restored_reg_vars)
|
def _args_from_vars(self, variables):
"""
:param list variables:
:return:
"""
args = set()
if not self.project.arch.call_pushes_ret:
ret_addr_offset = 0
else:
ret_addr_offset = self.project.arch.bytes
for variable in variables:
if isinstance(variable, SimStackVariable):
# a stack variable. convert it to a stack argument.
# TODO: deal with the variable base
if variable.offset <= 0:
# skip the return address on the stack
# TODO: make sure it was the return address
continue
arg = SimStackArg(variable.offset - ret_addr_offset, variable.size)
args.add(arg)
elif isinstance(variable, SimRegisterVariable):
# a register variable, convert it to a register argument
if not self._is_sane_register_variable(variable):
continue
reg_name = self.project.arch.translate_register_name(
variable.reg, size=variable.size
)
arg = SimRegArg(reg_name, variable.size)
args.add(arg)
else:
l.error("Unsupported type of variable %s.", type(variable))
return args
|
https://github.com/angr/angr/issues/2444
|
Traceback (most recent call last):
File "/home/dnivra/angr-dev/angr-management/angrmanagement/ui/main_window.py", line 496, in decompile_current_function
self.workspace.decompile_current_function()
File "/home/dnivra/angr-dev/angr-management/angrmanagement/ui/workspace.py", line 265, in decompile_current_function
view.decompile_current_function()
File "/home/dnivra/angr-dev/angr-management/angrmanagement/ui/views/disassembly_view.py", line 382, in decompile_current_function
self.workspace.decompile_function(self._current_function.am_obj)
File "/home/dnivra/angr-dev/angr-management/angrmanagement/ui/workspace.py", line 279, in decompile_function
view.function = func
File "/home/dnivra/angr-dev/angr-management/angrmanagement/ui/views/code_view.py", line 66, in function
self.decompile()
File "/home/dnivra/angr-dev/angr-management/angrmanagement/ui/views/code_view.py", line 43, in decompile
d = self.workspace.instance.project.analyses.Decompiler(
File "/home/dnivra/angr-dev/angr/angr/analyses/analysis.py", line 115, in __call__
oself.__init__(*args, **kwargs)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/decompiler.py", line 24, in __init__
self._decompile()
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/decompiler.py", line 41, in _decompile
clinic = self.project.analyses.Clinic(self.func,
File "/home/dnivra/angr-dev/angr/angr/analyses/analysis.py", line 115, in __call__
oself.__init__(*args, **kwargs)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/clinic.py", line 70, in __init__
self._analyze()
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/clinic.py", line 138, in _analyze
ail_graph = self._make_returns(ail_graph)
File "/home/dnivra/angr-dev/angr/angr/utils/timing.py", line 28, in timed_func
return func(*args, **kwargs)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/clinic.py", line 457, in _make_returns
AILGraphWalker(ail_graph, _handler, replace_nodes=True).walk()
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/ailgraph_walker.py", line 23, in walk
r = self.handler(node)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/clinic.py", line 453, in _handler
walker.walk(block)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/ailblock_walker.py", line 37, in walk
self._handle_stmt(i, stmt, block)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/ailblock_walker.py", line 50, in _handle_stmt
return handler(stmt_idx, stmt, block)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/clinic.py", line 434, in _handle_Return
and self.function.calling_convention.ret_val is not None:
AttributeError: 'NoneType' object has no attribute 'ret_val'
|
AttributeError
|
def _is_sane_register_variable(self, variable: SimRegisterVariable) -> bool:
"""
Filters all registers that are surly not members of function arguments.
This can be seen as a workaround, since VariableRecoveryFast sometimes gives input variables of cc_ndep (which
is a VEX-specific register) :-(
:param variable: The variable to test.
:return: True if it is an acceptable function argument, False otherwise.
:rtype: bool
"""
arch = self.project.arch
if arch.name == "AARCH64":
return 16 <= variable.reg < 80 # x0-x7
elif arch.name == "AMD64":
return (
24 <= variable.reg < 40 # rcx, rdx
or 64 <= variable.reg < 104 # rsi, rdi, r8, r9, r10
)
# 224 <= variable.reg < 480) # xmm0-xmm7
elif is_arm_arch(arch):
return 8 <= variable.reg < 24 # r0-r3
elif arch.name == "MIPS32":
return 24 <= variable.reg < 40 # a0-a3
elif arch.name == "MIPS64":
return 48 <= variable.reg < 80 or 112 <= variable.reg < 208 # a0-a3 or t4-t7
elif arch.name == "PPC32":
return 28 <= variable.reg < 60 # r3-r10
elif arch.name == "X86":
return (
8 <= variable.reg < 24 # eax, ebx, ecx, edx
or 160 <= variable.reg < 288
) # xmm0-xmm7
else:
l.critical("Unsupported architecture %s.", arch.name)
return True
|
def _is_sane_register_variable(self, variable):
"""
Filters all registers that are surly not members of function arguments.
This can be seen as a workaround, since VariableRecoveryFast sometimes gives input variables of cc_ndep (which
is a VEX-specific register) :-(
:param SimRegisterVariable variable: The variable to test.
:return: True if it is an acceptable function argument, False otherwise.
:rtype: bool
"""
arch = self.project.arch
if arch.name == "AARCH64":
return 16 <= variable.reg < 80 # x0-x7
elif arch.name == "AMD64":
return (
24 <= variable.reg < 40 # rcx, rdx
or 64 <= variable.reg < 104 # rsi, rdi, r8, r9, r10
)
# 224 <= variable.reg < 480) # xmm0-xmm7
elif is_arm_arch(arch):
return 8 <= variable.reg < 24 # r0-r3
elif arch.name == "MIPS32":
return 24 <= variable.reg < 40 # a0-a3
elif arch.name == "MIPS64":
return 48 <= variable.reg < 80 or 112 <= variable.reg < 208 # a0-a3 or t4-t7
elif arch.name == "PPC32":
return 28 <= variable.reg < 60 # r3-r10
elif arch.name == "X86":
return (
8 <= variable.reg < 24 # eax, ebx, ecx, edx
or 160 <= variable.reg < 288
) # xmm0-xmm7
else:
l.critical("Unsupported architecture %s.", arch.name)
return True
|
https://github.com/angr/angr/issues/2444
|
Traceback (most recent call last):
File "/home/dnivra/angr-dev/angr-management/angrmanagement/ui/main_window.py", line 496, in decompile_current_function
self.workspace.decompile_current_function()
File "/home/dnivra/angr-dev/angr-management/angrmanagement/ui/workspace.py", line 265, in decompile_current_function
view.decompile_current_function()
File "/home/dnivra/angr-dev/angr-management/angrmanagement/ui/views/disassembly_view.py", line 382, in decompile_current_function
self.workspace.decompile_function(self._current_function.am_obj)
File "/home/dnivra/angr-dev/angr-management/angrmanagement/ui/workspace.py", line 279, in decompile_function
view.function = func
File "/home/dnivra/angr-dev/angr-management/angrmanagement/ui/views/code_view.py", line 66, in function
self.decompile()
File "/home/dnivra/angr-dev/angr-management/angrmanagement/ui/views/code_view.py", line 43, in decompile
d = self.workspace.instance.project.analyses.Decompiler(
File "/home/dnivra/angr-dev/angr/angr/analyses/analysis.py", line 115, in __call__
oself.__init__(*args, **kwargs)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/decompiler.py", line 24, in __init__
self._decompile()
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/decompiler.py", line 41, in _decompile
clinic = self.project.analyses.Clinic(self.func,
File "/home/dnivra/angr-dev/angr/angr/analyses/analysis.py", line 115, in __call__
oself.__init__(*args, **kwargs)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/clinic.py", line 70, in __init__
self._analyze()
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/clinic.py", line 138, in _analyze
ail_graph = self._make_returns(ail_graph)
File "/home/dnivra/angr-dev/angr/angr/utils/timing.py", line 28, in timed_func
return func(*args, **kwargs)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/clinic.py", line 457, in _make_returns
AILGraphWalker(ail_graph, _handler, replace_nodes=True).walk()
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/ailgraph_walker.py", line 23, in walk
r = self.handler(node)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/clinic.py", line 453, in _handler
walker.walk(block)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/ailblock_walker.py", line 37, in walk
self._handle_stmt(i, stmt, block)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/ailblock_walker.py", line 50, in _handle_stmt
return handler(stmt_idx, stmt, block)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/clinic.py", line 434, in _handle_Return
and self.function.calling_convention.ret_val is not None:
AttributeError: 'NoneType' object has no attribute 'ret_val'
|
AttributeError
|
def _create_job_call(
self,
addr,
irsb,
cfg_node,
stmt_idx,
ins_addr,
current_function_addr,
target_addr,
jumpkind,
is_syscall=False,
):
"""
Generate a CFGJob for target address, also adding to _pending_entries
if returning to succeeding position (if irsb arg is populated)
:param int addr: Address of the predecessor node
:param pyvex.IRSB irsb: IRSB of the predecessor node
:param CFGNode cfg_node: The CFGNode instance of the predecessor node
:param int stmt_idx: ID of the source statement
:param int ins_addr: Address of the source instruction
:param int current_function_addr: Address of the current function
:param int target_addr: Destination of the call
:param str jumpkind: The jumpkind of the edge going to this node
:param bool is_syscall: Is the jump kind (and thus this) a system call
:return: A list of CFGJobs
:rtype: list
"""
jobs = []
if is_syscall:
# Fix the target_addr for syscalls
tmp_state = self.project.factory.blank_state(
mode="fastpath",
addr=cfg_node.addr,
add_options={
o.SYMBOL_FILL_UNCONSTRAINED_MEMORY,
o.SYMBOL_FILL_UNCONSTRAINED_REGISTERS,
},
)
# Find the first successor with a syscall jumpkind
successors = self._simulate_block_with_resilience(tmp_state)
if successors is not None:
succ = next(
iter(
succ
for succ in successors.flat_successors
if succ.history.jumpkind
and succ.history.jumpkind.startswith("Ijk_Sys")
),
None,
)
else:
succ = None
if succ is None:
# For some reason, there is no such successor with a syscall jumpkind
target_addr = self._unresolvable_call_target_addr
else:
try:
syscall_stub = self.project.simos.syscall(succ)
if (
syscall_stub
): # can be None if simos is not a subclass of SimUserspace
syscall_addr = syscall_stub.addr
target_addr = syscall_addr
else:
target_addr = self._unresolvable_call_target_addr
except AngrUnsupportedSyscallError:
target_addr = self._unresolvable_call_target_addr
if isinstance(target_addr, SootAddressDescriptor):
new_function_addr = target_addr.method
else:
new_function_addr = target_addr
if irsb is None:
return_site = None
else:
if self.project.arch.name != "Soot":
return_site = (
addr + irsb.size
) # We assume the program will always return to the succeeding position
else:
# For Soot, we return to the next statement, which is not necessarily the next block (as Shimple does
# not break blocks at calls)
assert isinstance(ins_addr, SootAddressDescriptor)
soot_block = irsb
return_block_idx = ins_addr.block_idx
if stmt_idx + 1 >= soot_block.label + len(soot_block.statements):
# tick the block ID
return_block_idx += 1
return_site = SootAddressDescriptor(
ins_addr.method, return_block_idx, stmt_idx + 1
)
edge = None
if new_function_addr is not None:
edge = FunctionCallEdge(
cfg_node,
new_function_addr,
return_site,
current_function_addr,
syscall=is_syscall,
ins_addr=ins_addr,
stmt_idx=stmt_idx,
)
if new_function_addr is not None:
# Keep tracing from the call
ce = CFGJob(
target_addr,
new_function_addr,
jumpkind,
last_addr=addr,
src_node=cfg_node,
src_stmt_idx=stmt_idx,
src_ins_addr=ins_addr,
syscall=is_syscall,
func_edges=[edge],
gp=self.kb.functions[current_function_addr].info.get("gp", None),
)
jobs.append(ce)
callee_might_return = True
callee_function = None
if new_function_addr is not None:
if is_syscall or self.project.is_hooked(new_function_addr):
# we can create the function if it is a syscall or a SimProcedure and it does not exist yet. Note that
# syscalls are handled as SimProcedures anyway.
callee_function = self.kb.functions.function(
addr=new_function_addr, syscall=is_syscall, create=True
)
else:
callee_function = self.kb.functions.function(
addr=new_function_addr, syscall=is_syscall
)
if callee_function is not None:
callee_might_return = not (callee_function.returning is False)
if callee_might_return:
func_edges = []
if return_site is not None:
if callee_function is not None and callee_function.returning is True:
fakeret_edge = FunctionFakeRetEdge(
cfg_node, return_site, current_function_addr, confirmed=True
)
func_edges.append(fakeret_edge)
ret_edge = FunctionReturnEdge(
new_function_addr, return_site, current_function_addr
)
func_edges.append(ret_edge)
# Also, keep tracing from the return site
ce = CFGJob(
return_site,
current_function_addr,
"Ijk_FakeRet",
last_addr=addr,
src_node=cfg_node,
src_stmt_idx=stmt_idx,
src_ins_addr=ins_addr,
returning_source=new_function_addr,
syscall=is_syscall,
func_edges=func_edges,
)
self._pending_jobs.add_job(ce)
# register this job to this function
self._register_analysis_job(current_function_addr, ce)
elif callee_function is not None and callee_function.returning is False:
pass # Don't go past a call that does not return!
else:
# HACK: We don't know where we are jumping. Let's assume we fakeret to the
# next instruction after the block
# TODO: FIXME: There are arch-specific hints to give the correct ret site
# Such as looking for constant values of LR in this block for ARM stuff.
fakeret_edge = FunctionFakeRetEdge(
cfg_node, return_site, current_function_addr, confirmed=None
)
func_edges.append(fakeret_edge)
fr = FunctionReturn(
new_function_addr, current_function_addr, addr, return_site
)
if fr not in self._function_returns[new_function_addr]:
self._function_returns[new_function_addr].add(fr)
ce = CFGJob(
return_site,
current_function_addr,
"Ijk_FakeRet",
last_addr=addr,
src_node=cfg_node,
src_stmt_idx=stmt_idx,
src_ins_addr=ins_addr,
returning_source=new_function_addr,
syscall=is_syscall,
func_edges=func_edges,
)
self._pending_jobs.add_job(ce)
# register this job to this function
self._register_analysis_job(current_function_addr, ce)
return jobs
|
def _create_job_call(
self,
addr,
irsb,
cfg_node,
stmt_idx,
ins_addr,
current_function_addr,
target_addr,
jumpkind,
is_syscall=False,
):
"""
Generate a CFGJob for target address, also adding to _pending_entries
if returning to succeeding position (if irsb arg is populated)
:param int addr: Address of the predecessor node
:param pyvex.IRSB irsb: IRSB of the predecessor node
:param CFGNode cfg_node: The CFGNode instance of the predecessor node
:param int stmt_idx: ID of the source statement
:param int ins_addr: Address of the source instruction
:param int current_function_addr: Address of the current function
:param int target_addr: Destination of the call
:param str jumpkind: The jumpkind of the edge going to this node
:param bool is_syscall: Is the jump kind (and thus this) a system call
:return: A list of CFGJobs
:rtype: list
"""
jobs = []
if is_syscall:
# Fix the target_addr for syscalls
tmp_state = self.project.factory.blank_state(
mode="fastpath", addr=cfg_node.addr
)
# Find the first successor with a syscall jumpkind
successors = self._simulate_block_with_resilience(tmp_state)
if successors is not None:
succ = next(
iter(
succ
for succ in successors.flat_successors
if succ.history.jumpkind
and succ.history.jumpkind.startswith("Ijk_Sys")
),
None,
)
else:
succ = None
if succ is None:
# For some reason, there is no such successor with a syscall jumpkind
target_addr = self._unresolvable_call_target_addr
else:
try:
syscall_stub = self.project.simos.syscall(succ)
if (
syscall_stub
): # can be None if simos is not a subclass of SimUserspace
syscall_addr = syscall_stub.addr
target_addr = syscall_addr
else:
target_addr = self._unresolvable_call_target_addr
except AngrUnsupportedSyscallError:
target_addr = self._unresolvable_call_target_addr
if isinstance(target_addr, SootAddressDescriptor):
new_function_addr = target_addr.method
else:
new_function_addr = target_addr
if irsb is None:
return_site = None
else:
if self.project.arch.name != "Soot":
return_site = (
addr + irsb.size
) # We assume the program will always return to the succeeding position
else:
# For Soot, we return to the next statement, which is not necessarily the next block (as Shimple does
# not break blocks at calls)
assert isinstance(ins_addr, SootAddressDescriptor)
soot_block = irsb
return_block_idx = ins_addr.block_idx
if stmt_idx + 1 >= soot_block.label + len(soot_block.statements):
# tick the block ID
return_block_idx += 1
return_site = SootAddressDescriptor(
ins_addr.method, return_block_idx, stmt_idx + 1
)
edge = None
if new_function_addr is not None:
edge = FunctionCallEdge(
cfg_node,
new_function_addr,
return_site,
current_function_addr,
syscall=is_syscall,
ins_addr=ins_addr,
stmt_idx=stmt_idx,
)
if new_function_addr is not None:
# Keep tracing from the call
ce = CFGJob(
target_addr,
new_function_addr,
jumpkind,
last_addr=addr,
src_node=cfg_node,
src_stmt_idx=stmt_idx,
src_ins_addr=ins_addr,
syscall=is_syscall,
func_edges=[edge],
gp=self.kb.functions[current_function_addr].info.get("gp", None),
)
jobs.append(ce)
callee_might_return = True
callee_function = None
if new_function_addr is not None:
if is_syscall or self.project.is_hooked(new_function_addr):
# we can create the function if it is a syscall or a SimProcedure and it does not exist yet. Note that
# syscalls are handled as SimProcedures anyway.
callee_function = self.kb.functions.function(
addr=new_function_addr, syscall=is_syscall, create=True
)
else:
callee_function = self.kb.functions.function(
addr=new_function_addr, syscall=is_syscall
)
if callee_function is not None:
callee_might_return = not (callee_function.returning is False)
if callee_might_return:
func_edges = []
if return_site is not None:
if callee_function is not None and callee_function.returning is True:
fakeret_edge = FunctionFakeRetEdge(
cfg_node, return_site, current_function_addr, confirmed=True
)
func_edges.append(fakeret_edge)
ret_edge = FunctionReturnEdge(
new_function_addr, return_site, current_function_addr
)
func_edges.append(ret_edge)
# Also, keep tracing from the return site
ce = CFGJob(
return_site,
current_function_addr,
"Ijk_FakeRet",
last_addr=addr,
src_node=cfg_node,
src_stmt_idx=stmt_idx,
src_ins_addr=ins_addr,
returning_source=new_function_addr,
syscall=is_syscall,
func_edges=func_edges,
)
self._pending_jobs.add_job(ce)
# register this job to this function
self._register_analysis_job(current_function_addr, ce)
elif callee_function is not None and callee_function.returning is False:
pass # Don't go past a call that does not return!
else:
# HACK: We don't know where we are jumping. Let's assume we fakeret to the
# next instruction after the block
# TODO: FIXME: There are arch-specific hints to give the correct ret site
# Such as looking for constant values of LR in this block for ARM stuff.
fakeret_edge = FunctionFakeRetEdge(
cfg_node, return_site, current_function_addr, confirmed=None
)
func_edges.append(fakeret_edge)
fr = FunctionReturn(
new_function_addr, current_function_addr, addr, return_site
)
if fr not in self._function_returns[new_function_addr]:
self._function_returns[new_function_addr].add(fr)
ce = CFGJob(
return_site,
current_function_addr,
"Ijk_FakeRet",
last_addr=addr,
src_node=cfg_node,
src_stmt_idx=stmt_idx,
src_ins_addr=ins_addr,
returning_source=new_function_addr,
syscall=is_syscall,
func_edges=func_edges,
)
self._pending_jobs.add_job(ce)
# register this job to this function
self._register_analysis_job(current_function_addr, ce)
return jobs
|
https://github.com/angr/angr/issues/2444
|
Traceback (most recent call last):
File "/home/dnivra/angr-dev/angr-management/angrmanagement/ui/main_window.py", line 496, in decompile_current_function
self.workspace.decompile_current_function()
File "/home/dnivra/angr-dev/angr-management/angrmanagement/ui/workspace.py", line 265, in decompile_current_function
view.decompile_current_function()
File "/home/dnivra/angr-dev/angr-management/angrmanagement/ui/views/disassembly_view.py", line 382, in decompile_current_function
self.workspace.decompile_function(self._current_function.am_obj)
File "/home/dnivra/angr-dev/angr-management/angrmanagement/ui/workspace.py", line 279, in decompile_function
view.function = func
File "/home/dnivra/angr-dev/angr-management/angrmanagement/ui/views/code_view.py", line 66, in function
self.decompile()
File "/home/dnivra/angr-dev/angr-management/angrmanagement/ui/views/code_view.py", line 43, in decompile
d = self.workspace.instance.project.analyses.Decompiler(
File "/home/dnivra/angr-dev/angr/angr/analyses/analysis.py", line 115, in __call__
oself.__init__(*args, **kwargs)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/decompiler.py", line 24, in __init__
self._decompile()
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/decompiler.py", line 41, in _decompile
clinic = self.project.analyses.Clinic(self.func,
File "/home/dnivra/angr-dev/angr/angr/analyses/analysis.py", line 115, in __call__
oself.__init__(*args, **kwargs)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/clinic.py", line 70, in __init__
self._analyze()
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/clinic.py", line 138, in _analyze
ail_graph = self._make_returns(ail_graph)
File "/home/dnivra/angr-dev/angr/angr/utils/timing.py", line 28, in timed_func
return func(*args, **kwargs)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/clinic.py", line 457, in _make_returns
AILGraphWalker(ail_graph, _handler, replace_nodes=True).walk()
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/ailgraph_walker.py", line 23, in walk
r = self.handler(node)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/clinic.py", line 453, in _handler
walker.walk(block)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/ailblock_walker.py", line 37, in walk
self._handle_stmt(i, stmt, block)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/ailblock_walker.py", line 50, in _handle_stmt
return handler(stmt_idx, stmt, block)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/clinic.py", line 434, in _handle_Return
and self.function.calling_convention.ret_val is not None:
AttributeError: 'NoneType' object has no attribute 'ret_val'
|
AttributeError
|
def blocks(self):
"""
An iterator of all local blocks in the current function.
:return: angr.lifter.Block instances.
"""
for block_addr, block in self._local_blocks.items():
try:
yield self.get_block(
block_addr,
size=block.size,
byte_string=block.bytestr if isinstance(block, BlockNode) else None,
)
except (SimEngineError, SimMemoryError):
pass
|
def blocks(self):
"""
An iterator of all local blocks in the current function.
:return: angr.lifter.Block instances.
"""
for block_addr, block in self._local_blocks.items():
try:
yield self._get_block(
block_addr,
size=block.size,
byte_string=block.bytestr if isinstance(block, BlockNode) else None,
)
except (SimEngineError, SimMemoryError):
pass
|
https://github.com/angr/angr/issues/2444
|
Traceback (most recent call last):
File "/home/dnivra/angr-dev/angr-management/angrmanagement/ui/main_window.py", line 496, in decompile_current_function
self.workspace.decompile_current_function()
File "/home/dnivra/angr-dev/angr-management/angrmanagement/ui/workspace.py", line 265, in decompile_current_function
view.decompile_current_function()
File "/home/dnivra/angr-dev/angr-management/angrmanagement/ui/views/disassembly_view.py", line 382, in decompile_current_function
self.workspace.decompile_function(self._current_function.am_obj)
File "/home/dnivra/angr-dev/angr-management/angrmanagement/ui/workspace.py", line 279, in decompile_function
view.function = func
File "/home/dnivra/angr-dev/angr-management/angrmanagement/ui/views/code_view.py", line 66, in function
self.decompile()
File "/home/dnivra/angr-dev/angr-management/angrmanagement/ui/views/code_view.py", line 43, in decompile
d = self.workspace.instance.project.analyses.Decompiler(
File "/home/dnivra/angr-dev/angr/angr/analyses/analysis.py", line 115, in __call__
oself.__init__(*args, **kwargs)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/decompiler.py", line 24, in __init__
self._decompile()
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/decompiler.py", line 41, in _decompile
clinic = self.project.analyses.Clinic(self.func,
File "/home/dnivra/angr-dev/angr/angr/analyses/analysis.py", line 115, in __call__
oself.__init__(*args, **kwargs)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/clinic.py", line 70, in __init__
self._analyze()
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/clinic.py", line 138, in _analyze
ail_graph = self._make_returns(ail_graph)
File "/home/dnivra/angr-dev/angr/angr/utils/timing.py", line 28, in timed_func
return func(*args, **kwargs)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/clinic.py", line 457, in _make_returns
AILGraphWalker(ail_graph, _handler, replace_nodes=True).walk()
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/ailgraph_walker.py", line 23, in walk
r = self.handler(node)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/clinic.py", line 453, in _handler
walker.walk(block)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/ailblock_walker.py", line 37, in walk
self._handle_stmt(i, stmt, block)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/ailblock_walker.py", line 50, in _handle_stmt
return handler(stmt_idx, stmt, block)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/clinic.py", line 434, in _handle_Return
and self.function.calling_convention.ret_val is not None:
AttributeError: 'NoneType' object has no attribute 'ret_val'
|
AttributeError
|
def subgraph(self, ins_addrs):
"""
Generate a sub control flow graph of instruction addresses based on self.graph
:param iterable ins_addrs: A collection of instruction addresses that should be included in the subgraph.
:return networkx.DiGraph: A subgraph.
"""
# find all basic blocks that include those instructions
blocks = []
block_addr_to_insns = {}
for b in self._local_blocks.values():
# TODO: should I call get_blocks?
block = self.get_block(b.addr, size=b.size, byte_string=b.bytestr)
common_insns = set(block.instruction_addrs).intersection(ins_addrs)
if common_insns:
blocks.append(b)
block_addr_to_insns[b.addr] = sorted(common_insns)
# subgraph = networkx.subgraph(self.graph, blocks)
subgraph = self.graph.subgraph(blocks).copy()
g = networkx.DiGraph()
for n in subgraph.nodes():
insns = block_addr_to_insns[n.addr]
in_edges = subgraph.in_edges(n)
# out_edges = subgraph.out_edges(n)
if len(in_edges) > 1:
# the first instruction address should be included
if n.addr not in insns:
insns = [n.addr] + insns
for src, _ in in_edges:
last_instr = block_addr_to_insns[src.addr][-1]
g.add_edge(last_instr, insns[0])
for i in range(0, len(insns) - 1):
g.add_edge(insns[i], insns[i + 1])
return g
|
def subgraph(self, ins_addrs):
"""
Generate a sub control flow graph of instruction addresses based on self.graph
:param iterable ins_addrs: A collection of instruction addresses that should be included in the subgraph.
:return networkx.DiGraph: A subgraph.
"""
# find all basic blocks that include those instructions
blocks = []
block_addr_to_insns = {}
for b in self._local_blocks.values():
# TODO: should I call get_blocks?
block = self._get_block(b.addr, size=b.size, byte_string=b.bytestr)
common_insns = set(block.instruction_addrs).intersection(ins_addrs)
if common_insns:
blocks.append(b)
block_addr_to_insns[b.addr] = sorted(common_insns)
# subgraph = networkx.subgraph(self.graph, blocks)
subgraph = self.graph.subgraph(blocks).copy()
g = networkx.DiGraph()
for n in subgraph.nodes():
insns = block_addr_to_insns[n.addr]
in_edges = subgraph.in_edges(n)
# out_edges = subgraph.out_edges(n)
if len(in_edges) > 1:
# the first instruction address should be included
if n.addr not in insns:
insns = [n.addr] + insns
for src, _ in in_edges:
last_instr = block_addr_to_insns[src.addr][-1]
g.add_edge(last_instr, insns[0])
for i in range(0, len(insns) - 1):
g.add_edge(insns[i], insns[i + 1])
return g
|
https://github.com/angr/angr/issues/2444
|
Traceback (most recent call last):
File "/home/dnivra/angr-dev/angr-management/angrmanagement/ui/main_window.py", line 496, in decompile_current_function
self.workspace.decompile_current_function()
File "/home/dnivra/angr-dev/angr-management/angrmanagement/ui/workspace.py", line 265, in decompile_current_function
view.decompile_current_function()
File "/home/dnivra/angr-dev/angr-management/angrmanagement/ui/views/disassembly_view.py", line 382, in decompile_current_function
self.workspace.decompile_function(self._current_function.am_obj)
File "/home/dnivra/angr-dev/angr-management/angrmanagement/ui/workspace.py", line 279, in decompile_function
view.function = func
File "/home/dnivra/angr-dev/angr-management/angrmanagement/ui/views/code_view.py", line 66, in function
self.decompile()
File "/home/dnivra/angr-dev/angr-management/angrmanagement/ui/views/code_view.py", line 43, in decompile
d = self.workspace.instance.project.analyses.Decompiler(
File "/home/dnivra/angr-dev/angr/angr/analyses/analysis.py", line 115, in __call__
oself.__init__(*args, **kwargs)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/decompiler.py", line 24, in __init__
self._decompile()
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/decompiler.py", line 41, in _decompile
clinic = self.project.analyses.Clinic(self.func,
File "/home/dnivra/angr-dev/angr/angr/analyses/analysis.py", line 115, in __call__
oself.__init__(*args, **kwargs)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/clinic.py", line 70, in __init__
self._analyze()
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/clinic.py", line 138, in _analyze
ail_graph = self._make_returns(ail_graph)
File "/home/dnivra/angr-dev/angr/angr/utils/timing.py", line 28, in timed_func
return func(*args, **kwargs)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/clinic.py", line 457, in _make_returns
AILGraphWalker(ail_graph, _handler, replace_nodes=True).walk()
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/ailgraph_walker.py", line 23, in walk
r = self.handler(node)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/clinic.py", line 453, in _handler
walker.walk(block)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/ailblock_walker.py", line 37, in walk
self._handle_stmt(i, stmt, block)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/ailblock_walker.py", line 50, in _handle_stmt
return handler(stmt_idx, stmt, block)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/clinic.py", line 434, in _handle_Return
and self.function.calling_convention.ret_val is not None:
AttributeError: 'NoneType' object has no attribute 'ret_val'
|
AttributeError
|
def instruction_size(self, insn_addr):
"""
Get the size of the instruction specified by `insn_addr`.
:param int insn_addr: Address of the instruction
:return int: Size of the instruction in bytes, or None if the instruction is not found.
"""
for b in self.blocks:
block = self.get_block(b.addr, size=b.size, byte_string=b.bytestr)
if insn_addr in block.instruction_addrs:
index = block.instruction_addrs.index(insn_addr)
if index == len(block.instruction_addrs) - 1:
# the very last instruction
size = block.addr + block.size - insn_addr
else:
size = block.instruction_addrs[index + 1] - insn_addr
return size
return None
|
def instruction_size(self, insn_addr):
"""
Get the size of the instruction specified by `insn_addr`.
:param int insn_addr: Address of the instruction
:return int: Size of the instruction in bytes, or None if the instruction is not found.
"""
for b in self.blocks:
block = self._get_block(b.addr, size=b.size, byte_string=b.bytestr)
if insn_addr in block.instruction_addrs:
index = block.instruction_addrs.index(insn_addr)
if index == len(block.instruction_addrs) - 1:
# the very last instruction
size = block.addr + block.size - insn_addr
else:
size = block.instruction_addrs[index + 1] - insn_addr
return size
return None
|
https://github.com/angr/angr/issues/2444
|
Traceback (most recent call last):
File "/home/dnivra/angr-dev/angr-management/angrmanagement/ui/main_window.py", line 496, in decompile_current_function
self.workspace.decompile_current_function()
File "/home/dnivra/angr-dev/angr-management/angrmanagement/ui/workspace.py", line 265, in decompile_current_function
view.decompile_current_function()
File "/home/dnivra/angr-dev/angr-management/angrmanagement/ui/views/disassembly_view.py", line 382, in decompile_current_function
self.workspace.decompile_function(self._current_function.am_obj)
File "/home/dnivra/angr-dev/angr-management/angrmanagement/ui/workspace.py", line 279, in decompile_function
view.function = func
File "/home/dnivra/angr-dev/angr-management/angrmanagement/ui/views/code_view.py", line 66, in function
self.decompile()
File "/home/dnivra/angr-dev/angr-management/angrmanagement/ui/views/code_view.py", line 43, in decompile
d = self.workspace.instance.project.analyses.Decompiler(
File "/home/dnivra/angr-dev/angr/angr/analyses/analysis.py", line 115, in __call__
oself.__init__(*args, **kwargs)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/decompiler.py", line 24, in __init__
self._decompile()
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/decompiler.py", line 41, in _decompile
clinic = self.project.analyses.Clinic(self.func,
File "/home/dnivra/angr-dev/angr/angr/analyses/analysis.py", line 115, in __call__
oself.__init__(*args, **kwargs)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/clinic.py", line 70, in __init__
self._analyze()
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/clinic.py", line 138, in _analyze
ail_graph = self._make_returns(ail_graph)
File "/home/dnivra/angr-dev/angr/angr/utils/timing.py", line 28, in timed_func
return func(*args, **kwargs)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/clinic.py", line 457, in _make_returns
AILGraphWalker(ail_graph, _handler, replace_nodes=True).walk()
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/ailgraph_walker.py", line 23, in walk
r = self.handler(node)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/clinic.py", line 453, in _handler
walker.walk(block)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/ailblock_walker.py", line 37, in walk
self._handle_stmt(i, stmt, block)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/ailblock_walker.py", line 50, in _handle_stmt
return handler(stmt_idx, stmt, block)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/clinic.py", line 434, in _handle_Return
and self.function.calling_convention.ret_val is not None:
AttributeError: 'NoneType' object has no attribute 'ret_val'
|
AttributeError
|
def __init__(self, variable, access_type, location):
self.variable = variable
self.access_type = access_type
self.location: "CodeLocation" = location
|
def __init__(self, variable, access_type, location):
self.variable = variable
self.access_type = access_type
self.location = location
|
https://github.com/angr/angr/issues/2444
|
Traceback (most recent call last):
File "/home/dnivra/angr-dev/angr-management/angrmanagement/ui/main_window.py", line 496, in decompile_current_function
self.workspace.decompile_current_function()
File "/home/dnivra/angr-dev/angr-management/angrmanagement/ui/workspace.py", line 265, in decompile_current_function
view.decompile_current_function()
File "/home/dnivra/angr-dev/angr-management/angrmanagement/ui/views/disassembly_view.py", line 382, in decompile_current_function
self.workspace.decompile_function(self._current_function.am_obj)
File "/home/dnivra/angr-dev/angr-management/angrmanagement/ui/workspace.py", line 279, in decompile_function
view.function = func
File "/home/dnivra/angr-dev/angr-management/angrmanagement/ui/views/code_view.py", line 66, in function
self.decompile()
File "/home/dnivra/angr-dev/angr-management/angrmanagement/ui/views/code_view.py", line 43, in decompile
d = self.workspace.instance.project.analyses.Decompiler(
File "/home/dnivra/angr-dev/angr/angr/analyses/analysis.py", line 115, in __call__
oself.__init__(*args, **kwargs)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/decompiler.py", line 24, in __init__
self._decompile()
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/decompiler.py", line 41, in _decompile
clinic = self.project.analyses.Clinic(self.func,
File "/home/dnivra/angr-dev/angr/angr/analyses/analysis.py", line 115, in __call__
oself.__init__(*args, **kwargs)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/clinic.py", line 70, in __init__
self._analyze()
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/clinic.py", line 138, in _analyze
ail_graph = self._make_returns(ail_graph)
File "/home/dnivra/angr-dev/angr/angr/utils/timing.py", line 28, in timed_func
return func(*args, **kwargs)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/clinic.py", line 457, in _make_returns
AILGraphWalker(ail_graph, _handler, replace_nodes=True).walk()
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/ailgraph_walker.py", line 23, in walk
r = self.handler(node)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/clinic.py", line 453, in _handler
walker.walk(block)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/ailblock_walker.py", line 37, in walk
self._handle_stmt(i, stmt, block)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/ailblock_walker.py", line 50, in _handle_stmt
return handler(stmt_idx, stmt, block)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/clinic.py", line 434, in _handle_Return
and self.function.calling_convention.ret_val is not None:
AttributeError: 'NoneType' object has no attribute 'ret_val'
|
AttributeError
|
def get_variables(
self, sort=None, collapse_same_ident=False
) -> List[Union[SimStackVariable, SimRegisterVariable]]:
"""
Get a list of variables.
:param str or None sort: Sort of the variable to get.
:param collapse_same_ident: Whether variables of the same identifier should be collapsed or not.
:return: A list of variables.
:rtype: list
"""
variables = []
if collapse_same_ident:
raise NotImplementedError()
for var in self._variables:
if sort == "stack" and not isinstance(var, SimStackVariable):
continue
if sort == "reg" and not isinstance(var, SimRegisterVariable):
continue
variables.append(var)
return variables
|
def get_variables(self, sort=None, collapse_same_ident=False):
"""
Get a list of variables.
:param str or None sort: Sort of the variable to get.
:param collapse_same_ident: Whether variables of the same identifier should be collapsed or not.
:return: A list of variables.
:rtype: list
"""
variables = []
if collapse_same_ident:
raise NotImplementedError()
for var in self._variables:
if sort == "stack" and not isinstance(var, SimStackVariable):
continue
if sort == "reg" and not isinstance(var, SimRegisterVariable):
continue
variables.append(var)
return variables
|
https://github.com/angr/angr/issues/2444
|
Traceback (most recent call last):
File "/home/dnivra/angr-dev/angr-management/angrmanagement/ui/main_window.py", line 496, in decompile_current_function
self.workspace.decompile_current_function()
File "/home/dnivra/angr-dev/angr-management/angrmanagement/ui/workspace.py", line 265, in decompile_current_function
view.decompile_current_function()
File "/home/dnivra/angr-dev/angr-management/angrmanagement/ui/views/disassembly_view.py", line 382, in decompile_current_function
self.workspace.decompile_function(self._current_function.am_obj)
File "/home/dnivra/angr-dev/angr-management/angrmanagement/ui/workspace.py", line 279, in decompile_function
view.function = func
File "/home/dnivra/angr-dev/angr-management/angrmanagement/ui/views/code_view.py", line 66, in function
self.decompile()
File "/home/dnivra/angr-dev/angr-management/angrmanagement/ui/views/code_view.py", line 43, in decompile
d = self.workspace.instance.project.analyses.Decompiler(
File "/home/dnivra/angr-dev/angr/angr/analyses/analysis.py", line 115, in __call__
oself.__init__(*args, **kwargs)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/decompiler.py", line 24, in __init__
self._decompile()
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/decompiler.py", line 41, in _decompile
clinic = self.project.analyses.Clinic(self.func,
File "/home/dnivra/angr-dev/angr/angr/analyses/analysis.py", line 115, in __call__
oself.__init__(*args, **kwargs)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/clinic.py", line 70, in __init__
self._analyze()
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/clinic.py", line 138, in _analyze
ail_graph = self._make_returns(ail_graph)
File "/home/dnivra/angr-dev/angr/angr/utils/timing.py", line 28, in timed_func
return func(*args, **kwargs)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/clinic.py", line 457, in _make_returns
AILGraphWalker(ail_graph, _handler, replace_nodes=True).walk()
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/ailgraph_walker.py", line 23, in walk
r = self.handler(node)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/clinic.py", line 453, in _handler
walker.walk(block)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/ailblock_walker.py", line 37, in walk
self._handle_stmt(i, stmt, block)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/ailblock_walker.py", line 50, in _handle_stmt
return handler(stmt_idx, stmt, block)
File "/home/dnivra/angr-dev/angr/angr/analyses/decompiler/clinic.py", line 434, in _handle_Return
and self.function.calling_convention.ret_val is not None:
AttributeError: 'NoneType' object has no attribute 'ret_val'
|
AttributeError
|
def _next_code_addr_core(self):
"""
Call _next_unscanned_addr() first to get the next address that is not scanned. Then check if data locates at
that address seems to be code or not. If not, we'll continue to for the next un-scanned address.
"""
next_addr = self._next_unscanned_addr()
if next_addr is None:
return None
start_addr = next_addr
while True:
string_length = self._scan_for_printable_strings(start_addr)
if string_length:
self._seg_list.occupy(start_addr, string_length, "string")
start_addr += string_length
if self.project.arch.name in ("X86", "AMD64"):
cc_length = self._scan_for_repeating_bytes(start_addr, 0xCC, threshold=1)
if cc_length:
self._seg_list.occupy(start_addr, cc_length, "alignment")
start_addr += cc_length
else:
cc_length = 0
zeros_length = self._scan_for_repeating_bytes(start_addr, 0x00)
if zeros_length:
self._seg_list.occupy(start_addr, zeros_length, "alignment")
start_addr += zeros_length
if string_length == 0 and cc_length == 0 and zeros_length == 0:
# umm now it's probably code
break
instr_alignment = self._initial_state.arch.instruction_alignment
if start_addr % instr_alignment > 0:
# occupy those few bytes
self._seg_list.occupy(
start_addr, instr_alignment - (start_addr % instr_alignment), "alignment"
)
start_addr = start_addr - start_addr % instr_alignment + instr_alignment
# trickiness: aligning the start_addr may create a new address that is outside any mapped region.
if not self._inside_regions(start_addr):
raise ContinueScanningNotification()
return start_addr
|
def _next_code_addr_core(self):
"""
Call _next_unscanned_addr() first to get the next address that is not scanned. Then check if data locates at
that address seems to be code or not. If not, we'll continue to for the next un-scanned address.
"""
next_addr = self._next_unscanned_addr()
if next_addr is None:
return None
start_addr = next_addr
while True:
string_length = self._scan_for_printable_strings(start_addr)
if string_length:
self._seg_list.occupy(start_addr, string_length, "string")
start_addr += string_length
if self.project.arch.name in ("X86", "AMD64"):
cc_length = self._scan_for_repeating_bytes(start_addr, 0xCC, threshold=1)
if cc_length:
self._seg_list.occupy(start_addr, cc_length, "alignment")
start_addr += cc_length
else:
cc_length = 0
zeros_length = self._scan_for_repeating_bytes(start_addr, 0x00)
if zeros_length:
self._seg_list.occupy(start_addr, zeros_length, "alignment")
start_addr += zeros_length
if string_length == 0 and cc_length == 0 and zeros_length == 0:
# umm now it's probably code
break
instr_alignment = self._initial_state.arch.instruction_alignment
if start_addr % instr_alignment > 0:
# occupy those few bytes
self._seg_list.occupy(
start_addr, instr_alignment - (start_addr % instr_alignment), "alignment"
)
start_addr = start_addr - start_addr % instr_alignment + instr_alignment
return start_addr
|
https://github.com/angr/angr/issues/2201
|
Traceback (most recent call last):
File "test.py", line 4, in <module>
cfg = proj.analyses.CFGFast(show_progressbar=True)
File "/home/angr_fork/angr/analyses/analysis.py", line 115, in __call__
oself.__init__(*args, **kwargs)
File "/home/angr_fork/angr/analyses/cfg/cfg_fast.py", line 653, in __init__
self._analyze()
File "/home/angr_fork/angr/analyses/forward_analysis/forward_analysis.py", line 216, in _analyze
self._analysis_core_baremetal()
File "/home/angr_fork/angr/analyses/forward_analysis/forward_analysis.py", line 344, in _analysis_core_baremetal
self._job_queue_empty()
File "/home/angr_fork/angr/analyses/cfg/cfg_fast.py", line 1192, in _job_queue_empty
if any(re.match(prolog, bytes_prefix) for prolog in self.project.arch.thumb_prologs):
File "/home/angr_fork/angr/analyses/cfg/cfg_fast.py", line 1192, in <genexpr>
if any(re.match(prolog, bytes_prefix) for prolog in self.project.arch.thumb_prologs):
File "/home/angr_fork/venv/lib/python3.6/re.py", line 172, in match
return _compile(pattern, flags).match(string)
TypeError: expected string or bytes-like object
|
TypeError
|
def _next_code_addr(self):
while True:
try:
addr = self._next_code_addr_core()
except ContinueScanningNotification:
continue
if addr is None:
return None
# if the new address is already occupied
if not self._seg_list.is_occupied(addr):
return addr
|
def _next_code_addr(self):
while True:
addr = self._next_code_addr_core()
if addr is None:
return None
# if the new address is already occupied
if not self._seg_list.is_occupied(addr):
return addr
|
https://github.com/angr/angr/issues/2201
|
Traceback (most recent call last):
File "test.py", line 4, in <module>
cfg = proj.analyses.CFGFast(show_progressbar=True)
File "/home/angr_fork/angr/analyses/analysis.py", line 115, in __call__
oself.__init__(*args, **kwargs)
File "/home/angr_fork/angr/analyses/cfg/cfg_fast.py", line 653, in __init__
self._analyze()
File "/home/angr_fork/angr/analyses/forward_analysis/forward_analysis.py", line 216, in _analyze
self._analysis_core_baremetal()
File "/home/angr_fork/angr/analyses/forward_analysis/forward_analysis.py", line 344, in _analysis_core_baremetal
self._job_queue_empty()
File "/home/angr_fork/angr/analyses/cfg/cfg_fast.py", line 1192, in _job_queue_empty
if any(re.match(prolog, bytes_prefix) for prolog in self.project.arch.thumb_prologs):
File "/home/angr_fork/angr/analyses/cfg/cfg_fast.py", line 1192, in <genexpr>
if any(re.match(prolog, bytes_prefix) for prolog in self.project.arch.thumb_prologs):
File "/home/angr_fork/venv/lib/python3.6/re.py", line 172, in match
return _compile(pattern, flags).match(string)
TypeError: expected string or bytes-like object
|
TypeError
|
def _return_from_call(self, from_func, to_node, to_outside=False):
self.transition_graph.add_edge(
from_func, to_node, type="return", to_outside=to_outside
)
for _, _, data in self.transition_graph.in_edges(to_node, data=True):
if "type" in data and data["type"] == "fake_return":
data["confirmed"] = True
self._local_transition_graph = None
|
def _return_from_call(self, from_func, to_node, to_outside=False):
self.transition_graph.add_edge(
from_func, to_node, type="real_return", to_outside=to_outside
)
for _, _, data in self.transition_graph.in_edges(to_node, data=True):
if "type" in data and data["type"] == "fake_return":
data["confirmed"] = True
self._local_transition_graph = None
|
https://github.com/angr/angr/issues/2101
|
File "angr/angr/angrdb/db.py", line 36, in open_db
yield Session
File "angr/angr/angrdb/db.py", line 143, in dump
KnowledgeBaseSerializer.dump(session, self.project.kb)
File "angr/angr/angrdb/serializers/kb.py", line 36, in dump
FunctionManagerSerializer.dump(session, db_kb, kb.functions)
File "angr/angr/angrdb/serializers/funcs.py", line 29, in dump
blob=func.serialize(),
File "angr/angr/serializable.py", line 37, in serialize
return self.serialize_to_cmessage().SerializeToString()
File "angr/angr/knowledge_plugins/functions/function.py", line 388, in serialize_to_cmessage
return FunctionParser.serialize(self)
File "angr/angr/knowledge_plugins/functions/function_parser.py", line 58, in serialize
edge.jumpkind = func_edge_type_to_pb(value)
TypeError: None has type NoneType, but expected one of: int, long
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "fuzz_driver.py", line 474, in <module>
db.dump(db_file)
File "angr/angr/angrdb/db.py", line 145, in dump
self.update_dbinfo(session)
File "/usr/lib/python3.6/contextlib.py", line 99, in __exit__
self.gen.throw(type, value, traceback)
File "angr/angr/angrdb/db.py", line 40, in open_db
raise AngrDBError(str(ex))
angr.errors.AngrDBError: None has type NoneType, but expected one of: int, long
|
TypeError
|
def __repr__(self):
return "<XRef %s: %s->%s>" % (
self.type_string,
"%#x" % self.ins_addr
if self.ins_addr is not None
else "%#x[%d]" % (self.block_addr, self.stmt_idx),
"%s" % self.dst if self.dst is not None else "%#x" % (self.memory_data.addr),
)
|
def __repr__(self):
return "<XRef %s: %s->%s>" % (
XRefType.to_string(self.type),
"%#x" % self.ins_addr
if self.ins_addr is not None
else "%#x[%d]" % (self.block_addr, self.stmt_idx),
"%#x" % (self.dst if self.dst is not None else self.memory_data.addr),
)
|
https://github.com/angr/angr/issues/1727
|
TypeError Traceback (most recent call last)
----> 5 print(p.kb.xrefs.xrefs_by_ins_addr)
~/angr-dev/angr/angr/knowledge_plugins/xrefs/xref.py in __repr__(self)
43 XRefType.to_string(self.type),
44 "%#x" % self.ins_addr if self.ins_addr is not None else "%#x[%d]" % (self.block_addr, self.stmt_idx),
---> 45 "%#x" % (self.dst if self.dst is not None else self.memory_data.addr)
46 )
47
TypeError: %x format: an integer is required, not SpOffset
|
TypeError
|
def _handle_CCall(self, expr):
if not isinstance(expr.args[0], pyvex.IRExpr.Const):
return
cond_type_enum = expr.args[0].con.value
if self.arch.name in {"X86", "AMD64", "AARCH64"}:
if cond_type_enum in EXPECTED_COND_TYPES[self.arch.name]:
self._handle_Comparison(expr.args[2], expr.args[3])
elif is_arm_arch(self.arch):
if cond_type_enum in EXPECTED_COND_TYPES["ARM"]:
self._handle_Comparison(expr.args[2], expr.args[3])
else:
raise ValueError(
"Unexpected ccall encountered in architecture %s." % self.arch.name
)
|
def _handle_CCall(self, expr):
if not isinstance(expr.args[0], pyvex.IRExpr.Const):
return
cond_type_enum = expr.args[0].con.value
if self.arch.name in ("X86", "AMD64"):
if cond_type_enum in EXPECTED_COND_TYPES[self.arch.name]:
self._handle_Comparison(expr.args[2], expr.args[3])
elif is_arm_arch(self.arch):
if cond_type_enum in EXPECTED_COND_TYPES["ARM"]:
self._handle_Comparison(expr.args[2], expr.args[3])
else:
raise ValueError(
"Unexpected ccall encountered in architecture %s." % self.arch.name
)
|
https://github.com/angr/angr/issues/1917
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-3-cef6a7e02e93> in <module>
----> 1 cfg = proj.analyses.CFG()
~/Desktop/fuzzing/angr-dev/angr/angr/analyses/analysis.py in __call__(self, *args, **kwargs)
109
110 oself._show_progressbar = show_progressbar
--> 111 oself.__init__(*args, **kwargs)
112 return oself
113
~/Desktop/fuzzing/angr-dev/angr/angr/analyses/cfg/cfg.py in __init__(self, **kwargs)
53
54 # Now initializes CFGFast :-)
---> 55 CFGFast.__init__(self, **kwargs)
56
57 from angr.analyses import AnalysesHub
~/Desktop/fuzzing/angr-dev/angr/angr/analyses/cfg/cfg_fast.py in __init__(self, binary, objects, regions, pickle_intermediate_results, symbols, function_prologues, resolve_indirect_jumps, force_segment, force_complete_scan, indirect_jump_target_limit, data_references, cross_references, normalize, start_at_entry, function_starts, extra_memory_regions, data_type_guessing_handlers, arch_options, indirect_jump_resolvers, base_state, exclude_sparse_regions, skip_specific_regions, heuristic_plt_resolving, detect_tail_calls, low_priority, cfb, model, use_patches, start, end, collect_data_references, extra_cross_references, **extra_arch_options)
641
642 # Start working!
--> 643 self._analyze()
644
645 def __getstate__(self):
~/Desktop/fuzzing/angr-dev/angr/angr/analyses/forward_analysis/forward_analysis.py in _analyze(self)
214 # An example is the CFG recovery.
215
--> 216 self._analysis_core_baremetal()
217
218 else:
~/Desktop/fuzzing/angr-dev/angr/angr/analyses/forward_analysis/forward_analysis.py in _analysis_core_baremetal(self)
342
343 if not self._job_info_queue:
--> 344 self._job_queue_empty()
345
346 if not self._job_info_queue:
~/Desktop/fuzzing/angr-dev/angr/angr/analyses/cfg/cfg_fast.py in _job_queue_empty(self)
1134 # function 0x100006480 does not return. Hence, we resolve indirect jumps before popping undecided pending jobs.
1135 if self._resolve_indirect_jumps and self._indirect_jumps_to_resolve:
-> 1136 self._process_unresolved_indirect_jumps()
1137
1138 if self._job_info_queue:
~/Desktop/fuzzing/angr-dev/angr/angr/analyses/cfg/cfg_base.py in _process_unresolved_indirect_jumps(self)
2207 if self._low_priority:
2208 self._release_gil(idx, 20, 0.0001)
-> 2209 all_targets |= self._process_one_indirect_jump(jump)
2210
2211 self._indirect_jumps_to_resolve.clear()
~/Desktop/fuzzing/angr-dev/angr/angr/analyses/cfg/cfg_base.py in _process_one_indirect_jump(self, jump)
2233 continue
2234
-> 2235 resolved, targets = resolver.resolve(self, jump.addr, jump.func_addr, block, jump.jumpkind)
2236 if resolved:
2237 resolved_by = resolver
~/Desktop/fuzzing/angr-dev/angr/angr/analyses/cfg/indirect_jump_resolvers/jumptable.py in resolve(self, cfg, addr, func_addr, block, jumpkind)
467
468 l.debug("Try resolving %#x with a %d-level backward slice...", addr, slice_steps)
--> 469 r, targets = self._resolve(cfg, addr, func_addr, b)
470 if r:
471 return r, targets
~/Desktop/fuzzing/angr-dev/angr/angr/analyses/cfg/indirect_jump_resolvers/jumptable.py in _resolve(self, cfg, addr, func_addr, b)
521
522 try:
--> 523 stmts_to_instrument, regs_to_initialize = self._jumptable_precheck(b)
524 except NotAJumpTableNotification:
525 l.debug("Indirect jump at %#x does not look like a jump table. Skip.", addr)
~/Desktop/fuzzing/angr-dev/angr/angr/analyses/cfg/indirect_jump_resolvers/jumptable.py in _jumptable_precheck(self, b)
868 block = self.project.factory.block(block_addr, backup_state=self.base_state)
869 stmt_whitelist = annotatedcfg.get_whitelisted_statements(block_addr)
--> 870 engine.process(state, block=block, whitelist=stmt_whitelist)
871
872 if state.is_jumptable:
~/Desktop/fuzzing/angr-dev/angr/angr/engines/light/engine.py in process(self, state, *args, **kwargs)
33 # we are using a completely different state. Therefore, we directly call our _process() method before
34 # SimEngine becomes flexible enough.
---> 35 self._process(state, None, block=kwargs.pop('block', None), whitelist=kwargs.pop('whitelist', None))
36
37 def _process(self, new_state, successors, *args, **kwargs):
~/Desktop/fuzzing/angr-dev/angr/angr/engines/light/engine.py in _process(self, state, successors, block, whitelist, *args, **kwargs)
68 self.tyenv = block.vex.tyenv
69
---> 70 self._process_Stmt(whitelist=whitelist)
71
72 self.stmt_idx = None
~/Desktop/fuzzing/angr-dev/angr/angr/engines/light/engine.py in _process_Stmt(self, whitelist)
89 self.ins_addr = stmt.addr + stmt.delta
90
---> 91 self._handle_Stmt(stmt)
92
93 if self.block.vex.jumpkind == 'Ijk_Call':
~/Desktop/fuzzing/angr-dev/angr/angr/engines/light/engine.py in _handle_Stmt(self, stmt)
109 handler = "_handle_%s" % type(stmt).__name__
110 if hasattr(self, handler):
--> 111 getattr(self, handler)(stmt)
112 elif type(stmt).__name__ not in ('IMark', 'AbiHint'):
113 self.l.error('Unsupported statement type %s.', type(stmt).__name__)
~/Desktop/fuzzing/angr-dev/angr/angr/analyses/cfg/indirect_jump_resolvers/jumptable.py in _handle_WrTmp(self, stmt)
145 def _handle_WrTmp(self, stmt):
146 self._tsrc = set()
--> 147 super()._handle_WrTmp(stmt)
148
149 if self._tsrc:
~/Desktop/fuzzing/angr-dev/angr/angr/engines/light/engine.py in _handle_WrTmp(self, stmt)
115 # synchronize with function _handle_WrTmpData()
116 def _handle_WrTmp(self, stmt):
--> 117 data = self._expr(stmt.data)
118 if data is None:
119 return
~/Desktop/fuzzing/angr-dev/angr/angr/engines/light/engine.py in _expr(self, expr)
141 handler = "_handle_%s" % type(expr).__name__
142 if hasattr(self, handler):
--> 143 return getattr(self, handler)(expr)
144 else:
145 self.l.error('Unsupported expression type %s.', type(expr).__name__)
~/Desktop/fuzzing/angr-dev/angr/angr/analyses/cfg/indirect_jump_resolvers/jumptable.py in _handle_CCall(self, expr)
234 self._handle_Comparison(expr.args[2], expr.args[3])
235 else:
--> 236 raise ValueError("Unexpected ccall encountered in architecture %s." % self.arch.name)
237
238 def _handle_Comparison(self, arg0, arg1):
ValueError: Unexpected ccall encountered in architecture AARCH64.
|
ValueError
|
def _process_Stmt(self, whitelist=None):
if whitelist is not None:
# optimize whitelist lookups
whitelist = set(whitelist)
for stmt_idx, stmt in enumerate(self.block.vex.statements):
if whitelist is not None and stmt_idx not in whitelist:
continue
self.stmt_idx = stmt_idx
if type(stmt) is pyvex.IRStmt.IMark:
# Note that we cannot skip IMarks as they are used later to trigger observation events
# The bug caused by skipping IMarks is reported at https://github.com/angr/angr/pull/1150
self.ins_addr = stmt.addr + stmt.delta
self._handle_Stmt(stmt)
if self.block.vex.jumpkind == "Ijk_Call":
handler = "_handle_function"
if hasattr(self, handler):
func_addr = self._expr(self.block.vex.next)
if func_addr is not None:
getattr(self, handler)(func_addr)
else:
self.l.debug(
"Cannot determine the callee address at %#x.", self.block.addr
)
else:
self.l.warning("Function handler not implemented.")
|
def _process_Stmt(self, whitelist=None):
if whitelist is not None:
# optimize whitelist lookups
whitelist = set(whitelist)
for stmt_idx, stmt in enumerate(self.block.vex.statements):
if whitelist is not None and stmt_idx not in whitelist:
continue
self.stmt_idx = stmt_idx
if type(stmt) is pyvex.IRStmt.IMark:
# Note that we cannot skip IMarks as they are used later to trigger observation events
# The bug caused by skipping IMarks is reported at https://github.com/angr/angr/pull/1150
self.ins_addr = stmt.addr + stmt.delta
self._handle_Stmt(stmt)
if self.block.vex.jumpkind == "Ijk_Call":
handler = "_handle_function"
if hasattr(self, handler):
getattr(self, handler)(self._expr(self.block.vex.next))
else:
self.l.warning("Function handler not implemented.")
|
https://github.com/angr/angr/issues/1850
|
<...>
DEBUG | 2019-11-25 16:04:47,177 | angr.analyses.cfg.cfg_fast | Function __libc_csu_init
ERROR | 2019-11-25 16:04:47,198 | angr.analyses.cfg.cfg_fast | Error collecting XRefs for function __libc_csu_init.
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/angr/analyses/cfg/cfg_fast.py", line 1259, in _do_full_xrefs
prop = self.project.analyses.Propagator(func=f, base_state=state)
File "/usr/local/lib/python3.6/dist-packages/angr/analyses/analysis.py", line 109, in __call__
oself.__init__(*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/angr/analyses/propagator/propagator.py", line 222, in __init__
self._analyze()
File "/usr/local/lib/python3.6/dist-packages/angr/analyses/forward_analysis/__init__.py", line 223, in _analyze
self._analysis_core_graph()
File "/usr/local/lib/python3.6/dist-packages/angr/analyses/forward_analysis/__init__.py", line 246, in _analysis_core_graph
changed, output_state = self._run_on_node(n, job_state)
File "/usr/local/lib/python3.6/dist-packages/angr/analyses/propagator/propagator.py", line 263, in _run_on_node
load_callback=self._load_callback, fail_fast=self._fail_fast)
File "/usr/local/lib/python3.6/dist-packages/angr/analyses/propagator/engine_base.py", line 27, in process
self._process(state, None, block=kwargs.pop('block', None))
File "/usr/local/lib/python3.6/dist-packages/angr/analyses/propagator/engine_vex.py", line 25, in _process
super()._process(state, successors, block=block, whitelist=whitelist, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/angr/engines/light/engine.py", line 69, in _process
self._process_Stmt(whitelist=whitelist)
File "/usr/local/lib/python3.6/dist-packages/angr/engines/light/engine.py", line 95, in _process_Stmt
getattr(self, handler)(self._expr(self.block.vex.next))
File "/usr/local/lib/python3.6/dist-packages/angr/analyses/propagator/engine_vex.py", line 76, in _handle_function
b = self._project.loader.memory.load(addr, 4)
File "/usr/local/lib/python3.6/dist-packages/cle/memory.py", line 204, in load
for start, backer in self.backers(addr):
File "/usr/local/lib/python3.6/dist-packages/cle/memory.py", line 176, in backers
started = addr <= 0
TypeError: '<=' not supported between instances of 'NoneType' and 'int'
DEBUG | 2019-11-25 16:04:47,199 | angr.analyses.cfg.cfg_fast | Function sub_804865d
<...>
|
TypeError
|
def _create_job_call(
self,
addr,
irsb,
cfg_node,
stmt_idx,
ins_addr,
current_function_addr,
target_addr,
jumpkind,
is_syscall=False,
):
"""
Generate a CFGJob for target address, also adding to _pending_entries
if returning to succeeding position (if irsb arg is populated)
:param int addr: Address of the predecessor node
:param pyvex.IRSB irsb: IRSB of the predecessor node
:param CFGNode cfg_node: The CFGNode instance of the predecessor node
:param int stmt_idx: ID of the source statement
:param int ins_addr: Address of the source instruction
:param int current_function_addr: Address of the current function
:param int target_addr: Destination of the call
:param str jumpkind: The jumpkind of the edge going to this node
:param bool is_syscall: Is the jump kind (and thus this) a system call
:return: A list of CFGJobs
:rtype: list
"""
jobs = []
if is_syscall:
# Fix the target_addr for syscalls
tmp_state = self.project.factory.blank_state(
mode="fastpath", addr=cfg_node.addr
)
# Find the first successor with a syscall jumpkind
successors = self._simulate_block_with_resilience(tmp_state)
if successors is not None:
succ = next(
iter(
succ
for succ in successors.flat_successors
if succ.history.jumpkind
and succ.history.jumpkind.startswith("Ijk_Sys")
),
None,
)
else:
succ = None
if succ is None:
# For some reason, there is no such successor with a syscall jumpkind
target_addr = self._unresolvable_call_target_addr
else:
try:
syscall_stub = self.project.simos.syscall(succ)
if (
syscall_stub
): # can be None if simos is not a subclass of SimUserspace
syscall_addr = syscall_stub.addr
target_addr = syscall_addr
else:
target_addr = self._unresolvable_call_target_addr
except AngrUnsupportedSyscallError:
target_addr = self._unresolvable_call_target_addr
if isinstance(target_addr, SootAddressDescriptor):
new_function_addr = target_addr.method
else:
new_function_addr = target_addr
if irsb is None:
return_site = None
else:
if self.project.arch.name != "Soot":
return_site = (
addr + irsb.size
) # We assume the program will always return to the succeeding position
else:
# For Soot, we return to the next statement, which is not necessarily the next block (as Shimple does
# not break blocks at calls)
assert isinstance(ins_addr, SootAddressDescriptor)
soot_block = irsb
return_block_idx = ins_addr.block_idx
if stmt_idx + 1 >= soot_block.label + len(soot_block.statements):
# tick the block ID
return_block_idx += 1
return_site = SootAddressDescriptor(
ins_addr.method, return_block_idx, stmt_idx + 1
)
edge = None
if new_function_addr is not None:
edge = FunctionCallEdge(
cfg_node,
new_function_addr,
return_site,
current_function_addr,
syscall=is_syscall,
ins_addr=ins_addr,
stmt_idx=ins_addr,
)
if new_function_addr is not None:
# Keep tracing from the call
ce = CFGJob(
target_addr,
new_function_addr,
jumpkind,
last_addr=addr,
src_node=cfg_node,
src_stmt_idx=stmt_idx,
src_ins_addr=ins_addr,
syscall=is_syscall,
func_edges=[edge],
)
jobs.append(ce)
callee_might_return = True
callee_function = None
if new_function_addr is not None:
if is_syscall or self.project.is_hooked(new_function_addr):
# we can create the function if it is a syscall or a SimProcedure and it does not exist yet. Note that
# syscalls are handled as SimProcedures anyway.
callee_function = self.kb.functions.function(
addr=new_function_addr, syscall=is_syscall, create=True
)
else:
callee_function = self.kb.functions.function(
addr=new_function_addr, syscall=is_syscall
)
if callee_function is not None:
callee_might_return = not (callee_function.returning is False)
if callee_might_return:
func_edges = []
if return_site is not None:
if callee_function is not None and callee_function.returning is True:
fakeret_edge = FunctionFakeRetEdge(
cfg_node, return_site, current_function_addr, confirmed=True
)
func_edges.append(fakeret_edge)
ret_edge = FunctionReturnEdge(
new_function_addr, return_site, current_function_addr
)
func_edges.append(ret_edge)
# Also, keep tracing from the return site
ce = CFGJob(
return_site,
current_function_addr,
"Ijk_FakeRet",
last_addr=addr,
src_node=cfg_node,
src_stmt_idx=stmt_idx,
src_ins_addr=ins_addr,
returning_source=new_function_addr,
syscall=is_syscall,
func_edges=func_edges,
)
self._pending_jobs.add_job(ce)
# register this job to this function
self._register_analysis_job(current_function_addr, ce)
elif callee_function is not None and callee_function.returning is False:
pass # Don't go past a call that does not return!
else:
# HACK: We don't know where we are jumping. Let's assume we fakeret to the
# next instruction after the block
# TODO: FIXME: There are arch-specific hints to give the correct ret site
# Such as looking for constant values of LR in this block for ARM stuff.
fakeret_edge = FunctionFakeRetEdge(
cfg_node, return_site, current_function_addr, confirmed=None
)
func_edges.append(fakeret_edge)
fr = FunctionReturn(
new_function_addr, current_function_addr, addr, return_site
)
if fr not in self._function_returns[new_function_addr]:
self._function_returns[new_function_addr].add(fr)
ce = CFGJob(
return_site,
current_function_addr,
"Ijk_FakeRet",
last_addr=addr,
src_node=cfg_node,
src_stmt_idx=stmt_idx,
src_ins_addr=ins_addr,
returning_source=new_function_addr,
syscall=is_syscall,
func_edges=func_edges,
)
self._pending_jobs.add_job(ce)
# register this job to this function
self._register_analysis_job(current_function_addr, ce)
return jobs
|
def _create_job_call(
self,
addr,
irsb,
cfg_node,
stmt_idx,
ins_addr,
current_function_addr,
target_addr,
jumpkind,
is_syscall=False,
):
"""
Generate a CFGJob for target address, also adding to _pending_entries
if returning to succeeding position (if irsb arg is populated)
:param int addr: Address of the predecessor node
:param pyvex.IRSB irsb: IRSB of the predecessor node
:param CFGNode cfg_node: The CFGNode instance of the predecessor node
:param int stmt_idx: ID of the source statement
:param int ins_addr: Address of the source instruction
:param int current_function_addr: Address of the current function
:param int target_addr: Destination of the call
:param str jumpkind: The jumpkind of the edge going to this node
:param bool is_syscall: Is the jump kind (and thus this) a system call
:return: A list of CFGJobs
:rtype: list
"""
jobs = []
if is_syscall:
# Fix the target_addr for syscalls
tmp_state = self.project.factory.blank_state(
mode="fastpath", addr=cfg_node.addr
)
# Find the first successor with a syscall jumpkind
succ = next(
iter(
succ
for succ in self.project.factory.successors(tmp_state).flat_successors
if succ.history.jumpkind and succ.history.jumpkind.startswith("Ijk_Sys")
),
None,
)
if succ is None:
# For some reason, there is no such successor with a syscall jumpkind
target_addr = self._unresolvable_call_target_addr
else:
try:
syscall_stub = self.project.simos.syscall(succ)
if (
syscall_stub
): # can be None if simos is not a subclass of SimUserspac
syscall_addr = syscall_stub.addr
target_addr = syscall_addr
else:
target_addr = self._unresolvable_call_target_addr
except AngrUnsupportedSyscallError:
target_addr = self._unresolvable_call_target_addr
if isinstance(target_addr, SootAddressDescriptor):
new_function_addr = target_addr.method
else:
new_function_addr = target_addr
if irsb is None:
return_site = None
else:
if self.project.arch.name != "Soot":
return_site = (
addr + irsb.size
) # We assume the program will always return to the succeeding position
else:
# For Soot, we return to the next statement, which is not necessarily the next block (as Shimple does
# not break blocks at calls)
assert isinstance(ins_addr, SootAddressDescriptor)
soot_block = irsb
return_block_idx = ins_addr.block_idx
if stmt_idx + 1 >= soot_block.label + len(soot_block.statements):
# tick the block ID
return_block_idx += 1
return_site = SootAddressDescriptor(
ins_addr.method, return_block_idx, stmt_idx + 1
)
edge = None
if new_function_addr is not None:
edge = FunctionCallEdge(
cfg_node,
new_function_addr,
return_site,
current_function_addr,
syscall=is_syscall,
ins_addr=ins_addr,
stmt_idx=ins_addr,
)
if new_function_addr is not None:
# Keep tracing from the call
ce = CFGJob(
target_addr,
new_function_addr,
jumpkind,
last_addr=addr,
src_node=cfg_node,
src_stmt_idx=stmt_idx,
src_ins_addr=ins_addr,
syscall=is_syscall,
func_edges=[edge],
)
jobs.append(ce)
callee_might_return = True
callee_function = None
if new_function_addr is not None:
if is_syscall or self.project.is_hooked(new_function_addr):
# we can create the function if it is a syscall or a SimProcedure and it does not exist yet. Note that
# syscalls are handled as SimProcedures anyway.
callee_function = self.kb.functions.function(
addr=new_function_addr, syscall=is_syscall, create=True
)
else:
callee_function = self.kb.functions.function(
addr=new_function_addr, syscall=is_syscall
)
if callee_function is not None:
callee_might_return = not (callee_function.returning is False)
if callee_might_return:
func_edges = []
if return_site is not None:
if callee_function is not None and callee_function.returning is True:
fakeret_edge = FunctionFakeRetEdge(
cfg_node, return_site, current_function_addr, confirmed=True
)
func_edges.append(fakeret_edge)
ret_edge = FunctionReturnEdge(
new_function_addr, return_site, current_function_addr
)
func_edges.append(ret_edge)
# Also, keep tracing from the return site
ce = CFGJob(
return_site,
current_function_addr,
"Ijk_FakeRet",
last_addr=addr,
src_node=cfg_node,
src_stmt_idx=stmt_idx,
src_ins_addr=ins_addr,
returning_source=new_function_addr,
syscall=is_syscall,
func_edges=func_edges,
)
self._pending_jobs.add_job(ce)
# register this job to this function
self._register_analysis_job(current_function_addr, ce)
elif callee_function is not None and callee_function.returning is False:
pass # Don't go past a call that does not return!
else:
# HACK: We don't know where we are jumping. Let's assume we fakeret to the
# next instruction after the block
# TODO: FIXME: There are arch-specific hints to give the correct ret site
# Such as looking for constant values of LR in this block for ARM stuff.
fakeret_edge = FunctionFakeRetEdge(
cfg_node, return_site, current_function_addr, confirmed=None
)
func_edges.append(fakeret_edge)
fr = FunctionReturn(
new_function_addr, current_function_addr, addr, return_site
)
if fr not in self._function_returns[new_function_addr]:
self._function_returns[new_function_addr].add(fr)
ce = CFGJob(
return_site,
current_function_addr,
"Ijk_FakeRet",
last_addr=addr,
src_node=cfg_node,
src_stmt_idx=stmt_idx,
src_ins_addr=ins_addr,
returning_source=new_function_addr,
syscall=is_syscall,
func_edges=func_edges,
)
self._pending_jobs.add_job(ce)
# register this job to this function
self._register_analysis_job(current_function_addr, ce)
return jobs
|
https://github.com/angr/angr/issues/1696
|
ClaripyZeroDivisionError Traceback (most recent call last)
~/Workspace/angr/angr/angr/engines/vex/irop.py in calculate(self, *args)
371 try:
--> 372 return self.extend_size(self._calculate(args))
373 except (ZeroDivisionError, claripy.ClaripyZeroDivisionError) as e:
~/Workspace/angr/angr/angr/engines/vex/irop.py in _op_divmod(self, args)
717 if self.is_signed:
--> 718 quotient = (args[0].SDiv(claripy.SignExt(self._from_size - self._to_size, args[1])))
719 remainder = (args[0].SMod(claripy.SignExt(self._from_size - self._to_size, args[1])))
~/Workspace/angr/claripy/claripy/operations.py in _op(*args)
66
---> 67 return return_type(name, fixed_args, **kwargs)
68
~/Workspace/angr/claripy/claripy/ast/base.py in __new__(cls, op, args, add_variables, hash, **kwargs)
152 try:
--> 153 r = operations._handle_annotations(eb._abstract(eb.call(op, args)), args)
154 if r is not None:
~/Workspace/angr/claripy/claripy/backends/__init__.py in call(self, op, args)
238 converted = self.convert_list(args)
--> 239 return self._call(op, converted)
240
~/Workspace/angr/claripy/claripy/backends/__init__.py in _call(self, op, args)
249 # the raw ops don't get the model, cause, for example, Z3 stuff can't take it
--> 250 obj = self._op_raw[op](*args)
251 elif not op.startswith("__"):
~/Workspace/angr/claripy/claripy/bv.py in normalize_helper(self, o)
41 return NotImplemented
---> 42 return f(self, o)
43
~/Workspace/angr/claripy/claripy/bv.py in compare_guard(self, o)
15 raise ClaripyTypeError("bitvectors are differently-sized (%d and %d)" % (self.bits, o.bits))
---> 16 return f(self, o)
17
~/Workspace/angr/claripy/claripy/bv.py in SDiv(self, o)
417 if b == 0:
--> 418 raise ClaripyZeroDivisionError()
419 val = a//b if a*b>0 else (a+(-a%b))//b
ClaripyZeroDivisionError:
The above exception was the direct cause of the following exception:
SimZeroDivisionException Traceback (most recent call last)
<ipython-input-6-96f129e3fa7c> in <module>
----> 1 cfg = project.analyses.CFGFast()
~/Workspace/angr/angr/angr/analyses/analysis.py in __call__(self, *args, **kwargs)
107
108 oself._show_progressbar = show_progressbar
--> 109 oself.__init__(*args, **kwargs)
110 return oself
111
~/Workspace/angr/angr/angr/analyses/cfg/cfg_fast.py in __init__(self, binary, objects, regions, pickle_intermediate_results, symbols, function_prologues, resolve_indirect_jumps, force_segment, force_complete_scan, indirect_jump_target_limit, data_references, cross_references, normalize, start_at_entry, function_starts, extra_memory_regions, data_type_guessing_handlers, arch_options, indirect_jump_resolvers, base_state, exclude_sparse_regions, skip_specific_regions, heuristic_plt_resolving, detect_tail_calls, low_priority, cfb, model, use_patches, start, end, collect_data_references, extra_cross_references, **extra_arch_options)
643
644 # Start working!
--> 645 self._analyze()
646
647 def __getstate__(self):
~/Workspace/angr/angr/angr/analyses/forward_analysis.py in _analyze(self)
581 # An example is the CFG recovery.
582
--> 583 self._analysis_core_baremetal()
584
585 else:
~/Workspace/angr/angr/angr/analyses/forward_analysis.py in _analysis_core_baremetal(self)
706 self._job_info_queue = self._job_info_queue[1:]
707
--> 708 self._process_job_and_get_successors(job_info)
709
710 # Short-cut for aborting the analysis
~/Workspace/angr/angr/angr/analyses/forward_analysis.py in _process_job_and_get_successors(self, job_info)
724 job = job_info.job
725
--> 726 successors = self._get_successors(job)
727
728 all_new_jobs = [ ]
~/Workspace/angr/angr/angr/analyses/cfg/cfg_fast.py in _get_successors(self, job)
1050 # l.debug("Tracing new exit %#x", addr)
1051
-> 1052 jobs = self._scan_block(job)
1053
1054 # l.debug("... got %d jobs: %s", len(jobs), jobs)
~/Workspace/angr/angr/angr/analyses/cfg/cfg_fast.py in _scan_block(self, cfg_job)
1351
1352 else:
-> 1353 entries = self._scan_irsb(cfg_job, current_func_addr)
1354
1355 return entries
~/Workspace/angr/angr/angr/analyses/cfg/cfg_fast.py in _scan_irsb(self, cfg_job, current_func_addr)
1544
1545 entries += self._create_jobs(target, jumpkind, function_addr, irsb, addr, cfg_node, ins_addr,
-> 1546 stmt_idx
1547 )
1548
~/Workspace/angr/angr/angr/analyses/cfg/cfg_fast.py in _create_jobs(self, target, jumpkind, current_function_addr, irsb, addr, cfg_node, ins_addr, stmt_idx)
1712 elif jumpkind == 'Ijk_Call' or jumpkind.startswith("Ijk_Sys"):
1713 jobs += self._create_job_call(addr, irsb, cfg_node, stmt_idx, ins_addr, current_function_addr,
-> 1714 target_addr, jumpkind, is_syscall=is_syscall
1715 )
1716
~/Workspace/angr/angr/angr/analyses/cfg/cfg_fast.py in _create_job_call(self, addr, irsb, cfg_node, stmt_idx, ins_addr, current_function_addr, target_addr, jumpkind, is_syscall)
1747 tmp_state = self.project.factory.blank_state(mode="fastpath", addr=cfg_node.addr)
1748 # Find the first successor with a syscall jumpkind
-> 1749 succ = next(iter(succ for succ in self.project.factory.successors(tmp_state).flat_successors
1750 if succ.history.jumpkind and succ.history.jumpkind.startswith("Ijk_Sys")), None)
1751 if succ is None:
~/Workspace/angr/angr/angr/factory.py in successors(self, *args, **kwargs)
52 """
53
---> 54 return self.project.engines.successors(*args, **kwargs)
55
56 def blank_state(self, **kwargs):
~/Workspace/angr/angr/angr/engines/hub.py in successors(self, state, addr, jumpkind, default_engine, procedure_engine, engines, **kwargs)
126 for engine in engines:
127 if engine.check(state, **kwargs):
--> 128 r = engine.process(state, **kwargs)
129 if r.processed:
130 return r
~/Workspace/angr/angr/angr/engines/vex/engine.py in process(self, state, irsb, skip_stmts, last_stmt, whitelist, inline, force_addr, insn_bytes, size, num_inst, traceflags, thumb, extra_stop_points, opt_level, **kwargs)
146 thumb=thumb,
147 extra_stop_points=extra_stop_points,
--> 148 opt_level=opt_level)
149
150 def _check(self, state, *args, **kwargs):
~/Workspace/angr/angr/angr/engines/engine.py in process(***failed resolving arguments***)
58 successors = new_state._inspect_getattr('sim_successors', successors)
59 try:
---> 60 self._process(new_state, successors, *args, **kwargs)
61 except SimException:
62 if o.EXCEPTION_HANDLING not in old_state.options:
~/Workspace/angr/angr/angr/engines/vex/engine.py in _process(self, state, successors, irsb, skip_stmts, last_stmt, whitelist, insn_bytes, size, num_inst, traceflags, thumb, extra_stop_points, opt_level)
197
198 try:
--> 199 self._handle_irsb(state, successors, irsb, skip_stmts, last_stmt, whitelist)
200 except SimReliftException as e:
201 state = e.state
~/Workspace/angr/angr/angr/engines/vex/engine.py in _handle_irsb(self, state, successors, irsb, skip_stmts, last_stmt, whitelist)
276 state.scratch.stmt_idx = stmt_idx
277 state._inspect('statement', BP_BEFORE, statement=stmt_idx)
--> 278 cont = self._handle_statement(state, successors, stmt)
279 state._inspect('statement', BP_AFTER)
280 if not cont:
~/Workspace/angr/angr/angr/engines/vex/engine.py in _handle_statement(self, state, successors, stmt)
391 return None
392 else:
--> 393 exit_data = stmt_handler(self, state, stmt)
394
395 # for the exits, put *not* taking the exit on the list of constraints so
~/Workspace/angr/angr/angr/engines/vex/statements/wrtmp.py in SimIRStmt_WrTmp(engine, state, stmt)
2 # get data and track data reads
3 with state.history.subscribe_actions() as data_deps:
----> 4 data = engine.handle_expression(state, stmt.data)
5 state.scratch.store_tmp(stmt.tmp, data, deps=data_deps)
6
~/Workspace/angr/angr/angr/engines/vex/engine.py in handle_expression(self, state, expr)
452
453 state._inspect('expr', BP_BEFORE, expr=expr)
--> 454 result = handler(self, state, expr)
455
456 if o.SIMPLIFY_EXPRS in state.options:
~/Workspace/angr/angr/angr/engines/vex/expressions/op.py in SimIRExpr_Op(engine, state, expr)
12
13 try:
---> 14 result = translate(state, expr.op, exprs)
15
16 if o.TRACK_OP_ACTIONS in state.options:
~/Workspace/angr/angr/angr/engines/vex/irop.py in translate(state, op, s_args)
1042 l.error(error)
1043 raise UnsupportedIROpError(error)
-> 1044 return translate_inner(state, simop, s_args)
1045
1046
~/Workspace/angr/angr/angr/engines/vex/irop.py in translate_inner(state, irop, s_args)
1049 if irop._float and not options.SUPPORT_FLOATING_POINT in state.options:
1050 raise UnsupportedIROpError("floating point support disabled")
-> 1051 return irop.calculate(*s_args)
1052 except SimZeroDivisionException:
1053 if state.mode == 'static' and len(s_args) == 2 and state.solver.is_true(s_args[1] == 0):
~/Workspace/angr/angr/angr/engines/vex/irop.py in calculate(self, *args)
372 return self.extend_size(self._calculate(args))
373 except (ZeroDivisionError, claripy.ClaripyZeroDivisionError) as e:
--> 374 raise SimZeroDivisionException("divide by zero!") from e
375 except (TypeError, ValueError, SimValueError, claripy.ClaripyError) as e:
376 raise SimOperationError("%s._calculate() raised exception" % self.name) from e
SimZeroDivisionException: divide by zero!
|
ClaripyZeroDivisionError
|
def resolve(self, cfg, addr, func_addr, block, jumpkind):
"""
Resolves jump tables.
:param cfg: A CFG instance.
:param int addr: IRSB address.
:param int func_addr: The function address.
:param pyvex.IRSB block: The IRSB.
:return: A bool indicating whether the indirect jump is resolved successfully, and a list of resolved targets
:rtype: tuple
"""
project = self.project # short-hand
self._max_targets = cfg._indirect_jump_target_limit
# Perform a backward slicing from the jump target
b = Blade(
cfg.graph,
addr,
-1,
cfg=cfg,
project=project,
ignore_sp=False,
ignore_bp=False,
max_level=3,
base_state=self.base_state,
)
stmt_loc = (addr, DEFAULT_STATEMENT)
if stmt_loc not in b.slice:
return False, None
load_stmt_loc, load_stmt, load_size = None, None, None
stmts_to_remove = [stmt_loc]
stmts_adding_base_addr = [] # type: list[JumpTargetBaseAddr]
# All temporary variables that hold indirect addresses loaded out of the memory
# Obviously, load_stmt.tmp must be here
# if there are additional data transferring statements between the Load statement and the base-address-adding
# statement, all_addr_holders will have more than one temporary variables
#
# Here is an example:
#
# IRSB 0x4c64c4
# + 06 | t12 = LDle:I32(t7)
# + 07 | t11 = 32Sto64(t12)
# + 10 | t2 = Add64(0x0000000000571df0,t11)
#
# all_addr_holders will be {(0x4c64c4, 11): AddressTransferringTypes.SignedExtension32to64,
# (0x4c64c4, 12); AddressTransferringTypes.Assignment,
# }
all_addr_holders = OrderedDict()
while True:
preds = list(b.slice.predecessors(stmt_loc))
if len(preds) != 1:
return False, None
block_addr, stmt_idx = stmt_loc = preds[0]
block = project.factory.block(block_addr, backup_state=self.base_state).vex
stmt = block.statements[stmt_idx]
if isinstance(stmt, (pyvex.IRStmt.WrTmp, pyvex.IRStmt.Put)):
if isinstance(stmt.data, (pyvex.IRExpr.Get, pyvex.IRExpr.RdTmp)):
# data transferring
stmts_to_remove.append(stmt_loc)
if isinstance(stmt, pyvex.IRStmt.WrTmp):
all_addr_holders[(stmt_loc[0], stmt.tmp)] = (
AddressTransferringTypes.Assignment
)
continue
elif isinstance(stmt.data, pyvex.IRExpr.ITE):
# data transferring
# t16 = if (t43) ILGop_Ident32(LDle(t29)) else 0x0000c844
# > t44 = ITE(t43,t16,0x0000c844)
stmts_to_remove.append(stmt_loc)
if isinstance(stmt, pyvex.IRStmt.WrTmp):
all_addr_holders[(stmt_loc[0], stmt.tmp)] = (
AddressTransferringTypes.Assignment
)
continue
elif isinstance(stmt.data, pyvex.IRExpr.Unop):
if stmt.data.op == "Iop_32Sto64":
# data transferring with conversion
# t11 = 32Sto64(t12)
stmts_to_remove.append(stmt_loc)
if isinstance(stmt, pyvex.IRStmt.WrTmp):
all_addr_holders[(stmt_loc[0], stmt.tmp)] = (
AddressTransferringTypes.SignedExtension32to64
)
continue
elif stmt.data.op == "Iop_64to32":
# data transferring with conversion
# t24 = 64to32(t21)
stmts_to_remove.append(stmt_loc)
if isinstance(stmt, pyvex.IRStmt.WrTmp):
all_addr_holders[(stmt_loc[0], stmt.tmp)] = (
AddressTransferringTypes.Truncation64to32
)
continue
elif stmt.data.op == "Iop_32Uto64":
# data transferring with conversion
# t21 = 32Uto64(t22)
stmts_to_remove.append(stmt_loc)
if isinstance(stmt, pyvex.IRStmt.WrTmp):
all_addr_holders[(stmt_loc[0], stmt.tmp)] = (
AddressTransferringTypes.UnsignedExtension32to64
)
continue
elif isinstance(stmt.data, pyvex.IRExpr.Binop) and stmt.data.op.startswith(
"Iop_Add"
):
# GitHub issue #1289, a S390X binary
# jump_label = &jump_table + *(jump_table[index])
# IRSB 0x4007c0
# 00 | ------ IMark(0x4007c0, 4, 0) ------
# + 01 | t0 = GET:I32(212)
# + 02 | t1 = Add32(t0,0xffffffff)
# 03 | PUT(352) = 0x0000000000000003
# 04 | t13 = 32Sto64(t0)
# 05 | t6 = t13
# 06 | PUT(360) = t6
# 07 | PUT(368) = 0xffffffffffffffff
# 08 | PUT(376) = 0x0000000000000000
# 09 | PUT(212) = t1
# 10 | PUT(ia) = 0x00000000004007c4
# 11 | ------ IMark(0x4007c4, 6, 0) ------
# + 12 | t14 = 32Uto64(t1)
# + 13 | t8 = t14
# + 14 | t16 = CmpLE64U(t8,0x000000000000000b)
# + 15 | t15 = 1Uto32(t16)
# + 16 | t10 = t15
# + 17 | t11 = CmpNE32(t10,0x00000000)
# + 18 | if (t11) { PUT(offset=336) = 0x4007d4; Ijk_Boring }
# Next: 0x4007ca
#
# IRSB 0x4007d4
# 00 | ------ IMark(0x4007d4, 6, 0) ------
# + 01 | t8 = GET:I64(r2)
# + 02 | t7 = Shr64(t8,0x3d)
# + 03 | t9 = Shl64(t8,0x03)
# + 04 | t6 = Or64(t9,t7)
# + 05 | t11 = And64(t6,0x00000007fffffff8)
# 06 | ------ IMark(0x4007da, 6, 0) ------
# 07 | PUT(r1) = 0x0000000000400a50
# 08 | PUT(ia) = 0x00000000004007e0
# 09 | ------ IMark(0x4007e0, 6, 0) ------
# + 10 | t12 = Add64(0x0000000000400a50,t11)
# + 11 | t16 = LDbe:I64(t12)
# 12 | PUT(r2) = t16
# 13 | ------ IMark(0x4007e6, 4, 0) ------
# + 14 | t17 = Add64(0x0000000000400a50,t16)
# + Next: t17
#
# Special case: a base address is added to the loaded offset before jumping to it.
if isinstance(stmt.data.args[0], pyvex.IRExpr.Const) and isinstance(
stmt.data.args[1], pyvex.IRExpr.RdTmp
):
stmts_adding_base_addr.append(
JumpTargetBaseAddr(
stmt_loc,
stmt,
stmt.data.args[1].tmp,
base_addr=stmt.data.args[0].con.value,
)
)
stmts_to_remove.append(stmt_loc)
elif isinstance(stmt.data.args[0], pyvex.IRExpr.RdTmp) and isinstance(
stmt.data.args[1], pyvex.IRExpr.Const
):
stmts_adding_base_addr.append(
JumpTargetBaseAddr(
stmt_loc,
stmt,
stmt.data.args[0].tmp,
base_addr=stmt.data.args[1].con.value,
)
)
stmts_to_remove.append(stmt_loc)
elif isinstance(stmt.data.args[0], pyvex.IRExpr.RdTmp) and isinstance(
stmt.data.args[1], pyvex.IRExpr.RdTmp
):
# one of the tmps must be holding a concrete value at this point
stmts_adding_base_addr.append(
JumpTargetBaseAddr(
stmt_loc,
stmt,
stmt.data.args[0].tmp,
tmp_1=stmt.data.args[1].tmp,
)
)
stmts_to_remove.append(stmt_loc)
else:
# not supported
pass
continue
elif isinstance(stmt.data, pyvex.IRExpr.Load):
# Got it!
load_stmt, load_stmt_loc, load_size = (
stmt,
stmt_loc,
block.tyenv.sizeof(stmt.tmp) // self.project.arch.byte_width,
)
stmts_to_remove.append(stmt_loc)
all_addr_holders[(stmt_loc[0], stmt.tmp)] = (
AddressTransferringTypes.Assignment
)
elif isinstance(stmt, pyvex.IRStmt.LoadG):
# Got it!
#
# this is how an ARM jump table is translated to VEX
# > t16 = if (t43) ILGop_Ident32(LDle(t29)) else 0x0000c844
load_stmt, load_stmt_loc, load_size = (
stmt,
stmt_loc,
block.tyenv.sizeof(stmt.dst) // self.project.arch.byte_width,
)
stmts_to_remove.append(stmt_loc)
break
if load_stmt_loc is None:
# the load statement is not found
return False, None
# If we're just reading a constant, don't bother with the rest of this mess!
if isinstance(load_stmt, pyvex.IRStmt.WrTmp):
if type(load_stmt.data.addr) is pyvex.IRExpr.Const:
# It's directly loading from a constant address
# e.g.,
# ldr r0, =main+1
# blx r0
# It's not a jump table, but we resolve it anyway
jump_target_addr = load_stmt.data.addr.con.value
jump_target = cfg._fast_memory_load_pointer(jump_target_addr)
if jump_target is None:
l.info(
"Constant indirect jump at %#08x points outside of loaded memory to %#08x",
addr,
jump_target_addr,
)
return False, None
l.info(
"Resolved constant indirect jump from %#08x to %#08x",
addr,
jump_target_addr,
)
ij = cfg.indirect_jumps[addr]
ij.jumptable = False
ij.resolved_targets = set([jump_target])
return True, [jump_target]
elif isinstance(load_stmt, pyvex.IRStmt.LoadG):
if type(load_stmt.addr) is pyvex.IRExpr.Const:
# It's directly loading from a constant address
# e.g.,
# 4352c SUB R1, R11, #0x1000
# 43530 LDRHI R3, =loc_45450
# ...
# 43540 MOV PC, R3
#
# It's not a jump table, but we resolve it anyway
# Note that this block has two branches: One goes to 45450, the other one goes to whatever the original
# value of R3 is. Some intensive data-flow analysis is required in this case.
jump_target_addr = load_stmt.addr.con.value
jump_target = cfg._fast_memory_load_pointer(jump_target_addr)
l.info(
"Resolved constant indirect jump from %#08x to %#08x",
addr,
jump_target_addr,
)
ij = cfg.indirect_jumps[addr]
ij.jumptable = False
ij.resolved_targets = set([jump_target])
return True, [jump_target]
# Well, we have a real jumptable to resolve!
# If we're just reading a constant, don't bother with the rest of this mess!
if isinstance(load_stmt, pyvex.IRStmt.WrTmp):
if type(load_stmt.data.addr) is pyvex.IRExpr.Const:
# It's directly loading from a constant address
# e.g.,
# ldr r0, =main+1
# blx r0
# It's not a jump table, but we resolve it anyway
jump_target_addr = load_stmt.data.addr.con.value
jump_target = cfg._fast_memory_load_pointer(jump_target_addr)
if not jump_target:
# ...except this constant looks like a jumpout!
l.info(
"Constant indirect jump directed out of the binary at #%08x", addr
)
return False, []
l.info(
"Resolved constant indirect jump from %#08x to %#08x",
addr,
jump_target_addr,
)
ij = cfg.indirect_jumps[addr]
ij.jumptable = False
ij.resolved_targets = set([jump_target])
return True, [jump_target]
elif isinstance(load_stmt, pyvex.IRStmt.LoadG):
if type(load_stmt.addr) is pyvex.IRExpr.Const:
# It's directly loading from a constant address
# e.g.,
# 4352c SUB R1, R11, #0x1000
# 43530 LDRHI R3, =loc_45450
# ...
# 43540 MOV PC, R3
#
# It's not a jump table, but we resolve it anyway
# Note that this block has two branches: One goes to 45450, the other one goes to whatever the original
# value of R3 is. Some intensive data-flow analysis is required in this case.
jump_target_addr = load_stmt.addr.con.value
jump_target = cfg._fast_memory_load_pointer(jump_target_addr)
l.info(
"Resolved constant indirect jump from %#08x to %#08x",
addr,
jump_target_addr,
)
ij = cfg.indirect_jumps[addr]
ij.jumptable = False
ij.resolved_targets = set([jump_target])
return True, [jump_target]
# skip all statements before the load statement
# We want to leave the final loaded value as symbolic, so we can
# get the full range of possibilities
b.slice.remove_nodes_from(stmts_to_remove)
# Debugging output
if l.level == logging.DEBUG:
self._dbg_repr_slice(b)
# Get all sources
sources = [n_ for n_ in b.slice.nodes() if b.slice.in_degree(n_) == 0]
# Create the annotated CFG
annotatedcfg = AnnotatedCFG(project, None, detect_loops=False)
annotatedcfg.from_digraph(b.slice)
# pylint: disable=too-many-nested-blocks
for src_irsb, _ in sources:
# Use slicecutor to execute each one, and get the address
# We simply give up if any exception occurs on the way
start_state = self._initial_state(src_irsb)
# Keep IP symbolic to avoid unnecessary concretization
start_state.options.add(o.KEEP_IP_SYMBOLIC)
start_state.options.add(o.NO_IP_CONCRETIZATION)
# be quiet!!!!!!
start_state.options.add(o.SYMBOL_FILL_UNCONSTRAINED_REGISTERS)
start_state.options.add(o.SYMBOL_FILL_UNCONSTRAINED_MEMORY)
# any read from an uninitialized segment should be unconstrained
if self._bss_regions:
bss_memory_read_bp = BP(
when=BP_BEFORE, enabled=True, action=self._bss_memory_read_hook
)
start_state.inspect.add_breakpoint("mem_read", bss_memory_read_bp)
# FIXME:
# this is a hack: for certain architectures, we do not initialize the base pointer, since the jump table on
# those architectures may use the bp register to store value
if not self.project.arch.name in {"S390X"}:
start_state.regs.bp = start_state.arch.initial_sp + 0x2000
self._cached_memread_addrs.clear()
init_registers_on_demand_bp = BP(
when=BP_BEFORE, enabled=True, action=self._init_registers_on_demand
)
start_state.inspect.add_breakpoint("mem_read", init_registers_on_demand_bp)
# Create the slicecutor
simgr = self.project.factory.simulation_manager(start_state, resilience=True)
slicecutor = Slicecutor(annotatedcfg, force_taking_exit=True)
simgr.use_technique(slicecutor)
simgr.use_technique(Explorer(find=load_stmt_loc[0]))
# Run it!
try:
simgr.run()
except KeyError as ex:
# This is because the program slice is incomplete.
# Blade will support more IRExprs and IRStmts
l.debug("KeyError occurred due to incomplete program slice.", exc_info=ex)
continue
# Get the jumping targets
for r in simgr.found:
try:
whitelist = annotatedcfg.get_whitelisted_statements(r.addr)
last_stmt = annotatedcfg.get_last_statement_index(r.addr)
succ = project.factory.successors(
r, whitelist=whitelist, last_stmt=last_stmt
)
except (AngrError, SimError):
# oops there are errors
l.warning(
"Cannot get jump successor states from a path that has reached the target. Skip it."
)
continue
all_states = succ.flat_successors + succ.unconstrained_successors
if not all_states:
l.warning(
"Slicecutor failed to execute the program slice. No output state is available."
)
continue
state = all_states[0] # Just take the first state
self._cached_memread_addrs.clear() # clear the cache to save some memory (and avoid confusion when
# debugging)
# Parse the memory load statement and get the memory address of where the jump table is stored
jumptable_addr = self._parse_load_statement(load_stmt, state)
if jumptable_addr is None:
continue
# sanity check and necessary pre-processing
if stmts_adding_base_addr:
assert (
len(stmts_adding_base_addr) == 1
) # Making sure we are only dealing with one operation here
jump_base_addr = stmts_adding_base_addr[0]
if jump_base_addr.base_addr_available:
addr_holders = {(jump_base_addr.stmt_loc[0], jump_base_addr.tmp)}
else:
addr_holders = {
(jump_base_addr.stmt_loc[0], jump_base_addr.tmp),
(jump_base_addr.stmt_loc[0], jump_base_addr.tmp_1),
}
if len(set(all_addr_holders.keys()).intersection(addr_holders)) != 1:
# for some reason it's trying to add a base address onto a different temporary variable that we
# are not aware of. skip.
continue
if not jump_base_addr.base_addr_available:
# we need to decide which tmp is the address holder and which tmp holds the base address
addr_holder = next(
iter(set(all_addr_holders.keys()).intersection(addr_holders))
)
if jump_base_addr.tmp_1 == addr_holder[1]:
# swap the two tmps
jump_base_addr.tmp, jump_base_addr.tmp_1 = (
jump_base_addr.tmp_1,
jump_base_addr.tmp,
)
# Load the concrete base address
jump_base_addr.base_addr = state.solver.eval(
state.scratch.temps[jump_base_addr.tmp_1]
)
all_targets = []
total_cases = jumptable_addr._model_vsa.cardinality
if total_cases > self._max_targets:
# We resolved too many targets for this indirect jump. Something might have gone wrong.
l.debug(
"%d targets are resolved for the indirect jump at %#x. It may not be a jump table. Try the "
"next source, if there is any.",
total_cases,
addr,
)
continue
# Or alternatively, we can ask user, which is meh...
#
# jump_base_addr = int(raw_input("please give me the jump base addr: "), 16)
# total_cases = int(raw_input("please give me the total cases: "))
# jump_target = state.solver.SI(bits=64, lower_bound=jump_base_addr, upper_bound=jump_base_addr +
# (total_cases - 1) * 8, stride=8)
jump_table = []
min_jumptable_addr = state.solver.min(jumptable_addr)
max_jumptable_addr = state.solver.max(jumptable_addr)
# Both the min jump target and the max jump target should be within a mapped memory region
# i.e., we shouldn't be jumping to the stack or somewhere unmapped
if not project.loader.find_segment_containing(
min_jumptable_addr
) or not project.loader.find_segment_containing(max_jumptable_addr):
if not project.loader.find_section_containing(
min_jumptable_addr
) or not project.loader.find_section_containing(max_jumptable_addr):
l.debug(
"Jump table %#x might have jump targets outside mapped memory regions. "
"Continue to resolve it from the next data source.",
addr,
)
continue
# Load the jump table from memory
should_skip = False
for idx, a in enumerate(
state.solver.eval_upto(jumptable_addr, total_cases)
):
if idx % 100 == 0 and idx != 0:
l.debug(
"%d targets have been resolved for the indirect jump at %#x...",
idx,
addr,
)
target = cfg._fast_memory_load_pointer(a, size=load_size)
if target is None:
l.debug("Cannot load pointer from address %#x. Skip.", a)
should_skip = True
break
all_targets.append(target)
if should_skip:
continue
# Adjust entries inside the jump table
if stmts_adding_base_addr:
stmt_adding_base_addr = stmts_adding_base_addr[0]
base_addr = stmt_adding_base_addr.base_addr
conversion_ops = list(
reversed(
list(
v
for v in all_addr_holders.values()
if v is not AddressTransferringTypes.Assignment
)
)
)
if conversion_ops:
invert_conversion_ops = []
for conversion_op in conversion_ops:
if (
conversion_op
is AddressTransferringTypes.SignedExtension32to64
):
lam = (
lambda a: (a | 0xFFFFFFFF00000000)
if a >= 0x80000000
else a
)
elif (
conversion_op
is AddressTransferringTypes.UnsignedExtension32to64
):
lam = lambda a: a
elif conversion_op is AddressTransferringTypes.Truncation64to32:
lam = lambda a: a & 0xFFFFFFFF
else:
raise NotImplementedError(
"Unsupported conversion operation."
)
invert_conversion_ops.append(lam)
all_targets_copy = all_targets
all_targets = []
for target_ in all_targets_copy:
for lam in invert_conversion_ops:
target_ = lam(target_)
all_targets.append(target_)
mask = (2**self.project.arch.bits) - 1
all_targets = [(target + base_addr) & mask for target in all_targets]
# Finally... all targets are ready
illegal_target_found = False
for target in all_targets:
# if the total number of targets is suspicious (it usually implies a failure in applying the
# constraints), check if all jump targets are legal
if len(all_targets) in {
0x100,
0x10000,
} and not self._is_jumptarget_legal(target):
l.info(
"Jump target %#x is probably illegal. Try to resolve indirect jump at %#x from the next "
"source.",
target,
addr,
)
illegal_target_found = True
break
jump_table.append(target)
if illegal_target_found:
continue
l.info("Resolved %d targets from %#x.", len(all_targets), addr)
# write to the IndirectJump object in CFG
ij = cfg.indirect_jumps[addr]
if total_cases > 1:
# It can be considered a jump table only if there are more than one jump target
ij.jumptable = True
ij.jumptable_addr = state.solver.min(jumptable_addr)
ij.resolved_targets = set(jump_table)
ij.jumptable_entries = jump_table
else:
ij.jumptable = False
ij.resolved_targets = set(jump_table)
return True, all_targets
l.info("Could not resolve indirect jump %#x in funtion %#x.", addr, func_addr)
return False, None
|
def resolve(self, cfg, addr, func_addr, block, jumpkind):
"""
Resolves jump tables.
:param cfg: A CFG instance.
:param int addr: IRSB address.
:param int func_addr: The function address.
:param pyvex.IRSB block: The IRSB.
:return: A bool indicating whether the indirect jump is resolved successfully, and a list of resolved targets
:rtype: tuple
"""
project = self.project # short-hand
self._max_targets = cfg._indirect_jump_target_limit
# Perform a backward slicing from the jump target
b = Blade(
cfg.graph,
addr,
-1,
cfg=cfg,
project=project,
ignore_sp=False,
ignore_bp=False,
max_level=3,
base_state=self.base_state,
)
stmt_loc = (addr, DEFAULT_STATEMENT)
if stmt_loc not in b.slice:
return False, None
load_stmt_loc, load_stmt, load_size = None, None, None
stmts_to_remove = [stmt_loc]
stmts_adding_base_addr = [] # type: list[JumpTargetBaseAddr]
# All temporary variables that hold indirect addresses loaded out of the memory
# Obviously, load_stmt.tmp must be here
# if there are additional data transferring statements between the Load statement and the base-address-adding
# statement, all_addr_holders will have more than one temporary variables
#
# Here is an example:
#
# IRSB 0x4c64c4
# + 06 | t12 = LDle:I32(t7)
# + 07 | t11 = 32Sto64(t12)
# + 10 | t2 = Add64(0x0000000000571df0,t11)
#
# all_addr_holders will be {(0x4c64c4, 11): AddressTransferringTypes.SignedExtension32to64,
# (0x4c64c4, 12); AddressTransferringTypes.Assignment,
# }
all_addr_holders = OrderedDict()
while True:
preds = list(b.slice.predecessors(stmt_loc))
if len(preds) != 1:
return False, None
block_addr, stmt_idx = stmt_loc = preds[0]
block = project.factory.block(block_addr, backup_state=self.base_state).vex
stmt = block.statements[stmt_idx]
if isinstance(stmt, (pyvex.IRStmt.WrTmp, pyvex.IRStmt.Put)):
if isinstance(stmt.data, (pyvex.IRExpr.Get, pyvex.IRExpr.RdTmp)):
# data transferring
stmts_to_remove.append(stmt_loc)
if isinstance(stmt, pyvex.IRStmt.WrTmp):
all_addr_holders[(stmt_loc[0], stmt.tmp)] = (
AddressTransferringTypes.Assignment
)
continue
elif isinstance(stmt.data, pyvex.IRExpr.ITE):
# data transferring
# t16 = if (t43) ILGop_Ident32(LDle(t29)) else 0x0000c844
# > t44 = ITE(t43,t16,0x0000c844)
stmts_to_remove.append(stmt_loc)
if isinstance(stmt, pyvex.IRStmt.WrTmp):
all_addr_holders[(stmt_loc[0], stmt.tmp)] = (
AddressTransferringTypes.Assignment
)
continue
elif isinstance(stmt.data, pyvex.IRExpr.Unop):
if stmt.data.op == "Iop_32Sto64":
# data transferring with conversion
# t11 = 32Sto64(t12)
stmts_to_remove.append(stmt_loc)
if isinstance(stmt, pyvex.IRStmt.WrTmp):
all_addr_holders[(stmt_loc[0], stmt.tmp)] = (
AddressTransferringTypes.SignedExtension32to64
)
continue
elif stmt.data.op == "Iop_64to32":
# data transferring with conversion
# t24 = 64to32(t21)
stmts_to_remove.append(stmt_loc)
if isinstance(stmt, pyvex.IRStmt.WrTmp):
all_addr_holders[(stmt_loc[0], stmt.tmp)] = (
AddressTransferringTypes.Truncation64to32
)
continue
elif stmt.data.op == "Iop_32Uto64":
# data transferring with conversion
# t21 = 32Uto64(t22)
stmts_to_remove.append(stmt_loc)
if isinstance(stmt, pyvex.IRStmt.WrTmp):
all_addr_holders[(stmt_loc[0], stmt.tmp)] = (
AddressTransferringTypes.UnsignedExtension32to64
)
continue
elif isinstance(stmt.data, pyvex.IRExpr.Binop) and stmt.data.op.startswith(
"Iop_Add"
):
# GitHub issue #1289, a S390X binary
# jump_label = &jump_table + *(jump_table[index])
# IRSB 0x4007c0
# 00 | ------ IMark(0x4007c0, 4, 0) ------
# + 01 | t0 = GET:I32(212)
# + 02 | t1 = Add32(t0,0xffffffff)
# 03 | PUT(352) = 0x0000000000000003
# 04 | t13 = 32Sto64(t0)
# 05 | t6 = t13
# 06 | PUT(360) = t6
# 07 | PUT(368) = 0xffffffffffffffff
# 08 | PUT(376) = 0x0000000000000000
# 09 | PUT(212) = t1
# 10 | PUT(ia) = 0x00000000004007c4
# 11 | ------ IMark(0x4007c4, 6, 0) ------
# + 12 | t14 = 32Uto64(t1)
# + 13 | t8 = t14
# + 14 | t16 = CmpLE64U(t8,0x000000000000000b)
# + 15 | t15 = 1Uto32(t16)
# + 16 | t10 = t15
# + 17 | t11 = CmpNE32(t10,0x00000000)
# + 18 | if (t11) { PUT(offset=336) = 0x4007d4; Ijk_Boring }
# Next: 0x4007ca
#
# IRSB 0x4007d4
# 00 | ------ IMark(0x4007d4, 6, 0) ------
# + 01 | t8 = GET:I64(r2)
# + 02 | t7 = Shr64(t8,0x3d)
# + 03 | t9 = Shl64(t8,0x03)
# + 04 | t6 = Or64(t9,t7)
# + 05 | t11 = And64(t6,0x00000007fffffff8)
# 06 | ------ IMark(0x4007da, 6, 0) ------
# 07 | PUT(r1) = 0x0000000000400a50
# 08 | PUT(ia) = 0x00000000004007e0
# 09 | ------ IMark(0x4007e0, 6, 0) ------
# + 10 | t12 = Add64(0x0000000000400a50,t11)
# + 11 | t16 = LDbe:I64(t12)
# 12 | PUT(r2) = t16
# 13 | ------ IMark(0x4007e6, 4, 0) ------
# + 14 | t17 = Add64(0x0000000000400a50,t16)
# + Next: t17
#
# Special case: a base address is added to the loaded offset before jumping to it.
if isinstance(stmt.data.args[0], pyvex.IRExpr.Const) and isinstance(
stmt.data.args[1], pyvex.IRExpr.RdTmp
):
stmts_adding_base_addr.append(
JumpTargetBaseAddr(
stmt_loc,
stmt,
stmt.data.args[1].tmp,
base_addr=stmt.data.args[0].con.value,
)
)
stmts_to_remove.append(stmt_loc)
elif isinstance(stmt.data.args[0], pyvex.IRExpr.RdTmp) and isinstance(
stmt.data.args[1], pyvex.IRExpr.Const
):
stmts_adding_base_addr.append(
JumpTargetBaseAddr(
stmt_loc,
stmt,
stmt.data.args[0].tmp,
base_addr=stmt.data.args[1].con.value,
)
)
stmts_to_remove.append(stmt_loc)
elif isinstance(stmt.data.args[0], pyvex.IRExpr.RdTmp) and isinstance(
stmt.data.args[1], pyvex.IRExpr.RdTmp
):
# one of the tmps must be holding a concrete value at this point
stmts_adding_base_addr.append(
JumpTargetBaseAddr(
stmt_loc,
stmt,
stmt.data.args[0].tmp,
tmp_1=stmt.data.args[1].tmp,
)
)
stmts_to_remove.append(stmt_loc)
else:
# not supported
pass
continue
elif isinstance(stmt.data, pyvex.IRExpr.Load):
# Got it!
load_stmt, load_stmt_loc, load_size = (
stmt,
stmt_loc,
block.tyenv.sizeof(stmt.tmp) // self.project.arch.byte_width,
)
stmts_to_remove.append(stmt_loc)
all_addr_holders[(stmt_loc[0], stmt.tmp)] = (
AddressTransferringTypes.Assignment
)
elif isinstance(stmt, pyvex.IRStmt.LoadG):
# Got it!
#
# this is how an ARM jump table is translated to VEX
# > t16 = if (t43) ILGop_Ident32(LDle(t29)) else 0x0000c844
load_stmt, load_stmt_loc, load_size = (
stmt,
stmt_loc,
block.tyenv.sizeof(stmt.dst) // self.project.arch.byte_width,
)
stmts_to_remove.append(stmt_loc)
break
if load_stmt_loc is None:
# the load statement is not found
return False, None
# If we're just reading a constant, don't bother with the rest of this mess!
if isinstance(load_stmt, pyvex.IRStmt.WrTmp):
if type(load_stmt.data.addr) is pyvex.IRExpr.Const:
# It's directly loading from a constant address
# e.g.,
# ldr r0, =main+1
# blx r0
# It's not a jump table, but we resolve it anyway
jump_target_addr = load_stmt.data.addr.con.value
jump_target = cfg._fast_memory_load_pointer(jump_target_addr)
if jump_target is None:
l.info(
"Constant indirect jump at %#08x points outside of loaded memory to %#08x",
addr,
jump_target_addr,
)
return False, None
l.info(
"Resolved constant indirect jump from %#08x to %#08x",
addr,
jump_target_addr,
)
ij = cfg.indirect_jumps[addr]
ij.jumptable = False
ij.resolved_targets = set([jump_target])
return True, [jump_target]
elif isinstance(load_stmt, pyvex.IRStmt.LoadG):
if type(load_stmt.addr) is pyvex.IRExpr.Const:
# It's directly loading from a constant address
# e.g.,
# 4352c SUB R1, R11, #0x1000
# 43530 LDRHI R3, =loc_45450
# ...
# 43540 MOV PC, R3
#
# It's not a jump table, but we resolve it anyway
# Note that this block has two branches: One goes to 45450, the other one goes to whatever the original
# value of R3 is. Some intensive data-flow analysis is required in this case.
jump_target_addr = load_stmt.addr.con.value
jump_target = cfg._fast_memory_load_pointer(jump_target_addr)
l.info(
"Resolved constant indirect jump from %#08x to %#08x",
addr,
jump_target_addr,
)
ij = cfg.indirect_jumps[addr]
ij.jumptable = False
ij.resolved_targets = set([jump_target])
return True, [jump_target]
# Well, we have a real jumptable to resolve!
# If we're just reading a constant, don't bother with the rest of this mess!
if isinstance(load_stmt, pyvex.IRStmt.WrTmp):
if type(load_stmt.data.addr) is pyvex.IRExpr.Const:
# It's directly loading from a constant address
# e.g.,
# ldr r0, =main+1
# blx r0
# It's not a jump table, but we resolve it anyway
jump_target_addr = load_stmt.data.addr.con.value
jump_target = cfg._fast_memory_load_pointer(jump_target_addr)
if not jump_target:
# ...except this constant looks like a jumpout!
l.info(
"Constant indirect jump directed out of the binary at #%08x", addr
)
return False, []
l.info(
"Resolved constant indirect jump from %#08x to %#08x",
addr,
jump_target_addr,
)
ij = cfg.indirect_jumps[addr]
ij.jumptable = False
ij.resolved_targets = set([jump_target])
return True, [jump_target]
elif isinstance(load_stmt, pyvex.IRStmt.LoadG):
if type(load_stmt.addr) is pyvex.IRExpr.Const:
# It's directly loading from a constant address
# e.g.,
# 4352c SUB R1, R11, #0x1000
# 43530 LDRHI R3, =loc_45450
# ...
# 43540 MOV PC, R3
#
# It's not a jump table, but we resolve it anyway
# Note that this block has two branches: One goes to 45450, the other one goes to whatever the original
# value of R3 is. Some intensive data-flow analysis is required in this case.
jump_target_addr = load_stmt.addr.con.value
jump_target = cfg._fast_memory_load_pointer(jump_target_addr)
l.info(
"Resolved constant indirect jump from %#08x to %#08x",
addr,
jump_target_addr,
)
ij = cfg.indirect_jumps[addr]
ij.jumptable = False
ij.resolved_targets = set([jump_target])
return True, [jump_target]
# skip all statements before the load statement
# We want to leave the final loaded value as symbolic, so we can
# get the full range of possibilities
b.slice.remove_nodes_from(stmts_to_remove)
# Debugging output
if l.level == logging.DEBUG:
self._dbg_repr_slice(b)
# Get all sources
sources = [n_ for n_ in b.slice.nodes() if b.slice.in_degree(n_) == 0]
# Create the annotated CFG
annotatedcfg = AnnotatedCFG(project, None, detect_loops=False)
annotatedcfg.from_digraph(b.slice)
# pylint: disable=too-many-nested-blocks
for src_irsb, _ in sources:
# Use slicecutor to execute each one, and get the address
# We simply give up if any exception occurs on the way
start_state = self._initial_state(src_irsb)
# Keep IP symbolic to avoid unnecessary concretization
start_state.options.add(o.KEEP_IP_SYMBOLIC)
start_state.options.add(o.NO_IP_CONCRETIZATION)
# be quiet!!!!!!
start_state.options.add(o.SYMBOL_FILL_UNCONSTRAINED_REGISTERS)
start_state.options.add(o.SYMBOL_FILL_UNCONSTRAINED_MEMORY)
# any read from an uninitialized segment should be unconstrained
if self._bss_regions:
bss_memory_read_bp = BP(
when=BP_BEFORE, enabled=True, action=self._bss_memory_read_hook
)
start_state.inspect.add_breakpoint("mem_read", bss_memory_read_bp)
# FIXME:
# this is a hack: for certain architectures, we do not initialize the base pointer, since the jump table on
# those architectures may use the bp register to store value
if not self.project.arch.name in {"S390X"}:
start_state.regs.bp = start_state.arch.initial_sp + 0x2000
self._cached_memread_addrs.clear()
init_registers_on_demand_bp = BP(
when=BP_BEFORE, enabled=True, action=self._init_registers_on_demand
)
start_state.inspect.add_breakpoint("mem_read", init_registers_on_demand_bp)
# Create the slicecutor
simgr = self.project.factory.simulation_manager(start_state, resilience=True)
slicecutor = Slicecutor(annotatedcfg, force_taking_exit=True)
simgr.use_technique(slicecutor)
simgr.use_technique(Explorer(find=load_stmt_loc[0]))
# Run it!
try:
simgr.run()
except KeyError as ex:
# This is because the program slice is incomplete.
# Blade will support more IRExprs and IRStmts
l.debug("KeyError occurred due to incomplete program slice.", exc_info=ex)
continue
# Get the jumping targets
for r in simgr.found:
try:
whitelist = annotatedcfg.get_whitelisted_statements(r.addr)
last_stmt = annotatedcfg.get_last_statement_index(r.addr)
succ = project.factory.successors(
r, whitelist=whitelist, last_stmt=last_stmt
)
except (AngrError, SimError):
# oops there are errors
l.warning(
"Cannot get jump successor states from a path that has reached the target. Skip it."
)
continue
all_states = succ.flat_successors + succ.unconstrained_successors
if not all_states:
l.warning(
"Slicecutor failed to execute the program slice. No output state is available."
)
continue
state = all_states[0] # Just take the first state
self._cached_memread_addrs.clear() # clear the cache to save some memory (and avoid confusion when
# debugging)
# Parse the memory load statement and get the memory address of where the jump table is stored
jumptable_addr = self._parse_load_statement(load_stmt, state)
if jumptable_addr is None:
continue
# sanity check and necessary pre-processing
if stmts_adding_base_addr:
assert (
len(stmts_adding_base_addr) == 1
) # Making sure we are only dealing with one operation here
jump_base_addr = stmts_adding_base_addr[0]
if jump_base_addr.base_addr_available:
addr_holders = {(jump_base_addr.stmt_loc[0], jump_base_addr.tmp)}
else:
addr_holders = {
(jump_base_addr.stmt_loc[0], jump_base_addr.tmp),
(jump_base_addr.stmt_loc[0], jump_base_addr.tmp_1),
}
if len(set(all_addr_holders.keys()).intersection(addr_holders)) != 1:
# for some reason it's trying to add a base address onto a different temporary variable that we
# are not aware of. skip.
continue
if not jump_base_addr.base_addr_available:
# we need to decide which tmp is the address holder and which tmp holds the base address
addr_holder = next(
iter(set(all_addr_holders.keys()).intersection(addr_holders))
)
if jump_base_addr.tmp_1 == addr_holder[1]:
# swap the two tmps
jump_base_addr.tmp, jump_base_addr.tmp_1 = (
jump_base_addr.tmp_1,
jump_base_addr.tmp,
)
# Load the concrete base address
jump_base_addr.base_addr = state.solver.eval(
state.scratch.temps[jump_base_addr.tmp_1]
)
all_targets = []
total_cases = jumptable_addr._model_vsa.cardinality
if total_cases > self._max_targets:
# We resolved too many targets for this indirect jump. Something might have gone wrong.
l.debug(
"%d targets are resolved for the indirect jump at %#x. It may not be a jump table. Try the "
"next source, if there is any.",
total_cases,
addr,
)
continue
# Or alternatively, we can ask user, which is meh...
#
# jump_base_addr = int(raw_input("please give me the jump base addr: "), 16)
# total_cases = int(raw_input("please give me the total cases: "))
# jump_target = state.solver.SI(bits=64, lower_bound=jump_base_addr, upper_bound=jump_base_addr +
# (total_cases - 1) * 8, stride=8)
jump_table = []
min_jumptable_addr = state.solver.min(jumptable_addr)
max_jumptable_addr = state.solver.max(jumptable_addr)
# Both the min jump target and the max jump target should be within a mapped memory region
# i.e., we shouldn't be jumping to the stack or somewhere unmapped
if not project.loader.find_segment_containing(
min_jumptable_addr
) or not project.loader.find_segment_containing(max_jumptable_addr):
if not project.loader.find_section_containing(
min_jumptable_addr
) or not project.loader.find_section_containing(max_jumptable_addr):
l.debug(
"Jump table %#x might have jump targets outside mapped memory regions. "
"Continue to resolve it from the next data source.",
addr,
)
continue
# Load the jump table from memory
for idx, a in enumerate(
state.solver.eval_upto(jumptable_addr, total_cases)
):
if idx % 100 == 0 and idx != 0:
l.debug(
"%d targets have been resolved for the indirect jump at %#x...",
idx,
addr,
)
target = cfg._fast_memory_load_pointer(a, size=load_size)
all_targets.append(target)
# Adjust entries inside the jump table
if stmts_adding_base_addr:
stmt_adding_base_addr = stmts_adding_base_addr[0]
base_addr = stmt_adding_base_addr.base_addr
conversion_ops = list(
reversed(
list(
v
for v in all_addr_holders.values()
if v is not AddressTransferringTypes.Assignment
)
)
)
if conversion_ops:
invert_conversion_ops = []
for conversion_op in conversion_ops:
if (
conversion_op
is AddressTransferringTypes.SignedExtension32to64
):
lam = (
lambda a: (a | 0xFFFFFFFF00000000)
if a >= 0x80000000
else a
)
elif (
conversion_op
is AddressTransferringTypes.UnsignedExtension32to64
):
lam = lambda a: a
elif conversion_op is AddressTransferringTypes.Truncation64to32:
lam = lambda a: a & 0xFFFFFFFF
else:
raise NotImplementedError(
"Unsupported conversion operation."
)
invert_conversion_ops.append(lam)
all_targets_copy = all_targets
all_targets = []
for target_ in all_targets_copy:
for lam in invert_conversion_ops:
target_ = lam(target_)
all_targets.append(target_)
mask = (2**self.project.arch.bits) - 1
all_targets = [(target + base_addr) & mask for target in all_targets]
# Finally... all targets are ready
illegal_target_found = False
for target in all_targets:
# if the total number of targets is suspicious (it usually implies a failure in applying the
# constraints), check if all jump targets are legal
if len(all_targets) in {
0x100,
0x10000,
} and not self._is_jumptarget_legal(target):
l.info(
"Jump target %#x is probably illegal. Try to resolve indirect jump at %#x from the next "
"source.",
target,
addr,
)
illegal_target_found = True
break
jump_table.append(target)
if illegal_target_found:
continue
l.info("Resolved %d targets from %#x.", len(all_targets), addr)
# write to the IndirectJump object in CFG
ij = cfg.indirect_jumps[addr]
if total_cases > 1:
# It can be considered a jump table only if there are more than one jump target
ij.jumptable = True
ij.jumptable_addr = state.solver.min(jumptable_addr)
ij.resolved_targets = set(jump_table)
ij.jumptable_entries = jump_table
else:
ij.jumptable = False
ij.resolved_targets = set(jump_table)
return True, all_targets
l.info("Could not resolve indirect jump %#x in funtion %#x.", addr, func_addr)
return False, None
|
https://github.com/angr/angr/issues/1579
|
p = angr.Project("/usr/lib/libc.so.6")
cfg = p.analyses.cfgfast()
[...]
ERROR | 2019-05-23 18:27:06,592 | angr.analyses.cfg.cfg_fast | Decoding error occurred at address 0x447ebe of function 0x447d30.
ERROR | 2019-05-23 18:27:06,593 | angr.analyses.cfg.cfg_fast | Decoding error occurred at address 0x447e99 of function 0x447d30.
ERROR | 2019-05-23 18:27:06,858 | angr.analyses.cfg.cfg_fast | Decoding error occurred at address 0x4c7b45 of function 0x4c7b30.
WARNING | 2019-05-23 18:27:19,295 | claripy.vsa.strided_interval | Reversing a real strided-interval <64>0x1[0x1, 0xffffffffffffffff]R(uninit) is bad
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-2-34d0f86d8755> in <module>
----> 1 cfg = p.analyses.CFGFast()
~/Projects/angr-dev/angr/angr/analyses/analysis.py in __call__(self, *args, **kwargs)
107
108 oself._show_progressbar = show_progressbar
--> 109 oself.__init__(*args, **kwargs)
110 return oself
111
~/Projects/angr-dev/angr/angr/analyses/cfg/cfg_fast.py in __init__(self, binary, objects, regions, pickle_intermediate_results, symbols, function_prologues, resolve_indirect_jumps, force_segment, force_complete_scan, indirect_jump_target_limit, collect_data_references, extra_cross_references, normalize, start_at_entry, function_starts, extra_memory_regions, data_type_guessing_handlers, arch_options, indirect_jump_resolvers, base_state, exclude_sparse_regions, skip_specific_regions, heuristic_plt_resolving, detect_tail_calls, low_priority, cfb, model, start, end, **extra_arch_options)
624
625 # Start working!
--> 626 self._analyze()
627
628 def __getstate__(self):
~/Projects/angr-dev/angr/angr/analyses/forward_analysis.py in _analyze(self)
581 # An example is the CFG recovery.
582
--> 583 self._analysis_core_baremetal()
584
585 else:
~/Projects/angr-dev/angr/angr/analyses/forward_analysis.py in _analysis_core_baremetal(self)
682
683 if not self._job_info_queue:
--> 684 self._job_queue_empty()
685
686 if not self._job_info_queue:
~/Projects/angr-dev/angr/angr/analyses/cfg/cfg_fast.py in _job_queue_empty(self)
1104 # Try to see if there is any indirect jump left to be resolved
1105 if self._resolve_indirect_jumps and self._indirect_jumps_to_resolve:
-> 1106 self._process_unresolved_indirect_jumps()
1107
1108 if self._job_info_queue:
~/Projects/angr-dev/angr/angr/analyses/cfg/cfg_base.py in _process_unresolved_indirect_jumps(self)
2141 if self._low_priority:
2142 self._release_gil(idx, 20, 0.0001)
-> 2143 all_targets |= self._process_one_indirect_jump(jump)
2144
2145 self._indirect_jumps_to_resolve.clear()
~/Projects/angr-dev/angr/angr/analyses/cfg/cfg_base.py in _process_one_indirect_jump(self, jump)
2167 continue
2168
-> 2169 resolved, targets = resolver.resolve(self, jump.addr, jump.func_addr, block, jump.jumpkind)
2170 if resolved:
2171 resolved_by = resolver
~/Projects/angr-dev/angr/angr/analyses/cfg/indirect_jump_resolvers/jumptable.py in resolve(self, cfg, addr, func_addr, block, jumpkind)
499 for target_ in all_targets_copy:
500 for lam in invert_conversion_ops:
--> 501 target_ = lam(target_)
502 all_targets.append(target_)
503 mask = (2 ** self.project.arch.bits) - 1
~/Projects/angr-dev/angr/angr/analyses/cfg/indirect_jump_resolvers/jumptable.py in <lambda>(a)
487 for conversion_op in conversion_ops:
488 if conversion_op is AddressTransferringTypes.SignedExtension32to64:
--> 489 lam = lambda a: (a | 0xffffffff00000000) if a >= 0x80000000 else a
490 elif conversion_op is AddressTransferringTypes.UnsignedExtension32to64:
491 lam = lambda a: a
TypeError: '>=' not supported between instances of 'NoneType' and 'int'
|
TypeError
|
def _hook_register_read(self, state):
reg_read_offset = state.inspect.reg_read_offset
if isinstance(reg_read_offset, claripy.ast.BV):
if reg_read_offset.multivalued:
# Multi-valued register offsets are not supported
l.warning("Multi-valued register offsets are not supported.")
return
reg_read_offset = state.solver.eval(reg_read_offset)
reg_read_length = state.inspect.reg_read_length
if reg_read_offset == state.arch.sp_offset and reg_read_length == state.arch.bytes:
# TODO: make sure the sp is not overwritten by something that we are not tracking
return
# if reg_read_offset == state.arch.bp_offset and reg_read_length == state.arch.bytes:
# # TODO:
var_offset = self._normalize_register_offset(reg_read_offset)
if var_offset not in self.register_region:
# the variable being read doesn't exist before
variable = SimRegisterVariable(
reg_read_offset,
reg_read_length,
ident=self.variable_manager[self.func_addr].next_variable_ident("register"),
region=self.func_addr,
)
self.register_region.add_variable(var_offset, variable)
# record this variable in variable manager
self.variable_manager[self.func_addr].add_variable(
"register", var_offset, variable
)
|
def _hook_register_read(self, state):
reg_read_offset = state.inspect.reg_read_offset
reg_read_length = state.inspect.reg_read_length
if reg_read_offset == state.arch.sp_offset and reg_read_length == state.arch.bytes:
# TODO: make sure the sp is not overwritten by something that we are not tracking
return
# if reg_read_offset == state.arch.bp_offset and reg_read_length == state.arch.bytes:
# # TODO:
var_offset = self._normalize_register_offset(reg_read_offset)
if var_offset not in self.register_region:
# the variable being read doesn't exist before
variable = SimRegisterVariable(
reg_read_offset,
reg_read_length,
ident=self.variable_manager[self.func_addr].next_variable_ident("register"),
region=self.func_addr,
)
self.register_region.add_variable(var_offset, variable)
# record this variable in variable manager
self.variable_manager[self.func_addr].add_variable(
"register", var_offset, variable
)
|
https://github.com/angr/angr/issues/1430
|
In [8]: proj.analyses.VariableRecovery(x)
WARNING | 2019-02-03 17:53:52,086 | angr.keyed_region | Overlapping objects {<0x4022c8[ir_33]|Reg 68, 4B>, <0x4022c8[ir_35]|Reg 68, 4B>}.
WARNING | 2019-02-03 17:53:52,088 | angr.keyed_region | Overlapping objects {<0x4022c8[ir_36]|Reg 40, 4B>, <0x4022c8[ir_28]|Reg 40, 4B>}.
WARNING | 2019-02-03 17:53:52,089 | angr.keyed_region | Overlapping objects {<0x4022c8[ir_37]|Reg 44, 4B>, <0x4022c8[ir_29]|Reg 44, 4B>}.
WARNING | 2019-02-03 17:53:52,089 | angr.keyed_region | Overlapping objects {<0x4022c8[ir_30]|Reg 48, 4B>, <0x4022c8[ir_38]|Reg 48, 4B>}.
WARNING | 2019-02-03 17:53:52,090 | angr.keyed_region | Overlapping objects {<0x4022c8[ir_39]|Reg 52, 4B>, <0x4022c8[ir_31]|Reg 52, 4B>}.
WARNING | 2019-02-03 17:53:52,094 | angr.keyed_region | Overlapping objects {<0x4022c8[ir_15]|Reg 32, 4B>, <0x4022c8[ir_40]|Reg 32, 4B>}.
WARNING | 2019-02-03 17:53:52,098 | angr.keyed_region | Overlapping objects {<0x4022c8[ir_41]|Reg 8, 4B>, <0x4022c8[ir_32]|Reg 8, 4B>}.
WARNING | 2019-02-03 17:53:52,102 | angr.keyed_region | Overlapping objects {<0x4022c8[ir_42]|Reg 152, 4B>, <0x4022c8[ir_34]|Reg 152, 4B>}.
---------------------------------------------------------------------------
ClaripyOperationError Traceback (most recent call last)
<ipython-input-8-de8d6084dd49> in <module>
----> 1 proj.analyses.VariableRecovery(x)
~/angr-dev/angr/angr/analyses/analysis.py in __call__(self, *args, **kwargs)
107
108 oself._show_progressbar = show_progressbar
--> 109 oself.__init__(*args, **kwargs)
110 return oself
111
~/angr-dev/angr/angr/analyses/variable_recovery/variable_recovery.py in __init__(self, func, max_iterations)
402 self._node_iterations = defaultdict(int)
403
--> 404 self._analyze()
405
406 #
~/angr-dev/angr/angr/analyses/forward_analysis.py in _analyze(self)
555 # We have a base graph to follow. Just handle the current job.
556
--> 557 self._analysis_core_graph()
558
559 self._post_analysis()
~/angr-dev/angr/angr/analyses/forward_analysis.py in _analysis_core_graph(self)
578 break
579
--> 580 changed, output_state = self._run_on_node(n, job_state)
581
582 # output state of node n is input state for successors to node n
~/angr-dev/angr/angr/analyses/variable_recovery/variable_recovery.py in _run_on_node(self, node, state)
467 addr=node.addr,
468 size=node.size,
--> 469 opt_level=0 # disable the optimization in order to have
470 # instruction-level analysis results
471 )
~/angr-dev/angr/angr/factory.py in successors(self, *args, **kwargs)
47 """
48
---> 49 return self.project.engines.successors(*args, **kwargs)
50
51 def blank_state(self, **kwargs):
~/angr-dev/angr/angr/engines/hub.py in successors(self, state, addr, jumpkind, default_engine, procedure_engine, engines, **kwargs)
126 for engine in engines:
127 if engine.check(state, **kwargs):
--> 128 r = engine.process(state, **kwargs)
129 if r.processed:
130 return r
~/angr-dev/angr/angr/engines/vex/engine.py in process(self, state, irsb, skip_stmts, last_stmt, whitelist, inline, force_addr, insn_bytes, size, num_inst, traceflags, thumb, extra_stop_points, opt_level, **kwargs)
140 thumb=thumb,
141 extra_stop_points=extra_stop_points,
--> 142 opt_level=opt_level)
143
144 def _check(self, state, *args, **kwargs):
~/angr-dev/angr/angr/engines/engine.py in process(***failed resolving arguments***)
53 successors = new_state._inspect_getattr('sim_successors', successors)
54 try:
---> 55 self._process(new_state, successors, *args, **kwargs)
56 except SimException:
57 if o.EXCEPTION_HANDLING not in old_state.options:
~/angr-dev/angr/angr/engines/vex/engine.py in _process(self, state, successors, irsb, skip_stmts, last_stmt, whitelist, insn_bytes, size, num_inst, traceflags, thumb, extra_stop_points, opt_level)
191
192 try:
--> 193 self._handle_irsb(state, successors, irsb, skip_stmts, last_stmt, whitelist)
194 except SimReliftException as e:
195 state = e.state
~/angr-dev/angr/angr/engines/vex/engine.py in _handle_irsb(self, state, successors, irsb, skip_stmts, last_stmt, whitelist)
270 state.scratch.stmt_idx = stmt_idx
271 state._inspect('statement', BP_BEFORE, statement=stmt_idx)
--> 272 cont = self._handle_statement(state, successors, stmt)
273 state._inspect('statement', BP_AFTER)
274 if not cont:
~/angr-dev/angr/angr/engines/vex/engine.py in _handle_statement(self, state, successors, stmt)
378
379 # process it!
--> 380 s_stmt = translate_stmt(stmt, state)
381 if s_stmt is not None:
382 state.history.extend_actions(s_stmt.actions)
~/angr-dev/angr/angr/engines/vex/statements/__init__.py in translate_stmt(stmt, state)
27 stmt_class = globals()[stmt_name]
28 s = stmt_class(stmt, state)
---> 29 s.process()
30 return s
31 else:
~/angr-dev/angr/angr/engines/vex/statements/base.py in process(self)
39 """
40 # this is where we would choose between different analysis modes
---> 41 self._execute()
42
43 def _execute(self):
~/angr-dev/angr/angr/engines/vex/statements/wrtmp.py in _execute(self)
8 def _execute(self):
9 # get data and track data reads
---> 10 data = self._translate_expr(self.stmt.data)
11 self.state.scratch.store_tmp(self.stmt.tmp, data.expr, data.reg_deps(), data.tmp_deps(),
12 action_holder=self.actions
~/angr-dev/angr/angr/engines/vex/statements/base.py in _translate_expr(self, expr)
46 def _translate_expr(self, expr):
47 """Translates an IRExpr into a SimIRExpr."""
---> 48 e = translate_expr(expr, self.state)
49 self._record_expr(e)
50 return e
~/angr-dev/angr/angr/engines/vex/expressions/__init__.py in translate_expr(expr, state)
12 l.debug("Processing expression %s", expr_name)
13 e = expr_class(expr, state)
---> 14 e.process()
15 return e
16
~/angr-dev/angr/angr/engines/vex/expressions/base.py in process(self)
38
39 # this should change when additional analyses are implemented
---> 40 self._execute()
41
42 self._post_process()
~/angr-dev/angr/angr/engines/vex/expressions/geti.py in _execute(self)
18
19 # get it!
---> 20 self.expr = self.state.registers.load(self.offset, size)
21
22 if self.type.startswith('Ity_F'):
~/angr-dev/angr/angr/storage/memory.py in load(self, addr, size, condition, fallback, add_constraints, action, endness, inspect, disable_actions, ret_on_segv)
802
803 elif self.category == 'reg':
--> 804 self.state._inspect('reg_read', BP_AFTER, reg_read_expr=r)
805 r = self.state._inspect_getattr("reg_read_expr", r)
806
~/angr-dev/angr/angr/sim_state.py in _inspect(self, *args, **kwargs)
257 def _inspect(self, *args, **kwargs):
258 if self.has_plugin('inspect'):
--> 259 self.inspect.action(*args, **kwargs)
260
261 def _inspect_getattr(self, attr, default_value):
~/angr-dev/angr/angr/state_plugins/inspect.py in action(self, event_type, when, **kwargs)
246 if bp.check(self.state, when):
247 l.debug("... FIRE")
--> 248 bp.fire(self.state)
249
250 def make_breakpoint(self, event_type, *args, **kwargs):
~/angr-dev/angr/angr/state_plugins/inspect.py in fire(self, state)
198 "make modifications, then exit this shell to resume your analysis.")
199 else:
--> 200 self.action(state)
201
202 def __repr__(self):
~/angr-dev/angr/angr/analyses/variable_recovery/variable_recovery.py in _hook_register_read(self, state)
144 reg_read_length = state.inspect.reg_read_length
145
--> 146 if reg_read_offset == state.arch.sp_offset and reg_read_length == state.arch.bytes:
147 # TODO: make sure the sp is not overwritten by something that we are not tracking
148 return
~/angr-dev/claripy/claripy/ast/base.py in __bool__(self)
643 constraint solve. This caused tons of issues.
644 """
--> 645 raise ClaripyOperationError('testing Expressions for truthiness does not do what you want, as these expressions can be symbolic')
646
647 def structurally_match(self, o):
ClaripyOperationError: testing Expressions for truthiness does not do what you want, as these expressions can be symbolic
|
ClaripyOperationError
|
def _hook_register_write(self, state):
reg_write_offset = state.inspect.reg_write_offset
if isinstance(reg_write_offset, claripy.ast.BV):
if reg_write_offset.multivalued:
# Multi-valued register offsets are not supported
l.warning("Multi-valued register offsets are not supported.")
return
reg_write_offset = state.solver.eval(reg_write_offset)
if reg_write_offset == state.arch.sp_offset:
# it's updating stack pointer. skip
return
reg_write_expr = state.inspect.reg_write_expr
reg_write_length = len(reg_write_expr) // 8
# annotate it
# reg_write_expr = reg_write_expr.annotate(VariableSourceAnnotation.from_state(state))
state.inspect.reg_write_expr = reg_write_expr
existing_vars = self.variable_manager[self.func_addr].find_variables_by_stmt(
state.scratch.bbl_addr, state.scratch.stmt_idx, "register"
)
if not existing_vars:
# create the variable
variable = SimRegisterVariable(
reg_write_offset,
reg_write_length,
ident=self.variable_manager[self.func_addr].next_variable_ident("register"),
region=self.func_addr,
)
var_offset = self._normalize_register_offset(reg_write_offset)
self.register_region.set_variable(var_offset, variable)
# record this variable in variable manager
self.variable_manager[self.func_addr].set_variable(
"register", var_offset, variable
)
self.variable_manager[self.func_addr].write_to(
variable, 0, self._codeloc_from_state(state)
)
# is it writing a pointer to a stack variable into the register?
# e.g. lea eax, [ebp-0x40]
stack_offset = self._addr_to_stack_offset(reg_write_expr)
if stack_offset is not None:
# it is!
# unfortunately we don't know the size. We use size None for now.
if stack_offset not in self.stack_region:
lea_size = 1
new_var = SimStackVariable(
stack_offset,
lea_size,
base="bp",
ident=self.variable_manager[self.func_addr].next_variable_ident(
"stack"
),
region=self.func_addr,
)
self.stack_region.add_variable(stack_offset, new_var)
# record this variable in variable manager
self.variable_manager[self.func_addr].add_variable(
"stack", stack_offset, new_var
)
base_offset = self.stack_region.get_base_addr(stack_offset)
assert base_offset is not None
for var in self.stack_region.get_variables_by_offset(stack_offset):
self.variable_manager[self.func_addr].reference_at(
var, stack_offset - base_offset, self._codeloc_from_state(state)
)
|
def _hook_register_write(self, state):
reg_write_offset = state.inspect.reg_write_offset
if reg_write_offset == state.arch.sp_offset:
# it's updating stack pointer. skip
return
reg_write_expr = state.inspect.reg_write_expr
reg_write_length = len(reg_write_expr) // 8
# annotate it
# reg_write_expr = reg_write_expr.annotate(VariableSourceAnnotation.from_state(state))
state.inspect.reg_write_expr = reg_write_expr
existing_vars = self.variable_manager[self.func_addr].find_variables_by_stmt(
state.scratch.bbl_addr, state.scratch.stmt_idx, "register"
)
if not existing_vars:
# create the variable
variable = SimRegisterVariable(
reg_write_offset,
reg_write_length,
ident=self.variable_manager[self.func_addr].next_variable_ident("register"),
region=self.func_addr,
)
var_offset = self._normalize_register_offset(reg_write_offset)
self.register_region.set_variable(var_offset, variable)
# record this variable in variable manager
self.variable_manager[self.func_addr].set_variable(
"register", var_offset, variable
)
self.variable_manager[self.func_addr].write_to(
variable, 0, self._codeloc_from_state(state)
)
# is it writing a pointer to a stack variable into the register?
# e.g. lea eax, [ebp-0x40]
stack_offset = self._addr_to_stack_offset(reg_write_expr)
if stack_offset is not None:
# it is!
# unfortunately we don't know the size. We use size None for now.
if stack_offset not in self.stack_region:
lea_size = 1
new_var = SimStackVariable(
stack_offset,
lea_size,
base="bp",
ident=self.variable_manager[self.func_addr].next_variable_ident(
"stack"
),
region=self.func_addr,
)
self.stack_region.add_variable(stack_offset, new_var)
# record this variable in variable manager
self.variable_manager[self.func_addr].add_variable(
"stack", stack_offset, new_var
)
base_offset = self.stack_region.get_base_addr(stack_offset)
assert base_offset is not None
for var in self.stack_region.get_variables_by_offset(stack_offset):
self.variable_manager[self.func_addr].reference_at(
var, stack_offset - base_offset, self._codeloc_from_state(state)
)
|
https://github.com/angr/angr/issues/1430
|
In [8]: proj.analyses.VariableRecovery(x)
WARNING | 2019-02-03 17:53:52,086 | angr.keyed_region | Overlapping objects {<0x4022c8[ir_33]|Reg 68, 4B>, <0x4022c8[ir_35]|Reg 68, 4B>}.
WARNING | 2019-02-03 17:53:52,088 | angr.keyed_region | Overlapping objects {<0x4022c8[ir_36]|Reg 40, 4B>, <0x4022c8[ir_28]|Reg 40, 4B>}.
WARNING | 2019-02-03 17:53:52,089 | angr.keyed_region | Overlapping objects {<0x4022c8[ir_37]|Reg 44, 4B>, <0x4022c8[ir_29]|Reg 44, 4B>}.
WARNING | 2019-02-03 17:53:52,089 | angr.keyed_region | Overlapping objects {<0x4022c8[ir_30]|Reg 48, 4B>, <0x4022c8[ir_38]|Reg 48, 4B>}.
WARNING | 2019-02-03 17:53:52,090 | angr.keyed_region | Overlapping objects {<0x4022c8[ir_39]|Reg 52, 4B>, <0x4022c8[ir_31]|Reg 52, 4B>}.
WARNING | 2019-02-03 17:53:52,094 | angr.keyed_region | Overlapping objects {<0x4022c8[ir_15]|Reg 32, 4B>, <0x4022c8[ir_40]|Reg 32, 4B>}.
WARNING | 2019-02-03 17:53:52,098 | angr.keyed_region | Overlapping objects {<0x4022c8[ir_41]|Reg 8, 4B>, <0x4022c8[ir_32]|Reg 8, 4B>}.
WARNING | 2019-02-03 17:53:52,102 | angr.keyed_region | Overlapping objects {<0x4022c8[ir_42]|Reg 152, 4B>, <0x4022c8[ir_34]|Reg 152, 4B>}.
---------------------------------------------------------------------------
ClaripyOperationError Traceback (most recent call last)
<ipython-input-8-de8d6084dd49> in <module>
----> 1 proj.analyses.VariableRecovery(x)
~/angr-dev/angr/angr/analyses/analysis.py in __call__(self, *args, **kwargs)
107
108 oself._show_progressbar = show_progressbar
--> 109 oself.__init__(*args, **kwargs)
110 return oself
111
~/angr-dev/angr/angr/analyses/variable_recovery/variable_recovery.py in __init__(self, func, max_iterations)
402 self._node_iterations = defaultdict(int)
403
--> 404 self._analyze()
405
406 #
~/angr-dev/angr/angr/analyses/forward_analysis.py in _analyze(self)
555 # We have a base graph to follow. Just handle the current job.
556
--> 557 self._analysis_core_graph()
558
559 self._post_analysis()
~/angr-dev/angr/angr/analyses/forward_analysis.py in _analysis_core_graph(self)
578 break
579
--> 580 changed, output_state = self._run_on_node(n, job_state)
581
582 # output state of node n is input state for successors to node n
~/angr-dev/angr/angr/analyses/variable_recovery/variable_recovery.py in _run_on_node(self, node, state)
467 addr=node.addr,
468 size=node.size,
--> 469 opt_level=0 # disable the optimization in order to have
470 # instruction-level analysis results
471 )
~/angr-dev/angr/angr/factory.py in successors(self, *args, **kwargs)
47 """
48
---> 49 return self.project.engines.successors(*args, **kwargs)
50
51 def blank_state(self, **kwargs):
~/angr-dev/angr/angr/engines/hub.py in successors(self, state, addr, jumpkind, default_engine, procedure_engine, engines, **kwargs)
126 for engine in engines:
127 if engine.check(state, **kwargs):
--> 128 r = engine.process(state, **kwargs)
129 if r.processed:
130 return r
~/angr-dev/angr/angr/engines/vex/engine.py in process(self, state, irsb, skip_stmts, last_stmt, whitelist, inline, force_addr, insn_bytes, size, num_inst, traceflags, thumb, extra_stop_points, opt_level, **kwargs)
140 thumb=thumb,
141 extra_stop_points=extra_stop_points,
--> 142 opt_level=opt_level)
143
144 def _check(self, state, *args, **kwargs):
~/angr-dev/angr/angr/engines/engine.py in process(***failed resolving arguments***)
53 successors = new_state._inspect_getattr('sim_successors', successors)
54 try:
---> 55 self._process(new_state, successors, *args, **kwargs)
56 except SimException:
57 if o.EXCEPTION_HANDLING not in old_state.options:
~/angr-dev/angr/angr/engines/vex/engine.py in _process(self, state, successors, irsb, skip_stmts, last_stmt, whitelist, insn_bytes, size, num_inst, traceflags, thumb, extra_stop_points, opt_level)
191
192 try:
--> 193 self._handle_irsb(state, successors, irsb, skip_stmts, last_stmt, whitelist)
194 except SimReliftException as e:
195 state = e.state
~/angr-dev/angr/angr/engines/vex/engine.py in _handle_irsb(self, state, successors, irsb, skip_stmts, last_stmt, whitelist)
270 state.scratch.stmt_idx = stmt_idx
271 state._inspect('statement', BP_BEFORE, statement=stmt_idx)
--> 272 cont = self._handle_statement(state, successors, stmt)
273 state._inspect('statement', BP_AFTER)
274 if not cont:
~/angr-dev/angr/angr/engines/vex/engine.py in _handle_statement(self, state, successors, stmt)
378
379 # process it!
--> 380 s_stmt = translate_stmt(stmt, state)
381 if s_stmt is not None:
382 state.history.extend_actions(s_stmt.actions)
~/angr-dev/angr/angr/engines/vex/statements/__init__.py in translate_stmt(stmt, state)
27 stmt_class = globals()[stmt_name]
28 s = stmt_class(stmt, state)
---> 29 s.process()
30 return s
31 else:
~/angr-dev/angr/angr/engines/vex/statements/base.py in process(self)
39 """
40 # this is where we would choose between different analysis modes
---> 41 self._execute()
42
43 def _execute(self):
~/angr-dev/angr/angr/engines/vex/statements/wrtmp.py in _execute(self)
8 def _execute(self):
9 # get data and track data reads
---> 10 data = self._translate_expr(self.stmt.data)
11 self.state.scratch.store_tmp(self.stmt.tmp, data.expr, data.reg_deps(), data.tmp_deps(),
12 action_holder=self.actions
~/angr-dev/angr/angr/engines/vex/statements/base.py in _translate_expr(self, expr)
46 def _translate_expr(self, expr):
47 """Translates an IRExpr into a SimIRExpr."""
---> 48 e = translate_expr(expr, self.state)
49 self._record_expr(e)
50 return e
~/angr-dev/angr/angr/engines/vex/expressions/__init__.py in translate_expr(expr, state)
12 l.debug("Processing expression %s", expr_name)
13 e = expr_class(expr, state)
---> 14 e.process()
15 return e
16
~/angr-dev/angr/angr/engines/vex/expressions/base.py in process(self)
38
39 # this should change when additional analyses are implemented
---> 40 self._execute()
41
42 self._post_process()
~/angr-dev/angr/angr/engines/vex/expressions/geti.py in _execute(self)
18
19 # get it!
---> 20 self.expr = self.state.registers.load(self.offset, size)
21
22 if self.type.startswith('Ity_F'):
~/angr-dev/angr/angr/storage/memory.py in load(self, addr, size, condition, fallback, add_constraints, action, endness, inspect, disable_actions, ret_on_segv)
802
803 elif self.category == 'reg':
--> 804 self.state._inspect('reg_read', BP_AFTER, reg_read_expr=r)
805 r = self.state._inspect_getattr("reg_read_expr", r)
806
~/angr-dev/angr/angr/sim_state.py in _inspect(self, *args, **kwargs)
257 def _inspect(self, *args, **kwargs):
258 if self.has_plugin('inspect'):
--> 259 self.inspect.action(*args, **kwargs)
260
261 def _inspect_getattr(self, attr, default_value):
~/angr-dev/angr/angr/state_plugins/inspect.py in action(self, event_type, when, **kwargs)
246 if bp.check(self.state, when):
247 l.debug("... FIRE")
--> 248 bp.fire(self.state)
249
250 def make_breakpoint(self, event_type, *args, **kwargs):
~/angr-dev/angr/angr/state_plugins/inspect.py in fire(self, state)
198 "make modifications, then exit this shell to resume your analysis.")
199 else:
--> 200 self.action(state)
201
202 def __repr__(self):
~/angr-dev/angr/angr/analyses/variable_recovery/variable_recovery.py in _hook_register_read(self, state)
144 reg_read_length = state.inspect.reg_read_length
145
--> 146 if reg_read_offset == state.arch.sp_offset and reg_read_length == state.arch.bytes:
147 # TODO: make sure the sp is not overwritten by something that we are not tracking
148 return
~/angr-dev/claripy/claripy/ast/base.py in __bool__(self)
643 constraint solve. This caused tons of issues.
644 """
--> 645 raise ClaripyOperationError('testing Expressions for truthiness does not do what you want, as these expressions can be symbolic')
646
647 def structurally_match(self, o):
ClaripyOperationError: testing Expressions for truthiness does not do what you want, as these expressions can be symbolic
|
ClaripyOperationError
|
def _process_irrational_function_starts(
self, functions, predetermined_function_addrs, blockaddr_to_function
):
"""
Functions that are identified via function prologues can be starting after the actual beginning of the function.
For example, the following function (with an incorrect start) might exist after a CFG recovery:
sub_8049f70:
push esi
sub_8049f71:
sub esp, 0A8h
mov esi, [esp+0ACh+arg_0]
mov [esp+0ACh+var_88], 0
If the following conditions are met, we will remove the second function and merge it into the first function:
- The second function is not called by other code.
- The first function has only one jumpout site, which points to the second function.
- The first function and the second function are adjacent.
:param FunctionManager functions: All functions that angr recovers.
:return: A set of addresses of all removed functions.
:rtype: set
"""
addrs = sorted(functions.keys())
functions_to_remove = set()
adjusted_cfgnodes = set()
for addr_0, addr_1 in zip(addrs[:-1], addrs[1:]):
if addr_1 in predetermined_function_addrs:
continue
if self.project.is_hooked(addr_0) or self.project.is_hooked(addr_1):
continue
func_0 = functions[addr_0]
if len(func_0.block_addrs) == 1:
block = next(func_0.blocks)
if block.vex.jumpkind not in ("Ijk_Boring", "Ijk_InvalICache"):
continue
# Skip alignment blocks
if self._is_noop_block(self.project.arch, block):
continue
target = block.vex.next
if type(target) is pyvex.IRExpr.Const: # pylint: disable=unidiomatic-typecheck
target_addr = target.con.value
elif type(target) in (pyvex.IRConst.U32, pyvex.IRConst.U64): # pylint: disable=unidiomatic-typecheck
target_addr = target.value
elif type(target) is int: # pylint: disable=unidiomatic-typecheck
target_addr = target
else:
continue
if target_addr != addr_1:
continue
cfgnode_0 = self.get_any_node(addr_0)
cfgnode_1 = self.get_any_node(addr_1)
# Are func_0 adjacent to func_1?
if cfgnode_0.addr + cfgnode_0.size != addr_1:
continue
# Merge block addr_0 and block addr_1
l.debug("Merging function %#x into %#x.", addr_1, addr_0)
self._merge_cfgnodes(cfgnode_0, cfgnode_1)
adjusted_cfgnodes.add(cfgnode_0)
adjusted_cfgnodes.add(cfgnode_1)
# Merge it
func_1 = functions[addr_1]
for block_addr in func_1.block_addrs:
if block_addr == addr_1:
# Skip addr_1 (since it has been merged to the preceding block)
continue
merge_with = self._addr_to_function(
addr_0, blockaddr_to_function, functions
)
blockaddr_to_function[block_addr] = merge_with
functions_to_remove.add(addr_1)
for to_remove in functions_to_remove:
del functions[to_remove]
return functions_to_remove, adjusted_cfgnodes
|
def _process_irrational_function_starts(
self, functions, predetermined_function_addrs, blockaddr_to_function
):
"""
Functions that are identified via function prologues can be starting after the actual beginning of the function.
For example, the following function (with an incorrect start) might exist after a CFG recovery:
sub_8049f70:
push esi
sub_8049f71:
sub esp, 0A8h
mov esi, [esp+0ACh+arg_0]
mov [esp+0ACh+var_88], 0
If the following conditions are met, we will remove the second function and merge it into the first function:
- The second function is not called by other code.
- The first function has only one jumpout site, which points to the second function.
- The first function and the second function are adjacent.
:param FunctionManager functions: All functions that angr recovers.
:return: A set of addresses of all removed functions.
:rtype: set
"""
addrs = sorted(functions.keys())
functions_to_remove = set()
adjusted_cfgnodes = set()
for addr_0, addr_1 in zip(addrs[:-1], addrs[1:]):
if addr_1 in predetermined_function_addrs:
continue
if self.project.is_hooked(addr_0) or self.project.is_hooked(addr_1):
continue
func_0 = functions[addr_0]
if len(func_0.block_addrs) == 1:
block = next(func_0.blocks)
if block.vex.jumpkind not in ("Ijk_Boring", "Ijk_InvalICache"):
continue
# Skip alignment blocks
if self._is_noop_block(self.project.arch, block):
continue
target = block.vex.next
if type(target) is pyvex.IRExpr.Const: # pylint: disable=unidiomatic-typecheck
target_addr = target.con.value
elif type(target) in (pyvex.IRConst.U32, pyvex.IRConst.U64): # pylint: disable=unidiomatic-typecheck
target_addr = target.value
elif type(target) is int: # pylint: disable=unidiomatic-typecheck
target_addr = target
else:
continue
if target_addr != addr_1:
continue
# Are func_0 adjacent to func_1?
if block.addr + block.size != addr_1:
continue
l.debug("Merging function %#x into %#x.", addr_1, addr_0)
# Merge block addr_0 and block addr_1
cfgnode_0 = self.get_any_node(addr_0)
cfgnode_1 = self.get_any_node(addr_1)
self._merge_cfgnodes(cfgnode_0, cfgnode_1)
adjusted_cfgnodes.add(cfgnode_0)
adjusted_cfgnodes.add(cfgnode_1)
# Merge it
func_1 = functions[addr_1]
for block_addr in func_1.block_addrs:
if block_addr == addr_1:
# Skip addr_1 (since it has been merged to the preceding block)
continue
merge_with = self._addr_to_function(
addr_0, blockaddr_to_function, functions
)
blockaddr_to_function[block_addr] = merge_with
functions_to_remove.add(addr_1)
for to_remove in functions_to_remove:
del functions[to_remove]
return functions_to_remove, adjusted_cfgnodes
|
https://github.com/angr/angr/issues/1329
|
Traceback (most recent call last):
File "test_cfg_hook.py", line 27, in <module>
test()
File "test_cfg_hook.py", line 23, in test
_ = p.analyses.CFGFast(show_progressbar=True)
File "/home/lrxiao/hase/venv/lib/python3.6/site-packages/angr/analyses/analysis.py", line 108, in __call__
oself.__init__(*args, **kwargs)
File "/home/lrxiao/hase/venv/lib/python3.6/site-packages/angr/analyses/cfg/cfg_fast.py", line 1026, in __init__
self._analyze()
File "/home/lrxiao/hase/venv/lib/python3.6/site-packages/angr/analyses/forward_analysis.py", line 559, in _analyze
self._post_analysis()
File "/home/lrxiao/hase/venv/lib/python3.6/site-packages/angr/analyses/cfg/cfg_fast.py", line 1531, in _post_analysis
self.make_functions()
File "/home/lrxiao/hase/venv/lib/python3.6/site-packages/angr/analyses/cfg/cfg_base.py", line 1437, in make_functions
blockaddr_to_function
File "/home/lrxiao/hase/venv/lib/python3.6/site-packages/angr/analyses/cfg/cfg_base.py", line 1741, in _process_irrational_function_starts
self._merge_cfgnodes(cfgnode_0, cfgnode_1)
File "/home/lrxiao/hase/venv/lib/python3.6/site-packages/angr/analyses/cfg/cfg_base.py", line 526, in _merge_cfgnodes
assert cfgnode_0.addr + cfgnode_0.size == cfgnode_1.addr
AssertionError
|
AssertionError
|
def _process_irrational_function_starts(
self, functions, predetermined_function_addrs, blockaddr_to_function
):
"""
Functions that are identified via function prologues can be starting after the actual beginning of the function.
For example, the following function (with an incorrect start) might exist after a CFG recovery:
sub_8049f70:
push esi
sub_8049f71:
sub esp, 0A8h
mov esi, [esp+0ACh+arg_0]
mov [esp+0ACh+var_88], 0
If the following conditions are met, we will remove the second function and merge it into the first function:
- The second function is not called by other code.
- The first function has only one jumpout site, which points to the second function.
- The first function and the second function are adjacent.
:param FunctionManager functions: All functions that angr recovers.
:return: A set of addresses of all removed functions.
:rtype: set
"""
addrs = sorted(
k
for k in functions.keys()
if not self.project.is_hooked(k) and not self.project.simos.is_syscall_addr(k)
)
functions_to_remove = set()
adjusted_cfgnodes = set()
for addr_0, addr_1 in zip(addrs[:-1], addrs[1:]):
if addr_1 in predetermined_function_addrs:
continue
func_0 = functions[addr_0]
if len(func_0.block_addrs) == 1:
block = next(func_0.blocks)
if block.vex.jumpkind not in ("Ijk_Boring", "Ijk_InvalICache"):
continue
# Skip alignment blocks
if self._is_noop_block(self.project.arch, block):
continue
target = block.vex.next
if type(target) is pyvex.IRExpr.Const: # pylint: disable=unidiomatic-typecheck
target_addr = target.con.value
elif type(target) in (pyvex.IRConst.U32, pyvex.IRConst.U64): # pylint: disable=unidiomatic-typecheck
target_addr = target.value
elif type(target) is int: # pylint: disable=unidiomatic-typecheck
target_addr = target
else:
continue
if target_addr != addr_1:
continue
cfgnode_0 = self.get_any_node(addr_0)
cfgnode_1 = self.get_any_node(addr_1)
# Are func_0 adjacent to func_1?
if cfgnode_0.addr + cfgnode_0.size != addr_1:
continue
# Merge block addr_0 and block addr_1
l.debug("Merging function %#x into %#x.", addr_1, addr_0)
self._merge_cfgnodes(cfgnode_0, cfgnode_1)
adjusted_cfgnodes.add(cfgnode_0)
adjusted_cfgnodes.add(cfgnode_1)
# Merge it
func_1 = functions[addr_1]
for block_addr in func_1.block_addrs:
if block_addr == addr_1:
# Skip addr_1 (since it has been merged to the preceding block)
continue
merge_with = self._addr_to_function(
addr_0, blockaddr_to_function, functions
)
blockaddr_to_function[block_addr] = merge_with
functions_to_remove.add(addr_1)
for to_remove in functions_to_remove:
del functions[to_remove]
return functions_to_remove, adjusted_cfgnodes
|
def _process_irrational_function_starts(
self, functions, predetermined_function_addrs, blockaddr_to_function
):
"""
Functions that are identified via function prologues can be starting after the actual beginning of the function.
For example, the following function (with an incorrect start) might exist after a CFG recovery:
sub_8049f70:
push esi
sub_8049f71:
sub esp, 0A8h
mov esi, [esp+0ACh+arg_0]
mov [esp+0ACh+var_88], 0
If the following conditions are met, we will remove the second function and merge it into the first function:
- The second function is not called by other code.
- The first function has only one jumpout site, which points to the second function.
- The first function and the second function are adjacent.
:param FunctionManager functions: All functions that angr recovers.
:return: A set of addresses of all removed functions.
:rtype: set
"""
addrs = sorted(functions.keys())
functions_to_remove = set()
adjusted_cfgnodes = set()
for addr_0, addr_1 in zip(addrs[:-1], addrs[1:]):
if addr_1 in predetermined_function_addrs:
continue
if self.project.is_hooked(addr_0) or self.project.is_hooked(addr_1):
continue
func_0 = functions[addr_0]
if len(func_0.block_addrs) == 1:
block = next(func_0.blocks)
if block.vex.jumpkind not in ("Ijk_Boring", "Ijk_InvalICache"):
continue
# Skip alignment blocks
if self._is_noop_block(self.project.arch, block):
continue
target = block.vex.next
if type(target) is pyvex.IRExpr.Const: # pylint: disable=unidiomatic-typecheck
target_addr = target.con.value
elif type(target) in (pyvex.IRConst.U32, pyvex.IRConst.U64): # pylint: disable=unidiomatic-typecheck
target_addr = target.value
elif type(target) is int: # pylint: disable=unidiomatic-typecheck
target_addr = target
else:
continue
if target_addr != addr_1:
continue
cfgnode_0 = self.get_any_node(addr_0)
cfgnode_1 = self.get_any_node(addr_1)
# Are func_0 adjacent to func_1?
if cfgnode_0.addr + cfgnode_0.size != addr_1:
continue
# Merge block addr_0 and block addr_1
l.debug("Merging function %#x into %#x.", addr_1, addr_0)
self._merge_cfgnodes(cfgnode_0, cfgnode_1)
adjusted_cfgnodes.add(cfgnode_0)
adjusted_cfgnodes.add(cfgnode_1)
# Merge it
func_1 = functions[addr_1]
for block_addr in func_1.block_addrs:
if block_addr == addr_1:
# Skip addr_1 (since it has been merged to the preceding block)
continue
merge_with = self._addr_to_function(
addr_0, blockaddr_to_function, functions
)
blockaddr_to_function[block_addr] = merge_with
functions_to_remove.add(addr_1)
for to_remove in functions_to_remove:
del functions[to_remove]
return functions_to_remove, adjusted_cfgnodes
|
https://github.com/angr/angr/issues/1333
|
Traceback (most recent call last):
File "cfg_fast.py", line 61, in <module>
analyze(sys.argv[1])
File "cfg_fast.py", line 30, in analyze
cfg = proj.analyses.CFGFast()
File "/home/wgh/angr-dev/angr/angr/analyses/analysis.py", line 108, in __call__
oself.__init__(*args, **kwargs)
File "/home/wgh/angr-dev/angr/angr/analyses/cfg/cfg_fast.py", line 1026, in __init__
self._analyze()
File "/home/wgh/angr-dev/angr/angr/analyses/forward_analysis.py", line 559, in _analyze
self._post_analysis()
File "/home/wgh/angr-dev/angr/angr/analyses/cfg/cfg_fast.py", line 1531, in _post_analysis
self.make_functions()
File "/home/wgh/angr-dev/angr/angr/analyses/cfg/cfg_base.py", line 1439, in make_functions
blockaddr_to_function
File "/home/wgh/angr-dev/angr/angr/analyses/cfg/cfg_base.py", line 1711, in _process_irrational_function_starts
if block.vex.jumpkind not in ('Ijk_Boring', 'Ijk_InvalICache'):
File "/home/wgh/angr-dev/angr/angr/block.py", line 152, in vex
collect_data_refs=self._collect_data_refs,
File "/home/wgh/angr-dev/angr/angr/engines/vex/engine.py", line 568, in lift
raise SimEngineError("No bytes in memory for block starting at %#x." % addr)
angr.errors.SimEngineError: No bytes in memory for block starting at 0x3000002.
|
angr.errors.SimEngineError
|
def _run_on_node(self, node, state):
"""
:param angr.Block node:
:param VariableRecoveryState state:
:return:
"""
l.debug(
"Analyzing block %#x, iteration %d.", node.addr, self._node_iterations[node]
)
concrete_state = state.get_concrete_state(node.addr)
if concrete_state is None:
# didn't find any state going to here
l.error("_run_on_node(): cannot find any state for address %#x.", node.addr)
return False, state
state = state.copy()
if self._node_iterations[node] >= self._max_iterations:
l.debug(
"Skip node %s as we have iterated %d times on it.",
node,
self._node_iterations[node],
)
return False, state
state.register_callbacks([concrete_state])
successors = self.project.factory.successors(
concrete_state,
addr=node.addr,
size=node.size,
opt_level=0, # disable the optimization in order to have
# instruction-level analysis results
)
output_states = successors.all_successors
state.concrete_states = [state for state in output_states if not state.ip.symbolic]
self._node_to_state[node.addr] = state
self._node_iterations[node] += 1
return True, state
|
def _run_on_node(self, node, state):
"""
:param angr.Block node:
:param VariableRecoveryState state:
:return:
"""
l.debug(
"Analyzing block %#x, iteration %d.", node.addr, self._node_iterations[node]
)
concrete_state = state.get_concrete_state(node.addr)
if concrete_state is None:
# didn't find any state going to here
l.error("_run_on_node(): cannot find any state for address %#x.", node.addr)
return False, state
state = state.copy()
if self._node_iterations[node] >= self._max_iterations:
l.debug(
"Skip node %s as we have iterated %d times on it.",
node,
self._node_iterations[node],
)
return False, state
state.register_callbacks([concrete_state])
successors = self.project.factory.successors(
concrete_state,
addr=node.addr,
size=node.size,
opt_level=0, # disable the optimization in order to have
# instruction-level analysis results
)
output_states = successors.all_successors
state.concrete_states = output_states
self._node_to_state[node.addr] = state
self._node_iterations[node] += 1
return True, state
|
https://github.com/angr/angr/issues/1264
|
Traceback (most recent call last):
File "angr-dev/blah.py", line 4, in <module>
vr = p.analyses.VariableRecovery(cfg.functions.function(name='main'))
File "angr-dev/angr/angr/analyses/analysis.py", line 108, in __call__
oself.__init__(*args, **kwargs)
File "angr-dev/angr/angr/analyses/variable_recovery/variable_recovery.py", line 416, in __init__
self._analyze()
File "angr-dev/angr/angr/analyses/forward_analysis.py", line 557, in _analyze
self._analysis_core_graph()
File "angr-dev/angr/angr/analyses/forward_analysis.py", line 580, in _analysis_core_graph
changed, output_state = self._run_on_node(n, job_state)
File "angr-dev/angr/angr/analyses/variable_recovery/variable_recovery.py", line 461, in _run_on_node
concrete_state = state.get_concrete_state(node.addr)
File "angr-dev/angr/angr/analyses/variable_recovery/variable_recovery.py", line 62, in get_concrete_state
if s.ip._model_concrete.value == addr:
File "angr-dev/claripy/claripy/ast/base.py", line 951, in __getattr__
raise AttributeError(a)
AttributeError: value
|
AttributeError
|
def _handle_Unop(self, expr):
handler = None
# All conversions are handled by the Conversion handler
simop = vex_operations.get(expr.op)
if simop is not None and simop.op_attrs["conversion"]:
handler = "_handle_Conversion"
# Notice order of "Not" comparisons
elif expr.op == "Iop_Not1":
handler = "_handle_Not1"
elif expr.op.startswith("Iop_Not"):
handler = "_handle_Not"
if handler is not None and hasattr(self, handler):
return getattr(self, handler)(expr)
else:
self.l.error("Unsupported Unop %s.", expr.op)
return None
|
def _handle_Unop(self, expr):
handler = None
simop = vex_operations[expr.op]
if simop.op_attrs["conversion"]:
handler = "_handle_Conversion"
# Notice order of "Not" comparisons
elif expr.op == "Iop_Not1":
handler = "_handle_Not1"
elif expr.op.startswith("Iop_Not"):
handler = "_handle_Not"
if handler is not None and hasattr(self, handler):
return getattr(self, handler)(expr)
else:
self.l.error("Unsupported Unop %s.", expr.op)
return None
|
https://github.com/angr/angr/issues/1260
|
Traceback (most recent call last):
File "$HOME/angr-dev/angr-management/angrmanagement/ui/widgets/qfunction_table.py", line 227, in _on_function_selected
self._selection_callback(selected_func)
File "$HOME/angr-dev/angr-management/angrmanagement/ui/views/functions_view.py", line 41, in _on_function_selected
self.workspace.on_function_selected(function)
File "$HOME/angr-dev/angr-management/angrmanagement/ui/workspace.py", line 57, in on_function_selected
self.views_by_category['disassembly'][0].display_function(function)
File "$HOME/angr-dev/angr-management/angrmanagement/ui/views/disassembly_view.py", line 171, in display_function
self._display_function(function)
File "$HOME/angr-dev/angr-management/angrmanagement/ui/views/disassembly_view.py", line 343, in _display_function
vr = self.workspace.instance.project.analyses.VariableRecoveryFast(the_func)
File "$HOME/angr-dev/angr/angr/analyses/analysis.py", line 108, in __call__
oself.__init__(*args, **kwargs)
File "$HOME/angr-dev/angr/angr/analyses/variable_recovery/variable_recovery_fast.py", line 510, in __init__
self._analyze()
File "$HOME/angr-dev/angr/angr/analyses/forward_analysis.py", line 557, in _analyze
self._analysis_core_graph()
File "$HOME/angr-dev/angr/angr/analyses/forward_analysis.py", line 580, in _analysis_core_graph
changed, output_state = self._run_on_node(n, job_state)
File "$HOME/angr-dev/angr/angr/analyses/variable_recovery/variable_recovery_fast.py", line 587, in _run_on_node
self._process_block(state, block)
File "$HOME/angr-dev/angr/angr/analyses/variable_recovery/variable_recovery_fast.py", line 625, in _process_block
processor.process(state, block=block, fail_fast=self._fail_fast)
File "$HOME/angr-dev/angr/angr/analyses/variable_recovery/variable_recovery_fast.py", line 86, in process
self._process(state, None, block=kwargs.pop('block', None))
File "$HOME/angr-dev/angr/angr/analyses/variable_recovery/variable_recovery_fast.py", line 96, in _process
super(SimEngineVR, self)._process(state, successors, block=block)
File "$HOME/angr-dev/angr/angr/engines/light/engine.py", line 59, in _process
self._process_Stmt()
File "$HOME/angr-dev/angr/angr/engines/light/engine.py", line 74, in _process_Stmt
self._handle_Stmt(stmt)
File "$HOME/angr-dev/angr/angr/engines/light/engine.py", line 97, in _handle_Stmt
getattr(self, handler)(stmt)
File "$HOME/angr-dev/angr/angr/engines/light/engine.py", line 103, in _handle_WrTmp
data = self._expr(stmt.data)
File "$HOME/angr-dev/angr/angr/engines/light/engine.py", line 129, in _expr
return getattr(self, handler)(expr)
File "$HOME/angr-dev/angr/angr/engines/light/engine.py", line 149, in _handle_Unop
simop = vex_operations[expr.op]
KeyError: 'Iop_Sqrt64F0x2'
|
KeyError
|
def slice_graph(graph, node, frontier, include_frontier=False):
"""
Generate a slice of the graph from the head node to the given frontier.
:param networkx.DiGraph graph: The graph to work on.
:param node: The starting node in the graph.
:param frontier: A list of frontier nodes.
:param bool include_frontier: Whether the frontier nodes are included in the slice or not.
:return: A subgraph.
:rtype: networkx.DiGraph
"""
subgraph = networkx.DiGraph()
for frontier_node in frontier:
for simple_path in networkx.all_simple_paths(graph, node, frontier_node):
for src, dst in zip(simple_path, simple_path[1:]):
if include_frontier or (src not in frontier and dst not in frontier):
subgraph.add_edge(src, dst)
if not list(subgraph.nodes):
# HACK: FIXME: for infinite loop nodes, this would return an empty set, so we include the loop body itself
# Make sure this makes sense (EDG thinks it does)
if (node, node) in graph.edges:
subgraph.add_edge(node, node)
return subgraph
|
def slice_graph(graph, node, frontier, include_frontier=False):
"""
Generate a slice of the graph from the head node to the given frontier.
:param networkx.DiGraph graph: The graph to work on.
:param node: The starting node in the graph.
:param frontier: A list of frontier nodes.
:param bool include_frontier: Whether the frontier nodes are included in the slice or not.
:return: A subgraph.
:rtype: networkx.DiGraph
"""
subgraph = networkx.DiGraph()
for frontier_node in frontier:
for simple_path in networkx.all_simple_paths(graph, node, frontier_node):
for src, dst in zip(simple_path, simple_path[1:]):
if include_frontier or (src not in frontier and dst not in frontier):
subgraph.add_edge(src, dst)
return subgraph
|
https://github.com/angr/angr/issues/1259
|
Traceback (most recent call last):
File "$HOME/angr-dev/angr-management/angrmanagement/ui/widgets/qfunction_table.py", line 227, in _on_function_selected
self._selection_callback(selected_func)
File "$HOME/angr-dev/angr-management/angrmanagement/ui/views/functions_view.py", line 41, in _on_function_selected
self.workspace.on_function_selected(function)
File "$HOME/angr-dev/angr-management/angrmanagement/ui/workspace.py", line 57, in on_function_selected
self.views_by_category['disassembly'][0].display_function(function)
File "$HOME/angr-dev/angr-management/angrmanagement/ui/views/disassembly_view.py", line 171, in display_function
self._display_function(function)
File "$HOME/angr-dev/angr-management/angrmanagement/ui/views/disassembly_view.py", line 343, in _display_function
vr = self.workspace.instance.project.analyses.VariableRecoveryFast(the_func)
File "$HOME/angr-dev/angr/angr/analyses/analysis.py", line 108, in __call__
oself.__init__(*args, **kwargs)
File "$HOME/angr-dev/angr/angr/analyses/variable_recovery/variable_recovery_fast.py", line 510, in __init__
self._analyze()
File "$HOME/angr-dev/angr/angr/analyses/forward_analysis.py", line 557, in _analyze
self._analysis_core_graph()
File "$HOME/angr-dev/angr/angr/analyses/forward_analysis.py", line 583, in _analysis_core_graph
self._add_input_state(n, output_state)
File "$HOME/angr-dev/angr/angr/analyses/forward_analysis.py", line 605, in _add_input_state
self._state_map[succ] = self._merge_states(succ, *([ self._state_map[succ], input_state ]))
File "$HOME/angr-dev/angr/angr/analyses/variable_recovery/variable_recovery_fast.py", line 551, in _merge_states
return states[0].merge(states[1], successor=node.addr)
File "$HOME/angr-dev/angr/angr/analyses/variable_recovery/variable_recovery_fast.py", line 448, in merge
processor_state=self.processor_state.copy().merge(other.processor_state),
File "$HOME/angr-dev/angr/angr/analyses/variable_recovery/variable_recovery_fast.py", line 53, in merge
self.bp = max(self.bp, other.bp)
TypeError: '>' not supported between instances of 'NoneType' and 'NoneType'
|
TypeError
|
def _find_initial_loop_nodes(self, graph, head):
# TODO optimize
latching_nodes = set(
[s for s, t in dfs_back_edges(graph, self._start_node) if t == head]
)
loop_subgraph = self.slice_graph(graph, head, latching_nodes, include_frontier=True)
nodes = set(loop_subgraph.nodes())
return nodes
|
def _find_initial_loop_nodes(self, graph, head):
# TODO optimize
latching_nodes = set(
[s for s, t in dfs_back_edges(graph, self._start_node) if t == head]
)
loop_subgraph = self.slice_graph(graph, head, latching_nodes, include_frontier=True)
return set(loop_subgraph.nodes())
|
https://github.com/angr/angr/issues/1259
|
Traceback (most recent call last):
File "$HOME/angr-dev/angr-management/angrmanagement/ui/widgets/qfunction_table.py", line 227, in _on_function_selected
self._selection_callback(selected_func)
File "$HOME/angr-dev/angr-management/angrmanagement/ui/views/functions_view.py", line 41, in _on_function_selected
self.workspace.on_function_selected(function)
File "$HOME/angr-dev/angr-management/angrmanagement/ui/workspace.py", line 57, in on_function_selected
self.views_by_category['disassembly'][0].display_function(function)
File "$HOME/angr-dev/angr-management/angrmanagement/ui/views/disassembly_view.py", line 171, in display_function
self._display_function(function)
File "$HOME/angr-dev/angr-management/angrmanagement/ui/views/disassembly_view.py", line 343, in _display_function
vr = self.workspace.instance.project.analyses.VariableRecoveryFast(the_func)
File "$HOME/angr-dev/angr/angr/analyses/analysis.py", line 108, in __call__
oself.__init__(*args, **kwargs)
File "$HOME/angr-dev/angr/angr/analyses/variable_recovery/variable_recovery_fast.py", line 510, in __init__
self._analyze()
File "$HOME/angr-dev/angr/angr/analyses/forward_analysis.py", line 557, in _analyze
self._analysis_core_graph()
File "$HOME/angr-dev/angr/angr/analyses/forward_analysis.py", line 583, in _analysis_core_graph
self._add_input_state(n, output_state)
File "$HOME/angr-dev/angr/angr/analyses/forward_analysis.py", line 605, in _add_input_state
self._state_map[succ] = self._merge_states(succ, *([ self._state_map[succ], input_state ]))
File "$HOME/angr-dev/angr/angr/analyses/variable_recovery/variable_recovery_fast.py", line 551, in _merge_states
return states[0].merge(states[1], successor=node.addr)
File "$HOME/angr-dev/angr/angr/analyses/variable_recovery/variable_recovery_fast.py", line 448, in merge
processor_state=self.processor_state.copy().merge(other.processor_state),
File "$HOME/angr-dev/angr/angr/analyses/variable_recovery/variable_recovery_fast.py", line 53, in merge
self.bp = max(self.bp, other.bp)
TypeError: '>' not supported between instances of 'NoneType' and 'NoneType'
|
TypeError
|
def _ail_handle_Const(self, expr):
return DataSet(expr, expr.bits)
|
def _ail_handle_Const(self, expr):
return expr
|
https://github.com/angr/angr/issues/1259
|
Traceback (most recent call last):
File "$HOME/angr-dev/angr-management/angrmanagement/ui/widgets/qfunction_table.py", line 227, in _on_function_selected
self._selection_callback(selected_func)
File "$HOME/angr-dev/angr-management/angrmanagement/ui/views/functions_view.py", line 41, in _on_function_selected
self.workspace.on_function_selected(function)
File "$HOME/angr-dev/angr-management/angrmanagement/ui/workspace.py", line 57, in on_function_selected
self.views_by_category['disassembly'][0].display_function(function)
File "$HOME/angr-dev/angr-management/angrmanagement/ui/views/disassembly_view.py", line 171, in display_function
self._display_function(function)
File "$HOME/angr-dev/angr-management/angrmanagement/ui/views/disassembly_view.py", line 343, in _display_function
vr = self.workspace.instance.project.analyses.VariableRecoveryFast(the_func)
File "$HOME/angr-dev/angr/angr/analyses/analysis.py", line 108, in __call__
oself.__init__(*args, **kwargs)
File "$HOME/angr-dev/angr/angr/analyses/variable_recovery/variable_recovery_fast.py", line 510, in __init__
self._analyze()
File "$HOME/angr-dev/angr/angr/analyses/forward_analysis.py", line 557, in _analyze
self._analysis_core_graph()
File "$HOME/angr-dev/angr/angr/analyses/forward_analysis.py", line 583, in _analysis_core_graph
self._add_input_state(n, output_state)
File "$HOME/angr-dev/angr/angr/analyses/forward_analysis.py", line 605, in _add_input_state
self._state_map[succ] = self._merge_states(succ, *([ self._state_map[succ], input_state ]))
File "$HOME/angr-dev/angr/angr/analyses/variable_recovery/variable_recovery_fast.py", line 551, in _merge_states
return states[0].merge(states[1], successor=node.addr)
File "$HOME/angr-dev/angr/angr/analyses/variable_recovery/variable_recovery_fast.py", line 448, in merge
processor_state=self.processor_state.copy().merge(other.processor_state),
File "$HOME/angr-dev/angr/angr/analyses/variable_recovery/variable_recovery_fast.py", line 53, in merge
self.bp = max(self.bp, other.bp)
TypeError: '>' not supported between instances of 'NoneType' and 'NoneType'
|
TypeError
|
def _process_block(self, state, block): # pylint:disable=no-self-use
"""
Scan through all statements and perform the following tasks:
- Find stack pointers and the VEX temporary variable storing stack pointers
- Selectively calculate VEX statements
- Track memory loading and mark stack and global variables accordingly
:param angr.Block block:
:return:
"""
l.debug("Processing block %#x.", block.addr)
processor = (
self._ail_engine if isinstance(block, ailment.Block) else self._vex_engine
)
processor.process(state, block=block, fail_fast=self._fail_fast)
# readjusting sp at the end for blocks that end in a call
if block.addr in self._node_to_cc:
cc = self._node_to_cc[block.addr]
if cc is not None:
state.processor_state.sp_adjustment += cc.sp_delta
state.processor_state.sp_adjusted = True
l.debug(
"Adjusting stack pointer at end of block %#x with offset %+#x.",
block.addr,
state.processor_state.sp_adjustment,
)
|
def _process_block(self, state, block): # pylint:disable=no-self-use
"""
Scan through all statements and perform the following tasks:
- Find stack pointers and the VEX temporary variable storing stack pointers
- Selectively calculate VEX statements
- Track memory loading and mark stack and global variables accordingly
:param angr.Block block:
:return:
"""
l.debug("Processing block %#x.", block.addr)
processor = (
self._ail_engine if isinstance(block, ailment.Block) else self._vex_engine
)
processor.process(state, block=block, fail_fast=self._fail_fast)
# readjusting sp at the end for blocks that end in a call
if block.addr in self._node_to_cc:
cc = self._node_to_cc[block.addr]
state.processor_state.sp_adjustment += cc.sp_delta
state.processor_state.sp_adjusted = True
l.debug(
"Adjusting stack pointer at end of block %#x with offset %+#x.",
block.addr,
state.processor_state.sp_adjustment,
)
|
https://github.com/angr/angr/issues/1259
|
Traceback (most recent call last):
File "$HOME/angr-dev/angr-management/angrmanagement/ui/widgets/qfunction_table.py", line 227, in _on_function_selected
self._selection_callback(selected_func)
File "$HOME/angr-dev/angr-management/angrmanagement/ui/views/functions_view.py", line 41, in _on_function_selected
self.workspace.on_function_selected(function)
File "$HOME/angr-dev/angr-management/angrmanagement/ui/workspace.py", line 57, in on_function_selected
self.views_by_category['disassembly'][0].display_function(function)
File "$HOME/angr-dev/angr-management/angrmanagement/ui/views/disassembly_view.py", line 171, in display_function
self._display_function(function)
File "$HOME/angr-dev/angr-management/angrmanagement/ui/views/disassembly_view.py", line 343, in _display_function
vr = self.workspace.instance.project.analyses.VariableRecoveryFast(the_func)
File "$HOME/angr-dev/angr/angr/analyses/analysis.py", line 108, in __call__
oself.__init__(*args, **kwargs)
File "$HOME/angr-dev/angr/angr/analyses/variable_recovery/variable_recovery_fast.py", line 510, in __init__
self._analyze()
File "$HOME/angr-dev/angr/angr/analyses/forward_analysis.py", line 557, in _analyze
self._analysis_core_graph()
File "$HOME/angr-dev/angr/angr/analyses/forward_analysis.py", line 583, in _analysis_core_graph
self._add_input_state(n, output_state)
File "$HOME/angr-dev/angr/angr/analyses/forward_analysis.py", line 605, in _add_input_state
self._state_map[succ] = self._merge_states(succ, *([ self._state_map[succ], input_state ]))
File "$HOME/angr-dev/angr/angr/analyses/variable_recovery/variable_recovery_fast.py", line 551, in _merge_states
return states[0].merge(states[1], successor=node.addr)
File "$HOME/angr-dev/angr/angr/analyses/variable_recovery/variable_recovery_fast.py", line 448, in merge
processor_state=self.processor_state.copy().merge(other.processor_state),
File "$HOME/angr-dev/angr/angr/analyses/variable_recovery/variable_recovery_fast.py", line 53, in merge
self.bp = max(self.bp, other.bp)
TypeError: '>' not supported between instances of 'NoneType' and 'NoneType'
|
TypeError
|
def merge(self, other):
if not self == other:
l.warning("Inconsistent merge: %s %s ", self, other)
# FIXME: none of the following logic makes any sense...
if other.sp_adjusted is True:
self.sp_adjusted = True
self.sp_adjustment = max(self.sp_adjustment, other.sp_adjustment)
if other.bp_as_base is True:
self.bp_as_base = True
if self.bp is None:
self.bp = other.bp
elif other.bp is not None: # and self.bp is not None
self.bp = max(self.bp, other.bp)
return self
|
def merge(self, other):
if not self == other:
l.warning("Inconsistent merge: %s %s ", self, other)
# FIXME: none of the following logic makes any sense...
if other.sp_adjusted is True:
self.sp_adjusted = True
self.sp_adjustment = max(self.sp_adjustment, other.sp_adjustment)
if other.bp_as_base is True:
self.bp_as_base = True
self.bp = max(self.bp, other.bp)
return self
|
https://github.com/angr/angr/issues/1259
|
Traceback (most recent call last):
File "$HOME/angr-dev/angr-management/angrmanagement/ui/widgets/qfunction_table.py", line 227, in _on_function_selected
self._selection_callback(selected_func)
File "$HOME/angr-dev/angr-management/angrmanagement/ui/views/functions_view.py", line 41, in _on_function_selected
self.workspace.on_function_selected(function)
File "$HOME/angr-dev/angr-management/angrmanagement/ui/workspace.py", line 57, in on_function_selected
self.views_by_category['disassembly'][0].display_function(function)
File "$HOME/angr-dev/angr-management/angrmanagement/ui/views/disassembly_view.py", line 171, in display_function
self._display_function(function)
File "$HOME/angr-dev/angr-management/angrmanagement/ui/views/disassembly_view.py", line 343, in _display_function
vr = self.workspace.instance.project.analyses.VariableRecoveryFast(the_func)
File "$HOME/angr-dev/angr/angr/analyses/analysis.py", line 108, in __call__
oself.__init__(*args, **kwargs)
File "$HOME/angr-dev/angr/angr/analyses/variable_recovery/variable_recovery_fast.py", line 510, in __init__
self._analyze()
File "$HOME/angr-dev/angr/angr/analyses/forward_analysis.py", line 557, in _analyze
self._analysis_core_graph()
File "$HOME/angr-dev/angr/angr/analyses/forward_analysis.py", line 583, in _analysis_core_graph
self._add_input_state(n, output_state)
File "$HOME/angr-dev/angr/angr/analyses/forward_analysis.py", line 605, in _add_input_state
self._state_map[succ] = self._merge_states(succ, *([ self._state_map[succ], input_state ]))
File "$HOME/angr-dev/angr/angr/analyses/variable_recovery/variable_recovery_fast.py", line 551, in _merge_states
return states[0].merge(states[1], successor=node.addr)
File "$HOME/angr-dev/angr/angr/analyses/variable_recovery/variable_recovery_fast.py", line 448, in merge
processor_state=self.processor_state.copy().merge(other.processor_state),
File "$HOME/angr-dev/angr/angr/analyses/variable_recovery/variable_recovery_fast.py", line 53, in merge
self.bp = max(self.bp, other.bp)
TypeError: '>' not supported between instances of 'NoneType' and 'NoneType'
|
TypeError
|
def string_references(self, minimum_length=2, vex_only=False):
"""
All of the constant string references used by this function.
:param minimum_length: The minimum length of strings to find (default is 1)
:param vex_only: Only analyze VEX IR, don't interpret the entry state to detect additional constants.
:return: A list of tuples of (address, string) where is address is the location of the string in
memory.
"""
strings = []
memory = self._project.loader.memory
# get known instruction addresses and call targets
# these addresses cannot be string references, but show up frequently in the runtime values
known_executable_addresses = set()
for block in self.blocks:
known_executable_addresses.update(block.instruction_addrs)
for function in self._function_manager.values():
known_executable_addresses.update(set(x.addr for x in function.graph.nodes()))
# loop over all local runtime values and check if the value points to a printable string
for addr in self.local_runtime_values if not vex_only else self.code_constants:
if not isinstance(addr, claripy.fp.FPV) and addr in memory:
# check that the address isn't an pointing to known executable code
# and that it isn't an indirect pointer to known executable code
try:
possible_pointer = memory.unpack_word(addr)
if (
addr not in known_executable_addresses
and possible_pointer not in known_executable_addresses
):
# build string
stn = ""
offset = 0
current_char = memory[addr + offset]
while chr(current_char) in string.printable:
stn += current_char
offset += 1
current_char = memory[addr + offset]
# check that the string was a null terminated string with minimum length
if current_char == "\x00" and len(stn) >= minimum_length:
strings.append((addr, stn))
except KeyError:
pass
return strings
|
def string_references(self, minimum_length=2, vex_only=False):
"""
All of the constant string references used by this function.
:param minimum_length: The minimum length of strings to find (default is 1)
:param vex_only: Only analyze VEX IR, don't interpret the entry state to detect additional constants.
:return: A list of tuples of (address, string) where is address is the location of the string in
memory.
"""
strings = []
memory = self._project.loader.memory
# get known instruction addresses and call targets
# these addresses cannot be string references, but show up frequently in the runtime values
known_executable_addresses = set()
for block in self.blocks:
known_executable_addresses.update(block.instruction_addrs)
for function in self._function_manager.values():
known_executable_addresses.update(set(x.addr for x in function.graph.nodes()))
# loop over all local runtime values and check if the value points to a printable string
for addr in self.local_runtime_values if not vex_only else self.code_constants:
if not isinstance(addr, claripy.fp.FPV) and addr in memory:
# check that the address isn't an pointing to known executable code
# and that it isn't an indirect pointer to known executable code
try:
possible_pointer = memory.unpack_word(addr)
if (
addr not in known_executable_addresses
and possible_pointer not in known_executable_addresses
):
# build string
stn = ""
offset = 0
current_char = memory[addr + offset]
while current_char in string.printable:
stn += current_char
offset += 1
current_char = memory[addr + offset]
# check that the string was a null terminated string with minimum length
if current_char == "\x00" and len(stn) >= minimum_length:
strings.append((addr, stn))
except KeyError:
pass
return strings
|
https://github.com/angr/angr/issues/1226
|
Traceback (most recent call last):
File ".\src\gaeInputGeneration.py", line 109, in <module>
for offset, strRef in func.string_references(vex_only=True):
File "C:\Users\lixue\AppData\Local\Programs\Python\Python36\lib\site-packages\angr\knowledge_plugins\functions\function.py", line 301, in string_references
while current_char in string.printable:
TypeError: 'in <string>' requires string as left operand, not int
|
TypeError
|
def string_references(self, minimum_length=2, vex_only=False):
"""
All of the constant string references used by this function.
:param minimum_length: The minimum length of strings to find (default is 1)
:param vex_only: Only analyze VEX IR, don't interpret the entry state to detect additional constants.
:return: A list of tuples of (address, string) where is address is the location of the string in
memory.
"""
strings = []
memory = self._project.loader.memory
# get known instruction addresses and call targets
# these addresses cannot be string references, but show up frequently in the runtime values
known_executable_addresses = set()
for block in self.blocks:
known_executable_addresses.update(block.instruction_addrs)
for function in self._function_manager.values():
known_executable_addresses.update(set(x.addr for x in function.graph.nodes()))
# loop over all local runtime values and check if the value points to a printable string
for addr in self.local_runtime_values if not vex_only else self.code_constants:
if not isinstance(addr, claripy.fp.FPV) and addr in memory:
# check that the address isn't an pointing to known executable code
# and that it isn't an indirect pointer to known executable code
try:
possible_pointer = memory.unpack_word(addr)
if (
addr not in known_executable_addresses
and possible_pointer not in known_executable_addresses
):
# build string
stn = ""
offset = 0
current_char = chr(memory[addr + offset])
while current_char in string.printable:
stn += current_char
offset += 1
current_char = chr(memory[addr + offset])
# check that the string was a null terminated string with minimum length
if current_char == "\x00" and len(stn) >= minimum_length:
strings.append((addr, stn))
except KeyError:
pass
return strings
|
def string_references(self, minimum_length=2, vex_only=False):
"""
All of the constant string references used by this function.
:param minimum_length: The minimum length of strings to find (default is 1)
:param vex_only: Only analyze VEX IR, don't interpret the entry state to detect additional constants.
:return: A list of tuples of (address, string) where is address is the location of the string in
memory.
"""
strings = []
memory = self._project.loader.memory
# get known instruction addresses and call targets
# these addresses cannot be string references, but show up frequently in the runtime values
known_executable_addresses = set()
for block in self.blocks:
known_executable_addresses.update(block.instruction_addrs)
for function in self._function_manager.values():
known_executable_addresses.update(set(x.addr for x in function.graph.nodes()))
# loop over all local runtime values and check if the value points to a printable string
for addr in self.local_runtime_values if not vex_only else self.code_constants:
if not isinstance(addr, claripy.fp.FPV) and addr in memory:
# check that the address isn't an pointing to known executable code
# and that it isn't an indirect pointer to known executable code
try:
possible_pointer = memory.unpack_word(addr)
if (
addr not in known_executable_addresses
and possible_pointer not in known_executable_addresses
):
# build string
stn = ""
offset = 0
current_char = memory[addr + offset]
while chr(current_char) in string.printable:
stn += current_char
offset += 1
current_char = memory[addr + offset]
# check that the string was a null terminated string with minimum length
if current_char == "\x00" and len(stn) >= minimum_length:
strings.append((addr, stn))
except KeyError:
pass
return strings
|
https://github.com/angr/angr/issues/1226
|
Traceback (most recent call last):
File ".\src\gaeInputGeneration.py", line 109, in <module>
for offset, strRef in func.string_references(vex_only=True):
File "C:\Users\lixue\AppData\Local\Programs\Python\Python36\lib\site-packages\angr\knowledge_plugins\functions\function.py", line 301, in string_references
while current_char in string.printable:
TypeError: 'in <string>' requires string as left operand, not int
|
TypeError
|
def _remove_redundant_overlapping_blocks(self):
"""
On some architectures there are sometimes garbage bytes (usually nops) between functions in order to properly
align the succeeding function. CFGFast does a linear sweeping which might create duplicated blocks for
function epilogues where one block starts before the garbage bytes and the other starts after the garbage bytes.
This method enumerates all blocks and remove overlapping blocks if one of them is aligned to 0x10 and the other
contains only garbage bytes.
:return: None
"""
sorted_nodes = sorted(
self.graph.nodes(), key=lambda n: n.addr if n is not None else 0
)
all_plt_stub_addrs = set(
itertools.chain.from_iterable(
obj.reverse_plt.keys()
for obj in self.project.loader.all_objects
if isinstance(obj, cle.MetaELF)
)
)
# go over the list. for each node that is the beginning of a function and is not properly aligned, if its
# leading instruction is a single-byte or multi-byte nop, make sure there is another CFGNode starts after the
# nop instruction
nodes_to_append = {}
# pylint:disable=too-many-nested-blocks
for a in sorted_nodes:
if (
a.addr in self.functions
and a.addr not in all_plt_stub_addrs
and not self._addr_hooked_or_syscall(a.addr)
):
all_in_edges = self.graph.in_edges(a, data=True)
if not any([data["jumpkind"] == "Ijk_Call" for _, _, data in all_in_edges]):
# no one is calling it
# this function might be created from linear sweeping
try:
block = self._lift(a.addr, size=0x10 - (a.addr % 0x10), opt_level=1)
vex_block = block.vex
except SimTranslationError:
continue
nop_length = None
if self._is_noop_block(vex_block):
# fast path: in most cases, the entire block is a single byte or multi-byte nop, which VEX
# optimizer is able to tell
nop_length = block.size
else:
# this is not a no-op block. Determine where nop instructions terminate.
insns = block.capstone.insns
if insns:
nop_length = self._get_nop_length(insns)
if nop_length <= 0:
continue
# leading nop for alignment.
next_node_addr = a.addr + nop_length
if nop_length < a.size and not (
next_node_addr in self._nodes or next_node_addr in nodes_to_append
):
# create a new CFGNode that starts there
next_node_size = a.size - nop_length
next_node = CFGNode(
next_node_addr,
next_node_size,
self,
function_address=next_node_addr,
instruction_addrs=tuple(
i
for i in a.instruction_addrs
if next_node_addr <= i < next_node_addr + next_node_size
),
thumb=a.thumb,
byte_string=None
if a.byte_string is None
else a.byte_string[nop_length:],
)
self.graph.add_node(next_node)
# create edges accordingly
all_out_edges = self.graph.out_edges(a, data=True)
for _, dst, data in all_out_edges:
self.graph.add_edge(next_node, dst, **data)
nodes_to_append[next_node_addr] = next_node
# make sure there is a function begins there
try:
snippet = self._to_snippet(
addr=next_node_addr,
size=next_node_size,
base_state=self._base_state,
)
self.functions._add_node(next_node_addr, snippet)
except (SimEngineError, SimMemoryError):
continue
# append all new nodes to sorted nodes
if nodes_to_append:
sorted_nodes = sorted(
sorted_nodes + nodes_to_append.values(),
key=lambda n: n.addr if n is not None else 0,
)
removed_nodes = set()
a = None # it always hold the very recent non-removed node
for i in xrange(len(sorted_nodes)):
if a is None:
a = sorted_nodes[0]
continue
b = sorted_nodes[i]
if self._addr_hooked_or_syscall(b.addr):
continue
if b in removed_nodes:
# skip all removed nodes
continue
if a.addr <= b.addr and (a.addr + a.size > b.addr):
# They are overlapping
try:
block = self.project.factory.fresh_block(
a.addr, b.addr - a.addr, backup_state=self._base_state
)
except SimTranslationError:
a = b
continue
if block.capstone.insns and all(
[self._is_noop_insn(insn) for insn in block.capstone.insns]
):
# It's a big nop - no function starts with nop
# add b to indices
self._nodes[b.addr] = b
self._nodes_by_addr[b.addr].append(b)
# shrink a
self._shrink_node(a, b.addr - a.addr, remove_function=False)
a = b
continue
all_functions = self.kb.functions
# now things are a little harder
# if there is no incoming edge to b, we should replace b with a
# this is mostly because we misidentified the function beginning. In fact a is the function beginning,
# but somehow we thought b is the beginning
if a.addr + a.size == b.addr + b.size:
in_edges = len(
[_ for _, _, data in self.graph.in_edges([b], data=True)]
)
if in_edges == 0:
# we use node a to replace node b
# link all successors of b to a
for _, dst, data in self.graph.out_edges([b], data=True):
self.graph.add_edge(a, dst, **data)
if b.addr in self._nodes:
del self._nodes[b.addr]
if (
b.addr in self._nodes_by_addr
and b in self._nodes_by_addr[b.addr]
):
self._nodes_by_addr[b.addr].remove(b)
self.graph.remove_node(b)
if b.addr in all_functions:
del all_functions[b.addr]
# skip b
removed_nodes.add(b)
continue
# next case - if b is directly from function prologue detection, or a basic block that is a successor of
# a wrongly identified basic block, we might be totally misdecoding b
if b.instruction_addrs[0] not in a.instruction_addrs:
# use a, truncate b
new_b_addr = a.addr + a.size # b starts right after a terminates
new_b_size = (
b.addr + b.size - new_b_addr
) # this may not be the size we want, since b might be
# misdecoded
# totally remove b
if b.addr in self._nodes:
del self._nodes[b.addr]
if b.addr in self._nodes_by_addr and b in self._nodes_by_addr[b.addr]:
self._nodes_by_addr[b.addr].remove(b)
self.graph.remove_node(b)
if b.addr in all_functions:
del all_functions[b.addr]
removed_nodes.add(b)
if new_b_size > 0:
# there are still some parts left in node b - we don't want to lose it
self._scan_block(
new_b_addr, a.function_address, None, None, None, None
)
continue
# for other cases, we'll let them be for now
a = b # update a
|
def _remove_redundant_overlapping_blocks(self):
"""
On some architectures there are sometimes garbage bytes (usually nops) between functions in order to properly
align the succeeding function. CFGFast does a linear sweeping which might create duplicated blocks for
function epilogues where one block starts before the garbage bytes and the other starts after the garbage bytes.
This method enumerates all blocks and remove overlapping blocks if one of them is aligned to 0x10 and the other
contains only garbage bytes.
:return: None
"""
sorted_nodes = sorted(
self.graph.nodes(), key=lambda n: n.addr if n is not None else 0
)
all_plt_stub_addrs = set(
itertools.chain.from_iterable(
obj.reverse_plt.keys()
for obj in self.project.loader.all_objects
if isinstance(obj, cle.MetaELF)
)
)
# go over the list. for each node that is the beginning of a function and is not properly aligned, if its
# leading instruction is a single-byte or multi-byte nop, make sure there is another CFGNode starts after the
# nop instruction
nodes_to_append = {}
# pylint:disable=too-many-nested-blocks
for a in sorted_nodes:
if (
a.addr in self.functions
and a.addr not in all_plt_stub_addrs
and not self._addr_hooked_or_syscall(a.addr)
):
all_in_edges = self.graph.in_edges(a, data=True)
if not any([data["jumpkind"] == "Ijk_Call" for _, _, data in all_in_edges]):
# no one is calling it
# this function might be created from linear sweeping
try:
block = self._lift(a.addr, size=0x10 - (a.addr % 0x10), opt_level=1)
vex_block = block.vex
except SimTranslationError:
continue
nop_length = None
if self._is_noop_block(vex_block):
# fast path: in most cases, the entire block is a single byte or multi-byte nop, which VEX
# optimizer is able to tell
nop_length = block.size
else:
# this is not a no-op block. Determine where nop instructions terminate.
insns = block.capstone.insns
if insns:
nop_length = self._get_nop_length(insns)
if nop_length <= 0:
continue
# leading nop for alignment.
next_node_addr = a.addr + nop_length
if nop_length < a.size and not (
next_node_addr in self._nodes or next_node_addr in nodes_to_append
):
# create a new CFGNode that starts there
next_node_size = a.size - nop_length
next_node = CFGNode(
next_node_addr,
next_node_size,
self,
function_address=next_node_addr,
instruction_addrs=tuple(
i
for i in a.instruction_addrs
if next_node_addr <= i < next_node_addr + next_node_size
),
thumb=a.thumb,
byte_string=None
if a.byte_string is None
else a.byte_string[nop_length:],
)
# create edges accordingly
all_out_edges = self.graph.out_edges(a, data=True)
for _, dst, data in all_out_edges:
self.graph.add_edge(next_node, dst, **data)
nodes_to_append[next_node_addr] = next_node
# make sure there is a function begins there
try:
snippet = self._to_snippet(
addr=next_node_addr,
size=next_node_size,
base_state=self._base_state,
)
self.functions._add_node(next_node_addr, snippet)
except (SimEngineError, SimMemoryError):
continue
# append all new nodes to sorted nodes
if nodes_to_append:
sorted_nodes = sorted(
sorted_nodes + nodes_to_append.values(),
key=lambda n: n.addr if n is not None else 0,
)
removed_nodes = set()
a = None # it always hold the very recent non-removed node
for i in xrange(len(sorted_nodes)):
if a is None:
a = sorted_nodes[0]
continue
b = sorted_nodes[i]
if self._addr_hooked_or_syscall(b.addr):
continue
if b in removed_nodes:
# skip all removed nodes
continue
if a.addr <= b.addr and (a.addr + a.size > b.addr):
# They are overlapping
try:
block = self.project.factory.fresh_block(
a.addr, b.addr - a.addr, backup_state=self._base_state
)
except SimTranslationError:
a = b
continue
if block.capstone.insns and all(
[self._is_noop_insn(insn) for insn in block.capstone.insns]
):
# It's a big nop - no function starts with nop
# add b to indices
self._nodes[b.addr] = b
self._nodes_by_addr[b.addr].append(b)
# shrink a
self._shrink_node(a, b.addr - a.addr, remove_function=False)
a = b
continue
all_functions = self.kb.functions
# now things are a little harder
# if there is no incoming edge to b, we should replace b with a
# this is mostly because we misidentified the function beginning. In fact a is the function beginning,
# but somehow we thought b is the beginning
if a.addr + a.size == b.addr + b.size:
in_edges = len(
[_ for _, _, data in self.graph.in_edges([b], data=True)]
)
if in_edges == 0:
# we use node a to replace node b
# link all successors of b to a
for _, dst, data in self.graph.out_edges([b], data=True):
self.graph.add_edge(a, dst, **data)
if b.addr in self._nodes:
del self._nodes[b.addr]
if (
b.addr in self._nodes_by_addr
and b in self._nodes_by_addr[b.addr]
):
self._nodes_by_addr[b.addr].remove(b)
self.graph.remove_node(b)
if b.addr in all_functions:
del all_functions[b.addr]
# skip b
removed_nodes.add(b)
continue
# next case - if b is directly from function prologue detection, or a basic block that is a successor of
# a wrongly identified basic block, we might be totally misdecoding b
if b.instruction_addrs[0] not in a.instruction_addrs:
# use a, truncate b
new_b_addr = a.addr + a.size # b starts right after a terminates
new_b_size = (
b.addr + b.size - new_b_addr
) # this may not be the size we want, since b might be
# misdecoded
# totally remove b
if b.addr in self._nodes:
del self._nodes[b.addr]
if b.addr in self._nodes_by_addr and b in self._nodes_by_addr[b.addr]:
self._nodes_by_addr[b.addr].remove(b)
self.graph.remove_node(b)
if b.addr in all_functions:
del all_functions[b.addr]
removed_nodes.add(b)
if new_b_size > 0:
# there are still some parts left in node b - we don't want to lose it
self._scan_block(
new_b_addr, a.function_address, None, None, None, None
)
continue
# for other cases, we'll let them be for now
a = b # update a
|
https://github.com/angr/angr/issues/1107
|
In [1]: import angr
WARNING | 2018-07-03 14:13:20,872 | angr.analyses.disassembly_utils | Your version of capstone does not support MIPS instruction groups.
In [2]: p = angr.Project("/bin/ls")
WARNING | 2018-07-03 14:13:22,746 | cle.loader | The main binary is a position-independent executable. It is being loaded with a base address of 0x400000.
In [3]: cfg = p.analyses.CFGFast(show_progressbar=True)
WARNING | 2018-07-03 14:13:26,623 | angr.analyses.cfg.cfg_fast | "auto_load_libs" is enabled. With libraries loaded in project, CFGFast will cover libraries, which may take significantly more time than expected. You may reload the binary
with "auto_load_libs" disabled, or specify "regions" to limit the scope of CFG recovery.
WARNING | 2018-07-03 14:13:33,234 | angr.engines.successors | Exit state has over 256 possible solutions. Likely unconstrained; skipping. <BV64 global_c000026_86_64{UNINITIALIZED}>
WARNING | 2018-07-03 14:13:34,434 | angr.analyses.cfg.cfg_fast | __libc_start_main is supposed to yield new exits, but it fails to do so.
WARNING | 2018-07-03 14:13:35,639 | angr.engines.successors | Exit state has over 256 possible solutions. Likely unconstrained; skipping. <BV64 global_c00003e_218_64{UNINITIALIZED}>
WARNING | 2018-07-03 14:13:36,330 | angr.engines.successors | Exit state has over 256 possible solutions. Likely unconstrained; skipping. <BV64 global_10_223_64{UNINITIALIZED}>
WARNING | 2018-07-03 14:13:36,766 | angr.engines.successors | Exit state has over 256 possible solutions. Likely unconstrained; skipping. <BV64 global_c000056_240_64{UNINITIALIZED}>
WARNING | 2018-07-03 14:13:37,091 | angr.engines.successors | Exit state has over 256 possible solutions. Likely unconstrained; skipping. <BV64 global_c000066_247_64{UNINITIALIZED}>
WARNING | 2018-07-03 14:13:40,723 | angr.engines.successors | Exit state has over 256 possible solutions. Likely unconstrained; skipping. <BV64 global_c000076_361_64{UNINITIALIZED}>
WARNING | 2018-07-03 14:13:40,804 | angr.engines.successors | Exit state has over 256 possible solutions. Likely unconstrained; skipping. <BV64 global_c000086_369_64{UNINITIALIZED}>
ERROR | 2018-07-03 14:13:40,877 | angr.engines.vex.statements.dirty | Unsupported dirty helper amd64g_dirtyhelper_FSTENV
ERROR | 2018-07-03 14:13:40,878 | angr.engines.vex.statements.dirty | Unsupported dirty helper amd64g_dirtyhelper_FLDENV
ERROR | 2018-07-03 14:13:40,952 | angr.engines.vex.statements.dirty | Unsupported dirty helper amd64g_dirtyhelper_FSTENV
WARNING | 2018-07-03 14:13:42,606 | angr.engines.successors | Exit state has over 256 possible solutions. Likely unconstrained; skipping. <BV64 global_c00009e_438_64{UNINITIALIZED}>
WARNING | 2018-07-03 14:13:43,948 | angr.engines.successors | Exit state has over 256 possible solutions. Likely unconstrained; skipping. <BV64 global_c0000b6_468_64{UNINITIALIZED}>
[...] # Many more unconstraineds
NetworkXError Traceback (most recent call last)
<ipython-input-3-718b8705bb6a> in <module>()
----> 1 cfg = p.analyses.CFGFast(show_progressbar=True)
/home/X/angr-dev/angr/angr/analyses/analysis.pyc in __call__(self, *args, **kwargs)
106
107 oself._show_progressbar = show_progressbar
--> 108 oself.__init__(*args, **kwargs)
109 return oself
110
/home/X/angr-dev/angr/angr/analyses/cfg/cfg_fast.pyc in __init__(self, binary, regions, pickle_intermediate_results, symbols, function_prologues, resolve_indirect_jumps, force_segment, force_complete_scan, indirect_jump_target_limit, collect_data_references, extra_cross_references, normalize, start_at_entry, function_starts, extra_memory_regions, data_type_guessing_handlers, arch_options, indirect_jump_resolvers, base_state, exclude_sparse_regions, skip_specific_regions, heuristic_plt_resolving, start, end, **extra_arch_options)
907
908 # Start working!
--> 909 self._analyze()
910
911 #
/home/X/angr-dev/angr/angr/analyses/forward_analysis.pyc in _analyze(self)
557 self._analysis_core_graph()
558
--> 559 self._post_analysis()
560
561 def _analysis_core_graph(self):
/home/X/angr-dev/angr/angr/analyses/cfg/cfg_fast.pyc in _post_analysis(self)
1458
1459 if self.project.arch.name in ('X86', 'AMD64', 'MIPS32'):
-> 1460 self._remove_redundant_overlapping_blocks()
1461
1462 if self._normalize:
/home/X/angr-dev/angr/angr/analyses/cfg/cfg_fast.pyc in _remove_redundant_overlapping_blocks(self)
2737 self._nodes_by_addr[b.addr].remove(b)
2738
-> 2739 self.graph.remove_node(b)
2740
2741 if b.addr in all_functions:
/home/X/.virtualenvs/angr-pypy/site-packages/networkx/classes/digraph.pyc in remove_node(self, n)
521 del self._node[n]
522 except KeyError: # NetworkXError if n not in self
--> 523 raise NetworkXError("The node %s is not in the digraph." % (n,))
524 for u in nbrs:
525 del self._pred[u][n] # remove all edges n-u in digraph
NetworkXError: The node <CFGNode 0x213a0b0L[61]> is not in the digraph
|
NetworkXError
|
def _create_job_call(
self,
addr,
irsb,
cfg_node,
stmt_idx,
ins_addr,
current_function_addr,
target_addr,
jumpkind,
is_syscall=False,
):
"""
Generate a CFGJob for target address, also adding to _pending_entries
if returning to succeeding position (if irsb arg is populated)
:param int addr: Address of the predecessor node
:param pyvex.IRSB irsb: IRSB of the predecessor node
:param CFGNode cfg_node: The CFGNode instance of the predecessor node
:param int stmt_idx: ID of the source statement
:param int ins_addr: Address of the source instruction
:param int current_function_addr: Address of the current function
:param int target_addr: Destination of the call
:param str jumpkind: The jumpkind of the edge going to this node
:param bool is_syscall: Is the jump kind (and thus this) a system call
:return: A list of CFGJobs
:rtype: list
"""
jobs = []
if is_syscall:
# Fix the target_addr for syscalls
tmp_state = self.project.factory.blank_state(
mode="fastpath", addr=cfg_node.addr
)
# Find the first successor with a syscall jumpkind
succ = next(
iter(
succ
for succ in self.project.factory.successors(tmp_state).flat_successors
if succ.jumpkind and succ.jumpkind.startswith("Ijk_Sys")
),
None,
)
if succ is None:
# For some reason, there is no such successor with a syscall jumpkind
target_addr = self._unresolvable_target_addr
else:
try:
syscall_stub = self.project.simos.syscall(succ)
if (
syscall_stub
): # can be None if simos is not a subclass of SimUserspac
syscall_addr = syscall_stub.addr
target_addr = syscall_addr
else:
target_addr = self._unresolvable_target_addr
except AngrUnsupportedSyscallError:
target_addr = self._unresolvable_target_addr
new_function_addr = target_addr
if irsb is None:
return_site = None
else:
return_site = (
addr + irsb.size
) # We assume the program will always return to the succeeding position
if new_function_addr is not None:
r = self._function_add_call_edge(
new_function_addr,
cfg_node,
return_site,
current_function_addr,
syscall=is_syscall,
stmt_idx=stmt_idx,
ins_addr=ins_addr,
)
if not r:
return []
if new_function_addr is not None:
# Keep tracing from the call
ce = CFGJob(
target_addr,
new_function_addr,
jumpkind,
last_addr=addr,
src_node=cfg_node,
src_stmt_idx=stmt_idx,
src_ins_addr=ins_addr,
syscall=is_syscall,
)
jobs.append(ce)
if return_site is not None:
# Also, keep tracing from the return site
ce = CFGJob(
return_site,
current_function_addr,
"Ijk_FakeRet",
last_addr=addr,
src_node=cfg_node,
src_stmt_idx=stmt_idx,
src_ins_addr=ins_addr,
returning_source=new_function_addr,
syscall=is_syscall,
)
self._pending_jobs.add_job(ce)
# register this job to this function
self._register_analysis_job(current_function_addr, ce)
if new_function_addr is not None:
callee_function = self.kb.functions.function(
addr=new_function_addr, syscall=is_syscall
)
if callee_function.returning is True:
if return_site is not None:
self._function_add_fakeret_edge(
return_site, cfg_node, current_function_addr, confirmed=True
)
self._function_add_return_edge(
new_function_addr, return_site, current_function_addr
)
elif callee_function.returning is False:
# The function does not return - there is no fake ret edge
pass
else:
if return_site is not None:
self._function_add_fakeret_edge(
return_site, cfg_node, current_function_addr, confirmed=None
)
fr = FunctionReturn(
new_function_addr, current_function_addr, addr, return_site
)
if fr not in self._function_returns[new_function_addr]:
self._function_returns[new_function_addr].append(fr)
return jobs
|
def _create_job_call(
self,
addr,
irsb,
cfg_node,
stmt_idx,
ins_addr,
current_function_addr,
target_addr,
jumpkind,
is_syscall=False,
):
"""
Generate a CFGJob for target address, also adding to _pending_entries
if returning to succeeding position (if irsb arg is populated)
:param int addr: Address of the predecessor node
:param pyvex.IRSB irsb: IRSB of the predecessor node
:param CFGNode cfg_node: The CFGNode instance of the predecessor node
:param int stmt_idx: ID of the source statement
:param int ins_addr: Address of the source instruction
:param int current_function_addr: Address of the current function
:param int target_addr: Destination of the call
:param str jumpkind: The jumpkind of the edge going to this node
:param bool is_syscall: Is the jump kind (and thus this) a system call
:return: A list of CFGJobs
:rtype: list
"""
jobs = []
if is_syscall:
# Fix the target_addr for syscalls
tmp_state = self.project.factory.blank_state(
mode="fastpath", addr=cfg_node.addr
)
succ = self.project.factory.successors(tmp_state).flat_successors[0]
try:
syscall_stub = self.project.simos.syscall(succ)
if syscall_stub: # can be None if simos is not a subclass of SimUserspac
syscall_addr = syscall_stub.addr
target_addr = syscall_addr
else:
target_addr = self._unresolvable_target_addr
except AngrUnsupportedSyscallError:
target_addr = self._unresolvable_target_addr
new_function_addr = target_addr
if irsb is None:
return_site = None
else:
return_site = (
addr + irsb.size
) # We assume the program will always return to the succeeding position
if new_function_addr is not None:
r = self._function_add_call_edge(
new_function_addr,
cfg_node,
return_site,
current_function_addr,
syscall=is_syscall,
stmt_idx=stmt_idx,
ins_addr=ins_addr,
)
if not r:
return []
if new_function_addr is not None:
# Keep tracing from the call
ce = CFGJob(
target_addr,
new_function_addr,
jumpkind,
last_addr=addr,
src_node=cfg_node,
src_stmt_idx=stmt_idx,
src_ins_addr=ins_addr,
syscall=is_syscall,
)
jobs.append(ce)
if return_site is not None:
# Also, keep tracing from the return site
ce = CFGJob(
return_site,
current_function_addr,
"Ijk_FakeRet",
last_addr=addr,
src_node=cfg_node,
src_stmt_idx=stmt_idx,
src_ins_addr=ins_addr,
returning_source=new_function_addr,
syscall=is_syscall,
)
self._pending_jobs.add_job(ce)
# register this job to this function
self._register_analysis_job(current_function_addr, ce)
if new_function_addr is not None:
callee_function = self.kb.functions.function(
addr=new_function_addr, syscall=is_syscall
)
if callee_function.returning is True:
if return_site is not None:
self._function_add_fakeret_edge(
return_site, cfg_node, current_function_addr, confirmed=True
)
self._function_add_return_edge(
new_function_addr, return_site, current_function_addr
)
elif callee_function.returning is False:
# The function does not return - there is no fake ret edge
pass
else:
if return_site is not None:
self._function_add_fakeret_edge(
return_site, cfg_node, current_function_addr, confirmed=None
)
fr = FunctionReturn(
new_function_addr, current_function_addr, addr, return_site
)
if fr not in self._function_returns[new_function_addr]:
self._function_returns[new_function_addr].append(fr)
return jobs
|
https://github.com/angr/angr/issues/971
|
ERROR | 2018-04-28 18:12:46,695 | angr.engines.vex.statements.dirty | Unsupported dirty helper amd64g_dirtyhelper_FSTENV
ERROR | 2018-04-28 18:12:46,696 | angr.engines.vex.statements.dirty | Unsupported dirty helper amd64g_dirtyhelper_FLDENV
WARNING | 2018-04-28 18:12:46,704 | angr.procedures.definitions | unsupported syscall: rt_sigprocmask
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python2.7/dist-packages/angr/analyses/analysis.py", line 96, in make_analysis
oself.__init__(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/angr/analyses/cfg/cfg.py", line 58, in __init__
CFGFast.__init__(self, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/angr/analyses/cfg/cfg_fast.py", line 819, in __init__
self._analyze()
File "/usr/local/lib/python2.7/dist-packages/angr/analyses/forward_analysis.py", line 508, in _analyze
self._analysis_core_baremetal()
File "/usr/local/lib/python2.7/dist-packages/angr/analyses/forward_analysis.py", line 633, in _analysis_core_baremetal
self._process_job_and_get_successors(job_info)
File "/usr/local/lib/python2.7/dist-packages/angr/analyses/forward_analysis.py", line 651, in _process_job_and_get_successors
successors = self._get_successors(job)
File "/usr/local/lib/python2.7/dist-packages/angr/analyses/cfg/cfg_fast.py", line 1210, in _get_successors
jobs = self._scan_block(addr, current_function_addr, jumpkind, src_node, src_ins_addr, src_stmt_idx)
File "/usr/local/lib/python2.7/dist-packages/angr/analyses/cfg/cfg_fast.py", line 1484, in _scan_block
previous_src_ins_addr, previous_src_stmt_idx)
File "/usr/local/lib/python2.7/dist-packages/angr/analyses/cfg/cfg_fast.py", line 1656, in _scan_irsb
stmt_idx
File "/usr/local/lib/python2.7/dist-packages/angr/analyses/cfg/cfg_fast.py", line 1818, in _create_jobs
target_addr, jumpkind, is_syscall=is_syscall
File "/usr/local/lib/python2.7/dist-packages/angr/analyses/cfg/cfg_fast.py", line 1886, in _create_job_call
syscall_stub = self.project.simos.syscall(succ)
File "/usr/local/lib/python2.7/dist-packages/angr/simos/userland.py", line 55, in syscall
abi = self.syscall_abi(state)
File "/usr/local/lib/python2.7/dist-packages/angr/simos/linux.py", line 147, in syscall_abi
raise AngrSyscallError("Unknown syscall jumpkind %s" % state.history.jumpkind)
angr.errors.AngrSyscallError: Unknown syscall jumpkind Ijk_EmWarn
|
angr.errors.AngrSyscallError
|
def _fast_memory_load(self, addr):
"""
Perform a fast memory loading of static content from static regions, a.k.a regions that are mapped to the
memory by the loader.
:param int addr: Address to read from.
:return: A tuple of the data (cffi.CData) and the max size in the current continuous block, or (None, None) if
the address does not exist.
:rtype: tuple
"""
try:
buff, size = self.project.loader.memory.read_bytes_c(addr)
return buff, size
except KeyError:
return None, None
|
def _fast_memory_load(self, addr):
"""
Perform a fast memory loading of static content from static regions, a.k.a regions that are mapped to the
memory by the loader.
:param int addr: Address to read from.
:return: A tuple of the data (cffi.CData) and the max size in the current continuous block, or (None, None) if
the address does not exist.
:rtype: tuple
"""
try:
buff, size = self.project.loader.memory.read_bytes_c(addr)
return buff, size
except KeyError:
return None
|
https://github.com/angr/angr/issues/834
|
Traceback (most recent call last):
File "/usr/lib/python2.7/multiprocessing/process.py", line 258, in _bootstrap
self.run()
File "/usr/lib/python2.7/multiprocessing/process.py", line 114, in run
self._target(*self._args, **self._kwargs)
File "gfh_gen_cfg.py", line 23, in worker
g = b.analyses.CFG(fail_fast=True)
File "/home/angr/angr-dev/angr/angr/analyses/analysis.py", line 96, in make_analysis
oself.__init__(*args, **kwargs)
File "/home/angr/angr-dev/angr/angr/analyses/cfg/cfg.py", line 58, in __init__
CFGFast.__init__(self, **kwargs)
File "/home/angr/angr-dev/angr/angr/analyses/cfg/cfg_fast.py", line 819, in __init__
self._analyze()
File "/home/angr/angr-dev/angr/angr/analyses/forward_analysis.py", line 508, in _analyze
self._analysis_core_baremetal()
File "/home/angr/angr-dev/angr/angr/analyses/forward_analysis.py", line 633, in _analysis_core_baremetal
self._process_job_and_get_successors(job_info)
File "/home/angr/angr-dev/angr/angr/analyses/forward_analysis.py", line 651, in _process_job_and_get_successors
successors = self._get_successors(job)
File "/home/angr/angr-dev/angr/angr/analyses/cfg/cfg_fast.py", line 1210, in _get_successors
jobs = self._scan_block(addr, current_function_addr, jumpkind, src_node, src_ins_addr, src_stmt_idx)
File "/home/angr/angr-dev/angr/angr/analyses/cfg/cfg_fast.py", line 1484, in _scan_block
previous_src_ins_addr, previous_src_stmt_idx)
File "/home/angr/angr-dev/angr/angr/analyses/cfg/cfg_fast.py", line 1656, in _scan_irsb
stmt_idx
File "/home/angr/angr-dev/angr/angr/analyses/cfg/cfg_fast.py", line 1695, in _create_jobs
jumpkind
File "/home/angr/angr-dev/angr/angr/analyses/cfg/cfg_fast.py", line 2354, in _resolve_indirect_jump_timelessly
r, resolved_targets = res.resolve(self, addr, func_addr, block, jumpkind)
File "/home/angr/angr-dev/angr/angr/analyses/cfg/indirect_jump_resolvers/x86_pe_iat.py", line 27, in resolve
target = cfg._fast_memory_load_pointer(slot)
File "/home/angr/angr-dev/angr/angr/analyses/cfg/cfg_base.py", line 862, in _fast_memory_load_pointer
buf, size = self._fast_memory_load(addr)
TypeError: 'NoneType' object is not iterable
|
TypeError
|
def _get_successors(self, job):
# Extract initial values
state = job.state
addr = job.addr
# Obtain successors
if addr not in self._avoid_runs:
all_successors = (
job.sim_successors.flat_successors
+ job.sim_successors.unconstrained_successors
)
else:
all_successors = []
# save those states
job.vfg_node.final_states = all_successors[:]
# Update thumb_addrs
if job.sim_successors.sort == "IRSB" and state.thumb:
self._thumb_addrs.update(job.sim_successors.artifacts["insn_addrs"])
if len(all_successors) == 0:
if job.sim_successors.sort == "SimProcedure" and isinstance(
job.sim_successors.artifacts["procedure"],
simuvex.procedures.SimProcedures["stubs"]["PathTerminator"],
):
# If there is no valid exit in this branch and it's not
# intentional (e.g. caused by a SimProcedure that does not
# do_return) , we should make it return to its callsite.
# However, we don't want to use its state as it might be
# corrupted. Just create a link in the exit_targets map.
retn_target = job.call_stack.current_return_target
if retn_target is not None:
new_call_stack = job.call_stack_copy()
exit_target_tpl = new_call_stack.stack_suffix(
self._context_sensitivity_level
) + (retn_target,)
self._exit_targets[job.call_stack_suffix + (addr,)].append(
(exit_target_tpl, "Ijk_Ret")
)
else:
# This is intentional. We shall remove all the pending returns generated before along this path.
self._remove_pending_return(job, self._pending_returns)
# If this is a call exit, we shouldn't put the default exit (which
# is artificial) into the CFG. The exits will be Ijk_Call and
# Ijk_FakeRet, and Ijk_Call always goes first
job.is_call_jump = any(
[self._is_call_jumpkind(i.scratch.jumpkind) for i in all_successors]
)
call_targets = [
i.se.exactly_int(i.ip)
for i in all_successors
if self._is_call_jumpkind(i.scratch.jumpkind)
]
job.call_target = None if not call_targets else call_targets[0]
job.is_return_jump = (
len(all_successors) and all_successors[0].scratch.jumpkind == "Ijk_Ret"
)
if job.is_call_jump:
# create the call task
# TODO: correctly fill the return address
call_task = CallAnalysis(job.addr, None, [])
self._task_stack.append(call_task)
job.call_task = call_task
return all_successors
|
def _get_successors(self, job):
# Extract initial values
state = job.state
addr = job.addr
# Obtain successors
if addr not in self._avoid_runs:
all_successors = (
job.sim_successors.flat_successors
+ job.sim_successors.unconstrained_successors
)
else:
all_successors = []
# save those states
job.vfg_node.final_states = all_successors[:]
# Update thumb_addrs
if job.sim_successors.sort == "IRSB" and state.thumb:
self._thumb_addrs.update(job.sim_successors.artifacts["instruction_addrs"])
if len(all_successors) == 0:
if job.sim_successors.sort == "SimProcedure" and isinstance(
job.sim_successors.artifacts["procedure"],
simuvex.procedures.SimProcedures["stubs"]["PathTerminator"],
):
# If there is no valid exit in this branch and it's not
# intentional (e.g. caused by a SimProcedure that does not
# do_return) , we should make it return to its callsite.
# However, we don't want to use its state as it might be
# corrupted. Just create a link in the exit_targets map.
retn_target = job.call_stack.current_return_target
if retn_target is not None:
new_call_stack = job.call_stack_copy()
exit_target_tpl = new_call_stack.stack_suffix(
self._context_sensitivity_level
) + (retn_target,)
self._exit_targets[job.call_stack_suffix + (addr,)].append(
(exit_target_tpl, "Ijk_Ret")
)
else:
# This is intentional. We shall remove all the pending returns generated before along this path.
self._remove_pending_return(job, self._pending_returns)
# If this is a call exit, we shouldn't put the default exit (which
# is artificial) into the CFG. The exits will be Ijk_Call and
# Ijk_FakeRet, and Ijk_Call always goes first
job.is_call_jump = any(
[self._is_call_jumpkind(i.scratch.jumpkind) for i in all_successors]
)
call_targets = [
i.se.exactly_int(i.ip)
for i in all_successors
if self._is_call_jumpkind(i.scratch.jumpkind)
]
job.call_target = None if not call_targets else call_targets[0]
job.is_return_jump = (
len(all_successors) and all_successors[0].scratch.jumpkind == "Ijk_Ret"
)
if job.is_call_jump:
# create the call task
# TODO: correctly fill the return address
call_task = CallAnalysis(job.addr, None, [])
self._task_stack.append(call_task)
job.call_task = call_task
return all_successors
|
https://github.com/angr/angr/issues/482
|
vfg = proj.analyses.VFG(cfg=cfg, context_sensitivity_level=0, start=f_addrs[0])
WARNING | 2017-06-07 04:19:33,094 | angr.analyses.vfg | The given CFG is not normalized, which might impact the performance/accuracy of the VFG analysis.
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/opt/pypy/site-packages/angr/analysis.py", line 99, in make_analysis
oself.__init__(*args, **kwargs)
File "/opt/pypy/site-packages/angr/analyses/vfg.py", line 341, in __init__
self._analyze()
File "/opt/pypy/site-packages/angr/analyses/forward_analysis.py", line 500, in _analyze
self._analysis_core_baremetal()
File "/opt/pypy/site-packages/angr/analyses/forward_analysis.py", line 625, in _analysis_core_baremetal
self._process_job_and_get_successors(job_info)
File "/opt/pypy/site-packages/angr/analyses/forward_analysis.py", line 643, in _process_job_and_get_successors
successors = self._get_successors(job)
File "/opt/pypy/site-packages/angr/analyses/vfg.py", line 683, in _get_successors
self._thumb_addrs.update(job.sim_successors.artifacts['instruction_addrs'])
KeyError: 'instruction_addrs'
|
KeyError
|
async def _process(self, query: str):
username, subject, client_id, access_token, refresh_token, args = _parse_query(
query
)
logger.debug(f"user: {username}, client: {client_id}")
logger.debug(args)
app_state, user_state, client_state = self._state
args_state: dict = unmarshal(args)
events_state: Optional[dict] = args_state.get("", None)
if isinstance(events_state, dict):
events_state = {k: Expando(v) for k, v in events_state.items()}
del args_state[""]
q = Q(
site=self._site,
mode=self._mode,
username=username,
client_id=client_id,
route=self._route,
app_state=app_state,
user_state=_session_for(user_state, username),
client_state=_session_for(client_state, client_id),
auth=Auth(username, subject, access_token, refresh_token),
args=Expando(args_state),
events=Expando(events_state),
)
# noinspection PyBroadException,PyPep8
try:
await self._handle(q)
except:
logger.exception("Unhandled exception")
# noinspection PyBroadException,PyPep8
try:
q.page.drop()
# TODO replace this with a custom-designed error display
q.page["__unhandled_error__"] = markdown_card(
box="1 1 -1 -1",
title="Error",
content=f"```\n{traceback.format_exc()}\n```",
)
await q.page.save()
except:
logger.exception("Failed transmitting unhandled exception")
|
async def _process(self, query: str):
username, subject, client_id, access_token, refresh_token, args = _parse_query(
query
)
logger.debug(f"user: {username}, client: {client_id}")
logger.debug(args)
app_state, user_state, client_state = self._state
args_state: dict = unmarshal(args)
events_state: Optional[dict] = args_state.get("", None)
if events_state:
events_state = {k: Expando(v) for k, v in events_state.items()}
del args_state[""]
q = Q(
site=self._site,
mode=self._mode,
username=username,
client_id=client_id,
route=self._route,
app_state=app_state,
user_state=_session_for(user_state, username),
client_state=_session_for(client_state, client_id),
auth=Auth(username, subject, access_token, refresh_token),
args=Expando(args_state),
events=Expando(events_state),
)
# noinspection PyBroadException,PyPep8
try:
await self._handle(q)
except:
logger.exception("Unhandled exception")
# noinspection PyBroadException,PyPep8
try:
q.page.drop()
# TODO replace this with a custom-designed error display
q.page["__unhandled_error__"] = markdown_card(
box="1 1 -1 -1",
title="Error",
content=f"```\n{traceback.format_exc()}\n```",
)
await q.page.save()
except:
logger.exception("Failed transmitting unhandled exception")
|
https://github.com/h2oai/wave/issues/510
|
ERROR: Exception in ASGI application
Traceback (most recent call last):
File "/home/dott/Projects/q-model-validation/venv/lib/python3.8/site-packages/uvicorn/protocols/http/h11_impl.py", line 389, in run_asgi
result = await app(self.scope, self.receive, self.send)
File "/home/dott/Projects/q-model-validation/venv/lib/python3.8/site-packages/uvicorn/middleware/proxy_headers.py", line 45, in __call__
return await self.app(scope, receive, send)
File "/home/dott/Projects/q-model-validation/venv/lib/python3.8/site-packages/h2o_wave/server.py", line 323, in __call__
await self._app.app(scope, receive, send)
File "/home/dott/Projects/q-model-validation/venv/lib/python3.8/site-packages/starlette/routing.py", line 566, in __call__
await route.handle(scope, receive, send)
File "/home/dott/Projects/q-model-validation/venv/lib/python3.8/site-packages/starlette/routing.py", line 227, in handle
await self.app(scope, receive, send)
File "/home/dott/Projects/q-model-validation/venv/lib/python3.8/site-packages/starlette/routing.py", line 44, in app
await response(scope, receive, send)
File "/home/dott/Projects/q-model-validation/venv/lib/python3.8/site-packages/starlette/responses.py", line 152, in __call__
await self.background()
File "/home/dott/Projects/q-model-validation/venv/lib/python3.8/site-packages/starlette/background.py", line 18, in __call__
await self.func(*self.args, **self.kwargs)
File "/home/dott/Projects/q-model-validation/venv/lib/python3.8/site-packages/h2o_wave/server.py", line 248, in _process
events_state = {k: Expando(v) for k, v in events_state.items()}
AttributeError: 'bool' object has no attribute 'items'
|
AttributeError
|
def __init__(self, filename: str, title: str, description: str, source: str):
self.name = os.path.splitext(filename)[0]
self.filename = filename
self.title = title
self.description = description
self.source = source
self.code = highlight(source, py_lexer, html_formatter)
self.previous_example: Optional[Example] = None
self.next_example: Optional[Example] = None
self.process: Optional[subprocess.Popen] = None
self.is_app = source.find("@app(") > 0
|
def __init__(self, filename: str, title: str, description: str, source: str):
self.name = os.path.splitext(filename)[0]
self.filename = filename
self.title = title
self.description = description
self.source = source
self.code = highlight(source, py_lexer, html_formatter)
self.previous_example: Optional[Example] = None
self.next_example: Optional[Example] = None
self.process: Optional[asyncio.subprocess.Process] = None
self.is_app = source.find("@app(") > 0
|
https://github.com/h2oai/wave/issues/397
|
(.venv) C:\code\python\h2oai\wave-0.10.0-windows-amd64>wave run --no-reload examples.tour
----------------------------------------
Welcome to the H2O Wave Interactive Tour!
Go to http://localhost:10101/tour
----------------------------------------
INFO: Started server process [22080]
INFO: Waiting for application startup.
INFO: Application startup complete.
INFO: Uvicorn running on http://127.0.0.1:8000 (Press CTRL+C to quit)
INFO: 127.0.0.1:51930 - "POST / HTTP/1.1" 200 OK
Unhandled exception
Traceback (most recent call last):
File "c:\code\python\h2oai\wave-0.10.0-windows-amd64\.venv\lib\site-packages\h2o_wave\server.py", line 265, in _process
await self._handle(q)
File ".\examples\tour.py", line 197, in serve
await show_example(q, catalog[route])
File ".\examples\tour.py", line 166, in show_example
await active_example.start()
File ".\examples\tour.py", line 48, in start
sys.executable, os.path.join(example_dir, self.filename)
File "C:\Python\Python37\Lib\asyncio\subprocess.py", line 217, in create_subprocess_exec
stderr=stderr, **kwds)
File "C:\Python\Python37\Lib\asyncio\base_events.py", line 1544, in subprocess_exec
bufsize, **kwargs)
File "C:\Python\Python37\Lib\asyncio\base_events.py", line 462, in _make_subprocess_transport
raise NotImplementedError
NotImplementedError
|
NotImplementedError
|
async def start(self):
# The environment passed into Popen must include SYSTEMROOT, otherwise Popen will fail when called
# inside python during initialization if %PATH% is configured, but without %SYSTEMROOT%.
env = (
{"SYSTEMROOT": os.environ["SYSTEMROOT"]}
if sys.platform.lower().startswith("win")
else {}
)
if self.is_app:
self.process = subprocess.Popen(
[
sys.executable,
"-m",
"uvicorn",
"--port",
_app_port,
f"examples.{self.name}:main",
],
env=dict(
H2O_WAVE_EXTERNAL_ADDRESS=f"http://{_app_host}:{_app_port}", **env
),
)
else:
self.process = subprocess.Popen(
[sys.executable, os.path.join(example_dir, self.filename)], env=env
)
|
async def start(self):
if self.is_app:
self.process = await asyncio.create_subprocess_exec(
sys.executable,
"-m",
"uvicorn",
"--port",
_app_port,
f"examples.{self.name}:main",
env=dict(H2O_WAVE_EXTERNAL_ADDRESS=f"http://{_app_host}:{_app_port}"),
)
else:
self.process = await asyncio.create_subprocess_exec(
sys.executable, os.path.join(example_dir, self.filename)
)
|
https://github.com/h2oai/wave/issues/397
|
(.venv) C:\code\python\h2oai\wave-0.10.0-windows-amd64>wave run --no-reload examples.tour
----------------------------------------
Welcome to the H2O Wave Interactive Tour!
Go to http://localhost:10101/tour
----------------------------------------
INFO: Started server process [22080]
INFO: Waiting for application startup.
INFO: Application startup complete.
INFO: Uvicorn running on http://127.0.0.1:8000 (Press CTRL+C to quit)
INFO: 127.0.0.1:51930 - "POST / HTTP/1.1" 200 OK
Unhandled exception
Traceback (most recent call last):
File "c:\code\python\h2oai\wave-0.10.0-windows-amd64\.venv\lib\site-packages\h2o_wave\server.py", line 265, in _process
await self._handle(q)
File ".\examples\tour.py", line 197, in serve
await show_example(q, catalog[route])
File ".\examples\tour.py", line 166, in show_example
await active_example.start()
File ".\examples\tour.py", line 48, in start
sys.executable, os.path.join(example_dir, self.filename)
File "C:\Python\Python37\Lib\asyncio\subprocess.py", line 217, in create_subprocess_exec
stderr=stderr, **kwds)
File "C:\Python\Python37\Lib\asyncio\base_events.py", line 1544, in subprocess_exec
bufsize, **kwargs)
File "C:\Python\Python37\Lib\asyncio\base_events.py", line 462, in _make_subprocess_transport
raise NotImplementedError
NotImplementedError
|
NotImplementedError
|
async def stop(self):
if self.process and self.process.returncode is None:
self.process.terminate()
self.process.wait()
|
async def stop(self):
if self.process and self.process.returncode is None:
self.process.terminate()
|
https://github.com/h2oai/wave/issues/397
|
(.venv) C:\code\python\h2oai\wave-0.10.0-windows-amd64>wave run --no-reload examples.tour
----------------------------------------
Welcome to the H2O Wave Interactive Tour!
Go to http://localhost:10101/tour
----------------------------------------
INFO: Started server process [22080]
INFO: Waiting for application startup.
INFO: Application startup complete.
INFO: Uvicorn running on http://127.0.0.1:8000 (Press CTRL+C to quit)
INFO: 127.0.0.1:51930 - "POST / HTTP/1.1" 200 OK
Unhandled exception
Traceback (most recent call last):
File "c:\code\python\h2oai\wave-0.10.0-windows-amd64\.venv\lib\site-packages\h2o_wave\server.py", line 265, in _process
await self._handle(q)
File ".\examples\tour.py", line 197, in serve
await show_example(q, catalog[route])
File ".\examples\tour.py", line 166, in show_example
await active_example.start()
File ".\examples\tour.py", line 48, in start
sys.executable, os.path.join(example_dir, self.filename)
File "C:\Python\Python37\Lib\asyncio\subprocess.py", line 217, in create_subprocess_exec
stderr=stderr, **kwds)
File "C:\Python\Python37\Lib\asyncio\base_events.py", line 1544, in subprocess_exec
bufsize, **kwargs)
File "C:\Python\Python37\Lib\asyncio\base_events.py", line 462, in _make_subprocess_transport
raise NotImplementedError
NotImplementedError
|
NotImplementedError
|
def fn(heads, relations, tails, num_chunks, chunk_size, neg_sample_size):
hidden_dim = heads.shape[1]
emb_real, emb_img = nd.split(heads, num_outputs=2, axis=-1)
rel_real, rel_img = nd.split(relations, num_outputs=2, axis=-1)
real = emb_real * rel_real - emb_img * rel_img
img = emb_real * rel_img + emb_img * rel_real
emb_complex = nd.concat(real, img, dim=-1)
tmp = emb_complex.reshape(num_chunks, chunk_size, hidden_dim)
tails = tails.reshape(num_chunks, neg_sample_size, hidden_dim)
tails = nd.transpose(tails, axes=(0, 2, 1))
return nd.linalg_gemm2(tmp, tails)
|
def fn(heads, relations, tails, num_chunks, chunk_size, neg_sample_size):
hidden_dim = heads.shape[1]
tails = tails.reshape(num_chunks, neg_sample_size, hidden_dim)
tails = nd.transpose(tails, axes=(0, 2, 1))
tmp = (heads * relations).reshape(num_chunks, chunk_size, hidden_dim)
return nd.linalg_gemm2(tmp, tails)
|
https://github.com/dmlc/dgl/issues/755
|
test_shared_mem_store.test_init ... /var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: Initializer is not set. Use zero initializer instead. To suppress this warning, use `set_initializer` to explicitly specify which initializer to use.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access node data of all nodes.It's recommended to node data of a subset of nodes directly.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access edge data of all edges.It's recommended to edge data of a subset of edges directly.
warnings.warn(msg, warn_type)
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_node_feat for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_node_test4 for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_in for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_edge_feat for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_edge_test4 for shared memory
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: Initializer is not set. Use zero initializer instead. To suppress this warning, use `set_initializer` to explicitly specify which initializer to use.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access node data of all nodes.It's recommended to node data of a subset of nodes directly.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access edge data of all edges.It's recommended to edge data of a subset of edges directly.
warnings.warn(msg, warn_type)
Arrays are not almost equal to 7 decimals
(mismatch 100.0%)
x: array([20., 20., 20., 20., 20., 20., 20., 20., 20., 20.], dtype=float32)
y: array(11)
Traceback (most recent call last):
File "/var/jenkins_home/workspace/DGL_PR-752@2/tests/distributed/test_shared_mem_store.py", line 65, in check_init_func
check_array_shared_memory(g, worker_id, [g.nodes[:].data['test4'], g.edges[:].data['test4']])
File "/var/jenkins_home/workspace/DGL_PR-752@2/tests/distributed/test_shared_mem_store.py", line 28, in check_array_shared_memory
assert_almost_equal(F.asnumpy(arr[0]), i + 10)
File "/usr/local/lib/python3.5/dist-packages/numpy/testing/nose_tools/utils.py", line 567, in assert_almost_equal
return assert_array_almost_equal(actual, desired, decimal, err_msg)
File "/usr/local/lib/python3.5/dist-packages/numpy/testing/nose_tools/utils.py", line 965, in assert_array_almost_equal
precision=decimal)
File "/usr/local/lib/python3.5/dist-packages/numpy/testing/nose_tools/utils.py", line 781, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Arrays are not almost equal to 7 decimals
(mismatch 100.0%)
x: array([20., 20., 20., 20., 20., 20., 20., 20., 20., 20.], dtype=float32)
y: array(11)
FAIL
|
AssertionError
|
def __init__(self):
super(ComplExScore, self).__init__()
|
def __init__(self):
super(DistMultScore, self).__init__()
|
https://github.com/dmlc/dgl/issues/755
|
test_shared_mem_store.test_init ... /var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: Initializer is not set. Use zero initializer instead. To suppress this warning, use `set_initializer` to explicitly specify which initializer to use.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access node data of all nodes.It's recommended to node data of a subset of nodes directly.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access edge data of all edges.It's recommended to edge data of a subset of edges directly.
warnings.warn(msg, warn_type)
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_node_feat for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_node_test4 for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_in for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_edge_feat for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_edge_test4 for shared memory
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: Initializer is not set. Use zero initializer instead. To suppress this warning, use `set_initializer` to explicitly specify which initializer to use.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access node data of all nodes.It's recommended to node data of a subset of nodes directly.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access edge data of all edges.It's recommended to edge data of a subset of edges directly.
warnings.warn(msg, warn_type)
Arrays are not almost equal to 7 decimals
(mismatch 100.0%)
x: array([20., 20., 20., 20., 20., 20., 20., 20., 20., 20.], dtype=float32)
y: array(11)
Traceback (most recent call last):
File "/var/jenkins_home/workspace/DGL_PR-752@2/tests/distributed/test_shared_mem_store.py", line 65, in check_init_func
check_array_shared_memory(g, worker_id, [g.nodes[:].data['test4'], g.edges[:].data['test4']])
File "/var/jenkins_home/workspace/DGL_PR-752@2/tests/distributed/test_shared_mem_store.py", line 28, in check_array_shared_memory
assert_almost_equal(F.asnumpy(arr[0]), i + 10)
File "/usr/local/lib/python3.5/dist-packages/numpy/testing/nose_tools/utils.py", line 567, in assert_almost_equal
return assert_array_almost_equal(actual, desired, decimal, err_msg)
File "/usr/local/lib/python3.5/dist-packages/numpy/testing/nose_tools/utils.py", line 965, in assert_array_almost_equal
precision=decimal)
File "/usr/local/lib/python3.5/dist-packages/numpy/testing/nose_tools/utils.py", line 781, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Arrays are not almost equal to 7 decimals
(mismatch 100.0%)
x: array([20., 20., 20., 20., 20., 20., 20., 20., 20., 20.], dtype=float32)
y: array(11)
FAIL
|
AssertionError
|
def edge_func(self, edges):
real_head, img_head = nd.split(edges.src["emb"], num_outputs=2, axis=-1)
real_tail, img_tail = nd.split(edges.dst["emb"], num_outputs=2, axis=-1)
real_rel, img_rel = nd.split(edges.data["emb"], num_outputs=2, axis=-1)
score = (
real_head * real_tail * real_rel
+ img_head * img_tail * real_rel
+ real_head * img_tail * img_rel
- img_head * real_tail * img_rel
)
# TODO: check if there exists minus sign and if gamma should be used here(jin)
return {"score": nd.sum(score, -1)}
|
def edge_func(self, edges):
head = edges.src["emb"]
tail = edges.dst["emb"]
rel = edges.data["emb"]
score = head * rel * tail
# TODO: check if there exists minus sign and if gamma should be used here(jin)
return {"score": nd.sum(score, axis=-1)}
|
https://github.com/dmlc/dgl/issues/755
|
test_shared_mem_store.test_init ... /var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: Initializer is not set. Use zero initializer instead. To suppress this warning, use `set_initializer` to explicitly specify which initializer to use.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access node data of all nodes.It's recommended to node data of a subset of nodes directly.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access edge data of all edges.It's recommended to edge data of a subset of edges directly.
warnings.warn(msg, warn_type)
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_node_feat for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_node_test4 for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_in for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_edge_feat for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_edge_test4 for shared memory
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: Initializer is not set. Use zero initializer instead. To suppress this warning, use `set_initializer` to explicitly specify which initializer to use.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access node data of all nodes.It's recommended to node data of a subset of nodes directly.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access edge data of all edges.It's recommended to edge data of a subset of edges directly.
warnings.warn(msg, warn_type)
Arrays are not almost equal to 7 decimals
(mismatch 100.0%)
x: array([20., 20., 20., 20., 20., 20., 20., 20., 20., 20.], dtype=float32)
y: array(11)
Traceback (most recent call last):
File "/var/jenkins_home/workspace/DGL_PR-752@2/tests/distributed/test_shared_mem_store.py", line 65, in check_init_func
check_array_shared_memory(g, worker_id, [g.nodes[:].data['test4'], g.edges[:].data['test4']])
File "/var/jenkins_home/workspace/DGL_PR-752@2/tests/distributed/test_shared_mem_store.py", line 28, in check_array_shared_memory
assert_almost_equal(F.asnumpy(arr[0]), i + 10)
File "/usr/local/lib/python3.5/dist-packages/numpy/testing/nose_tools/utils.py", line 567, in assert_almost_equal
return assert_array_almost_equal(actual, desired, decimal, err_msg)
File "/usr/local/lib/python3.5/dist-packages/numpy/testing/nose_tools/utils.py", line 965, in assert_array_almost_equal
precision=decimal)
File "/usr/local/lib/python3.5/dist-packages/numpy/testing/nose_tools/utils.py", line 781, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Arrays are not almost equal to 7 decimals
(mismatch 100.0%)
x: array([20., 20., 20., 20., 20., 20., 20., 20., 20., 20.], dtype=float32)
y: array(11)
FAIL
|
AssertionError
|
def create_neg(self, neg_head):
if neg_head:
def fn(heads, relations, tails, num_chunks, chunk_size, neg_sample_size):
hidden_dim = heads.shape[1]
emb_real, emb_img = nd.split(tails, num_outputs=2, axis=-1)
rel_real, rel_img = nd.split(relations, num_outputs=2, axis=-1)
real = emb_real * rel_real + emb_img * rel_img
img = -emb_real * rel_img + emb_img * rel_real
emb_complex = nd.concat(real, img, dim=-1)
tmp = emb_complex.reshape(num_chunks, chunk_size, hidden_dim)
heads = heads.reshape(num_chunks, neg_sample_size, hidden_dim)
heads = nd.transpose(heads, axes=(0, 2, 1))
return nd.linalg_gemm2(tmp, heads)
return fn
else:
def fn(heads, relations, tails, num_chunks, chunk_size, neg_sample_size):
hidden_dim = heads.shape[1]
emb_real, emb_img = nd.split(heads, num_outputs=2, axis=-1)
rel_real, rel_img = nd.split(relations, num_outputs=2, axis=-1)
real = emb_real * rel_real - emb_img * rel_img
img = emb_real * rel_img + emb_img * rel_real
emb_complex = nd.concat(real, img, dim=-1)
tmp = emb_complex.reshape(num_chunks, chunk_size, hidden_dim)
tails = tails.reshape(num_chunks, neg_sample_size, hidden_dim)
tails = nd.transpose(tails, axes=(0, 2, 1))
return nd.linalg_gemm2(tmp, tails)
return fn
|
def create_neg(self, neg_head):
if neg_head:
def fn(heads, relations, tails, num_chunks, chunk_size, neg_sample_size):
hidden_dim = heads.shape[1]
heads = heads.reshape(num_chunks, neg_sample_size, hidden_dim)
heads = nd.transpose(heads, axes=(0, 2, 1))
tmp = (tails * relations).reshape(num_chunks, chunk_size, hidden_dim)
return nd.linalg_gemm2(tmp, heads)
return fn
else:
def fn(heads, relations, tails, num_chunks, chunk_size, neg_sample_size):
hidden_dim = heads.shape[1]
tails = tails.reshape(num_chunks, neg_sample_size, hidden_dim)
tails = nd.transpose(tails, axes=(0, 2, 1))
tmp = (heads * relations).reshape(num_chunks, chunk_size, hidden_dim)
return nd.linalg_gemm2(tmp, tails)
return fn
|
https://github.com/dmlc/dgl/issues/755
|
test_shared_mem_store.test_init ... /var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: Initializer is not set. Use zero initializer instead. To suppress this warning, use `set_initializer` to explicitly specify which initializer to use.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access node data of all nodes.It's recommended to node data of a subset of nodes directly.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access edge data of all edges.It's recommended to edge data of a subset of edges directly.
warnings.warn(msg, warn_type)
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_node_feat for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_node_test4 for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_in for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_edge_feat for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_edge_test4 for shared memory
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: Initializer is not set. Use zero initializer instead. To suppress this warning, use `set_initializer` to explicitly specify which initializer to use.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access node data of all nodes.It's recommended to node data of a subset of nodes directly.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access edge data of all edges.It's recommended to edge data of a subset of edges directly.
warnings.warn(msg, warn_type)
Arrays are not almost equal to 7 decimals
(mismatch 100.0%)
x: array([20., 20., 20., 20., 20., 20., 20., 20., 20., 20.], dtype=float32)
y: array(11)
Traceback (most recent call last):
File "/var/jenkins_home/workspace/DGL_PR-752@2/tests/distributed/test_shared_mem_store.py", line 65, in check_init_func
check_array_shared_memory(g, worker_id, [g.nodes[:].data['test4'], g.edges[:].data['test4']])
File "/var/jenkins_home/workspace/DGL_PR-752@2/tests/distributed/test_shared_mem_store.py", line 28, in check_array_shared_memory
assert_almost_equal(F.asnumpy(arr[0]), i + 10)
File "/usr/local/lib/python3.5/dist-packages/numpy/testing/nose_tools/utils.py", line 567, in assert_almost_equal
return assert_array_almost_equal(actual, desired, decimal, err_msg)
File "/usr/local/lib/python3.5/dist-packages/numpy/testing/nose_tools/utils.py", line 965, in assert_array_almost_equal
precision=decimal)
File "/usr/local/lib/python3.5/dist-packages/numpy/testing/nose_tools/utils.py", line 781, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Arrays are not almost equal to 7 decimals
(mismatch 100.0%)
x: array([20., 20., 20., 20., 20., 20., 20., 20., 20., 20.], dtype=float32)
y: array(11)
FAIL
|
AssertionError
|
def main(args):
# load and preprocess dataset
data = load_data(args)
features = mx.nd.array(data.features)
labels = mx.nd.array(data.labels)
mask = mx.nd.array(np.where(data.train_mask == 1))
test_mask = mx.nd.array(np.where(data.test_mask == 1))
val_mask = mx.nd.array(np.where(data.val_mask == 1))
in_feats = features.shape[1]
n_classes = data.num_labels
n_edges = data.graph.number_of_edges()
if args.gpu < 0:
ctx = mx.cpu()
else:
ctx = mx.gpu(args.gpu)
features = features.as_in_context(ctx)
labels = labels.as_in_context(ctx)
mask = mask.as_in_context(ctx)
test_mask = test_mask.as_in_context(ctx)
val_mask = val_mask.as_in_context(ctx)
# create graph
g = data.graph
# add self-loop
g.remove_edges_from(nx.selfloop_edges(g))
g = DGLGraph(g)
g.add_edges(g.nodes(), g.nodes())
# create model
heads = ([args.num_heads] * args.num_layers) + [args.num_out_heads]
model = GAT(
g,
args.num_layers,
in_feats,
args.num_hidden,
n_classes,
heads,
elu,
args.in_drop,
args.attn_drop,
args.alpha,
args.residual,
)
stopper = EarlyStopping(patience=100)
model.initialize(ctx=ctx)
# use optimizer
trainer = gluon.Trainer(model.collect_params(), "adam", {"learning_rate": args.lr})
dur = []
for epoch in range(args.epochs):
if epoch >= 3:
t0 = time.time()
# forward
with mx.autograd.record():
logits = model(features)
loss = mx.nd.softmax_cross_entropy(
logits[mask].squeeze(), labels[mask].squeeze()
)
loss.backward()
trainer.step(mask.shape[0])
if epoch >= 3:
dur.append(time.time() - t0)
print(
"Epoch {:05d} | Loss {:.4f} | Time(s) {:.4f} | ETputs(KTEPS) {:.2f}".format(
epoch, loss.asnumpy()[0], np.mean(dur), n_edges / np.mean(dur) / 1000
)
)
val_accuracy = evaluate(model, features, labels, val_mask)
print("Validation Accuracy {:.4f}".format(val_accuracy))
if stopper.step(val_accuracy, model):
break
model.load_parameters("model.param")
test_accuracy = evaluate(model, features, labels, test_mask)
print("Test Accuracy {:.4f}".format(test_accuracy))
|
def main(args):
# load and preprocess dataset
data = load_data(args)
features = mx.nd.array(data.features)
labels = mx.nd.array(data.labels)
mask = mx.nd.array(np.where(data.train_mask == 1))
test_mask = mx.nd.array(np.where(data.test_mask == 1))
val_mask = mx.nd.array(np.where(data.val_mask == 1))
in_feats = features.shape[1]
n_classes = data.num_labels
n_edges = data.graph.number_of_edges()
if args.gpu < 0:
ctx = mx.cpu()
else:
ctx = mx.gpu(args.gpu)
features = features.as_in_context(ctx)
labels = labels.as_in_context(ctx)
mask = mask.as_in_context(ctx)
test_mask = test_mask.as_in_context(ctx)
val_mask = val_mask.as_in_context(ctx)
# create graph
g = data.graph
# add self-loop
g.remove_edges_from(g.selfloop_edges())
g = DGLGraph(g)
g.add_edges(g.nodes(), g.nodes())
# create model
heads = ([args.num_heads] * args.num_layers) + [args.num_out_heads]
model = GAT(
g,
args.num_layers,
in_feats,
args.num_hidden,
n_classes,
heads,
elu,
args.in_drop,
args.attn_drop,
args.alpha,
args.residual,
)
stopper = EarlyStopping(patience=100)
model.initialize(ctx=ctx)
# use optimizer
trainer = gluon.Trainer(model.collect_params(), "adam", {"learning_rate": args.lr})
dur = []
for epoch in range(args.epochs):
if epoch >= 3:
t0 = time.time()
# forward
with mx.autograd.record():
logits = model(features)
loss = mx.nd.softmax_cross_entropy(
logits[mask].squeeze(), labels[mask].squeeze()
)
loss.backward()
trainer.step(mask.shape[0])
if epoch >= 3:
dur.append(time.time() - t0)
print(
"Epoch {:05d} | Loss {:.4f} | Time(s) {:.4f} | ETputs(KTEPS) {:.2f}".format(
epoch, loss.asnumpy()[0], np.mean(dur), n_edges / np.mean(dur) / 1000
)
)
val_accuracy = evaluate(model, features, labels, val_mask)
print("Validation Accuracy {:.4f}".format(val_accuracy))
if stopper.step(val_accuracy, model):
break
model.load_parameters("model.param")
test_accuracy = evaluate(model, features, labels, test_mask)
print("Test Accuracy {:.4f}".format(test_accuracy))
|
https://github.com/dmlc/dgl/issues/755
|
test_shared_mem_store.test_init ... /var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: Initializer is not set. Use zero initializer instead. To suppress this warning, use `set_initializer` to explicitly specify which initializer to use.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access node data of all nodes.It's recommended to node data of a subset of nodes directly.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access edge data of all edges.It's recommended to edge data of a subset of edges directly.
warnings.warn(msg, warn_type)
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_node_feat for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_node_test4 for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_in for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_edge_feat for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_edge_test4 for shared memory
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: Initializer is not set. Use zero initializer instead. To suppress this warning, use `set_initializer` to explicitly specify which initializer to use.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access node data of all nodes.It's recommended to node data of a subset of nodes directly.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access edge data of all edges.It's recommended to edge data of a subset of edges directly.
warnings.warn(msg, warn_type)
Arrays are not almost equal to 7 decimals
(mismatch 100.0%)
x: array([20., 20., 20., 20., 20., 20., 20., 20., 20., 20.], dtype=float32)
y: array(11)
Traceback (most recent call last):
File "/var/jenkins_home/workspace/DGL_PR-752@2/tests/distributed/test_shared_mem_store.py", line 65, in check_init_func
check_array_shared_memory(g, worker_id, [g.nodes[:].data['test4'], g.edges[:].data['test4']])
File "/var/jenkins_home/workspace/DGL_PR-752@2/tests/distributed/test_shared_mem_store.py", line 28, in check_array_shared_memory
assert_almost_equal(F.asnumpy(arr[0]), i + 10)
File "/usr/local/lib/python3.5/dist-packages/numpy/testing/nose_tools/utils.py", line 567, in assert_almost_equal
return assert_array_almost_equal(actual, desired, decimal, err_msg)
File "/usr/local/lib/python3.5/dist-packages/numpy/testing/nose_tools/utils.py", line 965, in assert_array_almost_equal
precision=decimal)
File "/usr/local/lib/python3.5/dist-packages/numpy/testing/nose_tools/utils.py", line 781, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Arrays are not almost equal to 7 decimals
(mismatch 100.0%)
x: array([20., 20., 20., 20., 20., 20., 20., 20., 20., 20.], dtype=float32)
y: array(11)
FAIL
|
AssertionError
|
def main(args):
# load and preprocess dataset
data = load_data(args)
features = mx.nd.array(data.features)
labels = mx.nd.array(data.labels)
train_mask = mx.nd.array(data.train_mask)
val_mask = mx.nd.array(data.val_mask)
test_mask = mx.nd.array(data.test_mask)
in_feats = features.shape[1]
n_classes = data.num_labels
n_edges = data.graph.number_of_edges()
print(
"""----Data statistics------'
#Edges %d
#Classes %d
#Train samples %d
#Val samples %d
#Test samples %d"""
% (
n_edges,
n_classes,
train_mask.sum().asscalar(),
val_mask.sum().asscalar(),
test_mask.sum().asscalar(),
)
)
if args.gpu < 0:
cuda = False
ctx = mx.cpu(0)
else:
cuda = True
ctx = mx.gpu(args.gpu)
features = features.as_in_context(ctx)
labels = labels.as_in_context(ctx)
train_mask = train_mask.as_in_context(ctx)
val_mask = val_mask.as_in_context(ctx)
test_mask = test_mask.as_in_context(ctx)
# create GCN model
g = data.graph
if args.self_loop:
g.remove_edges_from(nx.selfloop_edges(g))
g.add_edges_from(zip(g.nodes(), g.nodes()))
g = DGLGraph(g)
# normalization
degs = g.in_degrees().astype("float32")
norm = mx.nd.power(degs, -0.5)
if cuda:
norm = norm.as_in_context(ctx)
g.ndata["norm"] = mx.nd.expand_dims(norm, 1)
model = GCN(
g, in_feats, args.n_hidden, n_classes, args.n_layers, mx.nd.relu, args.dropout
)
model.initialize(ctx=ctx)
n_train_samples = train_mask.sum().asscalar()
loss_fcn = gluon.loss.SoftmaxCELoss()
# use optimizer
print(model.collect_params())
trainer = gluon.Trainer(
model.collect_params(),
"adam",
{"learning_rate": args.lr, "wd": args.weight_decay},
)
# initialize graph
dur = []
for epoch in range(args.n_epochs):
if epoch >= 3:
t0 = time.time()
# forward
with mx.autograd.record():
pred = model(features)
loss = loss_fcn(pred, labels, mx.nd.expand_dims(train_mask, 1))
loss = loss.sum() / n_train_samples
loss.backward()
trainer.step(batch_size=1)
if epoch >= 3:
loss.asscalar()
dur.append(time.time() - t0)
acc = evaluate(model, features, labels, val_mask)
print(
"Epoch {:05d} | Time(s) {:.4f} | Loss {:.4f} | Accuracy {:.4f} | "
"ETputs(KTEPS) {:.2f}".format(
epoch,
np.mean(dur),
loss.asscalar(),
acc,
n_edges / np.mean(dur) / 1000,
)
)
# test set accuracy
acc = evaluate(model, features, labels, test_mask)
print("Test accuracy {:.2%}".format(acc))
|
def main(args):
# load and preprocess dataset
data = load_data(args)
features = mx.nd.array(data.features)
labels = mx.nd.array(data.labels)
train_mask = mx.nd.array(data.train_mask)
val_mask = mx.nd.array(data.val_mask)
test_mask = mx.nd.array(data.test_mask)
in_feats = features.shape[1]
n_classes = data.num_labels
n_edges = data.graph.number_of_edges()
print(
"""----Data statistics------'
#Edges %d
#Classes %d
#Train samples %d
#Val samples %d
#Test samples %d"""
% (
n_edges,
n_classes,
train_mask.sum().asscalar(),
val_mask.sum().asscalar(),
test_mask.sum().asscalar(),
)
)
if args.gpu < 0:
cuda = False
ctx = mx.cpu(0)
else:
cuda = True
ctx = mx.gpu(args.gpu)
features = features.as_in_context(ctx)
labels = labels.as_in_context(ctx)
train_mask = train_mask.as_in_context(ctx)
val_mask = val_mask.as_in_context(ctx)
test_mask = test_mask.as_in_context(ctx)
# create GCN model
g = data.graph
if args.self_loop:
g.remove_edges_from(g.selfloop_edges())
g.add_edges_from(zip(g.nodes(), g.nodes()))
g = DGLGraph(g)
# normalization
degs = g.in_degrees().astype("float32")
norm = mx.nd.power(degs, -0.5)
if cuda:
norm = norm.as_in_context(ctx)
g.ndata["norm"] = mx.nd.expand_dims(norm, 1)
model = GCN(
g, in_feats, args.n_hidden, n_classes, args.n_layers, mx.nd.relu, args.dropout
)
model.initialize(ctx=ctx)
n_train_samples = train_mask.sum().asscalar()
loss_fcn = gluon.loss.SoftmaxCELoss()
# use optimizer
print(model.collect_params())
trainer = gluon.Trainer(
model.collect_params(),
"adam",
{"learning_rate": args.lr, "wd": args.weight_decay},
)
# initialize graph
dur = []
for epoch in range(args.n_epochs):
if epoch >= 3:
t0 = time.time()
# forward
with mx.autograd.record():
pred = model(features)
loss = loss_fcn(pred, labels, mx.nd.expand_dims(train_mask, 1))
loss = loss.sum() / n_train_samples
loss.backward()
trainer.step(batch_size=1)
if epoch >= 3:
loss.asscalar()
dur.append(time.time() - t0)
acc = evaluate(model, features, labels, val_mask)
print(
"Epoch {:05d} | Time(s) {:.4f} | Loss {:.4f} | Accuracy {:.4f} | "
"ETputs(KTEPS) {:.2f}".format(
epoch,
np.mean(dur),
loss.asscalar(),
acc,
n_edges / np.mean(dur) / 1000,
)
)
# test set accuracy
acc = evaluate(model, features, labels, test_mask)
print("Test accuracy {:.2%}".format(acc))
|
https://github.com/dmlc/dgl/issues/755
|
test_shared_mem_store.test_init ... /var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: Initializer is not set. Use zero initializer instead. To suppress this warning, use `set_initializer` to explicitly specify which initializer to use.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access node data of all nodes.It's recommended to node data of a subset of nodes directly.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access edge data of all edges.It's recommended to edge data of a subset of edges directly.
warnings.warn(msg, warn_type)
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_node_feat for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_node_test4 for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_in for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_edge_feat for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_edge_test4 for shared memory
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: Initializer is not set. Use zero initializer instead. To suppress this warning, use `set_initializer` to explicitly specify which initializer to use.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access node data of all nodes.It's recommended to node data of a subset of nodes directly.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access edge data of all edges.It's recommended to edge data of a subset of edges directly.
warnings.warn(msg, warn_type)
Arrays are not almost equal to 7 decimals
(mismatch 100.0%)
x: array([20., 20., 20., 20., 20., 20., 20., 20., 20., 20.], dtype=float32)
y: array(11)
Traceback (most recent call last):
File "/var/jenkins_home/workspace/DGL_PR-752@2/tests/distributed/test_shared_mem_store.py", line 65, in check_init_func
check_array_shared_memory(g, worker_id, [g.nodes[:].data['test4'], g.edges[:].data['test4']])
File "/var/jenkins_home/workspace/DGL_PR-752@2/tests/distributed/test_shared_mem_store.py", line 28, in check_array_shared_memory
assert_almost_equal(F.asnumpy(arr[0]), i + 10)
File "/usr/local/lib/python3.5/dist-packages/numpy/testing/nose_tools/utils.py", line 567, in assert_almost_equal
return assert_array_almost_equal(actual, desired, decimal, err_msg)
File "/usr/local/lib/python3.5/dist-packages/numpy/testing/nose_tools/utils.py", line 965, in assert_array_almost_equal
precision=decimal)
File "/usr/local/lib/python3.5/dist-packages/numpy/testing/nose_tools/utils.py", line 781, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Arrays are not almost equal to 7 decimals
(mismatch 100.0%)
x: array([20., 20., 20., 20., 20., 20., 20., 20., 20., 20.], dtype=float32)
y: array(11)
FAIL
|
AssertionError
|
def main(args):
# load and preprocess dataset
data = load_data(args)
features = mx.nd.array(data.features)
labels = mx.nd.array(data.labels)
train_mask = mx.nd.array(data.train_mask)
val_mask = mx.nd.array(data.val_mask)
test_mask = mx.nd.array(data.test_mask)
in_feats = features.shape[1]
n_classes = data.num_labels
n_edges = data.graph.number_of_edges()
print(
"""----Data statistics------'
#Edges %d
#Classes %d
#Train samples %d
#Val samples %d
#Test samples %d"""
% (
n_edges,
n_classes,
train_mask.sum().asscalar(),
val_mask.sum().asscalar(),
test_mask.sum().asscalar(),
)
)
if args.gpu < 0:
cuda = False
ctx = mx.cpu(0)
else:
cuda = True
ctx = mx.gpu(args.gpu)
features = features.as_in_context(ctx)
labels = labels.as_in_context(ctx)
train_mask = train_mask.as_in_context(ctx)
val_mask = val_mask.as_in_context(ctx)
test_mask = test_mask.as_in_context(ctx)
# graph preprocess and calculate normalization factor
g = data.graph
# add self loop
if args.self_loop:
g.remove_edges_from(nx.selfloop_edges(g))
g.add_edges_from(zip(g.nodes(), g.nodes()))
g = DGLGraph(g)
# create TAGCN model
model = TAGCN(
g, in_feats, args.n_hidden, n_classes, args.n_layers, mx.nd.relu, args.dropout
)
model.initialize(ctx=ctx)
n_train_samples = train_mask.sum().asscalar()
loss_fcn = gluon.loss.SoftmaxCELoss()
# use optimizer
print(model.collect_params())
trainer = gluon.Trainer(
model.collect_params(),
"adam",
{"learning_rate": args.lr, "wd": args.weight_decay},
)
# initialize graph
dur = []
for epoch in range(args.n_epochs):
if epoch >= 3:
t0 = time.time()
# forward
with mx.autograd.record():
pred = model(features)
loss = loss_fcn(pred, labels, mx.nd.expand_dims(train_mask, 1))
loss = loss.sum() / n_train_samples
loss.backward()
trainer.step(batch_size=1)
if epoch >= 3:
loss.asscalar()
dur.append(time.time() - t0)
acc = evaluate(model, features, labels, val_mask)
print(
"Epoch {:05d} | Time(s) {:.4f} | Loss {:.4f} | Accuracy {:.4f} | "
"ETputs(KTEPS) {:.2f}".format(
epoch,
np.mean(dur),
loss.asscalar(),
acc,
n_edges / np.mean(dur) / 1000,
)
)
print()
acc = evaluate(model, features, labels, val_mask)
print("Test accuracy {:.2%}".format(acc))
|
def main(args):
# load and preprocess dataset
data = load_data(args)
features = mx.nd.array(data.features)
labels = mx.nd.array(data.labels)
train_mask = mx.nd.array(data.train_mask)
val_mask = mx.nd.array(data.val_mask)
test_mask = mx.nd.array(data.test_mask)
in_feats = features.shape[1]
n_classes = data.num_labels
n_edges = data.graph.number_of_edges()
print(
"""----Data statistics------'
#Edges %d
#Classes %d
#Train samples %d
#Val samples %d
#Test samples %d"""
% (
n_edges,
n_classes,
train_mask.sum().asscalar(),
val_mask.sum().asscalar(),
test_mask.sum().asscalar(),
)
)
if args.gpu < 0:
cuda = False
ctx = mx.cpu(0)
else:
cuda = True
ctx = mx.gpu(args.gpu)
features = features.as_in_context(ctx)
labels = labels.as_in_context(ctx)
train_mask = train_mask.as_in_context(ctx)
val_mask = val_mask.as_in_context(ctx)
test_mask = test_mask.as_in_context(ctx)
# graph preprocess and calculate normalization factor
g = data.graph
# add self loop
if args.self_loop:
g.remove_edges_from(g.selfloop_edges())
g.add_edges_from(zip(g.nodes(), g.nodes()))
g = DGLGraph(g)
# create TAGCN model
model = TAGCN(
g, in_feats, args.n_hidden, n_classes, args.n_layers, mx.nd.relu, args.dropout
)
model.initialize(ctx=ctx)
n_train_samples = train_mask.sum().asscalar()
loss_fcn = gluon.loss.SoftmaxCELoss()
# use optimizer
print(model.collect_params())
trainer = gluon.Trainer(
model.collect_params(),
"adam",
{"learning_rate": args.lr, "wd": args.weight_decay},
)
# initialize graph
dur = []
for epoch in range(args.n_epochs):
if epoch >= 3:
t0 = time.time()
# forward
with mx.autograd.record():
pred = model(features)
loss = loss_fcn(pred, labels, mx.nd.expand_dims(train_mask, 1))
loss = loss.sum() / n_train_samples
loss.backward()
trainer.step(batch_size=1)
if epoch >= 3:
loss.asscalar()
dur.append(time.time() - t0)
acc = evaluate(model, features, labels, val_mask)
print(
"Epoch {:05d} | Time(s) {:.4f} | Loss {:.4f} | Accuracy {:.4f} | "
"ETputs(KTEPS) {:.2f}".format(
epoch,
np.mean(dur),
loss.asscalar(),
acc,
n_edges / np.mean(dur) / 1000,
)
)
print()
acc = evaluate(model, features, labels, val_mask)
print("Test accuracy {:.2%}".format(acc))
|
https://github.com/dmlc/dgl/issues/755
|
test_shared_mem_store.test_init ... /var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: Initializer is not set. Use zero initializer instead. To suppress this warning, use `set_initializer` to explicitly specify which initializer to use.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access node data of all nodes.It's recommended to node data of a subset of nodes directly.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access edge data of all edges.It's recommended to edge data of a subset of edges directly.
warnings.warn(msg, warn_type)
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_node_feat for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_node_test4 for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_in for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_edge_feat for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_edge_test4 for shared memory
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: Initializer is not set. Use zero initializer instead. To suppress this warning, use `set_initializer` to explicitly specify which initializer to use.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access node data of all nodes.It's recommended to node data of a subset of nodes directly.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access edge data of all edges.It's recommended to edge data of a subset of edges directly.
warnings.warn(msg, warn_type)
Arrays are not almost equal to 7 decimals
(mismatch 100.0%)
x: array([20., 20., 20., 20., 20., 20., 20., 20., 20., 20.], dtype=float32)
y: array(11)
Traceback (most recent call last):
File "/var/jenkins_home/workspace/DGL_PR-752@2/tests/distributed/test_shared_mem_store.py", line 65, in check_init_func
check_array_shared_memory(g, worker_id, [g.nodes[:].data['test4'], g.edges[:].data['test4']])
File "/var/jenkins_home/workspace/DGL_PR-752@2/tests/distributed/test_shared_mem_store.py", line 28, in check_array_shared_memory
assert_almost_equal(F.asnumpy(arr[0]), i + 10)
File "/usr/local/lib/python3.5/dist-packages/numpy/testing/nose_tools/utils.py", line 567, in assert_almost_equal
return assert_array_almost_equal(actual, desired, decimal, err_msg)
File "/usr/local/lib/python3.5/dist-packages/numpy/testing/nose_tools/utils.py", line 965, in assert_array_almost_equal
precision=decimal)
File "/usr/local/lib/python3.5/dist-packages/numpy/testing/nose_tools/utils.py", line 781, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Arrays are not almost equal to 7 decimals
(mismatch 100.0%)
x: array([20., 20., 20., 20., 20., 20., 20., 20., 20., 20.], dtype=float32)
y: array(11)
FAIL
|
AssertionError
|
def main(args):
torch.manual_seed(args.rnd_seed)
np.random.seed(args.rnd_seed)
random.seed(args.rnd_seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
multitask_data = set(["ppi"])
multitask = args.dataset in multitask_data
# load and preprocess dataset
data = load_data(args)
train_nid = np.nonzero(data.train_mask)[0].astype(np.int64)
# Normalize features
if args.normalize:
train_feats = data.features[train_nid]
scaler = sklearn.preprocessing.StandardScaler()
scaler.fit(train_feats)
features = scaler.transform(data.features)
else:
features = data.features
features = torch.FloatTensor(features)
if not multitask:
labels = torch.LongTensor(data.labels)
else:
labels = torch.FloatTensor(data.labels)
if hasattr(torch, "BoolTensor"):
train_mask = torch.BoolTensor(data.train_mask)
val_mask = torch.BoolTensor(data.val_mask)
test_mask = torch.BoolTensor(data.test_mask)
else:
train_mask = torch.ByteTensor(data.train_mask)
val_mask = torch.ByteTensor(data.val_mask)
test_mask = torch.ByteTensor(data.test_mask)
in_feats = features.shape[1]
n_classes = data.num_labels
n_edges = data.graph.number_of_edges()
n_train_samples = train_mask.sum().item()
n_val_samples = val_mask.sum().item()
n_test_samples = test_mask.sum().item()
print(
"""----Data statistics------'
#Edges %d
#Classes %d
#Train samples %d
#Val samples %d
#Test samples %d"""
% (n_edges, n_classes, n_train_samples, n_val_samples, n_test_samples)
)
# create GCN model
g = data.graph
if args.self_loop and not args.dataset.startswith("reddit"):
g.remove_edges_from(nx.selfloop_edges(g))
g.add_edges_from(zip(g.nodes(), g.nodes()))
print("adding self-loop edges")
g = DGLGraph(g, readonly=True)
# set device for dataset tensors
if args.gpu < 0:
cuda = False
else:
cuda = True
torch.cuda.set_device(args.gpu)
features = features.cuda()
labels = labels.cuda()
train_mask = train_mask.cuda()
val_mask = val_mask.cuda()
test_mask = test_mask.cuda()
print(torch.cuda.get_device_name(0))
g.ndata["features"] = features
g.ndata["labels"] = labels
g.ndata["train_mask"] = train_mask
print("labels shape:", labels.shape)
cluster_iterator = ClusterIter(
args.dataset, g, args.psize, args.batch_size, train_nid, use_pp=args.use_pp
)
print("features shape, ", features.shape)
model = GraphSAGE(
in_feats,
args.n_hidden,
n_classes,
args.n_layers,
F.relu,
args.dropout,
args.use_pp,
)
if cuda:
model.cuda()
# logger and so on
log_dir = save_log_dir(args)
writer = SummaryWriter(log_dir)
logger = Logger(os.path.join(log_dir, "loggings"))
logger.write(args)
# Loss function
if multitask:
print("Using multi-label loss")
loss_f = nn.BCEWithLogitsLoss()
else:
print("Using multi-class loss")
loss_f = nn.CrossEntropyLoss()
# use optimizer
optimizer = torch.optim.Adam(
model.parameters(), lr=args.lr, weight_decay=args.weight_decay
)
# set train_nids to cuda tensor
if cuda:
train_nid = torch.from_numpy(train_nid).cuda()
print(
"current memory after model before training",
torch.cuda.memory_allocated(device=train_nid.device) / 1024 / 1024,
)
start_time = time.time()
best_f1 = -1
for epoch in range(args.n_epochs):
for j, cluster in enumerate(cluster_iterator):
# sync with upper level training graph
cluster.copy_from_parent()
model.train()
# forward
pred = model(cluster)
batch_labels = cluster.ndata["labels"]
batch_train_mask = cluster.ndata["train_mask"]
loss = loss_f(pred[batch_train_mask], batch_labels[batch_train_mask])
optimizer.zero_grad()
loss.backward()
optimizer.step()
# in PPI case, `log_every` is chosen to log one time per epoch.
# Choose your log freq dynamically when you want more info within one epoch
if j % args.log_every == 0:
print(
f"epoch:{epoch}/{args.n_epochs}, Iteration {j}/"
f"{len(cluster_iterator)}:training loss",
loss.item(),
)
writer.add_scalar(
"train/loss",
loss.item(),
global_step=j + epoch * len(cluster_iterator),
)
print(
"current memory:",
torch.cuda.memory_allocated(device=pred.device) / 1024 / 1024,
)
# evaluate
if epoch % args.val_every == 0:
val_f1_mic, val_f1_mac = evaluate(model, g, labels, val_mask, multitask)
print("Val F1-mic{:.4f}, Val F1-mac{:.4f}".format(val_f1_mic, val_f1_mac))
if val_f1_mic > best_f1:
best_f1 = val_f1_mic
print("new best val f1:", best_f1)
torch.save(model.state_dict(), os.path.join(log_dir, "best_model.pkl"))
writer.add_scalar("val/f1-mic", val_f1_mic, global_step=epoch)
writer.add_scalar("val/f1-mac", val_f1_mac, global_step=epoch)
end_time = time.time()
print(f"training using time {start_time - end_time}")
# test
if args.use_val:
model.load_state_dict(torch.load(os.path.join(log_dir, "best_model.pkl")))
test_f1_mic, test_f1_mac = evaluate(model, g, labels, test_mask, multitask)
print("Test F1-mic{:.4f}, Test F1-mac{:.4f}".format(test_f1_mic, test_f1_mac))
writer.add_scalar("test/f1-mic", test_f1_mic)
writer.add_scalar("test/f1-mac", test_f1_mac)
|
def main(args):
torch.manual_seed(args.rnd_seed)
np.random.seed(args.rnd_seed)
random.seed(args.rnd_seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
multitask_data = set(["ppi"])
multitask = args.dataset in multitask_data
# load and preprocess dataset
data = load_data(args)
train_nid = np.nonzero(data.train_mask)[0].astype(np.int64)
# Normalize features
if args.normalize:
train_feats = data.features[train_nid]
scaler = sklearn.preprocessing.StandardScaler()
scaler.fit(train_feats)
features = scaler.transform(data.features)
else:
features = data.features
features = torch.FloatTensor(features)
if not multitask:
labels = torch.LongTensor(data.labels)
else:
labels = torch.FloatTensor(data.labels)
if hasattr(torch, "BoolTensor"):
train_mask = torch.BoolTensor(data.train_mask)
val_mask = torch.BoolTensor(data.val_mask)
test_mask = torch.BoolTensor(data.test_mask)
else:
train_mask = torch.ByteTensor(data.train_mask)
val_mask = torch.ByteTensor(data.val_mask)
test_mask = torch.ByteTensor(data.test_mask)
in_feats = features.shape[1]
n_classes = data.num_labels
n_edges = data.graph.number_of_edges()
n_train_samples = train_mask.sum().item()
n_val_samples = val_mask.sum().item()
n_test_samples = test_mask.sum().item()
print(
"""----Data statistics------'
#Edges %d
#Classes %d
#Train samples %d
#Val samples %d
#Test samples %d"""
% (n_edges, n_classes, n_train_samples, n_val_samples, n_test_samples)
)
# create GCN model
g = data.graph
if args.self_loop and not args.dataset.startswith("reddit"):
g.remove_edges_from(g.selfloop_edges())
g.add_edges_from(zip(g.nodes(), g.nodes()))
print("adding self-loop edges")
g = DGLGraph(g, readonly=True)
# set device for dataset tensors
if args.gpu < 0:
cuda = False
else:
cuda = True
torch.cuda.set_device(args.gpu)
features = features.cuda()
labels = labels.cuda()
train_mask = train_mask.cuda()
val_mask = val_mask.cuda()
test_mask = test_mask.cuda()
print(torch.cuda.get_device_name(0))
g.ndata["features"] = features
g.ndata["labels"] = labels
g.ndata["train_mask"] = train_mask
print("labels shape:", labels.shape)
cluster_iterator = ClusterIter(
args.dataset, g, args.psize, args.batch_size, train_nid, use_pp=args.use_pp
)
print("features shape, ", features.shape)
model = GraphSAGE(
in_feats,
args.n_hidden,
n_classes,
args.n_layers,
F.relu,
args.dropout,
args.use_pp,
)
if cuda:
model.cuda()
# logger and so on
log_dir = save_log_dir(args)
writer = SummaryWriter(log_dir)
logger = Logger(os.path.join(log_dir, "loggings"))
logger.write(args)
# Loss function
if multitask:
print("Using multi-label loss")
loss_f = nn.BCEWithLogitsLoss()
else:
print("Using multi-class loss")
loss_f = nn.CrossEntropyLoss()
# use optimizer
optimizer = torch.optim.Adam(
model.parameters(), lr=args.lr, weight_decay=args.weight_decay
)
# set train_nids to cuda tensor
if cuda:
train_nid = torch.from_numpy(train_nid).cuda()
print(
"current memory after model before training",
torch.cuda.memory_allocated(device=train_nid.device) / 1024 / 1024,
)
start_time = time.time()
best_f1 = -1
for epoch in range(args.n_epochs):
for j, cluster in enumerate(cluster_iterator):
# sync with upper level training graph
cluster.copy_from_parent()
model.train()
# forward
pred = model(cluster)
batch_labels = cluster.ndata["labels"]
batch_train_mask = cluster.ndata["train_mask"]
loss = loss_f(pred[batch_train_mask], batch_labels[batch_train_mask])
optimizer.zero_grad()
loss.backward()
optimizer.step()
# in PPI case, `log_every` is chosen to log one time per epoch.
# Choose your log freq dynamically when you want more info within one epoch
if j % args.log_every == 0:
print(
f"epoch:{epoch}/{args.n_epochs}, Iteration {j}/"
f"{len(cluster_iterator)}:training loss",
loss.item(),
)
writer.add_scalar(
"train/loss",
loss.item(),
global_step=j + epoch * len(cluster_iterator),
)
print(
"current memory:",
torch.cuda.memory_allocated(device=pred.device) / 1024 / 1024,
)
# evaluate
if epoch % args.val_every == 0:
val_f1_mic, val_f1_mac = evaluate(model, g, labels, val_mask, multitask)
print("Val F1-mic{:.4f}, Val F1-mac{:.4f}".format(val_f1_mic, val_f1_mac))
if val_f1_mic > best_f1:
best_f1 = val_f1_mic
print("new best val f1:", best_f1)
torch.save(model.state_dict(), os.path.join(log_dir, "best_model.pkl"))
writer.add_scalar("val/f1-mic", val_f1_mic, global_step=epoch)
writer.add_scalar("val/f1-mac", val_f1_mac, global_step=epoch)
end_time = time.time()
print(f"training using time {start_time - end_time}")
# test
if args.use_val:
model.load_state_dict(torch.load(os.path.join(log_dir, "best_model.pkl")))
test_f1_mic, test_f1_mac = evaluate(model, g, labels, test_mask, multitask)
print("Test F1-mic{:.4f}, Test F1-mac{:.4f}".format(test_f1_mic, test_f1_mac))
writer.add_scalar("test/f1-mic", test_f1_mic)
writer.add_scalar("test/f1-mac", test_f1_mac)
|
https://github.com/dmlc/dgl/issues/755
|
test_shared_mem_store.test_init ... /var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: Initializer is not set. Use zero initializer instead. To suppress this warning, use `set_initializer` to explicitly specify which initializer to use.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access node data of all nodes.It's recommended to node data of a subset of nodes directly.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access edge data of all edges.It's recommended to edge data of a subset of edges directly.
warnings.warn(msg, warn_type)
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_node_feat for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_node_test4 for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_in for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_edge_feat for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_edge_test4 for shared memory
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: Initializer is not set. Use zero initializer instead. To suppress this warning, use `set_initializer` to explicitly specify which initializer to use.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access node data of all nodes.It's recommended to node data of a subset of nodes directly.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access edge data of all edges.It's recommended to edge data of a subset of edges directly.
warnings.warn(msg, warn_type)
Arrays are not almost equal to 7 decimals
(mismatch 100.0%)
x: array([20., 20., 20., 20., 20., 20., 20., 20., 20., 20.], dtype=float32)
y: array(11)
Traceback (most recent call last):
File "/var/jenkins_home/workspace/DGL_PR-752@2/tests/distributed/test_shared_mem_store.py", line 65, in check_init_func
check_array_shared_memory(g, worker_id, [g.nodes[:].data['test4'], g.edges[:].data['test4']])
File "/var/jenkins_home/workspace/DGL_PR-752@2/tests/distributed/test_shared_mem_store.py", line 28, in check_array_shared_memory
assert_almost_equal(F.asnumpy(arr[0]), i + 10)
File "/usr/local/lib/python3.5/dist-packages/numpy/testing/nose_tools/utils.py", line 567, in assert_almost_equal
return assert_array_almost_equal(actual, desired, decimal, err_msg)
File "/usr/local/lib/python3.5/dist-packages/numpy/testing/nose_tools/utils.py", line 965, in assert_array_almost_equal
precision=decimal)
File "/usr/local/lib/python3.5/dist-packages/numpy/testing/nose_tools/utils.py", line 781, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Arrays are not almost equal to 7 decimals
(mismatch 100.0%)
x: array([20., 20., 20., 20., 20., 20., 20., 20., 20., 20.], dtype=float32)
y: array(11)
FAIL
|
AssertionError
|
def main(args):
# load and preprocess dataset
data = load_data(args)
features = torch.FloatTensor(data.features)
labels = torch.LongTensor(data.labels)
if hasattr(torch, "BoolTensor"):
train_mask = torch.BoolTensor(data.train_mask)
val_mask = torch.BoolTensor(data.val_mask)
test_mask = torch.BoolTensor(data.test_mask)
else:
train_mask = torch.ByteTensor(data.train_mask)
val_mask = torch.ByteTensor(data.val_mask)
test_mask = torch.ByteTensor(data.test_mask)
in_feats = features.shape[1]
n_classes = data.num_labels
n_edges = data.graph.number_of_edges()
if args.gpu < 0:
cuda = False
else:
cuda = True
torch.cuda.set_device(args.gpu)
features = features.cuda()
labels = labels.cuda()
train_mask = train_mask.cuda()
val_mask = val_mask.cuda()
test_mask = test_mask.cuda()
# graph preprocess
g = data.graph
# add self loop
if args.self_loop:
g.remove_edges_from(nx.selfloop_edges(g))
g.add_edges_from(zip(g.nodes(), g.nodes()))
g = DGLGraph(g)
n_edges = g.number_of_edges()
# create DGI model
dgi = DGI(
g, in_feats, args.n_hidden, args.n_layers, nn.PReLU(args.n_hidden), args.dropout
)
if cuda:
dgi.cuda()
dgi_optimizer = torch.optim.Adam(
dgi.parameters(), lr=args.dgi_lr, weight_decay=args.weight_decay
)
# train deep graph infomax
cnt_wait = 0
best = 1e9
best_t = 0
dur = []
for epoch in range(args.n_dgi_epochs):
dgi.train()
if epoch >= 3:
t0 = time.time()
dgi_optimizer.zero_grad()
loss = dgi(features)
loss.backward()
dgi_optimizer.step()
if loss < best:
best = loss
best_t = epoch
cnt_wait = 0
torch.save(dgi.state_dict(), "best_dgi.pkl")
else:
cnt_wait += 1
if cnt_wait == args.patience:
print("Early stopping!")
break
if epoch >= 3:
dur.append(time.time() - t0)
print(
"Epoch {:05d} | Time(s) {:.4f} | Loss {:.4f} | ETputs(KTEPS) {:.2f}".format(
epoch, np.mean(dur), loss.item(), n_edges / np.mean(dur) / 1000
)
)
# create classifier model
classifier = Classifier(args.n_hidden, n_classes)
if cuda:
classifier.cuda()
classifier_optimizer = torch.optim.Adam(
classifier.parameters(), lr=args.classifier_lr, weight_decay=args.weight_decay
)
# train classifier
print("Loading {}th epoch".format(best_t))
dgi.load_state_dict(torch.load("best_dgi.pkl"))
embeds = dgi.encoder(features, corrupt=False)
embeds = embeds.detach()
dur = []
for epoch in range(args.n_classifier_epochs):
classifier.train()
if epoch >= 3:
t0 = time.time()
classifier_optimizer.zero_grad()
preds = classifier(embeds)
loss = F.nll_loss(preds[train_mask], labels[train_mask])
loss.backward()
classifier_optimizer.step()
if epoch >= 3:
dur.append(time.time() - t0)
acc = evaluate(classifier, embeds, labels, val_mask)
print(
"Epoch {:05d} | Time(s) {:.4f} | Loss {:.4f} | Accuracy {:.4f} | "
"ETputs(KTEPS) {:.2f}".format(
epoch, np.mean(dur), loss.item(), acc, n_edges / np.mean(dur) / 1000
)
)
print()
acc = evaluate(classifier, embeds, labels, test_mask)
print("Test Accuracy {:.4f}".format(acc))
|
def main(args):
# load and preprocess dataset
data = load_data(args)
features = torch.FloatTensor(data.features)
labels = torch.LongTensor(data.labels)
if hasattr(torch, "BoolTensor"):
train_mask = torch.BoolTensor(data.train_mask)
val_mask = torch.BoolTensor(data.val_mask)
test_mask = torch.BoolTensor(data.test_mask)
else:
train_mask = torch.ByteTensor(data.train_mask)
val_mask = torch.ByteTensor(data.val_mask)
test_mask = torch.ByteTensor(data.test_mask)
in_feats = features.shape[1]
n_classes = data.num_labels
n_edges = data.graph.number_of_edges()
if args.gpu < 0:
cuda = False
else:
cuda = True
torch.cuda.set_device(args.gpu)
features = features.cuda()
labels = labels.cuda()
train_mask = train_mask.cuda()
val_mask = val_mask.cuda()
test_mask = test_mask.cuda()
# graph preprocess
g = data.graph
# add self loop
if args.self_loop:
g.remove_edges_from(g.selfloop_edges())
g.add_edges_from(zip(g.nodes(), g.nodes()))
g = DGLGraph(g)
n_edges = g.number_of_edges()
# create DGI model
dgi = DGI(
g, in_feats, args.n_hidden, args.n_layers, nn.PReLU(args.n_hidden), args.dropout
)
if cuda:
dgi.cuda()
dgi_optimizer = torch.optim.Adam(
dgi.parameters(), lr=args.dgi_lr, weight_decay=args.weight_decay
)
# train deep graph infomax
cnt_wait = 0
best = 1e9
best_t = 0
dur = []
for epoch in range(args.n_dgi_epochs):
dgi.train()
if epoch >= 3:
t0 = time.time()
dgi_optimizer.zero_grad()
loss = dgi(features)
loss.backward()
dgi_optimizer.step()
if loss < best:
best = loss
best_t = epoch
cnt_wait = 0
torch.save(dgi.state_dict(), "best_dgi.pkl")
else:
cnt_wait += 1
if cnt_wait == args.patience:
print("Early stopping!")
break
if epoch >= 3:
dur.append(time.time() - t0)
print(
"Epoch {:05d} | Time(s) {:.4f} | Loss {:.4f} | ETputs(KTEPS) {:.2f}".format(
epoch, np.mean(dur), loss.item(), n_edges / np.mean(dur) / 1000
)
)
# create classifier model
classifier = Classifier(args.n_hidden, n_classes)
if cuda:
classifier.cuda()
classifier_optimizer = torch.optim.Adam(
classifier.parameters(), lr=args.classifier_lr, weight_decay=args.weight_decay
)
# train classifier
print("Loading {}th epoch".format(best_t))
dgi.load_state_dict(torch.load("best_dgi.pkl"))
embeds = dgi.encoder(features, corrupt=False)
embeds = embeds.detach()
dur = []
for epoch in range(args.n_classifier_epochs):
classifier.train()
if epoch >= 3:
t0 = time.time()
classifier_optimizer.zero_grad()
preds = classifier(embeds)
loss = F.nll_loss(preds[train_mask], labels[train_mask])
loss.backward()
classifier_optimizer.step()
if epoch >= 3:
dur.append(time.time() - t0)
acc = evaluate(classifier, embeds, labels, val_mask)
print(
"Epoch {:05d} | Time(s) {:.4f} | Loss {:.4f} | Accuracy {:.4f} | "
"ETputs(KTEPS) {:.2f}".format(
epoch, np.mean(dur), loss.item(), acc, n_edges / np.mean(dur) / 1000
)
)
print()
acc = evaluate(classifier, embeds, labels, test_mask)
print("Test Accuracy {:.4f}".format(acc))
|
https://github.com/dmlc/dgl/issues/755
|
test_shared_mem_store.test_init ... /var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: Initializer is not set. Use zero initializer instead. To suppress this warning, use `set_initializer` to explicitly specify which initializer to use.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access node data of all nodes.It's recommended to node data of a subset of nodes directly.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access edge data of all edges.It's recommended to edge data of a subset of edges directly.
warnings.warn(msg, warn_type)
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_node_feat for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_node_test4 for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_in for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_edge_feat for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_edge_test4 for shared memory
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: Initializer is not set. Use zero initializer instead. To suppress this warning, use `set_initializer` to explicitly specify which initializer to use.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access node data of all nodes.It's recommended to node data of a subset of nodes directly.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access edge data of all edges.It's recommended to edge data of a subset of edges directly.
warnings.warn(msg, warn_type)
Arrays are not almost equal to 7 decimals
(mismatch 100.0%)
x: array([20., 20., 20., 20., 20., 20., 20., 20., 20., 20.], dtype=float32)
y: array(11)
Traceback (most recent call last):
File "/var/jenkins_home/workspace/DGL_PR-752@2/tests/distributed/test_shared_mem_store.py", line 65, in check_init_func
check_array_shared_memory(g, worker_id, [g.nodes[:].data['test4'], g.edges[:].data['test4']])
File "/var/jenkins_home/workspace/DGL_PR-752@2/tests/distributed/test_shared_mem_store.py", line 28, in check_array_shared_memory
assert_almost_equal(F.asnumpy(arr[0]), i + 10)
File "/usr/local/lib/python3.5/dist-packages/numpy/testing/nose_tools/utils.py", line 567, in assert_almost_equal
return assert_array_almost_equal(actual, desired, decimal, err_msg)
File "/usr/local/lib/python3.5/dist-packages/numpy/testing/nose_tools/utils.py", line 965, in assert_array_almost_equal
precision=decimal)
File "/usr/local/lib/python3.5/dist-packages/numpy/testing/nose_tools/utils.py", line 781, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Arrays are not almost equal to 7 decimals
(mismatch 100.0%)
x: array([20., 20., 20., 20., 20., 20., 20., 20., 20., 20.], dtype=float32)
y: array(11)
FAIL
|
AssertionError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.