code stringlengths 17 6.64M |
|---|
class PoisonOnly(CPSelectMixin, SMTPoison):
    # Encoder combining SMTPoison with CPSelectMixin; adds no behavior of
    # its own. Presumably models poison-only semantics — confirm with the
    # mixin definitions, which are outside this file view.
    pass
|
class PLDI2015(BaseSMTEncoder):
    'Preserve the encoding of Alive as of the original Alive paper.\n    '
    # All behavior comes from BaseSMTEncoder; this subclass only names the
    # legacy configuration.
    pass
|
class LLVM4Mixin(BaseSMTEncoder):
    'Prior to LLVM 5.0, shifts returned undefined values when the shift amount\n    exceeded the bit-width.\n    '
    # NOTE: a stray '|' artifact line (from the source extraction) was removed
    # here; it was a syntax error inside the class body.

    # Factory producing an encoder for a shift operation. Defined without
    # 'self' — presumably invoked at class-definition time to build the
    # per-opcode handler; confirm against BaseSMTEncoder's dispatch scheme.
    # op      - function building the shift expression from (x, y)
    # poisons - mapping of flag name -> nonpoison condition builder
    def shift_op(op, poisons):
        def _(term, smt):
            x = smt.eval(term.x)
            y = smt.eval(term.y)
            # The result is conditional on the shift amount being less than
            # the bit-width (z3 coerces the int y.size() to a bit-vector).
            z = smt._conditional_value([z3.ULT(y, y.size())], op(x, y), term.name)
            for f in poisons:
                if smt.has_analysis(f, term):
                    # Flag known via analysis: nonpoison condition applies
                    # only when the analysis says the flag holds.
                    smt.add_nonpoison(z3.Implies(smt.get_analysis(f, term), poisons[f](x, y, z)))
                elif (f in term.flags):
                    smt.add_nonpoison(poisons[f](x, y, z))
            return z
        return _
|
class SMTUndef_LLVM4(LLVM4Mixin, SMTUndef):
    # Undef-based encoder with pre-LLVM-5.0 shift semantics.
    pass
|
class SMTPoison_LLVM4(LLVM4Mixin, SMTPoison):
    # Poison-based encoder with pre-LLVM-5.0 shift semantics.
    pass
|
class NewShlMixin(LLVM4Mixin):
    # Alias of LLVM4Mixin; presumably kept as a distinct name so the "new
    # shl" configuration can diverge later — confirm with callers.
    pass
|
class SMTPoisonNewShl(NewShlMixin, SMTPoison):
    # Poison-based encoder with the NewShlMixin shift encoding.
    pass
|
class SMTUndefNewShl(NewShlMixin, SMTUndef):
    # Undef-based encoder with the NewShlMixin shift encoding.
    pass
|
class FPImpreciseUndef(SMTUndef):
    # Behavior identical to SMTUndef; the name suggests an imprecise
    # floating-point variant — confirm where this class is selected.
    pass
|
class OldNSZ(BaseSMTEncoder):
    'Encoder variant using an older interpretation of the nsz flag.'
    def _float_binary_operator(self, term, op):
        # Encode a floating-point binary op, adding definedness constraints
        # for the fast-math flags present on the term.
        x = self.eval(term.x)
        y = self.eval(term.y)
        if ('nnan' in term.flags):
            # nnan: operands and result are assumed not NaN.
            self.add_defs(z3.Not(z3.fpIsNaN(x)), z3.Not(z3.fpIsNaN(y)), z3.Not(z3.fpIsNaN(op(x, y))))
        if ('ninf' in term.flags):
            # ninf: operands and result are assumed not infinite.
            self.add_defs(z3.Not(z3.fpIsInf(x)), z3.Not(z3.fpIsInf(y)), z3.Not(z3.fpIsInf(op(x, y))))
        if ('nsz' in term.flags):
            # nsz (old interpretation): assume neither operand is -0.0.
            nz = z3.fpMinusZero(_ty_sort(self.type(term)))
            self.add_defs(z3.Not((x == nz)), z3.Not((y == nz)))
        # (An unreachable duplicate 'return op(x, y)' was removed here.)
        return op(x, y)
|
class BrokenNSZ(BaseSMTEncoder):
    # Encoder preserving a historically broken nsz encoding (per the class
    # name); kept, presumably, for comparison experiments — do not "fix".
    def _float_binary_operator(self, term, op):
        x = self.eval(term.x)
        y = self.eval(term.y)
        if ('nnan' in term.flags):
            # nnan: operands and result assumed not NaN.
            self.add_defs(z3.Not(z3.fpIsNaN(x)), z3.Not(z3.fpIsNaN(y)), z3.Not(z3.fpIsNaN(op(x, y))))
        if ('ninf' in term.flags):
            # ninf: operands and result assumed not infinite.
            self.add_defs(z3.Not(z3.fpIsInf(x)), z3.Not(z3.fpIsInf(y)), z3.Not(z3.fpIsInf(op(x, y))))
        if ('nsz' in term.flags):
            # nsz: replace a zero result with a quantified zero of arbitrary
            # sign (fresh qvar q constrained to equal 0).
            q = self.fresh_var(self.type(term))
            self.add_qvar(q)
            self.add_defs(z3.fpEQ(q, 0))
            z = op(x, y)
            return z3.If(z3.fpEQ(z, 0), q, z)
        return op(x, y)
|
def read_opt_files(files, filter=None, extended_results=False):
    """Parse and iterate optimizations given in files.

    files - iterable of open files or strings
    filter - selects which optimizations to return; None means return all
    extended_results - if True, also yields any features provided with
      each optimization
    """
    opts = itertools.chain.from_iterable(
        parser.parse_opt_file(f, extended_results) for f in files)
    if not filter:
        return opts
    if extended_results:
        # Each item is a tuple whose first element is the optimization.
        return itertools.ifilter((lambda item: filter(item[0])), opts)
    return itertools.ifilter(filter, opts)
|
def all_of(*preds):
    """Combine predicates conjunctively, ignoring None entries.

    Returns None if no predicates remain, the single predicate if exactly
    one remains, otherwise a predicate true when all of them are true.
    """
    # Materialize to a list: filter() returns a lazy iterator on Python 3,
    # which would break the len()/indexing below. Identical to the Python 2
    # behavior of filter(None, preds).
    preds = [p for p in preds if p]
    if len(preds) == 0:
        return None
    if len(preds) == 1:
        return preds[0]
    return (lambda opt: all((p(opt) for p in preds)))
|
def any_of(*preds):
    """Combine predicates disjunctively, ignoring None entries.

    Returns the single predicate if exactly one remains, otherwise a
    predicate true when any of them is true. NOTE: unlike all_of, zero
    predicates yields an always-false predicate rather than None —
    preserved as-is; confirm whether that asymmetry is intended.
    """
    # Materialize to a list: filter() is lazy on Python 3, which would break
    # len()/indexing. Identical to Python 2 filter(None, preds).
    preds = [p for p in preds if p]
    if len(preds) == 1:
        return preds[0]
    return (lambda opt: any((p(opt) for p in preds)))
|
def contains_node(cls):
    'Return a predicate testing whether an opt has a subterm of type cls.'
    def pred(opt):
        return any(isinstance(t, cls) for t in opt.subterms())
    return pred
|
def match_name(pattern):
    """Return a predicate matching optimization names against a regex.

    Returns None when pattern is None; raises Error on an invalid pattern.
    """
    if pattern is None:
        return None
    try:
        regex = re.compile(pattern)
    except re.error as e:
        raise Error('Invalid pattern: {}'.format(e))
    # Constructing the closure cannot raise, so it sits outside the try.
    return lambda opt: regex.search(opt.name)
|
class Transform(pretty.PrettyRepr):
    'A named rewrite from a source pattern to a target pattern, with optional precondition (pre) and assumption (asm) predicates.'
    def __init__(self, src, tgt, pre=(), asm=(), name=''):
        self.name = name
        self.pre = pre   # precondition predicates
        self.asm = asm   # assumption predicates
        self.src = src   # source (matched) pattern
        self.tgt = tgt   # target (replacement) pattern
    def pretty(self):
        # Structured representation used by the PrettyRepr mixin.
        return pretty.pfun(type(self).__name__, (self.src, self.tgt, self.pre, self.asm, self.name))
    def subterms(self):
        'Generate all terms in the transform, without repeats.\n    '
        # A single 'seen' set is threaded through so a term appearing in
        # several components is generated only once.
        seen = set()
        return itertools.chain(L.subterms(self.src, seen), L.subterms(self.tgt, seen), itertools.chain.from_iterable((L.subterms(p, seen) for p in self.pre)), itertools.chain.from_iterable((L.subterms(p, seen) for p in self.asm)))
    def type_constraints(self):
        # Collect constraints from the source first, then bind its reps so
        # target/precondition-only terms can be detected, then the rest.
        logger.debug('%s: Gathering type constraints', self.name)
        t = typing.TypeConstraints()
        seen = set()
        t.collect(self.src, seen)
        t.bind_reps()
        for p in self.asm:
            t.collect(p, seen)
        for p in self.pre:
            t.collect(p, seen)
        t.collect(self.tgt, seen)
        t.eq_types(self.src, self.tgt)
        t.set_defaultables()
        return t
    @property
    def type_environment(self):
        # Lazily computed and cached in _env; use the deleter to invalidate.
        try:
            return self._env
        except AttributeError:
            env = self.type_constraints().make_environment()
            self._env = env
            return env
    @type_environment.deleter
    def type_environment(self):
        try:
            del self._env
        except AttributeError:
            pass
    def type_models(self):
        # Enumerate concrete type assignments consistent with the constraints.
        return self.type_environment.models()
    def validate_model(self, type_vector):
        "Return whether the type vector meets this opt's constraints.\n    "
        if isinstance(type_vector, typing.TypeModel):
            type_vector = type_vector.types
        V = typing.Validator(self.type_environment, type_vector)
        try:
            V.eq_types(self.src, self.tgt)
            for t in self.subterms():
                logger.debug('checking %s', t)
                t.type_constraints(V)
            return True
        except typing.Error:
            return False
    def constant_defs(self):
        'Generate shared constant terms from the target and precondition.\n\n    Terms are generated before any terms that reference them.\n    '
        # Calls the module-level constant_defs helper (the method name merely
        # shadows it on the instance).
        return constant_defs(self.tgt, (self.pre + self.asm))
    def format(self):
        return Formatted(self)
|
@format_doc.register(Transform)
def _(opt, fmt, prec):
    # Render a Transform: its assumptions ('Assume:') and preconditions
    # ('Pre:') as labeled parts, followed by the source and target patterns.
    return format_parts(opt.name, ([('Assume:', p) for p in opt.asm] + [('Pre:', p) for p in opt.pre]), opt.src, opt.tgt, fmt)
|
def most_specific(c1, c2):
    """Return the intersection of two type-class constraints.

    Returns the more specific constraint satisfying both, or None when the
    constraints are incompatible.
    """
    # Normalize so c1 <= c2 in the constraint ordering.
    if c1 > c2:
        c1, c2 = c2, c1
    if c1 == NUMBER:
        if c2 == PTR:
            return None
        if c2 == INT_PTR:
            return INT
    if c1 == FLOAT and c2 != FLOAT:
        return None
    if c1 == PTR and c2 != PTR:
        return None
    return c2
|
def meets_constraint(con, ty):
    'Return whether the concrete type ty satisfies the constraint con.'
    if con == BOOL:
        # BOOL means exactly i1, not any integer type.
        return ty == IntType(1)
    return isinstance(ty, _constraint_class[con])
|
class TypeConstraints(object):
    'Accumulates typing constraints over terms and builds a TypeEnvironment.'
    logger = logger.getChild('TypeConstraints')
    def __init__(self):
        self.sets = disjoint.DisjointSubsets()  # unification sets of terms
        self.specifics = {}                     # rep -> required concrete type
        self.constraints = collections.defaultdict((lambda : FIRST_CLASS))  # rep -> type class
        self.ordering = set()                   # (lo, hi) width orderings; lo may be an int bound
        self.width_equalities = set()           # pairs of terms with equal bit-width
        self.default_rep = None                 # rep of the shared default-typed set
        self.defaultable_reps = set()           # reps allowed to fall back to the default type
        self.bound_reps = set()                 # reps captured by bind_reps()
    def collect(self, term, seen=None):
        'Gather type constraints for this term and its subterms.\n\n    If seen is provided, terms in seen will not be gathered.\n    '
        for t in subterms(term, seen):
            t.type_constraints(self)
    def bind_reps(self):
        'Mark all reps as bound. Useful for finding target/precondition terms\n    that did not unify with any source term.\n    '
        self.bound_reps = set(self.sets.reps())
    def rep(self, term):
        'Return the representative member of the unification set containing this\n    term. Creates and initializes a unification set if one did not previously\n    exist.\n    '
        try:
            return self.sets.rep(term)
        except KeyError:
            assert isinstance(term, Value)
            self._init_term(term)
            return term
    def _init_term(self, term):
        # Hook point: _EnvironmentExtender overrides this to tie new terms
        # to existing type variables.
        self.logger.debug('adding term %s', term)
        self.sets.add_key(term)
    def _merge(self, t1, t2):
        # Unification callback: fold t2's per-rep state into t1.
        self.logger.debug('unifying %s and %s', t1, t2)
        if (t2 in self.specifics):
            self.specific(t1, self.specifics.pop(t2))
        if (t2 in self.constraints):
            self.constrain(t1, self.constraints.pop(t2))
        if (t2 is self.default_rep):
            self.default_rep = t1
        if (t2 in self.defaultable_reps):
            self.defaultable_reps.remove(t2)
            self.defaultable_reps.add(t1)
        if (t2 in self.bound_reps):
            self.bound_reps.remove(t2)
            self.bound_reps.add(t1)
    def eq_types(self, *terms):
        'Require all the given terms to have the same type.'
        it = iter(terms)
        t1 = self.rep(next(it))
        for t2 in it:
            self.sets.unify(t1, self.rep(t2), self._merge)
    def _init_default(self, rep):
        # The default set is an INT constrained by predicate_default.
        self.specific(rep, predicate_default)
        self.constrain(rep, INT)
        self.default_rep = rep
    def default(self, term):
        'Give this term the (shared) default type.'
        if (self.default_rep is None):
            self._init_default(self.rep(term))
        else:
            self.eq_types(term, self.default_rep)
    def defaultable(self, term):
        'Mark this term as potentially having a default type.\n    '
        self.defaultable_reps.add(self.rep(term))
    def set_defaultables(self):
        'Set unbound, defaultable values to the default type. Raise an error if\n    any unbound, non-defaultable values.\n    '
        for r in self.sets.reps():
            if ((r in self.bound_reps) or (r in self.specifics) or (self.constraints[r] == BOOL)):
                continue
            if (r in self.defaultable_reps):
                self.default(r)
            else:
                raise Error(('Ambiguous type for ' + _name(r)))
    def specific(self, term, ty):
        'Require term to have the concrete type ty (None is a no-op).'
        r = self.rep(term)
        if (ty is None):
            return
        self.logger.debug('specifying %s : %s', term, ty)
        if (r not in self.specifics):
            self.specifics[r] = ty
        if (self.specifics[r] != ty):
            # FIX: index by the set representative r, not term; term itself
            # may not be a key of specifics, which previously raised KeyError
            # instead of the intended Error.
            raise Error('Incompatible types for {}: {} and {}'.format(_name(term), ty, self.specifics[r]))
    def constrain(self, term, con):
        'Refine the type-class constraint on term with con.'
        r = self.rep(term)
        con0 = self.constraints[r]
        self.logger.debug('Refining constraint for %s: %s & %s', term, con, con0)
        c = most_specific(con0, con)
        if (c is None):
            raise Error('Incompatible constraints for {}: {} and {}'.format(_name(term), _constraint_name[con], _constraint_name[con0]))
        self.constraints[r] = c
    def integer(self, term):
        self.constrain(term, INT)
    def bool(self, term):
        self.constrain(term, BOOL)
    def pointer(self, term):
        self.constrain(term, PTR)
    def int_ptr_vec(self, term):
        self.constrain(term, INT_PTR)
    def float(self, term):
        self.constrain(term, FLOAT)
    def number(self, term):
        self.constrain(term, NUMBER)
    def first_class(self, term):
        self.constrain(term, FIRST_CLASS)
    def width_order(self, lo, hi):
        'Require width(lo) < width(hi); lo may be an int lower bound.'
        if isinstance(lo, Value):
            lo = self.rep(lo)
        hi = self.rep(hi)
        self.ordering.add((lo, hi))
    def width_equal(self, a, b):
        'Require a and b to have equal bit-widths.'
        a = self.rep(a)
        b = self.rep(b)
        self.width_equalities.add((a, b))
    def validate(self):
        'Make sure specific types meet constraints'
        for r in self.specifics:
            if (r not in self.constraints):
                continue
            if (not meets_constraint(self.constraints[r], self.specifics[r])):
                raise Error('Incompatible constraints for {}: {} is not {}'.format(_name(r), self.specifics[r], _constraint_name[self.constraints[r]]))
    def simplify_orderings(self):
        # Re-key orderings/equalities by current representatives and drop
        # duplicates; equalities are canonicalized by id order.
        if self.logger.isEnabledFor(logging.DEBUG):
            self.logger.debug(((('simplifying ordering:\n  ' + pretty.pformat(self.ordering, indent=2)) + '\n  equalities:\n') + pretty.pformat(self.width_equalities, indent=2)))
        ords = {((lo if isinstance(lo, int) else self.sets.rep(lo)), self.sets.rep(hi)) for (lo, hi) in self.ordering}
        eqs = {(self.sets.rep(a), self.sets.rep(b)) for (a, b) in self.width_equalities if (a != b)}
        eqs = {((a, b) if (id(a) < id(b)) else (b, a)) for (a, b) in eqs}
        if self.logger.isEnabledFor(logging.DEBUG):
            self.logger.debug(((('simplified ordering:\n  ' + pretty.pformat(ords, indent=2)) + '\n  equalities:\n') + pretty.pformat(eqs, indent=2)))
        assert all(((isinstance(lo, int) or (most_specific(self.constraints[lo], self.constraints[hi]) is not None)) for (lo, hi) in ords))
        self.ordering = ords
        self.width_equalities = eqs
    @staticmethod
    def find_transitive_bounds(orders):
        'Return a pair of mappings from a value to values known to be above or\n    below.\n\n    Argument is an iterable of (lo,hi) pairs.\n    '
        above = collections.defaultdict(set)
        below = collections.defaultdict(set)
        for (lo, hi) in orders:
            if ((lo == hi) or (lo in above[hi])):
                # A cycle: report every value on the circular path.
                problem = above[hi].intersection(below[lo])
                problem.add(lo)
                problem.add(hi)
                problem = sorted((str(_name(v)) for v in problem))
                raise Error('Incompatible constraints for {}: circular ordering'.format(', '.join(problem)))
            if (lo in below[hi]):
                continue
            # Propagate the new edge transitively in both directions.
            new_above_lo = list(above[hi])
            new_above_lo.append(hi)
            new_below_hi = list(below[lo])
            new_below_hi.append(lo)
            for x in new_above_lo:
                below[x].update(new_below_hi)
            for x in new_below_hi:
                above[x].update(new_above_lo)
        return (below, above)
    @staticmethod
    def topological_sort(edges, starts):
        # Depth-first post-order over edges, beginning from starts.
        seen = set()
        order = []
        def visit(n):
            if (n in seen):
                return
            for d in edges[n]:
                visit(d)
            seen.add(n)
            order.append(n)
        for r in starts:
            visit(r)
        return order
    def make_environment(self):
        'Return a TypeEnvironment expressing the constraints gathered\n    '
        (lower_bounds, upper_bounds) = self.find_transitive_bounds(((self.rep(lo), self.rep(hi)) for (lo, hi) in self.ordering if isinstance(lo, Value)))
        min_width = {}
        for (lo, hi) in self.ordering:
            if isinstance(lo, int):
                # FIX: min_width is keyed by representatives, so the read must
                # also use rep(hi); reading the raw hi could silently discard
                # a previously-accumulated larger bound.
                r = self.rep(hi)
                min_width[r] = max(lo, min_width.get(r, 0))
        if logger.isEnabledFor(logging.DEBUG):
            logger.debug(((('get_type_model:\n  min_width: ' + pretty.pformat(min_width, indent=13)) + '\n  lower_bounds: ') + pretty.pformat(lower_bounds, indent=16)))
        # Number the tyvars so lower bounds always precede their uppers.
        order = self.topological_sort(lower_bounds, self.sets.reps())
        tyvars = dict(itertools.izip(order, itertools.count()))
        if logger.isEnabledFor(logging.DEBUG):
            logger.debug(('get_type_model:\n  tyvars: ' + pretty.pformat(tyvars, indent=10)))
        min_width = {tyvars[rep]: w for (rep, w) in min_width.iteritems()}
        lower_bounds = {tyvars[rep]: tuple((tyvars[t] for t in ts)) for (rep, ts) in lower_bounds.iteritems() if ts}
        upper_bounds = {tyvars[rep]: tuple((tyvars[t] for t in ts)) for (rep, ts) in upper_bounds.iteritems() if ts}
        specific = {}
        constraint = []
        context = weakref.WeakKeyDictionary()
        for (tyvar, rep) in enumerate(order):
            if (rep in self.specifics):
                specific[tyvar] = self.specifics[rep]
                if (not meets_constraint(self.constraints[rep], self.specifics[rep])):
                    raise Error('Incompatible constraints for {}: {} is not {}'.format(_name(rep), self.specifics[rep], _constraint_name[self.constraints[rep]]))
            constraint.append(self.constraints[rep])
            for t in self.sets.subset(rep):
                assert (t not in context)
                context[t] = tyvar
        # Map each width-equal tyvar to the smallest tyvar in its class.
        width_equality = {}
        if self.width_equalities:
            width_classes = disjoint.DisjointSubsets()
            for r in xrange(len(order)):
                width_classes.add_key(r)
            for (a, b) in self.width_equalities:
                width_classes.unify(tyvars[self.rep(a)], tyvars[self.rep(b)])
            for (_, width_class) in width_classes.subset_items():
                if (len(width_class) < 2):
                    continue
                it = iter(sorted(width_class))
                va = next(it)
                for vb in it:
                    width_equality[vb] = va
        # Ensure a default tyvar exists, creating one if no term used it.
        if (self.default_rep is None):
            default_id = len(constraint)
            constraint.append(INT)
            specific[default_id] = predicate_default
        else:
            default_id = tyvars[self.default_rep]
        return TypeEnvironment(context, constraint, specific, min_width, lower_bounds, upper_bounds, width_equality, default_id)
|
class TypeEnvironment(object):
    'Contains the constraints gathered during type checking.\n  '
    # Fixed width assumed for pointer types when counting bits.
    pointer_width = 64
    # Floating-point types enumerated for FLOAT/NUMBER/FIRST_CLASS tyvars.
    float_tys = (HalfType(), SingleType(), DoubleType())
    def __init__(self, vars, constraint, specific, min_width, lower_bounds, upper_bounds, width_equality, default_id):
        # vars           - mapping term -> type-variable id
        # constraint     - list: tyvar id -> type-class constraint
        # specific       - dict: tyvar id -> fixed concrete type
        # min_width      - dict: tyvar id -> minimum bit width bound
        # lower_bounds   - dict: tyvar id -> tyvar ids ordered strictly below
        # upper_bounds   - dict: tyvar id -> tyvar ids ordered strictly above
        # width_equality - dict: tyvar id -> earlier tyvar with equal width
        # default_id     - tyvar id used for default-typed terms
        self.vars = vars
        self.constraint = constraint
        self.specific = specific
        self.min_width = min_width
        self.lower_bounds = lower_bounds
        self.upper_bounds = upper_bounds
        self.width_equality = width_equality
        self.default_id = default_id
        self.tyvars = len(constraint)
    @staticmethod
    def int_types(min_width, max_width):
        'Generate IntTypes in the range min_width to max_width-1.\n    '
        # Common widths 4 and 8 are yielded first so models using them are
        # enumerated earliest; remaining widths follow in ascending order.
        if (min_width <= 4 < max_width):
            (yield IntType(4))
        if (min_width <= 8 < max_width):
            (yield IntType(8))
        for w in xrange(min_width, min(max_width, 4)):
            (yield IntType(w))
        for w in xrange(max(min_width, 5), min(max_width, 8)):
            (yield IntType(w))
        for w in xrange(max(min_width, 9), max_width):
            (yield IntType(w))
    def floor(self, vid, vector):
        # Largest lower bound for this tyvar, considering both already-chosen
        # types of tyvars ordered below it and the numeric min_width bound.
        if (vid in self.lower_bounds):
            floor = max((vector[v] for v in self.lower_bounds[vid]))
        else:
            floor = 0
        floor = max(floor, self.min_width.get(vid, 0))
        return floor
    def bits(self, ty):
        'Return the size of the type in bits.\n    '
        if isinstance(ty, IntType):
            return ty.width
        if isinstance(ty, X86FP80Type):
            return 80
        if isinstance(ty, FloatType):
            return (ty.exp + ty.frac)
        if isinstance(ty, PtrType):
            return self.pointer_width
        assert False
    def _enum_vectors(self, vid, vector, int_limit):
        # Recursively assign a type to tyvar vid and all later tyvars,
        # yielding every complete assignment as a tuple. vector is mutated
        # in place; int_limit bounds the enumerated integer widths.
        if (vid >= self.tyvars):
            (yield tuple(vector))
            return
        if (vid in self.specific):
            # Fixed type: just check it against the floor and width equality.
            # NOTE(review): this <= compares a type with a possibly-int floor,
            # relying on Python 2 cross-type ordering — confirm intent.
            if (vector[vid] <= self.floor(vid, vector)):
                return
            if ((vid in self.width_equality) and (self.bits(vector[vid]) != self.bits(vector[self.width_equality[vid]]))):
                return
            for v in self._enum_vectors((vid + 1), vector, int_limit):
                (yield v)
            return
        # Candidate types are determined by the type-class constraint.
        con = self.constraint[vid]
        if (con == FIRST_CLASS):
            tys = itertools.chain(self.int_types(1, int_limit), (PtrType(),), self.float_tys)
        elif (con == NUMBER):
            tys = itertools.chain(self.int_types(1, int_limit), self.float_tys)
        elif (con == FLOAT):
            tys = (t for t in self.float_tys if (t > self.floor(vid, vector)))
        elif (con == INT_PTR):
            tys = itertools.chain(self.int_types(1, int_limit), (PtrType(),))
        elif (con == INT):
            floor = self.floor(vid, vector)
            if isinstance(floor, IntType):
                floor = floor.width
            tys = self.int_types((floor + 1), int_limit)
        elif (con == BOOL):
            tys = (IntType(1),)
        else:
            assert False
        if (vid in self.width_equality):
            # Restrict candidates to the width of the equal-width partner,
            # which has a smaller id and is therefore already assigned.
            bits = self.bits(vector[self.width_equality[vid]])
            tys = (t for t in tys if (self.bits(t) == bits))
        for t in tys:
            vector[vid] = t
            for v in self._enum_vectors((vid + 1), vector, int_limit):
                (yield v)
    def models(self, int_limit=config.int_limit):
        'Generate type models consistent with this environment.'
        vector = ([None] * self.tyvars)
        for (vid, ty) in self.specific.iteritems():
            vector[vid] = ty
        for v in self._enum_vectors(0, vector, int_limit):
            (yield TypeModel(self, v))
    def width_equal_tyvars(self, v1, v2):
        'Test whether the type variables are width-equal.\n    '
        v1 = self.width_equality.get(v1, v1)
        v2 = self.width_equality.get(v2, v2)
        return (v1 == v2)
    def extend(self, term):
        'Type-check a term with respect to this environment, adding new terms\n\n    The term must not introduce new type variables or futher constrain types.\n    '
        tc = _EnvironmentExtender(environment=self)
        tc.collect(term)
        # Sets not tied to an existing tyvar must be defaultable.
        for rep in tc.defaultable_reps:
            if (rep not in tc.rep_tyvar):
                tc.default(rep)
        for rep in tc.sets.reps():
            if (rep not in tc.rep_tyvar):
                raise Error('Ambiguous type for {}'.format(_name(rep)))
            tyvar = tc.rep_tyvar[rep]
            c = self.constraint[tyvar]
            cx = tc.constraints[rep]
            # The new term may not constrain a tyvar more tightly than the
            # environment already does.
            if (most_specific(c, cx) != c):
                raise Error('Constraints too strong for {}'.format(_name(term)))
            if (rep in tc.specifics):
                if (tyvar not in self.specific):
                    raise Error('Constraints too strong for {}'.format(_name(term)))
                if (tc.specifics[rep] != self.specific[tyvar]):
                    raise Error('Incompatible constraints for {}'.format(_name(term)))
        tc.simplify_orderings()
        for (t1, t2) in tc.width_equalities:
            if (t1 == t2):
                raise Error('Improperly unified {} and {}'.format(_name(t1), _name(t2)))
            if (not self.width_equal_tyvars(tc.rep_tyvar[t1], tc.rep_tyvar[t2])):
                raise Error(('Constraints too strong for ' + _name(term)))
        for (lo, hi) in tc.ordering:
            v2 = tc.rep_tyvar[hi]
            if isinstance(lo, int):
                # A numeric bound must already be implied by the environment.
                if ((lo > self.min_width.get(v2, 0)) and all(((lo > self.min_width.get(v, 0)) for v in self.lower_bounds[v2])) and ((v2 not in self.specific) or (lo > self.bits(self.specific[v2])))):
                    raise Error(('Constraints too strong for ' + _name(term)))
            else:
                v1 = tc.rep_tyvar[lo]
                if all(((v != v1) for v in self.lower_bounds[v2])):
                    raise Error(('Constraints too strong for ' + _name(term)))
        # All checks passed: record the new terms' tyvars.
        for (rep, terms) in tc.sets.subset_items():
            tyvar = tc.rep_tyvar[rep]
            for t in terms:
                assert ((t not in self.vars) or (self.vars[t] == tyvar))
                self.vars[t] = tyvar
|
def _name(term):
    'Render a term the way the formatter prints an operand.'
    formatter = Formatter()
    return formatter.operand(term)
|
class TypeModel(object):
    """Maps values to types for a specific context (e.g., a transformation).

    Usually produced by TypeEnvironment.models().
    """
    def __init__(self, environment, types):
        self.environment = environment
        self.types = types
    def __getitem__(self, key):
        # Look up the term's type variable, then its assigned type.
        tyvar = self.environment.vars[key]
        return self.types[tyvar]
    def __eq__(self, other):
        if not isinstance(other, TypeModel):
            return False
        return self.environment == other.environment and self.types == other.types
    def __ne__(self, other):
        return (not (self == other))
    def __hash__(self):
        # Mix in the class so models never collide with their raw contents.
        return hash(type(self)) ^ hash((self.environment, self.types))
    def __repr__(self):
        return 'TypeModel({0.environment},{0.types})'.format(self)
|
class _EnvironmentExtender(TypeConstraints):
    'Used by TypeEnvironment.extend.\n  '
    logger = logger.getChild('_EnvironmentExtender')
    def __init__(self, environment, **kws):
        # tyvar_reps - per-tyvar representative term (None until one is seen)
        # rep_tyvar  - inverse map: representative term -> tyvar id
        self.environment = environment
        self.tyvar_reps = ([None] * environment.tyvars)
        self.rep_tyvar = {}
        super(_EnvironmentExtender, self).__init__(**kws)
    def _init_term(self, term):
        # If the term is already typed in the environment, unify it with the
        # other terms known to share its tyvar (or become their rep).
        super(_EnvironmentExtender, self)._init_term(term)
        if (term not in self.environment.vars):
            return
        tyvar = self.environment.vars[term]
        rep = self.tyvar_reps[tyvar]
        if rep:
            self.sets.unify(term, rep, self._merge)
        else:
            self.logger.debug('Set rep for tyvar %s to %s', tyvar, term)
            self.tyvar_reps[tyvar] = term
            self.rep_tyvar[term] = tyvar
    def _merge(self, t1, t2):
        # Keep rep_tyvar/tyvar_reps consistent as sets merge; merging two
        # sets already tied to (different) tyvars is an error.
        super(_EnvironmentExtender, self)._merge(t1, t2)
        if (t2 in self.rep_tyvar):
            if (t1 in self.rep_tyvar):
                raise Error('Cannot unify types for {} and {}'.format(_name(t1), _name(t2)))
            tyvar = self.rep_tyvar.pop(t2)
            self.rep_tyvar[t1] = tyvar
            self.tyvar_reps[tyvar] = t1
            self.logger.debug('Set rep for tyvar %s to %s', tyvar, t1)
    def _init_default(self, rep):
        # Tie a newly-created default set to the environment's default tyvar.
        super(_EnvironmentExtender, self)._init_default(rep)
        tyvar = self.environment.default_id
        assert (rep not in self.rep_tyvar)
        assert (self.tyvar_reps[tyvar] is None)
        self.rep_tyvar[rep] = tyvar
        self.tyvar_reps[tyvar] = rep
|
class Validator(object):
    "Compare type constraints for a term against a supplied type vector.\n\n  Usage:\n    given a TypeEnvironment e, a type vector v, and a term t,\n\n    > t.type_constraints(Validator(e,v))\n\n  will return None for success and raise an Error if the term's constraints\n  are not met.\n  "
    def __init__(self, environment, type_vector):
        self.environment = environment
        self.type_vector = type_vector
    def type(self, term):
        # Resolve a term's type via its tyvar in the environment.
        # FIX: was 'environment.vars' (a NameError); must use the instance
        # attribute self.environment.
        return self.type_vector[self.environment.vars[term]]
    def eq_types(self, *terms):
        # All terms must resolve to the same type.
        it = iter(terms)
        t1 = next(it)  # builtin next(), consistent with the rest of the file
        ty = self.type(t1)
        for t in it:
            if (self.type(t) != ty):
                raise Error
    def specific(self, term, ty):
        if ((ty is not None) and (self.type(term) != ty)):
            raise Error
    def integer(self, term):
        if (not isinstance(self.type(term), IntType)):
            raise Error
    def bool(self, term):
        # BOOL means exactly i1.
        if (self.type(term) != IntType(1)):
            raise Error
    def pointer(self, term):
        if (not isinstance(self.type(term), PtrType)):
            raise Error
    def int_ptr_vec(self, term):
        if (not isinstance(self.type(term), (IntType, PtrType))):
            raise Error
    def float(self, term):
        if (not isinstance(self.type(term), FloatType)):
            raise Error
    def first_class(self, term):
        if (not isinstance(self.type(term), (IntType, FloatType, PtrType))):
            raise Error
    def number(self, term):
        if (not isinstance(self.type(term), (IntType, FloatType))):
            raise Error
    def width_order(self, lo, hi):
        # lo may be an int bound or a term; either way it must be < type(hi).
        if isinstance(lo, Value):
            lo = self.type(lo)
        if (lo >= self.type(hi)):
            raise Error
    def width_equal(self, a, b):
        if (self.environment.bits(self.type(a)) != self.environment.bits(self.type(b))):
            raise Error
|
class Error(error.Error):
    # Module-specific error type; inherits all behavior from the shared base.
    pass
|
class NegatableFlag(argparse.Action):
    "Action type for paired Boolean flags, e.g., '--spam'/'--no-spam'.\n\n  This is an alternative to specifying two separate options with a common\n  dest and opposite storage actions. If --spam and --no-spam occur in the\n  argument list, the last one wins.\n\n  Usage:\n    parser.add_argument('--spam', action=NegatableFlag)\n\n  Usable keywords: dest, default, required, help\n  "
    def __init__(self, option_strings, dest, default=False, required=False, help=None):
        # Flags may not themselves begin with '--no-', since that would
        # collide with the generated negative spellings.
        for opt in option_strings:
            if opt.startswith('--no-'):
                raise ValueError('Flags cannot begin with "--no-"')
        negations = [('--no-' + opt[2:]) for opt in option_strings if opt.startswith('--')]
        option_strings.extend(negations)
        if help:
            help += ' (default {})'.format(('yes' if default else 'no'))
        super(NegatableFlag, self).__init__(option_strings=option_strings, dest=dest, nargs=0, default=default, required=required, help=help)
    def __call__(self, parser, namespace, values, option_string=None):
        # A '--no-*' spelling stores False; the positive spelling stores True.
        negated = isinstance(option_string, str) and option_string.startswith('--no-')
        setattr(namespace, self.dest, not negated)
|
class DisjointSubsets(object):
    """Stores values in one or more subsets. Each value exists in only one
    subset at a time. Subsets may be unified.
    """
    def __init__(self):
        self._parent = {}  # key -> parent key, or None for a representative
        self._subset = {}  # representative -> frozenset of members
    def __contains__(self, key):
        return key in self._parent
    def key_reps(self):
        'Generate pairs consisting of an element and its representative.'
        for key in self._parent:
            (yield (key, self.rep(key)))
    def subset_items(self):
        'Generate pairs consisting of a representative and its subset.'
        return self._subset.iteritems()
    def reps(self):
        'Generate all representatives.'
        return self._subset.iterkeys()
    def add_key(self, key):
        'Add key in its own singleton subset (no-op if already present).'
        if key in self._parent:
            return
        self._parent[key] = None
        self._subset[key] = frozenset((key,))
    def rep(self, key):
        'Return the representative of the subset containing key.'
        if key not in self._parent:
            raise KeyError(key)
        cur = key
        # Walk the parent chain up to the root representative.
        while self._parent[cur] is not None:
            cur = self._parent[cur]
        return cur
    def subset(self, key):
        'Return the subset (a frozenset) containing key.'
        return self._subset[self.rep(key)]
    def unify(self, key1, key2, merge=None):
        "Merge the sets containing these keys.\n\n    The resulting set will be represented by one of the representatives of\n    the old set(s).\n\n    Keywords:\n      merge - called with the resulting set's representative and the other,\n        former representative\n    "
        rep_a = self.rep(key1)
        rep_b = self.rep(key2)
        if rep_a == rep_b:
            return
        # rep_a absorbs rep_b's subset; rep_b now points at rep_a.
        self._parent[rep_b] = rep_a
        absorbed = self._subset.pop(rep_b)
        self._subset[rep_a] = self._subset[rep_a].union(absorbed)
        if merge:
            merge(rep_a, rep_b)
    def unified(self, key1, key2):
        'Test whether two keys are in the same subset.'
        return self.rep(key1) == self.rep(key2)
|
class Tag(pretty.PrettyRepr):
    """Subclasses of Tag may be used to label objects, so that they may be
    added to sets or DisjointSubsets multiple times."""
    def __init__(self, value):
        self.val = value
    def __eq__(self, other):
        # Equal only to tags of the exact same subclass with equal values.
        if type(self) != type(other):
            return False
        return self.val == other.val
    def __ne__(self, other):
        return (not (self == other))
    def __hash__(self):
        # Mix in the tag class so distinct Tag subclasses never collide.
        return hash(type(self)) ^ hash(self.val)
    def pretty(self):
        return pretty.pfun(type(self).__name__, (self.val,))
|
def singledispatch(default):
    """Create a generic function dispatching on the type of its first argument.

    Implementations are registered per class; lookup walks the argument's
    MRO, so a registration for a base class covers its subclasses.
    """
    registry = {object: default}

    def dispatch(cls):
        # Most-specific registered implementation along the MRO.
        for base in cls.mro():
            if base in registry:
                return registry[base]
        raise NotImplementedError

    def register(cls, fun=None):
        # Usable directly, register(cls, fn), or as a decorator @register(cls).
        if fun is None:
            return lambda f: register(cls, f)
        registry[cls] = fun
        return fun

    def wrapper(arg, *args, **kws):
        return dispatch(type(arg))(arg, *args, **kws)

    functools.update_wrapper(wrapper, default)
    wrapper.dispatch = dispatch
    wrapper.register = register
    wrapper.registry = registry
    return wrapper
|
def _lookup2(cls1, cls2, registry):
'\n Find the most specific implementation for (cls1,cls2) in registry.\n \n A more specific superclass of cls1 beats a more specific superclass\n of cls2.\n '
for k1 in cls1.mro():
for k2 in cls2.mro():
if ((k1, k2) in registry):
return registry[(k1, k2)]
raise NotImplementedError
|
def doubledispatch(default):
    """Create a multimethod which dispatches on the types of its first two
    arguments."""
    registry = {(object, object): default}
    cache = {}

    def dispatch(cls1, cls2):
        # Memoize lookups: walking both MROs in _lookup2 is the slow path.
        try:
            return cache[(cls1, cls2)]
        except KeyError:
            impl = _lookup2(cls1, cls2, registry)
            cache[(cls1, cls2)] = impl
            return impl

    def register(cls1, cls2, fun=None):
        # Usable directly or as a decorator: @register(C1, C2).
        if fun is None:
            return lambda f: register(cls1, cls2, f)
        cache.clear()  # new registration may change any cached resolution
        registry[(cls1, cls2)] = fun
        return fun

    def wrapper(arg1, arg2, *args, **kws):
        return dispatch(type(arg1), type(arg2))(arg1, arg2, *args, **kws)

    functools.update_wrapper(wrapper, default)
    wrapper.dispatch = dispatch
    wrapper.register = register
    wrapper.registry = registry
    return wrapper
|
def iter_seq(doc_it):
    'Collapse an iterable of docs into one Doc (unwrapping a singleton).'
    docs = tuple(doc_it)
    if len(docs) == 1:
        return text(docs[0])
    return _Seq(docs)
|
def seq(*docs):
    'Concatenate the given docs into a single Doc.'
    return iter_seq(docs)
|
def group(*docs):
    'Wrap the docs in a group, which the printer tries to keep on one line.'
    if len(docs) == 1:
        doc = text(docs[0])
    else:
        doc = iter_seq(docs)
    # Pass empty docs and existing groups through unwrapped.
    if isinstance(doc, _Group) or not bool(doc):
        return doc
    return _Group(doc)
|
def text(string):
    """Convert a value to a Doc.

    Docs pass through unchanged; strings become literal text nodes; any
    other object is rendered via prepr.
    """
    if isinstance(string, Doc):
        return string
    return _Text(string) if isinstance(string, str) else prepr(string)
|
def prepr(obj):
    'Converts an object to a Doc, similar to repr.\n\n  prepr(obj) -> obj.pretty(), if obj has a member pretty.\n  prepr(obj) special-cases tuples, lists, dicts, sets, and frozensets.\n  prepr(obj) -> text(repr(obj)) for all other objects\n  '
    # The pretty protocol takes priority over the container special cases.
    if hasattr(obj, 'pretty'):
        return _Pretty(obj)
    if isinstance(obj, tuple):
        return ptuple(obj)
    if isinstance(obj, list):
        return plist(obj)
    if isinstance(obj, dict):
        return pdict(obj)
    if isinstance(obj, (set, frozenset)):
        return pset(obj)
    # Fallback: ordinary repr as literal text.
    return _Text(repr(obj))
|
def pprint(*objs, **kws):
    "Pretty-print specified objects.\n\n  pprint(*objs, file=sys.stdout, sep=line, end='\n', grouped=True, first=True,\n    indent=0, prefix='', **kws)\n\n  Keywords:\n    file - where to write the objects\n    sep - a Doc output between objects\n    end - a string written after any objs have been written\n    grouped - whether to attempt to write on one line\n    first - if False, apply prefix and indent to first line\n    indent - indentation level (following first line)\n    prefix - written before all lines following the first, before any indent\n    width - desired maximum width\n  "
    # Pop the locally-handled keywords; the remainder (e.g. width) is
    # forwarded to Doc.write_to.
    file = kws.pop('file', sys.stdout)
    sep = kws.pop('sep', line)
    end = kws.pop('end', '\n')
    grouped = kws.pop('grouped', True)
    first = kws.pop('first', True)
    indent = kws.pop('indent', 0)
    prefix = kws.pop('prefix', '')
    doc = sep.join(objs)
    if grouped:
        doc = group(doc)
    if (not first):
        # Emit prefix/indent before the first line and tell write_to where
        # the cursor now is.
        file.write(prefix)
        file.write((' ' * indent))
        kws['start_at'] = (len(prefix) + indent)
    doc.write_to(file, indent=indent, prefix=prefix, **kws)
    if end:
        file.write(end)
|
def pformat(*objs, **kws):
    """Return a string containing the pretty-printed objects.

    pformat(*objs, sep=line, end='', **kws)

    Keywords:
      sep - a Doc output between objects
      end - a string written after any objs have been written
      width - desired maximum width
      indent - indentation level (following first line)
      prefix - written before all lines following the first, before any indent
    """
    sep = kws.pop('sep', line)
    end = kws.pop('end', '')
    buf = StringIO.StringIO()
    group(sep.join(objs)).write_to(buf, **kws)
    if end:
        buf.write(end)
    return buf.getvalue()
|
class PrettyRepr(object):
    'Mixin class for objects which can pretty-print their representation.\n  '
    def pretty(self):
        'Return a Doc representing the object.'
        # Default: wrap the ordinary repr; subclasses override for structure.
        return text(super(PrettyRepr, self).__repr__())
    def __repr__(self):
        return self.pretty().oneline()
    def pprint(self, stream=None, end='\n', **kws):
        'Pretty-print this object to stream (default: stdout).'
        out = sys.stdout if stream is None else stream
        self.pretty().write_to(out, **kws)
        if end:
            out.write(end)
    def pformat(self, **kws):
        'Return the pretty-printed representation as a string.'
        buf = StringIO.StringIO()
        self.pretty().write_to(buf, **kws)
        return buf.getvalue()
|
class Doc(PrettyRepr):
    'The intermediate formatting tree generated during pretty printing.\n\n  Use text, prepr, group, seq, line, lbreak, and others to create Docs.\n\n  Combine Docs with +, or use | to put a line between them.\n  '
    __slots__ = ()
    # Event tags streamed to the rendering coroutines.
    (Text, Line, Break, GBegin, GEnd) = range(5)
    def __add__(self, other):
        return seq(self, other)
    def __radd__(self, other):
        return seq(other, self)
    def __or__(self, other):
        'doc | obj -> seq(doc, line, obj)'
        return seq(self, line, other)
    def __ror__(self, other):
        return seq(other, line, self)
    def nest(self, indent):
        'Increase indentation level.\n\n    doc.nest(x) == nest(x, doc)\n    '
        return _Nest(indent, self)
    def join(self, docs):
        'Concatenate the docs, separated by this Doc.'
        return iter_seq(joinit(docs, self))
    def __str__(self):
        'Convert this Doc to a string.\n\n    This returns the content of the Doc. Use __repr__ to return the\n    structure of the Doc.'
        sbuf = StringIO.StringIO()
        self.write_to(sbuf)
        return sbuf.getvalue()
    def write_to(self, file, width=80, indent=0, **kws):
        'Write this doc to the specified file.'
        # Rendering pipeline: group sizing -> break decisions -> text output.
        # (Python 2 coroutine protocol: prime with next(), flush with close().)
        out = grow_groups(add_hp(find_group_ends(width, text_events(width, file.write, **kws))))
        out.next()
        self.send_to(out, indent)
        out.close()
    def oneline(self):
        'Convert this Doc to a one-line string.'
        sbuf = StringIO.StringIO()
        def dump():
            # Minimal consumer: text verbatim, line events become spaces.
            while True:
                event = (yield)
                if (event[0] == Doc.Text):
                    sbuf.write(event[1])
                elif (event[0] == Doc.Line):
                    sbuf.write(' ')
        out = dump()
        out.next()
        self.send_to(out, 0)
        return sbuf.getvalue()
    def pretty(self):
        'Return the structure of this Doc as a Doc.'
        return pfun(type(self).__name__, (getattr(self, s) for s in self.__slots__))
|
class _Text(Doc):
    # Leaf doc holding a single line of literal text (no newlines allowed).
    __slots__ = ('text',)
    def __init__(self, text):
        assert ('\n' not in text)
        self.text = text
    def send_to(self, out, indent):
        out.send((Doc.Text, self.text))
    def __nonzero__(self):
        # Empty text is falsy (Python 2 truth protocol).
        return bool(self.text)
|
class _Line(Doc):
    # A soft line break: rendered as a space when its group fits on one
    # line, otherwise as a newline plus the current indentation.
    __slots__ = ()

    def send_to(self, out, indent):
        out.send((Doc.Line, indent))

    def __repr__(self):
        return '_Line()'
|
class _Break(Doc):
    # Like _Line, but rendered as nothing (not a space) when the group fits.
    __slots__ = ()

    def send_to(self, out, indent):
        out.send((Doc.Break, indent))

    def __repr__(self):
        return '_Break()'

    def __nonzero__(self):
        # A break contributes no visible content on its own.
        return False
|
class _Group(Doc):
    # A unit whose Lines/Breaks are rendered flat if the whole group fits
    # within the remaining width.
    __slots__ = ('doc',)

    def __init__(self, doc):
        # find_group_ends assumes every group has non-zero width.
        assert bool(doc)
        self.doc = doc

    def send_to(self, out, indent):
        out.send((Doc.GBegin,))
        self.doc.send_to(out, indent)
        out.send((Doc.GEnd,))
|
class _Seq(Doc):
    # Concatenation of several docs.
    __slots__ = ('docs',)

    def __init__(self, docs):
        self.docs = docs

    def send_to(self, out, indent):
        # text() coerces non-Doc members (e.g. plain strings) on the fly.
        for doc in self.docs:
            text(doc).send_to(out, indent)

    def __nonzero__(self):
        return any((bool(doc) for doc in self.docs))
|
class _Nest(Doc):
    # Renders the wrapped doc with its indentation level increased.
    __slots__ = ('indent', 'doc')

    def __init__(self, indent, doc):
        self.indent = indent
        self.doc = doc

    def send_to(self, out, indent):
        self.doc.send_to(out, (indent + self.indent))

    def __nonzero__(self):
        return bool(self.doc)
|
class _Pretty(Doc):
    # Lazily formats an arbitrary object via its pretty() method at render
    # time, rather than at Doc-construction time.
    __slots__ = ('obj',)

    def __init__(self, obj):
        self.obj = obj

    def send_to(self, out, indent):
        self.obj.pretty().send_to(out, indent)
|
def joinit(iterable, delimiter):
it = iter(iterable)
(yield next(it))
for x in it:
(yield delimiter)
(yield x)
|
def grow_groups(next):
    'Delays GEnd event until the next Line or Break.\n\n    If a group is immediately followed by trailing text, we should take it\n    into account when choosing whether to break the group. This stream\n    transformer pushes GEnds past any trailing text.\n\n    Furthermore, since GBegin can always be moved past text, grow_groups also\n    pushes them to the right as far as possible. This will eliminate some\n    groups if they contain only text.\n\n    This avoids the problem where a group is just short enough to fit on a line,\n    but is immediately followed by text, such as a comma, which will then go\n    past the right margin.\n\n    Be sure to call close() to send any suspended GEnds downstream.\n    '
    next.next()  # prime the downstream coroutine
    # pushing: GEnds being held back until the next Line/Break.
    # pushing_b: GBegins seen while GEnds were held (they must be emitted
    # after the held GEnds, preserving nesting).
    pushing = 0
    pushing_b = 0
    try:
        while True:
            event = (yield)
            if (event[0] == Doc.Text):
                # Text passes straight through; held markers slide past it.
                next.send(event)
            elif (event[0] == Doc.GBegin):
                if pushing:
                    pushing_b += 1
                else:
                    next.send(event)
            elif (event[0] == Doc.GEnd):
                if pushing_b:
                    # A held GBegin cancels against this GEnd (empty group).
                    pushing_b -= 1
                else:
                    pushing += 1
            else:
                # Line or Break: flush all held GEnds, then held GBegins,
                # then the event itself.
                while pushing:
                    next.send((Doc.GEnd,))
                    pushing -= 1
                while pushing_b:
                    next.send((Doc.GBegin,))
                    pushing_b -= 1
                next.send(event)
    finally:
        # On close(), emit anything still suspended.
        while pushing:
            next.send((Doc.GEnd,))
            pushing -= 1
        while pushing_b:
            next.send((Doc.GBegin,))
            pushing_b -= 1
|
def add_hp(next):
    'Annotate events with their horizontal position.\n\n    Assuming an infinitely-wide canvas, how many characters to the right is the\n    _end_ of this event.\n    '
    next.next()  # prime the downstream coroutine
    pos = 0
    while True:
        event = (yield)
        if (event[0] == Doc.Text):
            pos += len(event[1])
            next.send((Doc.Text, pos, event[1]))
        elif (event[0] == Doc.Line):
            # A Line occupies one character (the space) when rendered flat.
            pos += 1
            next.send((Doc.Line, pos, event[1]))
        elif (event[0] == Doc.Break):
            # A Break occupies no characters when rendered flat.
            next.send((Doc.Break, pos, event[1]))
        else:
            # GBegin/GEnd: zero width, annotated with the current position.
            next.send((event[0], pos))
|
class Buf(object):
    'Sequence type providing O(1) insert at either end, and O(1) concatenation.\n    '

    def __init__(self):
        # Cons-style chain: each cell is [value, next_cell].  The terminal
        # (empty) cell is kept in self.tail and doubles as the point where
        # append() grows the chain in place.
        empty = []
        self.head = empty
        self.tail = empty

    def append_left(self, item):
        # Prepend by wrapping the old head in a new cell.
        self.head = [item, self.head]

    def append(self, item):
        # Grow the terminal cell in place into [item, new_terminal].
        new_terminal = []
        self.tail.append(item)
        self.tail.append(new_terminal)
        self.tail = new_terminal

    def extend(self, other):
        # Splice other's chain onto our terminal cell and adopt its terminal.
        self.tail.extend(other.head)
        self.tail = other.tail

    def __iter__(self):
        cell = self.head
        while cell:
            yield cell[0]
            cell = cell[1]
|
def add_GBegin_pos(next):
    'Annotate GBegin events with the horizontal position of the end of the\n    group.\n\n    Because this waits until the entire group has been seen, so its latency and\n    memory use are unbounded.\n    '
    next.next()  # prime the downstream coroutine
    # Stack of Bufs, one per currently-open group; events are buffered until
    # the group's GEnd (and hence its end position) arrives.
    bufs = []
    while True:
        event = (yield)
        if (event[0] == Doc.GBegin):
            bufs.append(Buf())
        elif (bufs and (event[0] == Doc.GEnd)):
            pos = event[1]
            buf = bufs.pop()
            buf.append_left((Doc.GBegin, pos))
            buf.append(event)
            if bufs:
                # Fold the completed group into the enclosing open group.
                # BUGFIX: was buf[-1].extend(buf) — Buf is not indexable,
                # so that raised TypeError; the target is the innermost
                # still-open buffer (cf. find_group_ends, which does
                # bufs[-1][1].extend(buf)).
                bufs[(- 1)].extend(buf)
            else:
                # Outermost group closed: everything can be emitted.
                for event in buf:
                    next.send(event)
        elif bufs:
            bufs[(- 1)].append(event)
        else:
            next.send(event)
|
def find_group_ends(width, next):
    "Annotate GBegin events with the horizontal position of the end of the\n    group.\n\n    GBegins corresponding to groups larger than the width will be annotated with\n    'None'. This keeps memory usage and latency bounded, at the cost of some\n    potential inaccuracy. (Zero-width groups may cause FindGroupEnds to declare\n    a group too long, even if it is not.) Assumes that all groups have non-zero\n    widths.\n    "
    next.next()  # prime the downstream coroutine
    # Each entry is (deadline, Buf): the buffered events of an open group,
    # and the horizontal position past which that group cannot fit.
    bufs = deque()
    while True:
        event = (yield)
        if bufs:
            if (event[0] == Doc.GEnd):
                # Innermost group closed: its end position is now known.
                (_, buf) = bufs.pop()
                buf.append_left((Doc.GBegin, event[1]))
                buf.append((Doc.GEnd, event[1]))
                if bufs:
                    # Fold into the enclosing open group's buffer.
                    bufs[(- 1)][1].extend(buf)
                else:
                    for e in buf:
                        next.send(e)
            else:
                if (event[0] == Doc.GBegin):
                    bufs.append(((event[1] + width), Buf()))
                else:
                    bufs[(- 1)][1].append(event)
                # Flush outer groups that provably cannot fit on one line:
                # their deadline has passed, or too many groups are pending
                # (bounds memory/latency). Such groups are tagged None.
                while ((bufs[0][0] < event[1]) or (len(bufs) > width)):
                    next.send((Doc.GBegin, None))
                    (_, buf) = bufs.popleft()
                    for e in buf:
                        next.send(e)
                    if (not bufs):
                        break
        elif (event[0] == Doc.GBegin):
            bufs.append(((event[1] + width), Buf()))
        else:
            # No open group: pass events straight through.
            next.send(event)
|
def text_events(width, out, prefix='', start_at=0):
    'Write an annotated event stream to some method.\n\n    Arguments:\n    width - Desired maximum width for printing\n    out - A function which accepts strings (e.g. sys.stdout.write)\n    Keywords:\n    prefix - A string to put the start of each subsequent line. This counts\n        against the given width.\n    start_at - Assume this many characters have been printed on the first line\n    '
    width -= len(prefix)
    newline = ('\n' + prefix)
    # fits > 0: we are inside that many nested groups known to fit on the
    # current line, so Lines/Breaks are rendered flat.
    fits = 0
    # hpl: horizontal-position limit; an event whose annotated end position
    # exceeds this no longer fits on the current line.
    hpl = (width - start_at)
    while True:
        event = (yield)
        if (event[0] == Doc.Text):
            out(event[2])
        elif (event[0] == Doc.Line):
            if fits:
                out(' ')  # flat rendering inside a fitting group
            else:
                out(newline)
                out((' ' * event[2]))  # event[2] is the indentation level
                hpl = ((event[1] + width) - event[2])
        elif (event[0] == Doc.Break):
            # A Break renders as nothing when flat, newline+indent otherwise.
            if (not fits):
                out(newline)
                out((' ' * event[2]))
                hpl = ((event[1] + width) - event[2])
        elif (event[0] == Doc.GBegin):
            if fits:
                # Already inside a fitting group; nested groups fit too.
                fits += 1
            elif ((event[1] != None) and (event[1] <= hpl)):
                # Group's known end position fits within the line limit.
                fits = 1
        elif fits:
            # GEnd: leaving a fitting group.
            fits -= 1
|
def nest(indent, doc):
    'Return doc rendered with its indentation level increased by indent.'
    return _Nest(indent, doc)
|
def pfun(name, args, indent=2):
    'Format a call name(arg, ...) as a group that may break after the paren.'
    rendered = tuple(prepr(a) for a in args)
    if not rendered:
        return seq(name, '()')
    return group(name, '(', lbreak, commaline.join(rendered), ')').nest(indent)
|
def pfun_(name, args):
if (len(args) == 0):
return seq(name, '()')
return group(name, '(', commaline.join(args), ')').nest((len(name) + 1))
|
def pdict(dict):
    'Format a dict as {key: value, ...}, one breakable group per entry.'
    entries = (group(prepr(k), ':', line, prepr(v)).nest(2)
               for (k, v) in dict.iteritems())
    return group('{', commaline.join(entries), '}').nest(1)
|
def plist(list):
    'Format a list as [item, item, ...].'
    items = (prepr(v) for v in list)
    return group('[', commaline.join(items), ']').nest(1)
|
def ptuple(tup):
    'Format a tuple, rendering the one-element form as (x,).'
    if not tup:
        return text('()')
    if len(tup) == 1:
        return group('(', prepr(tup[0]), ',)').nest(1)
    items = (prepr(v) for v in tup)
    return group('(', commaline.join(items), ')').nest(1)
|
def pset(set):
    'Format a set as TypeName([sorted elements]).'
    type_name = type(set).__name__
    return seq(type_name, '(', plist(sorted(set)), ')').nest(len(type_name) + 1)
|
def block_print(obj, width=80):
    # Print obj to stdout as a "block": every Line/Break is wrapped in its
    # own group, so the renderer may keep it flat whenever the remainder fits.
    def blk(next):
        next.next()  # prime the downstream coroutine
        try:
            while True:
                event = (yield)
                if ((event[0] == Doc.Line) or (event[0] == Doc.Break)):
                    # Wrap each separator in a one-event group at indent 0.
                    next.send((Doc.GBegin,))
                    next.send((event[0], 0))
                    next.send((Doc.GEnd,))
                elif (event[0] == Doc.Text):
                    next.send(event)
        finally:
            next.close()
    it = blk(grow_groups(add_hp(find_group_ends(width, text_events(width, sys.stdout.write)))))
    it.next()
    text(obj).send_to(it, 0)
    it.close()
    sys.stdout.write('\n')
|
def mk_and(clauses):
    'mk_and([p,q,r]) -> And(p,q,r)'
    # The empty conjunction is True; a singleton needs no And wrapper.
    if not clauses:
        return z3.BoolVal(True)
    if len(clauses) == 1:
        return clauses[0]
    return z3.And(clauses)
|
def mk_or(clauses):
    'mk_or([p,q,r]) -> Or(p,q,r)'
    # The empty disjunction is False; a singleton needs no Or wrapper.
    if not clauses:
        return z3.BoolVal(False)
    if len(clauses) == 1:
        return clauses[0]
    return z3.Or(clauses)
|
def mk_not(clauses):
    'mk_not([p,q,r]) -> Not(And(p,q,r))'
    # Not(And()) = Not(True) = False for the empty clause list.
    if not clauses:
        return z3.BoolVal(False)
    if len(clauses) == 1:
        return z3.Not(clauses[0])
    return z3.Not(z3.And(clauses))
|
def mk_forall(qvars, clauses):
    'mk_forall(vs, [p,q,r]) -> ForAll(vs, And(p,q,r))'
    body = mk_and(clauses)
    if len(qvars) == 0:
        # z3.ForAll rejects an empty variable list; the body stands alone.
        return body
    return z3.ForAll(qvars, body)
|
def bool_to_BitVec(b):
    'Convert a Z3 Bool into a 1-bit BitVec (True -> 1, False -> 0).'
    one = z3.BitVecVal(1, 1)
    zero = z3.BitVecVal(0, 1)
    return z3.If(b, one, zero)
|
def bv_log2(bitwidth, v):
def rec(h, l):
if (h <= l):
return z3.BitVecVal(l, bitwidth)
mid = (l + int(((h - l) / 2)))
return z3.If((z3.Extract(h, (mid + 1), v) != 0), rec(h, (mid + 1)), rec(mid, l))
return rec((v.size() - 1), 0)
|
def zext_or_trunc(v, src, tgt):
    'Resize v from src bits to tgt bits: zero-extend, truncate, or pass through.'
    if tgt > src:
        return z3.ZeroExt(tgt - src, v)
    if tgt < src:
        return z3.Extract(tgt - 1, 0, v)
    return v
|
def ctlz(output_width, v):
    'Count of leading zero bits in v, as an output_width-wide bitvector.'
    size = v.size()
    one_bit = z3.BitVecVal(1, 1)
    def scan(i):
        # Walk from the MSB downward; the first set bit fixes the count.
        if i < 0:
            return z3.BitVecVal(size, output_width)
        found = (z3.Extract(i, i, v) == one_bit)
        return z3.If(found, z3.BitVecVal(size - 1 - i, output_width), scan(i - 1))
    return scan(size - 1)
|
def cttz(output_width, v):
    'Count of trailing zero bits in v, as an output_width-wide bitvector.'
    size = v.size()
    one_bit = z3.BitVecVal(1, 1)
    def scan(i):
        # Walk from the LSB upward; the first set bit fixes the count.
        if i == size:
            return z3.BitVecVal(size, output_width)
        found = (z3.Extract(i, i, v) == one_bit)
        return z3.If(found, z3.BitVecVal(i, output_width), scan(i + 1))
    return scan(0)
|
def ComputeNumSignBits(bitwidth, v):
    'Number of leading bits of v that equal its sign bit (including it).'
    size = v.size()
    msb = size - 1
    sign = z3.Extract(msb, msb, v)
    def scan(i):
        # Walk down from the bit below the sign bit; stop at the first
        # bit that differs from the sign.
        if i < 0:
            return z3.BitVecVal(size, bitwidth)
        same = (z3.Extract(i, i, v) == sign)
        return z3.If(same, scan(i - 1), z3.BitVecVal(msb - i, bitwidth))
    return scan(size - 2)
|
def fpUEQ(x, y):
    'Unordered floating-point equality: x == y, or either operand is NaN.'
    x_is_nan = z3.fpIsNaN(x)
    y_is_nan = z3.fpIsNaN(y)
    return z3.Or(z3.fpEQ(x, y), x_is_nan, y_is_nan)
|
def detect_fpMod():
    "Determine whether Z3's fpRem is correct, and set fpMod accordingly.\n    "
    import logging
    log = logging.getLogger(__name__)
    log.debug('Setting fpMod')
    # Probe: with a correct fpRem, 3.0 rem 2.0 is -1.0 (round-to-nearest),
    # hence negative. Some Z3 builds implement fpRem with mod semantics
    # instead, in which case fpMod can just forward to fpRem.
    if z3.is_true(z3.simplify(((z3.FPVal(3, z3.Float32()) % 2) < 0))):
        log.debug('Correct fpRem detected')
        # Patch fpMod in place by swapping its code object, so existing
        # references to fpMod pick up the chosen implementation.
        fpMod.__code__ = fpMod_using_fpRem.__code__
    else:
        log.debug('fpRem = fpMod')
        fpMod.__code__ = fpRem_trampoline.__code__
|
def fpMod(x, y, ctx=None):
    # Self-patching stub: the first call runs detect_fpMod(), which replaces
    # this function's __code__ with the appropriate implementation; the
    # recursive call then dispatches to the patched code.
    detect_fpMod()
    return fpMod(x, y, ctx)
|
def fpMod_using_fpRem(x, y, ctx=None):
    # Build mod out of IEEE remainder: fpRem yields a result in roughly
    # [-|y|/2, |y|/2]; shift negative remainders up by |y| to get the
    # magnitude, then give the result the sign of x.
    y = z3.fpAbs(y)
    z = z3.fpRem(z3.fpAbs(x), y, ctx)
    r = z3.If(z3.fpIsNegative(z), (z + y), z, ctx)
    return z3.If(z3.Not((z3.fpIsNegative(x) == z3.fpIsNegative(r)), ctx), z3.fpNeg(r), r, ctx)
|
def fpRem_trampoline(x, y, ctx=None):
return z3.fpRem(x, y)
|
def _xml_escape(data):
'Escape &, <, >, ", \', etc. in a string of data.'
from_symbols = '&><"\''
to_symbols = ((('&' + s) + ';') for s in 'amp gt lt quot apos'.split())
for (from_, to_) in zip(from_symbols, to_symbols):
data = data.replace(from_, to_)
return data
|
class _Constants(object):
    # Empty namespace class used as a simple bag for constant attributes.
    pass
|
class ParseBaseException(Exception):
    'base exception class for all parsing runtime exceptions'

    def __init__(self, pstr, loc=0, msg=None, elem=None):
        self.loc = loc
        if (msg is None):
            # Single-argument form: the first argument is the message and
            # no source string is available.
            self.msg = pstr
            self.pstr = ''
        else:
            self.msg = msg
            self.pstr = pstr
        self.parserElement = elem

    def __getattr__(self, aname):
        'supported attributes by name are:\n        - lineno - returns the line number of the exception text\n        - col - returns the column number of the exception text\n        - line - returns the line containing the exception text\n        '
        # Computed lazily from loc/pstr via the module-level helpers.
        if (aname == 'lineno'):
            return lineno(self.loc, self.pstr)
        elif (aname in ('col', 'column')):
            return col(self.loc, self.pstr)
        elif (aname == 'line'):
            return line(self.loc, self.pstr)
        else:
            raise AttributeError(aname)

    def __str__(self):
        return ('%s (at char %d), (line:%d, col:%d)' % (self.msg, self.loc, self.lineno, self.column))

    def __repr__(self):
        return _ustr(self)

    def markInputline(self, markerString='>!<'):
        'Extracts the exception line from the input string, and marks\n        the location of the exception with a special symbol.\n        '
        line_str = self.line
        line_column = (self.column - 1)
        if markerString:
            line_str = ''.join((line_str[:line_column], markerString, line_str[line_column:]))
        return line_str.strip()

    def __dir__(self):
        return 'loc msg pstr parserElement lineno col line markInputline __str__ __repr__'.split()
|
class ParseException(ParseBaseException):
    "exception thrown when parse expressions don't match class;\n    supported attributes by name are:\n    - lineno - returns the line number of the exception text\n    - col - returns the column number of the exception text\n    - line - returns the line containing the exception text\n    "
    # Recoverable mismatch: callers typically catch this and backtrack.
    pass
|
class ParseFatalException(ParseBaseException):
    'user-throwable exception thrown when inconsistent parse content\n    is found; stops all parsing immediately'
    pass
|
class ParseSyntaxException(ParseFatalException):
    "just like C{L{ParseFatalException}}, but thrown internally when an\n    C{L{ErrorStop<And._ErrorStop>}} ('-' operator) indicates that parsing is to stop immediately because\n    an unbacktrackable syntax error has been found"

    def __init__(self, pe):
        # Re-wrap an existing parse exception, preserving its details.
        super(ParseSyntaxException, self).__init__(pe.pstr, pe.loc, pe.msg, pe.parserElement)
|
class RecursiveGrammarException(Exception):
    'exception thrown by C{validate()} if the grammar could be improperly recursive'

    def __init__(self, parseElementList):
        # Record the chain of elements that forms the suspected cycle.
        self.parseElementTrace = parseElementList

    def __str__(self):
        message = ('RecursiveGrammarException: %s' % self.parseElementTrace)
        return message
|
class _ParseResultsWithOffset(object):
def __init__(self, p1, p2):
self.tup = (p1, p2)
def __getitem__(self, i):
return self.tup[i]
def __repr__(self):
return repr(self.tup)
def setOffset(self, i):
self.tup = (self.tup[0], i)
|
class ParseResults(object):
    'Structured parse results, to provide multiple means of access to the parsed data:\n       - as a list (C{len(results)})\n       - by list index (C{results[0], results[1]}, etc.)\n       - by attribute (C{results.<resultsName>})\n       '

    def __new__(cls, toklist, name=None, asList=True, modal=True):
        # Constructing from an existing ParseResults is a no-op pass-through;
        # __doinit tells __init__ whether to populate a fresh object.
        if isinstance(toklist, cls):
            return toklist
        retobj = object.__new__(cls)
        retobj.__doinit = True
        return retobj

    def __init__(self, toklist, name=None, asList=True, modal=True, isinstance=isinstance):
        if self.__doinit:
            self.__doinit = False
            self.__name = None
            self.__parent = None
            self.__accumNames = {}
            if isinstance(toklist, list):
                self.__toklist = toklist[:]
            elif isinstance(toklist, _generatorType):
                self.__toklist = list(toklist)
            else:
                self.__toklist = [toklist]
            self.__tokdict = dict()
        if (name is not None) and name:
            if not modal:
                # listAllMatches mode: every assignment accumulates.
                self.__accumNames[name] = 0
            if isinstance(name, int):
                name = _ustr(name)
            self.__name = name
            if not (toklist in (None, '', [])):
                if isinstance(toklist, basestring):
                    toklist = [toklist]
                if asList:
                    if isinstance(toklist, ParseResults):
                        self[name] = _ParseResultsWithOffset(toklist.copy(), 0)
                    else:
                        self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]), 0)
                    self[name].__name = name
                else:
                    try:
                        self[name] = toklist[0]
                    except (KeyError, TypeError, IndexError):
                        self[name] = toklist

    def __getitem__(self, i):
        if isinstance(i, (int, slice)):
            return self.__toklist[i]
        elif i not in self.__accumNames:
            # Modal name: return the most recently assigned value.
            return self.__tokdict[i][-1][0]
        else:
            # Accumulating name: return every recorded value.
            return ParseResults([v[0] for v in self.__tokdict[i]])

    def __setitem__(self, k, v, isinstance=isinstance):
        if isinstance(v, _ParseResultsWithOffset):
            self.__tokdict[k] = self.__tokdict.get(k, list()) + [v]
            sub = v[0]
        elif isinstance(k, int):
            self.__toklist[k] = v
            sub = v
        else:
            self.__tokdict[k] = self.__tokdict.get(k, list()) + [_ParseResultsWithOffset(v, 0)]
            sub = v
        if isinstance(sub, ParseResults):
            sub.__parent = wkref(self)

    def __delitem__(self, i):
        if isinstance(i, (int, slice)):
            mylen = len(self.__toklist)
            del self.__toklist[i]
            if isinstance(i, int):
                if i < 0:
                    i += mylen
                i = slice(i, i + 1)
            removed = list(range(*i.indices(mylen)))
            removed.reverse()
            # Shift recorded offsets of named results past each removed slot.
            for name in self.__tokdict:
                occurrences = self.__tokdict[name]
                for j in removed:
                    for (k, (value, position)) in enumerate(occurrences):
                        occurrences[k] = _ParseResultsWithOffset(value, position - (position > j))
        else:
            del self.__tokdict[i]

    def __contains__(self, k):
        return k in self.__tokdict

    def __len__(self):
        return len(self.__toklist)

    def __bool__(self):
        return len(self.__toklist) > 0
    __nonzero__ = __bool__

    def __iter__(self):
        return iter(self.__toklist)

    def __reversed__(self):
        return iter(self.__toklist[::-1])

    def iterkeys(self):
        'Returns all named result keys.'
        if hasattr(self.__tokdict, 'iterkeys'):
            return self.__tokdict.iterkeys()
        else:
            return iter(self.__tokdict)

    def itervalues(self):
        'Returns all named result values.'
        return (self[k] for k in self.iterkeys())

    def iteritems(self):
        return ((k, self[k]) for k in self.iterkeys())

    if PY_3:
        keys = iterkeys
        values = itervalues
        items = iteritems
    else:
        def keys(self):
            'Returns all named result keys.'
            return list(self.iterkeys())

        def values(self):
            'Returns all named result values.'
            return list(self.itervalues())

        def items(self):
            'Returns all named result keys and values as a list of tuples.'
            return list(self.iteritems())

    def haskeys(self):
        'Since keys() returns an iterator, this method is helpful in bypassing\n        code that looks for the existence of any defined results names.'
        return bool(self.__tokdict)

    def pop(self, *args, **kwargs):
        'Removes and returns item at specified index (default=last).\n        Supports both list and dict semantics for pop(). If passed no\n        argument or an integer argument, it will use list semantics\n        and pop tokens from the list of parsed tokens. If passed a \n        non-integer argument (most likely a string), it will use dict\n        semantics and pop the corresponding value from any defined \n        results names. A second default return value argument is \n        supported, just as in dict.pop().'
        if not args:
            args = [-1]
        if 'default' in kwargs:
            args.append(kwargs['default'])
        if isinstance(args[0], int) or (len(args) == 1) or (args[0] in self):
            # BUGFIX: 'index' was never assigned, so every list-style pop()
            # raised NameError; the index/key to remove is args[0].
            index = args[0]
            ret = self[index]
            del self[index]
            return ret
        else:
            defaultvalue = args[1]
            return defaultvalue

    def get(self, key, defaultValue=None):
        'Returns named result matching the given key, or if there is no\n        such name, then returns the given C{defaultValue} or C{None} if no\n        C{defaultValue} is specified.'
        if key in self:
            return self[key]
        else:
            return defaultValue

    def insert(self, index, insStr):
        'Inserts new element at location index in the list of parsed tokens.'
        self.__toklist.insert(index, insStr)
        # Bump offsets of named results at or after the insertion point.
        for name in self.__tokdict:
            occurrences = self.__tokdict[name]
            for (k, (value, position)) in enumerate(occurrences):
                occurrences[k] = _ParseResultsWithOffset(value, position + (position > index))

    def append(self, item):
        'Add single element to end of ParseResults list of elements.'
        self.__toklist.append(item)

    def extend(self, itemseq):
        'Add sequence of elements to end of ParseResults list of elements.'
        if isinstance(itemseq, ParseResults):
            self += itemseq
        else:
            self.__toklist.extend(itemseq)

    def clear(self):
        'Clear all elements and results names.'
        del self.__toklist[:]
        self.__tokdict.clear()

    def __getattr__(self, name):
        # Named results read as attributes; unknown names yield '' rather
        # than raising. (Removed the unreachable code that previously
        # followed this try/except - both paths return before reaching it.)
        try:
            return self[name]
        except KeyError:
            return ''

    def __add__(self, other):
        ret = self.copy()
        ret += other
        return ret

    def __iadd__(self, other):
        if other.__tokdict:
            offset = len(self.__toklist)
            # Re-base other's named-result offsets past our current tokens;
            # negative offsets (meaning 'last') are pinned to the old length.
            addoffset = lambda a: ((a < 0) and offset) or (a + offset)
            otheritems = other.__tokdict.items()
            otherdictitems = [(k, _ParseResultsWithOffset(v[0], addoffset(v[1])))
                              for (k, vlist) in otheritems for v in vlist]
            for (k, v) in otherdictitems:
                self[k] = v
                if isinstance(v[0], ParseResults):
                    v[0].__parent = wkref(self)
        self.__toklist += other.__toklist
        self.__accumNames.update(other.__accumNames)
        return self

    def __radd__(self, other):
        if isinstance(other, int) and (other == 0):
            # Supports merging many ParseResults with the sum() builtin.
            return self.copy()
        # BUGFIX: previously fell off the end and silently returned None for
        # any other operand; delegate to the operand's addition instead
        # (this may raise a TypeError - so be it).
        return other + self

    def __repr__(self):
        return '(%s, %s)' % (repr(self.__toklist), repr(self.__tokdict))

    def __str__(self):
        out = []
        for i in self.__toklist:
            if isinstance(i, ParseResults):
                out.append(_ustr(i))
            else:
                out.append(repr(i))
        return ('[' + ', '.join(out)) + ']'

    def _asStringList(self, sep=''):
        # Flatten nested results into a flat list of strings.
        out = []
        for item in self.__toklist:
            if out and sep:
                out.append(sep)
            if isinstance(item, ParseResults):
                out += item._asStringList()
            else:
                out.append(_ustr(item))
        return out

    def asList(self):
        'Returns the parse results as a nested list of matching tokens, all converted to strings.'
        out = []
        for res in self.__toklist:
            if isinstance(res, ParseResults):
                out.append(res.asList())
            else:
                out.append(res)
        return out

    def asDict(self):
        'Returns the named parse results as dictionary.'
        if PY_3:
            return dict(self.items())
        else:
            return dict(self.iteritems())

    def copy(self):
        'Returns a new copy of a C{ParseResults} object.'
        ret = ParseResults(self.__toklist)
        ret.__tokdict = self.__tokdict.copy()
        ret.__parent = self.__parent
        ret.__accumNames.update(self.__accumNames)
        ret.__name = self.__name
        return ret

    def asXML(self, doctag=None, namedItemsOnly=False, indent='', formatted=True):
        'Returns the parse results as XML. Tags are created for tokens and lists that have defined results names.'
        nl = '\n'
        out = []
        # Map token offset -> results name, so positional tokens can be
        # tagged with the name they were stored under.
        namedItems = dict(((v[1], k) for (k, vlist) in self.__tokdict.items() for v in vlist))
        nextLevelIndent = (indent + ' ')
        if not formatted:
            indent = ''
            nextLevelIndent = ''
            nl = ''
        selfTag = None
        if doctag is not None:
            selfTag = doctag
        elif self.__name:
            selfTag = self.__name
        if not selfTag:
            if namedItemsOnly:
                return ''
            else:
                selfTag = 'ITEM'
        out += [nl, indent, '<', selfTag, '>']
        worklist = self.__toklist
        for (i, res) in enumerate(worklist):
            if isinstance(res, ParseResults):
                if i in namedItems:
                    out += [res.asXML(namedItems[i], (namedItemsOnly and (doctag is None)), nextLevelIndent, formatted)]
                else:
                    out += [res.asXML(None, (namedItemsOnly and (doctag is None)), nextLevelIndent, formatted)]
            else:
                resTag = None
                if i in namedItems:
                    resTag = namedItems[i]
                if not resTag:
                    if namedItemsOnly:
                        continue
                    else:
                        resTag = 'ITEM'
                xmlBodyText = _xml_escape(_ustr(res))
                out += [nl, nextLevelIndent, '<', resTag, '>', xmlBodyText, '</', resTag, '>']
        out += [nl, indent, '</', selfTag, '>']
        return ''.join(out)

    def __lookup(self, sub):
        # Reverse lookup: the results name under which 'sub' is stored.
        for (k, vlist) in self.__tokdict.items():
            for (v, loc) in vlist:
                if sub is v:
                    return k
        return None

    def getName(self):
        'Returns the results name for this token expression.'
        if self.__name:
            return self.__name
        elif self.__parent:
            par = self.__parent()
            if par:
                return par.__lookup(self)
            else:
                return None
        elif ((len(self) == 1) and (len(self.__tokdict) == 1) and (self.__tokdict.values()[0][0][1] in (0, -1))):
            return self.__tokdict.keys()[0]
        else:
            return None

    def dump(self, indent='', depth=0):
        'Diagnostic method for listing out the contents of a C{ParseResults}.\n        Accepts an optional C{indent} argument so that this string can be embedded\n        in a nested display of other data.'
        out = []
        out.append(indent + _ustr(self.asList()))
        items = sorted(self.items())
        for (k, v) in items:
            if out:
                out.append('\n')
            out.append('%s%s- %s: ' % (indent, (' ' * depth), k))
            if isinstance(v, ParseResults):
                if v.haskeys():
                    out.append(v.dump(indent, depth + 1))
                else:
                    out.append(_ustr(v))
            else:
                out.append(_ustr(v))
        return ''.join(out)

    def pprint(self, *args, **kwargs):
        'Pretty-printer for parsed results as a list, using the C{pprint} module.\n        Accepts additional positional or keyword args as defined for the \n        C{pprint.pprint} method. (U{http://docs.python.org/3/library/pprint.html#pprint.pprint})'
        pprint.pprint(self.asList(), *args, **kwargs)

    def __getstate__(self):
        # Pickle support: the weakref to the parent cannot be pickled, so
        # the dereferenced parent (or None) is stored instead.
        return (self.__toklist,
                (self.__tokdict.copy(),
                 ((self.__parent is not None) and self.__parent()) or None,
                 self.__accumNames,
                 self.__name))

    def __setstate__(self, state):
        self.__toklist = state[0]
        (self.__tokdict, par, inAccumNames, self.__name) = state[1]
        self.__accumNames = {}
        self.__accumNames.update(inAccumNames)
        if par is not None:
            self.__parent = wkref(par)
        else:
            self.__parent = None

    def __dir__(self):
        return dir(super(ParseResults, self)) + list(self.keys())
|
def col(loc, strg):
    'Returns current column within a string, counting newlines as line separators.\n    The first column is number 1.\n\n    Note: the default parsing behavior is to expand tabs in the input string\n    before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information\n    on parsing strings containing C{<TAB>}s, and suggested methods to maintain a\n    consistent view of the parsed string, the parse location, and line and column\n    positions within the parsed string.\n    '
    # A location sitting on a newline counts as column 1 of the next line.
    if (loc < len(strg)) and (strg[loc] == '\n'):
        return 1
    return loc - strg.rfind('\n', 0, loc)
|
def lineno(loc, strg):
    'Returns current line number within a string, counting newlines as line separators.\n    The first line is number 1.\n\n    Note: the default parsing behavior is to expand tabs in the input string\n    before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information\n    on parsing strings containing C{<TAB>}s, and suggested methods to maintain a\n    consistent view of the parsed string, the parse location, and line and column\n    positions within the parsed string.\n    '
    newlines_before = strg.count('\n', 0, loc)
    return newlines_before + 1
|
def line(loc, strg):
    'Returns the line of text containing loc within a string, counting newlines as line separators.\n    '
    start = strg.rfind('\n', 0, loc) + 1
    end = strg.find('\n', loc)
    # No trailing newline: the line runs to the end of the string.
    return strg[start:end] if end >= 0 else strg[start:]
|
def _defaultStartDebugAction(instring, loc, expr):
    # Announce an attempted match with the (line, column) of the location.
    position = '(%d,%d)' % (lineno(loc, instring), col(loc, instring))
    print('Match ' + _ustr(expr) + ' at loc ' + _ustr(loc) + position)
|
def _defaultSuccessDebugAction(instring, startloc, endloc, expr, toks):
    # Report the expression that matched and the tokens it produced.
    print('Matched ' + _ustr(expr) + ' -> ' + str(toks.asList()))
|
def _defaultExceptionDebugAction(instring, loc, expr, exc):
    # Report a parse exception raised while matching.
    print('Exception raised:' + _ustr(exc))
|
def nullDebugAction(*args):
    "'Do-nothing' debug action, to suppress debugging output during parsing."
    return None
|
def _trim_arity(func, maxargs=2):
    # Wrap a parse-action callable so it can be invoked uniformly with
    # (s, loc, toks) regardless of how many of those arguments it accepts.
    if (func in singleArgBuiltins):
        # Builtins that take exactly one argument get only the tokens.
        return (lambda s, l, t: func(t))
    # limit[0]: how many leading arguments to drop; discovered by probing.
    limit = [0]
    foundArity = [False]
    def wrapper(*args):
        while 1:
            try:
                ret = func(*args[limit[0]:])
                foundArity[0] = True
                return ret
            except TypeError:
                # Assume the TypeError means too many arguments were passed;
                # drop one more and retry, up to maxargs.  Once an arity has
                # succeeded, a later TypeError is a genuine error: re-raise.
                if ((limit[0] <= maxargs) and (not foundArity[0])):
                    limit[0] += 1
                    continue
                raise
    return wrapper
|
class ParserElement(object):
'Abstract base level parser element class.'
DEFAULT_WHITE_CHARS = ' \n\t\r'
verbose_stacktrace = False
def setDefaultWhitespaceChars(chars):
'Overrides the default whitespace chars\n '
ParserElement.DEFAULT_WHITE_CHARS = chars
setDefaultWhitespaceChars = staticmethod(setDefaultWhitespaceChars)
def inlineLiteralsUsing(cls):
'\n Set class to be used for inclusion of string literals into a parser.\n '
ParserElement.literalStringClass = cls
inlineLiteralsUsing = staticmethod(inlineLiteralsUsing)
    def __init__(self, savelist=False):
        # Parse actions run on a successful match; failAction on failure.
        self.parseAction = list()
        self.failAction = None
        self.strRepr = None  # cached string representation
        self.resultsName = None
        self.saveAsList = savelist
        # Whitespace handling: skip leading whitespace using whiteChars,
        # which tracks the class-wide default unless overridden per-element.
        self.skipWhitespace = True
        self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
        self.copyDefaultWhiteChars = True
        self.mayReturnEmpty = False  # used when checking for left-recursion
        self.keepTabs = False
        self.ignoreExprs = list()  # expressions to skip (e.g. comments)
        self.debug = False
        self.streamlined = False
        # mayIndexError: optimization - whether parseImpl can raise
        # IndexError on end-of-string (converted to ParseException).
        self.mayIndexError = True
        self.errmsg = ''
        self.modalResults = True  # mark results names as modal/cumulative
        # (startAction, successAction, exceptionAction) for debugging.
        self.debugActions = (None, None, None)
        self.re = None
        # callPreparse: whether to skip whitespace/ignorables before parseImpl.
        self.callPreparse = True
        self.callDuringTry = False
def copy(self):
'Make a copy of this C{ParserElement}. Useful for defining different parse actions\n for the same parsing pattern, using copies of the original parse element.'
cpy = copy.copy(self)
cpy.parseAction = self.parseAction[:]
cpy.ignoreExprs = self.ignoreExprs[:]
if self.copyDefaultWhiteChars:
cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
return cpy
def setName(self, name):
'Define name for this expression, for use in debugging.'
self.name = name
self.errmsg = ('Expected ' + self.name)
if hasattr(self, 'exception'):
self.exception.msg = self.errmsg
return self
def setResultsName(self, name, listAllMatches=False):
'Define name for referencing matching tokens as a nested attribute\n of the returned parse results.\n NOTE: this returns a *copy* of the original C{ParserElement} object;\n this is so that the client can define a basic element, such as an\n integer, and reference it in multiple places with different names.\n \n You can also set results names using the abbreviated syntax,\n C{expr("name")} in place of C{expr.setResultsName("name")} - \n see L{I{__call__}<__call__>}.\n '
newself = self.copy()
if name.endswith('*'):
name = name[:(- 1)]
listAllMatches = True
newself.resultsName = name
newself.modalResults = (not listAllMatches)
return newself
    def setBreak(self, breakFlag=True):
        'Method to invoke the Python pdb debugger when this element is\n        about to be parsed. Set C{breakFlag} to True to enable, False to\n        disable.\n        '
        if breakFlag:
            # Wrap the current _parse so each invocation drops into pdb
            # first; the original is stashed on the wrapper for disabling.
            _parseMethod = self._parse
            def breaker(instring, loc, doActions=True, callPreParse=True):
                import pdb
                pdb.set_trace()
                return _parseMethod(instring, loc, doActions, callPreParse)
            breaker._originalParseMethod = _parseMethod
            self._parse = breaker
        elif hasattr(self._parse, '_originalParseMethod'):
            # Disable: restore the unwrapped parse method.
            self._parse = self._parse._originalParseMethod
        return self
def setParseAction(self, *fns, **kwargs):
'Define action to perform when successfully matching parse element definition.\n Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)},\n C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where:\n - s = the original string being parsed (see note below)\n - loc = the location of the matching substring\n - toks = a list of the matched tokens, packaged as a C{L{ParseResults}} object\n If the functions in fns modify the tokens, they can return them as the return\n value from fn, and the modified list of tokens will replace the original.\n Otherwise, fn does not need to return any value.\n\n Note: the default parsing behavior is to expand tabs in the input string\n before starting the parsing process. See L{I{parseString}<parseString>} for more information\n on parsing strings containing C{<TAB>}s, and suggested methods to maintain a\n consistent view of the parsed string, the parse location, and line and column\n positions within the parsed string.\n '
self.parseAction = list(map(_trim_arity, list(fns)))
self.callDuringTry = (('callDuringTry' in kwargs) and kwargs['callDuringTry'])
return self
def addParseAction(self, *fns, **kwargs):
"Add parse action to expression's list of parse actions. See L{I{setParseAction}<setParseAction>}."
self.parseAction += list(map(_trim_arity, list(fns)))
self.callDuringTry = (self.callDuringTry or (('callDuringTry' in kwargs) and kwargs['callDuringTry']))
return self
def setFailAction(self, fn):
'Define action to perform if parsing fails at this expression.\n Fail acton fn is a callable function that takes the arguments\n C{fn(s,loc,expr,err)} where:\n - s = string being parsed\n - loc = location where expression match was attempted and failed\n - expr = the parse expression that failed\n - err = the exception thrown\n The function returns no value. It may throw C{L{ParseFatalException}}\n if it is desired to stop parsing immediately.'
self.failAction = fn
return self
    def _skipIgnorables(self, instring, loc):
        # Advance loc past any ignorable expressions (e.g. comments),
        # looping until a full pass makes no further progress.
        exprsFound = True
        while exprsFound:
            exprsFound = False
            for e in self.ignoreExprs:
                try:
                    # Consume repeated matches of this ignore expression;
                    # the ParseException ends the inner loop.
                    while 1:
                        (loc, dummy) = e._parse(instring, loc)
                        exprsFound = True
                except ParseException:
                    pass
        return loc
def preParse(self, instring, loc):
if self.ignoreExprs:
loc = self._skipIgnorables(instring, loc)
if self.skipWhitespace:
wt = self.whiteChars
instrlen = len(instring)
while ((loc < instrlen) and (instring[loc] in wt)):
loc += 1
return loc
    def parseImpl(self, instring, loc, doActions=True):
        # Default implementation: match nothing, consume no input.
        # Subclasses override this with their actual matching logic.
        return (loc, [])
    def postParse(self, instring, loc, tokenlist):
        # Hook for subclasses to transform the matched tokens; the default
        # passes them through unchanged.
        return tokenlist
    def _parseNoCache(self, instring, loc, doActions=True, callPreParse=True):
        # Core match driver: optionally pre-parse (skip ignorables and
        # whitespace), call parseImpl, post-process, then run any attached
        # parse actions.  Returns (new location, ParseResults).
        debugging = self.debug
        if (debugging or self.failAction):
            # Instrumented path: identical flow to the fast path below, but
            # wrapped with debug callbacks and failAction notification.
            if self.debugActions[0]:
                self.debugActions[0](instring, loc, self)
            if (callPreParse and self.callPreparse):
                preloc = self.preParse(instring, loc)
            else:
                preloc = loc
            tokensStart = preloc
            try:
                try:
                    (loc, tokens) = self.parseImpl(instring, preloc, doActions)
                except IndexError:
                    # Ran off the end of the input - report as a normal parse failure.
                    raise ParseException(instring, len(instring), self.errmsg, self)
            except ParseBaseException as err:
                if self.debugActions[2]:
                    self.debugActions[2](instring, tokensStart, self, err)
                if self.failAction:
                    self.failAction(instring, tokensStart, self, err)
                raise
        else:
            # Fast path: no debugging and no failAction installed.
            if (callPreParse and self.callPreparse):
                preloc = self.preParse(instring, loc)
            else:
                preloc = loc
            tokensStart = preloc
            if (self.mayIndexError or (loc >= len(instring))):
                # Guard against IndexError only when the element may raise it.
                try:
                    (loc, tokens) = self.parseImpl(instring, preloc, doActions)
                except IndexError:
                    raise ParseException(instring, len(instring), self.errmsg, self)
            else:
                (loc, tokens) = self.parseImpl(instring, preloc, doActions)
        tokens = self.postParse(instring, loc, tokens)
        retTokens = ParseResults(tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults)
        if (self.parseAction and (doActions or self.callDuringTry)):
            # Run parse actions in order; an action that returns non-None
            # replaces the token list for subsequent actions.
            if debugging:
                try:
                    for fn in self.parseAction:
                        tokens = fn(instring, tokensStart, retTokens)
                        if (tokens is not None):
                            retTokens = ParseResults(tokens, self.resultsName, asList=(self.saveAsList and isinstance(tokens, (ParseResults, list))), modal=self.modalResults)
                except ParseBaseException as err:
                    if self.debugActions[2]:
                        self.debugActions[2](instring, tokensStart, self, err)
                    raise
            else:
                for fn in self.parseAction:
                    tokens = fn(instring, tokensStart, retTokens)
                    if (tokens is not None):
                        retTokens = ParseResults(tokens, self.resultsName, asList=(self.saveAsList and isinstance(tokens, (ParseResults, list))), modal=self.modalResults)
        if debugging:
            if self.debugActions[1]:
                self.debugActions[1](instring, tokensStart, loc, self, retTokens)
        return (loc, retTokens)
def tryParse(self, instring, loc):
try:
return self._parse(instring, loc, doActions=False)[0]
except ParseFatalException:
raise ParseException(instring, loc, self.errmsg, self)
    def _parseCache(self, instring, loc, doActions=True, callPreParse=True):
        # Packrat memoization wrapper around _parseNoCache.  Both successful
        # results and parse failures are cached, keyed on every input that
        # can affect the outcome.
        lookup = (self, instring, loc, callPreParse, doActions)
        if (lookup in ParserElement._exprArgCache):
            value = ParserElement._exprArgCache[lookup]
            if isinstance(value, Exception):
                # Cached failure - re-raise the recorded exception.
                raise value
            # Return a copy so callers cannot mutate the cached ParseResults.
            return (value[0], value[1].copy())
        else:
            try:
                value = self._parseNoCache(instring, loc, doActions, callPreParse)
                # Store a copy for the same mutation-safety reason as above.
                ParserElement._exprArgCache[lookup] = (value[0], value[1].copy())
                return value
            except ParseBaseException as pe:
                # Drop the traceback before caching to avoid keeping frames alive.
                pe.__traceback__ = None
                ParserElement._exprArgCache[lookup] = pe
                raise
    # By default parsing is un-memoized; enablePackrat() rebinds _parse to
    # the caching wrapper.
    _parse = _parseNoCache
    # Shared packrat cache: (expr, string, loc, callPreParse, doActions) -> result/exception
    _exprArgCache = {}
    def resetCache():
        # Discard all memoized results (called at the start of each top-level parse).
        ParserElement._exprArgCache.clear()
    resetCache = staticmethod(resetCache)
    # Guard so enablePackrat() is idempotent.
    _packratEnabled = False
    def enablePackrat():
        'Enables "packrat" parsing, which adds memoizing to the parsing logic.\n Repeated parse attempts at the same string location (which happens\n often in many complex grammars) can immediately return a cached value,\n instead of re-executing parsing/validating code. Memoizing is done of\n both valid results and parsing exceptions.\n\n This speedup may break existing programs that use parse actions that\n have side-effects. For this reason, packrat parsing is disabled when\n you first import pyparsing. To activate the packrat feature, your\n program must call the class method C{ParserElement.enablePackrat()}. If\n your program uses C{psyco} to "compile as you go", you must call\n C{enablePackrat} before calling C{psyco.full()}. If you do not do this,\n Python will crash. For best results, call C{enablePackrat()} immediately\n after importing pyparsing.\n '
        if (not ParserElement._packratEnabled):
            ParserElement._packratEnabled = True
            # Route every subsequent parse through the memoizing wrapper.
            ParserElement._parse = ParserElement._parseCache
    enablePackrat = staticmethod(enablePackrat)
    def parseString(self, instring, parseAll=False):
        "Execute the parse expression with the given string.\n This is the main interface to the client code, once the complete\n expression has been built.\n\n If you want the grammar to require that the entire input string be\n successfully parsed, then set C{parseAll} to True (equivalent to ending\n the grammar with C{L{StringEnd()}}).\n\n Note: C{parseString} implicitly calls C{expandtabs()} on the input string,\n in order to report proper column numbers in parse actions.\n If the input string contains tabs and\n the grammar uses parse actions that use the C{loc} argument to index into the\n string being parsed, you can ensure you have a consistent view of the input\n string by:\n - calling C{parseWithTabs} on your grammar before calling C{parseString}\n (see L{I{parseWithTabs}<parseWithTabs>})\n - define your parse action using the full C{(s,loc,toks)} signature, and\n reference the input string using the parse action's C{s} argument\n - explictly expand the tabs in your input string before calling\n C{parseString}\n "
        # Each top-level parse starts with a fresh packrat cache.
        ParserElement.resetCache()
        if (not self.streamlined):
            self.streamline()
        for e in self.ignoreExprs:
            e.streamline()
        if (not self.keepTabs):
            # Expand tabs so column numbers reported to parse actions are consistent.
            instring = instring.expandtabs()
        try:
            (loc, tokens) = self._parse(instring, 0)
            if parseAll:
                # Require the remainder of the input to be only whitespace/ignorables.
                loc = self.preParse(instring, loc)
                se = (Empty() + StringEnd())
                se._parse(instring, loc)
        except ParseBaseException as exc:
            if ParserElement.verbose_stacktrace:
                raise
            else:
                # Re-raise from here so pyparsing-internal frames are trimmed
                # from the user's traceback.
                raise exc
        else:
            return tokens
    def scanString(self, instring, maxMatches=_MAX_INT, overlap=False):
        "Scan the input string for expression matches. Each match will return the\n matching tokens, start location, and end location. May be called with optional\n C{maxMatches} argument, to clip scanning after 'n' matches are found. If\n C{overlap} is specified, then overlapping matches will be reported.\n\n Note that the start and end locations are reported relative to the string\n being parsed. See L{I{parseString}<parseString>} for more information on parsing\n strings with embedded tabs."
        if (not self.streamlined):
            self.streamline()
        for e in self.ignoreExprs:
            e.streamline()
        if (not self.keepTabs):
            instring = _ustr(instring).expandtabs()
        instrlen = len(instring)
        loc = 0
        # Hoist bound methods out of the scan loop.
        preparseFn = self.preParse
        parseFn = self._parse
        ParserElement.resetCache()
        matches = 0
        try:
            while ((loc <= instrlen) and (matches < maxMatches)):
                try:
                    preloc = preparseFn(instring, loc)
                    (nextLoc, tokens) = parseFn(instring, preloc, callPreParse=False)
                except ParseException:
                    # No match here - resume scanning just past the attempt point.
                    loc = (preloc + 1)
                else:
                    if (nextLoc > loc):
                        matches += 1
                        (yield (tokens, preloc, nextLoc))
                        if overlap:
                            # NOTE(review): 'nextloc' (whitespace-skip result) and
                            # 'nextLoc' (match end) are near-identical names and the
                            # mixed use below looks easy to confuse; behavior is
                            # preserved exactly as written - confirm intent upstream.
                            nextloc = preparseFn(instring, loc)
                            if (nextloc > loc):
                                loc = nextLoc
                            else:
                                loc += 1
                        else:
                            loc = nextLoc
                    else:
                        # Zero-width match - advance one char to guarantee progress.
                        loc = (preloc + 1)
        except ParseBaseException as exc:
            if ParserElement.verbose_stacktrace:
                raise
            else:
                # Re-raise from here to hide pyparsing internal frames.
                raise exc
def transformString(self, instring):
'Extension to C{L{scanString}}, to modify matching text with modified tokens that may\n be returned from a parse action. To use C{transformString}, define a grammar and\n attach a parse action to it that modifies the returned token list.\n Invoking C{transformString()} on a target string will then scan for matches,\n and replace the matched text patterns according to the logic in the parse\n action. C{transformString()} returns the resulting transformed string.'
out = []
lastE = 0
self.keepTabs = True
try:
for (t, s, e) in self.scanString(instring):
out.append(instring[lastE:s])
if t:
if isinstance(t, ParseResults):
out += t.asList()
elif isinstance(t, list):
out += t
else:
out.append(t)
lastE = e
out.append(instring[lastE:])
out = [o for o in out if o]
return ''.join(map(_ustr, _flatten(out)))
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
raise exc
def searchString(self, instring, maxMatches=_MAX_INT):
"Another extension to C{L{scanString}}, simplifying the access to the tokens found\n to match the given parse expression. May be called with optional\n C{maxMatches} argument, to clip searching after 'n' matches are found.\n "
try:
return ParseResults([t for (t, s, e) in self.scanString(instring, maxMatches)])
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
raise exc
def __add__(self, other):
'Implementation of + operator - returns C{L{And}}'
if isinstance(other, basestring):
other = ParserElement.literalStringClass(other)
if (not isinstance(other, ParserElement)):
warnings.warn(('Cannot combine element of type %s with ParserElement' % type(other)), SyntaxWarning, stacklevel=2)
return None
return And([self, other])
def __radd__(self, other):
'Implementation of + operator when left operand is not a C{L{ParserElement}}'
if isinstance(other, basestring):
other = ParserElement.literalStringClass(other)
if (not isinstance(other, ParserElement)):
warnings.warn(('Cannot combine element of type %s with ParserElement' % type(other)), SyntaxWarning, stacklevel=2)
return None
return (other + self)
def __sub__(self, other):
'Implementation of - operator, returns C{L{And}} with error stop'
if isinstance(other, basestring):
other = ParserElement.literalStringClass(other)
if (not isinstance(other, ParserElement)):
warnings.warn(('Cannot combine element of type %s with ParserElement' % type(other)), SyntaxWarning, stacklevel=2)
return None
return And([self, And._ErrorStop(), other])
def __rsub__(self, other):
'Implementation of - operator when left operand is not a C{L{ParserElement}}'
if isinstance(other, basestring):
other = ParserElement.literalStringClass(other)
if (not isinstance(other, ParserElement)):
warnings.warn(('Cannot combine element of type %s with ParserElement' % type(other)), SyntaxWarning, stacklevel=2)
return None
return (other - self)
def __mul__(self, other):
'Implementation of * operator, allows use of C{expr * 3} in place of\n C{expr + expr + expr}. Expressions may also me multiplied by a 2-integer\n tuple, similar to C{{min,max}} multipliers in regular expressions. Tuples\n may also include C{None} as in:\n - C{expr*(n,None)} or C{expr*(n,)} is equivalent\n to C{expr*n + L{ZeroOrMore}(expr)}\n (read as "at least n instances of C{expr}")\n - C{expr*(None,n)} is equivalent to C{expr*(0,n)}\n (read as "0 to n instances of C{expr}")\n - C{expr*(None,None)} is equivalent to C{L{ZeroOrMore}(expr)}\n - C{expr*(1,None)} is equivalent to C{L{OneOrMore}(expr)}\n\n Note that C{expr*(None,n)} does not raise an exception if\n more than n exprs exist in the input stream; that is,\n C{expr*(None,n)} does not enforce a maximum number of expr\n occurrences. If this behavior is desired, then write\n C{expr*(None,n) + ~expr}\n\n '
if isinstance(other, int):
(minElements, optElements) = (other, 0)
elif isinstance(other, tuple):
other = (other + (None, None))[:2]
if (other[0] is None):
other = (0, other[1])
if (isinstance(other[0], int) and (other[1] is None)):
if (other[0] == 0):
return ZeroOrMore(self)
if (other[0] == 1):
return OneOrMore(self)
else:
return ((self * other[0]) + ZeroOrMore(self))
elif (isinstance(other[0], int) and isinstance(other[1], int)):
(minElements, optElements) = other
optElements -= minElements
else:
raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects", type(other[0]), type(other[1]))
else:
raise TypeError("cannot multiply 'ParserElement' and '%s' objects", type(other))
if (minElements < 0):
raise ValueError('cannot multiply ParserElement by negative value')
if (optElements < 0):
raise ValueError('second tuple value must be greater or equal to first tuple value')
if (minElements == optElements == 0):
raise ValueError('cannot multiply ParserElement by 0 or (0,0)')
if optElements:
def makeOptionalList(n):
if (n > 1):
return Optional((self + makeOptionalList((n - 1))))
else:
return Optional(self)
if minElements:
if (minElements == 1):
ret = (self + makeOptionalList(optElements))
else:
ret = (And(([self] * minElements)) + makeOptionalList(optElements))
else:
ret = makeOptionalList(optElements)
elif (minElements == 1):
ret = self
else:
ret = And(([self] * minElements))
return ret
def __rmul__(self, other):
return self.__mul__(other)
def __or__(self, other):
'Implementation of | operator - returns C{L{MatchFirst}}'
if isinstance(other, basestring):
other = ParserElement.literalStringClass(other)
if (not isinstance(other, ParserElement)):
warnings.warn(('Cannot combine element of type %s with ParserElement' % type(other)), SyntaxWarning, stacklevel=2)
return None
return MatchFirst([self, other])
def __ror__(self, other):
'Implementation of | operator when left operand is not a C{L{ParserElement}}'
if isinstance(other, basestring):
other = ParserElement.literalStringClass(other)
if (not isinstance(other, ParserElement)):
warnings.warn(('Cannot combine element of type %s with ParserElement' % type(other)), SyntaxWarning, stacklevel=2)
return None
return (other | self)
def __xor__(self, other):
'Implementation of ^ operator - returns C{L{Or}}'
if isinstance(other, basestring):
other = ParserElement.literalStringClass(other)
if (not isinstance(other, ParserElement)):
warnings.warn(('Cannot combine element of type %s with ParserElement' % type(other)), SyntaxWarning, stacklevel=2)
return None
return Or([self, other])
def __rxor__(self, other):
'Implementation of ^ operator when left operand is not a C{L{ParserElement}}'
if isinstance(other, basestring):
other = ParserElement.literalStringClass(other)
if (not isinstance(other, ParserElement)):
warnings.warn(('Cannot combine element of type %s with ParserElement' % type(other)), SyntaxWarning, stacklevel=2)
return None
return (other ^ self)
def __and__(self, other):
'Implementation of & operator - returns C{L{Each}}'
if isinstance(other, basestring):
other = ParserElement.literalStringClass(other)
if (not isinstance(other, ParserElement)):
warnings.warn(('Cannot combine element of type %s with ParserElement' % type(other)), SyntaxWarning, stacklevel=2)
return None
return Each([self, other])
def __rand__(self, other):
'Implementation of & operator when left operand is not a C{L{ParserElement}}'
if isinstance(other, basestring):
other = ParserElement.literalStringClass(other)
if (not isinstance(other, ParserElement)):
warnings.warn(('Cannot combine element of type %s with ParserElement' % type(other)), SyntaxWarning, stacklevel=2)
return None
return (other & self)
def __invert__(self):
'Implementation of ~ operator - returns C{L{NotAny}}'
return NotAny(self)
def __call__(self, name=None):
'Shortcut for C{L{setResultsName}}, with C{listAllMatches=default}::\n userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno")\n could be written as::\n userdata = Word(alphas)("name") + Word(nums+"-")("socsecno")\n \n If C{name} is given with a trailing C{\'*\'} character, then C{listAllMatches} will be\n passed as C{True}.\n \n If C{name} is omitted, same as calling C{L{copy}}.\n '
if (name is not None):
return self.setResultsName(name)
else:
return self.copy()
    def suppress(self):
        'Suppresses the output of this C{ParserElement}; useful to keep punctuation from\n cluttering up returned output.\n '
        return Suppress(self)
    def leaveWhitespace(self):
        "Disables the skipping of whitespace before matching the characters in the\n C{ParserElement}'s defined pattern. This is normally only used internally by\n the pyparsing module, but may be needed in some whitespace-sensitive grammars.\n "
        self.skipWhitespace = False
        return self
    def setWhitespaceChars(self, chars):
        'Overrides the default whitespace chars\n '
        self.skipWhitespace = True
        self.whiteChars = chars
        # A custom whitespace set must not be clobbered by class-level defaults.
        self.copyDefaultWhiteChars = False
        return self
    def parseWithTabs(self):
        'Overrides default behavior to expand C{<TAB>}s to spaces before parsing the input string.\n Must be called before C{parseString} when the input grammar contains elements that\n match C{<TAB>} characters.'
        self.keepTabs = True
        return self
    def ignore(self, other):
        'Define expression to be ignored (e.g., comments) while doing pattern\n matching; may be called repeatedly, to define multiple comment or other\n ignorable patterns.\n '
        if isinstance(other, Suppress):
            # Already suppressed - avoid registering the same expression twice.
            if (other not in self.ignoreExprs):
                self.ignoreExprs.append(other.copy())
        else:
            # Wrap in Suppress so ignored text contributes no tokens.
            self.ignoreExprs.append(Suppress(other.copy()))
        return self
def setDebugActions(self, startAction, successAction, exceptionAction):
'Enable display of debugging messages while doing pattern matching.'
self.debugActions = ((startAction or _defaultStartDebugAction), (successAction or _defaultSuccessDebugAction), (exceptionAction or _defaultExceptionDebugAction))
self.debug = True
return self
def setDebug(self, flag=True):
'Enable display of debugging messages while doing pattern matching.\n Set C{flag} to True to enable, False to disable.'
if flag:
self.setDebugActions(_defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction)
else:
self.debug = False
return self
    def __str__(self):
        # An element's display form is simply its name.
        return self.name
    def __repr__(self):
        # Delegate to the module's _ustr text-conversion helper, same as __str__.
        return _ustr(self)
def streamline(self):
self.streamlined = True
self.strRepr = None
return self
    def checkRecursion(self, parseElementList):
        # Leaf elements have no sub-expressions, so there is nothing to check;
        # container subclasses override this.
        pass
    def validate(self, validateTrace=[]):
        'Check defined expressions for valid structure, check for infinite recursive definitions.'
        # NOTE(review): the mutable default argument is never read or mutated
        # here, so it is harmless; kept for signature compatibility.
        self.checkRecursion([])
def parseFile(self, file_or_filename, parseAll=False):
'Execute the parse expression on the given file or filename.\n If a filename is specified (instead of a file object),\n the entire file is opened, read, and closed before parsing.\n '
try:
file_contents = file_or_filename.read()
except AttributeError:
f = open(file_or_filename, 'r')
file_contents = f.read()
f.close()
try:
return self.parseString(file_contents, parseAll)
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
raise exc
    def __eq__(self, other):
        # Element-to-element equality: identity, or full attribute-dict match.
        if isinstance(other, ParserElement):
            return ((self is other) or (self.__dict__ == other.__dict__))
        elif isinstance(other, basestring):
            # Comparing against a string asks: does this grammar fully match it?
            try:
                self.parseString(_ustr(other), parseAll=True)
                return True
            except ParseBaseException:
                return False
        else:
            # NOTE(review): this compares the super() proxy object itself with
            # 'other' (it is not a superclass __eq__ call) - looks suspicious,
            # but preserved exactly as written.
            return (super(ParserElement, self) == other)
    def __ne__(self, other):
        return (not (self == other))
    def __hash__(self):
        # Hash by identity so elements remain usable as dict keys (e.g. in the
        # packrat cache) even though __eq__ is overridden.
        return hash(id(self))
    def __req__(self, other):
        return (self == other)
    def __rne__(self, other):
        return (not (self == other))
|
class Token(ParserElement):
    'Abstract C{ParserElement} subclass, for defining atomic matching patterns.'

    def __init__(self):
        # Tokens never save their match list as a nested group.
        super(Token, self).__init__(savelist=False)

    def setName(self, name):
        # Delegate to the base implementation, then refresh the error message
        # so parse failures report the new name.
        ret = super(Token, self).setName(name)
        self.errmsg = ('Expected ' + self.name)
        return ret
|
class Empty(Token):
    'An empty token, will always match.'

    def __init__(self):
        super(Empty, self).__init__()
        self.name = 'Empty'
        # An empty match consumes no input, so it can neither fail to match
        # nor index past the end of the string.
        self.mayReturnEmpty = True
        self.mayIndexError = False
|
# NOTE(review): the three lines below are dataset-viewer boilerplate that was
# accidentally captured when this file was extracted; they are not Python and
# are commented out so the module remains importable.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.