idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
25,800
def nb_per_chunk ( item_size , item_dim , chunk_size ) : # from Mbytes to bytes size = chunk_size * 10. ** 6 ratio = int ( round ( size / ( item_size * item_dim ) ) ) return max ( 10 , ratio )
Return the number of items that can be stored in one chunk .
64
13
25,801
def is_appendable ( self , entry ) : try : if ( self . name == entry . name and self . dtype == entry . dtype and self . dim == entry . dim ) : return True except AttributeError : return False return False
Return True if entry can be appended to self
53
10
25,802
def append ( self , entry ) : if not self . is_appendable ( entry ) : raise ValueError ( 'entry not appendable' ) self . data += entry . data
Append an entry to self
38
6
25,803
def write ( self , data , groupname = 'h5features' , append = False ) : if append and groupname in self . h5file : # append data to the group, raise if we cannot group = self . h5file [ groupname ] if not is_same_version ( self . version , group ) : raise IOError ( 'data is not appendable to the group {}: ' 'versions are different' . format ( group . name ) ) if not data . is_appendable_to ( group ) : raise IOError ( 'data is not appendable to the group {}' . format ( group . name ) ) else : # overwrite any existing data in group group = self . _prepare ( data , groupname ) data . write_to ( group , append )
Write h5features data in a specified group of the file .
168
13
25,804
def _prepare ( self , data , groupname ) : if groupname in self . h5file : del self . h5file [ groupname ] group = self . h5file . create_group ( groupname ) group . attrs [ 'version' ] = self . version data . init_group ( group , self . chunk_size , self . compression , self . compression_opts ) return group
Clear the group if existing and initialize empty datasets .
88
10
25,805
def read_items ( group , version = '1.1' , check = False ) : if version == '0.1' : # parse unicode to strings return '' . join ( [ unichr ( int ( c ) ) for c in group [ 'files' ] [ ... ] ] ) . replace ( '/-' , '/' ) . split ( '/\\' ) elif version == '1.0' : return Items ( list ( group [ 'files' ] [ ... ] ) , check ) else : return Items ( list ( group [ 'items' ] [ ... ] ) , check )
Return an Items instance initialized from a h5features group .
129
12
25,806
def write_to ( self , group ) : # The HDF5 group where to write data items_group = group [ self . name ] nitems = items_group . shape [ 0 ] items_group . resize ( ( nitems + len ( self . data ) , ) ) items_group [ nitems : ] = self . data
Write stored items to the given HDF5 group .
72
11
25,807
def _create_dataset ( self , group , chunk_size , compression , compression_opts ) : if chunk_size == 'auto' : chunks = True else : # if dtype is a variable str, guess representative size is 20 bytes per_chunk = ( nb_per_chunk ( 20 , 1 , chunk_size ) if self . dtype == np . dtype ( 'O' ) else nb_per_chunk ( np . dtype ( self . dtype ) . itemsize , 1 , chunk_size ) ) chunks = ( per_chunk , ) shape = ( 0 , ) maxshape = ( None , ) # raise if per_chunk >= 4 Gb, this is requested by h5py group . create_dataset ( self . name , shape , dtype = self . dtype , chunks = chunks , maxshape = maxshape , compression = compression , compression_opts = compression_opts )
Create an empty dataset in a group .
207
8
25,808
def read_version ( group ) : version = ( '0.1' if 'version' not in group . attrs else group . attrs [ 'version' ] ) # decode from bytes to str if needed if isinstance ( version , bytes ) : version = version . decode ( ) if not is_supported_version ( version ) : raise IOError ( 'version {} is not supported' . format ( version ) ) return version
Return the h5features version of a given HDF5 group .
91
14
25,809
def read ( self , from_item = None , to_item = None , from_time = None , to_time = None ) : # handling default arguments if to_item is None : to_item = self . items . data [ - 1 ] if from_item is None else from_item if from_item is None : from_item = self . items . data [ 0 ] # index coordinates of from/to_item. TODO optimize because we # have 4 accesses to list.index() where 2 are enougth. if not self . items . is_valid_interval ( from_item , to_item ) : raise IOError ( 'cannot read items: not a valid interval' ) from_idx = self . items . data . index ( from_item ) to_idx = self . items . data . index ( to_item ) from_pos = self . _get_item_position ( from_idx ) to_pos = self . _get_item_position ( to_idx ) lower = self . _get_from_time ( from_time , from_pos ) # upper included with +1 upper = self . _get_to_time ( to_time , to_pos ) + 1 # Step 2: access actual data if self . dformat == 'sparse' : raise NotImplementedError ( 'Reading sparse features not implemented' ) else : features = ( self . group [ 'features' ] [ : , lower : upper ] . T if self . version == '0.1' else self . group [ 'features' ] [ lower : upper , ... ] ) labels = self . _labels_group [ lower : upper ] # If we read a single item if to_idx == from_idx : features = [ features ] labels = [ labels ] # Several items case: split them from the index else : item_ends = self . _index [ from_idx : to_idx ] - from_pos [ 0 ] + 1 features = np . split ( features , item_ends , axis = 0 ) labels = np . split ( labels , item_ends , axis = 0 ) items = self . items . data [ from_idx : to_idx + 1 ] if self . properties is None : properties = None else : properties = self . properties [ from_idx : to_idx + 1 ] return Data ( items , labels , features , properties = properties , check = False )
Retrieve requested data coordinates from the h5features index .
532
12
25,810
def writesgf ( self , sgffilename ) : size = self . size outfile = open ( sgffilename , "w" ) if not outfile : print "Couldn't create " + sgffilename return black_name = self . blackplayer . get_program_name ( ) white_name = self . whiteplayer . get_program_name ( ) black_seed = self . blackplayer . get_random_seed ( ) white_seed = self . whiteplayer . get_random_seed ( ) handicap = self . handicap komi = self . komi result = self . resultw outfile . write ( "(;GM[1]FF[4]RU[Japanese]SZ[%s]HA[%s]KM[%s]RE[%s]\n" % ( size , handicap , komi , result ) ) outfile . write ( "PW[%s (random seed %s)]PB[%s (random seed %s)]\n" % ( white_name , white_seed , black_name , black_seed ) ) outfile . write ( self . sgffilestart ) if handicap > 1 : outfile . write ( "AB" ) for stone in self . handicap_stones : outfile . write ( "[%s]" % ( coords_to_sgf ( size , stone ) ) ) outfile . write ( "PL[W]\n" ) to_play = self . first_to_play for move in self . moves : sgfmove = coords_to_sgf ( size , move ) outfile . write ( ";%s[%s]\n" % ( to_play , sgfmove ) ) if to_play == "B" : to_play = "W" else : to_play = "B" outfile . write ( ")\n" ) outfile . close
Write the game to an SGF file after a game
424
11
25,811
def _escapeText ( text ) : output = "" index = 0 match = reCharsToEscape . search ( text , index ) while match : output = output + text [ index : match . start ( ) ] + '\\' + text [ match . start ( ) ] index = match . end ( ) match = reCharsToEscape . search ( text , index ) output = output + text [ index : ] return output
Adds backslash - escapes to property value characters that need them .
92
14
25,812
def parse ( self ) : c = Collection ( ) while self . index < self . datalen : g = self . parseOneGame ( ) if g : c . append ( g ) else : break return c
Parses the SGF data stored in self . data and returns a Collection .
45
17
25,813
def parseOneGame ( self ) : if self . index < self . datalen : match = self . reGameTreeStart . match ( self . data , self . index ) if match : self . index = match . end ( ) return self . parseGameTree ( ) return None
Parses one game from self . data . Returns a GameTree containing one game or None if the end of self . data has been reached .
60
30
25,814
def reset ( self ) : self . gametree = self . game self . nodenum = 0 self . index = 0 self . stack = [ ] self . node = self . gametree [ self . index ] self . _setChildren ( ) self . _setFlags ( )
Set Cursor to point to the start of the root GameTree self . game .
60
17
25,815
def previous ( self ) : if self . index - 1 >= 0 : # more main line? self . index = self . index - 1 elif self . stack : # were we in a variation? self . gametree = self . stack . pop ( ) self . index = len ( self . gametree ) - 1 else : raise GameTreeEndError self . node = self . gametree [ self . index ] self . nodenum = self . nodenum - 1 self . _setChildren ( ) self . _setFlags ( ) return self . node
Moves the Cursor to & returns the previous Node . Raises GameTreeEndError if the start of a branch is exceeded .
119
27
25,816
def _setChildren ( self ) : if self . index + 1 < len ( self . gametree ) : self . children = [ self . gametree [ self . index + 1 ] ] else : self . children = map ( lambda list : list [ 0 ] , self . gametree . variations )
Sets up self . children .
66
7
25,817
def _setFlags ( self ) : self . atEnd = not self . gametree . variations and ( self . index + 1 == len ( self . gametree ) ) self . atStart = not self . stack and ( self . index == 0 )
Sets up the flags self . atEnd and self . atStart .
55
15
25,818
def ripple_carry_add ( A , B , cin = 0 ) : if len ( A ) != len ( B ) : raise ValueError ( "expected A and B to be equal length" ) ss , cs = list ( ) , list ( ) for i , a in enumerate ( A ) : c = ( cin if i == 0 else cs [ i - 1 ] ) ss . append ( a ^ B [ i ] ^ c ) cs . append ( a & B [ i ] | a & c | B [ i ] & c ) return farray ( ss ) , farray ( cs )
Return symbolic logic for an N - bit ripple carry adder .
129
13
25,819
def kogge_stone_add ( A , B , cin = 0 ) : if len ( A ) != len ( B ) : raise ValueError ( "expected A and B to be equal length" ) N = len ( A ) # generate/propagate logic gs = [ A [ i ] & B [ i ] for i in range ( N ) ] ps = [ A [ i ] ^ B [ i ] for i in range ( N ) ] for i in range ( clog2 ( N ) ) : start = 1 << i for j in range ( start , N ) : gs [ j ] = gs [ j ] | ps [ j ] & gs [ j - start ] ps [ j ] = ps [ j ] & ps [ j - start ] # sum logic ss = [ A [ 0 ] ^ B [ 0 ] ^ cin ] ss += [ A [ i ] ^ B [ i ] ^ gs [ i - 1 ] for i in range ( 1 , N ) ] return farray ( ss ) , farray ( gs )
Return symbolic logic for an N - bit Kogge - Stone adder .
228
16
25,820
def brent_kung_add ( A , B , cin = 0 ) : if len ( A ) != len ( B ) : raise ValueError ( "expected A and B to be equal length" ) N = len ( A ) # generate/propagate logic gs = [ A [ i ] & B [ i ] for i in range ( N ) ] ps = [ A [ i ] ^ B [ i ] for i in range ( N ) ] # carry tree for i in range ( floor ( log ( N , 2 ) ) ) : step = 2 ** i for start in range ( 2 ** ( i + 1 ) - 1 , N , 2 ** ( i + 1 ) ) : gs [ start ] = gs [ start ] | ps [ start ] & gs [ start - step ] ps [ start ] = ps [ start ] & ps [ start - step ] # inverse carry tree for i in range ( floor ( log ( N , 2 ) ) - 2 , - 1 , - 1 ) : start = 2 ** ( i + 1 ) - 1 step = 2 ** i while start + step < N : gs [ start + step ] = gs [ start + step ] | ps [ start + step ] & gs [ start ] ps [ start + step ] = ps [ start + step ] & ps [ start ] start += step # sum logic ss = [ A [ 0 ] ^ B [ 0 ] ^ cin ] ss += [ A [ i ] ^ B [ i ] ^ gs [ i - 1 ] for i in range ( 1 , N ) ] return farray ( ss ) , farray ( gs )
Return symbolic logic for an N - bit Brent - Kung adder .
350
14
25,821
def _expect_token ( lexer , types ) : tok = next ( lexer ) if any ( isinstance ( tok , t ) for t in types ) : return tok else : raise Error ( "unexpected token: " + str ( tok ) )
Return the next token or raise an exception .
59
9
25,822
def parse_cnf ( s , varname = 'x' ) : lexer = iter ( CNFLexer ( s ) ) try : ast = _cnf ( lexer , varname ) except lex . RunError as exc : fstr = ( "{0.args[0]}: " "(line: {0.lineno}, offset: {0.offset}, text: {0.text})" ) raise Error ( fstr . format ( exc ) ) # Check for end of buffer _expect_token ( lexer , { EndToken } ) return ast
Parse an input string in DIMACS CNF format and return an expression abstract syntax tree .
122
21
25,823
def _cnf ( lexer , varname ) : _expect_token ( lexer , { KW_p } ) _expect_token ( lexer , { KW_cnf } ) nvars = _expect_token ( lexer , { IntegerToken } ) . value nclauses = _expect_token ( lexer , { IntegerToken } ) . value return _cnf_formula ( lexer , varname , nvars , nclauses )
Return a DIMACS CNF .
109
9
25,824
def _cnf_formula ( lexer , varname , nvars , nclauses ) : clauses = _clauses ( lexer , varname , nvars ) if len ( clauses ) < nclauses : fstr = "formula has fewer than {} clauses" raise Error ( fstr . format ( nclauses ) ) if len ( clauses ) > nclauses : fstr = "formula has more than {} clauses" raise Error ( fstr . format ( nclauses ) ) return ( 'and' , ) + clauses
Return a DIMACS CNF formula .
124
10
25,825
def _clauses ( lexer , varname , nvars ) : tok = next ( lexer ) toktype = type ( tok ) if toktype is OP_not or toktype is IntegerToken : lexer . unpop_token ( tok ) first = _clause ( lexer , varname , nvars ) rest = _clauses ( lexer , varname , nvars ) return ( first , ) + rest # null else : lexer . unpop_token ( tok ) return tuple ( )
Return a tuple of DIMACS CNF clauses .
118
12
25,826
def _lits ( lexer , varname , nvars ) : tok = _expect_token ( lexer , { OP_not , IntegerToken } ) if isinstance ( tok , IntegerToken ) and tok . value == 0 : return tuple ( ) else : if isinstance ( tok , OP_not ) : neg = True tok = _expect_token ( lexer , { IntegerToken } ) else : neg = False index = tok . value if index > nvars : fstr = "formula literal {} is greater than {}" raise Error ( fstr . format ( index , nvars ) ) lit = ( 'var' , ( varname , ) , ( index , ) ) if neg : lit = ( 'not' , lit ) return ( lit , ) + _lits ( lexer , varname , nvars )
Return a tuple of DIMACS CNF clause literals .
189
14
25,827
def parse_sat ( s , varname = 'x' ) : lexer = iter ( SATLexer ( s ) ) try : ast = _sat ( lexer , varname ) except lex . RunError as exc : fstr = ( "{0.args[0]}: " "(line: {0.lineno}, offset: {0.offset}, text: {0.text})" ) raise Error ( fstr . format ( exc ) ) # Check for end of buffer _expect_token ( lexer , { EndToken } ) return ast
Parse an input string in DIMACS SAT format and return an expression .
119
17
25,828
def _sat ( lexer , varname ) : _expect_token ( lexer , { KW_p } ) fmt = _expect_token ( lexer , { KW_sat , KW_satx , KW_sate , KW_satex } ) . value nvars = _expect_token ( lexer , { IntegerToken } ) . value return _sat_formula ( lexer , varname , fmt , nvars )
Return a DIMACS SAT .
105
8
25,829
def _sat_formula ( lexer , varname , fmt , nvars ) : types = { IntegerToken , LPAREN } | _SAT_TOKS [ fmt ] tok = _expect_token ( lexer , types ) # INT if isinstance ( tok , IntegerToken ) : index = tok . value if not 0 < index <= nvars : fstr = "formula literal {} outside valid range: (0, {}]" raise Error ( fstr . format ( index , nvars ) ) return ( 'var' , ( varname , ) , ( index , ) ) # '-' elif isinstance ( tok , OP_not ) : tok = _expect_token ( lexer , { IntegerToken , LPAREN } ) # '-' INT if isinstance ( tok , IntegerToken ) : index = tok . value if not 0 < index <= nvars : fstr = "formula literal {} outside valid range: (0, {}]" raise Error ( fstr . format ( index , nvars ) ) return ( 'not' , ( 'var' , ( varname , ) , ( index , ) ) ) # '-' '(' FORMULA ')' else : formula = _sat_formula ( lexer , varname , fmt , nvars ) _expect_token ( lexer , { RPAREN } ) return ( 'not' , formula ) # '(' FORMULA ')' elif isinstance ( tok , LPAREN ) : formula = _sat_formula ( lexer , varname , fmt , nvars ) _expect_token ( lexer , { RPAREN } ) return formula # OP '(' FORMULAS ')' else : _expect_token ( lexer , { LPAREN } ) formulas = _formulas ( lexer , varname , fmt , nvars ) _expect_token ( lexer , { RPAREN } ) return ( tok . ASTOP , ) + formulas
Return a DIMACS SAT formula .
438
9
25,830
def _formulas ( lexer , varname , fmt , nvars ) : types = { IntegerToken , LPAREN } | _SAT_TOKS [ fmt ] tok = lexer . peek_token ( ) if any ( isinstance ( tok , t ) for t in types ) : first = _sat_formula ( lexer , varname , fmt , nvars ) rest = _formulas ( lexer , varname , fmt , nvars ) return ( first , ) + rest # null else : return tuple ( )
Return a tuple of DIMACS SAT formulas .
119
11
25,831
def keyword ( self , text ) : cls = self . KEYWORDS [ text ] self . push_token ( cls ( text , self . lineno , self . offset ) )
Push a keyword onto the token queue .
40
8
25,832
def operator ( self , text ) : cls = self . OPERATORS [ text ] self . push_token ( cls ( text , self . lineno , self . offset ) )
Push an operator onto the token queue .
40
8
25,833
def punct ( self , text ) : cls = self . PUNCTUATION [ text ] self . push_token ( cls ( text , self . lineno , self . offset ) )
Push punctuation onto the token queue .
42
8
25,834
def parse ( s ) : d = dict ( ninputs = None , noutputs = None , input_labels = None , output_labels = None , intype = None , cover = set ( ) ) lines = [ line . strip ( ) for line in s . splitlines ( ) ] for i , line in enumerate ( lines , start = 1 ) : # skip comments if not line or _COMMENT . match ( line ) : continue # .i m_in = _NINS . match ( line ) if m_in : if d [ 'ninputs' ] is None : d [ 'ninputs' ] = int ( m_in . group ( 1 ) ) continue else : raise Error ( ".i declared more than once" ) # .o m_out = _NOUTS . match ( line ) if m_out : if d [ 'noutputs' ] is None : d [ 'noutputs' ] = int ( m_out . group ( 1 ) ) continue else : raise Error ( ".o declared more than once" ) # ignore .p m_prod = _PROD . match ( line ) if m_prod : continue # .ilb m_ilb = _ILB . match ( line ) if m_ilb : if d [ 'input_labels' ] is None : d [ 'input_labels' ] = m_ilb . group ( 1 ) . split ( ) continue else : raise Error ( ".ilb declared more than once" ) # .ob m_ob = _OB . match ( line ) if m_ob : if d [ 'output_labels' ] is None : d [ 'output_labels' ] = m_ob . group ( 1 ) . split ( ) continue else : raise Error ( ".ob declared more than once" ) # .type m_type = _TYPE . match ( line ) if m_type : if d [ 'intype' ] is None : d [ 'intype' ] = _TYPES [ m_type . group ( 1 ) ] continue else : raise Error ( ".type declared more tha once" ) # cube m_cube = _CUBE . match ( line ) if m_cube : inputs , outputs = m_cube . groups ( ) invec = tuple ( _INCODE [ c ] for c in inputs ) outvec = tuple ( _OUTCODE [ c ] for c in outputs ) d [ 'cover' ] . add ( ( invec , outvec ) ) continue # ignore .e m_end = _END . match ( line ) if m_end : continue raise Error ( "syntax error on line {}: {}" . format ( i , line ) ) return d
Parse an input string in PLA format and return an intermediate representation dict .
588
15
25,835
def action ( toktype ) : def outer ( func ) : """Return a function that pushes a token onto the token queue.""" def inner ( lexer , text ) : """Push a token onto the token queue.""" value = func ( lexer , text ) lexer . tokens . append ( toktype ( value , lexer . lineno , lexer . offset ) ) return inner return outer
Return a parser action property .
85
6
25,836
def _compile_rules ( self ) : for state , table in self . RULES . items ( ) : patterns = list ( ) actions = list ( ) nextstates = list ( ) for i , row in enumerate ( table ) : if len ( row ) == 2 : pattern , _action = row nextstate = None elif len ( row ) == 3 : pattern , _action , nextstate = row else : fstr = "invalid RULES: state {}, row {}" raise CompileError ( fstr . format ( state , i ) ) patterns . append ( pattern ) actions . append ( _action ) nextstates . append ( nextstate ) reobj = re . compile ( '|' . join ( "(" + p + ")" for p in patterns ) ) self . _rules [ state ] = ( reobj , actions , nextstates )
Compile the rules into the internal lexer state .
185
11
25,837
def _iter_tokens ( self ) : reobj , actions , nextstates = self . _rules [ self . states [ - 1 ] ] mobj = reobj . match ( self . string , self . pos ) while mobj is not None : text = mobj . group ( 0 ) idx = mobj . lastindex - 1 nextstate = nextstates [ idx ] # Take action actions [ idx ] ( self , text ) while self . tokens : yield self . pop_token ( ) if nextstate and nextstate != self . states [ - 1 ] : self . states [ - 1 ] = nextstate # Update position variables self . pos = mobj . end ( ) lines = text . split ( '\n' ) nlines = len ( lines ) - 1 if nlines == 0 : self . offset = self . offset + len ( lines [ 0 ] ) else : self . lineno = self . lineno + nlines self . offset = 1 + len ( lines [ - 1 ] ) reobj , actions , nextstates = self . _rules [ self . states [ - 1 ] ] mobj = reobj . match ( self . string , self . pos ) if self . pos != len ( self . string ) : msg = "unexpected character" text = self . string [ self . pos ] raise RunError ( msg , self . lineno , self . offset , text ) yield EndToken ( "" , self . lineno , self . offset )
Iterate through all tokens in the input string .
315
10
25,838
def parity ( num : int ) -> int : if num < 0 : raise ValueError ( "expected num >= 0" ) par = 0 while num : par ^= ( num & 1 ) num >>= 1 return par
Return the parity of a non - negative integer .
46
10
25,839
def cached_property ( func ) : def get ( self ) : """this docstring will be over-written by func.__doc__""" try : return self . _property_cache [ func ] except AttributeError : self . _property_cache = dict ( ) prop = self . _property_cache [ func ] = func ( self ) return prop except KeyError : prop = self . _property_cache [ func ] = func ( self ) return prop get . __doc__ = func . __doc__ return property ( get )
Return a cached property calculated by the input function .
113
10
25,840
def var ( name , index = None ) : tname = type ( name ) if tname is str : names = ( name , ) elif tname is tuple : names = name else : fstr = "expected name to be a str or tuple, got {0.__name__}" raise TypeError ( fstr . format ( tname ) ) if not names : raise ValueError ( "expected at least one name" ) for name in names : tname = type ( name ) if tname is not str : fstr = "expected name to be a str, got {0.__name__}" raise TypeError ( fstr . format ( tname ) ) if index is None : indices = tuple ( ) else : tindex = type ( index ) if tindex is int : indices = ( index , ) elif tindex is tuple : indices = index else : fstr = "expected index to be an int or tuple, got {0.__name__}" raise TypeError ( fstr . format ( tindex ) ) for index in indices : tindex = type ( index ) if tindex is not int : fstr = "expected index to be an int, got {0.__name__}" raise TypeError ( fstr . format ( tindex ) ) if index < 0 : fstr = "expected index to be >= 0, got {}" raise ValueError ( fstr . format ( index ) ) try : v = VARIABLES [ ( names , indices ) ] except KeyError : v = Variable ( names , indices ) VARIABLES [ ( names , indices ) ] = v return v
Return a unique Variable instance .
345
6
25,841
def iter_cofactors ( self , vs = None ) : vs = self . _expect_vars ( vs ) for point in iter_points ( vs ) : yield self . restrict ( point )
r Iterate through the cofactors of a function over N variables .
44
15
25,842
def smoothing ( self , vs = None ) : return functools . reduce ( operator . or_ , self . iter_cofactors ( vs ) )
r Return the smoothing of a function over a sequence of N variables .
34
15
25,843
def consensus ( self , vs = None ) : return functools . reduce ( operator . and_ , self . iter_cofactors ( vs ) )
r Return the consensus of a function over a sequence of N variables .
33
14
25,844
def derivative ( self , vs = None ) : return functools . reduce ( operator . xor , self . iter_cofactors ( vs ) )
r Return the derivative of a function over a sequence of N variables .
33
14
25,845
def _expect_vars ( vs = None ) : if vs is None : return list ( ) elif isinstance ( vs , Variable ) : return [ vs ] else : checked = list ( ) # Will raise TypeError if vs is not iterable for v in vs : if isinstance ( v , Variable ) : checked . append ( v ) else : fstr = "expected Variable, got {0.__name__}" raise TypeError ( fstr . format ( type ( v ) ) ) return checked
Verify the input type and return a list of Variables .
108
13
25,846
def solve ( self , grid ) : soln = self . S . satisfy_one ( assumptions = self . _parse_grid ( grid ) ) return self . S . soln2point ( soln , self . litmap )
Return a solution point for a Sudoku grid .
49
10
25,847
def _parse_grid ( self , grid ) : chars = [ c for c in grid if c in DIGITS or c in "0." ] if len ( chars ) != 9 ** 2 : raise ValueError ( "expected 9x9 grid" ) return [ self . litmap [ self . X [ i // 9 + 1 , i % 9 + 1 , int ( c ) ] ] for i , c in enumerate ( chars ) if c in DIGITS ]
Return the input constraints for a Sudoku grid .
100
10
25,848
def _soln2str ( self , soln , fancy = False ) : chars = list ( ) for r in range ( 1 , 10 ) : for c in range ( 1 , 10 ) : if fancy and c in ( 4 , 7 ) : chars . append ( "|" ) chars . append ( self . _get_val ( soln , r , c ) ) if fancy and r != 9 : chars . append ( "\n" ) if r in ( 3 , 6 ) : chars . append ( "---+---+---\n" ) return "" . join ( chars )
Convert a Sudoku solution point to a string .
125
11
25,849
def _get_val ( self , soln , r , c ) : for v in range ( 1 , 10 ) : if soln [ self . X [ r , c , v ] ] : return DIGITS [ v - 1 ] return "X"
Return the string value for a solution coordinate .
55
9
25,850
def ttvar ( name , index = None ) : bvar = boolfunc . var ( name , index ) try : var = _VARS [ bvar . uniqid ] except KeyError : var = _VARS [ bvar . uniqid ] = TTVariable ( bvar ) return var
Return a TruthTable variable .
67
6
25,851
def expr2truthtable ( expr ) : inputs = [ ttvar ( v . names , v . indices ) for v in expr . inputs ] return truthtable ( inputs , expr . iter_image ( ) )
Convert an expression into a truth table .
45
9
25,852
def truthtable2expr ( tt , conj = False ) : if conj : outer , inner = ( And , Or ) nums = tt . pcdata . iter_zeros ( ) else : outer , inner = ( Or , And ) nums = tt . pcdata . iter_ones ( ) inputs = [ exprvar ( v . names , v . indices ) for v in tt . inputs ] terms = [ boolfunc . num2term ( num , inputs , conj ) for num in nums ] return outer ( * [ inner ( * term ) for term in terms ] )
Convert a truth table into an expression .
126
9
25,853
def _bin_zfill ( num , width = None ) : s = bin ( num ) [ 2 : ] return s if width is None else s . zfill ( width )
Convert a base - 10 number to a binary string .
38
12
25,854
def zero_mask ( self ) : accum = 0 for i in range ( self . data . itemsize ) : accum += ( 0x55 << ( i << 3 ) ) return accum
Return a mask to determine whether an array chunk has any zeros .
39
14
25,855
def one_mask ( self ) : accum = 0 for i in range ( self . data . itemsize ) : accum += ( 0xAA << ( i << 3 ) ) return accum
Return a mask to determine whether an array chunk has any ones .
39
13
25,856
def iter_zeros ( self ) : num = quotient = 0 while num < self . _len : chunk = self . data [ quotient ] if chunk & self . zero_mask : remainder = 0 while remainder < self . width and num < self . _len : item = ( chunk >> remainder ) & 3 if item == PC_ZERO : yield num remainder += 2 num += 1 else : num += ( self . width >> 1 ) quotient += 1
Iterate through the indices of all zero items .
98
10
25,857
def find_one ( self ) : num = quotient = 0 while num < self . _len : chunk = self . data [ quotient ] if chunk & self . one_mask : remainder = 0 while remainder < self . width and num < self . _len : item = ( chunk >> remainder ) & 3 if item == PC_ONE : return num remainder += 2 num += 1 else : num += ( self . width >> 1 ) quotient += 1 return None
Return the first index of an entry that is either one or DC . If no item is found return None .
98
22
25,858
def is_neg_unate ( self , vs = None ) : vs = self . _expect_vars ( vs ) basis = self . support - set ( vs ) maxcov = [ PC_ONE ] * ( 1 << len ( basis ) ) # Test whether table entries are monotonically decreasing for cf in self . iter_cofactors ( vs ) : for i , item in enumerate ( cf . pcdata ) : if maxcov [ i ] == PC_ZERO and item == PC_ONE : return False maxcov [ i ] = item return True
r Return whether a function is negative unate .
125
10
25,859
def _iter_restrict ( self , zeros , ones ) : inputs = list ( self . inputs ) unmapped = dict ( ) for i , v in enumerate ( self . inputs ) : if v in zeros : inputs [ i ] = 0 elif v in ones : inputs [ i ] = 1 else : unmapped [ v ] = i vs = sorted ( unmapped . keys ( ) ) for num in range ( 1 << len ( vs ) ) : for v , val in boolfunc . num2point ( num , vs ) . items ( ) : inputs [ unmapped [ v ] ] = val yield sum ( ( val << i ) for i , val in enumerate ( inputs ) )
Iterate through indices of all table entries that vary .
149
11
25,860
def bddvar ( name , index = None ) : bvar = boolfunc . var ( name , index ) try : var = _VARS [ bvar . uniqid ] except KeyError : var = _VARS [ bvar . uniqid ] = BDDVariable ( bvar ) _BDDS [ var . node ] = var return var
r Return a unique BDD variable .
76
8
25,861
def _expr2bddnode ( expr ) : if expr . is_zero ( ) : return BDDNODEZERO elif expr . is_one ( ) : return BDDNODEONE else : top = expr . top # Register this variable _ = bddvar ( top . names , top . indices ) root = top . uniqid lo = _expr2bddnode ( expr . restrict ( { top : 0 } ) ) hi = _expr2bddnode ( expr . restrict ( { top : 1 } ) ) return _bddnode ( root , lo , hi )
Convert an expression into a BDD node .
127
10
25,862
def bdd2expr ( bdd , conj = False ) : if conj : outer , inner = ( And , Or ) paths = _iter_all_paths ( bdd . node , BDDNODEZERO ) else : outer , inner = ( Or , And ) paths = _iter_all_paths ( bdd . node , BDDNODEONE ) terms = list ( ) for path in paths : expr_point = { exprvar ( v . names , v . indices ) : val for v , val in _path2point ( path ) . items ( ) } terms . append ( boolfunc . point2term ( expr_point , conj ) ) return outer ( * [ inner ( * term ) for term in terms ] )
Convert a binary decision diagram into an expression .
159
10
25,863
def upoint2bddpoint ( upoint ) : point = dict ( ) for uniqid in upoint [ 0 ] : point [ _VARS [ uniqid ] ] = 0 for uniqid in upoint [ 1 ] : point [ _VARS [ uniqid ] ] = 1 return point
Convert an untyped point into a BDD point .
67
13
25,864
def _bddnode ( root , lo , hi ) : if lo is hi : node = lo else : key = ( root , lo , hi ) try : node = _NODES [ key ] except KeyError : node = _NODES [ key ] = BDDNode ( * key ) return node
Return a unique BDD node .
66
7
25,865
def _bdd ( node ) : try : bdd = _BDDS [ node ] except KeyError : bdd = _BDDS [ node ] = BinaryDecisionDiagram ( node ) return bdd
Return a unique BDD .
44
6
25,866
def _path2point ( path ) : return { _VARS [ node . root ] : int ( node . hi is path [ i + 1 ] ) for i , node in enumerate ( path [ : - 1 ] ) }
Convert a BDD path to a BDD point .
49
12
25,867
def _find_path ( start , end , path = tuple ( ) ) : path = path + ( start , ) if start is end : return path else : ret = None if start . lo is not None : ret = _find_path ( start . lo , end , path ) if ret is None and start . hi is not None : ret = _find_path ( start . hi , end , path ) return ret
Return the path from start to end .
89
8
25,868
def _iter_all_paths ( start , end , rand = False , path = tuple ( ) ) : path = path + ( start , ) if start is end : yield path else : nodes = [ start . lo , start . hi ] if rand : # pragma: no cover random . shuffle ( nodes ) for node in nodes : if node is not None : yield from _iter_all_paths ( node , end , rand , path )
Iterate through all paths from start to end .
96
10
25,869
def _dfs_preorder ( node , visited ) : if node not in visited : visited . add ( node ) yield node if node . lo is not None : yield from _dfs_preorder ( node . lo , visited ) if node . hi is not None : yield from _dfs_preorder ( node . hi , visited )
Iterate through nodes in DFS pre - order .
73
11
25,870
def _dfs_postorder ( node , visited ) : if node . lo is not None : yield from _dfs_postorder ( node . lo , visited ) if node . hi is not None : yield from _dfs_postorder ( node . hi , visited ) if node not in visited : visited . add ( node ) yield node
Iterate through nodes in DFS post - order .
73
11
25,871
def _bfs ( node , visited ) : queue = collections . deque ( ) queue . appendleft ( node ) while queue : node = queue . pop ( ) if node not in visited : if node . lo is not None : queue . appendleft ( node . lo ) if node . hi is not None : queue . appendleft ( node . hi ) visited . add ( node ) yield node
Iterate through nodes in BFS order .
83
9
25,872
def parse(s):
    """Parse a Boolean expression string; return an abstract syntax tree.

    Raises Error on lexical or syntax failure.
    """
    lexer = iter(BoolExprLexer(s))
    try:
        tree = _expr(lexer)
    except lex.RunError as exc:
        fstr = ("{0.args[0]}: "
                "(line: {0.lineno}, offset: {0.offset}, text: {0.text})")
        raise Error(fstr.format(exc))
    # The whole input must have been consumed.
    _expect_token(lexer, {EndToken})
    return tree
Parse a Boolean expression string and return an expression abstract syntax tree .
110
14
25,873
def _ite(lexer):
    """Return an if-then-else expression: IMPL ['?' ITE ':' ITE]."""
    s = _impl(lexer)
    tok = next(lexer)
    if not isinstance(tok, OP_question):
        # Plain IMPL; push the lookahead token back.
        lexer.unpop_token(tok)
        return s
    d1 = _ite(lexer)
    _expect_token(lexer, {OP_colon})
    d0 = _ite(lexer)
    return ('ite', s, d1, d0)
Return an ITE expression .
112
6
25,874
def _impl(lexer):
    """Return an implication expression: SUMTERM ['=>' | '<=>' IMPL]."""
    p = _sumterm(lexer)
    tok = next(lexer)
    if isinstance(tok, OP_rarrow):
        return ('implies', p, _impl(lexer))
    if isinstance(tok, OP_lrarrow):
        return ('equal', p, _impl(lexer))
    # Plain SUMTERM; push the lookahead token back.
    lexer.unpop_token(tok)
    return p
Return an Implies expression .
126
6
25,875
def _sumterm(lexer):
    """Return a sum term expression: XORTERM SUMTERM'."""
    head = _xorterm(lexer)
    tail = _sumterm_prime(lexer)
    return head if tail is None else ('or', head, tail)
Return a sum term expression .
64
8
25,876
def _sumterm_prime(lexer):
    """Return the tail of a sum term (left recursion eliminated), or None."""
    tok = next(lexer)
    # null production: push the lookahead back.
    if not isinstance(tok, OP_or):
        lexer.unpop_token(tok)
        return None
    # '|' XORTERM SUMTERM'
    head = _xorterm(lexer)
    tail = _sumterm_prime(lexer)
    return head if tail is None else ('or', head, tail)
Return a sum term expression eliminates left recursion .
114
10
25,877
def _xorterm(lexer):
    """Return an xor term expression: PRODTERM XORTERM'."""
    head = _prodterm(lexer)
    tail = _xorterm_prime(lexer)
    return head if tail is None else ('xor', head, tail)
Return an xor term expresssion .
67
9
25,878
def _xorterm_prime(lexer):
    """Return the tail of an xor term (left recursion eliminated), or None."""
    tok = next(lexer)
    # null production: push the lookahead back.
    if not isinstance(tok, OP_xor):
        lexer.unpop_token(tok)
        return None
    # '^' PRODTERM XORTERM'
    head = _prodterm(lexer)
    tail = _xorterm_prime(lexer)
    return head if tail is None else ('xor', head, tail)
Return an xor term expression eliminates left recursion .
119
11
25,879
def _prodterm(lexer):
    """Return a product term expression: FACTOR PRODTERM'."""
    head = _factor(lexer)
    tail = _prodterm_prime(lexer)
    return head if tail is None else ('and', head, tail)
Return a product term expression .
58
6
25,880
def _prodterm_prime(lexer):
    """Return the tail of a product term (left recursion eliminated), or None."""
    tok = next(lexer)
    # null production: push the lookahead back.
    if not isinstance(tok, OP_and):
        lexer.unpop_token(tok)
        return None
    # '&' FACTOR PRODTERM'
    head = _factor(lexer)
    tail = _prodterm_prime(lexer)
    return head if tail is None else ('and', head, tail)
Return a product term expression eliminates left recursion .
108
10
25,881
def _factor ( lexer ) : tok = _expect_token ( lexer , FACTOR_TOKS ) # '~' F toktype = type ( tok ) if toktype is OP_not : return ( 'not' , _factor ( lexer ) ) # '(' EXPR ')' elif toktype is LPAREN : expr = _expr ( lexer ) _expect_token ( lexer , { RPAREN } ) return expr # OPN '(' ... ')' elif any ( toktype is t for t in OPN_TOKS ) : op = tok . ASTOP _expect_token ( lexer , { LPAREN } ) tok = next ( lexer ) # OPN '(' ')' if isinstance ( tok , RPAREN ) : xs = tuple ( ) # OPN '(' XS ')' else : lexer . unpop_token ( tok ) xs = _args ( lexer ) _expect_token ( lexer , { RPAREN } ) return ( op , ) + xs # ITE '(' EXPR ',' EXPR ',' EXPR ')' elif toktype is KW_ite : _expect_token ( lexer , { LPAREN } ) s = _expr ( lexer ) _expect_token ( lexer , { COMMA } ) d1 = _expr ( lexer ) _expect_token ( lexer , { COMMA } ) d0 = _expr ( lexer ) _expect_token ( lexer , { RPAREN } ) return ( 'ite' , s , d1 , d0 ) # Implies '(' EXPR ',' EXPR ')' elif toktype is KW_implies : _expect_token ( lexer , { LPAREN } ) p = _expr ( lexer ) _expect_token ( lexer , { COMMA } ) q = _expr ( lexer ) _expect_token ( lexer , { RPAREN } ) return ( 'implies' , p , q ) # Not '(' EXPR ')' elif toktype is KW_not : _expect_token ( lexer , { LPAREN } ) x = _expr ( lexer ) _expect_token ( lexer , { RPAREN } ) return ( 'not' , x ) # VARIABLE elif toktype is NameToken : lexer . unpop_token ( tok ) return _variable ( lexer ) # '0' | '1' else : if tok . value not in { 0 , 1 } : raise Error ( "unexpected token: " + str ( tok ) ) return ( 'const' , tok . value )
Return a factor expression .
608
5
25,882
def _zom_arg(lexer):
    """Return zero or more comma-prefixed arguments as a tuple."""
    tok = next(lexer)
    # null production: push the lookahead back.
    if not isinstance(tok, COMMA):
        lexer.unpop_token(tok)
        return tuple()
    # ',' EXPR ZOM_X
    first = _expr(lexer)
    return (first, ) + _zom_arg(lexer)
Return zero or more arguments .
77
6
25,883
def _variable(lexer):
    """Return a variable expression: NAMES ['[' INDICES ']']."""
    names = _names(lexer)
    indices = tuple()
    tok = next(lexer)
    if isinstance(tok, LBRACK):
        indices = _indices(lexer)
        _expect_token(lexer, {RBRACK})
    else:
        # No subscript; push the lookahead back.
        lexer.unpop_token(tok)
    return ('var', names, indices)
Return a variable expression .
98
5
25,884
def _names(lexer):
    """Return a tuple of dotted names, reversed (innermost name first)."""
    first = _expect_token(lexer, {NameToken}).value
    names = (first, ) + _zom_name(lexer)
    return tuple(reversed(names))
Return a tuple of names .
55
6
25,885
def _zom_name(lexer):
    """Return zero or more dot-prefixed names as a tuple."""
    tok = next(lexer)
    # null production: push the lookahead back.
    if not isinstance(tok, DOT):
        lexer.unpop_token(tok)
        return tuple()
    # '.' NAME ZOM_NAME
    name = _expect_token(lexer, {NameToken}).value
    return (name, ) + _zom_name(lexer)
Return zero or more names .
91
6
25,886
def _indices(lexer):
    """Return a tuple of one or more comma-separated integer indices."""
    head = _expect_token(lexer, {IntegerToken}).value
    return (head, ) + _zom_index(lexer)
Return a tuple of indices .
45
6
25,887
def _zom_index(lexer):
    """Return zero or more comma-prefixed integer indices as a tuple."""
    tok = next(lexer)
    # null production: push the lookahead back.
    if not isinstance(tok, COMMA):
        lexer.unpop_token(tok)
        return tuple()
    # ',' INT
    idx = _expect_token(lexer, {IntegerToken}).value
    return (idx, ) + _zom_index(lexer)
Return zero or more indices .
88
6
25,888
def subword(w):
    """Apply the AES S-box to each of the four bytes of a 32-bit word.

    Used by the key expansion routine.
    """
    b0, b1, b2, b3 = w.reshape(4, 8)
    return SBOX[b0] + SBOX[b1] + SBOX[b2] + SBOX[b3]
Function used in the Key Expansion routine that takes a four - byte input word and applies an S - box to each of the four bytes to produce an output word .
54
33
25,889
def multiply(a, col):
    """Multiply a 4x4 nibble matrix by one 4-byte column."""
    rows = a.reshape(4, 4, 4)
    col = col.reshape(4, 8)
    return fcat(*[rowxcol(rows[r], col) for r in range(4)])
Multiply a matrix by one column .
85
9
25,890
def rowxcol(row, col):
    """Multiply one matrix row by one column in GF(2^8)."""
    row = row.reshape(4, 4)
    col = col.reshape(4, 8)
    acc = uint2exprs(0, 8)
    for i in range(4):
        byte = col[i]
        for j in range(4):
            # xtime(byte, j) multiplies byte by x**j in GF(2^8).
            if row[i, j]:
                acc = acc ^ xtime(byte, j)
    return acc
Multiply one row and one column .
83
9
25,891
def shift_rows(state):
    """Cyclically shift the last three rows of the State by different offsets.

    Output byte g*4 + i is input byte state[(i + g) % 4][i]: row i of
    output column g comes from input column (i + g) % 4.
    """
    state = state.reshape(4, 4, 8)
    return fcat(*[state[(i + g) % 4][i] for g in range(4) for i in range(4)])
Transformation in the Cipher that processes the State by cyclically shifting the last three rows of the State by different offsets .
153
24
25,892
def key_expand(key, Nk=4):
    """Expand the cipher key into the full round-key schedule.

    Nk is the key length in 32-bit words (4, 6, or 8), giving
    Nr = Nk + 6 rounds and 4 * (Nr + 1) round-key words.
    """
    assert Nk in {4, 6, 8}
    Nr = Nk + 6
    words = key.reshape(Nk, 32)
    rkey = exprzeros(4 * (Nr + 1), 32)
    for i in range(4 * (Nr + 1)):
        if i < Nk:
            # The first Nk words are the key itself.
            rkey[i] = words[i]
        elif i % Nk == 0:
            temp = subword(rotword(rkey[i - 1])) ^ RCON[i // Nk].zext(32 - 8)
            rkey[i] = rkey[i - Nk] ^ temp
        elif Nk > 6 and i % Nk == 4:
            # Extra SubWord step for 256-bit keys.
            rkey[i] = rkey[i - Nk] ^ subword(rkey[i - 1])
        else:
            rkey[i] = rkey[i - Nk] ^ rkey[i - 1]
    return rkey
Expand the key into the round key .
222
9
25,893
def cipher(rkey, pt, Nk=4):
    """AES encryption cipher: encrypt one 128-bit block with round keys rkey."""
    assert Nk in {4, 6, 8}
    Nr = Nk + 6
    rkey = rkey.reshape(4 * (Nr + 1), 32)
    # Initial round: whiten with the first four round-key words.
    state = add_round_key(pt.reshape(128), rkey[0:4])
    for rnd in range(1, Nr):
        state = mix_columns(shift_rows(sub_bytes(state)))
        state = add_round_key(state, rkey[4 * rnd:4 * (rnd + 1)])
    # Final round omits MixColumns.
    state = shift_rows(sub_bytes(state))
    return add_round_key(state, rkey[4 * Nr:4 * (Nr + 1)])
AES encryption cipher .
193
5
25,894
def inv_cipher(rkey, ct, Nk=4):
    """AES decryption cipher: decrypt one 128-bit block with round keys rkey."""
    assert Nk in {4, 6, 8}
    Nr = Nk + 6
    rkey = rkey.reshape(4 * (Nr + 1), 32)
    # Initial round: whiten with the last four round-key words.
    state = add_round_key(ct.reshape(128), rkey[4 * Nr:4 * (Nr + 1)])
    for rnd in range(Nr - 1, 0, -1):
        state = inv_sub_bytes(inv_shift_rows(state))
        state = add_round_key(state, rkey[4 * rnd:4 * (rnd + 1)])
        state = inv_mix_columns(state)
    # Final round omits InvMixColumns.
    state = inv_sub_bytes(inv_shift_rows(state))
    return add_round_key(state, rkey[0:4])
AES decryption cipher .
215
6
25,895
def encrypt(key, pt, Nk=4):
    """Encrypt a plain text block: expand the key, then run the cipher."""
    assert Nk in {4, 6, 8}
    return cipher(key_expand(key, Nk), pt, Nk)
Encrypt a plain text block .
53
7
25,896
def decrypt(key, ct, Nk=4):
    """Decrypt a cipher text block: expand the key, then run the inverse cipher."""
    assert Nk in {4, 6, 8}
    return inv_cipher(key_expand(key, Nk), ct, Nk)
Decrypt a plain text block .
56
7
25,897
def gray2bin(G):
    """Convert a gray-coded farray into a binary-coded farray.

    Bit i of the result is the XOR-reduction of gray bits i..end.
    """
    return farray([G[i:].uxor() for i in range(len(G))])
Convert a gray - coded vector into a binary - coded vector .
36
14
25,898
def _assume2point():
    """Convert the global assumptions into a point.

    Positive variables map to 1; complemented literals map their
    underlying variable to 0. Other entries are ignored.
    """
    point = {}
    for lit in _ASSUMPTIONS:
        if isinstance(lit, Variable):
            point[lit] = 1
        elif isinstance(lit, Complement):
            point[~lit] = 0
    return point
Convert global assumptions to a point .
58
8
25,899
def exprvar(name, index=None):
    r"""Return a unique Expression variable, interned by uniqid."""
    bvar = boolfunc.var(name, index)
    uniqid = bvar.uniqid
    if uniqid not in _LITS:
        _LITS[uniqid] = Variable(bvar)
    return _LITS[uniqid]
r Return a unique Expression variable .
63
7