idx int64 0 251k | question stringlengths 53 3.53k | target stringlengths 5 1.23k | len_question int64 20 893 | len_target int64 3 238 |
|---|---|---|---|---|
25,900 | def _exprcomp ( node ) : try : comp = _LITS [ node . data ( ) ] except KeyError : comp = _LITS [ node . data ( ) ] = Complement ( node ) return comp | Return a unique Expression complement . | 46 | 6 |
25,901 | def expr ( obj , simplify = True ) : if isinstance ( obj , Expression ) : return obj # False, True, 0, 1 elif isinstance ( obj , int ) and obj in { 0 , 1 } : return _CONSTS [ obj ] elif isinstance ( obj , str ) : ast = pyeda . parsing . boolexpr . parse ( obj ) ex = ast2expr ( ast ) if simplify : ex = ex . simplify ( ) return ex else : return One if bool ( obj ) else Zero | Convert an arbitrary object into an Expression . | 112 | 9 |
25,902 | def ast2expr ( ast ) : if ast [ 0 ] == 'const' : return _CONSTS [ ast [ 1 ] ] elif ast [ 0 ] == 'var' : return exprvar ( ast [ 1 ] , ast [ 2 ] ) else : xs = [ ast2expr ( x ) for x in ast [ 1 : ] ] return ASTOPS [ ast [ 0 ] ] ( * xs , simplify = False ) | Convert an abstract syntax tree to an Expression . | 93 | 10 |
25,903 | def expr2dimacscnf ( ex ) : litmap , nvars , clauses = ex . encode_cnf ( ) return litmap , DimacsCNF ( nvars , clauses ) | Convert an expression into an equivalent DIMACS CNF . | 44 | 14 |
25,904 | def expr2dimacssat ( ex ) : if not ex . simple : raise ValueError ( "expected ex to be simplified" ) litmap , nvars = ex . encode_inputs ( ) formula = _expr2sat ( ex , litmap ) if 'xor' in formula : if '=' in formula : fmt = 'satex' else : fmt = 'satx' elif '=' in formula : fmt = 'sate' else : fmt = 'sat' return "p {} {}\n{}" . format ( fmt , nvars , formula ) | Convert an expression into an equivalent DIMACS SAT string . | 125 | 14 |
25,905 | def _expr2sat ( ex , litmap ) : # pragma: no cover if isinstance ( ex , Literal ) : return str ( litmap [ ex ] ) elif isinstance ( ex , NotOp ) : return "-(" + _expr2sat ( ex . x , litmap ) + ")" elif isinstance ( ex , OrOp ) : return "+(" + " " . join ( _expr2sat ( x , litmap ) for x in ex . xs ) + ")" elif isinstance ( ex , AndOp ) : return "*(" + " " . join ( _expr2sat ( x , litmap ) for x in ex . xs ) + ")" elif isinstance ( ex , XorOp ) : return ( "xor(" + " " . join ( _expr2sat ( x , litmap ) for x in ex . xs ) + ")" ) elif isinstance ( ex , EqualOp ) : return "=(" + " " . join ( _expr2sat ( x , litmap ) for x in ex . xs ) + ")" else : fstr = ( "expected ex to be a Literal or Not/Or/And/Xor/Equal op, " "got {0.__name__}" ) raise ValueError ( fstr . format ( type ( ex ) ) ) | Convert an expression to a DIMACS SAT string . | 292 | 13 |
25,906 | def upoint2exprpoint ( upoint ) : point = dict ( ) for uniqid in upoint [ 0 ] : point [ _LITS [ uniqid ] ] = 0 for uniqid in upoint [ 1 ] : point [ _LITS [ uniqid ] ] = 1 return point | Convert an untyped point into an Expression point . | 66 | 12 |
25,907 | def Not ( x , simplify = True ) : x = Expression . box ( x ) . node y = exprnode . not_ ( x ) if simplify : y = y . simplify ( ) return _expr ( y ) | Expression negation operator | 46 | 5 |
25,908 | def Equal ( * xs , simplify = True ) : xs = [ Expression . box ( x ) . node for x in xs ] y = exprnode . eq ( * xs ) if simplify : y = y . simplify ( ) return _expr ( y ) | Expression equality operator | 57 | 4 |
25,909 | def Implies ( p , q , simplify = True ) : p = Expression . box ( p ) . node q = Expression . box ( q ) . node y = exprnode . impl ( p , q ) if simplify : y = y . simplify ( ) return _expr ( y ) | Expression implication operator | 60 | 4 |
25,910 | def Unequal ( * xs , simplify = True ) : xs = [ Expression . box ( x ) . node for x in xs ] y = exprnode . not_ ( exprnode . eq ( * xs ) ) if simplify : y = y . simplify ( ) return _expr ( y ) | Expression inequality operator | 66 | 4 |
25,911 | def OneHot0 ( * xs , simplify = True , conj = True ) : xs = [ Expression . box ( x ) . node for x in xs ] terms = list ( ) if conj : for x0 , x1 in itertools . combinations ( xs , 2 ) : terms . append ( exprnode . or_ ( exprnode . not_ ( x0 ) , exprnode . not_ ( x1 ) ) ) y = exprnode . and_ ( * terms ) else : for _xs in itertools . combinations ( xs , len ( xs ) - 1 ) : terms . append ( exprnode . and_ ( * [ exprnode . not_ ( x ) for x in _xs ] ) ) y = exprnode . or_ ( * terms ) if simplify : y = y . simplify ( ) return _expr ( y ) | Return an expression that means at most one input function is true . | 184 | 13 |
25,912 | def OneHot ( * xs , simplify = True , conj = True ) : xs = [ Expression . box ( x ) . node for x in xs ] terms = list ( ) if conj : for x0 , x1 in itertools . combinations ( xs , 2 ) : terms . append ( exprnode . or_ ( exprnode . not_ ( x0 ) , exprnode . not_ ( x1 ) ) ) terms . append ( exprnode . or_ ( * xs ) ) y = exprnode . and_ ( * terms ) else : for i , xi in enumerate ( xs ) : zeros = [ exprnode . not_ ( x ) for x in xs [ : i ] + xs [ i + 1 : ] ] terms . append ( exprnode . and_ ( xi , * zeros ) ) y = exprnode . or_ ( * terms ) if simplify : y = y . simplify ( ) return _expr ( y ) | Return an expression that means exactly one input function is true . | 210 | 12 |
25,913 | def NHot ( n , * xs , simplify = True ) : if not isinstance ( n , int ) : raise TypeError ( "expected n to be an int" ) if not 0 <= n <= len ( xs ) : fstr = "expected 0 <= n <= {}, got {}" raise ValueError ( fstr . format ( len ( xs ) , n ) ) xs = [ Expression . box ( x ) . node for x in xs ] num = len ( xs ) terms = list ( ) for hot_idxs in itertools . combinations ( range ( num ) , n ) : hot_idxs = set ( hot_idxs ) _xs = [ xs [ i ] if i in hot_idxs else exprnode . not_ ( xs [ i ] ) for i in range ( num ) ] terms . append ( exprnode . and_ ( * _xs ) ) y = exprnode . or_ ( * terms ) if simplify : y = y . simplify ( ) return _expr ( y ) | Return an expression that means exactly N input functions are true . | 223 | 12 |
25,914 | def Majority ( * xs , simplify = True , conj = False ) : xs = [ Expression . box ( x ) . node for x in xs ] if conj : terms = list ( ) for _xs in itertools . combinations ( xs , ( len ( xs ) + 1 ) // 2 ) : terms . append ( exprnode . or_ ( * _xs ) ) y = exprnode . and_ ( * terms ) else : terms = list ( ) for _xs in itertools . combinations ( xs , len ( xs ) // 2 + 1 ) : terms . append ( exprnode . and_ ( * _xs ) ) y = exprnode . or_ ( * terms ) if simplify : y = y . simplify ( ) return _expr ( y ) | Return an expression that means the majority of input functions are true . | 167 | 13 |
25,915 | def Mux ( fs , sel , simplify = True ) : # convert Mux([a, b], x) to Mux([a, b], [x]) if isinstance ( sel , Expression ) : sel = [ sel ] if len ( sel ) < clog2 ( len ( fs ) ) : fstr = "expected at least {} select bits, got {}" raise ValueError ( fstr . format ( clog2 ( len ( fs ) ) , len ( sel ) ) ) it = boolfunc . iter_terms ( sel ) y = exprnode . or_ ( * [ exprnode . and_ ( f . node , * [ lit . node for lit in next ( it ) ] ) for f in fs ] ) if simplify : y = y . simplify ( ) return _expr ( y ) | Return an expression that multiplexes a sequence of input functions over a sequence of select functions . | 178 | 18 |
25,916 | def _backtrack ( ex ) : if ex is Zero : return None elif ex is One : return dict ( ) else : v = ex . top points = { v : 0 } , { v : 1 } for point in points : soln = _backtrack ( ex . restrict ( point ) ) if soln is not None : soln . update ( point ) return soln return None | If this function is satisfiable return a satisfying input upoint . Otherwise return None . | 83 | 17 |
25,917 | def _iter_backtrack ( ex , rand = False ) : if ex is One : yield dict ( ) elif ex is not Zero : if rand : v = random . choice ( ex . inputs ) if rand else ex . top else : v = ex . top points = [ { v : 0 } , { v : 1 } ] if rand : random . shuffle ( points ) for point in points : for soln in _iter_backtrack ( ex . restrict ( point ) , rand ) : soln . update ( point ) yield soln | Iterate through all satisfying points using backtrack algorithm . | 115 | 11 |
25,918 | def _tseitin ( ex , auxvarname , auxvars = None ) : if isinstance ( ex , Literal ) : return ex , list ( ) else : if auxvars is None : auxvars = list ( ) lits = list ( ) constraints = list ( ) for x in ex . xs : lit , subcons = _tseitin ( x , auxvarname , auxvars ) lits . append ( lit ) constraints . extend ( subcons ) auxvarindex = len ( auxvars ) auxvar = exprvar ( auxvarname , auxvarindex ) auxvars . append ( auxvar ) f = ASTOPS [ ex . ASTOP ] ( * lits ) constraints . append ( ( auxvar , f ) ) return auxvar , constraints | Convert a factored expression to a literal and a list of constraints . | 173 | 15 |
25,919 | def eq ( self , other ) : other_node = self . box ( other ) . node return _expr ( exprnode . eq ( self . node , other_node ) ) | Boolean equal operator . | 38 | 5 |
25,920 | def pushdown_not ( self ) : node = self . node . pushdown_not ( ) if node is self . node : return self else : return _expr ( node ) | Return an expression with NOT operators pushed down thru dual ops . | 38 | 12 |
25,921 | def simplify ( self ) : node = self . node . simplify ( ) if node is self . node : return self else : return _expr ( node ) | Return a simplified expression . | 32 | 5 |
25,922 | def to_binary ( self ) : node = self . node . to_binary ( ) if node is self . node : return self else : return _expr ( node ) | Convert N - ary operators to binary operators . | 36 | 11 |
25,923 | def to_nnf ( self ) : node = self . node . to_nnf ( ) if node is self . node : return self else : return _expr ( node ) | Return an equivalent expression is negation normal form . | 38 | 10 |
25,924 | def to_dnf ( self ) : node = self . node . to_dnf ( ) if node is self . node : return self else : return _expr ( node ) | Return an equivalent expression in disjunctive normal form . | 38 | 11 |
25,925 | def to_cnf ( self ) : node = self . node . to_cnf ( ) if node is self . node : return self else : return _expr ( node ) | Return an equivalent expression in conjunctive normal form . | 38 | 11 |
25,926 | def complete_sum ( self ) : node = self . node . complete_sum ( ) if node is self . node : return self else : return _expr ( node ) | Return an equivalent DNF expression that includes all prime implicants . | 36 | 14 |
25,927 | def expand ( self , vs = None , conj = False ) : vs = self . _expect_vars ( vs ) if vs : outer , inner = ( And , Or ) if conj else ( Or , And ) terms = [ inner ( self . restrict ( p ) , * boolfunc . point2term ( p , conj ) ) for p in boolfunc . iter_points ( vs ) ] if conj : terms = [ term for term in terms if term is not One ] else : terms = [ term for term in terms if term is not Zero ] return outer ( * terms , simplify = False ) else : return self | Return the Shannon expansion with respect to a list of variables . | 132 | 12 |
25,928 | def encode_inputs ( self ) : litmap = dict ( ) nvars = 0 for i , v in enumerate ( self . inputs , start = 1 ) : litmap [ v ] = i litmap [ ~ v ] = - i litmap [ i ] = v litmap [ - i ] = ~ v nvars += 1 return litmap , nvars | Return a compact encoding for input variables . | 81 | 8 |
25,929 | def tseitin ( self , auxvarname = 'aux' ) : if self . is_cnf ( ) : return self _ , constraints = _tseitin ( self . to_nnf ( ) , auxvarname ) fst = constraints [ - 1 ] [ 1 ] rst = [ Equal ( v , ex ) . to_cnf ( ) for v , ex in constraints [ : - 1 ] ] return And ( fst , * rst ) | Convert the expression to Tseitin s encoding . | 104 | 12 |
25,930 | def equivalent ( self , other ) : f = Xor ( self , self . box ( other ) ) return f . satisfy_one ( ) is None | Return True if this expression is equivalent to other . | 32 | 10 |
25,931 | def reduce ( self ) : support = frozenset ( range ( 1 , self . nvars + 1 ) ) new_clauses = set ( ) for clause in self . clauses : vs = list ( support - { abs ( uniqid ) for uniqid in clause } ) if vs : for num in range ( 1 << len ( vs ) ) : new_part = { v if bit_on ( num , i ) else ~ v for i , v in enumerate ( vs ) } new_clauses . add ( clause | new_part ) else : new_clauses . add ( clause ) return self . __class__ ( self . nvars , new_clauses ) | Reduce to a canonical form . | 152 | 7 |
25,932 | def decode ( self , litmap ) : return Or ( * [ And ( * [ litmap [ idx ] for idx in clause ] ) for clause in self . clauses ] ) | Convert the DNF to an expression . | 39 | 9 |
25,933 | def satisfy_one ( self , assumptions = None , * * params ) : verbosity = params . get ( 'verbosity' , 0 ) default_phase = params . get ( 'default_phase' , 2 ) propagation_limit = params . get ( 'propagation_limit' , - 1 ) decision_limit = params . get ( 'decision_limit' , - 1 ) seed = params . get ( 'seed' , 1 ) return picosat . satisfy_one ( self . nvars , self . clauses , assumptions , verbosity , default_phase , propagation_limit , decision_limit , seed ) | If the input CNF is satisfiable return a satisfying input point . A contradiction will return None . | 133 | 20 |
25,934 | def satisfy_all ( self , * * params ) : verbosity = params . get ( 'verbosity' , 0 ) default_phase = params . get ( 'default_phase' , 2 ) propagation_limit = params . get ( 'propagation_limit' , - 1 ) decision_limit = params . get ( 'decision_limit' , - 1 ) seed = params . get ( 'seed' , 1 ) yield from picosat . satisfy_all ( self . nvars , self . clauses , verbosity , default_phase , propagation_limit , decision_limit , seed ) | Iterate through all satisfying input points . | 128 | 8 |
25,935 | def soln2point ( soln , litmap ) : return { litmap [ i ] : int ( val > 0 ) for i , val in enumerate ( soln , start = 1 ) } | Convert a solution vector to a point . | 43 | 9 |
25,936 | def _cover2exprs ( inputs , noutputs , cover ) : fs = list ( ) for i in range ( noutputs ) : terms = list ( ) for invec , outvec in cover : if outvec [ i ] : term = list ( ) for j , v in enumerate ( inputs ) : if invec [ j ] == 1 : term . append ( ~ v ) elif invec [ j ] == 2 : term . append ( v ) terms . append ( term ) fs . append ( Or ( * [ And ( * term ) for term in terms ] ) ) return tuple ( fs ) | Convert a cover to a tuple of Expression instances . | 131 | 11 |
25,937 | def fcat ( * fs ) : items = list ( ) for f in fs : if isinstance ( f , boolfunc . Function ) : items . append ( f ) elif isinstance ( f , farray ) : items . extend ( f . flat ) else : raise TypeError ( "expected Function or farray" ) return farray ( items ) | Concatenate a sequence of farrays . | 75 | 10 |
25,938 | def _dims2shape ( * dims ) : if not dims : raise ValueError ( "expected at least one dimension spec" ) shape = list ( ) for dim in dims : if isinstance ( dim , int ) : dim = ( 0 , dim ) if isinstance ( dim , tuple ) and len ( dim ) == 2 : if dim [ 0 ] < 0 : raise ValueError ( "expected low dimension to be >= 0" ) if dim [ 1 ] < 0 : raise ValueError ( "expected high dimension to be >= 0" ) if dim [ 0 ] > dim [ 1 ] : raise ValueError ( "expected low <= high dimensions" ) start , stop = dim else : raise TypeError ( "expected dimension to be int or (int, int)" ) shape . append ( ( start , stop ) ) return tuple ( shape ) | Convert input dimensions to a shape . | 179 | 8 |
25,939 | def _volume ( shape ) : prod = 1 for start , stop in shape : prod *= stop - start return prod | Return the volume of a shape . | 25 | 7 |
25,940 | def _zeros ( ftype , * dims ) : shape = _dims2shape ( * dims ) objs = [ ftype . box ( 0 ) for _ in range ( _volume ( shape ) ) ] return farray ( objs , shape , ftype ) | Return a new farray filled with zeros . | 60 | 10 |
25,941 | def _vars ( ftype , name , * dims ) : shape = _dims2shape ( * dims ) objs = list ( ) for indices in itertools . product ( * [ range ( i , j ) for i , j in shape ] ) : objs . append ( _VAR [ ftype ] ( name , indices ) ) return farray ( objs , shape , ftype ) | Return a new farray filled with Boolean variables . | 89 | 10 |
25,942 | def _uint2objs ( ftype , num , length = None ) : if num == 0 : objs = [ ftype . box ( 0 ) ] else : _num = num objs = list ( ) while _num != 0 : objs . append ( ftype . box ( _num & 1 ) ) _num >>= 1 if length : if length < len ( objs ) : fstr = "overflow: num = {} requires length >= {}, got length = {}" raise ValueError ( fstr . format ( num , len ( objs ) , length ) ) else : while len ( objs ) < length : objs . append ( ftype . box ( 0 ) ) return objs | Convert an unsigned integer to a list of constant expressions . | 152 | 12 |
25,943 | def _uint2farray ( ftype , num , length = None ) : if num < 0 : raise ValueError ( "expected num >= 0" ) else : objs = _uint2objs ( ftype , num , length ) return farray ( objs ) | Convert an unsigned integer to an farray . | 58 | 10 |
25,944 | def _int2farray ( ftype , num , length = None ) : if num < 0 : req_length = clog2 ( abs ( num ) ) + 1 objs = _uint2objs ( ftype , 2 ** req_length + num ) else : req_length = clog2 ( num + 1 ) + 1 objs = _uint2objs ( ftype , num , req_length ) if length : if length < req_length : fstr = "overflow: num = {} requires length >= {}, got length = {}" raise ValueError ( fstr . format ( num , req_length , length ) ) else : sign = objs [ - 1 ] objs += [ sign ] * ( length - req_length ) return farray ( objs ) | Convert a signed integer to an farray . | 170 | 10 |
25,945 | def _itemize ( objs ) : if not isinstance ( objs , collections . Sequence ) : raise TypeError ( "expected a sequence of Function" ) isseq = [ isinstance ( obj , collections . Sequence ) for obj in objs ] if not any ( isseq ) : ftype = None for obj in objs : if ftype is None : if isinstance ( obj , BinaryDecisionDiagram ) : ftype = BinaryDecisionDiagram elif isinstance ( obj , Expression ) : ftype = Expression elif isinstance ( obj , TruthTable ) : ftype = TruthTable else : raise TypeError ( "expected valid Function inputs" ) elif not isinstance ( obj , ftype ) : raise ValueError ( "expected uniform Function types" ) return list ( objs ) , ( ( 0 , len ( objs ) ) , ) , ftype elif all ( isseq ) : items = list ( ) shape = None ftype = None for obj in objs : _items , _shape , _ftype = _itemize ( obj ) if shape is None : shape = _shape elif shape != _shape : raise ValueError ( "expected uniform farray dimensions" ) if ftype is None : ftype = _ftype elif ftype != _ftype : raise ValueError ( "expected uniform Function types" ) items += _items shape = ( ( 0 , len ( objs ) ) , ) + shape return items , shape , ftype else : raise ValueError ( "expected uniform farray dimensions" ) | Recursive helper function for farray . | 329 | 8 |
25,946 | def _check_shape ( shape ) : if isinstance ( shape , tuple ) : for dim in shape : if ( isinstance ( dim , tuple ) and len ( dim ) == 2 and isinstance ( dim [ 0 ] , int ) and isinstance ( dim [ 1 ] , int ) ) : if dim [ 0 ] < 0 : raise ValueError ( "expected low dimension to be >= 0" ) if dim [ 1 ] < 0 : raise ValueError ( "expected high dimension to be >= 0" ) if dim [ 0 ] > dim [ 1 ] : raise ValueError ( "expected low <= high dimensions" ) else : raise TypeError ( "expected shape dimension to be (int, int)" ) else : raise TypeError ( "expected shape to be tuple of (int, int)" ) | Verify that a shape has the right format . | 167 | 10 |
25,947 | def _norm_index ( dim , index , start , stop ) : length = stop - start if - length <= index < 0 : normindex = index + length elif start <= index < stop : normindex = index - start else : fstr = "expected dim {} index in range [{}, {})" raise IndexError ( fstr . format ( dim , start , stop ) ) return normindex | Return an index normalized to an farray start index . | 84 | 11 |
25,948 | def _norm_slice ( sl , start , stop ) : length = stop - start if sl . start is None : normstart = 0 else : if sl . start < 0 : if sl . start < - length : normstart = 0 else : normstart = sl . start + length else : if sl . start > stop : normstart = length else : normstart = sl . start - start if sl . stop is None : normstop = length else : if sl . stop < 0 : if sl . stop < - length : normstop = 0 else : normstop = sl . stop + length else : if sl . stop > stop : normstop = length else : normstop = sl . stop - start if normstop < normstart : normstop = normstart return slice ( normstart , normstop ) | Return a slice normalized to an farray start index . | 169 | 11 |
25,949 | def _filtdim ( items , shape , dim , nsl ) : normshape = tuple ( stop - start for start , stop in shape ) nsl_type = type ( nsl ) newitems = list ( ) # Number of groups num = reduce ( operator . mul , normshape [ : dim + 1 ] ) # Size of each group size = len ( items ) // num # Size of the dimension n = normshape [ dim ] if nsl_type is int : for i in range ( num ) : if i % n == nsl : newitems += items [ size * i : size * ( i + 1 ) ] # Collapse dimension newshape = shape [ : dim ] + shape [ dim + 1 : ] elif nsl_type is slice : for i in range ( num ) : if nsl . start <= ( i % n ) < nsl . stop : newitems += items [ size * i : size * ( i + 1 ) ] # Reshape dimension offset = shape [ dim ] [ 0 ] redim = ( offset + nsl . start , offset + nsl . stop ) newshape = shape [ : dim ] + ( redim , ) + shape [ dim + 1 : ] # farray else : if nsl . size < clog2 ( n ) : fstr = "expected dim {} select to have >= {} bits, got {}" raise ValueError ( fstr . format ( dim , clog2 ( n ) , nsl . size ) ) groups = [ list ( ) for _ in range ( n ) ] for i in range ( num ) : groups [ i % n ] += items [ size * i : size * ( i + 1 ) ] for muxins in zip ( * groups ) : it = boolfunc . iter_terms ( nsl . _items ) xs = [ reduce ( operator . and_ , ( muxin , ) + next ( it ) ) for muxin in muxins ] newitems . append ( reduce ( operator . or_ , xs ) ) # Collapse dimension newshape = shape [ : dim ] + shape [ dim + 1 : ] return newitems , newshape | Return items shape filtered by a dimension slice . | 461 | 9 |
25,950 | def _iter_coords ( nsls ) : # First convert all slices to ranges ranges = list ( ) for nsl in nsls : if isinstance ( nsl , int ) : ranges . append ( range ( nsl , nsl + 1 ) ) else : ranges . append ( range ( nsl . start , nsl . stop ) ) # Iterate through all matching coordinates yield from itertools . product ( * ranges ) | Iterate through all matching coordinates in a sequence of slices . | 94 | 12 |
25,951 | def restrict ( self , point ) : items = [ f . restrict ( point ) for f in self . _items ] return self . __class__ ( items , self . shape , self . ftype ) | Apply the restrict method to all functions . | 43 | 8 |
25,952 | def compose ( self , mapping ) : items = [ f . compose ( mapping ) for f in self . _items ] return self . __class__ ( items , self . shape , self . ftype ) | Apply the compose method to all functions . | 43 | 8 |
25,953 | def reshape ( self , * dims ) : shape = _dims2shape ( * dims ) if _volume ( shape ) != self . size : raise ValueError ( "expected shape with equal volume" ) return self . __class__ ( self . _items , shape , self . ftype ) | Return an equivalent farray with a modified shape . | 65 | 10 |
25,954 | def to_uint ( self ) : num = 0 for i , f in enumerate ( self . _items ) : if f . is_zero ( ) : pass elif f . is_one ( ) : num += 1 << i else : fstr = "expected all functions to be a constant (0 or 1) form" raise ValueError ( fstr ) return num | Convert vector to an unsigned integer if possible . | 79 | 10 |
25,955 | def to_int ( self ) : num = self . to_uint ( ) if num and self . _items [ - 1 ] . unbox ( ) : return num - ( 1 << self . size ) else : return num | Convert vector to an integer if possible . | 48 | 9 |
25,956 | def uor ( self ) : return reduce ( operator . or_ , self . _items , self . ftype . box ( 0 ) ) | Unary OR reduction operator | 30 | 5 |
25,957 | def uand ( self ) : return reduce ( operator . and_ , self . _items , self . ftype . box ( 1 ) ) | Unary AND reduction operator | 30 | 5 |
25,958 | def uxor ( self ) : return reduce ( operator . xor , self . _items , self . ftype . box ( 0 ) ) | Unary XOR reduction operator | 31 | 6 |
25,959 | def _keys2sls ( self , keys , key2sl ) : sls = list ( ) if isinstance ( keys , tuple ) : for key in keys : sls . append ( key2sl ( key ) ) else : sls . append ( key2sl ( keys ) ) if len ( sls ) > self . ndim : fstr = "expected <= {0.ndim} slice dimensions, got {1}" raise ValueError ( fstr . format ( self , len ( sls ) ) ) return sls | Convert an input key to a list of slices . | 114 | 11 |
25,960 | def _coord2offset ( self , coord ) : size = self . size offset = 0 for dim , index in enumerate ( coord ) : size //= self . _normshape [ dim ] offset += size * index return offset | Convert a normalized coordinate to an item offset . | 48 | 10 |
25,961 | def _op_shape ( self , other ) : if isinstance ( other , farray ) : if self . shape == other . shape : return self . shape elif self . size == other . size : return None else : raise ValueError ( "expected operand sizes to match" ) else : raise TypeError ( "expected farray input" ) | Return shape that will be used by farray constructor . | 74 | 11 |
25,962 | def delete_records ( keep = 20 ) : sql = "SELECT * from records where is_deleted<>1 ORDER BY id desc LIMIT -1 offset {}" . format ( keep ) assert isinstance ( g . db , sqlite3 . Connection ) c = g . db . cursor ( ) c . execute ( sql ) rows = c . fetchall ( ) for row in rows : name = row [ 1 ] xmind = join ( app . config [ 'UPLOAD_FOLDER' ] , name ) xml = join ( app . config [ 'UPLOAD_FOLDER' ] , name [ : - 5 ] + 'xml' ) for f in [ xmind , xml ] : if exists ( f ) : os . remove ( f ) sql = 'UPDATE records SET is_deleted=1 WHERE id = ?' c . execute ( sql , ( row [ 0 ] , ) ) g . db . commit ( ) | Clean up files on server and mark the record as deleted | 202 | 11 |
25,963 | def check_expected_errors ( self , test_method ) : f = lambda key , default = [ ] : getattr ( test_method , key , default ) expected_error_page = f ( EXPECTED_ERROR_PAGE , default = None ) allowed_error_pages = f ( ALLOWED_ERROR_PAGES ) expected_error_messages = f ( EXPECTED_ERROR_MESSAGES ) allowed_error_messages = f ( ALLOWED_ERROR_MESSAGES ) self . check_errors ( expected_error_page , allowed_error_pages , expected_error_messages , allowed_error_messages , ) | This method is called after each test . It will read decorated informations and check if there are expected errors . | 146 | 22 |
25,964 | def get_error_page ( self ) : try : error_page = self . get_elm ( class_name = 'error-page' ) except NoSuchElementException : pass else : header = error_page . get_elm ( tag_name = 'h1' ) return header . text | Method returning error page . Should return string . | 66 | 9 |
25,965 | def get_error_traceback ( self ) : try : error_page = self . get_elm ( class_name = 'error-page' ) traceback = error_page . get_elm ( class_name = 'traceback' ) except NoSuchElementException : pass else : return traceback . text | Method returning traceback of error page . | 69 | 8 |
25,966 | def get_error_messages ( self ) : try : error_elms = self . get_elms ( class_name = 'error' ) except NoSuchElementException : return [ ] else : try : error_values = [ error_elm . get_attribute ( 'error' ) for error_elm in error_elms ] except Exception : error_values = [ error_elm . text for error_elm in error_elms ] finally : return error_values | Method returning error messages . Should return list of messages . | 106 | 11 |
25,967 | def check_expected_infos ( self , test_method ) : f = lambda key , default = [ ] : getattr ( test_method , key , default ) expected_info_messages = f ( EXPECTED_INFO_MESSAGES ) allowed_info_messages = f ( ALLOWED_INFO_MESSAGES ) self . check_infos ( expected_info_messages , allowed_info_messages ) | This method is called after each test . It will read decorated informations and check if there are expected infos . | 97 | 23 |
25,968 | def get_info_messages ( self ) : try : info_elms = self . get_elms ( class_name = 'info' ) except NoSuchElementException : return [ ] else : try : info_values = [ info_elm . get_attribute ( 'info' ) for info_elm in info_elms ] except Exception : # pylint: disable=broad-except info_values = [ info_elm . text for info_elm in info_elms ] finally : return info_values | Method returning info messages . Should return list of messages . | 116 | 11 |
25,969 | def _make_instance ( cls , element_class , webelement ) : if isinstance ( webelement , FirefoxWebElement ) : element_class = copy . deepcopy ( element_class ) element_class . __bases__ = tuple ( FirefoxWebElement if base is WebElement else base for base in element_class . __bases__ ) return element_class ( webelement ) | Firefox uses another implementation of element . This method switch base of wrapped element to firefox one . | 86 | 20 |
25,970 | def html ( self ) : try : body = self . get_elm ( tag_name = 'body' ) except selenium_exc . NoSuchElementException : return None else : return body . get_attribute ( 'innerHTML' ) | Returns innerHTML of whole page . On page have to be tag body . | 53 | 15 |
25,971 | def switch_to_window ( self , window_name = None , title = None , url = None ) : if window_name : self . switch_to . window ( window_name ) return if url : url = self . get_url ( path = url ) for window_handle in self . window_handles : self . switch_to . window ( window_handle ) if title and self . title == title : return if url and self . current_url == url : return raise selenium_exc . NoSuchWindowException ( 'Window (title=%s, url=%s) not found.' % ( title , url ) ) | WebDriver implements switching to other window only by it s name . With wrapper there is also option to switch by title of window or URL . URL can be also relative path . | 138 | 35 |
25,972 | def close_window ( self , window_name = None , title = None , url = None ) : main_window_handle = self . current_window_handle self . switch_to_window ( window_name , title , url ) self . close ( ) self . switch_to_window ( main_window_handle ) | WebDriver implements only closing current window . If you want to close some window without having to switch to it use this method . | 70 | 25 |
25,973 | def close_other_windows ( self ) : main_window_handle = self . current_window_handle for window_handle in self . window_handles : if window_handle == main_window_handle : continue self . switch_to_window ( window_handle ) self . close ( ) self . switch_to_window ( main_window_handle ) | Closes all not current windows . Useful for tests - after each test you can automatically close all windows . | 78 | 21 |
25,974 | def close_alert ( self , ignore_exception = False ) : try : alert = self . get_alert ( ) alert . accept ( ) except : if not ignore_exception : raise | JS alerts all blocking . This method closes it . If there is no alert method raises exception . In tests is good to call this method with ignore_exception setted to True which will ignore any exception . | 41 | 42 |
25,975 | def wait_for_alert ( self , timeout = None ) : if not timeout : timeout = self . default_wait_timeout alert = Alert ( self ) # There is no better way how to check alert appearance def alert_shown ( driver ) : try : alert . text return True except selenium_exc . NoAlertPresentException : return False self . wait ( timeout ) . until ( alert_shown ) return alert | Shortcut for waiting for alert . If it not ends with exception it returns that alert . Detault timeout is ~ . default_wait_timeout . | 88 | 30 |
25,976 | def type_name ( self ) -> T . Optional [ str ] : return self . args [ 1 ] if len ( self . args ) > 1 else None | Return type name associated with given docstring metadata . | 33 | 10 |
25,977 | def params ( self ) -> T . List [ DocstringParam ] : return [ DocstringParam . from_meta ( meta ) for meta in self . meta if meta . args [ 0 ] in { "param" , "parameter" , "arg" , "argument" , "key" , "keyword" } ] | Return parameters indicated in docstring . | 69 | 7 |
25,978 | def raises ( self ) -> T . List [ DocstringRaises ] : return [ DocstringRaises . from_meta ( meta ) for meta in self . meta if meta . args [ 0 ] in { "raises" , "raise" , "except" , "exception" } ] | Return exceptions indicated in docstring . | 63 | 7 |
25,979 | def returns ( self ) -> T . Optional [ DocstringReturns ] : try : return next ( DocstringReturns . from_meta ( meta ) for meta in self . meta if meta . args [ 0 ] in { "return" , "returns" , "yield" , "yields" } ) except StopIteration : return None | Return return information indicated in docstring . | 73 | 8 |
25,980 | def _build_meta ( text : str , title : str ) -> DocstringMeta : meta = _sections [ title ] if meta == "returns" and ":" not in text . split ( ) [ 0 ] : return DocstringMeta ( [ meta ] , description = text ) # Split spec and description before , desc = text . split ( ":" , 1 ) if desc : desc = desc [ 1 : ] if desc [ 0 ] == " " else desc if "\n" in desc : first_line , rest = desc . split ( "\n" , 1 ) desc = first_line + "\n" + inspect . cleandoc ( rest ) desc = desc . strip ( "\n" ) # Build Meta args m = re . match ( r"(\S+) \((\S+)\)$" , before ) if meta == "param" and m : arg_name , type_name = m . group ( 1 , 2 ) args = [ meta , type_name , arg_name ] else : args = [ meta , before ] return DocstringMeta ( args , description = desc ) | Build docstring element . | 233 | 5 |
25,981 | def parse ( text : str ) -> Docstring : ret = Docstring ( ) if not text : return ret # Clean according to PEP-0257 text = inspect . cleandoc ( text ) # Find first title and split on its position match = _titles_re . search ( text ) if match : desc_chunk = text [ : match . start ( ) ] meta_chunk = text [ match . start ( ) : ] else : desc_chunk = text meta_chunk = "" # Break description into short and long parts parts = desc_chunk . split ( "\n" , 1 ) ret . short_description = parts [ 0 ] or None if len ( parts ) > 1 : long_desc_chunk = parts [ 1 ] or "" ret . blank_after_short_description = long_desc_chunk . startswith ( "\n" ) ret . blank_after_long_description = long_desc_chunk . endswith ( "\n\n" ) ret . long_description = long_desc_chunk . strip ( ) or None # Split by sections determined by titles matches = list ( _titles_re . finditer ( meta_chunk ) ) if not matches : return ret splits = [ ] for j in range ( len ( matches ) - 1 ) : splits . append ( ( matches [ j ] . end ( ) , matches [ j + 1 ] . start ( ) ) ) splits . append ( ( matches [ - 1 ] . end ( ) , len ( meta_chunk ) ) ) chunks = { } for j , ( start , end ) in enumerate ( splits ) : title = matches [ j ] . group ( 1 ) if title not in _valid : continue chunks [ title ] = meta_chunk [ start : end ] . strip ( "\n" ) if not chunks : return ret # Add elements from each chunk for title , chunk in chunks . items ( ) : # Determine indent indent_match = re . search ( r"^\s+" , chunk ) if not indent_match : raise ParseError ( f'Can\'t infer indent from "{chunk}"' ) indent = indent_match . group ( ) # Check for returns/yeilds (only one element) if _sections [ title ] in ( "returns" , "yields" ) : part = inspect . cleandoc ( chunk ) ret . meta . append ( _build_meta ( part , title ) ) continue # Split based on lines which have exactly that indent _re = "^" + indent + r"(?=\S)" c_matches = list ( re . finditer ( _re , chunk , flags = re . 
M ) ) if not c_matches : raise ParseError ( f'No specification for "{title}": "{chunk}"' ) c_splits = [ ] for j in range ( len ( c_matches ) - 1 ) : c_splits . append ( ( c_matches [ j ] . end ( ) , c_matches [ j + 1 ] . start ( ) ) ) c_splits . append ( ( c_matches [ - 1 ] . end ( ) , len ( chunk ) ) ) for j , ( start , end ) in enumerate ( c_splits ) : part = chunk [ start : end ] . strip ( "\n" ) ret . meta . append ( _build_meta ( part , title ) ) return ret | Parse the Google - style docstring into its components . | 744 | 12 |
25,982 | def _determine_tool ( files ) : for file in files : linker_ext = file . split ( '.' ) [ - 1 ] if "sct" in linker_ext or "lin" in linker_ext : yield ( str ( file ) , "uvision" ) elif "ld" in linker_ext : yield ( str ( file ) , "make_gcc_arm" ) elif "icf" in linker_ext : yield ( str ( file ) , "iar_arm" ) | Yields tuples in the form of ( linker file tool the file links for | 116 | 18 |
25,983 | def _get_option ( self , settings , find_key ) : # This is used as in IAR template, everything # is as an array with random positions. We look for key with an index for option in settings : if option [ 'name' ] == find_key : return settings . index ( option ) | Return index for provided key | 66 | 5 |
25,984 | def _ewp_flags_set ( self , ewp_dic_subset , project_dic , flag_type , flag_dic ) : try : if flag_type in project_dic [ 'misc' ] . keys ( ) : # enable commands index_option = self . _get_option ( ewp_dic_subset , flag_dic [ 'enable' ] ) self . _set_option ( ewp_dic_subset [ index_option ] , '1' ) index_option = self . _get_option ( ewp_dic_subset , flag_dic [ 'set' ] ) if type ( ewp_dic_subset [ index_option ] [ 'state' ] ) != list : # if it's string, only one state previous_state = ewp_dic_subset [ index_option ] [ 'state' ] ewp_dic_subset [ index_option ] [ 'state' ] = [ ] ewp_dic_subset [ index_option ] [ 'state' ] . append ( previous_state ) for item in project_dic [ 'misc' ] [ flag_type ] : ewp_dic_subset [ index_option ] [ 'state' ] . append ( item ) except KeyError : return | Flags from misc to set to ewp project | 292 | 9 |
25,985 | def _ewp_files_set ( self , ewp_dic , project_dic ) : # empty any files in the template which are not grouped try : ewp_dic [ 'project' ] [ 'file' ] = [ ] except KeyError : pass # empty groups ewp_dic [ 'project' ] [ 'group' ] = [ ] i = 0 for group_name , files in project_dic [ 'groups' ] . items ( ) : ewp_dic [ 'project' ] [ 'group' ] . append ( { 'name' : group_name , 'file' : [ ] } ) for file in files : ewp_dic [ 'project' ] [ 'group' ] [ i ] [ 'file' ] . append ( { 'name' : file } ) ewp_dic [ 'project' ] [ 'group' ] [ i ] [ 'file' ] = sorted ( ewp_dic [ 'project' ] [ 'group' ] [ i ] [ 'file' ] , key = lambda x : os . path . basename ( x [ 'name' ] . lower ( ) ) ) i += 1 | Fills files in the ewp dictionary | 255 | 8 |
25,986 | def _clean_xmldict_single_dic ( self , dictionary ) : for k , v in dictionary . items ( ) : if v is None : dictionary [ k ] = '' | Every None replace by in the dic as xml parsers puts None in those fields which is not valid for IAR | 41 | 25 |
25,987 | def _fix_paths ( self , data ) : data [ 'include_paths' ] = [ join ( '$PROJ_DIR$' , path ) for path in data [ 'include_paths' ] ] if data [ 'linker_file' ] : data [ 'linker_file' ] = join ( '$PROJ_DIR$' , data [ 'linker_file' ] ) data [ 'groups' ] = { } for attribute in SOURCE_KEYS : for k , v in data [ attribute ] . items ( ) : if k not in data [ 'groups' ] : data [ 'groups' ] [ k ] = [ ] data [ 'groups' ] [ k ] . extend ( [ join ( '$PROJ_DIR$' , file ) for file in v ] ) for k , v in data [ 'include_files' ] . items ( ) : if k not in data [ 'groups' ] : data [ 'groups' ] [ k ] = [ ] data [ 'groups' ] [ k ] . extend ( [ join ( '$PROJ_DIR$' , file ) for file in v ] ) # sort groups data [ 'groups' ] = OrderedDict ( sorted ( data [ 'groups' ] . items ( ) , key = lambda t : t [ 0 ] ) ) | All paths needs to be fixed - add PROJ_DIR prefix + normalize | 290 | 16 |
25,988 | def build_project ( self ) : # > IarBuild [project_path] -build [project_name] proj_path = join ( getcwd ( ) , self . workspace [ 'files' ] [ 'ewp' ] ) if proj_path . split ( '.' ) [ - 1 ] != 'ewp' : proj_path += '.ewp' if not os . path . exists ( proj_path ) : logger . debug ( "The file: %s does not exists, exported prior building?" % proj_path ) return - 1 logger . debug ( "Building IAR project: %s" % proj_path ) args = [ join ( self . env_settings . get_env_settings ( 'iar' ) , 'IarBuild.exe' ) , proj_path , '-build' , os . path . splitext ( os . path . basename ( self . workspace [ 'files' ] [ 'ewp' ] ) ) [ 0 ] ] logger . debug ( args ) try : p = Popen ( args , stdin = PIPE , stdout = PIPE , stderr = PIPE ) output , err = p . communicate ( ) except : logger . error ( "Project: %s build failed. Please check IARBUILD path in the user_settings.py file." % self . workspace [ 'files' ] [ 'ewp' ] ) return - 1 else : build_log_path = os . path . join ( os . path . dirname ( proj_path ) , 'build_log.txt' ) with open ( build_log_path , 'w' ) as f : f . write ( output ) num_errors = self . _parse_subprocess_output ( output ) if num_errors == 0 : logger . info ( "Project: %s build completed." % self . workspace [ 'files' ] [ 'ewp' ] ) return 0 else : logger . error ( "Project: %s build failed with %d errors" % ( self . workspace [ 'files' ] [ 'ewp' ] , num_errors ) ) return - 1 | Build IAR project | 467 | 4 |
25,989 | def gen_file_jinja ( self , template_file , data , output , dest_path ) : if not os . path . exists ( dest_path ) : os . makedirs ( dest_path ) output = join ( dest_path , output ) logger . debug ( "Generating: %s" % output ) env = Environment ( ) env . loader = FileSystemLoader ( self . TEMPLATE_DIR ) # TODO: undefined=StrictUndefined - this needs fixes in templates template = env . get_template ( template_file ) target_text = template . render ( data ) open ( output , "w" ) . write ( target_text ) return dirname ( output ) , output | Fills data to the project template using jinja2 . | 153 | 13 |
25,990 | def _expand_data ( self , old_data , new_data , group ) : for file in old_data : if file : extension = file . split ( "." ) [ - 1 ] . lower ( ) if extension in self . file_types . keys ( ) : new_data [ 'groups' ] [ group ] . append ( self . _expand_one_file ( normpath ( file ) , new_data , extension ) ) else : logger . debug ( "Filetype for file %s not recognized" % file ) if hasattr ( self , '_expand_sort_key' ) : new_data [ 'groups' ] [ group ] = sorted ( new_data [ 'groups' ] [ group ] , key = self . _expand_sort_key ) | data expansion - uvision needs filename and path separately . | 171 | 11 |
25,991 | def _get_groups ( self , data ) : groups = [ ] for attribute in SOURCE_KEYS : for k , v in data [ attribute ] . items ( ) : if k == None : k = 'Sources' if k not in groups : groups . append ( k ) for k , v in data [ 'include_files' ] . items ( ) : if k == None : k = 'Includes' if k not in groups : groups . append ( k ) return groups | Get all groups defined | 102 | 4 |
25,992 | def _iterate ( self , data , expanded_data ) : for attribute in SOURCE_KEYS : for k , v in data [ attribute ] . items ( ) : if k == None : group = 'Sources' else : group = k if group in data [ attribute ] . keys ( ) : self . _expand_data ( data [ attribute ] [ group ] , expanded_data , group ) for k , v in data [ 'include_files' ] . items ( ) : if k == None : group = 'Includes' else : group = k self . _expand_data ( data [ 'include_files' ] [ group ] , expanded_data , group ) # sort groups expanded_data [ 'groups' ] = OrderedDict ( sorted ( expanded_data [ 'groups' ] . items ( ) , key = lambda t : t [ 0 ] ) ) | _Iterate through all data store the result expansion in extended dictionary | 188 | 13 |
25,993 | def export_project ( self ) : output = copy . deepcopy ( self . generated_project ) data_for_make = self . workspace . copy ( ) self . exporter . process_data_for_makefile ( data_for_make ) output [ 'path' ] , output [ 'files' ] [ 'makefile' ] = self . gen_file_jinja ( 'makefile_gcc.tmpl' , data_for_make , 'Makefile' , data_for_make [ 'output_dir' ] [ 'path' ] ) expanded_dic = self . workspace . copy ( ) expanded_dic [ 'rel_path' ] = data_for_make [ 'output_dir' ] [ 'rel_path' ] groups = self . _get_groups ( expanded_dic ) expanded_dic [ 'groups' ] = { } for group in groups : expanded_dic [ 'groups' ] [ group ] = [ ] self . _iterate ( self . workspace , expanded_dic ) # Project file project_path , output [ 'files' ] [ 'cproj' ] = self . gen_file_jinja ( 'eclipse_makefile.cproject.tmpl' , expanded_dic , '.cproject' , data_for_make [ 'output_dir' ] [ 'path' ] ) project_path , output [ 'files' ] [ 'proj_file' ] = self . gen_file_jinja ( 'eclipse.project.tmpl' , expanded_dic , '.project' , data_for_make [ 'output_dir' ] [ 'path' ] ) return output | Processes groups and misc options specific for eclipse and run generator | 366 | 12 |
25,994 | def generate ( self , tool , copied = False , copy = False ) : # copied - already done by external script, copy - do actual copy tools = [ ] if not tool : logger . info ( "Workspace supports one tool for all projects within." ) return - 1 else : tools = [ tool ] result = 0 for export_tool in tools : tool_export = ToolsSupported ( ) . get_tool ( export_tool ) if tool_export is None : result = - 1 continue project_export_dir_overwrite = False if self . settings . export_location_format != self . settings . DEFAULT_EXPORT_LOCATION_FORMAT : location_format = self . settings . export_location_format else : if 'export_dir' in self . workspace_settings : location_format = self . workspace_settings [ 'export_dir' ] [ 0 ] project_export_dir_overwrite = True else : location_format = self . settings . export_location_format # substitute all of the different dynamic values location = PartialFormatter ( ) . format ( location_format , * * { 'project_name' : self . name , 'tool' : tool , 'workspace_name' : self . name } ) workspace_dic = { 'projects' : [ ] , 'settings' : { 'name' : self . name , 'path' : os . path . normpath ( location ) , } , } for project in self . projects : generated_files = { 'projects' : [ ] , 'workspaces' : [ ] , } if project_export_dir_overwrite : project . project [ 'common' ] [ 'export_dir' ] = location # Merge all dics, copy sources if required, correct output dir. This happens here # because we need tool to set proper path (tool might be used as string template) project . _fill_export_dict ( export_tool , copied ) if copy : project . _copy_sources_to_generated_destination ( ) project . project [ 'export' ] [ 'singular' ] = False files = tool_export ( project . project [ 'export' ] , self . settings ) . export_project ( ) # we gather all generated files, needed for workspace files workspace_dic [ 'projects' ] . append ( files ) generated_files [ 'projects' ] . 
append ( files ) # all projects are genereated, now generate workspace files generated_files [ 'workspaces' ] = tool_export ( workspace_dic , self . settings ) . export_workspace ( ) self . generated_files [ export_tool ] = generated_files return result | Generates a workspace | 570 | 4 |
25,995 | def _validate_tools ( self , tool ) : tools = [ ] if not tool : if len ( self . project [ 'common' ] [ 'tools_supported' ] ) == 0 : logger . info ( "No tool defined." ) return - 1 tools = self . project [ 'common' ] [ 'tools_supported' ] else : tools = [ tool ] return tools | Use tool_supported or tool | 81 | 6 |
25,996 | def _generate_output_dir ( settings , path ) : relpath = os . path . relpath ( settings . root , path ) count = relpath . count ( os . sep ) + 1 return relpath + os . path . sep , count | This is a separate function so that it can be more easily tested | 54 | 13 |
25,997 | def _copy_sources_to_generated_destination ( self ) : files = [ ] for key in FILES_EXTENSIONS . keys ( ) : if type ( self . project [ 'export' ] [ key ] ) is dict : for k , v in self . project [ 'export' ] [ key ] . items ( ) : files . extend ( v ) elif type ( self . project [ 'export' ] [ key ] ) is list : files . extend ( self . project [ 'export' ] [ key ] ) else : files . append ( self . project [ 'export' ] [ key ] ) destination = os . path . join ( self . settings . root , self . project [ 'export' ] [ 'output_dir' ] [ 'path' ] ) if os . path . exists ( destination ) : shutil . rmtree ( destination ) for item in files : s = os . path . join ( self . settings . root , item ) d = os . path . join ( destination , item ) if os . path . isdir ( s ) : shutil . copytree ( s , d ) else : if not os . path . exists ( os . path . dirname ( d ) ) : os . makedirs ( os . path . join ( self . settings . root , os . path . dirname ( d ) ) ) shutil . copy2 ( s , d ) | Copies all project files to specified directory - generated dir | 301 | 11 |
25,998 | def clean ( self , tool ) : tools = self . _validate_tools ( tool ) if tools == - 1 : return - 1 for current_tool in tools : # We get the export dict formed, then use it for cleaning self . _fill_export_dict ( current_tool ) path = self . project [ 'export' ] [ 'output_dir' ] [ 'path' ] if os . path . isdir ( path ) : logger . info ( "Cleaning directory %s" % path ) shutil . rmtree ( path ) return 0 | Clean a project | 120 | 3 |
25,999 | def generate ( self , tool , copied = False , copy = False ) : tools = self . _validate_tools ( tool ) if tools == - 1 : return - 1 generated_files = { } result = 0 for export_tool in tools : exporter = ToolsSupported ( ) . get_tool ( export_tool ) # None is an error if exporter is None : result = - 1 logger . debug ( "Tool: %s was not found" % export_tool ) continue self . _fill_export_dict ( export_tool , copied ) if copy : logger . debug ( "Copying sources to the output directory" ) self . _copy_sources_to_generated_destination ( ) # dump a log file if debug is enabled if logger . isEnabledFor ( logging . DEBUG ) : dump_data = { } dump_data [ 'common' ] = self . project [ 'common' ] dump_data [ 'tool_specific' ] = self . project [ 'tool_specific' ] dump_data [ 'merged' ] = self . project [ 'export' ] handler = logging . FileHandler ( os . path . join ( os . getcwd ( ) , "%s.log" % self . name ) , "w" , encoding = None , delay = "true" ) handler . setLevel ( logging . DEBUG ) logger . addHandler ( handler ) logger . debug ( "\n" + yaml . dump ( dump_data ) ) files = exporter ( self . project [ 'export' ] , self . settings ) . export_project ( ) generated_files [ export_tool ] = files self . generated_files = generated_files return result | Generates a project | 358 | 4 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.