idx int64 0 251k | question stringlengths 53 3.53k | target stringlengths 5 1.23k | len_question int64 20 893 | len_target int64 3 238 |
|---|---|---|---|---|
240,600 | def delete ( self , * args , * * kwargs ) : source_cache = self . get_source_cache ( ) # First, delete any related thumbnails. self . delete_thumbnails ( source_cache ) # Next, delete the source image. super ( ThumbnailerFieldFile , self ) . delete ( * args , * * kwargs ) # Finally, delete the source cache entry. if source_cache and source_cache . pk is not None : source_cache . delete ( ) | Delete the image along with any generated thumbnails . | 108 | 10 |
240,601 | def delete_thumbnails ( self , source_cache = None ) : source_cache = self . get_source_cache ( ) deleted = 0 if source_cache : thumbnail_storage_hash = utils . get_storage_hash ( self . thumbnail_storage ) for thumbnail_cache in source_cache . thumbnails . all ( ) : # Only attempt to delete the file if it was stored using the # same storage as is currently used. if thumbnail_cache . storage_hash == thumbnail_storage_hash : self . thumbnail_storage . delete ( thumbnail_cache . name ) # Delete the cache thumbnail instance too. thumbnail_cache . delete ( ) deleted += 1 return deleted | Delete any thumbnails generated from the source image . | 142 | 10 |
240,602 | def get_thumbnails ( self , * args , * * kwargs ) : # First, delete any related thumbnails. source_cache = self . get_source_cache ( ) if source_cache : thumbnail_storage_hash = utils . get_storage_hash ( self . thumbnail_storage ) for thumbnail_cache in source_cache . thumbnails . all ( ) : # Only iterate files which are stored using the current # thumbnail storage. if thumbnail_cache . storage_hash == thumbnail_storage_hash : yield ThumbnailFile ( name = thumbnail_cache . name , storage = self . thumbnail_storage ) | Return an iterator which returns ThumbnailFile instances . | 132 | 10 |
240,603 | def save ( self , name , content , * args , * * kwargs ) : options = getattr ( self . field , 'resize_source' , None ) if options : if 'quality' not in options : options [ 'quality' ] = self . thumbnail_quality content = Thumbnailer ( content , name ) . generate_thumbnail ( options ) # If the generated extension differs from the original, use it # instead. orig_name , ext = os . path . splitext ( name ) generated_ext = os . path . splitext ( content . name ) [ 1 ] if generated_ext . lower ( ) != ext . lower ( ) : name = orig_name + generated_ext super ( ThumbnailerImageFieldFile , self ) . save ( name , content , * args , * * kwargs ) | Save the image . | 179 | 4 |
240,604 | def queryset_iterator ( queryset , chunksize = 1000 ) : if queryset . exists ( ) : primary_key = 0 last_pk = queryset . order_by ( '-pk' ) [ 0 ] . pk queryset = queryset . order_by ( 'pk' ) while primary_key < last_pk : for row in queryset . filter ( pk__gt = primary_key ) [ : chunksize ] : primary_key = row . pk yield row gc . collect ( ) | The queryset iterator helps to keep the memory consumption down . And also making it easier to process for weaker computers . | 123 | 24 |
240,605 | def print_stats ( self ) : print ( "{0:-<48}" . format ( str ( datetime . now ( ) . strftime ( '%Y-%m-%d %H:%M ' ) ) ) ) print ( "{0:<40} {1:>7}" . format ( "Sources checked:" , self . sources ) ) print ( "{0:<40} {1:>7}" . format ( "Source references deleted from DB:" , self . source_refs_deleted ) ) print ( "{0:<40} {1:>7}" . format ( "Thumbnails deleted from disk:" , self . thumbnails_deleted ) ) print ( "(Completed in %s seconds)\n" % self . execution_time ) | Print statistics about the cleanup performed . | 165 | 7 |
240,606 | def populate_from_settings ( self ) : settings_aliases = settings . THUMBNAIL_ALIASES if settings_aliases : for target , aliases in settings_aliases . items ( ) : target_aliases = self . _aliases . setdefault ( target , { } ) target_aliases . update ( aliases ) | Populate the aliases from the THUMBNAIL_ALIASES setting . | 74 | 17 |
240,607 | def set ( self , alias , options , target = None ) : target = self . _coerce_target ( target ) or '' target_aliases = self . _aliases . setdefault ( target , { } ) target_aliases [ alias ] = options | Add an alias . | 57 | 4 |
240,608 | def get ( self , alias , target = None ) : for target_part in reversed ( list ( self . _get_targets ( target ) ) ) : options = self . _get ( target_part , alias ) if options : return options | Get a dictionary of aliased options . | 53 | 8 |
240,609 | def all ( self , target = None , include_global = True ) : aliases = { } for target_part in self . _get_targets ( target , include_global ) : aliases . update ( self . _aliases . get ( target_part , { } ) ) return aliases | Get a dictionary of all aliases and their options . | 63 | 10 |
240,610 | def _get ( self , target , alias ) : if target not in self . _aliases : return return self . _aliases [ target ] . get ( alias ) | Internal method to get a specific alias . | 36 | 8 |
240,611 | def _get_targets ( self , target , include_global = True ) : target = self . _coerce_target ( target ) if include_global : yield '' if not target : return target_bits = target . split ( '.' ) for i in range ( len ( target_bits ) ) : yield '.' . join ( target_bits [ : i + 1 ] ) | Internal iterator to split up a complete target into the possible parts it may match . | 84 | 16 |
240,612 | def _coerce_target ( self , target ) : if not target or isinstance ( target , six . string_types ) : return target if not hasattr ( target , 'instance' ) : return None if getattr ( target . instance , '_deferred' , False ) : model = target . instance . _meta . proxy_for_model else : model = target . instance . __class__ return '%s.%s.%s' % ( model . _meta . app_label , model . __name__ , target . field . name , ) | Internal method to coerce a target to a string . | 122 | 11 |
240,613 | def image_entropy ( im ) : if not isinstance ( im , Image . Image ) : # Can only deal with PIL images. Fall back to a constant entropy. return 0 hist = im . histogram ( ) hist_size = float ( sum ( hist ) ) hist = [ h / hist_size for h in hist ] return - sum ( [ p * math . log ( p , 2 ) for p in hist if p != 0 ] ) | Calculate the entropy of an image . Used for smart cropping . | 96 | 15 |
240,614 | def dynamic_import ( import_string ) : # Use rfind rather than rsplit for Python 2.3 compatibility. lastdot = import_string . rfind ( '.' ) if lastdot == - 1 : return __import__ ( import_string , { } , { } , [ ] ) module_name , attr = import_string [ : lastdot ] , import_string [ lastdot + 1 : ] parent_module = __import__ ( module_name , { } , { } , [ attr ] ) return getattr ( parent_module , attr ) | Dynamically import a module or object . | 123 | 9 |
240,615 | def is_transparent ( image ) : if not isinstance ( image , Image . Image ) : # Can only deal with PIL images, fall back to the assumption that that # it's not transparent. return False return ( image . mode in ( 'RGBA' , 'LA' ) or ( image . mode == 'P' and 'transparency' in image . info ) ) | Check to see if an image is transparent . | 81 | 9 |
240,616 | def is_progressive ( image ) : if not isinstance ( image , Image . Image ) : # Can only check PIL images for progressive encoding. return False return ( 'progressive' in image . info ) or ( 'progression' in image . info ) | Check to see if an image is progressive . | 56 | 9 |
240,617 | def get_modified_time ( storage , name ) : try : try : # Prefer Django 1.10 API and fall back to old one modified_time = storage . get_modified_time ( name ) except AttributeError : modified_time = storage . modified_time ( name ) except OSError : return 0 except NotImplementedError : return None if modified_time and timezone . is_naive ( modified_time ) : if getattr ( settings , 'USE_TZ' , False ) : default_timezone = timezone . get_default_timezone ( ) return timezone . make_aware ( modified_time , default_timezone ) return modified_time | Get modified time from storage ensuring the result is a timezone - aware datetime . | 149 | 17 |
240,618 | def namedtuple ( typename , field_names , verbose = False , rename = False ) : if isinstance ( field_names , str ) : field_names = field_names . replace ( ',' , ' ' ) . split ( ) field_names = list ( map ( str , field_names ) ) typename = str ( typename ) for name in [ typename ] + field_names : if type ( name ) != str : raise TypeError ( 'Type names and field names must be strings' ) if _iskeyword ( name ) : raise ValueError ( 'Type names and field names cannot be a ' 'keyword: %r' % name ) if not _isidentifier ( typename ) : raise ValueError ( 'Type names must be valid ' 'identifiers: %r' % name ) seen = set ( ) for name in field_names : if name . startswith ( '_' ) and not rename : raise ValueError ( 'Field names cannot start with an underscore: ' '%r' % name ) if name in seen : raise ValueError ( 'Encountered duplicate field name: %r' % name ) seen . add ( name ) arg_names = [ '_' + str ( i ) for i in range ( len ( field_names ) ) ] # Fill-in the class template class_definition = _class_template . format ( typename = typename , field_names = tuple ( field_names ) , num_fields = len ( field_names ) , arg_list = repr ( tuple ( arg_names ) ) . replace ( "'" , "" ) [ 1 : - 1 ] , repr_fmt = ', ' . join ( _repr_template . format ( name = name ) for name in field_names ) , field_defs = '\n' . join ( _field_template . format ( index = index , name = name ) for index , name in enumerate ( field_names ) if _isidentifier ( name ) ) ) # Execute the template string in a temporary namespace and support # tracing utilities by setting a value for frame.f_globals['__name__'] namespace = dict ( __name__ = 'namedtuple_%s' % typename ) exec ( class_definition , namespace ) result = namespace [ typename ] result . _source = class_definition if verbose : print ( result . _source ) # For pickling to work, the __module__ variable needs to be set to the frame # where the named tuple is created. 
Bypass this step in environments where # sys._getframe is not defined (Jython for example) or sys._getframe is not # defined for arguments greater than 0 (IronPython). try : result . __module__ = _sys . _getframe ( 1 ) . f_globals . get ( '__name__' , '__main__' ) except ( AttributeError , ValueError ) : pass return result | Returns a new subclass of tuple with named fields . This is a patched version of collections . namedtuple from the stdlib . Unlike the latter it accepts non - identifier strings as field names . All values are accessible through dict syntax . Fields whose names are identifiers are also accessible via attribute syntax as in ordinary namedtuples alongside traditional indexing . This feature is needed as SDMX allows field names to contain - . | 635 | 83 |
240,619 | def write_source ( self , filename ) : with open ( filename , 'w' ) as fp : return json . dump ( self . message . _elem , fp , indent = 4 , sort_keys = True ) | Save source to file by calling write on the root element . | 49 | 12 |
240,620 | def write_source ( self , filename ) : return self . message . _elem . getroottree ( ) . write ( filename , encoding = 'utf8' ) | Save XML source to file by calling write on the root element . | 37 | 13 |
240,621 | def group_attrib ( self ) : group_attributes = [ g . attrib for g in self . dataset . groups if self in g ] if group_attributes : return concat_namedtuples ( * group_attributes ) | return a namedtuple containing all attributes attached to groups of which the given series is a member for each group of which the series is a member | 52 | 29 |
240,622 | def read_instance ( self , cls , sdmxobj , offset = None , first_only = True ) : if offset : try : base = self . _paths [ offset ] ( sdmxobj . _elem ) [ 0 ] except IndexError : return None else : base = sdmxobj . _elem result = self . _paths [ cls ] ( base ) if result : if first_only : return cls ( self , result [ 0 ] ) else : return [ cls ( self , i ) for i in result ] | If cls in _paths and matches return an instance of cls with the first XML element or if first_only is False a list of cls instances for all elements found If no matches were found return None . | 121 | 45 |
240,623 | def load_agency_profile ( cls , source ) : if not isinstance ( source , str_type ) : # so it must be a text file source = source . read ( ) new_agencies = json . loads ( source ) cls . _agencies . update ( new_agencies ) | Classmethod loading metadata on a data provider . source must be a json - formatted string or file - like object describing one or more data providers ( URL of the SDMX web API resource types etc . ) The dict Request . _agencies is updated with the metadata from the source . | 65 | 57 |
240,624 | def series_keys ( self , flow_id , cache = True ) : # Check if requested series keys are already cached cache_id = 'series_keys_' + flow_id if cache_id in self . cache : return self . cache [ cache_id ] else : # download an empty dataset with all available series keys resp = self . data ( flow_id , params = { 'detail' : 'serieskeysonly' } ) l = list ( s . key for s in resp . data . series ) df = PD . DataFrame ( l , columns = l [ 0 ] . _fields , dtype = 'category' ) if cache : self . cache [ cache_id ] = df return df | Get an empty dataset with all possible series keys . | 150 | 10 |
240,625 | def preview_data ( self , flow_id , key = None , count = True , total = True ) : all_keys = self . series_keys ( flow_id ) # Handle the special case that no key is provided if not key : if count : return all_keys . shape [ 0 ] else : return all_keys # So there is a key specifying at least one dimension value. # Wrap single values in 1-elem list for uniform treatment key_l = { k : [ v ] if isinstance ( v , str_type ) else v for k , v in key . items ( ) } # order dim_names that are present in the key dim_names = [ k for k in all_keys if k in key ] # Drop columns that are not in the key key_df = all_keys . loc [ : , dim_names ] if total : # DataFrame with matching series keys bool_series = reduce ( and_ , ( key_df . isin ( key_l ) [ col ] for col in dim_names ) ) if count : return bool_series . value_counts ( ) [ True ] else : return all_keys [ bool_series ] else : # Dict of value combinations as dict keys key_product = product ( * ( key_l [ k ] for k in dim_names ) ) # Replace key tuples by namedtuples PartialKey = namedtuple_factory ( 'PartialKey' , dim_names ) matches = { PartialKey ( k ) : reduce ( and_ , ( key_df . isin ( { k1 : [ v1 ] for k1 , v1 in zip ( dim_names , k ) } ) [ col ] for col in dim_names ) ) for k in key_product } if not count : # dict mapping each key to DataFrame with selected key-set return { k : all_keys [ v ] for k , v in matches . items ( ) } else : # Number of series per key return { k : v . value_counts ( ) [ True ] for k , v in matches . items ( ) } | Get keys or number of series for a prospective dataset query allowing for keys with multiple values per dimension . It downloads the complete list of series keys for a dataflow rather than using constraints and DSD . This feature is however not supported by all data providers . ECB and UNSD are known to work . | 451 | 60 |
240,626 | def write ( self , source = None , * * kwargs ) : if not source : source = self . msg return self . _writer . write ( source = source , * * kwargs ) | Wrapper to call the writer s write method if present . | 43 | 13 |
240,627 | def parse_json ( path ) : # type: (str) -> List[FunctionInfo] with open ( path ) as f : data = json . load ( f ) # type: List[RawEntry] result = [ ] def assert_type ( value , typ ) : # type: (object, type) -> None assert isinstance ( value , typ ) , '%s: Unexpected type %r' % ( path , type ( value ) . __name__ ) def assert_dict_item ( dictionary , key , typ ) : # type: (Mapping[Any, Any], str, type) -> None assert key in dictionary , '%s: Missing dictionary key %r' % ( path , key ) value = dictionary [ key ] assert isinstance ( value , typ ) , '%s: Unexpected type %r for key %r' % ( path , type ( value ) . __name__ , key ) assert_type ( data , list ) for item in data : assert_type ( item , dict ) assert_dict_item ( item , 'path' , Text ) assert_dict_item ( item , 'line' , int ) assert_dict_item ( item , 'func_name' , Text ) assert_dict_item ( item , 'type_comments' , list ) for comment in item [ 'type_comments' ] : assert_type ( comment , Text ) assert_type ( item [ 'samples' ] , int ) info = FunctionInfo ( encode ( item [ 'path' ] ) , item [ 'line' ] , encode ( item [ 'func_name' ] ) , [ encode ( comment ) for comment in item [ 'type_comments' ] ] , item [ 'samples' ] ) result . append ( info ) return result | Deserialize a JSON file containing runtime collected types . | 379 | 11 |
240,628 | def tokenize ( s ) : # type: (str) -> List[Token] original = s tokens = [ ] # type: List[Token] while True : if not s : tokens . append ( End ( ) ) return tokens elif s [ 0 ] == ' ' : s = s [ 1 : ] elif s [ 0 ] in '()[],*' : tokens . append ( Separator ( s [ 0 ] ) ) s = s [ 1 : ] elif s [ : 2 ] == '->' : tokens . append ( Separator ( '->' ) ) s = s [ 2 : ] else : m = re . match ( r'[-\w]+(\s*(\.|:)\s*[-/\w]*)*' , s ) if not m : raise ParseError ( original ) fullname = m . group ( 0 ) fullname = fullname . replace ( ' ' , '' ) if fullname in TYPE_FIXUPS : fullname = TYPE_FIXUPS [ fullname ] # pytz creates classes with the name of the timezone being used: # https://github.com/stub42/pytz/blob/f55399cddbef67c56db1b83e0939ecc1e276cf42/src/pytz/tzfile.py#L120-L123 # This causes pyannotates to crash as it's invalid to have a class # name with a `/` in it (e.g. "pytz.tzfile.America/Los_Angeles") if fullname . startswith ( 'pytz.tzfile.' ) : fullname = 'datetime.tzinfo' if '-' in fullname or '/' in fullname : # Not a valid Python name; there are many places that # generate these, so we just substitute Any rather # than crashing. fullname = 'Any' tokens . append ( DottedName ( fullname ) ) s = s [ len ( m . group ( 0 ) ) : ] | Translate a type comment into a list of tokens . | 441 | 11 |
240,629 | def generate_annotations_json_string ( source_path , only_simple = False ) : # type: (str, bool) -> List[FunctionData] items = parse_json ( source_path ) results = [ ] for item in items : signature = unify_type_comments ( item . type_comments ) if is_signature_simple ( signature ) or not only_simple : data = { 'path' : item . path , 'line' : item . line , 'func_name' : item . func_name , 'signature' : signature , 'samples' : item . samples } # type: FunctionData results . append ( data ) return results | Produce annotation data JSON file from a JSON file with runtime - collected types . | 145 | 16 |
240,630 | def _my_hash ( arg_list ) : # type: (List[Any]) -> int res = 0 for arg in arg_list : res = res * 31 + hash ( arg ) return res | Simple helper hash function | 43 | 4 |
240,631 | def name_from_type ( type_ ) : # type: (InternalType) -> str if isinstance ( type_ , ( DictType , ListType , TupleType , SetType , IteratorType ) ) : return repr ( type_ ) else : if type_ . __name__ != 'NoneType' : module = type_ . __module__ if module in BUILTIN_MODULES or module == '<unknown>' : # Omit module prefix for known built-ins, for convenience. This # makes unit tests for this module simpler. # Also ignore '<uknown>' modules so pyannotate can parse these types return type_ . __name__ else : name = getattr ( type_ , '__qualname__' , None ) or type_ . __name__ delim = '.' if '.' not in name else ':' return '%s%s%s' % ( module , delim , name ) else : return 'None' | Helper function to get PEP - 484 compatible string representation of our internal types . | 208 | 17 |
240,632 | def resolve_type ( arg ) : # type: (object) -> InternalType arg_type = type ( arg ) if arg_type == list : assert isinstance ( arg , list ) # this line helps mypy figure out types sample = arg [ : min ( 4 , len ( arg ) ) ] tentative_type = TentativeType ( ) for sample_item in sample : tentative_type . add ( resolve_type ( sample_item ) ) return ListType ( tentative_type ) elif arg_type == set : assert isinstance ( arg , set ) # this line helps mypy figure out types sample = [ ] iterator = iter ( arg ) for i in range ( 0 , min ( 4 , len ( arg ) ) ) : sample . append ( next ( iterator ) ) tentative_type = TentativeType ( ) for sample_item in sample : tentative_type . add ( resolve_type ( sample_item ) ) return SetType ( tentative_type ) elif arg_type == FakeIterator : assert isinstance ( arg , FakeIterator ) # this line helps mypy figure out types sample = [ ] iterator = iter ( arg ) for i in range ( 0 , min ( 4 , len ( arg ) ) ) : sample . append ( next ( iterator ) ) tentative_type = TentativeType ( ) for sample_item in sample : tentative_type . add ( resolve_type ( sample_item ) ) return IteratorType ( tentative_type ) elif arg_type == tuple : assert isinstance ( arg , tuple ) # this line helps mypy figure out types sample = list ( arg [ : min ( 10 , len ( arg ) ) ] ) return TupleType ( [ resolve_type ( sample_item ) for sample_item in sample ] ) elif arg_type == dict : assert isinstance ( arg , dict ) # this line helps mypy figure out types key_tt = TentativeType ( ) val_tt = TentativeType ( ) for i , ( k , v ) in enumerate ( iteritems ( arg ) ) : if i > 4 : break key_tt . add ( resolve_type ( k ) ) val_tt . add ( resolve_type ( v ) ) return DictType ( key_tt , val_tt ) else : return type ( arg ) | Resolve object to one of our internal collection types or generic built - in type . | 487 | 17 |
240,633 | def prep_args ( arg_info ) : # type: (ArgInfo) -> ResolvedTypes # pull out any varargs declarations filtered_args = [ a for a in arg_info . args if getattr ( arg_info , 'varargs' , None ) != a ] # we don't care about self/cls first params (perhaps we can test if it's an instance/class method another way?) if filtered_args and ( filtered_args [ 0 ] in ( 'self' , 'cls' ) ) : filtered_args = filtered_args [ 1 : ] pos_args = [ ] # type: List[InternalType] if filtered_args : for arg in filtered_args : if isinstance ( arg , str ) and arg in arg_info . locals : # here we know that return type will be of type "type" resolved_type = resolve_type ( arg_info . locals [ arg ] ) pos_args . append ( resolved_type ) else : pos_args . append ( type ( UnknownType ( ) ) ) varargs = None # type: Optional[List[InternalType]] if arg_info . varargs : varargs_tuple = arg_info . locals [ arg_info . varargs ] # It's unclear what all the possible values for 'varargs_tuple' are, # so perform a defensive type check since we don't want to crash here. if isinstance ( varargs_tuple , tuple ) : varargs = [ resolve_type ( arg ) for arg in varargs_tuple [ : 4 ] ] return ResolvedTypes ( pos_args = pos_args , varargs = varargs ) | Resolve types from ArgInfo | 355 | 6 |
240,634 | def _flush_signature ( key , return_type ) : # type: (FunctionKey, InternalType) -> None signatures = collected_signatures . setdefault ( key , set ( ) ) args_info = collected_args . pop ( key ) if len ( signatures ) < MAX_ITEMS_PER_FUNCTION : signatures . add ( ( args_info , return_type ) ) num_samples [ key ] = num_samples . get ( key , 0 ) + 1 | Store signature for a function . | 106 | 6 |
240,635 | def type_consumer ( ) : # type: () -> None # we are not interested in profiling type_consumer itself # but we start it before any other thread while True : item = _task_queue . get ( ) if isinstance ( item , KeyAndTypes ) : if item . key in collected_args : # Previous call didn't get a corresponding return, perhaps because we # stopped collecting types in the middle of a call or because of # a recursive function. _flush_signature ( item . key , UnknownType ) collected_args [ item . key ] = ArgTypes ( item . types ) else : assert isinstance ( item , KeyAndReturn ) if item . key in collected_args : _flush_signature ( item . key , item . return_type ) _task_queue . task_done ( ) | Infinite loop of the type consumer thread . It gets types to process from the task query . | 173 | 19 |
240,636 | def _make_sampling_sequence ( n ) : # type: (int) -> List[int] seq = list ( range ( 5 ) ) i = 50 while len ( seq ) < n : seq . append ( i ) i += 50 return seq | Return a list containing the proposed call event sampling sequence . | 54 | 11 |
240,637 | def default_filter_filename ( filename ) : # type: (Optional[str]) -> Optional[str] if filename is None : return None elif filename . startswith ( TOP_DIR ) : if filename . startswith ( TOP_DIR_DOT ) : # Skip subdirectories starting with dot (e.g. .vagrant). return None else : # Strip current directory and following slashes. return filename [ TOP_DIR_LEN : ] . lstrip ( os . sep ) elif filename . startswith ( os . sep ) : # Skip absolute paths not under current directory. return None else : return filename | Default filter for filenames . | 135 | 7 |
240,638 | def _filter_types ( types_dict ) : # type: (Dict[FunctionKey, T]) -> Dict[FunctionKey, T] def exclude ( k ) : # type: (FunctionKey) -> bool """Exclude filter""" return k . path . startswith ( '<' ) or k . func_name == '<module>' return { k : v for k , v in iteritems ( types_dict ) if not exclude ( k ) } | Filter type info before dumping it to the file . | 101 | 10 |
240,639 | def _dump_impl ( ) : # type: () -> List[FunctionData] filtered_signatures = _filter_types ( collected_signatures ) sorted_by_file = sorted ( iteritems ( filtered_signatures ) , key = ( lambda p : ( p [ 0 ] . path , p [ 0 ] . line , p [ 0 ] . func_name ) ) ) res = [ ] # type: List[FunctionData] for function_key , signatures in sorted_by_file : comments = [ _make_type_comment ( args , ret_type ) for args , ret_type in signatures ] res . append ( { 'path' : function_key . path , 'line' : function_key . line , 'func_name' : function_key . func_name , 'type_comments' : comments , 'samples' : num_samples . get ( function_key , 0 ) , } ) return res | Internal implementation for dump_stats and dumps_stats | 202 | 10 |
240,640 | def dump_stats ( filename ) : # type: (str) -> None res = _dump_impl ( ) f = open ( filename , 'w' ) json . dump ( res , f , indent = 4 ) f . close ( ) | Write collected information to file . | 51 | 6 |
240,641 | def init_types_collection ( filter_filename = default_filter_filename ) : # type: (Callable[[Optional[str]], Optional[str]]) -> None global _filter_filename _filter_filename = filter_filename sys . setprofile ( _trace_dispatch ) threading . setprofile ( _trace_dispatch ) | Setup profiler hooks to enable type collection . Call this one time from the main thread . | 73 | 18 |
240,642 | def add ( self , type ) : # type: (InternalType) -> None try : if isinstance ( type , SetType ) : if EMPTY_SET_TYPE in self . types_hashable : self . types_hashable . remove ( EMPTY_SET_TYPE ) elif isinstance ( type , ListType ) : if EMPTY_LIST_TYPE in self . types_hashable : self . types_hashable . remove ( EMPTY_LIST_TYPE ) elif isinstance ( type , IteratorType ) : if EMPTY_ITERATOR_TYPE in self . types_hashable : self . types_hashable . remove ( EMPTY_ITERATOR_TYPE ) elif isinstance ( type , DictType ) : if EMPTY_DICT_TYPE in self . types_hashable : self . types_hashable . remove ( EMPTY_DICT_TYPE ) for item in self . types_hashable : if isinstance ( item , DictType ) : if item . key_type == type . key_type : item . val_type . merge ( type . val_type ) return self . types_hashable . add ( type ) except ( TypeError , AttributeError ) : try : if type not in self . types : self . types . append ( type ) except AttributeError : if TypeWasIncomparable not in self . types : self . types . append ( TypeWasIncomparable ) | Add type to the runtime type samples . | 311 | 8 |
240,643 | def merge ( self , other ) : # type: (TentativeType) -> None for hashables in other . types_hashable : self . add ( hashables ) for non_hashbles in other . types : self . add ( non_hashbles ) | Merge two TentativeType instances | 56 | 7 |
240,644 | def infer_annotation ( type_comments ) : # type: (List[str]) -> Tuple[List[Argument], AbstractType] assert type_comments args = { } # type: Dict[int, Set[Argument]] returns = set ( ) for comment in type_comments : arg_types , return_type = parse_type_comment ( comment ) for i , arg_type in enumerate ( arg_types ) : args . setdefault ( i , set ( ) ) . add ( arg_type ) returns . add ( return_type ) combined_args = [ ] for i in sorted ( args ) : arg_infos = list ( args [ i ] ) kind = argument_kind ( arg_infos ) if kind is None : raise InferError ( 'Ambiguous argument kinds:\n' + '\n' . join ( type_comments ) ) types = [ arg . type for arg in arg_infos ] combined = combine_types ( types ) if str ( combined ) == 'None' : # It's very rare for an argument to actually be typed `None`, more likely than # not we simply don't have any data points for this argument. combined = UnionType ( [ ClassType ( 'None' ) , AnyType ( ) ] ) if kind != ARG_POS and ( len ( str ( combined ) ) > 120 or isinstance ( combined , UnionType ) ) : # Avoid some noise. combined = AnyType ( ) combined_args . append ( Argument ( combined , kind ) ) combined_return = combine_types ( returns ) return combined_args , combined_return | Given some type comments return a single inferred signature . | 344 | 10 |
240,645 | def argument_kind ( args ) : # type: (List[Argument]) -> Optional[str] kinds = set ( arg . kind for arg in args ) if len ( kinds ) != 1 : return None return kinds . pop ( ) | Return the kind of an argument based on one or more descriptions of the argument . | 50 | 16 |
240,646 | def combine_types ( types ) : # type: (Iterable[AbstractType]) -> AbstractType items = simplify_types ( types ) if len ( items ) == 1 : return items [ 0 ] else : return UnionType ( items ) | Given some types return a combined and simplified type . | 50 | 10 |
240,647 | def simplify_types ( types ) : # type: (Iterable[AbstractType]) -> List[AbstractType] flattened = flatten_types ( types ) items = filter_ignored_items ( flattened ) items = [ simplify_recursive ( item ) for item in items ] items = merge_items ( items ) items = dedupe_types ( items ) # We have to remove reundant items after everything has been simplified and # merged as this simplification may be what makes items redundant. items = remove_redundant_items ( items ) if len ( items ) > 3 : return [ AnyType ( ) ] else : return items | Given some types give simplified types representing the union of types . | 135 | 12 |
240,648 | def simplify_recursive ( typ ) : # type: (AbstractType) -> AbstractType if isinstance ( typ , UnionType ) : return combine_types ( typ . items ) elif isinstance ( typ , ClassType ) : simplified = ClassType ( typ . name , [ simplify_recursive ( arg ) for arg in typ . args ] ) args = simplified . args if ( simplified . name == 'Dict' and len ( args ) == 2 and isinstance ( args [ 0 ] , ClassType ) and args [ 0 ] . name in ( 'str' , 'Text' ) and isinstance ( args [ 1 ] , UnionType ) and not is_optional ( args [ 1 ] ) ) : # Looks like a potential case for TypedDict, which we don't properly support yet. return ClassType ( 'Dict' , [ args [ 0 ] , AnyType ( ) ] ) return simplified elif isinstance ( typ , TupleType ) : return TupleType ( [ simplify_recursive ( item ) for item in typ . items ] ) return typ | Simplify all components of a type . | 228 | 9 |
240,649 | def remove_redundant_items ( items ) : # type: (List[AbstractType]) -> List[AbstractType] result = [ ] for item in items : for other in items : if item is not other and is_redundant_union_item ( item , other ) : break else : result . append ( item ) return result | Filter out redundant union items . | 73 | 6 |
240,650 | def is_redundant_union_item ( first , other ) : # type: (AbstractType, AbstractType) -> bool if isinstance ( first , ClassType ) and isinstance ( other , ClassType ) : if first . name == 'str' and other . name == 'Text' : return True elif first . name == 'bool' and other . name == 'int' : return True elif first . name == 'int' and other . name == 'float' : return True elif ( first . name in ( 'List' , 'Dict' , 'Set' ) and other . name == first . name ) : if not first . args and other . args : return True elif len ( first . args ) == len ( other . args ) and first . args : result = all ( first_arg == other_arg or other_arg == AnyType ( ) for first_arg , other_arg in zip ( first . args , other . args ) ) return result return False | If union has both items is the first one redundant? | 213 | 11 |
240,651 | def merge_items ( items ) : # type: (List[AbstractType]) -> List[AbstractType] result = [ ] while items : item = items . pop ( ) merged = None for i , other in enumerate ( items ) : merged = merged_type ( item , other ) if merged : break if merged : del items [ i ] items . append ( merged ) else : result . append ( item ) return list ( reversed ( result ) ) | Merge union items that can be merged . | 95 | 9 |
240,652 | def merged_type ( t , s ) : # type: (AbstractType, AbstractType) -> Optional[AbstractType] if isinstance ( t , TupleType ) and isinstance ( s , TupleType ) : if len ( t . items ) == len ( s . items ) : return TupleType ( [ combine_types ( [ ti , si ] ) for ti , si in zip ( t . items , s . items ) ] ) all_items = t . items + s . items if all_items and all ( item == all_items [ 0 ] for item in all_items [ 1 : ] ) : # Merge multiple compatible fixed-length tuples into a variable-length tuple type. return ClassType ( 'Tuple' , [ all_items [ 0 ] ] ) elif ( isinstance ( t , TupleType ) and isinstance ( s , ClassType ) and s . name == 'Tuple' and len ( s . args ) == 1 ) : if all ( item == s . args [ 0 ] for item in t . items ) : # Merge fixed-length tuple and variable-length tuple. return s elif isinstance ( s , TupleType ) and isinstance ( t , ClassType ) and t . name == 'Tuple' : return merged_type ( s , t ) elif isinstance ( s , NoReturnType ) : return t elif isinstance ( t , NoReturnType ) : return s elif isinstance ( s , AnyType ) : # This seems to be usually desirable, since Anys tend to come from unknown types. return t elif isinstance ( t , AnyType ) : # Similar to above. return s return None | Return merged type if two items can be merged in to a different more general type . | 360 | 17 |
240,653 | def dump_annotations ( type_info , files ) : with open ( type_info ) as f : data = json . load ( f ) for item in data : path , line , func_name = item [ 'path' ] , item [ 'line' ] , item [ 'func_name' ] if files and path not in files : for f in files : if path . startswith ( os . path . join ( f , '' ) ) : break else : continue # Outer loop print ( "%s:%d: in %s:" % ( path , line , func_name ) ) type_comments = item [ 'type_comments' ] signature = unify_type_comments ( type_comments ) arg_types = signature [ 'arg_types' ] return_type = signature [ 'return_type' ] print ( " # type: (%s) -> %s" % ( ", " . join ( arg_types ) , return_type ) ) | Dump annotations out of type_info filtered by files . | 207 | 12 |
240,654 | def strip_py ( arg ) : # type: (str) -> Optional[str] for ext in PY_EXTENSIONS : if arg . endswith ( ext ) : return arg [ : - len ( ext ) ] return None | Strip a trailing . py or . pyi suffix . Return None if no such suffix is found . | 51 | 21 |
240,655 | def get_decorators ( self , node ) : if node . parent is None : return [ ] results = { } if not self . decorated . match ( node . parent , results ) : return [ ] decorators = results . get ( 'dd' ) or [ results [ 'd' ] ] decs = [ ] for d in decorators : for child in d . children : if isinstance ( child , Leaf ) and child . type == token . NAME : decs . append ( child . value ) return decs | Return a list of decorators found on a function definition . | 111 | 12 |
240,656 | def has_return_exprs ( self , node ) : results = { } if self . return_expr . match ( node , results ) : return True for child in node . children : if child . type not in ( syms . funcdef , syms . classdef ) : if self . has_return_exprs ( child ) : return True return False | Traverse the tree below node looking for return expr . | 77 | 11 |
240,657 | def inform_if_paths_invalid ( egrc_path , examples_dir , custom_dir , debug = True ) : if ( not debug ) : return if ( egrc_path ) : _inform_if_path_does_not_exist ( egrc_path ) if ( examples_dir ) : _inform_if_path_does_not_exist ( examples_dir ) if ( custom_dir ) : _inform_if_path_does_not_exist ( custom_dir ) | If egrc_path examples_dir or custom_dir is truthy and debug is True informs the user that a path is not set . | 116 | 30 |
240,658 | def get_egrc_config ( cli_egrc_path ) : resolved_path = get_priority ( cli_egrc_path , DEFAULT_EGRC_PATH , None ) expanded_path = get_expanded_path ( resolved_path ) # Start as if nothing was defined in the egrc. egrc_config = get_empty_config ( ) if os . path . isfile ( expanded_path ) : egrc_config = get_config_tuple_from_egrc ( expanded_path ) return egrc_config | Return a Config namedtuple based on the contents of the egrc . | 125 | 16 |
240,659 | def get_resolved_config ( egrc_path , examples_dir , custom_dir , use_color , pager_cmd , squeeze , debug = True , ) : # Call this with the passed in values, NOT the resolved values. We are # informing the caller only if values passed in at the command line are # invalid. If you pass a path to a nonexistent egrc, for example, it's # helpful to know. If you don't have an egrc, and thus one isn't found # later at the default location, we don't want to notify them. inform_if_paths_invalid ( egrc_path , examples_dir , custom_dir ) # Expand the paths so we can use them with impunity later. examples_dir = get_expanded_path ( examples_dir ) custom_dir = get_expanded_path ( custom_dir ) # The general rule is: caller-defined, egrc-defined, defaults. We'll try # and get all three then use get_priority to choose the right one. egrc_config = get_egrc_config ( egrc_path ) resolved_examples_dir = get_priority ( examples_dir , egrc_config . examples_dir , DEFAULT_EXAMPLES_DIR ) resolved_examples_dir = get_expanded_path ( resolved_examples_dir ) resolved_custom_dir = get_priority ( custom_dir , egrc_config . custom_dir , DEFAULT_CUSTOM_DIR ) resolved_custom_dir = get_expanded_path ( resolved_custom_dir ) resolved_use_color = get_priority ( use_color , egrc_config . use_color , DEFAULT_USE_COLOR ) resolved_pager_cmd = get_priority ( pager_cmd , egrc_config . pager_cmd , DEFAULT_PAGER_CMD ) # There is no command line option for this, so in this case we will use the # priority: egrc, environment, DEFAULT. environment_editor_cmd = get_editor_cmd_from_environment ( ) resolved_editor_cmd = get_priority ( egrc_config . editor_cmd , environment_editor_cmd , DEFAULT_EDITOR_CMD ) color_config = None if resolved_use_color : default_color_config = get_default_color_config ( ) color_config = merge_color_configs ( egrc_config . color_config , default_color_config ) resolved_squeeze = get_priority ( squeeze , egrc_config . 
squeeze , DEFAULT_SQUEEZE ) # Pass in None, as subs can't be specified at the command line. resolved_subs = get_priority ( None , egrc_config . subs , get_default_subs ( ) ) result = Config ( examples_dir = resolved_examples_dir , custom_dir = resolved_custom_dir , color_config = color_config , use_color = resolved_use_color , pager_cmd = resolved_pager_cmd , editor_cmd = resolved_editor_cmd , squeeze = resolved_squeeze , subs = resolved_subs , ) return result | Create a Config namedtuple . Passed in values will override defaults . | 714 | 14 |
240,660 | def get_config_tuple_from_egrc ( egrc_path ) : with open ( egrc_path , 'r' ) as egrc : try : config = ConfigParser . RawConfigParser ( ) except AttributeError : config = ConfigParser ( ) config . readfp ( egrc ) # default to None examples_dir = None custom_dir = None use_color = None pager_cmd = None squeeze = None subs = None editor_cmd = None if config . has_option ( DEFAULT_SECTION , EG_EXAMPLES_DIR ) : examples_dir = config . get ( DEFAULT_SECTION , EG_EXAMPLES_DIR ) examples_dir = get_expanded_path ( examples_dir ) if config . has_option ( DEFAULT_SECTION , CUSTOM_EXAMPLES_DIR ) : custom_dir = config . get ( DEFAULT_SECTION , CUSTOM_EXAMPLES_DIR ) custom_dir = get_expanded_path ( custom_dir ) if config . has_option ( DEFAULT_SECTION , USE_COLOR ) : use_color_raw = config . get ( DEFAULT_SECTION , USE_COLOR ) use_color = _parse_bool_from_raw_egrc_value ( use_color_raw ) if config . has_option ( DEFAULT_SECTION , PAGER_CMD ) : pager_cmd_raw = config . get ( DEFAULT_SECTION , PAGER_CMD ) pager_cmd = ast . literal_eval ( pager_cmd_raw ) if config . has_option ( DEFAULT_SECTION , EDITOR_CMD ) : editor_cmd_raw = config . get ( DEFAULT_SECTION , EDITOR_CMD ) editor_cmd = ast . literal_eval ( editor_cmd_raw ) color_config = get_custom_color_config_from_egrc ( config ) if config . has_option ( DEFAULT_SECTION , SQUEEZE ) : squeeze_raw = config . get ( DEFAULT_SECTION , SQUEEZE ) squeeze = _parse_bool_from_raw_egrc_value ( squeeze_raw ) if config . has_section ( SUBSTITUTION_SECTION ) : subs = get_substitutions_from_config ( config ) return Config ( examples_dir = examples_dir , custom_dir = custom_dir , color_config = color_config , use_color = use_color , pager_cmd = pager_cmd , editor_cmd = editor_cmd , squeeze = squeeze , subs = subs , ) | Create a Config named tuple from the values specified in the . egrc . Expands any paths as necessary . | 583 | 23 |
240,661 | def get_expanded_path ( path ) : if path : result = path result = os . path . expanduser ( result ) result = os . path . expandvars ( result ) return result else : return None | Expand ~ and variables in a path . If path is not truthy return None . | 46 | 18 |
240,662 | def get_editor_cmd_from_environment ( ) : result = os . getenv ( ENV_VISUAL ) if ( not result ) : result = os . getenv ( ENV_EDITOR ) return result | Gets and editor command from environment variables . | 47 | 9 |
240,663 | def _inform_if_path_does_not_exist ( path ) : expanded_path = get_expanded_path ( path ) if not os . path . exists ( expanded_path ) : print ( 'Could not find custom path at: {}' . format ( expanded_path ) ) | If the path does not exist print a message saying so . This is intended to be helpful to users if they specify a custom path that eg cannot find . | 64 | 31 |
240,664 | def get_custom_color_config_from_egrc ( config ) : pound = _get_color_from_config ( config , CONFIG_NAMES . pound ) heading = _get_color_from_config ( config , CONFIG_NAMES . heading ) code = _get_color_from_config ( config , CONFIG_NAMES . code ) backticks = _get_color_from_config ( config , CONFIG_NAMES . backticks ) prompt = _get_color_from_config ( config , CONFIG_NAMES . prompt ) pound_reset = _get_color_from_config ( config , CONFIG_NAMES . pound_reset ) heading_reset = _get_color_from_config ( config , CONFIG_NAMES . heading_reset ) code_reset = _get_color_from_config ( config , CONFIG_NAMES . code_reset ) backticks_reset = _get_color_from_config ( config , CONFIG_NAMES . backticks_reset ) prompt_reset = _get_color_from_config ( config , CONFIG_NAMES . prompt_reset ) result = ColorConfig ( pound = pound , heading = heading , code = code , backticks = backticks , prompt = prompt , pound_reset = pound_reset , heading_reset = heading_reset , code_reset = code_reset , backticks_reset = backticks_reset , prompt_reset = prompt_reset ) return result | Get the ColorConfig from the egrc config object . Any colors not defined will be None . | 320 | 20 |
240,665 | def _get_color_from_config ( config , option ) : if not config . has_option ( COLOR_SECTION , option ) : return None else : return ast . literal_eval ( config . get ( COLOR_SECTION , option ) ) | Helper method to uet an option from the COLOR_SECTION of the config . | 56 | 18 |
240,666 | def parse_substitution_from_list ( list_rep ) : # We are expecting [pattern, replacement [, is_multiline]] if type ( list_rep ) is not list : raise SyntaxError ( 'Substitution must be a list' ) if len ( list_rep ) < 2 : raise SyntaxError ( 'Substitution must be a list of size 2' ) pattern = list_rep [ 0 ] replacement = list_rep [ 1 ] # By default, substitutions are not multiline. is_multiline = False if ( len ( list_rep ) > 2 ) : is_multiline = list_rep [ 2 ] if type ( is_multiline ) is not bool : raise SyntaxError ( 'is_multiline must be a boolean' ) result = substitute . Substitution ( pattern , replacement , is_multiline ) return result | Parse a substitution from the list representation in the config file . | 194 | 13 |
240,667 | def get_substitutions_from_config ( config ) : result = [ ] pattern_names = config . options ( SUBSTITUTION_SECTION ) pattern_names . sort ( ) for name in pattern_names : pattern_val = config . get ( SUBSTITUTION_SECTION , name ) list_rep = ast . literal_eval ( pattern_val ) substitution = parse_substitution_from_list ( list_rep ) result . append ( substitution ) return result | Return a list of Substitution objects from the config sorted alphabetically by pattern name . Returns an empty list if no Substitutions are specified . If there are problems parsing the values a help message will be printed and an error will be thrown . | 105 | 49 |
240,668 | def get_default_color_config ( ) : result = ColorConfig ( pound = DEFAULT_COLOR_POUND , heading = DEFAULT_COLOR_HEADING , code = DEFAULT_COLOR_CODE , backticks = DEFAULT_COLOR_BACKTICKS , prompt = DEFAULT_COLOR_PROMPT , pound_reset = DEFAULT_COLOR_POUND_RESET , heading_reset = DEFAULT_COLOR_HEADING_RESET , code_reset = DEFAULT_COLOR_CODE_RESET , backticks_reset = DEFAULT_COLOR_BACKTICKS_RESET , prompt_reset = DEFAULT_COLOR_PROMPT_RESET ) return result | Get a color config object with all the defaults . | 153 | 10 |
240,669 | def get_empty_config ( ) : empty_color_config = get_empty_color_config ( ) result = Config ( examples_dir = None , custom_dir = None , color_config = empty_color_config , use_color = None , pager_cmd = None , editor_cmd = None , squeeze = None , subs = None ) return result | Return an empty Config object with no options set . | 79 | 10 |
240,670 | def get_empty_color_config ( ) : empty_color_config = ColorConfig ( pound = None , heading = None , code = None , backticks = None , prompt = None , pound_reset = None , heading_reset = None , code_reset = None , backticks_reset = None , prompt_reset = None ) return empty_color_config | Return a color_config with all values set to None . | 80 | 12 |
240,671 | def merge_color_configs ( first , second ) : # We have to get the desired values first and simultaneously, as nametuple # is immutable. pound = get_priority ( first . pound , second . pound , None ) heading = get_priority ( first . heading , second . heading , None ) code = get_priority ( first . code , second . code , None ) backticks = get_priority ( first . backticks , second . backticks , None ) prompt = get_priority ( first . prompt , second . prompt , None ) pound_reset = get_priority ( first . pound_reset , second . pound_reset , None ) heading_reset = get_priority ( first . heading_reset , second . heading_reset , None ) code_reset = get_priority ( first . code_reset , second . code_reset , None ) backticks_reset = get_priority ( first . backticks_reset , second . backticks_reset , None ) prompt_reset = get_priority ( first . prompt_reset , second . prompt_reset , None ) result = ColorConfig ( pound = pound , heading = heading , code = code , backticks = backticks , prompt = prompt , pound_reset = pound_reset , heading_reset = heading_reset , code_reset = code_reset , backticks_reset = backticks_reset , prompt_reset = prompt_reset ) return result | Merge the color configs . | 311 | 7 |
240,672 | def apply_and_get_result ( self , string ) : if self . is_multiline : compiled_pattern = re . compile ( self . pattern , re . MULTILINE ) else : compiled_pattern = re . compile ( self . pattern ) result = re . sub ( compiled_pattern , self . repl , string ) return result | Perform the substitution represented by this object on string and return the result . | 74 | 15 |
240,673 | def colorize_text ( self , text ) : # As originally implemented, this method acts upon all the contents of # the file as a single string using the MULTILINE option of the re # package. I believe this was ostensibly for performance reasons, but # it has a few side effects that are less than ideal. It's non-trivial # to avoid some substitutions based on other matches using this # technique, for example. In the case of block indents, e.g., backticks # that occur in the example ($ pwd is `pwd`) should not be escaped. # With the MULTILINE flag that is not simple. colorize_backticks() is # currently operating on a line by line basis and special casing for # this scenario. If these special cases proliferate, the line breaking # should occur here in order to minimize the number of iterations. result = text result = self . colorize_heading ( result ) result = self . colorize_block_indent ( result ) result = self . colorize_backticks ( result ) return result | Colorize the text . | 231 | 5 |
240,674 | def _recursive_get_all_file_names ( dir ) : if not dir : return [ ] result = [ ] for basedir , dirs , files in os . walk ( dir ) : result . extend ( files ) return result | Get all the file names in the directory . Gets all the top level file names only not the full path . | 51 | 22 |
240,675 | def edit_custom_examples ( program , config ) : if ( not config . custom_dir ) or ( not os . path . exists ( config . custom_dir ) ) : _inform_cannot_edit_no_custom_dir ( ) return # resolve aliases resolved_program = get_resolved_program ( program , config ) custom_file_paths = get_file_paths_for_program ( resolved_program , config . custom_dir ) if ( len ( custom_file_paths ) > 0 ) : path_to_edit = custom_file_paths [ 0 ] else : # A new file. path_to_edit = os . path . join ( config . custom_dir , resolved_program + '.md' ) # Edit the first. Handles the base case. subprocess . call ( [ config . editor_cmd , path_to_edit ] ) | Edit custom examples for the given program creating the file if it does not exist . | 195 | 16 |
240,676 | def get_file_paths_for_program ( program , dir_to_search ) : if dir_to_search is None : return [ ] else : wanted_file_name = program + EXAMPLE_FILE_SUFFIX result = [ ] for basedir , dirs , file_names in os . walk ( dir_to_search ) : for file_name in file_names : if file_name == wanted_file_name : result . append ( os . path . join ( basedir , file_name ) ) return result | Return an array of full paths matching the given program . If no directory is present returns an empty list . | 118 | 21 |
240,677 | def page_string ( str_to_page , pager_cmd ) : # By default, we expect the command to be `less -R`. If that is the # pager_cmd, but they don't have less on their machine, odds are they're # just using the default value. In this case the pager will fail, so we'll # just go via pydoc.pager, which tries to do smarter checking that we don't # want to bother trying to replicate. use_fallback_page_function = False if pager_cmd is None : use_fallback_page_function = True elif pager_cmd == FLAG_FALLBACK : use_fallback_page_function = True try : if use_fallback_page_function : pydoc . pager ( str_to_page ) else : # Otherwise, obey the user. pydoc . pipepager ( str_to_page , cmd = pager_cmd ) except KeyboardInterrupt : pass | Page str_to_page via the pager . | 217 | 11 |
240,678 | def get_list_of_all_supported_commands ( config ) : default_files = _recursive_get_all_file_names ( config . examples_dir ) custom_files = _recursive_get_all_file_names ( config . custom_dir ) # Now filter so we only have example files, not things like aliases.json. default_files = [ path for path in default_files if _is_example_file ( path ) ] custom_files = [ path for path in custom_files if _is_example_file ( path ) ] def get_without_suffix ( file_name ) : """ Return the file name without the suffix, or the file name itself if it does not have the suffix. """ return file_name . split ( EXAMPLE_FILE_SUFFIX ) [ 0 ] default_files = [ get_without_suffix ( f ) for f in default_files ] custom_files = [ get_without_suffix ( f ) for f in custom_files ] set_default_commands = set ( default_files ) set_custom_commands = set ( custom_files ) alias_dict = get_alias_dict ( config ) both_defined = set_default_commands & set_custom_commands only_default = set_default_commands - set_custom_commands only_custom = set_custom_commands - set_default_commands all_commands = both_defined | only_default | only_custom command_to_rep = { } for command in all_commands : rep = None if command in both_defined : rep = command + ' ' + FLAG_CUSTOM_AND_DEFAULT elif command in only_default : rep = command elif command in only_custom : rep = command + ' ' + FLAG_ONLY_CUSTOM else : raise NameError ( 'command not in known set: ' + str ( command ) ) command_to_rep [ command ] = rep result = [ ] all_commands_and_aliases = all_commands . union ( alias_dict . keys ( ) ) for command in all_commands_and_aliases : if command in alias_dict : # aliases get precedence target = alias_dict [ command ] rep_of_target = command_to_rep [ target ] result . append ( command + ' -> ' + rep_of_target ) else : rep = command_to_rep [ command ] result . append ( rep ) result . sort ( ) return result | Generate a list of all the commands that have examples known to eg . 
The format of the list is the command names . The fact that there are examples for cp for example would mean that cp was in the list . | 552 | 44 |
240,679 | def get_squeezed_contents ( contents ) : line_between_example_code = substitute . Substitution ( '\n\n ' , '\n ' , True ) lines_between_examples = substitute . Substitution ( '\n\n\n' , '\n\n' , True ) lines_between_sections = substitute . Substitution ( '\n\n\n\n' , '\n\n\n' , True ) result = contents result = line_between_example_code . apply_and_get_result ( result ) result = lines_between_examples . apply_and_get_result ( result ) result = lines_between_sections . apply_and_get_result ( result ) return result | Squeeze the contents by removing blank lines between definition and example and remove duplicate blank lines except between sections . | 165 | 22 |
240,680 | def get_colorized_contents ( contents , color_config ) : colorizer = color . EgColorizer ( color_config ) result = colorizer . colorize_text ( contents ) return result | Colorize the contents based on the color_config . | 43 | 11 |
240,681 | def get_substituted_contents ( contents , substitutions ) : result = contents for sub in substitutions : result = sub . apply_and_get_result ( result ) return result | Perform a list of substitutions and return the result . | 42 | 12 |
240,682 | def get_resolved_program ( program , config_obj ) : alias_dict = get_alias_dict ( config_obj ) if program in alias_dict : return alias_dict [ program ] else : return program | Take a program that may be an alias for another program and return the resolved program . | 47 | 17 |
240,683 | def get_alias_dict ( config_obj ) : if not config_obj . examples_dir : return { } alias_file_path = _get_alias_file_path ( config_obj ) if not os . path . isfile ( alias_file_path ) : return { } alias_file_contents = _get_contents_of_file ( alias_file_path ) result = json . loads ( alias_file_contents ) return result | Return a dictionary consisting of all aliases known to eg . | 101 | 11 |
240,684 | def assert_not_called ( _mock_self ) : self = _mock_self if self . call_count != 0 : msg = ( "Expected '%s' to not have been called. Called %s times." % ( self . _mock_name or 'mock' , self . call_count ) ) raise AssertionError ( msg ) | assert that the mock was never called . | 81 | 8 |
240,685 | def assert_called ( _mock_self ) : self = _mock_self if self . call_count == 0 : msg = ( "Expected '%s' to have been called." % self . _mock_name or 'mock' ) raise AssertionError ( msg ) | assert that the mock was called at least once | 65 | 9 |
240,686 | def assert_called_once ( _mock_self ) : self = _mock_self if not self . call_count == 1 : msg = ( "Expected '%s' to have been called once. Called %s times." % ( self . _mock_name or 'mock' , self . call_count ) ) raise AssertionError ( msg ) | assert that the mock was called only once . | 82 | 9 |
240,687 | def assert_called_once_with ( _mock_self , * args , * * kwargs ) : self = _mock_self if not self . call_count == 1 : msg = ( "Expected '%s' to be called once. Called %s times." % ( self . _mock_name or 'mock' , self . call_count ) ) raise AssertionError ( msg ) return self . assert_called_with ( * args , * * kwargs ) | assert that the mock was called exactly once and with the specified arguments . | 110 | 14 |
240,688 | def normalize ( self ) : chrfunc = chr if py3k else HTMLEntity . _unichr if self . named : return chrfunc ( htmlentities . name2codepoint [ self . value ] ) if self . hexadecimal : return chrfunc ( int ( self . value , 16 ) ) return chrfunc ( int ( self . value ) ) | Return the unicode character represented by the HTML entity . | 87 | 11 |
240,689 | def _detach_children ( self ) : children = [ val [ 0 ] for val in self . _children . values ( ) ] for child in children : child ( ) . _parent = list ( self ) self . _children . clear ( ) | Remove all children and give them independent parent copies . | 53 | 10 |
240,690 | def _handle_parameter ( self , default ) : key = None showkey = False self . _push ( ) while self . _tokens : token = self . _tokens . pop ( ) if isinstance ( token , tokens . TemplateParamEquals ) : key = self . _pop ( ) showkey = True self . _push ( ) elif isinstance ( token , ( tokens . TemplateParamSeparator , tokens . TemplateClose ) ) : self . _tokens . append ( token ) value = self . _pop ( ) if key is None : key = Wikicode ( SmartList ( [ Text ( str ( default ) ) ] ) ) return Parameter ( key , value , showkey ) else : self . _write ( self . _handle_token ( token ) ) raise ParserError ( "_handle_parameter() missed a close token" ) | Handle a case where a parameter is at the head of the tokens . | 189 | 14 |
240,691 | def _handle_template ( self , token ) : params = [ ] default = 1 self . _push ( ) while self . _tokens : token = self . _tokens . pop ( ) if isinstance ( token , tokens . TemplateParamSeparator ) : if not params : name = self . _pop ( ) param = self . _handle_parameter ( default ) params . append ( param ) if not param . showkey : default += 1 elif isinstance ( token , tokens . TemplateClose ) : if not params : name = self . _pop ( ) return Template ( name , params ) else : self . _write ( self . _handle_token ( token ) ) raise ParserError ( "_handle_template() missed a close token" ) | Handle a case where a template is at the head of the tokens . | 164 | 14 |
240,692 | def _handle_argument ( self , token ) : name = None self . _push ( ) while self . _tokens : token = self . _tokens . pop ( ) if isinstance ( token , tokens . ArgumentSeparator ) : name = self . _pop ( ) self . _push ( ) elif isinstance ( token , tokens . ArgumentClose ) : if name is not None : return Argument ( name , self . _pop ( ) ) return Argument ( self . _pop ( ) ) else : self . _write ( self . _handle_token ( token ) ) raise ParserError ( "_handle_argument() missed a close token" ) | Handle a case where an argument is at the head of the tokens . | 142 | 14 |
240,693 | def _handle_wikilink ( self , token ) : title = None self . _push ( ) while self . _tokens : token = self . _tokens . pop ( ) if isinstance ( token , tokens . WikilinkSeparator ) : title = self . _pop ( ) self . _push ( ) elif isinstance ( token , tokens . WikilinkClose ) : if title is not None : return Wikilink ( title , self . _pop ( ) ) return Wikilink ( self . _pop ( ) ) else : self . _write ( self . _handle_token ( token ) ) raise ParserError ( "_handle_wikilink() missed a close token" ) | Handle a case where a wikilink is at the head of the tokens . | 154 | 16 |
240,694 | def _handle_external_link ( self , token ) : brackets , url = token . brackets , None self . _push ( ) while self . _tokens : token = self . _tokens . pop ( ) if isinstance ( token , tokens . ExternalLinkSeparator ) : url = self . _pop ( ) self . _push ( ) elif isinstance ( token , tokens . ExternalLinkClose ) : if url is not None : return ExternalLink ( url , self . _pop ( ) , brackets ) return ExternalLink ( self . _pop ( ) , brackets = brackets ) else : self . _write ( self . _handle_token ( token ) ) raise ParserError ( "_handle_external_link() missed a close token" ) | Handle when an external link is at the head of the tokens . | 162 | 13 |
240,695 | def _handle_entity ( self , token ) : token = self . _tokens . pop ( ) if isinstance ( token , tokens . HTMLEntityNumeric ) : token = self . _tokens . pop ( ) if isinstance ( token , tokens . HTMLEntityHex ) : text = self . _tokens . pop ( ) self . _tokens . pop ( ) # Remove HTMLEntityEnd return HTMLEntity ( text . text , named = False , hexadecimal = True , hex_char = token . char ) self . _tokens . pop ( ) # Remove HTMLEntityEnd return HTMLEntity ( token . text , named = False , hexadecimal = False ) self . _tokens . pop ( ) # Remove HTMLEntityEnd return HTMLEntity ( token . text , named = True , hexadecimal = False ) | Handle a case where an HTML entity is at the head of the tokens . | 209 | 15 |
240,696 | def _handle_heading ( self , token ) : level = token . level self . _push ( ) while self . _tokens : token = self . _tokens . pop ( ) if isinstance ( token , tokens . HeadingEnd ) : title = self . _pop ( ) return Heading ( title , level ) else : self . _write ( self . _handle_token ( token ) ) raise ParserError ( "_handle_heading() missed a close token" ) | Handle a case where a heading is at the head of the tokens . | 104 | 14 |
240,697 | def _handle_comment ( self , token ) : self . _push ( ) while self . _tokens : token = self . _tokens . pop ( ) if isinstance ( token , tokens . CommentEnd ) : contents = self . _pop ( ) return Comment ( contents ) else : self . _write ( self . _handle_token ( token ) ) raise ParserError ( "_handle_comment() missed a close token" ) | Handle a case where an HTML comment is at the head of the tokens . | 95 | 15 |
240,698 | def _handle_attribute ( self , start ) : name = quotes = None self . _push ( ) while self . _tokens : token = self . _tokens . pop ( ) if isinstance ( token , tokens . TagAttrEquals ) : name = self . _pop ( ) self . _push ( ) elif isinstance ( token , tokens . TagAttrQuote ) : quotes = token . char elif isinstance ( token , ( tokens . TagAttrStart , tokens . TagCloseOpen , tokens . TagCloseSelfclose ) ) : self . _tokens . append ( token ) if name : value = self . _pop ( ) else : name , value = self . _pop ( ) , None return Attribute ( name , value , quotes , start . pad_first , start . pad_before_eq , start . pad_after_eq ) else : self . _write ( self . _handle_token ( token ) ) raise ParserError ( "_handle_attribute() missed a close token" ) | Handle a case where a tag attribute is at the head of the tokens . | 222 | 15 |
240,699 | def _handle_tag ( self , token ) : close_tokens = ( tokens . TagCloseSelfclose , tokens . TagCloseClose ) implicit , attrs , contents , closing_tag = False , [ ] , None , None wiki_markup , invalid = token . wiki_markup , token . invalid or False wiki_style_separator , closing_wiki_markup = None , wiki_markup self . _push ( ) while self . _tokens : token = self . _tokens . pop ( ) if isinstance ( token , tokens . TagAttrStart ) : attrs . append ( self . _handle_attribute ( token ) ) elif isinstance ( token , tokens . TagCloseOpen ) : wiki_style_separator = token . wiki_markup padding = token . padding or "" tag = self . _pop ( ) self . _push ( ) elif isinstance ( token , tokens . TagOpenClose ) : closing_wiki_markup = token . wiki_markup contents = self . _pop ( ) self . _push ( ) elif isinstance ( token , close_tokens ) : if isinstance ( token , tokens . TagCloseSelfclose ) : closing_wiki_markup = token . wiki_markup tag = self . _pop ( ) self_closing = True padding = token . padding or "" implicit = token . implicit or False else : self_closing = False closing_tag = self . _pop ( ) return Tag ( tag , contents , attrs , wiki_markup , self_closing , invalid , implicit , padding , closing_tag , wiki_style_separator , closing_wiki_markup ) else : self . _write ( self . _handle_token ( token ) ) raise ParserError ( "_handle_tag() missed a close token" ) | Handle a case where a tag is at the head of the tokens . | 397 | 14 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.