idx int64 0 63k | question stringlengths 61 4.03k | target stringlengths 6 1.23k |
|---|---|---|
17,300 | def post ( self , url , params = { } , files = [ ] ) : url = self . host + url try : if files : return self . post_multipart ( url , params , files ) else : data = urllib . parse . urlencode ( params ) if not PY3 : data = str ( data ) resp = requests . post ( url , data = data , headers = self . headers , auth = self . oauth ) return self . json_parse ( resp ) except HTTPError as e : return self . json_parse ( e . response ) | Issues a POST request against the API allows for multipart data uploads |
17,301 | def json_parse ( self , response ) : try : data = response . json ( ) except ValueError : data = { 'meta' : { 'status' : 500 , 'msg' : 'Server Error' } , 'response' : { "error" : "Malformed JSON or HTML was returned." } } if 200 <= data [ 'meta' ] [ 'status' ] <= 399 : return data [ 'response' ] else : return data | Wraps and abstracts response validation and JSON parsing to make sure the user gets the correct response . |
17,302 | def post_multipart ( self , url , params , files ) : resp = requests . post ( url , data = params , params = params , files = files , headers = self . headers , allow_redirects = False , auth = self . oauth ) return self . json_parse ( resp ) | Generates and issues a multipart request for data files |
17,303 | def avatar ( self , blogname , size = 64 ) : url = "/v2/blog/{}/avatar/{}" . format ( blogname , size ) return self . send_api_request ( "get" , url ) | Retrieves the url of the blog s avatar |
17,304 | def tagged ( self , tag , ** kwargs ) : kwargs . update ( { 'tag' : tag } ) return self . send_api_request ( "get" , '/v2/tagged' , kwargs , [ 'before' , 'limit' , 'filter' , 'tag' , 'api_key' ] , True ) | Gets a list of posts tagged with the given tag |
17,305 | def posts ( self , blogname , type = None , ** kwargs ) : if type is None : url = '/v2/blog/{}/posts' . format ( blogname ) else : url = '/v2/blog/{}/posts/{}' . format ( blogname , type ) return self . send_api_request ( "get" , url , kwargs , [ 'id' , 'tag' , 'limit' , 'offset' , 'before' , 'reblog_info' , 'notes_info' , 'filter' , 'api_key' ] , True ) | Gets a list of posts from a particular blog |
17,306 | def blog_info ( self , blogname ) : url = "/v2/blog/{}/info" . format ( blogname ) return self . send_api_request ( "get" , url , { } , [ 'api_key' ] , True ) | Gets the information of the given blog |
17,307 | def blog_following ( self , blogname , ** kwargs ) : url = "/v2/blog/{}/following" . format ( blogname ) return self . send_api_request ( "get" , url , kwargs , [ 'limit' , 'offset' ] ) | Gets the publicly exposed list of blogs that a blog follows |
17,308 | def like ( self , id , reblog_key ) : url = "/v2/user/like" params = { 'id' : id , 'reblog_key' : reblog_key } return self . send_api_request ( "post" , url , params , [ 'id' , 'reblog_key' ] ) | Like the post of the given blog |
17,309 | def unlike ( self , id , reblog_key ) : url = "/v2/user/unlike" params = { 'id' : id , 'reblog_key' : reblog_key } return self . send_api_request ( "post" , url , params , [ 'id' , 'reblog_key' ] ) | Unlike the post of the given blog |
17,310 | def create_photo ( self , blogname , ** kwargs ) : kwargs . update ( { "type" : "photo" } ) return self . _send_post ( blogname , kwargs ) | Create a photo post or photoset on a blog |
17,311 | def create_text ( self , blogname , ** kwargs ) : kwargs . update ( { "type" : "text" } ) return self . _send_post ( blogname , kwargs ) | Create a text post on a blog |
17,312 | def create_quote ( self , blogname , ** kwargs ) : kwargs . update ( { "type" : "quote" } ) return self . _send_post ( blogname , kwargs ) | Create a quote post on a blog |
17,313 | def create_link ( self , blogname , ** kwargs ) : kwargs . update ( { "type" : "link" } ) return self . _send_post ( blogname , kwargs ) | Create a link post on a blog |
17,314 | def create_chat ( self , blogname , ** kwargs ) : kwargs . update ( { "type" : "chat" } ) return self . _send_post ( blogname , kwargs ) | Create a chat post on a blog |
17,315 | def reblog ( self , blogname , ** kwargs ) : url = "/v2/blog/{}/post/reblog" . format ( blogname ) valid_options = [ 'id' , 'reblog_key' , 'comment' ] + self . _post_valid_options ( kwargs . get ( 'type' , None ) ) if 'tags' in kwargs and kwargs [ 'tags' ] : kwargs [ 'tags' ] = "," . join ( kwargs [ 'tags' ] ) return self . send_api_request ( 'post' , url , kwargs , valid_options ) | Creates a reblog on the given blogname |
17,316 | def delete_post ( self , blogname , id ) : url = "/v2/blog/{}/post/delete" . format ( blogname ) return self . send_api_request ( 'post' , url , { 'id' : id } , [ 'id' ] ) | Deletes a post with the given id |
17,317 | def _send_post ( self , blogname , params ) : url = "/v2/blog/{}/post" . format ( blogname ) valid_options = self . _post_valid_options ( params . get ( 'type' , None ) ) if len ( params . get ( "tags" , [ ] ) ) > 0 : params [ 'tags' ] = "," . join ( params [ 'tags' ] ) return self . send_api_request ( "post" , url , params , valid_options ) | Formats parameters and sends the API request off . Validates common and per - post - type parameters and formats your tags for you . |
17,318 | def send_api_request ( self , method , url , params = { } , valid_parameters = [ ] , needs_api_key = False ) : if needs_api_key : params . update ( { 'api_key' : self . request . consumer_key } ) valid_parameters . append ( 'api_key' ) files = { } if 'data' in params : if isinstance ( params [ 'data' ] , list ) : for idx , data in enumerate ( params [ 'data' ] ) : files [ 'data[' + str ( idx ) + ']' ] = open ( params [ 'data' ] [ idx ] , 'rb' ) else : files = { 'data' : open ( params [ 'data' ] , 'rb' ) } del params [ 'data' ] validate_params ( valid_parameters , params ) if method == "get" : return self . request . get ( url , params ) else : return self . request . post ( url , params , files ) | Sends the url with parameters to the requested url validating them to make sure that they are what we expect to have passed to us |
17,319 | def run ( path , code = None , params = None , ** meta ) : tree = compile ( code , path , "exec" , ast . PyCF_ONLY_AST ) McCabeChecker . max_complexity = int ( params . get ( 'complexity' , 10 ) ) return [ { 'lnum' : lineno , 'offset' : offset , 'text' : text , 'type' : McCabeChecker . _code } for lineno , offset , text , _ in McCabeChecker ( tree , path ) . run ( ) ] | MCCabe code checking . |
17,320 | def split_csp_str ( val ) : seen = set ( ) values = val if isinstance ( val , ( list , tuple ) ) else val . strip ( ) . split ( ',' ) return [ x for x in values if x and not ( x in seen or seen . add ( x ) ) ] | Split comma separated string into unique values keeping their order . |
17,321 | def parse_linters ( linters ) : result = list ( ) for name in split_csp_str ( linters ) : linter = LINTERS . get ( name ) if linter : result . append ( ( name , linter ) ) else : logging . warning ( "Linter `%s` not found." , name ) return result | Initialize chosen linters . |
17,322 | def get_default_config_file ( rootdir = None ) : if rootdir is None : return DEFAULT_CONFIG_FILE for path in CONFIG_FILES : path = os . path . join ( rootdir , path ) if os . path . isfile ( path ) and os . access ( path , os . R_OK ) : return path | Search for configuration file . |
17,323 | def parse_options ( args = None , config = True , rootdir = CURDIR , ** overrides ) : args = args or [ ] options = PARSER . parse_args ( args ) options . file_params = dict ( ) options . linters_params = dict ( ) if config : cfg = get_config ( str ( options . options ) , rootdir = rootdir ) for opt , val in cfg . default . items ( ) : LOGGER . info ( 'Find option %s (%s)' , opt , val ) passed_value = getattr ( options , opt , _Default ( ) ) if isinstance ( passed_value , _Default ) : if opt == 'paths' : val = val . split ( ) if opt == 'skip' : val = fix_pathname_sep ( val ) setattr ( options , opt , _Default ( val ) ) for name , opts in cfg . sections . items ( ) : if name == cfg . default_section : continue if name . startswith ( 'pylama' ) : name = name [ 7 : ] if name in LINTERS : options . linters_params [ name ] = dict ( opts ) continue mask = re . compile ( fnmatch . translate ( fix_pathname_sep ( name ) ) ) options . file_params [ mask ] = dict ( opts ) _override_options ( options , ** overrides ) for name in options . __dict__ : value = getattr ( options , name ) if isinstance ( value , _Default ) : setattr ( options , name , process_value ( name , value . value ) ) if options . concurrent and 'pylint' in options . linters : LOGGER . warning ( 'Can\'t parse code asynchronously with pylint enabled.' ) options . concurrent = False return options | Parse options from command line and configuration files . |
17,324 | def _override_options ( options , ** overrides ) : for opt , val in overrides . items ( ) : passed_value = getattr ( options , opt , _Default ( ) ) if opt in ( 'ignore' , 'select' ) and passed_value : value = process_value ( opt , passed_value . value ) value += process_value ( opt , val ) setattr ( options , opt , value ) elif isinstance ( passed_value , _Default ) : setattr ( options , opt , process_value ( opt , val ) ) | Override options . |
17,325 | def process_value ( name , value ) : action = ACTIONS . get ( name ) if not action : return value if callable ( action . type ) : return action . type ( value ) if action . const : return bool ( int ( value ) ) return value | Compile option value . |
17,326 | def get_config ( ini_path = None , rootdir = None ) : config = Namespace ( ) config . default_section = 'pylama' if not ini_path : path = get_default_config_file ( rootdir ) if path : config . read ( path ) else : config . read ( ini_path ) return config | Load configuration from INI . |
17,327 | def setup_logger ( options ) : LOGGER . setLevel ( logging . INFO if options . verbose else logging . WARN ) if options . report : LOGGER . removeHandler ( STREAM ) LOGGER . addHandler ( logging . FileHandler ( options . report , mode = 'w' ) ) if options . options : LOGGER . info ( 'Try to read configuration from: %r' , options . options ) | Do the logger setup with options . |
17,328 | def run ( path , code = None , params = None , ** meta ) : import _ast builtins = params . get ( "builtins" , "" ) if builtins : builtins = builtins . split ( "," ) tree = compile ( code , path , "exec" , _ast . PyCF_ONLY_AST ) w = checker . Checker ( tree , path , builtins = builtins ) w . messages = sorted ( w . messages , key = lambda m : m . lineno ) return [ { 'lnum' : m . lineno , 'text' : m . message % m . message_args , 'type' : m . message [ 0 ] } for m in w . messages ] | Check code with pyflakes . |
17,329 | def check_path ( options , rootdir = None , candidates = None , code = None ) : if not candidates : candidates = [ ] for path_ in options . paths : path = op . abspath ( path_ ) if op . isdir ( path ) : for root , _ , files in walk ( path ) : candidates += [ op . relpath ( op . join ( root , f ) , CURDIR ) for f in files ] else : candidates . append ( path ) if rootdir is None : rootdir = path if op . isdir ( path ) else op . dirname ( path ) paths = [ ] for path in candidates : if not options . force and not any ( l . allow ( path ) for _ , l in options . linters ) : continue if not op . exists ( path ) : continue paths . append ( path ) if options . concurrent : return check_async ( paths , options , rootdir ) errors = [ ] for path in paths : errors += run ( path = path , code = code , rootdir = rootdir , options = options ) return errors | Check path . |
17,330 | def shell ( args = None , error = True ) : if args is None : args = sys . argv [ 1 : ] options = parse_options ( args ) setup_logger ( options ) LOGGER . info ( options ) if options . hook : from . hook import install_hook for path in options . paths : return install_hook ( path ) return process_paths ( options , error = error ) | Endpoint for console . |
17,331 | def process_paths ( options , candidates = None , error = True ) : errors = check_path ( options , rootdir = CURDIR , candidates = candidates ) if options . format in [ 'pycodestyle' , 'pep8' ] : pattern = "%(filename)s:%(lnum)s:%(col)s: %(text)s" elif options . format == 'pylint' : pattern = "%(filename)s:%(lnum)s: [%(type)s] %(text)s" else : pattern = "%(filename)s:%(lnum)s:%(col)s: [%(type)s] %(text)s" for er in errors : if options . abspath : er . _info [ 'filename' ] = op . abspath ( er . filename ) LOGGER . warning ( pattern , er . _info ) if error : sys . exit ( int ( bool ( errors ) ) ) return errors | Process files and log errors . |
17,332 | def reset ( self , source ) : self . tokens = [ ] self . source = source self . pos = 0 | Reset scanner s state . |
17,333 | def scan ( self ) : self . pre_scan ( ) token = None end = len ( self . source ) while self . pos < end : best_pat = None best_pat_len = 0 for p , regexp in self . patterns : m = regexp . match ( self . source , self . pos ) if m : best_pat = p best_pat_len = len ( m . group ( 0 ) ) break if best_pat is None : raise SyntaxError ( "SyntaxError[@char {0}: {1}]" . format ( self . pos , "Bad token." ) ) if best_pat in self . ignore : self . pos += best_pat_len continue token = ( best_pat , self . source [ self . pos : self . pos + best_pat_len ] , self . pos , self . pos + best_pat_len , ) self . pos = token [ - 1 ] self . tokens . append ( token ) | Scan source and grab tokens . |
17,334 | def pre_scan ( self ) : escape_re = re . compile ( r'\\\n[\t ]+' ) self . source = escape_re . sub ( '' , self . source ) | Prepare string for scanning . |
17,335 | def iteritems ( self , raw = False ) : for key in self : yield key , self . __getitem__ ( key , raw = raw ) | Iterate self items . |
17,336 | def read ( self , * files , ** params ) : for f in files : try : with io . open ( f , encoding = 'utf-8' ) as ff : NS_LOGGER . info ( 'Read from `{0}`' . format ( ff . name ) ) self . parse ( ff . read ( ) , ** params ) except ( IOError , TypeError , SyntaxError , io . UnsupportedOperation ) : if not self . silent_read : NS_LOGGER . error ( 'Reading error `{0}`' . format ( ff . name ) ) raise | Read and parse INI files . |
17,337 | def write ( self , f ) : if isinstance ( f , str ) : f = io . open ( f , 'w' , encoding = 'utf-8' ) if not hasattr ( f , 'read' ) : raise AttributeError ( "Wrong type of file: {0}" . format ( type ( f ) ) ) NS_LOGGER . info ( 'Write to `{0}`' . format ( f . name ) ) for section in self . sections . keys ( ) : f . write ( '[{0}]\n' . format ( section ) ) for k , v in self [ section ] . items ( ) : f . write ( '{0:15}= {1}\n' . format ( k , v ) ) f . write ( '\n' ) f . close ( ) | Write namespace as INI file . |
17,338 | def parse ( self , source , update = True , ** params ) : scanner = INIScanner ( source ) scanner . scan ( ) section = self . default_section name = None for token in scanner . tokens : if token [ 0 ] == 'KEY_VALUE' : name , value = re . split ( '[=:]' , token [ 1 ] , 1 ) name , value = name . strip ( ) , value . strip ( ) if not update and name in self [ section ] : continue self [ section ] [ name ] = value elif token [ 0 ] == 'SECTION' : section = token [ 1 ] . strip ( '[]' ) elif token [ 0 ] == 'CONTINUATION' : if not name : raise SyntaxError ( "SyntaxError[@char {0}: {1}]" . format ( token [ 2 ] , "Bad continuation." ) ) self [ section ] [ name ] += '\n' + token [ 1 ] . strip ( ) | Parse INI source as string . |
17,339 | def parse_modeline ( code ) : seek = MODELINE_RE . search ( code ) if seek : return dict ( v . split ( '=' ) for v in seek . group ( 1 ) . split ( ':' ) ) return dict ( ) | Parse params from file s modeline . |
17,340 | def prepare_params ( modeline , fileconfig , options ) : params = dict ( skip = False , ignore = [ ] , select = [ ] , linters = [ ] ) if options : params [ 'ignore' ] = list ( options . ignore ) params [ 'select' ] = list ( options . select ) for config in filter ( None , [ modeline , fileconfig ] ) : for key in ( 'ignore' , 'select' , 'linters' ) : params [ key ] += process_value ( key , config . get ( key , [ ] ) ) params [ 'skip' ] = bool ( int ( config . get ( 'skip' , False ) ) ) params [ 'skip' ] = False params [ 'ignore' ] = set ( params [ 'ignore' ] ) params [ 'select' ] = set ( params [ 'select' ] ) return params | Prepare and merge a params from modelines and configs . |
17,341 | def filter_errors ( errors , select = None , ignore = None , ** params ) : select = select or [ ] ignore = ignore or [ ] for e in errors : for s in select : if e . number . startswith ( s ) : yield e break else : for s in ignore : if e . number . startswith ( s ) : break else : yield e | Filter errors by select and ignore options . |
17,342 | def filter_skiplines ( code , errors ) : if not errors : return errors enums = set ( er . lnum for er in errors ) removed = set ( [ num for num , l in enumerate ( code . split ( '\n' ) , 1 ) if num in enums and SKIP_PATTERN ( l ) ] ) if removed : errors = [ er for er in errors if er . lnum not in removed ] return errors | Filter lines by noqa . |
17,343 | def check_async ( paths , options , rootdir = None ) : LOGGER . info ( 'Async code checking is enabled.' ) path_queue = Queue . Queue ( ) result_queue = Queue . Queue ( ) for num in range ( CPU_COUNT ) : worker = Worker ( path_queue , result_queue ) worker . setDaemon ( True ) LOGGER . info ( 'Start worker #%s' , ( num + 1 ) ) worker . start ( ) for path in paths : path_queue . put ( ( path , dict ( options = options , rootdir = rootdir ) ) ) path_queue . join ( ) errors = [ ] while True : try : errors += result_queue . get ( False ) except Queue . Empty : break return errors | Check given paths asynchronously . |
17,344 | def run ( self ) : while True : path , params = self . path_queue . get ( ) errors = run ( path , ** params ) self . result_queue . put ( errors ) self . path_queue . task_done ( ) | Run tasks from queue . |
17,345 | def _parse ( self , line ) : try : result = line . split ( ':' , maxsplit = 4 ) filename , line_num_txt , column_txt , message_type , text = result except ValueError : return try : self . line_num = int ( line_num_txt . strip ( ) ) self . column = int ( column_txt . strip ( ) ) except ValueError : return self . filename = filename self . message_type = message_type . strip ( ) self . text = text . strip ( ) self . valid = True | Parse the output line |
17,346 | def to_result ( self ) : text = [ self . text ] if self . note : text . append ( self . note ) return { 'lnum' : self . line_num , 'col' : self . column , 'text' : ' - ' . join ( text ) , 'type' : self . types . get ( self . message_type , '' ) } | Convert to the Linter . run return value |
17,347 | def run ( path , code = None , params = None , ** meta ) : args = [ path , '--follow-imports=skip' , '--show-column-numbers' ] stdout , stderr , status = api . run ( args ) messages = [ ] for line in stdout . split ( '\n' ) : line . strip ( ) if not line : continue message = _MyPyMessage ( line ) if message . valid : if message . message_type == 'note' : if messages [ - 1 ] . line_num == message . line_num : messages [ - 1 ] . add_note ( message . text ) else : messages . append ( message ) return [ m . to_result ( ) for m in messages ] | Check code with mypy . |
17,348 | def prepare_value ( value ) : if isinstance ( value , ( list , tuple , set ) ) : return "," . join ( value ) if isinstance ( value , bool ) : return "y" if value else "n" return str ( value ) | Prepare value to pylint . |
17,349 | def run ( path , code = None , params = None , ** meta ) : parser = get_parser ( ) for option in parser . option_list : if option . dest and option . dest in params : value = params [ option . dest ] if isinstance ( value , str ) : params [ option . dest ] = option . convert_value ( option , value ) for key in [ "filename" , "exclude" , "select" , "ignore" ] : if key in params and isinstance ( params [ key ] , str ) : params [ key ] = _parse_multi_options ( params [ key ] ) P8Style = StyleGuide ( reporter = _PycodestyleReport , ** params ) buf = StringIO ( code ) return P8Style . input_file ( path , lines = buf . readlines ( ) ) | Check code with pycodestyle . |
17,350 | def init_file ( self , filename , lines , expected , line_offset ) : super ( _PycodestyleReport , self ) . init_file ( filename , lines , expected , line_offset ) self . errors = [ ] | Prepare storage for errors . |
17,351 | def error ( self , line_number , offset , text , check ) : code = super ( _PycodestyleReport , self ) . error ( line_number , offset , text , check ) if code : self . errors . append ( dict ( text = text , type = code . replace ( 'E' , 'C' ) , col = offset + 1 , lnum = line_number , ) ) | Save errors . |
17,352 | def run ( path , code = None , params = None , ** meta ) : if 'ignore_decorators' in params : ignore_decorators = params [ 'ignore_decorators' ] else : ignore_decorators = None check_source_args = ( code , path , ignore_decorators ) if THIRD_ARG else ( code , path ) return [ { 'lnum' : e . line , 'text' : ( e . message [ 0 : 4 ] + e . message [ 5 : ] if e . message [ 4 ] == ':' else e . message ) , 'type' : 'D' , 'number' : e . code } for e in PyDocChecker ( ) . check_source ( * check_source_args ) ] | pydocstyle code checking . |
17,353 | def run ( path , code = None , params = None , ignore = None , select = None , ** meta ) : complexity = params . get ( 'complexity' , 10 ) no_assert = params . get ( 'no_assert' , False ) show_closures = params . get ( 'show_closures' , False ) visitor = ComplexityVisitor . from_code ( code , no_assert = no_assert ) blocks = visitor . blocks if show_closures : blocks = add_inner_blocks ( blocks ) return [ { 'lnum' : block . lineno , 'col' : block . col_offset , 'type' : 'R' , 'number' : 'R709' , 'text' : 'R701: %s is too complex %d' % ( block . name , block . complexity ) } for block in visitor . blocks if block . complexity > complexity ] | Check code with Radon . |
17,354 | def git_hook ( error = True ) : _ , files_modified , _ = run ( "git diff-index --cached --name-only HEAD" ) options = parse_options ( ) setup_logger ( options ) if sys . version_info >= ( 3 , ) : candidates = [ f . decode ( 'utf-8' ) for f in files_modified ] else : candidates = [ str ( f ) for f in files_modified ] if candidates : process_paths ( options , candidates = candidates , error = error ) | Run pylama after git commit . |
17,355 | def hg_hook ( ui , repo , node = None , ** kwargs ) : seen = set ( ) paths = [ ] if len ( repo ) : for rev in range ( repo [ node ] , len ( repo ) ) : for file_ in repo [ rev ] . files ( ) : file_ = op . join ( repo . root , file_ ) if file_ in seen or not op . exists ( file_ ) : continue seen . add ( file_ ) paths . append ( file_ ) options = parse_options ( ) setup_logger ( options ) if paths : process_paths ( options , candidates = paths ) | Run pylama after mercurial commit . |
17,356 | def install_git ( path ) : hook = op . join ( path , 'pre-commit' ) with open ( hook , 'w' ) as fd : fd . write ( ) chmod ( hook , 484 ) | Install hook in Git repository . |
17,357 | def install_hg ( path ) : hook = op . join ( path , 'hgrc' ) if not op . isfile ( hook ) : open ( hook , 'w+' ) . close ( ) c = ConfigParser ( ) c . readfp ( open ( hook , 'r' ) ) if not c . has_section ( 'hooks' ) : c . add_section ( 'hooks' ) if not c . has_option ( 'hooks' , 'commit' ) : c . set ( 'hooks' , 'commit' , 'python:pylama.hooks.hg_hook' ) if not c . has_option ( 'hooks' , 'qrefresh' ) : c . set ( 'hooks' , 'qrefresh' , 'python:pylama.hooks.hg_hook' ) c . write ( open ( hook , 'w+' ) ) | Install hook in Mercurial repository . |
17,358 | def install_hook ( path ) : git = op . join ( path , '.git' , 'hooks' ) hg = op . join ( path , '.hg' ) if op . exists ( git ) : install_git ( git ) LOGGER . warn ( 'Git hook has been installed.' ) elif op . exists ( hg ) : install_hg ( hg ) LOGGER . warn ( 'Mercurial hook has been installed.' ) else : LOGGER . error ( 'VCS has not found. Check your path.' ) sys . exit ( 1 ) | Auto definition of SCM and hook installation . |
17,359 | def run ( path , code = None , params = None , ** meta ) : code = converter ( code ) line_numbers = commented_out_code_line_numbers ( code ) lines = code . split ( '\n' ) result = [ ] for line_number in line_numbers : line = lines [ line_number - 1 ] result . append ( dict ( lnum = line_number , offset = len ( line ) - len ( line . rstrip ( ) ) , text = converter ( 'E800 Found commented out code: ' ) + line , type = 'E800' , ) ) return result | Eradicate code checking . |
17,360 | def remove_duplicates ( errors ) : passed = defaultdict ( list ) for error in errors : key = error . linter , error . number if key in DUPLICATES : if key in passed [ error . lnum ] : continue passed [ error . lnum ] = DUPLICATES [ key ] yield error | Filter duplicates from given error s list . |
17,361 | def fork ( self , strictindex , new_value ) : forked_chunk = YAMLChunk ( deepcopy ( self . _ruamelparsed ) , pointer = self . pointer , label = self . label , key_association = copy ( self . _key_association ) , ) forked_chunk . contents [ self . ruamelindex ( strictindex ) ] = new_value . as_marked_up ( ) forked_chunk . strictparsed ( ) [ strictindex ] = deepcopy ( new_value . as_marked_up ( ) ) return forked_chunk | Return a chunk referring to the same location in a duplicated document . |
17,362 | def make_child_of ( self , chunk ) : if self . is_mapping ( ) : for key , value in self . contents . items ( ) : self . key ( key , key ) . pointer . make_child_of ( chunk . pointer ) self . val ( key ) . make_child_of ( chunk ) elif self . is_sequence ( ) : for index , item in enumerate ( self . contents ) : self . index ( index ) . make_child_of ( chunk ) else : self . pointer . make_child_of ( chunk . pointer ) | Link one YAML chunk to another . |
17,363 | def _select ( self , pointer ) : return YAMLChunk ( self . _ruamelparsed , pointer = pointer , label = self . _label , strictparsed = self . _strictparsed , key_association = copy ( self . _key_association ) , ) | Get a YAMLChunk referenced by a pointer . |
17,364 | def index ( self , strictindex ) : return self . _select ( self . _pointer . index ( self . ruamelindex ( strictindex ) ) ) | Return a chunk in a sequence referenced by index . |
17,365 | def ruamelindex ( self , strictindex ) : return ( self . key_association . get ( strictindex , strictindex ) if self . is_mapping ( ) else strictindex ) | Get the ruamel equivalent of a strict parsed index . |
17,366 | def val ( self , strictkey ) : ruamelkey = self . ruamelindex ( strictkey ) return self . _select ( self . _pointer . val ( ruamelkey , strictkey ) ) | Return a chunk referencing a value in a mapping with the key key . |
17,367 | def key ( self , key , strictkey = None ) : return self . _select ( self . _pointer . key ( key , strictkey ) ) | Return a chunk referencing a key in a mapping with the name key . |
17,368 | def textslice ( self , start , end ) : return self . _select ( self . _pointer . textslice ( start , end ) ) | Return a chunk referencing a slice of a scalar text value . |
17,369 | def flatten ( items ) : for x in items : if isinstance ( x , Iterable ) and not isinstance ( x , ( str , bytes ) ) : for sub_x in flatten ( x ) : yield sub_x else : yield x | Yield items from any nested iterable . |
17,370 | def comma_separated_positions ( text ) : chunks = [ ] start = 0 end = 0 for item in text . split ( "," ) : space_increment = 1 if item [ 0 ] == " " else 0 start += space_increment end += len ( item . lstrip ( ) ) + space_increment chunks . append ( ( start , end ) ) start += len ( item . lstrip ( ) ) + 1 end = start return chunks | Start and end positions of comma separated text items . |
17,371 | def ruamel_structure ( data , validator = None ) : if isinstance ( data , dict ) : if len ( data ) == 0 : raise exceptions . CannotBuildDocumentsFromEmptyDictOrList ( "Document must be built with non-empty dicts and lists" ) return CommentedMap ( [ ( ruamel_structure ( key ) , ruamel_structure ( value ) ) for key , value in data . items ( ) ] ) elif isinstance ( data , list ) : if len ( data ) == 0 : raise exceptions . CannotBuildDocumentsFromEmptyDictOrList ( "Document must be built with non-empty dicts and lists" ) return CommentedSeq ( [ ruamel_structure ( item ) for item in data ] ) elif isinstance ( data , bool ) : return u"yes" if data else u"no" elif isinstance ( data , ( int , float ) ) : return str ( data ) else : if not is_string ( data ) : raise exceptions . CannotBuildDocumentFromInvalidData ( ( "Document must be built from a combination of:\n" "string, int, float, bool or nonempty list/dict\n\n" "Instead, found variable with type '{}': '{}'" ) . format ( type ( data ) . __name__ , data ) ) return data | Take dicts and lists and return a ruamel . yaml style structure of CommentedMaps CommentedSeqs and data . |
17,372 | def rbdd ( * keywords ) : settings = _personal_settings ( ) . data settings [ "engine" ] [ "rewrite" ] = True _storybook ( settings [ "engine" ] ) . with_params ( ** { "python version" : settings [ "params" ] [ "python version" ] } ) . only_uninherited ( ) . shortcut ( * keywords ) . play ( ) | Run story matching keywords and rewrite story if code changed . |
17,373 | def rerun ( version = "3.7.0" ) : from commandlib import Command Command ( DIR . gen . joinpath ( "py{0}" . format ( version ) , "bin" , "python" ) ) ( DIR . gen . joinpath ( "state" , "examplepythoncode.py" ) ) . in_dir ( DIR . gen . joinpath ( "state" ) ) . run ( ) | Rerun last example code block with specified version of python . |
17,374 | def data ( self ) : if isinstance ( self . _value , CommentedMap ) : mapping = OrderedDict ( ) for key , value in self . _value . items ( ) : mapping [ key . data ] = value . data return mapping elif isinstance ( self . _value , CommentedSeq ) : return [ item . data for item in self . _value ] else : return self . _value | Returns raw data representation of the document or document segment . |
17,375 | def as_yaml ( self ) : dumped = dump ( self . as_marked_up ( ) , Dumper = StrictYAMLDumper , allow_unicode = True ) return dumped if sys . version_info [ 0 ] == 3 else dumped . decode ( "utf8" ) | Render the YAML node and subnodes as string . |
17,376 | def text ( self ) : if isinstance ( self . _value , CommentedMap ) : raise TypeError ( "{0} is a mapping, has no text value." . format ( repr ( self ) ) ) if isinstance ( self . _value , CommentedSeq ) : raise TypeError ( "{0} is a sequence, has no text value." . format ( repr ( self ) ) ) return self . _text | Return string value of scalar whatever value it was parsed as . |
17,377 | def partition_source ( src ) : ast_obj = ast . parse ( src . encode ( 'UTF-8' ) ) visitor = TopLevelImportVisitor ( ) visitor . visit ( ast_obj ) line_offsets = get_line_offsets_by_line_no ( src ) chunks = [ ] startpos = 0 pending_chunk_type = None possible_ending_tokens = None seen_import = False for ( token_type , token_text , ( srow , scol ) , ( erow , ecol ) , _ , ) in tokenize . generate_tokens ( io . StringIO ( src ) . readline ) : if pending_chunk_type is None : if not seen_import and token_type == tokenize . COMMENT : if 'noreorder' in token_text : chunks . append ( CodePartition ( CodeType . CODE , src [ startpos : ] ) ) break else : pending_chunk_type = CodeType . PRE_IMPORT_CODE possible_ending_tokens = TERMINATES_COMMENT elif not seen_import and token_type == tokenize . STRING : pending_chunk_type = CodeType . PRE_IMPORT_CODE possible_ending_tokens = TERMINATES_DOCSTRING elif scol == 0 and srow in visitor . top_level_import_line_numbers : seen_import = True pending_chunk_type = CodeType . IMPORT possible_ending_tokens = TERMINATES_IMPORT elif token_type == tokenize . NL : endpos = line_offsets [ erow ] + ecol srctext = src [ startpos : endpos ] startpos = endpos chunks . append ( CodePartition ( CodeType . NON_CODE , srctext ) ) elif token_type == tokenize . COMMENT : if 'noreorder' in token_text : chunks . append ( CodePartition ( CodeType . CODE , src [ startpos : ] ) ) break else : pending_chunk_type = CodeType . CODE possible_ending_tokens = TERMINATES_COMMENT elif token_type == tokenize . ENDMARKER : pass else : chunks . append ( CodePartition ( CodeType . CODE , src [ startpos : ] ) ) break elif token_type in possible_ending_tokens : endpos = line_offsets [ erow ] + ecol srctext = src [ startpos : endpos ] startpos = endpos chunks . append ( CodePartition ( pending_chunk_type , srctext ) ) pending_chunk_type = None possible_ending_tokens = None elif token_type == tokenize . COMMENT and 'noreorder' in token_text : chunks . append ( CodePartition ( CodeType . CODE , src [ startpos : ] ) ) break chunks = [ chunk for chunk in chunks if chunk . src ] assert _partitions_to_src ( chunks ) == src return chunks | Partitions source into a list of CodePartition s for import refactoring . |
def separate_comma_imports(partitions):
    """Turn `import a, b` partitions into separate `import a` and `import b` ones.

    Non-import partitions pass through unchanged; so do imports that name a
    single module.
    """
    result = []
    for partition in partitions:
        if partition.code_type is not CodeType.IMPORT:
            result.append(partition)
            continue
        import_obj = import_obj_from_str(partition.src)
        if import_obj.has_multiple_imports:
            result.extend(
                CodePartition(CodeType.IMPORT, split_obj.to_text())
                for split_obj in import_obj.split_imports()
            )
        else:
            result.append(partition)
    return result
def _module_to_base_modules(s):
    """Yield every parent module implicitly imported by importing *s*.

    For example, 'a.b.c' yields 'a' then 'a.b' (but not 'a.b.c' itself).
    """
    segments = s.split('.')
    prefix = segments[0]
    for segment in segments[1:]:
        yield prefix
        prefix = prefix + '.' + segment
17,380 | def apply_thresholds ( input , thresholds , choices ) : condlist = [ input <= threshold for threshold in thresholds ] if len ( condlist ) == len ( choices ) - 1 : condlist += [ True ] assert len ( condlist ) == len ( choices ) , "apply_thresholds must be called with the same number of thresholds than choices, or one more choice" return np . select ( condlist , choices ) | Return one of the choices depending on the input position compared to thresholds for each input . |
def update(self, period=None, start=None, stop=None, value=None):
    """Change this parameter's value over a given time span.

    Call either ``update(period, value=...)`` or
    ``update(start=..., stop=..., value=...)``; mixing the two forms raises
    TypeError.  An open-ended span (no stop) overrides everything from
    `start` onwards.
    """
    if period is not None:
        if start is not None or stop is not None:
            raise TypeError("Wrong input for 'update' method: use either 'update(period, value = value)' or 'update(start = start, stop = stop, value = value)'. You cannot both use 'period' and 'start' or 'stop'.")
        if isinstance(period, str):
            period = periods.period(period)
        start = period.start
        stop = period.stop
    if start is None:
        raise ValueError("You must provide either a start or a period")
    start_str = str(start)
    # Stored instants are interval starts, so the exclusive upper bound of
    # the updated span is the day after `stop`.
    stop_str = str(stop.offset(1, 'day')) if stop else None
    old_values = self.values_list
    new_values = []
    n = len(old_values)
    i = 0
    # values_list is ordered from newest to oldest: first keep every value
    # that starts at or after the end of the updated span.
    if stop_str:
        while (i < n) and (old_values[i].instant_str >= stop_str):
            new_values.append(old_values[i])
            i += 1
    if stop_str:
        if new_values and (stop_str == new_values[-1].instant_str):
            # A value already starts exactly at the bound; nothing to re-open.
            pass
        else:
            if i < n:
                # The span cuts an existing interval: re-open its value at
                # the end of the updated span.
                overlapped_value = old_values[i].value
                value_name = _compose_name(self.name, item_name=stop_str)
                new_interval = ParameterAtInstant(value_name, stop_str, data={'value': overlapped_value})
                new_values.append(new_interval)
            else:
                # Nothing was defined after the span: mark it undefined.
                value_name = _compose_name(self.name, item_name=stop_str)
                new_interval = ParameterAtInstant(value_name, stop_str, data={'value': None})
                new_values.append(new_interval)
    # Insert the new value, effective from `start`.
    value_name = _compose_name(self.name, item_name=start_str)
    new_interval = ParameterAtInstant(value_name, start_str, data={'value': value})
    new_values.append(new_interval)
    # Drop every old value whose start falls inside the updated span...
    while (i < n) and (old_values[i].instant_str >= start_str):
        i += 1
    # ...then keep the remaining, older values untouched.
    while i < n:
        new_values.append(old_values[i])
        i += 1
    self.values_list = new_values
def merge(self, other):
    """Merge another ParameterNode into this node, child by child.

    Delegates to `add_child`, so name clashes raise the same errors it does.
    """
    for child_name, child_node in other.children.items():
        self.add_child(child_name, child_node)
def add_child(self, name, child):
    """Register *child* under *name* and expose it as an attribute of this node.

    Raises ValueError on a duplicate name, TypeError when *child* is not a
    ParameterNode, Parameter, or Scale.
    """
    if name in self.children:
        raise ValueError("{} has already a child named {}".format(self.name, name))
    if not isinstance(child, (ParameterNode, Parameter, Scale)):
        raise TypeError("child must be of type ParameterNode, Parameter, or Scale. Instead got {}".format(type(child)))
    self.children[name] = child
    setattr(self, name, child)
def replace_variable(self, variable):
    """Swap an existing OpenFisca variable for *variable* in the system.

    Any variable already registered under `variable.__name__` is dropped
    before the new one is loaded.
    """
    name = variable.__name__
    existing = self.variables.get(name)
    if existing is not None:
        del self.variables[name]
    self.load_variable(variable, update=False)
def add_variables_from_file(self, file_path):
    """Add every OpenFisca Variable subclass defined in *file_path* to the system."""
    try:
        file_name = path.splitext(path.basename(file_path))[0]
        # Module name made unique per system instance and per file so that
        # several systems can load same-named files without clashing.
        module_name = '{}_{}_{}'.format(id(self), hash(path.abspath(file_path)), file_name)
        module_directory = path.dirname(file_path)
        try:
            # NOTE(review): load_module/find_module come from the deprecated
            # `imp` machinery — confirm against the file's imports.
            module = load_module(module_name, *find_module(file_name, [module_directory]))
        except NameError as e:
            logging.error(str(e) + ": if this code used to work, this error might be due to a major change in OpenFisca-Core. Checkout the changelog to learn more: <https://github.com/openfisca/openfisca-core/blob/master/CHANGELOG.md>")
            raise
        # Keep only classes actually defined in this module (not imported
        # into it) that subclass Variable.
        potential_variables = [getattr(module, item) for item in dir(module) if not item.startswith('__')]
        for pot_variable in potential_variables:
            if isclass(pot_variable) and issubclass(pot_variable, Variable) and pot_variable.__module__ == module_name:
                self.add_variable(pot_variable)
    except Exception:
        # NOTE(review): `logging.error` above vs `log.error` here —
        # presumably the same logger; verify which one is intended.
        log.error('Unable to load OpenFisca variables from file "{}"'.format(file_path))
        raise
def add_variables_from_directory(self, directory):
    """Recursively walk *directory*, adding every OpenFisca variable found.

    Loads each `*.py` file at this level, then recurses into each
    subdirectory.
    """
    for module_path in glob.glob(path.join(directory, "*.py")):
        self.add_variables_from_file(module_path)
    for subdirectory in glob.glob(path.join(directory, "*/")):
        self.add_variables_from_directory(subdirectory)
def load_extension(self, extension):
    """Load an extension package's variables and parameters into the system.

    Raises ValueError (with the import traceback embedded) when *extension*
    cannot be imported as a package.
    """
    try:
        package = importlib.import_module(extension)
    except ImportError:
        message = linesep.join([
            traceback.format_exc(),
            'Error loading extension: `{}` is neither a directory, nor a package.'.format(extension),
            'Are you sure it is installed in your environment? If so, look at the stack trace above to determine the origin of this error.',
            'See more at <https://github.com/openfisca/openfisca-extension-template#installing>.',
        ])
        raise ValueError(message)
    extension_directory = package.__path__[0]
    self.add_variables_from_directory(extension_directory)
    # Merge the extension's parameters, if it ships any.
    param_dir = path.join(extension_directory, 'parameters')
    if path.isdir(param_dir):
        extension_parameters = ParameterNode(directory_path=param_dir)
        self.parameters.merge(extension_parameters)
def apply_reform(self, reform_path):
    """Return a new tax and benefit system: this one with the reform at *reform_path* applied.

    *reform_path* is a dotted path whose last segment names a Reform
    subclass, e.g. `some_country_package.reforms.some_reform`.
    """
    from openfisca_core.reforms import Reform
    try:
        package_path, reform_name = reform_path.rsplit('.', 1)
    except ValueError:
        raise ValueError('`{}` does not seem to be a path pointing to a reform. A path looks like `some_country_package.reforms.some_reform.`'.format(reform_path))
    try:
        reform_module = importlib.import_module(package_path)
    except ImportError:
        message = linesep.join([
            traceback.format_exc(),
            'Could not import `{}`.'.format(package_path),
            'Are you sure of this reform module name? If so, look at the stack trace above to determine the origin of this error.',
        ])
        raise ValueError(message)
    reform_class = getattr(reform_module, reform_name, None)
    if reform_class is None:
        raise ValueError('{} has no attribute {}'.format(package_path, reform_name))
    if not issubclass(reform_class, Reform):
        raise ValueError('`{}` does not seem to be a valid Openfisca reform.'.format(reform_path))
    return reform_class(self)
def get_variable(self, variable_name, check_existence=False):
    """Look up a variable by name.

    Returns None for an unknown name unless *check_existence* is True, in
    which case VariableNotFound is raised.
    """
    variable = self.variables.get(variable_name)
    if not variable and check_existence:
        raise VariableNotFound(variable_name, self)
    return variable
def neutralize_variable(self, variable_name):
    """Replace the variable named *variable_name* with its neutralized version."""
    variable = self.get_variable(variable_name)
    self.variables[variable_name] = get_neutralized_variable(variable)
def load_parameters(self, path_to_yaml_dir):
    """Load legislation parameters from a directory of YAML parameter files.

    Runs the system's `preprocess_parameters` hook, when one is defined,
    before storing the parameter tree.
    """
    root = ParameterNode('', directory_path=path_to_yaml_dir)
    preprocess = self.preprocess_parameters
    if preprocess is not None:
        root = preprocess(root)
    self.parameters = root
def get_parameters_at_instant(self, instant):
    """Return the legislation parameters as of *instant*, memoized per instant.

    *instant* may be a periods.Period (its start is used), a str/int
    convertible by periods.instant, or a periods.Instant.
    """
    # Normalize the argument to a periods.Instant.
    if isinstance(instant, periods.Period):
        instant = instant.start
    elif isinstance(instant, (str, int)):
        instant = periods.instant(instant)
    else:
        assert isinstance(instant, periods.Instant), "Expected an Instant (e.g. Instant((2017, 1, 1)) ). Got: {}.".format(instant)
    cached = self._parameters_at_instant_cache.get(instant)
    if cached is None and self.parameters is not None:
        cached = self.parameters.get_at_instant(str(instant))
        self._parameters_at_instant_cache[instant] = cached
    return cached
def get_package_metadata(self):
    """Return metadata (name, version, repository_url, location) of the country package this system was built from."""
    # A reformed system delegates to its baseline's package metadata.
    if self.baseline:
        return self.baseline.get_package_metadata()
    # Used when the enclosing package or its distribution cannot be found.
    fallback_metadata = {
        'name': self.__class__.__name__,
        'version': '',
        'repository_url': '',
        'location': '',
    }
    module = inspect.getmodule(self)
    if not module.__package__:
        return fallback_metadata
    package_name = module.__package__.split('.')[0]
    try:
        distribution = pkg_resources.get_distribution(package_name)
    except pkg_resources.DistributionNotFound:
        return fallback_metadata
    # Directory the package is installed under (everything before the
    # package name in the source file path).
    location = inspect.getsourcefile(module).split(package_name)[0].rstrip('/')
    # Extract the Home-page field from the distribution's PKG-INFO.
    # NOTE(review): `_get_metadata` is a private pkg_resources API and may
    # break on a setuptools upgrade — confirm.
    home_page_metadatas = [
        metadata.split(':', 1)[1].strip(' ')
        for metadata in distribution._get_metadata(distribution.PKG_INFO)
        if 'Home-page' in metadata
    ]
    repository_url = home_page_metadatas[0] if home_page_metadatas else ''
    return {
        'name': distribution.key,
        'version': distribution.version,
        'repository_url': repository_url,
        'location': location,
    }
def get_variables(self, entity=None):
    """Return all variables of the system, optionally restricted to *entity*.

    Without an entity, the full variables mapping is returned as-is.
    """
    if not entity:
        return self.variables
    wanted_key = entity.key
    return {
        name: variable
        for name, variable in self.variables.items()
        if variable.entity.key == wanted_key
    }
def build_from_dict(self, tax_benefit_system, input_dict):
    """Build a simulation from *input_dict*.

    Dispatches to `build_from_entities` when the input names entities
    explicitly, to `build_from_variables` otherwise.
    """
    input_dict = self.explicit_singular_entities(tax_benefit_system, input_dict)
    known_plurals = tax_benefit_system.entities_plural()
    if any(key in known_plurals for key in input_dict):
        return self.build_from_entities(tax_benefit_system, input_dict)
    return self.build_from_variables(tax_benefit_system, input_dict)
def build_from_entities(self, tax_benefit_system, input_dict):
    """Build a simulation from a dict that fully specifies entities.

    Validates the entity keys, populates person and group entities, expands
    axes if present, and finalizes variable initialization.
    """
    # Deep-copied so the caller's dict is never mutated by the pops below.
    input_dict = deepcopy(input_dict)
    simulation = Simulation(tax_benefit_system, tax_benefit_system.instantiate_entities())
    for (variable_name, _variable) in tax_benefit_system.variables.items():
        self.register_variable(variable_name, simulation.get_variable_population(variable_name).entity)
    check_type(input_dict, dict, ['error'])
    # 'axes' is meta-information, not an entity: pull it out before checking keys.
    axes = input_dict.pop('axes', None)
    # Reject keys that are not entity plurals of the loaded system.
    unexpected_entities = [entity for entity in input_dict if entity not in tax_benefit_system.entities_plural()]
    if unexpected_entities:
        unexpected_entity = unexpected_entities[0]
        raise SituationParsingError([unexpected_entity],
            ''.join([
                "Some entities in the situation are not defined in the loaded tax and benefit system.",
                "These entities are not found: {0}.",
                "The defined entities are: {1}."])
            .format(
                ', '.join(unexpected_entities),
                ', '.join(tax_benefit_system.entities_plural())
            )
        )
    persons_json = input_dict.get(tax_benefit_system.person_entity.plural, None)
    if not persons_json:
        raise SituationParsingError([tax_benefit_system.person_entity.plural],
            'No {0} found. At least one {0} must be defined to run a simulation.'.format(tax_benefit_system.person_entity.key))
    persons_ids = self.add_person_entity(simulation.persons.entity, persons_json)
    # Group entities default to one-per-person when absent from the input.
    for entity_class in tax_benefit_system.group_entities:
        instances_json = input_dict.get(entity_class.plural)
        if instances_json is not None:
            self.add_group_entity(self.persons_plural, persons_ids, entity_class, instances_json)
        else:
            self.add_default_group_entity(persons_ids, entity_class)
    if axes:
        self.axes = axes
        self.expand_axes()
    try:
        self.finalize_variables_init(simulation.persons)
    except PeriodMismatchError as e:
        self.raise_period_mismatch(simulation.persons.entity, persons_json, e)
    for entity_class in tax_benefit_system.group_entities:
        try:
            population = simulation.populations[entity_class.key]
            self.finalize_variables_init(population)
        except PeriodMismatchError as e:
            # NOTE(review): `instances_json` here is left over from the
            # earlier loop (the last group entity processed), so the error
            # may cite another entity's input — looks like a bug; confirm.
            self.raise_period_mismatch(population.entity, instances_json, e)
    return simulation
def build_from_variables(self, tax_benefit_system, input_dict):
    """Build a simulation from variable values alone, without explicit entities.

    Each value is either a {period: value} mapping, or a bare value applied
    to the builder's default period (an error when no default is set).
    """
    person_count = _get_person_count(input_dict)
    simulation = self.build_default_simulation(tax_benefit_system, person_count)
    for variable_name, variable_value in input_dict.items():
        if isinstance(variable_value, dict):
            for period_str, dated_value in variable_value.items():
                simulation.set_input(variable_name, period_str, dated_value)
        else:
            if self.default_period is None:
                raise SituationParsingError([variable_name], "Can't deal with type: expected object. Input variables should be set for specific periods. For instance: {'salary': {'2017-01': 2000, '2017-02': 2500}}, or {'birth_date': {'ETERNITY': '1980-01-01'}}.")
            simulation.set_input(variable_name, self.default_period, variable_value)
    return simulation
def explicit_singular_entities(self, tax_benefit_system, input_dict):
    """Rewrite single-entity shortcuts into their plural form.

    For example {'person': X} becomes {'persons': {'person': X}}; input with
    no singular keys is returned unchanged.
    """
    by_singular = tax_benefit_system.entities_by_singular()
    singular_keys = set(input_dict).intersection(by_singular)
    if not singular_keys:
        return input_dict
    plurals = tax_benefit_system.entities_plural()
    result = {key: value for key, value in input_dict.items() if key in plurals}
    for singular in singular_keys:
        plural = by_singular[singular].plural
        result[plural] = {singular: input_dict[singular]}
    return result
def add_person_entity(self, entity, instances_json):
    """Register the person instances described in *instances_json*; return their ids.

    Records the person entity's plural, ids, and count on the builder, then
    initializes each instance's variable values.
    """
    plural = entity.plural
    check_type(instances_json, dict, [plural])
    entity_ids = [str(instance_id) for instance_id in instances_json.keys()]
    self.persons_plural = plural
    self.entity_ids[plural] = entity_ids
    self.entity_counts[plural] = len(entity_ids)
    for instance_id, instance_object in instances_json.items():
        check_type(instance_object, dict, [plural, instance_id])
        self.init_variable_values(entity, instance_object, str(instance_id))
    return self.get_ids(plural)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.