idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
236,100
def play(self, sox_effects=()):
    """Play this speech segment's audio through a `sox` subprocess.

    Trims a little audio from both ends (to remove clicks), then applies
    any extra effects.

    Args:
        sox_effects: optional iterable of additional sox effect arguments.

    Raises:
        RuntimeError: if the sox player exits with a non-zero status.
    """
    audio_data = self.getAudioData()
    logging.getLogger().info("Playing speech segment (%s): '%s'" % (self.lang, self))
    cmd = ["sox", "-q", "-t", "mp3", "-"]
    if sys.platform.startswith("win32"):
        # explicit output driver needed on Windows
        cmd.extend(("-t", "waveaudio"))
    # trim a bit from head and tail to avoid audible clicks
    cmd.extend(("-d", "trim", "0.1", "reverse", "trim", "0.07", "reverse"))  # "trim", "0.25", "-0.1"
    cmd.extend(sox_effects)
    logging.getLogger().debug("Start player process")
    p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.DEVNULL)
    p.communicate(input=audio_data)
    if p.returncode != 0:
        # BUGFIX: original raised a bare RuntimeError() with no context
        raise RuntimeError("sox player failed with return code %d" % p.returncode)
    logging.getLogger().debug("Done playing")
Play the segment .
257
4
236,101
def buildUrl(self, cache_friendly=False):
    """Build the TTS API URL for this speech segment.

    Args:
        cache_friendly: accepted for interface compatibility; unused here.

    Returns:
        Full URL string with ordered query parameters.
    """
    params = collections.OrderedDict()
    params["client"] = "tw-ob"
    params["ie"] = "UTF-8"
    params["idx"] = str(self.segment_num)
    if self.segment_count is not None:
        params["total"] = str(self.segment_count)
    params["textlen"] = str(len(self.text))
    params["tl"] = self.lang
    # lowercased text is sent as the query payload
    # (presumably so identical segments share a cache entry — TODO confirm)
    params["q"] = self.text.lower()
    return "%s?%s" % (__class__.BASE_URL, urllib.parse.urlencode(params))
Construct the URL to get the sound from the Google API.
166
12
236,102
def download(self, url):
    """Download a sound file and return its raw bytes.

    Raises on non-2xx HTTP status (via raise_for_status; session is
    presumably a requests.Session — confirm against class definition).
    """
    logging.getLogger().debug("Downloading '%s'..." % (url))
    response = __class__.session.get(url,
                                     headers={"User-Agent": "Mozilla/5.0"},
                                     timeout=3.1)
    response.raise_for_status()
    return response.content
Download a sound file .
82
5
236,103
def to_param_dict(self):
    """Flatten the list of dicts in ``self.value`` into indexed parameter
    names of the form ``name[index][key]``.

    Returns:
        OrderedDict sorted by parameter name (sorted so ordering is
        deterministic for testing).
    """
    flattened = {}
    for idx, entry in enumerate(self.value):
        for field, field_value in entry.items():
            name = '{param_name}[{index}][{key}]'.format(
                param_name=self.param_name, index=idx, key=field)
            flattened[name] = field_value
    return OrderedDict(sorted(flattened.items()))
Sorts to ensure Order is consistent for Testing
107
9
236,104
def _get ( cls , kwarg_name ) : param_classes = cls . _discover_params ( ) try : param_class = param_classes [ kwarg_name ] except KeyError : raise ValueError ( 'invalid param keyword {}' . format ( kwarg_name ) ) else : return param_class
Returns a Param Class Instance by its kwarg or param name
75
14
236,105
def _process_params(self, params):
    """Process param names/values into API request parameters.

    Each keyword is resolved to its Param class via AirtableParams._get
    and rendered with to_param_dict; results are merged in sorted-key
    order so output is deterministic.
    """
    new_params = OrderedDict()
    for param_name, param_value in sorted(params.items()):
        # FIX: dropped the original's redundant re-read of
        # params[param_name]; param_value is already bound by items()
        ParamClass = AirtableParams._get(param_name)
        new_params.update(ParamClass(param_value).to_param_dict())
    return new_params
Process params names or values as needed using filters
91
9
236,106
def _batch_request ( self , func , iterable ) : responses = [ ] for item in iterable : responses . append ( func ( item ) ) time . sleep ( self . API_LIMIT ) return responses
Internal Function to limit batch calls to API limit
47
9
236,107
def _add_members ( self , catmembers ) : members = [ x for x in catmembers if x [ 'ns' ] == 0 ] subcats = [ x for x in catmembers if x [ 'ns' ] == 14 ] if 'members' in self . data : self . data [ 'members' ] . extend ( members ) else : self . data . update ( { 'members' : members } ) if subcats : if 'subcategories' in self . data : self . data [ 'subcategories' ] . extend ( subcats ) else : self . data . update ( { 'subcategories' : subcats } )
Adds category members and subcategories to data
139
9
236,108
def _query ( self , action , qobj ) : title = self . params . get ( 'title' ) pageid = self . params . get ( 'pageid' ) if action == 'random' : return qobj . random ( namespace = 14 ) elif action == 'category' : return qobj . category ( title , pageid , self . _continue_params ( ) )
Form query to enumerate category
83
6
236,109
def _set_data ( self , action ) : data = self . _load_response ( action ) self . _handle_continuations ( data , 'category' ) if action == 'category' : members = data . get ( 'query' ) . get ( 'categorymembers' ) if members : self . _add_members ( members ) if action == 'random' : rand = data [ 'query' ] [ 'random' ] [ 0 ] data = { 'pageid' : rand . get ( 'id' ) , 'title' : rand . get ( 'title' ) } self . data . update ( data ) self . params . update ( data )
Set category member data from API response
142
7
236,110
def _sitelist ( self , matrix ) : _list = [ ] for item in matrix : sites = [ ] if isinstance ( matrix [ item ] , list ) : sites = matrix [ item ] elif isinstance ( matrix [ item ] , dict ) : sites = matrix [ item ] [ 'site' ] for site in sites : if len ( site . keys ( ) ) > 4 : # closed, fishbowl, private continue domain = self . params . get ( 'domain' ) if domain : if domain in site [ 'url' ] : _list . append ( site [ 'url' ] ) else : _list . append ( site [ 'url' ] ) return _list
Returns a list of sites from a SiteMatrix optionally filtered by domain param
145
14
236,111
def handle_wikidata_errors(data, query):
    """Raise LookupError when the Wikidata response indicates an error
    (no entities, a '-1' entity, or a deleted item)."""
    entities = data.get('entities')
    if not entities or '-1' in entities:
        raise LookupError(query)
    item = list(entities.values())[0]
    if 'missing' in item:
        raise LookupError("wikidata item %s has been deleted" % item['id'])
Raises LookupError if wikidata error found
103
11
236,112
def prettyprint(datastr):
    """Print page data strings to stderr, truncating lines at the
    configured maximum width with an ellipsis."""
    maxwidth = WPToolsQuery.MAXWIDTH
    rpad = WPToolsQuery.RPAD
    extent = maxwidth - (rpad + 2)
    for line in datastr:
        if len(line) >= maxwidth:
            line = line[:extent] + '...'
        utils.stderr(line)
Print page data strings to stderr
78
8
236,113
def _continue_params ( self ) : if not self . data . get ( 'continue' ) : return params = [ ] for item in self . data [ 'continue' ] : params . append ( "&%s=%s" % ( item , self . data [ 'continue' ] [ item ] ) ) return '' . join ( params )
Returns query string fragment continue parameters
75
6
236,114
def _handle_continuations ( self , response , cache_key ) : rcontinue = response . get ( 'continue' ) listen = [ 'blcontinue' , 'cmcontinue' , 'plcontinue' ] cparams = { } if rcontinue : for flag in listen : if rcontinue . get ( flag ) : cparams [ flag ] = rcontinue . get ( flag ) if cparams : self . data [ 'continue' ] = cparams del self . cache [ cache_key ] else : # no more continuations if 'continue' in self . data : del self . data [ 'continue' ]
Select continue params and clear cache or last continue params
130
10
236,115
def _get(self, action, show, proxy, timeout):
    """Make an HTTP request for ``action`` and cache the response.

    Skips actions already cached (except imageinfo/labels, which may be
    requested repeatedly) and actions listed in the 'skip' flag.

    Raises:
        StopIteration: when REQUEST_LIMIT requests have been made.
            NOTE(review): StopIteration is an odd signal and is hazardous
            inside generators on Python 3.7+ — confirm intent.
    """
    silent = self.flags['silent']
    if action in self.cache:
        if action != 'imageinfo' and action != 'labels':
            utils.stderr("+ %s results in cache" % action, silent)
            return
    else:
        self.cache[action] = {}
    skipped = self.flags.get('skip')
    if skipped and action in skipped:
        if not self.flags['silent']:
            utils.stderr("+ skipping %s" % action)
        return
    if 'requests' not in self.data:
        self.data['requests'] = []
    if len(self.data['requests']) >= self.REQUEST_LIMIT:
        raise StopIteration("Hit REQUEST_LIMIT = %d" % self.REQUEST_LIMIT)
    if self.data['requests'] and self.REQUEST_DELAY:
        utils.stderr("REQUEST_DELAY = %d seconds" % self.REQUEST_DELAY)
        sleep(self.REQUEST_DELAY)
    # make the request
    qobj = WPToolsQuery(lang=self.params['lang'],
                        variant=self.params.get('variant'),
                        wiki=self.params.get('wiki'),
                        endpoint=self.params.get('endpoint'))
    qstr = self._query(action, qobj)
    req = self._request(proxy, timeout)
    response = req.get(qstr, qobj.status)
    entry = self.cache[action]
    entry['query'] = qstr
    entry['response'] = response
    entry['info'] = req.info
    self.data['requests'].append(action)
    self._set_data(action)
    if show and not self.flags.get('silent'):
        self.show()
make HTTP request and cache response
451
6
236,116
def _load_response(self, action):
    """Return the parsed API response for ``action`` from cache.

    Raises:
        ValueError: empty or unparsable response.
        LookupError: API error, missing page, or wikidata error.
    """
    _query = self.cache[action]['query'].replace('&format=json', '')
    response = self.cache[action]['response']
    if not response:
        raise ValueError("Empty response: %s" % self.params)
    try:
        data = utils.json_loads(response)
    except ValueError:
        raise ValueError(_query)
    warnings = data.get('warnings')
    if warnings:
        if 'WARNINGS' in self.data:
            self.data['WARNINGS'].update(warnings)
        else:
            self.data['WARNINGS'] = warnings
    if data.get('error'):
        utils.stderr("API error: %s" % data.get('error'))
        raise LookupError(_query)
    if 'query' in action and data.get('query'):
        pages = data['query'].get('pages')
        if pages and pages[0].get('missing'):
            raise LookupError(_query)
    if action == 'parse' and not data.get('parse'):
        raise LookupError(_query)
    if action == 'wikidata':
        handle_wikidata_errors(data, _query)
    return data
returns API response from cache or raises ValueError
307
11
236,117
def _request(self, proxy, timeout):
    """Return a WPToolsRequest configured from the instance flags."""
    return request.WPToolsRequest(self.flags['silent'],
                                  self.flags['verbose'],
                                  proxy, timeout)
Returns WPToolsRequest object
43
6
236,118
def info(self, action=None):
    """Return cached request info for ``action``, or the cached action
    keys when no action is given (None when the cache is empty)."""
    if action in self.cache:
        return self.cache[action]['info']
    # fall back to listing which actions have cached results
    return self.cache.keys() or None
returns cached request info for given action or list of cached actions
39
13
236,119
def show(self):
    """Pretty-print instance data to stderr.

    Skipped while a continuation is pending or when there is no data.
    """
    if not self.data:
        return
    if self.data.get('continue'):
        return
    ptitle = self.params.get('title')
    dtitle = self.data.get('title')
    pageid = self.params.get('pageid')
    seed = dtitle or ptitle or pageid
    if utils.is_text(seed):
        seed = seed.replace('_', ' ')
    prettyprint(self._build_showstr(seed))
Pretty - print instance data
114
5
236,120
def _get_entity_prop ( self , entity , prop ) : variant = self . params . get ( 'variant' ) lang = self . params . get ( 'lang' ) if entity . get ( prop ) : ent = entity [ prop ] try : return ent [ variant or lang ] . get ( 'value' ) except AttributeError : return ent . get ( 'value' )
returns Wikidata entity property value
84
8
236,121
def _marshal_claims(self, query_claims):
    """Set Wikidata claims and collect the entity IDs needing labels."""
    claims = reduce_claims(query_claims)
    # self.data['claimq'] = query_claims
    self.data['claims'] = claims
    entities = set()
    for prop_id in claims:
        if self.user_labels:
            if prop_id in self.user_labels or prop_id == 'P31':
                entities.add(prop_id)  # P (property)
            else:
                continue  # get only wanted entities
        else:
            entities.add(prop_id)  # P (property)
        for val in claims[prop_id]:
            if utils.is_text(val) and re.match(r'^Q\d+$', val):
                entities.add(val)  # Q (item)
    self.data['entities'] = list(entities)
set Wikidata entities from query claims
187
8
236,122
def _pop_entities ( self , limit = 50 ) : pop = self . data [ 'entities' ] [ : limit ] del self . data [ 'entities' ] [ : limit ] return pop
returns up to limit entities and pops them off the list
45
12
236,123
def _query ( self , action , qobj ) : if action == 'labels' : return qobj . labels ( self . _pop_entities ( ) ) elif action == 'wikidata' : return qobj . wikidata ( self . params . get ( 'title' ) , self . params . get ( 'wikibase' ) )
returns wikidata query string
78
7
236,124
def _set_title ( self , item ) : title = None lang = self . params [ 'lang' ] label = self . data [ 'label' ] if item . get ( 'sitelinks' ) : for link in item [ 'sitelinks' ] : if link == "%swiki" % lang : title = item [ 'sitelinks' ] [ link ] [ 'title' ] self . data [ 'title' ] = title . replace ( ' ' , '_' ) if not self . data . get ( 'title' ) and label : self . data [ 'title' ] = label . replace ( ' ' , '_' ) if self . data . get ( 'title' ) and not self . params . get ( 'title' ) : self . params [ 'title' ] = self . data [ 'title' ]
attempt to set title from wikidata
182
9
236,125
def _update_images ( self ) : wd_images = self . data [ 'claims' ] . get ( 'P18' ) # image if wd_images : if not isinstance ( wd_images , list ) : wd_images = [ wd_images ] if 'image' not in self . data : self . data [ 'image' ] = [ ] for img_file in wd_images : self . data [ 'image' ] . append ( { 'file' : img_file , 'kind' : 'wikidata-image' } )
add images from Wikidata
127
6
236,126
def _update_wikidata(self):
    """Build self.data['wikidata'] from claims and their labels.

    Property and item IDs are decorated as "label (ID)"; single-valued
    claims are stored as a scalar, multi-valued as a list.
    """
    claims = self.data['claims']
    for ent in claims:
        plabel = self.data['labels'].get(ent)  # P (property) label
        if plabel:
            plabel = "%s (%s)" % (plabel, ent)
        claim = []
        for item in claims[ent]:
            ilabel = item
            if utils.is_text(item) and re.match(r'^Q\d+$', item):
                ilabel = self.data['labels'].get(item)  # Q (item) label
                if ilabel:
                    ilabel = "%s (%s)" % (ilabel, item)
            if len(claims[ent]) == 1:
                claim = ilabel
            else:
                claim.append(ilabel)
        # NOTE(review): ilabel holds the LAST item's label here; an empty
        # claim list would leave it unbound — confirm claims are non-empty
        if plabel and ilabel:
            self.data['wikidata'][plabel] = claim
set wikidata from claims and labels
202
8
236,127
def _html_image(page):
    """Return an HTML <img> tag for the page image, or None."""
    source = _image(page)
    if not source:
        return
    alt = page.data.get('label') or page.data.get('title')
    return ('<img src="%s" alt="%s" title="%s" '
            'align="right" width="240">' % (source, alt, alt))
returns HTML img tag
90
5
236,128
def _html_title ( page ) : link = "<a href=\"%s\">%s</a>" % ( page . data . get ( 'url' ) , page . data . get ( 'title' ) ) desc = page . data . get ( 'description' ) if desc : link += "&mdash;<i>%s</i>" % desc else : link += "&mdash;<i>description</i>" if link : return "<p>%s</p>" % link
returns Wiki - linked HTML title
109
7
236,129
def _page_html(page):
    """Assemble title, image, and extract into HTML output."""
    pieces = [_html_title(page), _html_image(page), page.data.get('extract')]
    return "\n".join([part for part in pieces if part])
returns assembled HTML output
70
5
236,130
def _page_text(page, nowrap=False):
    """Assemble title, description, URL, extract, and image into a
    plain-text report, optionally without paragraph wrapping."""
    title = page.data['title']
    title = "%s\n%s" % (title, "=" * len(title))
    desc = page.data.get('description')
    if desc:
        desc = "_%s_" % desc
    img = _text_image(page)
    pars = page.data.get('extext')
    if pars:
        # pars = pars.replace(' * ', '\n * ')
        pars = re.sub(r'[ ]+\*[ ]+', '* ', pars)
    if pars and not nowrap:
        parlist = []
        for par in pars.split("\n\n"):
            parlist.append("\n".join(textwrap.wrap(par)))
        disambiguation = page.data.get('disambiguation')
        if disambiguation:
            parlist.append(' * ' + "\n * ".join(page.data.get('links')))
        pars = "\n\n".join(parlist)
    url = '<%s>' % page.data['url']
    txt = [title, desc, url, pars, img]
    return "\n\n".join([piece for piece in txt if piece])
returns assembled text output
321
5
236,131
def _safe_exit ( start , output ) : try : sys . stdout . write ( output ) sys . stdout . flush ( ) except TypeError : # python3 sys . stdout . write ( str ( output , 'utf-8' ) ) sys . stdout . flush ( ) except IOError : pass seconds = time . time ( ) - start print ( "\n\n%5.3f seconds" % ( seconds ) , file = sys . stderr )
exit without breaking pipes
103
4
236,132
def _text_image(page):
    """Return a markdown image link for the page image, or None."""
    alt = page.data.get('label') or page.data.get('title')
    source = _image(page)
    if source:
        return "![%s](%s)" % (alt, source)
    return None
returns text image URL
64
5
236,133
def get(args):
    """Invoke wptools for the given CLI args and assemble output.

    Returns a query string (when -q), "NOT_FOUND" on lookup failure, or
    the assembled text/HTML output (UTF-8 encoded when possible).
    """
    html = args.H
    lang = args.l
    nowrap = args.n
    query = args.q
    silent = args.s
    title = args.t
    verbose = args.v
    wiki = args.w
    if query:
        qobj = WPToolsQuery(lang=lang, wiki=wiki)
        if title:
            return qobj.query(title)
        return qobj.random()
    page = wptools.page(title, lang=lang, silent=silent,
                        verbose=verbose, wiki=wiki)
    try:
        page.get_query()
    # BUGFIX: was (StandardError, ValueError, LookupError) — StandardError
    # is a Python-2-only name and raises NameError on Python 3 when any
    # exception propagates here. Exception is its Python-3 superset.
    except Exception:
        return "NOT_FOUND"
    if not page.data.get('extext'):
        out = page.cache['query']['query']  # NOTE(review): overwritten below
    out = _page_text(page, nowrap)
    if html:
        out = _page_html(page)
    try:
        return out.encode('utf-8')
    except KeyError:
        return out
invoke wptools and assemble selected output
214
8
236,134
def main(args):
    """Invoke wptools with CLI args and exit safely."""
    start = time.time()
    _safe_exit(start, get(args))
invoke wptools and exit safely
28
7
236,135
def __insert_image_info ( self , title , _from , info ) : for img in self . data [ 'image' ] : if 'url' not in img : if title == img [ 'file' ] : # matching title/file img . update ( info ) elif _from == img [ 'file' ] : # matching from/file img . update ( info )
Insert API image INFO into matching image dict
82
8
236,136
def __pull_image_info ( self , title , imageinfo , normalized ) : for info in imageinfo : info . update ( { 'title' : title } ) # get API normalized "from" filename for matching _from = None for norm in normalized : if title == norm [ 'to' ] : _from = norm [ 'from' ] # let's put all "metadata" in one member info [ 'metadata' ] = { } extmetadata = info . get ( 'extmetadata' ) if extmetadata : info [ 'metadata' ] . update ( extmetadata ) del info [ 'extmetadata' ] self . __insert_image_info ( title , _from , info )
Pull image INFO from API response and insert
146
8
236,137
def _extend_data ( self , datapoint , new_data ) : if new_data : try : self . data [ datapoint ] . extend ( new_data ) except KeyError : self . data [ datapoint ] = new_data
extend or assign new data to datapoint
56
10
236,138
def _missing_imageinfo ( self ) : if 'image' not in self . data : return missing = [ ] for img in self . data [ 'image' ] : if 'url' not in img : missing . append ( img [ 'file' ] ) return list ( set ( missing ) )
returns list of image filenames that are missing info
64
12
236,139
def _query ( self , action , qobj ) : title = self . params . get ( 'title' ) pageid = self . params . get ( 'pageid' ) wikibase = self . params . get ( 'wikibase' ) qstr = None if action == 'random' : qstr = qobj . random ( ) elif action == 'query' : qstr = qobj . query ( title , pageid , self . _continue_params ( ) ) elif action == 'querymore' : qstr = qobj . querymore ( title , pageid , self . _continue_params ( ) ) elif action == 'parse' : qstr = qobj . parse ( title , pageid ) elif action == 'imageinfo' : qstr = qobj . imageinfo ( self . _missing_imageinfo ( ) ) elif action == 'labels' : qstr = qobj . labels ( self . _pop_entities ( ) ) elif action == 'wikidata' : qstr = qobj . wikidata ( title , wikibase ) elif action == 'restbase' : qstr = qobj . restbase ( self . params . get ( 'rest_endpoint' ) , title ) if qstr is None : raise ValueError ( "Unknown action: %s" % action ) return qstr
returns WPToolsQuery string
295
7
236,140
def _set_data ( self , action ) : if 'query' in action : self . _set_query_data ( action ) elif action == 'imageinfo' : self . _set_imageinfo_data ( ) elif action == 'parse' : self . _set_parse_data ( ) elif action == 'random' : self . _set_random_data ( ) elif action == 'labels' : self . _set_labels ( ) elif action == 'wikidata' : self . _set_wikidata ( ) self . get_labels ( ) elif action == 'restbase' : self . _set_restbase_data ( ) self . _update_imageinfo ( ) self . _update_params ( )
marshals response data into page data
168
8
236,141
def _set_parse_image(self, infobox):
    """Pull image and cover filenames from a parsed infobox into the
    image list."""
    image = infobox.get('image')
    cover = infobox.get('Cover') or infobox.get('cover')
    if not (image or cover):
        return
    if 'image' not in self.data:
        self.data['image'] = []
    if image and utils.isfilename(image):
        self.data['image'].append({'kind': 'parse-image', 'file': image})
    if cover and utils.isfilename(cover):
        self.data['image'].append({'kind': 'parse-cover', 'file': cover})
set image data from action = parse response
158
8
236,142
def _set_query_data_fast_1(self, page):
    """Set less-expensive action=query response data (part 1):
    pageid, assessments, extract, url, length, links, modified, and
    wikibase pageprops."""
    self.data['pageid'] = page.get('pageid')
    assessments = page.get('pageassessments')
    if assessments:
        self.data['assessments'] = assessments
    extract = page.get('extract')
    if extract:
        self.data['extract'] = extract
        extext = html2text.html2text(extract)
        if extext:
            self.data['extext'] = extext.strip()
    fullurl = page.get('fullurl')
    if fullurl:
        self.data['url'] = fullurl
        self.data['url_raw'] = fullurl + '?action=raw'
    length = page.get('length')
    if length:
        self.data['length'] = length
    self._extend_data('links', utils.get_links(page.get('links')))
    self._update_data('modified', 'page', page.get('touched'))
    pageprops = page.get('pageprops')
    if pageprops:
        wikibase = pageprops.get('wikibase_item')
        if wikibase:
            self.data['wikibase'] = wikibase
            self.data['wikidata_url'] = utils.wikidata_url(wikibase)
        if 'disambiguation' in pageprops:
            self.data['disambiguation'] = len(self.data['links'])
set less expensive action = query response data PART 1
363
10
236,143
def _set_query_data_fast_2 ( self , page ) : self . data [ 'pageid' ] = page . get ( 'pageid' ) redirects = page . get ( 'redirects' ) if redirects : self . data [ 'redirects' ] = redirects terms = page . get ( 'terms' ) if terms : if terms . get ( 'alias' ) : self . data [ 'aliases' ] = terms [ 'alias' ] if terms . get ( 'description' ) : self . data [ 'description' ] = next ( iter ( terms [ 'description' ] ) , None ) if terms . get ( 'label' ) : self . data [ 'label' ] = next ( iter ( terms [ 'label' ] ) , None ) title = page . get ( 'title' ) self . data [ 'title' ] = title if not self . params . get ( 'title' ) : self . params [ 'title' ] = title watchers = page . get ( 'watchers' ) if watchers : self . data [ 'watchers' ] = watchers self . _set_query_image ( page )
set less expensive action = query response data PART 2
253
10
236,144
def _set_query_data_slow ( self , page ) : categories = page . get ( 'categories' ) if categories : self . data [ 'categories' ] = [ x [ 'title' ] for x in categories ] if page . get ( 'contributors' ) : contributors = page . get ( 'contributors' ) or 0 anoncontributors = page . get ( 'anoncontributors' ) or 0 if isinstance ( contributors , list ) : contributors = len ( contributors ) self . data [ 'contributors' ] = contributors + anoncontributors files = page . get ( 'images' ) # really, these are FILES if files : self . data [ 'files' ] = [ x [ 'title' ] for x in files ] languages = page . get ( 'langlinks' ) if languages : self . data [ 'languages' ] = languages pageviews = page . get ( 'pageviews' ) if pageviews : values = [ x for x in pageviews . values ( ) if x ] if values : self . data [ 'views' ] = int ( sum ( values ) / len ( values ) ) else : self . data [ 'views' ] = 0
set more expensive action = query response data
263
8
236,145
def _set_query_image ( self , page ) : pageimage = page . get ( 'pageimage' ) thumbnail = page . get ( 'thumbnail' ) if pageimage or thumbnail : if 'image' not in self . data : self . data [ 'image' ] = [ ] if pageimage : self . data [ 'image' ] . append ( { 'kind' : 'query-pageimage' , 'file' : pageimage } ) if thumbnail : qthumb = { 'kind' : 'query-thumbnail' } qthumb . update ( thumbnail ) qthumb [ 'url' ] = thumbnail . get ( 'source' ) del qthumb [ 'source' ] qthumb [ 'file' ] = qthumb [ 'url' ] . split ( '/' ) [ - 2 ] self . data [ 'image' ] . append ( qthumb )
set image data from action = query response
193
8
236,146
def _set_random_data ( self ) : rdata = self . _load_response ( 'random' ) rdata = rdata [ 'query' ] [ 'random' ] [ 0 ] pageid = rdata . get ( 'id' ) title = rdata . get ( 'title' ) self . data . update ( { 'pageid' : pageid , 'title' : title } )
sets page data from random request
88
6
236,147
def _update_data ( self , datapoint , key , new_data ) : if new_data : try : self . data [ datapoint ] . update ( { key : new_data } ) except KeyError : self . data [ datapoint ] = { key : new_data }
update or assign new data to datapoint
65
9
236,148
def _update_params ( self ) : if self . data . get ( 'title' ) : self . params [ 'title' ] = self . data . get ( 'title' ) if self . data . get ( 'pageid' ) : self . params [ 'pageid' ] = self . data . get ( 'pageid' ) if self . data . get ( 'wikibase' ) : self . params [ 'wikibase' ] = self . data . get ( 'wikibase' )
update params from response data
111
5
236,149
def skip_action(self, action):
    """Add an action to the skip list, creating the list if needed."""
    self.flags.setdefault('skip', []).append(action)
append action to skip flag
44
5
236,150
def images(self, fields=None, token=None):
    """Return image info dicts, optionally filtered to kinds containing
    ``token`` and restricted to ``fields``; None when no images."""
    if 'image' not in self.data:
        return
    out = []
    for img in self.data['image']:
        if token and token not in img['kind']:
            continue
        info = {key: img[key] for key in img
                if not fields or key in fields}
        if info:
            out.append(info)
    return out
Returns image info keys for kind containing token
97
8
236,151
def get(self, url, status):
    """Perform an HTTP GET via pycurl and return the response body.

    pycurl is kept in favor of python-requests for speed; returns None
    when requests are disabled.
    """
    crl = self.cobj
    try:
        crl.setopt(pycurl.URL, url)
    except UnicodeEncodeError:
        crl.setopt(pycurl.URL, url.encode('utf-8'))
    if not self.silent:
        print(status, file=sys.stderr)
    if self.DISABLED:
        print("Requests DISABLED", file=sys.stderr)
        return
    return self.curl_perform(crl)
in favor of python - requests for speed
155
8
236,152
def curl_perform(self, crl):
    """Perform the prepared curl request and return the response body,
    recording request info on the instance."""
    bfr = BytesIO()
    crl.setopt(crl.WRITEFUNCTION, bfr.write)
    crl.perform()
    info = curl_info(crl)
    if info:
        if self.verbose and not self.silent:
            for item in sorted(info):
                print(" %s: %s" % (item, info[item]), file=sys.stderr)
        self.info = info
    body = bfr.getvalue()
    bfr.close()
    return body
performs HTTP GET and returns body of response
125
9
236,153
def safequote_restbase(title):
    """Percent-encode a RESTBase title, including any slashes."""
    try:
        return quote(title.encode('utf-8'), safe='')
    except UnicodeDecodeError:
        # python2 byte string that is already encoded
        return quote(title, safe='')
Safequote restbase title possibly having slash in title
45
12
236,154
def category(self, title, pageid=None, cparams=None, namespace=None):
    """Return a categorymembers list query string.

    NOTE(review): when called with only a pageid, ``status`` is an int
    and appending cparams would raise TypeError — confirm callers.
    """
    query = self.LIST.substitute(WIKI=self.uri,
                                 ENDPOINT=self.endpoint,
                                 LIST='categorymembers')
    status = pageid or title
    query += "&cmlimit=500"
    if namespace is not None:
        query += "&cmnamespace=%d" % namespace
    if title and pageid:
        title = None  # prefer pageid when both are given
    if title:
        query += "&cmtitle=" + safequote(title)
    if pageid:
        query += "&cmpageid=%d" % pageid
    if cparams:
        query += cparams
        status += ' (%s)' % cparams
    self.set_status('categorymembers', status)
    return query
Returns category query string
165
4
236,155
def labels(self, qids):
    """Return a Wikidata labels query string for up to 50 entity IDs.

    Raises:
        ValueError: when more than 50 IDs are given.
    """
    if len(qids) > 50:
        raise ValueError("The limit is 50.")
    self.domain = 'www.wikidata.org'
    self.uri = self.wiki_uri(self.domain)
    query = self.WIKIDATA.substitute(WIKI=self.uri,
                                     ENDPOINT=self.endpoint,
                                     LANG=self.variant or self.lang,
                                     PROPS='labels')
    joined = '|'.join(qids)
    query += "&ids=%s" % joined
    self.set_status('labels', joined)
    return query
Returns Wikidata labels query string
145
7
236,156
def imageinfo(self, files):
    """Return an imageinfo query string for the given filenames."""
    quoted = '|'.join(safequote(name) for name in files)
    self.set_status('imageinfo', quoted)
    return self.IMAGEINFO.substitute(WIKI=self.uri,
                                     ENDPOINT=self.endpoint,
                                     FILES=quoted)
Returns imageinfo query string
74
5
236,157
def parse(self, title, pageid=None):
    """Return a Mediawiki action=parse query string by title, or by
    pageid when no title is given."""
    qry = self.PARSE.substitute(WIKI=self.uri,
                                ENDPOINT=self.endpoint,
                                PAGE=safequote(title) or pageid)
    if pageid and not title:
        # switch the page parameter to a pageid lookup
        qry = qry.replace('&page=', '&pageid=').replace('&redirects', '')
    if self.variant:
        qry += '&variant=' + self.variant
    self.set_status('parse', pageid or title)
    return qry
Returns Mediawiki action = parse query string
125
8
236,158
def query(self, titles, pageids=None, cparams=None):
    """Return a MediaWiki action=query query string by titles, or by
    pageids when no titles are given.

    NOTE(review): with only pageids, ``status`` is an int and appending
    cparams would raise TypeError — confirm callers.
    """
    query = self.QUERY.substitute(WIKI=self.uri,
                                  ENDPOINT=self.endpoint,
                                  TITLES=safequote(titles) or pageids)
    status = titles or pageids
    if pageids and not titles:
        query = query.replace('&titles=', '&pageids=')
    if cparams:
        query += cparams
        status += " (%s)" % cparams
    if self.variant:
        query += '&variant=' + self.variant
    self.set_status('query', status)
    return query
Returns MediaWiki action = query query string
135
8
236,159
def random(self, namespace=0):
    """Return a query string for one random page in ``namespace``;
    status is set to a randomly chosen food emoji."""
    query = self.LIST.substitute(WIKI=self.uri,
                                 ENDPOINT=self.endpoint,
                                 LIST='random')
    query += "&rnlimit=1&rnnamespace=%d" % namespace
    emoji = [
        u'\U0001f32f',  # burrito or wrap
        u'\U0001f355',  # slice of pizza
        u'\U0001f35c',  # steaming bowl of ramen
        u'\U0001f363',  # sushi
        u'\U0001f369',  # doughnut
        u'\U0001f36a',  # cookie
        u'\U0001f36d',  # lollipop
        u'\U0001f370',  # strawberry shortcake
    ]
    action = 'random:%d' % namespace if namespace else 'random'
    self.set_status(action, random.choice(emoji))
    return query
Returns query string for random page
206
6
236,160
def restbase(self, endpoint, title):
    """Return a RESTBase API URL for the given endpoint and title.

    Raises:
        ValueError: when endpoint is falsy.
    """
    if not endpoint:
        raise ValueError("invalid endpoint: %s" % endpoint)
    route = endpoint
    if title and endpoint != '/page/':
        route = endpoint + safequote_restbase(title)
    self.set_status('restbase', route)
    return "%s/api/rest_v1/%s" % (self.uri, route[1:])
Returns RESTBase query string
95
5
236,161
def site(self, action):
    """Return a site query string for the siteinfo, sitematrix, or
    sitevisitors action.

    Raises:
        ValueError: for an unknown action.
    """
    query = None
    viewdays = 7
    hostpath = self.uri + self.endpoint
    if action == 'siteinfo':
        query = hostpath + ('?action=query'
                            '&meta=siteinfo|siteviews'
                            '&siprop=general|statistics'
                            '&list=mostviewed&pvimlimit=max')
        query += '&pvisdays=%d' % viewdays  # meta=siteviews
        self.set_status('query', 'siteinfo|siteviews|mostviewed')
    elif action == 'sitematrix':
        query = hostpath + '?action=sitematrix'
        self.set_status('sitematrix', 'all')
    elif action == 'sitevisitors':
        query = hostpath + ('?action=query'
                            '&meta=siteviews&pvismetric=uniques')
        query += '&pvisdays=%d' % viewdays  # meta=siteviews
        self.set_status('query', 'siteviews:uniques')
    if not query:
        raise ValueError("Could not form query")
    query += '&format=json&formatversion=2'
    return query
Returns site query
270
3
236,162
def wikidata(self, title, wikibase=None):
    """Return a Wikidata entities query string, by wikibase ID when
    given, otherwise by site+title."""
    self.domain = 'www.wikidata.org'
    self.uri = self.wiki_uri(self.domain)
    query = self.WIKIDATA.substitute(
        WIKI=self.uri,
        ENDPOINT=self.endpoint,
        LANG=self.variant or self.lang,
        PROPS="aliases|info|claims|descriptions|labels|sitelinks")
    if wikibase:
        query += "&ids=%s" % wikibase
    elif title:
        title = safequote(title)
        query += "&sites=%swiki" % self.lang
        query += "&titles=%s" % title
    self.set_status('wikidata', wikibase or title)
    return query
Returns Wikidata query string
183
6
236,163
def _handle_response(self):
    """Process the cached RESTBase response.

    Returns the parsed JSON response, or None for HTML content (stored
    in data['html']) and for /page/ endpoint listings.

    Raises:
        LookupError: on HTTP 404.
    """
    content = self.cache['restbase']['info']['content-type']
    if content.startswith('text/html'):
        html = self.cache['restbase']['response']
        if isinstance(html, bytes):
            html = html.decode('utf-8')
        self.data['html'] = html
        return
    response = self._load_response('restbase')
    http_status = self.cache['restbase']['info']['status']
    if http_status == 404:
        raise LookupError(self.cache['restbase']['query'])
    if self.params.get('endpoint') == '/page/':
        msg = "RESTBase /page/ entry points: %s" % response.get('items')
        utils.stderr(msg)
        del self.cache['restbase']
        return
    return response
returns RESTBase response if appropriate
222
7
236,164
def _query ( self , action , qobj ) : return qobj . restbase ( self . params [ 'rest_endpoint' ] , self . params . get ( 'title' ) )
returns WPToolsQuery string from action
42
9
236,165
def _unpack_images ( self , rdata ) : image = rdata . get ( 'image' ) # /page/mobile-sections-lead originalimage = rdata . get ( 'originalimage' ) # /page/summary thumbnail = rdata . get ( 'thumbnail' ) # /page/summary if image or originalimage or thumbnail : if 'image' not in self . data : self . data [ 'image' ] = [ ] def file_url ( info ) : """ put image source in url and set file key """ if 'source' in info : info [ 'url' ] = info [ 'source' ] info [ 'file' ] = info [ 'source' ] . split ( '/' ) [ - 1 ] del info [ 'source' ] return info if image : img = { 'kind' : 'restbase-image' } img . update ( image ) self . data [ 'image' ] . append ( file_url ( img ) ) if originalimage : img = { 'kind' : 'restbase-original' } img . update ( originalimage ) self . data [ 'image' ] . append ( file_url ( img ) ) if thumbnail : img = { 'kind' : 'restbase-thumb' } img . update ( thumbnail ) self . data [ 'image' ] . append ( file_url ( img ) )
Set image data from RESTBase response
294
7
236,166
def is_text(obj, name=None):
    """Return True if obj is a text (string) object.

    Works on both Python 2 (basestring) and Python 3 (str). When a
    name is given, a diagnostic line is written to stderr.
    """
    try:
        text_type = basestring  # Python 2
    except NameError:
        text_type = str  # Python 3
    ans = isinstance(obj, text_type)
    if name:
        print("is_text: (%s) %s = %s" % (ans, name, obj.__class__),
              file=sys.stderr)
    return ans
returns True if object is text - like
86
9
236,167
def json_loads(data):
    """Python-version-safe json.loads for str or UTF-8 bytes input.

    The original passed encoding='utf-8' to json.loads, a keyword that
    was removed in Python 3.9 (TypeError), after which the fallback
    data.decode('utf-8') broke for str input. Decode bytes explicitly
    instead.
    """
    if isinstance(data, bytes):
        data = data.decode('utf-8')
    return json.loads(data)
python - version - safe json . loads
45
8
236,168
def stderr(msg, silent=False):
    """Write msg to stderr unless silenced."""
    if silent:
        return
    print(msg, file=sys.stderr)
write msg to stderr if not silent
28
9
236,169
def template_to_dict(tree, debug=0, find=False):
    """
    Return a wikitext template parse tree as a dict.

    Args:
        tree: iterable of lxml elements from a parsed template
        debug: verbosity level for algorithm-comparison debugging
        find: use the DEPRECATED find-based value extraction
    Returns:
        dict of template field name -> value; unparseable items are
        collected under the 'errors' key
    """
    # you can compare (most) raw Infobox wikitext like this:
    # https://en.wikipedia.org/wiki/TITLE?action=raw&section=0
    obj = defaultdict(str)
    errors = []
    for item in tree:
        try:
            name = item.findtext('name').strip()
            if debug:
                template_to_dict_debug(name, item, debug)
            find_val = template_to_dict_find(item, debug)  # DEPRECATED
            iter_val = template_to_dict_iter(item, debug)
            value = iter_val
            if find:
                value = find_val
            if name and value:
                obj[name] = value.strip()
        except AttributeError:
            # item without a <name> child: bare element such as <title>
            if isinstance(item, lxml.etree.ElementBase):
                name = item.tag.strip()
                text = item.text.strip()
                if item.tag == 'title':
                    obj['infobox'] = text
                else:
                    obj[name] = text
        except Exception:
            # bug fix: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; keep best-effort collection
            errors.append(lxml.etree.tostring(item))
    if errors:
        obj['errors'] = errors
    return dict(obj)
returns wikitext template as dict
271
8
236,170
def template_to_dict_debug(name, item, debug):
    """Print debug statements to compare extraction algorithms.

    debug == 1 prints just the field name; debug > 1 also dumps the
    raw element XML.
    """
    if debug < 1:
        return
    if debug == 1:
        print("\n%s = " % name)
    else:
        print("\n%s" % name)
        print("=" * 64)
        print(lxml.etree.tostring(item))
        print()
Print debug statements to compare algorithms
72
6
236,171
def template_to_dict_iter_debug(elm):
    """Print an expanded element on stdout for debugging.

    Shows the element tag with its text and tail where present.
    """
    if elm.text is None:
        if elm.tail is None:
            print(" <%s>" % elm.tag)
        else:
            print(" <%s>%s" % (elm.tag, elm.tail))
        return
    print(" <%s>%s</%s>" % (elm.tag, elm.text, elm.tag), end='')
    if elm.tail is None:
        print()
    else:
        print(elm.tail)
Print expanded element on stdout for debugging
129
8
236,172
def template_to_text(tmpl, debug=0):
    """Convert a parse-tree template node back to wikitext."""
    pieces = list(tmpl.itertext())
    text = "{{%s}}" % "|".join(pieces).strip()
    if debug > 1:
        print("+ template_to_text:")
        print(" %s" % text)
    return text
convert parse tree template to text
88
7
236,173
def post_build(self, p, pay):
    """Called implicitly before a packet is sent.

    Appends the payload and warns when the Auxiliary Data length is
    nonzero: the group-record format requires it to be zero, and any
    auxiliary data would swallow subsequent group records.
    """
    p += pay
    if self.auxdlen != 0:
        # typo fix in user-facing message: "complaint" -> "compliant"
        print("NOTICE: A properly formatted and compliant V3 Group Record should have an Auxiliary Data length of zero (0).")
        print(" Subsequent Group Records are lost!")
    return p
Called implicitly before a packet is sent .
65
9
236,174
def post_build(self, p, pay):
    """Called implicitly before a packet is sent; computes and places
    the IGMPv3 checksum.

    For types 0, 0x31, 0x32 and 0x22 the second byte is reserved and
    forced to zero before checksumming.
    """
    p += pay
    if self.type in [0, 0x31, 0x32, 0x22]:  # for these, field is reserved (0)
        # bug fix: chr(0) is a str on Python 3 and cannot be
        # concatenated with bytes; use a bytes literal instead
        p = p[:1] + b'\x00' + p[2:]
    if self.chksum is None:
        ck = checksum(p[:2] + p[4:])
        p = p[:2] + ck.to_bytes(2, 'big') + p[4:]
    return p
Called implicitly before a packet is sent to compute and place IGMPv3 checksum .
121
19
236,175
def mysummary(self):
    """Display a summary of the IGMPv3 object, including the IP
    source/destination when an IP underlayer is present."""
    if isinstance(self.underlayer, IP):
        return self.underlayer.sprintf("IGMPv3: %IP.src% > %IP.dst% %IGMPv3.type% %IGMPv3.gaddr%")
    else:
        return self.sprintf("IGMPv3 %IGMPv3.type% %IGMPv3.gaddr%")
Display a summary of the IGMPv3 object .
95
11
236,176
def adjust_ether(self, ip=None, ether=None):
    """Called to explicitly fix up an associated Ethernet header.

    Sets the Ethernet destination to the IPv4 multicast MAC derived
    from ip.dst (01:00:5e + low 23 bits of the group address).

    Returns:
        True when the fixup was applied, False otherwise.
    """
    # The rules are:
    # 1. send to the group mac address corresponding to the IP.dst
    # idiom fix: `is not None` instead of `!= None`
    if ip is not None and ip.haslayer(IP) and ether is not None and ether.haslayer(Ether):
        iplong = atol(ip.dst)
        ether.dst = "01:00:5e:%02x:%02x:%02x" % ((iplong >> 16) & 0x7F,
                                                 (iplong >> 8) & 0xFF,
                                                 (iplong) & 0xFF)
        return True
    return False
Called to explicitly fix up an associated Ethernet header
164
12
236,177
def adjust_ip(self, ip=None):
    """Called to explicitly fix up an associated IP header.

    Sets ip.dst per the IGMP destination rules for the message type
    and group address, forces TTL to 1 and adds the Router Alert
    option. Returns True when the header was adjusted, else False.
    """
    if ip != None and ip.haslayer(IP):
        if (self.type == 0x11):
            # membership query
            if (self.gaddr == "0.0.0.0"):
                ip.dst = "224.0.0.1"  # IP rule 1: general query -> all-systems
                retCode = True
            elif isValidMCAddr(self.gaddr):
                ip.dst = self.gaddr  # IP rule 3a: group-specific query
                retCode = True
            else:
                print("Warning: Using invalid Group Address")
                retCode = False
        elif ((self.type == 0x17) and isValidMCAddr(self.gaddr)):
            ip.dst = "224.0.0.2"  # IP rule 2: leave -> all-routers
            retCode = True
        elif ((self.type == 0x12) or (self.type == 0x16)) and (isValidMCAddr(self.gaddr)):
            ip.dst = self.gaddr  # IP rule 3b: membership report
            retCode = True
        else:
            print("Warning: Using invalid IGMP Type")
            retCode = False
    else:
        print("Warning: No IGMP Group Address set")
        retCode = False
    if retCode == True:
        ip.ttl = 1  # IP Rule 4
        ip.options = [IPOption_Router_Alert()]  # IP rule 5
    return retCode
Called to explicitly fix up an associated IP header
300
12
236,178
def ospf_lsa_checksum(lsa):
    """Fletcher checksum for OSPF LSAs, returned as a 16-bit integer.

    Based on the GPLed C implementation in Zebra <http://www.zebra.org/>.
    """
    CHKSUM_OFFSET = 16
    if len(lsa) < CHKSUM_OFFSET:
        raise Exception("LSA Packet too short (%s bytes)" % len(lsa))
    # calculation is done with the existing checksum bytes removed
    body = lsa[:CHKSUM_OFFSET] + lsa[CHKSUM_OFFSET + 2:]
    c0 = c1 = 0
    for octet in body[2:]:  # leave out the age field
        c0 = (c0 + octet) % 255
        c1 = (c1 + c0) % 255
    x = ((len(body) - CHKSUM_OFFSET - 1) * c0 - c1) % 255
    if x <= 0:
        x += 255
    y = 510 - c0 - x
    if y > 255:
        y -= 255
    return (x << 8) + y
Fletcher checksum for OSPF LSAs returned as a 2 byte string .
229
17
236,179
def _LSAGuessPayloadClass ( p , * * kargs ) : # This is heavily based on scapy-cdp.py by Nicolas Bareil and Arnaud Ebalard # XXX: This only works if all payload cls = conf . raw_layer if len ( p ) >= 4 : typ = p [ 3 ] clsname = _OSPF_LSclasses . get ( typ , "Raw" ) cls = globals ( ) [ clsname ] return cls ( p , * * kargs )
Guess the correct LSA class for a given payload
115
11
236,180
def locate_ip(ip):
    """Get geographic coordinates from an IP using the geoip database.

    Binary-searches the IP-to-country database and returns the country
    coordinates, or None when the IP falls outside every range.
    """
    # bug fix: map() returns a non-subscriptable iterator on Python 3
    parts = [int(x) for x in ip.split(".")]
    ip = parts[3] + (parts[2] << 8) + (parts[1] << 16) + (parts[0] << 24)
    cloc = country_loc_kdb.get_base()
    db = IP_country_kdb.get_base()
    d = 0
    f = len(db) - 1
    while (f - d) > 1:
        # bug fix: `/` yields a float index on Python 3; use floor division
        guess = (d + f) // 2
        if ip > db[guess][0]:
            d = guess
        else:
            f = guess
    s, e, c = db[guess]
    if s <= ip and ip <= e:
        return cloc.get(c, None)
Get geographic coordinates from IP using geoip database
161
9
236,181
def _resolve_one(self, ip):
    """
    Overloaded version providing a whois resolution on the embedded
    IPv4 address when the address is 6to4 or Teredo; otherwise the
    native IPv6 address is passed through.

    Returns:
        (ip, asn, description) tuple for the original IPv6 address
    """
    if in6_isaddr6to4(ip):  # for 6to4, use embedded @
        tmp = inet_pton(socket.AF_INET6, ip)
        addr = inet_ntop(socket.AF_INET, tmp[2:6])
    elif in6_isaddrTeredo(ip):  # for Teredo, use mapped address
        addr = teredoAddrExtractInfo(ip)[2]
    else:
        addr = ip
    _, asn, desc = AS_resolver_riswhois._resolve_one(self, addr)
    return ip, asn, desc
overloaded version to provide a Whois resolution on the embedded IPv4 address if the address is 6to4 or Teredo . Otherwise the native IPv6 address is passed .
149
36
236,182
def in6_6to4ExtractAddr(addr):
    """Extract the IPv4 address embedded in a 6to4 address.

    Passed address must be a 6to4 address; None is returned on error
    or when the address is not under the 6to4 prefix.
    """
    try:
        packed = inet_pton(socket.AF_INET6, addr)
    except:
        return None
    # 6to4 addresses live under 2002::/16 (b" \x02" == b"\x20\x02")
    if packed[:2] != b" \x02":
        return None
    return inet_ntop(socket.AF_INET, packed[2:6])
Extract IPv4 address embedded in 6to4 address . Passed address must be a 6to4 address . None is returned on error .
75
31
236,183
def in6_getLocalUniquePrefix():
    """
    Returns a pseudo-randomly generated Unique Local prefix, following
    the recommendation of Section 3.2.2 of RFC 4193.
    """
    # NTP-style 64-bit timestamp (RFC 1305): 32-bit seconds since 1900
    # in the high word, 32-bit fraction in the low word.  The epoch
    # offset is irrelevant here -- we only need time-based entropy.
    tod = time.time()
    i = int(tod)
    j = int((tod - i) * (2 ** 32))
    tod = struct.pack("!II", i, j)
    # TODO: Add some check regarding system address gathering
    rawmac = get_if_raw_hwaddr(conf.iface6)
    # bug fix: Python 3 bytes iteration yields ints, so the original
    # ord(x) raised TypeError; format the ints directly
    mac = ":".join("%.02x" % octet for octet in rawmac)
    # construct modified EUI-64 ID
    eui64 = inet_pton(socket.AF_INET6, '::' + in6_mactoifaceid(mac))[8:]
    # bug fix: the Python2-only `sha` module is gone; use hashlib
    import hashlib
    globalid = hashlib.sha1(tod + eui64).digest()[:5]
    return inet_ntop(socket.AF_INET6, b'\xfd' + globalid + b'\x00' * 10)
Returns a pseudo - randomly generated Unique Local prefix . Function follows recommendation of Section 3 . 2 . 2 of RFC 4193 for prefix generation .
372
30
236,184
def in6_getnsmac(a):
    """Return the multicast MAC address associated with the provided
    IPv6 address. Passed address must be in network (packed) format.
    """
    # multicast Ethernet address: 33:33 followed by the low 32 bits
    last4 = struct.unpack('16B', a)[-4:]
    return '33:33:' + ':'.join('%.2x' % octet for octet in last4)
Return the multicast mac address associated with provided IPv6 address . Passed address must be in network format .
78
21
236,185
def _where(filename, dirs=None, env="PATH"):
    """Find a file in the current dir, the system path, or extra dirs.

    Args:
        filename: file name or glob pattern to locate
        dirs: optional extra directory (or list of directories)
        env: environment variable holding the search path
    Raises:
        IOError: when no match is found
    """
    # bug fix: dirs=[] was a mutable default argument
    if dirs is None:
        dirs = []
    elif not isinstance(dirs, list):
        dirs = [dirs]
    if glob(filename):
        return filename
    paths = [os.curdir] + os.environ[env].split(os.path.pathsep) + dirs
    for path in paths:
        for match in glob(os.path.join(path, filename)):
            if match:
                return os.path.normpath(match)
    raise IOError("File not found: %s" % filename)
Find file in current dir or system path
125
8
236,186
def win_find_exe(filename, installsubdir=None, env="ProgramFiles"):
    """Find an executable in the current dir, system path, or the given
    ProgramFiles subdir; falls back to the bare filename when not found.
    """
    path = filename
    for candidate in [filename, filename + ".exe"]:
        try:
            if installsubdir is None:
                path = _where(candidate)
            else:
                search = [os.path.join(os.environ[env], installsubdir)]
                path = _where(candidate, dirs=search)
        except IOError:
            path = filename
            continue
        break
    return path
Find executable in current dir system path or given ProgramFiles subdir
97
13
236,187
def init_loopback(self, data):
    """Initialize this object as the Windows pseudo-loopback interface."""
    for attr in ("name", "description", "win_index", "mac", "guid"):
        setattr(self, attr, data[attr])
    # the loopback always binds the localhost address
    self.ip = "127.0.0.1"
Just initialize the object for our Pseudo Loopback
79
10
236,188
def update(self, data):
    """
    Update info about this network interface from the given dict.

    Required keys: name, description, win_index. The pcap data, IP and
    MAC are best-effort: missing keys or unavailable lookups are
    silently skipped.
    """
    self.name = data["name"]
    self.description = data['description']
    self.win_index = data['win_index']
    # Other attributes are optional
    if conf.use_winpcapy:
        self._update_pcapdata()
    try:
        # 'guid' may be missing, or the raw-address lookup unavailable
        self.ip = socket.inet_ntoa(get_if_raw_addr(data['guid']))
    except (KeyError, AttributeError, NameError):
        pass
    try:
        self.mac = data['mac']
    except KeyError:
        pass
Update info about network interface according to given dnet dictionary
129
11
236,189
def pcap_name(self, devname):
    """Return the pcap device name for the given Windows device name.

    Raises:
        ValueError: when devname is not a known interface
    """
    try:
        return self.data[devname].pcap_name
    except KeyError:
        raise ValueError("Unknown network interface %r" % devname)
Return pcap device name for given Windows device name .
57
11
236,190
def bitmap2RRlist(bitmap):
    """Decode the Type Bit Maps field of the NSEC Resource Record into
    an integer list (RFC 4034, 4.1.2).

    Returns None (after a warning) on a malformed bitmap.
    """
    rrs = []
    while bitmap:
        if len(bitmap) < 2:
            warning("bitmap too short (%i)" % len(bitmap))
            return
        window_block = bitmap[0]  # window number
        bitmap_len = bitmap[1]  # length of the bitmap in bytes
        if bitmap_len <= 0 or bitmap_len > 32:
            warning("bitmap length is no valid (%i)" % bitmap_len)
            return
        offset = 256 * window_block  # RR type offset for this window
        window = bitmap[2:2 + bitmap_len]
        # each set bit encodes one RR type, MSB first
        for pos, octet in enumerate(window):
            for bit in range(8):
                if octet & (128 >> bit):
                    rrs.append(offset + pos * 8 + bit)
        bitmap = bitmap[2 + bitmap_len:]  # next window block, if any
    return rrs
Decode the Type Bit Maps field of the NSEC Resource Record into an integer list .
317
18
236,191
def IE_Dispatcher(s):
    """Choose the correct Information Element class for the buffer.

    NOTE(review): ord(s[0]) assumes s is a Python 2 str; under
    Python 3 indexing bytes already yields an int and ord() would
    raise TypeError -- confirm which this codebase targets.
    """
    if len(s) < 1:
        return Raw(s)
    # Get the IE type
    ietype = ord(s[0])
    cls = ietypecls.get(ietype, Raw)
    # if ietype greater than 128 are TLVs
    if cls == Raw and ietype & 128 == 128:
        cls = IE_NotImplementedTLV
    return cls(s)
Choose the correct Information Element class .
98
7
236,192
def filter(self, func):
    """Returns a packet list filtered by a truth function."""
    kept = [pkt for pkt in self.res if func(pkt)]
    return self.__class__(kept, name="filtered %s" % self.listname)
Returns a packet list filtered by a truth function
41
9
236,193
def multiplot ( self , f , lfilter = None , * * kargs ) : d = defaultdict ( list ) for i in self . res : if lfilter and not lfilter ( i ) : continue k , v = f ( i ) d [ k ] . append ( v ) figure = plt . figure ( ) ax = figure . add_axes ( plt . axes ( ) ) for i in d : ax . plot ( d [ i ] , * * kargs ) return figure
Uses a function that returns a label and a value for this label then plots all the values label by label
107
22
236,194
def filter(self, func):
    """Returns a SndRcv list filtered by a truth function.

    The function receives each (sent, received) pair unpacked as
    positional arguments.
    """
    kept = []
    for pair in self.res:
        if func(*pair):
            kept.append(pair)
    return self.__class__(kept, name='filtered %s' % self.listname)
Returns a SndRcv list filtered by a truth function
45
12
236,195
def _PPIGuessPayloadClass ( p , * * kargs ) : if len ( p ) >= 4 : t , pfh_len = struct . unpack ( "<HH" , p [ : 4 ] ) # Find out if the value t is in the dict _ppi_types. # If not, return the default TLV class cls = getPPIType ( t , "default" ) pfh_len += 4 out = cls ( p [ : pfh_len ] , * * kargs ) if ( out . payload ) : out . payload = conf . raw_layer ( out . payload . load ) if ( len ( p ) > pfh_len ) : out . payload . payload = conf . padding_layer ( p [ pfh_len : ] ) elif ( len ( p ) > pfh_len ) : out . payload = conf . padding_layer ( p [ pfh_len : ] ) else : out = conf . raw_layer ( p , * * kargs ) return out
This function tells the PacketListField how it should extract the TLVs from the payload . We pass cls only the length string pfh_len says it needs . If a payload is returned that means part of the sting was unused . This converts to a Raw layer and the remainder of p is added as Raw s payload . If there is no payload the remainder of p is added as out s payload .
230
84
236,196
def i2b(self, pkt, x):
    """Convert the internal value to bytes.

    A str is converted bytewise via ord(); any other value is
    returned unchanged.
    """
    if type(x) is not str:
        return x
    return bytes(ord(ch) for ch in x)
Convert internal value to internal value
38
7
236,197
def randval(self):
    """Return a volatile object whose value is both random and
    suitable for this field, chosen from the struct format character.
    """
    fmtt = self.fmt[-1]
    if fmtt in "BHIQ":
        # fixed-width integer: pick the matching random class
        return {"B": RandByte, "H": RandShort, "I": RandInt, "Q": RandLong}[fmtt]()
    elif fmtt == "s":
        # byte string: parse the length out of e.g. "12s" or "!12s"
        if self.fmt[0] in "0123456789":
            l = int(self.fmt[:-1])
        else:
            l = int(self.fmt[1:-1])
        return RandBin(l)
    else:
        warning("no random class for [%s] (fmt=%s)." % (self.name, self.fmt))
Return a volatile object whose value is both random and suitable for this field
159
14
236,198
def str2bytes(x):
    """Convert the input argument to bytes.

    bytes pass through; str is converted charwise via ord(); anything
    else is stringified first and converted recursively.
    """
    kind = type(x)
    if kind is bytes:
        return x
    if kind is str:
        return bytes(ord(ch) for ch in x)
    return str2bytes(str(x))
Convert input argument to bytes
53
6
236,199
def is_private_addr(x):
    """Returns True if the IPv4 address is an RFC 1918 private address.

    Invalid address strings return False rather than raising
    (matching the original's bare-except behavior, but only catching
    the ValueError that ip_address actually raises). Also fixes the
    original `continue` that should have stopped scanning after a
    match.
    """
    private_nets = ('10.0.0.0/8', '172.16.0.0/12', '192.168.0.0/16')
    try:
        addr = ipaddress.ip_address(x)
    except ValueError:
        return False
    return any(addr in ipaddress.ip_network(net) for net in private_nets)
Returns True if the IPv4 Address is an RFC 1918 private address .
93
14