idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
245,100
def traceplot ( trace : sample_types , labels : List [ Union [ str , Tuple [ str , str ] ] ] = None , ax : Any = None , x0 : int = 0 ) -> Any :
    """Plot sampled values, one subplot per label.

    :param trace: mapping from label to a sequence of samples.
    :param labels: labels to plot; defaults to all keys of *trace*.
    :param ax: 2-D array of matplotlib axes; created when None.
    :param x0: origin for the integer x axis.
    :returns: the axes the traces were drawn on.
    """
    if labels is None :
        labels = list ( trace . keys ( ) )
    if ax is None :
        # squeeze=False keeps a 2-D axes array even for a single label.
        _ , ax = plt . subplots ( len ( labels ) , 1 , squeeze = False )
    for index , label in enumerate ( labels ) :
        data = [ sample for sample in trace [ label ] ]
        ax [ index ] [ 0 ] . set_title ( label )
        ax [ index ] [ 0 ] . plot ( __integer_xaxis ( ax [ index ] [ 0 ] , x0 , len ( data ) ) , data )
        # NOTE(review): assumed to run once per subplot to animate drawing —
        # confirm against the original layout.
        __pause_for_crude_animation ( )
    return ax
Plot sample values.
174
4
245,101
def read_file_snippets ( file , snippet_store ) :
    """Parse *file* and add every snippet found in it to *snippet_store*.

    Snippets are delimited by ``%%SNIPPET_START%% <name>`` and
    ``%%SNIPPET_END%% <name>`` marker lines; every line in between is
    collected under ``<name>``.  Duplicate or unbalanced markers are
    reported via record_error().
    """
    start_reg = re . compile ( "(.*%%SNIPPET_START%% )([a-zA-Z0-9]+)" )
    end_reg = re . compile ( "(.*%%SNIPPET_END%% )([a-zA-Z0-9]+)" )
    open_snippets = { }
    with open ( file , encoding = "utf-8" ) as w :
        lines = w . readlines ( )
    for line in lines :
        printd ( "Got Line: {}" . format ( line ) )
        # Check whether we're entering or leaving a snippet
        m = start_reg . match ( line )
        if m :
            printd ( "Opened Snippet {}" . format ( m . group ( 2 ) ) )
            if m . group ( 2 ) in snippet_store :
                record_error ( "Repeat definition of Snippet {}" . format ( m . group ( 2 ) ) )
            elif m . group ( 2 ) in open_snippets :
                record_error ( "Snippet already opened {}" . format ( m . group ( 2 ) ) )
            else :
                printd ( "Added {} to open snippets list" . format ( m . group ( 2 ) ) )
                open_snippets [ m . group ( 2 ) ] = [ ]
            continue
        m = end_reg . match ( line )
        if m :
            printd ( "Found end of Snippet {}" . format ( m . group ( 2 ) ) )
            if m . group ( 2 ) not in open_snippets :
                record_error ( "Reached Snippet End but no start" )
            elif m . group ( 2 ) in snippet_store :
                record_error ( "Repeat definition of Snippet {}" . format ( m . group ( 2 ) ) )
            else :
                snippet_store [ m . group ( 2 ) ] = open_snippets [ m . group ( 2 ) ]
                del open_snippets [ m . group ( 2 ) ]
            continue
        # If we've got this far, then we're just a normal line, so we can add this to all open snippets
        for snippet in open_snippets . values ( ) :
            printd ( "Adding Line to snippet" )
            snippet . append ( line )
    # Now, warn about any unclosed snippets
    for opened in open_snippets :
        record_error ( "Snippet {} left open - ignoring" . format ( opened ) )
Parse a file and add all snippets to the snippet_store dictionary
529
14
245,102
def strip_block_whitespace(string_list):
    """Treat *string_list* as a code block and dedent it.

    Strips the common leading whitespace so that the least-indented
    non-blank line starts at column 0.  Blank lines ("\n") are kept
    unchanged.

    :param string_list: list of source lines.
    :returns: new list of dedented lines.
    """
    # default=0 guards against an empty list / all-blank input, which
    # previously raised ValueError from min() on an empty sequence.
    min_ws = min((len(x) - len(x.lstrip()) for x in string_list if x != '\n'),
                 default=0)
    return [x[min_ws:] if x != '\n' else x for x in string_list]
Treats a list of strings as a code block and strips whitespace so that the min whitespace line sits at char 0 of line .
77
29
245,103
async def prepare ( self , request ) :
    """Prepare for streaming and send the HTTP headers.

    Only GET requests are accepted.  On first call the response is
    started, a keep-alive ping task is created, chunked encoding is
    enabled and the stream writer is returned.  On later calls only
    connection liveness is checked.

    :raises HTTPMethodNotAllowed: for any non-GET request.
    :raises asyncio.CancelledError: when the client has disconnected.
    """
    if request . method != 'GET' :
        raise HTTPMethodNotAllowed ( request . method , [ 'GET' ] )
    if not self . prepared :
        writer = await super ( ) . prepare ( request )
        self . _loop = request . app . loop
        # Keep the connection alive with periodic pings.
        self . _ping_task = self . _loop . create_task ( self . _ping ( ) )
        # explicitly enabling chunked encoding, since content length
        # usually not known beforehand.
        self . enable_chunked_encoding ( )
        return writer
    else :
        # hackish way to check if connection alive
        # should be updated once we have proper API in aiohttp
        # https://github.com/aio-libs/aiohttp/issues/3105
        if request . protocol . transport is None :
            # request disconnected
            raise asyncio . CancelledError ( )
Prepare for streaming and send HTTP headers .
185
9
245,104
async def send ( self , data , id = None , event = None , retry = None ) :
    """Send *data* to the client using the EventSource (SSE) protocol.

    :param data: payload; split on line separators into ``data:`` fields.
    :param id: optional event id (``id:`` field).
    :param event: optional event type (``event:`` field).
    :param retry: optional reconnection delay (``retry:`` field).
    :raises TypeError: if *retry* is not an int.
    """
    buffer = io . StringIO ( )
    if id is not None :
        # Line separators are stripped: header field values must be single-line.
        buffer . write ( self . LINE_SEP_EXPR . sub ( '' , 'id: {}' . format ( id ) ) )
        buffer . write ( self . _sep )
    if event is not None :
        buffer . write ( self . LINE_SEP_EXPR . sub ( '' , 'event: {}' . format ( event ) ) )
        buffer . write ( self . _sep )
    for chunk in self . LINE_SEP_EXPR . split ( data ) :
        buffer . write ( 'data: {}' . format ( chunk ) )
        buffer . write ( self . _sep )
    if retry is not None :
        if not isinstance ( retry , int ) :
            raise TypeError ( 'retry argument must be int' )
        buffer . write ( 'retry: {}' . format ( retry ) )
        buffer . write ( self . _sep )
    # A blank line terminates the event.
    buffer . write ( self . _sep )
    await self . write ( buffer . getvalue ( ) . encode ( 'utf-8' ) )
Send data using EventSource protocol
255
6
245,105
async def wait ( self ) :
    """Block until streaming stops.

    Awaits the internal ping task, which runs until the connection is
    closed or another task calls stop_streaming().

    :raises RuntimeError: if the response has not been started yet.
    """
    if self . _ping_task is None :
        raise RuntimeError ( 'Response is not started' )
    # CancelledError is how the ping task normally terminates.
    with contextlib . suppress ( asyncio . CancelledError ) :
        await self . _ping_task
The EventSourceResponse object is used for streaming data to the client; this method returns a future so we can wait until the connection is closed or another task explicitly calls the stop_streaming method.
50
37
245,106
def ping_interval(self, value):
    """Setter for the ping_interval property.

    :param value: ping interval in seconds; must be a non-negative int.
    :raises TypeError: if *value* is not an int.
    :raises ValueError: if *value* is negative.
    """
    if not isinstance(value, int):
        raise TypeError("ping interval must be int")
    if value < 0:
        # Fixed typo in the message ("greater then" -> "greater than").
        # Note the check itself accepts 0.
        raise ValueError("ping interval must be greater than 0")
    self._ping_interval = value
Setter for ping_interval property .
61
9
245,107
def get_parser(segmenter, **options):
    """Return a parser instance for the requested *segmenter* backend.

    :param segmenter: one of 'nlapi', 'mecab' or 'tinysegmenter'.
    :param options: extra keyword arguments, forwarded to the NLAPI parser.
    :raises ValueError: for an unsupported segmenter name.
    """
    if segmenter == 'nlapi':
        return NLAPIParser(**options)
    if segmenter == 'mecab':
        return MecabParser()
    if segmenter == 'tinysegmenter':
        return TinysegmenterParser()
    raise ValueError('Segmenter {} is not supported.'.format(segmenter))
Gets a parser .
92
5
245,108
def preprocess(source):
    """Normalise *source* HTML into plain text.

    Parses the fragment, extracts its text content, removes newlines
    and collapses runs of whitespace into single spaces.
    """
    fragment = html5lib.parseFragment(source)
    text = ET.tostring(fragment, encoding='utf-8', method='text').decode('utf-8')
    text = text.replace(u'\n', u'').strip()
    return re.sub(r'\s\s+', u' ', text)
Removes unnecessary break lines and white spaces .
93
9
245,109
def main ( ) :
    """Budou entry point for the command line tool."""
    args = docopt ( __doc__ )
    if args [ '--version' ] :
        print ( __version__ )
        sys . exit ( )
    result = parse ( args [ '<source>' ] ,
                     segmenter = args [ '--segmenter' ] ,
                     language = args [ '--language' ] ,
                     classname = args [ '--classname' ] )
    print ( result [ 'html_code' ] )
    sys . exit ( )
Budou main method for the command line tool .
98
11
245,110
def parse(source, segmenter='nlapi', language=None, max_length=None,
          classname=None, attributes=None, **kwargs):
    """Parse *source* text into chunks using the selected segmenter.

    Extra keyword arguments are forwarded to the parser factory.
    """
    backend = get_parser(segmenter, **kwargs)
    return backend.parse(
        source,
        language=language,
        max_length=max_length,
        classname=classname,
        attributes=attributes)
Parses input source .
83
6
245,111
def authenticate(json_path=None):
    """Deprecated: obtain a Natural Language API parser.

    Kept for backwards compatibility; use get_parser() instead.
    """
    warnings.warn(
        'budou.authentication() is deprecated. '
        'Please use budou.get_parser() to obtain a parser instead.',
        DeprecationWarning)
    return get_parser('nlapi', credentials_path=json_path)
Gets a Natural Language API parser by authenticating the API .
75
13
245,112
def _memorize ( func ) : def _wrapper ( self , * args , * * kwargs ) : """Wrapper to cache the function's output. """ if self . use_cache : cache = load_cache ( self . cache_filename ) original_key = ':' . join ( [ self . __class__ . __name__ , func . __name__ , '_' . join ( [ str ( a ) for a in args ] ) , '_' . join ( [ str ( w ) for w in kwargs . values ( ) ] ) ] ) cache_key = hashlib . md5 ( original_key . encode ( 'utf-8' ) ) . hexdigest ( ) cached_val = cache . get ( cache_key ) if cached_val : return cached_val val = func ( self , * args , * * kwargs ) if self . use_cache : cache . set ( cache_key , val ) return val return _wrapper
Decorator to cache the given function s output .
211
11
245,113
def _get_source_chunks ( self , input_text , language = None ) :
    """Return a chunk list built from Syntax Analysis results.

    :param input_text: text to annotate.
    :param language: language code; auto-detected when None.
    :returns: tuple of (chunk list, detected language).
    """
    chunks = ChunkList ( )
    seek = 0
    result = self . _get_annotations ( input_text , language = language )
    tokens = result [ 'tokens' ]
    language = result [ 'language' ]
    for i , token in enumerate ( tokens ) :
        word = token [ 'text' ] [ 'content' ]
        begin_offset = token [ 'text' ] [ 'beginOffset' ]
        label = token [ 'dependencyEdge' ] [ 'label' ]
        pos = token [ 'partOfSpeech' ] [ 'tag' ]
        if begin_offset > seek :
            # A gap between tokens means whitespace in the source text.
            chunks . append ( Chunk . space ( ) )
            seek = begin_offset
        chunk = Chunk ( word , pos , label )
        if chunk . label in _DEPENDENT_LABEL :
            # Determining concatenating direction based on syntax dependency.
            chunk . dependency = i < token [ 'dependencyEdge' ] [ 'headTokenIndex' ]
        if chunk . is_punct ( ) :
            chunk . dependency = chunk . is_open_punct ( )
        chunks . append ( chunk )
        seek += len ( word )
    return chunks , language
Returns a chunk list retrieved from Syntax Analysis results .
258
11
245,114
def _group_chunks_by_entities ( self , chunks , entities ) :
    """Group chunks that overlap entities from NL API Entity Analysis.

    Each entity's overlapping chunks are merged into a single chunk
    in place; the (mutated) chunk list is returned.
    """
    for entity in entities :
        chunks_to_concat = chunks . get_overlaps ( entity [ 'beginOffset' ] , len ( entity [ 'content' ] ) )
        if not chunks_to_concat :
            continue
        new_chunk_word = u'' . join ( [ chunk . word for chunk in chunks_to_concat ] )
        new_chunk = Chunk ( new_chunk_word )
        chunks . swap ( chunks_to_concat , new_chunk )
    return chunks
Groups chunks by entities retrieved from NL API Entity Analysis .
127
12
245,115
def _get_annotations ( self , text , language = '' ) :
    """Return tokens and detected language from NL API annotateText.

    :param text: document content to analyse.
    :param language: optional language hint for the API.
    :returns: dict with 'tokens' and 'language' keys.
    """
    body = {
        'document' : {
            'type' : 'PLAIN_TEXT' ,
            'content' : text ,
        } ,
        'features' : {
            'extract_syntax' : True ,
        } ,
        'encodingType' : 'UTF32' ,
    }
    if language :
        body [ 'document' ] [ 'language' ] = language
    request = self . service . documents ( ) . annotateText ( body = body )
    response = request . execute ( )
    tokens = response . get ( 'tokens' , [ ] )
    language = response . get ( 'language' )
    return { 'tokens' : tokens , 'language' : language }
Returns the list of annotations retrieved from the given text .
154
11
245,116
def _get_entities ( self , text , language = '' ) :
    """Return entities from NL API analyzeEntities, split per word.

    Multi-word entity mentions are broken into one record per word with
    running begin offsets.

    :returns: list of {'content', 'beginOffset'} dicts.
    """
    body = {
        'document' : {
            'type' : 'PLAIN_TEXT' ,
            'content' : text ,
        } ,
        'encodingType' : 'UTF32' ,
    }
    if language :
        body [ 'document' ] [ 'language' ] = language
    request = self . service . documents ( ) . analyzeEntities ( body = body )
    response = request . execute ( )
    result = [ ]
    for entity in response . get ( 'entities' , [ ] ) :
        mentions = entity . get ( 'mentions' , [ ] )
        if not mentions :
            continue
        entity_text = mentions [ 0 ] [ 'text' ]
        offset = entity_text [ 'beginOffset' ]
        for word in entity_text [ 'content' ] . split ( ) :
            result . append ( { 'content' : word , 'beginOffset' : offset } )
            # NOTE(review): the offset advances by word length only, not
            # by the separating spaces — confirm this is intended.
            offset += len ( word )
    return result
Returns the list of entities retrieved from the given text .
202
11
245,117
def get ( self , key ) :
    """Return the cached value stored under *key*, or None if absent."""
    self . _create_file_if_none_exists ( )
    with open ( self . filename , 'rb' ) as file_object :
        cache_pickle = pickle . load ( file_object )
        val = cache_pickle . get ( key , None )
    return val
Gets a value by a key .
68
8
245,118
def set ( self , key , val ) :
    """Store *val* under *key* in the pickle-backed cache file."""
    self . _create_file_if_none_exists ( )
    with open ( self . filename , 'r+b' ) as file_object :
        cache_pickle = pickle . load ( file_object )
        cache_pickle [ key ] = val
        # Rewind so the updated dict overwrites the old content.
        file_object . seek ( 0 )
        pickle . dump ( cache_pickle , file_object )
Sets a value in a key .
88
8
245,119
def serialize(self):
    """Return this chunk's data as a plain dictionary."""
    return dict(
        word=self.word,
        pos=self.pos,
        label=self.label,
        dependency=self.dependency,
        has_cjk=self.has_cjk(),
    )
Returns serialized chunk data in dictionary .
61
8
245,120
def has_cjk(self):
    """Return True if the chunk's word contains any CJK character.

    Characters are tested against known CJK Unicode codepoint ranges
    (Hangul, CJK ideographs, fullwidth forms, etc.).
    """
    cjk_codepoint_ranges = (
        (4352, 4607), (11904, 42191), (43072, 43135), (44032, 55215),
        (63744, 64255), (65072, 65103), (65381, 65500), (131072, 196607))
    return any(
        start <= ord(char) <= end
        for char in self.word
        for start, end in cjk_codepoint_ranges)
Checks if the word of the chunk contains CJK characters .
130
13
245,121
def get_overlaps ( self , offset , length ) :
    """Return chunks overlapping the range [offset, offset + length).

    :param offset: character offset into the concatenated chunk words.
    :param length: length of the range in characters.
    :returns: a ChunkList of the overlapping chunks.
    """
    # In case entity's offset points to a space just before the entity.
    # NOTE(review): raises IndexError if offset is past the end of the
    # concatenated text — confirm callers guarantee a valid offset.
    if '' . join ( [ chunk . word for chunk in self ] ) [ offset ] == ' ' :
        offset += 1
    index = 0
    result = ChunkList ( )
    for chunk in self :
        if offset < index + len ( chunk . word ) and index < offset + length :
            result . append ( chunk )
        index += len ( chunk . word )
    return result
Returns chunks overlapped with the given range .
103
9
245,122
def swap(self, old_chunks, new_chunk):
    """Replace the consecutive *old_chunks* with the single *new_chunk*.

    The old chunks must be consecutive members of this list; they are
    removed in place and *new_chunk* is inserted at their position.
    """
    positions = [self.index(chunk) for chunk in old_chunks]
    first, last = positions[0], positions[-1]
    del self[first:last + 1]
    self.insert(first, new_chunk)
Swaps old consecutive chunks with new chunk .
63
9
245,123
def resolve_dependencies ( self ) :
    """Resolve chunk dependencies by concatenating chunks.

    Concatenates forward-dependent chunks, then backward-dependent
    ones, and finally inserts breaklines for trailing CJK spaces.
    """
    self . _concatenate_inner ( True )
    self . _concatenate_inner ( False )
    self . _insert_breaklines ( )
Resolves chunk dependency by concatenating them .
42
10
245,124
def _concatenate_inner ( self , direction ) :
    """Concatenate chunks based on each chunk's dependency.

    :param direction: True walks forward and merges forward-dependent
        chunks; False walks the list in reverse and merges
        backward-dependent chunks (and spaces).
    """
    tmp_bucket = [ ]
    source_chunks = self if direction else self [ : : - 1 ]
    target_chunks = ChunkList ( )
    for chunk in source_chunks :
        if (
            # if the chunk has matched dependency, do concatenation.
            chunk . dependency == direction or
            # if the chunk is SPACE, concatenate to the previous chunk.
            ( direction is False and chunk . is_space ( ) )
        ) :
            tmp_bucket . append ( chunk )
            continue
        tmp_bucket . append ( chunk )
        if not direction :
            # Restore original order before joining the words.
            tmp_bucket = tmp_bucket [ : : - 1 ]
        new_word = '' . join ( [ tmp_chunk . word for tmp_chunk in tmp_bucket ] )
        new_chunk = Chunk ( new_word , pos = chunk . pos , label = chunk . label , dependency = chunk . dependency )
        target_chunks . append ( new_chunk )
        tmp_bucket = ChunkList ( )
    if tmp_bucket :
        # Chunks left without a terminator are kept as-is.
        target_chunks += tmp_bucket
    if not direction :
        target_chunks = target_chunks [ : : - 1 ]
    self . list = target_chunks
Concatenates chunks based on each chunk s dependency .
265
12
245,125
def _insert_breaklines ( self ) :
    """Replace a trailing space with a breakline for CJK chunks."""
    target_chunks = ChunkList ( )
    for chunk in self :
        if chunk . word [ - 1 ] == ' ' and chunk . has_cjk ( ) :
            chunk . word = chunk . word [ : - 1 ]
            target_chunks . append ( chunk )
            target_chunks . append ( chunk . breakline ( ) )
        else :
            target_chunks . append ( chunk )
    self . list = target_chunks
Inserts a breakline instead of a trailing space if the chunk is in CJK .
101
18
245,126
def html_serialize(self, attributes, max_length=None):
    """Return concatenated HTML with CJK chunks wrapped in SPAN tags.

    :param attributes: dict of attributes set on each generated span.
    :param max_length: skip wrapping chunks longer than this.
    :returns: sanitized HTML string.
    """
    doc = ET.Element('span')
    for chunk in self:
        if (chunk.has_cjk() and
                not (max_length and len(chunk.word) > max_length)):
            ele = ET.Element('span')
            ele.text = chunk.word
            for key, val in attributes.items():
                ele.attrib[key] = val
            doc.append(ele)
        else:
            # add word without span tag for non-CJK text (e.g. English)
            # by appending it after the last element.
            # len(doc) / doc[-1] replace Element.getchildren(), which was
            # deprecated since Python 3.2 and removed in Python 3.9.
            if len(doc):
                if doc[-1].tail is None:
                    doc[-1].tail = chunk.word
                else:
                    doc[-1].tail += chunk.word
            else:
                if doc.text is None:
                    doc.text = chunk.word
                else:
                    doc.text += chunk.word
    result = ET.tostring(doc, encoding='utf-8').decode('utf-8')
    result = html5lib.serialize(
        html5lib.parseFragment(result), sanitize=True,
        quote_attr_values='always')
    return result
Returns concatenated HTML code with SPAN tag .
284
11
245,127
def _etextno_to_uri_subdirectory ( etextno ) : str_etextno = str ( etextno ) . zfill ( 2 ) all_but_last_digit = list ( str_etextno [ : - 1 ] ) subdir_part = "/" . join ( all_but_last_digit ) subdir = "{}/{}" . format ( subdir_part , etextno ) # etextno not zfilled return subdir
Returns the subdirectory that an etextno will be found in a gutenberg mirror . Generally one finds the subdirectory by separating out each digit of the etext number and uses it for a directory . The exception here is for etext numbers less than 10 which are prepended with a 0 for the directory traversal .
105
65
245,128
def _format_download_uri_for_extension(etextno, extension, mirror=None):
    """Return the download URI for a text/extension pair on a mirror.

    Falls back to the default Gutenberg mirror when *mirror* is None.
    """
    host = (mirror or _GUTENBERG_MIRROR).strip().rstrip('/')
    subdirectory = _etextno_to_uri_subdirectory(etextno)
    return '{root}/{path}/{etextno}{extension}'.format(
        root=host, path=subdirectory, etextno=etextno, extension=extension)
Returns the download location on the Project Gutenberg servers for a given text and extension . The list of available extensions for a given text can be found via the formaturi metadata extractor .
117
37
245,129
def _format_download_uri ( etextno , mirror = None , prefer_ascii = False ) :
    """Return a working download URI for the given text.

    Tries the known plaintext extensions in encoding-preference order
    and returns the first URI that exists on the mirror.

    :raises UnknownDownloadUriException: if the mirror is unreachable
        or no textual format is available for the text.
    """
    mirror = mirror or _GUTENBERG_MIRROR
    if not _does_mirror_exist ( mirror ) :
        raise UnknownDownloadUriException (
            'Could not reach Gutenberg mirror "{:s}". Try setting a '
            'different mirror (https://www.gutenberg.org/MIRRORS.ALL) for '
            '--mirror flag or GUTENBERG_MIRROR environment variable.' . format ( mirror ) )
    # Check https://www.gutenberg.org/files/ for details about available
    # extensions ;
    # - .txt is plaintext us-ascii
    # - -8.txt is 8-bit plaintext, multiple encodings
    # - -0.txt is UTF-8
    ascii_first = ( '.txt' , '-0.txt' , '-8.txt' )
    utf8_first = ( '-0.txt' , '-8.txt' , '.txt' )
    extensions = ascii_first if prefer_ascii else utf8_first
    for extension in extensions :
        uri = _format_download_uri_for_extension ( etextno , extension , mirror )
        if _does_uri_exist ( uri ) :
            return uri
    raise UnknownDownloadUriException (
        'Failed to find a textual download candidate for {} on {}. '
        'Either the book does not exist or it is only available in '
        'non-textual formats.' . format ( etextno , mirror ) )
Returns the download location on the Project Gutenberg servers for a given text .
344
14
245,130
def load_etext ( etextno , refresh_cache = False , mirror = None , prefer_ascii = False ) :
    """Return the full body of a Project Gutenberg text as unicode.

    The text is downloaded on first access and cached locally as gzip;
    later calls read from the cache.

    :param etextno: Gutenberg text number.
    :param refresh_cache: delete any cached copy and re-download.
    :param mirror: mirror URL override.
    :param prefer_ascii: prefer us-ascii plaintext over UTF-8.
    """
    etextno = validate_etextno ( etextno )
    cached = os . path . join ( _TEXT_CACHE , '{}.txt.gz' . format ( etextno ) )
    if refresh_cache :
        remove ( cached )
    if not os . path . exists ( cached ) :
        makedirs ( os . path . dirname ( cached ) )
        download_uri = _format_download_uri ( etextno , mirror , prefer_ascii )
        response = requests . get ( download_uri )
        # Ensure proper UTF-8 saving. There might be instances of ebooks or
        # mirrors which advertise a broken encoding, and this will break
        # downstream usages. For example, #55517 from aleph.gutenberg.org:
        #
        # from gutenberg.acquire import load_etext
        # print(load_etext(55517, refresh_cache=True)[0:1000])
        #
        # response.encoding will be 'ISO-8859-1' while the file is UTF-8
        if response . encoding != response . apparent_encoding :
            response . encoding = response . apparent_encoding
        text = response . text
        with closing ( gzip . open ( cached , 'w' ) ) as cache :
            cache . write ( text . encode ( 'utf-8' ) )
    with closing ( gzip . open ( cached , 'r' ) ) as cache :
        text = cache . read ( ) . decode ( 'utf-8' )
    return text
Returns a unicode representation of the full body of a Project Gutenberg text . After making an initial remote call to Project Gutenberg s servers the text is persisted locally .
341
32
245,131
def disable_logging(logger=None):
    """Context manager that temporarily suppresses a logger.

    Suppresses *logger* (the root logger when None) for the duration
    of the with-block, restoring the previous state afterwards.
    """
    logger = logger or logging.getLogger()
    disabled = logger.disabled
    logger.disabled = True
    try:
        yield
    finally:
        # Restore even if the body raised, so logging is not left
        # disabled after an exception.
        logger.disabled = disabled
Context manager to temporarily suppress all logging for a given logger or the root logger if no particular logger is specified .
38
22
245,132
def makedirs(*args, **kwargs):
    """os.makedirs wrapper that tolerates already-existing directories.

    Any other OSError is re-raised unchanged.
    """
    try:
        os.makedirs(*args, **kwargs)
    except OSError as error:
        if error.errno != errno.EEXIST:
            raise
Wrapper around os . makedirs that doesn t raise an exception if the directory already exists .
54
20
245,133
def remove(path):
    """Delete *path* whether it is a file or a directory.

    Missing paths are ignored; directories are removed recursively.
    """
    if os.path.isdir(path):
        return shutil.rmtree(path)
    if os.path.isfile(path):
        return os.remove(path)
Wrapper that switches between os . remove and shutil . rmtree depending on whether the provided path is a file or directory .
57
27
245,134
def determine_encoding(path, default=None):
    """Determine a file's encoding from its byte order mark.

    Reads the first four bytes of *path* and matches them against the
    known UTF BOMs.  Returns *default* when the file cannot be read or
    carries no recognised BOM.
    """
    # UTF-32 must be tested before UTF-16: BOM_UTF16_LE (ff fe) is a
    # prefix of BOM_UTF32_LE (ff fe 00 00), so the previous
    # utf-16-first order misdetected UTF-32-LE files as UTF-16.
    byte_order_marks = (
        ('utf-32', (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE)),
        ('utf-16', (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE)),
        ('utf-8-sig', (codecs.BOM_UTF8, )),
    )
    try:
        with open(path, 'rb') as infile:
            raw = infile.read(4)
    except IOError:
        return default
    for encoding, boms in byte_order_marks:
        if any(raw.startswith(bom) for bom in boms):
            return encoding
    return default
Determines the encoding of a file based on byte order marks .
172
14
245,135
def reopen_encoded(fileobj, mode='r', fallback_encoding=None):
    """Reopen *fileobj* with an encoding detected from its BOM.

    Falls back to *fallback_encoding* when no BOM is found.
    """
    detected = determine_encoding(fileobj.name, fallback_encoding)
    fileobj.close()
    return open(fileobj.name, mode, encoding=detected)
Makes sure that a file was opened with some valid encoding .
62
13
245,136
def get_metadata(feature_name, etextno):
    """Look up a metadata feature's values for a given text.

    :returns: frozenset of the feature's values for *etextno*.
    """
    extractor = MetadataExtractor.get(feature_name)
    return frozenset(extractor.get_metadata(etextno))
Looks up the value of a meta - data feature for a given text .
47
15
245,137
def get_etexts(feature_name, value):
    """Look up all texts whose metadata matches *value* for a feature.

    :returns: frozenset of matching text numbers.
    """
    extractor = MetadataExtractor.get(feature_name)
    return frozenset(extractor.get_etexts(value))
Looks up all the texts that have meta - data matching some criterion .
51
14
245,138
def _uri_to_etext ( cls , uri_ref ) :
    """Convert an RDF URI reference to an integer text identifier.

    Returns None when the URI does not encode a valid etext number.
    NOTE(review): a non-numeric basename raises ValueError here, which
    is not caught — confirm callers expect that.
    """
    try :
        return validate_etextno ( int ( os . path . basename ( uri_ref . toPython ( ) ) ) )
    except InvalidEtextIdException :
        return None
Converts the representation used to identify a text in the meta - data RDF graph to a human - friendly integer text identifier .
57
26
245,139
def _implementations ( cls ) :
    """Return a mapping of feature name to concrete extractor subclass.

    The mapping is computed once and memoised on the class; subclasses
    that do not implement feature_name() are skipped.
    """
    if cls . __implementations :
        return cls . __implementations
    cls . __implementations = { }
    for implementation in all_subclasses ( MetadataExtractor ) :
        try :
            feature_name = implementation . feature_name ( )
            cls . __implementations [ feature_name ] = implementation
        except NotImplementedError :
            pass
    return cls . __implementations
Returns all the concrete subclasses of MetadataExtractor .
94
12
245,140
def get ( feature_name ) :
    """Return the MetadataExtractor registered for *feature_name*.

    :raises UnsupportedFeatureException: when no extractor supports the
        feature; the message lists the supported features.
    """
    implementations = MetadataExtractor . _implementations ( )
    try :
        return implementations [ feature_name ]
    except KeyError :
        raise UnsupportedFeatureException (
            'no MetadataExtractor registered for feature "{feature_name}" '
            '(try any of the following: {supported_features})' . format (
                feature_name = feature_name ,
                supported_features = ', ' . join ( sorted ( implementations ) ) ) )
Returns the MetadataExtractor that can extract information about the provided feature name .
95
16
245,141
def set_metadata_cache ( cache ) :
    """Install *cache* as the process-wide metadata cache.

    Closes the previously installed cache if it is still open.
    """
    global _METADATA_CACHE
    if _METADATA_CACHE and _METADATA_CACHE . is_open :
        _METADATA_CACHE . close ( )
    _METADATA_CACHE = cache
Sets the metadata cache object to use .
64
9
245,142
def _create_metadata_cache ( cache_location ) :
    """Create the metadata cache implementation for this platform.

    Prefers a remote Fuseki cache when GUTENBERG_FUSEKI_URL is set,
    then BSD-DB (Sleepycat), falling back to SQLite.
    """
    cache_url = os . getenv ( 'GUTENBERG_FUSEKI_URL' )
    if cache_url :
        return FusekiMetadataCache ( cache_location , cache_url )
    try :
        return SleepycatMetadataCache ( cache_location )
    except InvalidCacheException :
        logging . warning ( 'Unable to create cache based on BSD-DB. '
                          'Falling back to SQLite backend. '
                          'Performance may be degraded significantly.' )
        return SqliteMetadataCache ( cache_location )
Creates a new metadata cache instance appropriate for this platform .
124
12
245,143
def open ( self ) :
    """Open an existing cache graph.

    :raises InvalidCacheException: when the cache is missing or corrupt.
    """
    try :
        self . graph . open ( self . cache_uri , create = False )
        self . _add_namespaces ( self . graph )
        self . is_open = True
    except Exception :
        raise InvalidCacheException ( 'The cache is invalid or not created' )
Opens an existing cache .
61
6
245,144
def populate ( self ) :
    """Download the metadata catalog and fill a new cache with it.

    :raises CacheAlreadyExistsException: when the cache already exists.
    """
    if self . exists :
        raise CacheAlreadyExistsException ( 'location: %s' % self . cache_uri )
    self . _populate_setup ( )
    with closing ( self . graph ) :
        with self . _download_metadata_archive ( ) as metadata_archive :
            for fact in self . _iter_metadata_triples ( metadata_archive ) :
                self . _add_to_graph ( fact )
Populates a new cache .
93
6
245,145
def refresh ( self ) :
    """Rebuild the cache: delete any existing one, repopulate, reopen."""
    if self . exists :
        self . delete ( )
    self . populate ( )
    self . open ( )
Refresh the cache by deleting the old one and creating a new one .
26
15
245,146
def _download_metadata_archive ( self ) :
    """Download the full Project Gutenberg RDF metadata catalog.

    Yields a path to a temporary copy of the catalog archive; the file
    is deleted when the context exits.
    """
    with tempfile . NamedTemporaryFile ( delete = False ) as metadata_archive :
        shutil . copyfileobj ( urlopen ( self . catalog_source ) , metadata_archive )
    yield metadata_archive . name
    remove ( metadata_archive . name )
Makes a remote call to the Project Gutenberg servers and downloads the entire Project Gutenberg meta - data catalog . The catalog describes the texts on Project Gutenberg in RDF . The function returns a file - pointer to the catalog .
64
44
245,147
def _metadata_is_invalid ( cls , fact ) :
    """Return True if the RDF triple *fact* is malformed.

    A fact is invalid when any of its URI references contains a space.
    """
    return any ( isinstance ( token , URIRef ) and ' ' in token
                 for token in fact )
Determines if the fact is not well formed .
37
11
245,148
def _iter_metadata_triples ( cls , metadata_archive_path ) :
    """Yield all metadata triples contained in the catalog archive.

    Iterates the tar archive, parses every per-text RDF file
    (pgNNN.rdf) and yields its facts, skipping malformed triples.
    """
    pg_rdf_regex = re . compile ( r'pg\d+.rdf$' )
    with closing ( tarfile . open ( metadata_archive_path ) ) as metadata_archive :
        for item in metadata_archive :
            if pg_rdf_regex . search ( item . name ) :
                # Parsing is noisy; suppress logging only for the parse so
                # the skip messages below still appear.
                with disable_logging ( ) :
                    extracted = metadata_archive . extractfile ( item )
                    graph = Graph ( ) . parse ( extracted )
                for fact in graph :
                    if cls . _metadata_is_invalid ( fact ) :
                        logging . info ( 'skipping invalid triple %s' , fact )
                    else :
                        yield fact
Yields all meta - data of Project Gutenberg texts contained in the catalog dump .
153
17
245,149
def _populate_setup ( self ) :
    """Create the local cache marker file for a Fuseki-backed cache.

    The actual database lives on the Fuseki server; the marker only
    records the cache URI locally, then the graph is opened.
    """
    makedirs ( os . path . dirname ( self . _cache_marker ) )
    with codecs . open ( self . _cache_marker , 'w' , encoding = 'utf-8' ) as fobj :
        fobj . write ( self . cache_uri )
    self . graph . open ( self . cache_uri )
Just create a local marker file since the actual database should already be created on the Fuseki server .
83
21
245,150
def delete ( self ) :
    """Delete the local marker file and all data on the Fuseki server."""
    MetadataCache . delete ( self )
    try :
        self . graph . query ( 'DELETE WHERE { ?s ?p ?o . }' )
    except ResultException :
        # this is often just a false positive since Jena Fuseki does not
        # return tuples for a deletion query, so swallowing the exception
        # here is fine
        logging . exception ( 'error when deleting graph' )
Deletes the local marker file and also any data in the Fuseki server .
86
17
245,151
def _metadata_is_invalid ( cls , fact ) :
    """Return True for malformed facts or facts containing blank nodes.

    Blank nodes are filtered out because the SPARQLUpdateStore does
    not support them.
    """
    return ( MetadataCache . _metadata_is_invalid ( fact )
             or any ( isinstance ( token , BNode ) for token in fact ) )
Filters out blank nodes since the SPARQLUpdateStore does not support them .
48
17
245,152
def all_subclasses(cls):
    """Recursively collect every subclass of *cls*.

    :returns: set of all direct and indirect subclasses.
    """
    result = set()
    for subclass in cls.__subclasses__():
        result.add(subclass)
        result |= all_subclasses(subclass)
    return result
Recursively returns all the subclasses of the provided class .
53
13
245,153
def _collapse_cursor(self, parts):
    """Apply CursorMoveUp tokens by deleting the preceding line's tokens."""
    final_parts = []
    for part in parts:
        # Throw out empty string tokens ("")
        if not part:
            continue
        if part != CursorMoveUp:
            # Ordinary token: pass it through.
            final_parts.append(part)
            continue
        # Go back, deleting every token in the last 'line'.
        if final_parts:
            final_parts.pop()
        while final_parts and '\n' not in final_parts[-1]:
            final_parts.pop()
    return final_parts
Act on any CursorMoveUp commands by deleting preceding tokens
117
12
245,154
def prepare ( self , ansi = '' , ensure_trailing_newline = False ) :
    """Convert *ansi* input and store the rendering attributes.

    :param ansi: raw ANSI text to convert.
    :param ensure_trailing_newline: append a newline when the body
        needs one.
    :returns: dict of attributes (body, styles, display options).
    """
    body , styles = self . apply_regex ( ansi )
    if ensure_trailing_newline and _needs_extra_newline ( body ) :
        body += '\n'
    self . _attrs = {
        'dark_bg' : self . dark_bg ,
        'line_wrap' : self . line_wrap ,
        'font_size' : self . font_size ,
        'body' : body ,
        'styles' : styles ,
    }
    return self . _attrs
Load the contents of ansi into this object
123
9
245,155
def run ( self ) :
    """Run the build_rust sub-command before the normal build_ext."""
    if self . has_rust_extensions ( ) :
        log . info ( "running build_rust" )
        build_rust = self . get_finalized_command ( "build_rust" )
        # Propagate the in-place flag so artifacts land next to sources.
        build_rust . inplace = self . inplace
        build_rust . run ( )
    _build_ext . run ( self )
Run build_rust sub command
75
6
245,156
def get_lib_name ( self ) :
    """Parse Cargo.toml and return the extension's library name.

    Prefers the [lib] section's name, then [package]; dashes, dots and
    slashes are normalised to underscores.

    :raises Exception: when neither section declares a name.
    """
    # We import in here to make sure the setup_requires are already installed
    import toml
    cfg = toml . load ( self . path )
    name = cfg . get ( "lib" , { } ) . get ( "name" )
    if name is None :
        name = cfg . get ( "package" , { } ) . get ( "name" )
    if name is None :
        raise Exception (
            "Can not parse library name from Cargo.toml. "
            "Cargo.toml missing value for 'name' key "
            "in both the [package] section and the [lib] section" )
    name = re . sub ( r"[./\\-]" , "_" , name )
    return name
Parse Cargo . toml to get the name of the shared library .
159
15
245,157
def find_rust_extensions ( * directories , * * kwargs ) :
    """Discover Rust extensions under the given directories.

    Walks each directory (default: the current working directory) and
    creates a RustExtension for every folder containing the marker
    file (``lib.rs`` by default, via the ``libfile`` kwarg).  Remaining
    kwargs are forwarded to RustExtension.
    """
    # Get the file used to mark a Rust extension
    libfile = kwargs . get ( "libfile" , "lib.rs" )
    # Get the directories to explore
    directories = directories or [ os . getcwd ( ) ]
    extensions = [ ]
    for directory in directories :
        for base , dirs , files in os . walk ( directory ) :
            if libfile in files :
                # Module dotted path mirrors the directory structure.
                dotpath = os . path . relpath ( base ) . replace ( os . path . sep , "." )
                tomlpath = os . path . join ( base , "Cargo.toml" )
                ext = RustExtension ( dotpath , tomlpath , * * kwargs )
                ext . libfile = os . path . join ( base , libfile )
                extensions . append ( ext )
    return extensions
Attempt to find Rust extensions in given directories .
184
9
245,158
def register(self, event, fn):
    """Register *fn* as a handler for *event*.

    A handler is stored at most once per event.
    """
    # TODO: Can we check the method signature?
    handlers = self._handler_dict.setdefault(event, [])
    if fn not in handlers:
        handlers.append(fn)
Registers the given function as a handler to be applied in response to the the given event .
64
19
245,159
def apply(self, event, document, *args, **kwargs):
    """Invoke every handler registered for *event*, in order.

    Each handler receives *document* followed by any extra arguments.
    """
    handlers = self._handler_dict.get(event, [])
    for handler in handlers:
        handler(document, *args, **kwargs)
Applies all middleware functions registered against the given event in order to the given document .
50
18
245,160
def deregister(self, event, fn):
    """Remove *fn* from the handlers registered for *event*, if present."""
    handlers = self._handler_dict.get(event)
    if handlers is not None and fn in handlers:
        handlers.remove(fn)
Deregister the handler function from the given event .
47
12
245,161
def unpack_scope(cls, scope):
    """Unpack a scope function's return value.

    A scope may return a query dict, or a tuple of (query,),
    (query, projection), or (query, projection, options).

    :returns: (query, projection, options) triple.
    :raises ValueError: for tuples longer than 3 or non-dict/tuple input.
    """
    if isinstance(scope, dict):
        return scope, {}, {}
    if not isinstance(scope, tuple):
        raise ValueError("Invalid scope")
    if len(scope) > 3:
        raise ValueError("Invalid scope")
    padded = tuple(scope) + ({}, {}, {})
    return padded[0], padded[1], padded[2]
Unpacks the response from a scope function. The function should return either a query; a query and a projection; or a query, a projection, and a query options hash.
122
32
245,162
def register_fn(cls, f):
    """Register scope function *f* as a chainable method on this builder.

    The generated method merges the scope's query, projection and
    options into copies of the builder's current state and returns a
    new ScopeBuilder.
    """
    def inner(self, *args, **kwargs):
        try:
            query, projection, options = cls.unpack_scope(f(*args, **kwargs))
            new_query = deepcopy(self.query)
            new_projection = deepcopy(self.projection)
            new_options = deepcopy(self.options)
            deep_merge(query, new_query)
            new_projection.update(projection)
            new_options.update(options)
            return ScopeBuilder(self.model, self.fns, new_query,
                                new_projection, new_options)
        except ValueError:
            # Fixed a previously malformed message: the closing quote
            # after the function name was missing ("\"{}\ returns ...").
            raise ValueError(
                "Scope function \"{}\" returns an invalid scope".format(
                    f.__name__))
    setattr(cls, f.__name__, inner)
Registers a scope function on this builder .
185
9
245,163
def cursor ( self ) :
    """Return (lazily creating) a cursor for the assembled query."""
    if not self . _active_cursor :
        self . _active_cursor = self . model . find ( self . query ,
                                                  self . projection or None ,
                                                  * * self . options )
    return self . _active_cursor
Returns a cursor for the currently assembled query creating it if it doesn t already exist .
54
17
245,164
def _ensure_object_id ( cls , id ) :
    """Wrap *id* in an ObjectId when it looks like one, else return as-is.

    NOTE(review): uses the Python 2 ``basestring`` builtin — this block
    is not Python 3 compatible as written; confirm the target runtime.
    """
    if isinstance ( id , ObjectId ) :
        return id
    if isinstance ( id , basestring ) and OBJECTIDEXPR . match ( id ) :
        return ObjectId ( id )
    return id
Checks whether the given id is an ObjectId instance and if not wraps it .
57
17
245,165
def apply_defaults ( self ) :
    """Apply schema defaults to this document, emitting lifecycle events."""
    self . emit ( 'will_apply_defaults' )
    self . schema . apply_defaults ( self )
    self . emit ( 'did_apply_defaults' )
Apply schema defaults to this document .
46
7
245,166
def reload ( self ) :
    """Re-fetch this model's data from the database, updating in place."""
    self . emit ( 'will_reload' )
    self . populate ( self . collection . find_one ( type ( self ) . _id_spec ( self [ '_id' ] ) ) )
    self . emit ( 'did_reload' )
Reloads the current model s data from the underlying database record updating it in - place .
60
19
245,167
def on(cls, event, handler_func=None):
    """Register a handler for ``event`` on this model class.

    Usable either directly (``Model.on('saved', fn)``) or as a
    decorator (``@Model.on('saved')``). The decorator form returns the
    wrapped function unchanged.
    """
    if handler_func:
        cls.handler_registrar().register(event, handler_func)
        return None
    def _decorator(fn):
        cls.handler_registrar().register(event, fn)
        return fn
    return _decorator
Registers a handler function whenever an instance of the model emits the given event .
67
16
245,168
def _emit ( self , event , document , * args , * * kwargs ) : self . handler_registrar ( ) . apply ( event , document , * args , * * kwargs )
Inner version of emit which passes the given document as the primary argument to handler functions .
46
18
245,169
def emit(self, event, *args, **kwargs):
    """Emit ``event`` against this document: every handler registered on
    the class for that event is invoked with this instance first."""
    self._emit(event, self, *args, **kwargs)
Emits an event call to all handler functions registered against this model s class and the given event type .
36
21
245,170
def static_method(cls, f):
    """Decorator: attach ``f`` to the model as a static method, returning
    ``f`` unchanged so it stays usable standalone."""
    setattr(cls, f.__name__, staticmethod(f))
    return f
Decorator which dynamically binds static methods to the model for later use .
31
15
245,171
def class_method(cls, f):
    """Decorator: attach ``f`` to the model as a class method, returning
    ``f`` unchanged so it stays usable standalone."""
    setattr(cls, f.__name__, classmethod(f))
    return f
Decorator which dynamically binds class methods to the model for later use .
31
15
245,172
def scope(cls, f):
    """Decorator: attach a chainable query scope to the model.

    The scope list is initialised lazily (from STANDARD_SCOPES) on first
    use; calling the scope on the model creates a fresh ScopeBuilder and
    immediately applies this scope to it.
    """
    if not hasattr(cls, "scopes"):
        cls.scopes = copy(STANDARD_SCOPES)
    cls.scopes.append(f)

    def create_builder(self, *args, **kwargs):
        builder = ScopeBuilder(cls, cls.scopes)
        return getattr(builder, f.__name__)(*args, **kwargs)

    setattr(cls, f.__name__, classmethod(create_builder))
    return f
Decorator which can dynamically attach a query scope to the model .
127
14
245,173
def _module_name_from_previous_frame ( num_frames_back ) : frm = inspect . stack ( ) [ num_frames_back + 1 ] return inspect . getmodule ( frm [ 0 ] ) . __name__
Returns the module name associated with a frame num_frames_back in the call stack . This function adds 1 to account for itself so num_frames_back should be given relative to the caller .
53
40
245,174
def create_model(schema, collection, class_name=None):
    """Create a Mongothon model class bound to ``schema`` and the given
    pymongo ``collection``. The class name defaults to the camelized
    collection name."""
    name = class_name or camelize(str(collection.name))
    attrs = dict(schema=schema,
                 _collection_factory=staticmethod(lambda: collection))
    model_class = type(name, (Model,), attrs)
    # The class is built dynamically here, so point __module__ back at
    # the module that called `create_model`.
    model_class.__module__ = _module_name_from_previous_frame(1)
    return model_class
Main entry point to creating a new mongothon model . Both schema and Pymongo collection objects must be provided .
133
25
245,175
def create_model_offline(schema, collection_factory, class_name):
    """Create a Mongothon model without touching the database.

    The collection is resolved lazily through ``collection_factory`` on
    the model's first database access.
    """
    model_class = type(
        class_name,
        (Model,),
        {"schema": schema,
         "_collection_factory": staticmethod(collection_factory)})
    # The class is built dynamically here, so point __module__ back at
    # the module that called `create_model_offline`.
    model_class.__module__ = _module_name_from_previous_frame(1)
    return model_class
Entry point for creating a new Mongothon model without instantiating a database connection . The collection is instead provided through a closure that is resolved upon the model s first database access .
121
36
245,176
def wrap(value):
    """Wrap plain dicts/lists in Document/DocumentList; already-wrapped
    values and everything else pass through untouched."""
    if isinstance(value, (Document, DocumentList)):
        return value
    if isinstance(value, dict):
        return Document(value)
    if isinstance(value, list):
        return DocumentList(value)
    return value
Wraps the given value in a Document or DocumentList as applicable .
61
14
245,177
def unwrap(value):
    """Convert a Document/DocumentList back to a plain dict/list; any
    other value is returned unchanged."""
    if isinstance(value, Document):
        return value.to_dict()
    if isinstance(value, DocumentList):
        return value.to_list()
    return value
Unwraps the given Document or DocumentList as applicable .
47
12
245,178
def note_change(self, key, value):
    """Record that ``key`` is changing to ``value``.

    No-op assignments and fields already noted as changed or added are
    ignored; reverting a field to its original value cancels the pending
    change note so the state reflects a pure diff since the last reset.
    """
    current = self._instance[key]
    is_fresh_change = (value != current
                       and key not in self._previous
                       and key not in self._added)
    if is_fresh_change:
        self._previous[key] = current
    # Setting the value back to the original discards the change note.
    if key in self._previous and value == self._previous[key]:
        del self._previous[key]
Updates change state to reflect a change to a field . Takes care of ignoring no - ops reversions and takes appropriate steps if the field was previously deleted or added to ensure the change state purely reflects the diff since last reset .
120
46
245,179
def note_addition(self, key, value):
    """Record the addition of field ``key`` with ``value``.

    Re-adding a previously deleted field is downgraded to a change (or a
    no-op when the value matches the original); otherwise the key is
    simply noted as added.
    """
    if key not in self._deleted:
        self._added.append(key)
        return
    # The field was deleted earlier in this window; adding it back with
    # a different value amounts to a change from the original.
    original = self._deleted.pop(key)
    if value != original:
        self._previous[key] = original
Updates the change state to reflect the addition of a field . Detects previous changes and deletions of the field and acts accordingly .
105
27
245,180
def note_deletion(self, key):
    """Record the deletion of field ``key``.

    Deleting a field that was added in the same window is a net no-op;
    otherwise the field's original (pre-change) value is remembered in
    the deleted map.
    """
    if key in self._added:
        # Added then deleted within one tracking window: no diff at all.
        self._added.remove(key)
        return
    if key in self._previous:
        # Prefer the value recorded before any in-window change.
        self._deleted[key] = self._previous.pop(key)
    else:
        self._deleted[key] = self._instance[key]
Notes the deletion of a field .
114
7
245,181
def changes(self):
    """Return ``{field: (old_value, new_value)}`` for every field changed
    since this document was created or last saved."""
    result = {}
    for key, old in self._previous.items():
        result[key] = (old, self._instance[key])
    return result
Returns a dict containing just the fields which have changed on this Document since it was created or last saved together with both their previous and current values
37
28
245,182
def reset_all_changes(self):
    """Reset change tracking on this document and recurse into every
    nested Document/DocumentList value."""
    self.reset_changes()
    for child in self.values():
        if isinstance(child, (Document, DocumentList)):
            child.reset_all_changes()
Resets change tracking in this document recursing into child Documents and DocumentLists .
53
18
245,183
def populate(self, other):
    """Replace this document's entire contents with ``other``'s (like
    ``update`` but clearing first), then wipe all change tracking."""
    self.clear()
    self.update(other)
    self.reset_all_changes()
Like update but clears the contents first .
28
8
245,184
def parse(self, buffer):
    """Parse raw datagram bytes into the matching TftpPacket instance.

    The first two bytes carry the opcode, which selects the packet
    class; the packet is then handed the full buffer and decoded.
    """
    log.debug("parsing a %d byte packet" % len(buffer))
    (opcode,) = struct.unpack(str("!H"), buffer[:2])
    log.debug("opcode is %d" % opcode)
    pkt = self.__create(opcode)
    pkt.buffer = buffer
    return pkt.decode()
This method is used to parse an existing datagram into its corresponding TftpPacket object . The buffer is the raw bytes off of the network .
89
31
245,185
def __create(self, opcode):
    """Instantiate and return the packet class registered for ``opcode``,
    asserting the opcode is a supported one."""
    tftpassert(opcode in self.classes,
               "Unsupported opcode: %d" % opcode)
    return self.classes[opcode]()
This method returns the appropriate class object corresponding to the passed opcode .
48
14
245,186
def add_dup(self, pkt):
    """Count a duplicate of ``pkt`` in the metrics, asserting that the
    duplicate ceiling (MAX_DUPS) has not been reached."""
    log.debug("Recording a dup of %s", pkt)
    key = str(pkt)
    self.dups[key] = self.dups.get(key, 0) + 1
    tftpassert(self.dups[key] < MAX_DUPS, "Max duplicates reached")
This method adds a dup for a packet to the metrics .
90
12
245,187
def checkTimeout(self, now):
    """Raise TftpTimeout if no traffic has been seen within the session's
    timeout window (measured against ``last_update``)."""
    log.debug("checking for timeout on session %s", self)
    elapsed = now - self.last_update
    if elapsed > self.timeout:
        raise TftpTimeout("Timeout waiting for traffic")
Compare current time with last_update time and raise an exception if we re over the timeout time .
51
20
245,188
def end(self, close_fileobj=True):
    """Clean up the session: close the socket, and close the file object
    unless ``close_fileobj`` is False (so callers may keep it open).

    Should always be called explicitly rather than relying on __del__.
    """
    log.debug("in TftpContext.end - closing socket")
    self.sock.close()
    should_close = (close_fileobj
                    and self.fileobj is not None
                    and not self.fileobj.closed)
    if should_close:
        log.debug("self.fileobj is open - closing")
        self.fileobj.close()
Perform session cleanup . Since the end method should always be called explicitly by the calling code , this works better than the destructor . Set close_fileobj to False so fileobj can be returned open .
82
42
245,189
def sethost(self, host):
    """Set the remote host, resolving and storing its IP address as the
    ``address`` attribute in the same step."""
    self.__host = host
    self.address = socket.gethostbyname(host)
Setter method that also sets the address property as a result of the host that is set .
28
19
245,190
def cycle(self):
    """Wait for one server response, decode it, and hand it to the
    current state handler, advancing the session state machine.

    Raises TftpTimeout when the socket times out; a successful pass
    resets the retry counter.
    """
    try:
        (buffer, (raddress, rport)) = self.sock.recvfrom(MAX_BLKSIZE)
    except socket.timeout:
        log.warning("Timeout waiting for traffic, retrying...")
        raise TftpTimeout("Timed-out waiting for traffic")
    # Ok, we've received a packet. Log it.
    log.debug("Received %d bytes from %s:%s", len(buffer), raddress, rport)
    # And update our last updated time.
    self.last_update = time.time()
    # Decode it.
    recvpkt = self.factory.parse(buffer)
    # Check for known "connection".
    # NOTE(review): both checks only log; the packet is still processed
    # below despite the "Discarding" wording — confirm intended.
    if raddress != self.address:
        log.warning("Received traffic from %s, expected host %s. Discarding"
                    % (raddress, self.host))
    if self.tidport and self.tidport != rport:
        log.warning("Received traffic from %s:%s but we're "
                    "connected to %s:%s. Discarding."
                    % (raddress, rport, self.host, self.tidport))
    # If there is a packethook defined, call it. We unconditionally
    # pass all packets, it's up to the client to screen out different
    # kinds of packets. This way, the client is privy to things like
    # negotiated options.
    if self.packethook:
        self.packethook(recvpkt)
    # And handle it, possibly changing state.
    self.state = self.state.handle(recvpkt, raddress, rport)
    # If we didn't throw any exceptions here, reset the retry_count to
    # zero.
    self.retry_count = 0
Here we wait for a response from the server after sending it something and dispatch appropriate action to that response .
378
21
245,191
def start(self):
    """Initiate the download: send the initial RRQ and run the transfer
    state machine until completion, timeout exhaustion, or fatal error.

    On a file-not-found error the empty output file (if we created one
    on disk) is removed before re-raising.
    """
    log.info("Sending tftp download request to %s" % self.host)
    log.info(" filename -> %s" % self.file_to_transfer)
    log.info(" options -> %s" % self.options)
    self.metrics.start_time = time.time()
    log.debug("Set metrics.start_time to %s" % self.metrics.start_time)
    # FIXME: put this in a sendRRQ method?
    pkt = TftpPacketRRQ()
    pkt.filename = self.file_to_transfer
    pkt.mode = "octet"  # FIXME - shouldn't hardcode this
    pkt.options = self.options
    self.sock.sendto(pkt.encode().buffer, (self.host, self.port))
    self.next_block = 1
    self.last_pkt = pkt
    self.state = TftpStateSentRRQ(self)
    while self.state:
        try:
            log.debug("State is %s" % self.state)
            self.cycle()
        except TftpTimeout as err:
            log.error(str(err))
            self.retry_count += 1
            if self.retry_count >= TIMEOUT_RETRIES:
                log.debug("hit max retries, giving up")
                raise
            else:
                log.warning("resending last packet")
                self.state.resendLast()
        except TftpFileNotFoundError as err:
            # If we received file not found, then we should not save the
            # open output file or we'll be left with a size zero file.
            # Delete it, if it exists.
            log.error("Received File not found error")
            if self.fileobj is not None and not self.filelike_fileobj:
                if os.path.exists(self.fileobj.name):
                    log.debug("unlinking output file of %s", self.fileobj.name)
                    os.unlink(self.fileobj.name)
            raise
Initiate the download .
450
6
245,192
def end(self):
    """Finish up the download context: run the base-class cleanup (keeping
    caller-supplied file-like objects open) and compute final metrics."""
    TftpContext.end(self, not self.filelike_fileobj)
    self.metrics.end_time = time.time()
    log.debug("Set metrics.end_time to %s" % self.metrics.end_time)
    self.metrics.compute()
Finish up the context .
68
5
245,193
def download(self, filename, output, packethook=None, timeout=SOCK_TIMEOUT):
    """Download ``filename`` from the configured remote host.

    ``output`` may be a file-like object or a local path. If given,
    ``packethook`` is called with each received TftpPacketDAT; ``timeout``
    overrides the default socket timeout. Transfer statistics are logged
    on completion.
    """
    # We're downloading.
    log.debug("Creating download context with the following params:")
    log.debug("host = %s, port = %s, filename = %s"
              % (self.host, self.iport, filename))
    log.debug("options = %s, packethook = %s, timeout = %s"
              % (self.options, packethook, timeout))
    self.context = TftpContextClientDownload(self.host,
                                             self.iport,
                                             filename,
                                             output,
                                             self.options,
                                             packethook,
                                             timeout,
                                             localip=self.localip)
    self.context.start()
    # Download happens here
    self.context.end()
    metrics = self.context.metrics
    log.info('')
    log.info("Download complete.")
    if metrics.duration == 0:
        log.info("Duration too short, rate undetermined")
    else:
        log.info("Downloaded %.2f bytes in %.2f seconds"
                 % (metrics.bytes, metrics.duration))
        log.info("Average rate: %.2f kbps" % metrics.kbps)
    log.info("%.2f bytes in resent data" % metrics.resent_bytes)
    log.info("Received %d duplicate packets" % metrics.dupcount)
This method initiates a tftp download from the configured remote host requesting the filename passed . It writes the file to output which can be a file - like object or a path to a local file . If a packethook is provided it must be a function that takes a single parameter which will be a copy of each DAT packet received in the form of a TftpPacketDAT object . The timeout parameter may be used to override the default SOCK_TIMEOUT setting which is the amount of time that the client will wait for a receive packet to arrive .
306
117
245,194
def upload(self, filename, input, packethook=None, timeout=SOCK_TIMEOUT):
    """Upload ``input`` (a file-like object or local path) to the
    configured remote host as ``filename``.

    If given, ``packethook`` is called with each sent TftpPacketDAT;
    ``timeout`` overrides the default socket timeout. Transfer
    statistics are logged on completion.
    """
    self.context = TftpContextClientUpload(self.host,
                                           self.iport,
                                           filename,
                                           input,
                                           self.options,
                                           packethook,
                                           timeout,
                                           localip=self.localip)
    self.context.start()
    # Upload happens here
    self.context.end()
    metrics = self.context.metrics
    log.info('')
    log.info("Upload complete.")
    if metrics.duration == 0:
        log.info("Duration too short, rate undetermined")
    else:
        log.info("Uploaded %d bytes in %.2f seconds"
                 % (metrics.bytes, metrics.duration))
        log.info("Average rate: %.2f kbps" % metrics.kbps)
    log.info("%.2f bytes in resent data" % metrics.resent_bytes)
    log.info("Resent %d packets" % metrics.dupcount)
This method initiates a tftp upload to the configured remote host uploading the filename passed . It reads the file from input which can be a file - like object or a path to a local file . If a packethook is provided it must be a function that takes a single parameter which will be a copy of each DAT packet sent in the form of a TftpPacketDAT object . The timeout parameter may be used to override the default SOCK_TIMEOUT setting which is the amount of time that the client will wait for a DAT packet to be ACKd by the server .
215
124
245,195
def decode_options(self, buffer):
    """Decode the options section of ``buffer`` into a dict of option
    name -> value strings.

    Options are encoded as alternating NUL-terminated strings; an empty
    buffer yields an empty dict, and a zero-length string raises
    TftpException.
    """
    fmt = b"!"
    options = {}
    log.debug("decode_options: buffer is: %s", repr(buffer))
    log.debug("size of buffer is %d bytes", len(buffer))
    if len(buffer) == 0:
        log.debug("size of buffer is zero, returning empty hash")
        return {}
    # Count the nulls in the buffer. Each one terminates a string.
    log.debug("about to iterate options buffer counting nulls")
    length = 0
    for i in range(len(buffer)):
        if ord(buffer[i:i + 1]) == 0:
            log.debug("found a null at length %d", length)
            if length > 0:
                fmt += b"%dsx" % length
                # -1 so the trailing increment restarts the count at 0.
                length = -1
            else:
                raise TftpException("Invalid options in buffer")
        length += 1
    log.debug("about to unpack, fmt is: %s", fmt)
    mystruct = struct.unpack(fmt, buffer)
    tftpassert(len(mystruct) % 2 == 0,
               "packet with odd number of option/value pairs")
    for i in range(0, len(mystruct), 2):
        key = mystruct[i].decode('ascii')
        val = mystruct[i + 1].decode('ascii')
        log.debug("setting option %s to %s", key, val)
        log.debug("types are %s and %s", type(key), type(val))
        options[key] = val
    return options
This method decodes the section of the buffer that contains an unknown number of options . It returns a dictionary of option names and values .
360
27
245,196
def encode(self):
    """Encode this RRQ/WRQ packet's buffer from its instance variables,
    returning self for chaining.

    Requires ``filename`` and ``mode`` to be set; only octet mode is
    supported. Options are appended as NUL-terminated byte strings.
    """
    tftpassert(self.filename, "filename required in initial packet")
    tftpassert(self.mode, "mode required in initial packet")
    # Make sure filename and mode are bytestrings.
    filename = self.filename
    mode = self.mode
    if not isinstance(filename, bytes):
        filename = filename.encode('ascii')
    if not isinstance(self.mode, bytes):
        mode = mode.encode('ascii')
    ptype = "RRQ" if self.opcode == 1 else "WRQ"
    log.debug("Encoding %s packet, filename = %s, mode = %s",
              ptype, filename, mode)
    for key in self.options:
        log.debug(" Option %s = %s", key, self.options[key])
    fmt = b"!H"
    fmt += b"%dsx" % len(filename)
    if mode == b"octet":
        fmt += b"5sx"
    else:
        raise AssertionError("Unsupported mode: %s" % mode)
    # Add options. Note that the options list must be bytes.
    options_list = []
    if len(list(self.options.keys())) > 0:
        log.debug("there are options to encode")
        for key in self.options:
            # Populate the option name
            name = key
            if not isinstance(name, bytes):
                name = name.encode('ascii')
            options_list.append(name)
            fmt += b"%dsx" % len(name)
            # Populate the option value
            value = self.options[key]
            # Work with all strings.
            if isinstance(value, int):
                value = str(value)
            if not isinstance(value, bytes):
                value = value.encode('ascii')
            options_list.append(value)
            fmt += b"%dsx" % len(value)
    log.debug("fmt is %s", fmt)
    log.debug("options_list is %s", options_list)
    log.debug("size of struct is %d", struct.calcsize(fmt))
    self.buffer = struct.pack(fmt, self.opcode, filename, mode, *options_list)
    log.debug("buffer is %s", repr(self.buffer))
    return self
Encode the packet s buffer from the instance variables .
530
11
245,197
def encode(self):
    """Encode this DAT packet into ``self.buffer`` (opcode, block number,
    then data), returning self for chaining."""
    if len(self.data) == 0:
        log.debug("Encoding an empty DAT packet")
    data = self.data
    if not isinstance(self.data, bytes):
        data = self.data.encode('ascii')
    fmt = b"!HH%ds" % len(data)
    self.buffer = struct.pack(fmt, self.opcode, self.blocknumber, data)
    return self
Encode the DAT packet . This method populates self . buffer and returns self for easy method chaining .
98
23
245,198
def decode(self):
    """Decode ``self.buffer`` into block number and data, returning self
    for chaining."""
    # We know the first 2 bytes are the opcode. The second two are the
    # block number.
    (self.blocknumber,) = struct.unpack(str("!H"), self.buffer[2:4])
    log.debug("decoding DAT packet, block number %d", self.blocknumber)
    log.debug("should be %d bytes in the packet total", len(self.buffer))
    # Everything else is data.
    self.data = self.buffer[4:]
    log.debug("found %d bytes of data", len(self.data))
    return self
Decode self . buffer into instance variables . It returns self for easy method chaining .
141
18
245,199
def encode(self):
    """Encode this ERR packet into ``self.buffer`` from ``errorcode`` and
    its canned message, returning self for chaining."""
    message = self.errmsgs[self.errorcode]
    fmt = b"!HH%dsx" % len(message)
    log.debug("encoding ERR packet with fmt %s", fmt)
    self.buffer = struct.pack(fmt,
                              self.opcode,
                              self.errorcode,
                              message)
    return self
Encode the DAT packet based on instance variables populating self . buffer returning self .
82
18