idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
242,600
def generate_sphinx_all():
    """Print sphinx documentation for every auto-wrappable libvips operation.

    Emits an autosummary table for ``pyvips.Image``, then the generated
    docstring for each operation nickname.
    """
    # generate list of all nicknames we can generate docstrings for
    all_nicknames = []

    def add_nickname(gtype, a, b):
        # type_map callback: record the nickname if a docstring can be built
        nickname = nickname_find(gtype)
        try:
            Operation.generate_sphinx(nickname)
            all_nicknames.append(nickname)
        except Error:
            pass

        # recurse into subtypes of this gtype
        type_map(gtype, add_nickname)

        return ffi.NULL

    type_map(type_from_name('VipsOperation'), add_nickname)

    all_nicknames.sort()

    # remove operations we have to wrap by hand
    exclude = ['scale', 'ifthenelse', 'bandjoin', 'bandrank']
    all_nicknames = [x for x in all_nicknames if x not in exclude]

    # Output summary table
    print('.. class:: pyvips.Image\n')
    print(' .. rubric:: Methods\n')
    print(' .. autosummary::')
    print(' :nosignatures:\n')
    for nickname in all_nicknames:
        print(' ~{0}'.format(nickname))
    print()

    # Output docs
    print()
    for nickname in all_nicknames:
        docstr = Operation.generate_sphinx(nickname)
        # indent every line so it nests under the class directive
        docstr = docstr.replace('\n', '\n ')
        print(' ' + docstr)
Generate sphinx documentation .
308
7
242,601
def new(image):
    """Make a region on an image."""
    vi = vips_lib.vips_region_new(image.pointer)
    if vi == ffi.NULL:
        raise Error('unable to make region')
    return pyvips.Region(vi)
Make a region on an image .
52
7
242,602
def fetch(self, x, y, w, h):
    """Fill a region with pixel data and return it as a raw byte buffer.

    Requires libvips >= 8.8.  The C buffer is released with g_free when
    the returned object is garbage-collected.
    """
    if not at_least_libvips(8, 8):
        raise Error('libvips too old')

    psize = ffi.new('size_t *')
    pointer = vips_lib.vips_region_fetch(self.pointer, x, y, w, h, psize)
    if pointer == ffi.NULL:
        raise Error('unable to fetch from region')

    # attach a destructor so the C memory is freed with the Python object
    pointer = ffi.gc(pointer, glib_lib.g_free)
    return ffi.buffer(pointer, psize[0])
Fill a region with pixel data .
139
7
242,603
def gtype_to_python(gtype):
    """Map a gtype to the name of the Python type we use to represent it."""
    table = GValue._gtype_to_python
    fundamental = gobject_lib.g_type_fundamental(gtype)

    # exact match first, then the fundamental type
    if gtype in table:
        return table[gtype]
    if fundamental in table:
        return table[fundamental]
    return '<unknown type>'
Map a gtype to the name of the Python type we use to represent it .
94
17
242,604
def to_enum(gtype, value):
    """Turn a string into an enum value ready to be passed into libvips.

    Integer values are passed through unchanged.  Raises Error for a
    string that is not a nick of the enum.
    """
    if isinstance(value, basestring if _is_PY2 else str):
        enum_value = vips_lib.vips_enum_from_nick(b'pyvips', gtype,
                                                  _to_bytes(value))
        if enum_value < 0:
            raise Error('no value {0} in gtype {1} ({2})'.
                        format(value, type_name(gtype), gtype))
    else:
        enum_value = value

    return enum_value
Turn a string into an enum value ready to be passed into libvips .
121
16
242,605
def from_enum(gtype, enum_value):
    """Turn an int back into an enum string."""
    nick = vips_lib.vips_enum_nick(gtype, enum_value)
    if nick == ffi.NULL:
        raise Error('value not in enum')
    return _to_string(nick)
Turn an int back into an enum string .
60
9
242,606
def to_polar(image):
    """Transform image coordinates to polar."""
    # xy image, origin in the centre, scaled to fit image to a circle
    coords = pyvips.Image.xyz(image.width, image.height)
    coords -= [image.width / 2.0, image.height / 2.0]
    scale = min(image.width, image.height) / float(image.width)
    coords *= 2.0 / scale
    index = coords.polar()
    # scale vertical axis to 360 degrees
    index *= [1, image.height / 360.0]
    return image.mapim(index)
Transform image coordinates to polar .
131
6
242,607
def to_rectangular(image):
    """Transform image coordinates to rectangular."""
    # xy image, vertical scaled to 360 degrees
    coords = pyvips.Image.xyz(image.width, image.height)
    coords *= [1, 360.0 / image.height]
    index = coords.rect()
    # scale to image rect
    scale = min(image.width, image.height) / float(image.width)
    index *= scale / 2.0
    index += [image.width / 2.0, image.height / 2.0]
    return image.mapim(index)
Transform image coordinates to rectangular .
121
6
242,608
def _to_string(x):
    """Convert a cffi string (or NULL) to a unicode string."""
    if x == ffi.NULL:
        return 'NULL'
    s = ffi.string(x)
    if isinstance(s, byte_type):
        s = s.decode('utf-8')
    return s
Convert to a unicode string .
58
8
242,609
def new(name):
    """Make a new interpolator by name."""
    handle = vips_lib.vips_interpolate_new(_to_bytes(name))
    if handle != ffi.NULL:
        return Interpolate(handle)
    raise Error('no such interpolator {0}'.format(name))
Make a new interpolator by name .
83
8
242,610
def _run_cmplx(fn, image):
    """Run a complex function on a non-complex image.

    A non-complex image with an even number of bands is viewed as a
    complex image with half the bands, ``fn`` is applied, and the result
    is converted back to the original interpretation.

    Raises:
        Error: if the image has an odd number of bands.
    """
    original_format = image.format

    if image.format not in ('complex', 'dpcomplex'):
        if image.bands % 2 != 0:
            raise Error('not an even number of bands')

        if image.format not in ('float', 'double'):
            image = image.cast('float')

        new_format = 'dpcomplex' if image.format == 'double' else 'complex'
        # bands is known to be even here; use integer division so the
        # band count stays an int under Python 3 (``/`` would give a float)
        image = image.copy(format=new_format, bands=image.bands // 2)

    image = fn(image)

    if original_format not in ('complex', 'dpcomplex'):
        new_format = 'double' if image.format == 'dpcomplex' else 'float'
        image = image.copy(format=new_format, bands=image.bands * 2)

    return image
Run a complex function on a non - complex image .
204
11
242,611
def get_suffixes():
    """Get a list of all the filename suffixes supported by libvips.

    Returns an empty list for libvips older than 8.8.
    """
    names = []

    if at_least_libvips(8, 8):
        array = vips_lib.vips_foreign_get_suffixes()
        i = 0
        while array[i] != ffi.NULL:
            name = _to_string(array[i])
            if name not in names:
                names.append(name)
            # each element of the NULL-terminated array is g_malloc'ed
            glib_lib.g_free(array[i])
            i += 1
        glib_lib.g_free(array)

    return names
Get a list of all the filename suffixes supported by libvips .
115
15
242,612
def at_least_libvips(x, y):
    """Is this at least libvips x.y?"""
    major = version(0)
    minor = version(1)
    if major > x:
        return True
    return major == x and minor >= y
Is this at least libvips x . y?
41
11
242,613
def type_map(gtype, fn):
    """Map fn over all child types of gtype."""
    callback = ffi.callback('VipsTypeMap2Fn', fn)
    result = vips_lib.vips_type_map(gtype, callback, ffi.NULL, ffi.NULL)
    return result
Map fn over all child types of gtype .
60
10
242,614
def basicConfig(**kwargs):
    """Call logging.basicConfig and override the formatter it creates."""
    logging.basicConfig(**kwargs)

    # hold the logging module lock while swapping the formatter on the
    # handler basicConfig just installed
    logging._acquireLock()
    try:
        stream = logging.root.handlers[0]
        stream.setFormatter(
            ColoredFormatter(
                fmt=kwargs.get('format', BASIC_FORMAT),
                datefmt=kwargs.get('datefmt', None)))
    finally:
        logging._releaseLock()
Call logging . basicConfig and override the formatter it creates .
101
13
242,615
def ensure_configured(func):
    """Decorator: call basicConfig first if the root logger has no handlers."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        if not logging.root.handlers:
            basicConfig()
        return func(*args, **kwargs)
    return wrapper
Modify a function to call basicConfig first if no handlers exist .
61
14
242,616
def color(self, log_colors, level_name):
    """Only return colors if the output stream is a TTY."""
    colors = log_colors if self.stream.isatty() else {}
    return ColoredFormatter.color(self, colors, level_name)
Only returns colors if STDOUT is a TTY .
53
11
242,617
def setup_logger():
    """Return the 'example' logger with a default ColoredFormatter attached."""
    formatter = ColoredFormatter(
        "%(log_color)s%(levelname)-8s%(reset)s %(blue)s%(message)s",
        datefmt=None,
        reset=True,
        log_colors={
            'DEBUG': 'cyan',
            'INFO': 'green',
            'WARNING': 'yellow',
            'ERROR': 'red',
            'CRITICAL': 'red',
        })

    logger = logging.getLogger('example')
    handler = logging.StreamHandler()
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    logger.setLevel(logging.DEBUG)
    return logger
Return a logger with a default ColoredFormatter .
155
11
242,618
def _extract_annotations_from_task ( self , task ) : annotations = list ( ) if 'annotations' in task : existing_annotations = task . pop ( 'annotations' ) for v in existing_annotations : if isinstance ( v , dict ) : annotations . append ( v [ 'description' ] ) else : annotations . append ( v ) for key in list ( task . keys ( ) ) : if key . startswith ( 'annotation_' ) : annotations . append ( task [ key ] ) del ( task [ key ] ) return annotations
Removes annotations from a task and returns a list of annotations
124
12
242,619
def task_done(self, **kw):
    """Mark a pending task as done.

    Accepts task-identifying keyword arguments; a completion date may
    be given via the ``end`` argument.  Raises ValueError if the task
    is not pending.
    """
    def validate(task):
        # only pending tasks may be completed
        if not Status.is_pending(task['status']):
            raise ValueError("Task is not pending.")
    return self._task_change_status(Status.COMPLETED, validate, **kw)
Marks a pending task as done optionally specifying a completion date with the end argument .
69
17
242,620
def task_delete(self, **kw):
    """Mark a task as deleted.

    Accepts task-identifying keyword arguments; a completion date may
    be given via the ``end`` argument.  Raises ValueError if the task
    is already deleted.
    """
    def validate(task):
        if task['status'] == Status.DELETED:
            raise ValueError("Task is already deleted.")
    return self._task_change_status(Status.DELETED, validate, **kw)
Marks a task as deleted optionally specifying a completion date with the end argument .
67
16
242,621
def _execute ( self , * args ) : command = ( [ 'task' , 'rc:%s' % self . config_filename , ] + self . get_configuration_override_args ( ) + [ six . text_type ( arg ) for arg in args ] ) # subprocess is expecting bytestrings only, so nuke unicode if present # and remove control characters for i in range ( len ( command ) ) : if isinstance ( command [ i ] , six . text_type ) : command [ i ] = ( taskw . utils . clean_ctrl_chars ( command [ i ] . encode ( 'utf-8' ) ) ) try : proc = subprocess . Popen ( command , stdout = subprocess . PIPE , stderr = subprocess . PIPE , ) stdout , stderr = proc . communicate ( ) except OSError as e : if e . errno == errno . ENOENT : raise OSError ( "Unable to find the 'task' command-line tool." ) raise if proc . returncode != 0 : raise TaskwarriorError ( command , stderr , stdout , proc . returncode ) # We should get bytes from the outside world. Turn those into unicode # as soon as we can. # Everything going into and coming out of taskwarrior *should* be # utf-8, but there are weird edge cases where something totally unusual # made it in.. so we need to be able to handle (or at least try to # handle) whatever. Kitchen tries its best. try : stdout = stdout . decode ( self . config . get ( 'encoding' , 'utf-8' ) ) except UnicodeDecodeError as e : stdout = kitchen . text . converters . to_unicode ( stdout ) try : stderr = stderr . decode ( self . config . get ( 'encoding' , 'utf-8' ) ) except UnicodeDecodeError as e : stderr = kitchen . text . converters . to_unicode ( stderr ) # strip any crazy terminal escape characters like bells, backspaces, # and form feeds for c in ( '\a' , '\b' , '\f' , '' ) : stdout = stdout . replace ( c , '?' ) stderr = stderr . replace ( c , '?' ) return stdout , stderr
Execute a given taskwarrior command with arguments
533
10
242,622
def load_tasks ( self , command = 'all' ) : results = dict ( ( db , self . _get_task_objects ( 'status:%s' % db , 'export' ) ) for db in Command . files ( command ) ) # 'waiting' tasks are returned separately from 'pending' tasks # Here we merge the waiting list back into the pending list. if 'pending' in results : results [ 'pending' ] . extend ( self . _get_task_objects ( 'status:waiting' , 'export' ) ) return results
Returns a dictionary of tasks for a list of command .
124
11
242,623
def filter_tasks(self, filter_dict):
    """Return a filtered list of tasks from taskwarrior.

    filter_dict is encoded into taskwarrior query arguments, taking the
    installed taskwarrior version into account.
    """
    query_args = taskw.utils.encode_query(filter_dict, self.get_version())
    return self._get_task_objects('export', *query_args)
Return a filtered list of tasks from taskwarrior .
58
11
242,624
def task_annotate(self, task, annotation):
    """Add an annotation to a task and return the refreshed task."""
    self._execute(task['uuid'], 'annotate', '--', annotation)
    _, refreshed = self.get_task(uuid=task[six.u('uuid')])
    return refreshed
Annotates a task .
71
6
242,625
def task_denotate(self, task, annotation):
    """Remove an annotation from a task and return the refreshed task."""
    self._execute(task['uuid'], 'denotate', '--', annotation)
    _, refreshed = self.get_task(uuid=task[six.u('uuid')])
    return refreshed
Removes an annotation from a task .
75
8
242,626
def task_delete(self, **kw):
    """Mark a task as deleted; raise ValueError if already deleted."""
    task_id, task = self.get_task(**kw)
    if task['status'] == Status.DELETED:
        raise ValueError("Task is already deleted.")
    self._execute(task_id, 'delete')
    return self.get_task(uuid=task['uuid'])[1]
Marks a task as deleted .
85
7
242,627
def task_start(self, **kw):
    """Mark a task as started and return the refreshed task."""
    task_id, task = self.get_task(**kw)
    self._execute(task_id, 'start')
    return self.get_task(uuid=task['uuid'])[1]
Marks a task as started .
60
7
242,628
def task_stop(self, **kw):
    """Mark a task as stopped and return the refreshed task."""
    task_id, task = self.get_task(**kw)
    self._execute(task_id, 'stop')
    return self.get_task(uuid=task['uuid'])[1]
Marks a task as stopped .
60
7
242,629
def to_file(cls, status):
    """Return the data file in which a task with this status is stored."""
    mapping = {
        Status.PENDING: DataFile.PENDING,
        Status.WAITING: DataFile.PENDING,
        Status.COMPLETED: DataFile.COMPLETED,
        Status.DELETED: DataFile.COMPLETED,
    }
    return mapping[status]
Returns the file in which this task is stored .
65
10
242,630
def from_stub(cls, data, udas=None):
    """Create a Task from an already-deserialized dict."""
    udas = udas or {}
    fields = cls.FIELDS.copy()
    fields.update(udas)
    processed = {
        key: cls._serialize(key, value, fields)
        for key, value in six.iteritems(data)
    }
    return cls(processed, udas)
Create a Task from an already deserialized dict .
93
11
242,631
def from_input(cls, input_file=sys.stdin, modify=False, udas=None):
    """Create a Task directly from an input stream.

    With modify=True two JSON lines are expected, as in the Taskwarrior
    hook system: the original state then the modified state.  The
    modified state wins.
    """
    line = input_file.readline().strip()
    if modify:
        line = input_file.readline().strip()
    return cls(json.loads(line), udas=udas)
Create a Task directly from stdin by reading one line . If modify = True two lines are expected which is consistent with the Taskwarrior hook system . The first line is interpreted as the original state of the Task and the second one as the new modified state .
105
53
242,632
def _deserialize ( cls , key , value , fields ) : converter = cls . _get_converter_for_field ( key , None , fields ) return converter . deserialize ( value )
Marshal incoming data into Python objects .
47
8
242,633
def _serialize ( cls , key , value , fields ) : converter = cls . _get_converter_for_field ( key , None , fields ) return converter . serialize ( value )
Marshal outgoing data into Taskwarrior s JSON format .
45
12
242,634
def get_changes ( self , serialized = False , keep = False ) : results = { } # Check for explicitly-registered changes for k , f , t in self . _changes : if k not in results : results [ k ] = [ f , None ] results [ k ] [ 1 ] = ( self . _serialize ( k , t , self . _fields ) if serialized else t ) # Check for changes on subordinate items for k , v in six . iteritems ( self ) : if isinstance ( v , Dirtyable ) : result = v . get_changes ( keep = keep ) if result : if not k in results : results [ k ] = [ result [ 0 ] , None ] results [ k ] [ 1 ] = ( self . _serialize ( k , result [ 1 ] , self . _fields ) if serialized else result [ 1 ] ) # Clear out recorded changes if not keep : self . _changes = [ ] return results
Get a journal of changes that have occurred
204
8
242,635
def update(self, values, force=False):
    """Update this task dict, returning per-key results of __setitem__."""
    return {
        key: self.__setitem__(key, value, force=force)
        for key, value in six.iteritems(values)
    }
Update this task dictionary
51
4
242,636
def set(self, key, value):
    """Set a key's value regardless of whether a change is seen."""
    return self.__setitem__(key, value, force=True)
Set a key s value regardless of whether a change is seen .
26
13
242,637
def serialized(self):
    """Return a serialized representation of this task."""
    return {
        key: self._serialize(key, value, self._fields)
        for key, value in six.iteritems(self)
    }
Returns a serialized representation of this task .
49
9
242,638
def encode_task_experimental(task):
    """Convert a dict-like task to its string representation.

    Used for adding a task via `task add`.  Returns a list of
    key:"value" strings (key: for empty values), sorted by key.
    """
    # First, clean the task:
    task = task.copy()
    if 'tags' in task:
        task['tags'] = ','.join(task['tags'])
    for key in task:
        task[key] = encode_task_value(key, task[key])

    # Then, format each field as a string
    encoded = []
    for key, value in sorted(task.items(), key=itemgetter(0)):
        if value:
            encoded.append("%s:\"%s\"" % (key, value))
        else:
            encoded.append("%s:" % (key,))
    return encoded
Convert a dict - like task to its string representation Used for adding a task via task add
132
19
242,639
def encode_task(task):
    """Convert a dict-like task to its string representation."""
    # First, clean the task:
    task = task.copy()
    if 'tags' in task:
        task['tags'] = ','.join(task['tags'])
    for k in task:
        for unsafe, safe in six.iteritems(encode_replacements):
            if isinstance(task[k], six.string_types):
                task[k] = task[k].replace(unsafe, safe)

        if isinstance(task[k], datetime.datetime):
            # BUG FIX: the format string previously read "%Y%m%dT%M%H%SZ",
            # which swaps minutes and hours; Taskwarrior's datetime format
            # is hours before minutes.
            task[k] = task[k].strftime("%Y%m%dT%H%M%SZ")

    # Then, format it as a string
    return "[%s]\n" % " ".join([
        "%s:\"%s\"" % (k, v)
        for k, v in sorted(task.items(), key=itemgetter(0))
    ])
Convert a dict - like task to its string representation
207
11
242,640
def convert_dict_to_override_args(config, prefix=''):
    """Convert a dict of override settings into taskwarrior CLI arguments.

    Nested dicts become dotted prefixes, e.g. {'a': {'b': 1}} ->
    ['rc.a.b=1'].  Values containing spaces are double-quoted.
    """
    args = []
    for key, value in six.iteritems(config):
        if isinstance(value, dict):
            nested_prefix = '.'.join([prefix, key, ]) if prefix else key
            args.extend(
                convert_dict_to_override_args(value, prefix=nested_prefix))
        else:
            value = six.text_type(value)
            name = 'rc' + (('.' + prefix) if prefix else '') + '.' + key
            if ' ' in value:
                value = '"%s"' % value
            args.append('='.join([name, value]))
    return args
Converts a dictionary of override arguments into CLI arguments .
152
11
242,641
def stats_per_chunk(chunk):
    """Tally the block types found in a chunk.

    NOTE(review): this increments ``block_counts``, which is not defined
    in this function -- it relies on a module-level dict being in scope.
    Compare bounded_stats_per_chunk, which takes block_counts as a
    parameter.
    """
    for block_id in chunk.iter_block():
        try:
            block_counts[block_id] += 1
        except KeyError:
            block_counts[block_id] = 1
Given a chunk increment the block types with the number of blocks found
52
13
242,642
def bounded_stats_per_chunk(chunk, block_counts, start, stop):
    """Tally into `block_counts` the block types of `chunk` that fall
    inside the bounding box given by `start`/`stop`.

    start, stop: (x, y, z) triples (any sequence of three numbers) or
    None for an unbounded side.
    """
    chunk_z, chunk_x = chunk.get_coords()
    for z in range(16):
        world_z = z + chunk_z * 16
        if ((start is not None and world_z < int(start[2])) or
                (stop is not None and world_z > int(stop[2]))):
            # Outside the bounding box; skip to next iteration.
            # BUG FIX: this previously used `break`, which abandoned the
            # whole chunk as soon as one coordinate was below the lower
            # bound, even though later iterations could be in range.
            continue
        for x in range(16):
            world_x = x + chunk_x * 16
            if ((start is not None and world_x < int(start[0])) or
                    (stop is not None and world_x > int(stop[0]))):
                # Outside the bounding box; skip to next iteration
                continue
            for y in range(chunk.get_max_height() + 1):
                if ((start is not None and y < int(start[1])) or
                        (stop is not None and y > int(stop[1]))):
                    # Outside the bounding box; skip to next iteration
                    continue
                block_id = chunk.get_block(x, y, z)
                if block_id is not None:
                    try:
                        block_counts[block_id] += 1
                    except KeyError:
                        block_counts[block_id] = 1
Given a chunk return the number of blocks types within the specified selection
422
13
242,643
def process_region_file(region, start, stop):
    """Count blocks of each id in `region`, restricted to the bounding
    box given by `start`/`stop` (each an (x, y, z) triple or None).

    NOTE(review): results are accumulated via stats_per_chunk /
    bounded_stats_per_chunk rather than returned; also note
    bounded_stats_per_chunk is called here with three arguments while
    its definition takes (chunk, block_counts, start, stop) -- verify
    which version of the helper this file pairs with.
    """
    rx = region.loc.x
    rz = region.loc.z

    # Does the region overlap the bounding box at all?
    # NOTE(review): the elif means only `start` is checked when both
    # bounds are given -- confirm intended.
    if (start != None):
        if ((rx + 1) * 512 - 1 < int(start[0]) or (rz + 1) * 512 - 1 < int(start[2])):
            return
    elif (stop != None):
        if (rx * 512 - 1 > int(stop[0]) or rz * 512 - 1 > int(stop[2])):
            return

    # Get all chunks
    print("Parsing region %s..." % os.path.basename(region.filename))
    for c in region.iter_chunks_class():
        cx, cz = c.get_coords()
        # Does the chunk overlap the bounding box at all?
        if (start != None):
            if ((cx + 1) * 16 + rx * 512 - 1 < int(start[0]) or (cz + 1) * 16 + rz * 512 - 1 < int(start[2])):
                continue
        elif (stop != None):
            if (cx * 16 + rx * 512 - 1 > int(stop[0]) or cz * 16 + rz * 512 - 1 > int(stop[2])):
                continue
        #print("Parsing chunk (" + str(cx) + ", " + str(cz) + ")...")

        # Fast code if no start or stop coordinates are specified
        # TODO: also use this code if start/stop is specified, but the complete chunk is included
        if (start == None and stop == None):
            stats_per_chunk(c)
        else:
            # Slow code that iterates through each coordinate
            bounded_stats_per_chunk(c, start, stop)
Given a region return the number of blocks of each ID in that region
410
14
242,644
def get_region(self, x, z):
    """Get a region using x,z coordinates of a region.  Cache results."""
    if (x, z) not in self.regions:
        if (x, z) in self.regionfiles:
            # open the backing file for this region
            self.regions[(x, z)] = region.RegionFile(self.regionfiles[(x, z)])
        else:
            # Return an empty RegionFile object
            # TODO: this does not yet allow for saving of the region file
            # TODO: this currently fails with a ValueError!
            # TODO: generate the correct name, and create the file
            # and add the fie to self.regionfiles
            self.regions[(x, z)] = region.RegionFile()
        self.regions[(x, z)].loc = Location(x=x, z=z)
    return self.regions[(x, z)]
Get a region using x z coordinates of a region . Cache results .
175
14
242,645
def iter_regions(self):
    """Iterate over all region files without caching them.

    Cached regions are yielded as-is; uncached ones are opened
    temporarily and closed again after use.
    """
    # TODO: Implement BoundingBox
    # TODO: Implement sort order
    for x, z in self.regionfiles.keys():
        close_after_use = False
        if (x, z) in self.regions:
            regionfile = self.regions[(x, z)]
        else:
            # It is not yet cached.
            # Get file, but do not cache later.
            regionfile = region.RegionFile(self.regionfiles[(x, z)],
                                           chunkclass=self.chunkclass)
            regionfile.loc = Location(x=x, z=z)
            close_after_use = True
        try:
            yield regionfile
        finally:
            if close_after_use:
                regionfile.close()
Return an iterable list of all region files . Use this function if you only want to loop through each region files once and do not want to cache the results .
158
33
242,646
def get_nbt(self, x, z):
    """Return the NBT for the chunk at chunk coordinates (x, z).

    Raise InconceivedChunk if the chunk's region is not present.  To
    get a Chunk object, use get_chunk.
    """
    region_x, chunk_x = divmod(x, 32)
    region_z, chunk_z = divmod(z, 32)
    key = (region_x, region_z)
    if key not in self.regions and key not in self.regionfiles:
        raise InconceivedChunk("Chunk %s,%s is not present in world" % (x, z))
    nbt = self.get_region(region_x, region_z).get_nbt(chunk_x, chunk_z)
    assert nbt != None
    return nbt
Return a NBT specified by the chunk coordinates x z . Raise InconceivedChunk if the NBT file is not yet generated . To get a Chunk object use get_chunk .
126
40
242,647
def get_chunk(self, x, z):
    """Return the chunk at chunk coordinates (x, z).

    Raise InconceivedChunk if the chunk is not yet generated.  For the
    raw NBT data, use get_nbt.
    """
    nbt = self.get_nbt(x, z)
    return self.chunkclass(nbt)
Return a chunk specified by the chunk coordinates x z . Raise InconceivedChunk if the chunk is not yet generated . To get the raw NBT data use get_nbt .
31
38
242,648
def chunk_count(self):
    """Return a count of the chunks in this world folder."""
    return sum(r.chunk_count() for r in self.iter_regions())
Return a count of the chunks in this world folder .
34
11
242,649
def get_boundingbox(self):
    """Return a BoundingBox over the x and z chunk coordinates that make
    up this world save."""
    box = BoundingBox()
    for region_x, region_z in self.regionfiles.keys():
        region = self.get_region(region_x, region_z)
        base_x, base_z = 32 * region_x, 32 * region_z
        for cc in region.get_chunk_coords():
            box.expand(base_x + cc['x'], None, base_z + cc['z'])
    return box
Return minimum and maximum x and z coordinates of the chunks that make up this world save
114
17
242,650
def expand(self, x, y, z):
    """Expand the bounding box to include the point (x, y, z).

    Any coordinate may be None to leave that axis unchanged.
    """
    def _stretch(value, lo, hi):
        # Return the new (min, max) pair for one axis.
        if lo is None or value < lo:
            lo = value
        if hi is None or value > hi:
            hi = value
        return lo, hi

    if x is not None:
        self.minx, self.maxx = _stretch(x, self.minx, self.maxx)
    if y is not None:
        self.miny, self.maxy = _stretch(y, self.miny, self.maxy)
    if z is not None:
        self.minz, self.maxz = _stretch(z, self.minz, self.maxz)
Expands the bounding
153
5
242,651
def unpack_nbt(tag):
    """Unpack an NBT tag into a native Python data structure."""
    if isinstance(tag, TAG_List):
        return [unpack_nbt(item) for item in tag.tags]
    if isinstance(tag, TAG_Compound):
        return {item.name: unpack_nbt(item) for item in tag.tags}
    return tag.value
Unpack an NBT tag into a native Python data structure .
81
13
242,652
def _init_file(self):
    """Initialise the file header.  This erases any data previously in
    the file."""
    # one sector for chunk locations plus one for timestamps
    header_length = 2 * SECTOR_LENGTH
    if self.size > header_length:
        self.file.truncate(header_length)
    self.file.seek(0)
    self.file.write(header_length * b'\x00')
    self.size = header_length
Initialise the file header . This will erase any data previously in the file .
73
16
242,653
def _sectors(self, ignore_chunk=None):
    """Return a list of all sectors; each entry is the list of chunk
    metadata objects occupying that sector (True for the two header
    sectors).

    ignore_chunk: ChunkMetadata whose claim should be excluded.
    """
    sectorsize = self._bytes_to_sector(self.size)
    sectors = [[] for s in range(sectorsize)]
    sectors[0] = True  # locations
    sectors[1] = True  # timestamps
    for m in self.metadata.values():
        if not m.is_created():
            continue
        if ignore_chunk == m:
            continue
        if m.blocklength and m.blockstart:
            blockend = m.blockstart + max(m.blocklength, m.requiredblocks())
            # Ensure 2 <= b < sectorsize, as well as m.blockstart <= b < blockend
            for b in range(max(m.blockstart, 2), min(blockend, sectorsize)):
                sectors[b].append(m)
    return sectors
Return a list of all sectors each sector is a list of chunks occupying the block .
184
17
242,654
def _locate_free_sectors(self, ignore_chunk=None):
    """Return a list of booleans indicating which sectors are free."""
    occupied = self._sectors(ignore_chunk=ignore_chunk)
    # A sector is free when nothing claims it (its entry is an empty list).
    return [not entry for entry in occupied]
Return a list of booleans indicating the free sectors .
62
12
242,655
def get_nbt(self, x, z):
    """Return an NBTFile of the specified chunk.

    Raise InconceivedChunk if the chunk is not included in the file;
    malformed chunk data raises ChunkDataError.
    """
    # TODO: cache results?
    data = self.get_blockdata(x, z)  # This may raise a RegionFileFormatError.
    data = BytesIO(data)
    err = None
    try:
        nbt = NBTFile(buffer=data)
        # convert chunk-in-region coordinates to absolute chunk coordinates
        if self.loc.x != None:
            x += self.loc.x * 32
        if self.loc.z != None:
            z += self.loc.z * 32
        nbt.loc = Location(x=x, z=z)
        return nbt
    # this may raise a MalformedFileError. Convert to ChunkDataError.
    except MalformedFileError as e:
        err = '%s' % e  # avoid str(e) due to Unicode issues in Python 2.
    if err:
        raise ChunkDataError(err)
Return a NBTFile of the specified chunk . Raise InconceivedChunk if the chunk is not included in the file .
179
26
242,656
def write_chunk(self, x, z, nbt_file):
    """Pack the NBT file as binary data and write it to the region file."""
    stream = BytesIO()
    nbt_file.write_file(buffer=stream)  # render to buffer; uncompressed
    self.write_blockdata(x, z, stream.getvalue())
Pack the NBT file as binary data and write to file in a compressed format .
63
17
242,657
def unlink_chunk(self, x, z):
    """Remove chunk (x, z) from the header of the region file, zero the
    sectors it occupied, and truncate trailing free space.

    Fragmentation is not a problem: chunks are written to free sectors
    when possible.
    """
    # This function fails for an empty file. If that is the case, just return.
    if self.size < 2 * SECTOR_LENGTH:
        return

    # zero the region header for the chunk (offset length and time)
    self.file.seek(4 * (x + 32 * z))
    self.file.write(pack(">IB", 0, 0)[1:])  # 4-byte offset/length entry
    self.file.seek(SECTOR_LENGTH + 4 * (x + 32 * z))
    self.file.write(pack(">I", 0))  # 4-byte timestamp entry

    # Check if file should be truncated:
    current = self.metadata[x, z]
    free_sectors = self._locate_free_sectors(ignore_chunk=current)
    truncate_count = list(reversed(free_sectors)).index(False)
    if truncate_count > 0:
        self.size = SECTOR_LENGTH * (len(free_sectors) - truncate_count)
        self.file.truncate(self.size)
        free_sectors = free_sectors[:-truncate_count]

    # Calculate freed sectors
    for s in range(current.blockstart,
                   min(current.blockstart + current.blocklength, len(free_sectors))):
        if free_sectors[s]:
            # zero sector s
            self.file.seek(SECTOR_LENGTH * s)
            self.file.write(SECTOR_LENGTH * b'\x00')

    # update the header
    self.metadata[x, z] = ChunkMetadata(x, z)
Remove a chunk from the header of the region file . Fragmentation is not a problem chunks are written to free sectors when possible .
368
26
242,658
def _classname ( self ) : if self . __class__ . __module__ in ( None , ) : return self . __class__ . __name__ else : return "%s.%s" % ( self . __class__ . __module__ , self . __class__ . __name__ )
Return the fully qualified class name .
65
7
242,659
def chests_per_chunk(chunk):
    """Find chests (and chest minecarts) and their contents in a chunk."""
    chests = []
    for entity in chunk['Entities']:
        eid = entity["id"].value
        # NOTE(review): `and` binds tighter than `or`, so the type==1
        # check only applies to legacy "Minecart" entities -- confirm
        # this grouping is intended.
        if eid == "Minecart" and entity["type"].value == 1 or eid == "minecraft:chest_minecart":
            x, y, z = entity["Pos"]
            x, y, z = x.value, y.value, z.value

            # Treasures are empty upon first opening
            try:
                items = items_from_nbt(entity["Items"])
            except KeyError:
                items = {}
            chests.append(Chest("Minecart with chest", (x, y, z), items))

    for entity in chunk['TileEntities']:
        eid = entity["id"].value
        if eid == "Chest" or eid == "minecraft:chest":
            x, y, z = entity["x"].value, entity["y"].value, entity["z"].value

            # Treasures are empty upon first opening
            try:
                items = items_from_nbt(entity["Items"])
            except KeyError:
                items = {}
            chests.append(Chest("Chest", (x, y, z), items))

    return chests
Find chests and get contents in a given chunk .
285
10
242,660
def get_block(self, x, y, z):
    """Return the block id at chunk-relative (x, y, z), or None if the
    containing 16-block section is absent."""
    section_y, block_y = divmod(y, 16)
    section = self.get_section(section_y)
    # idiom fix: compare against None with `is`, not `==`
    if section is None:
        return None
    return section.get_block(x, block_y, z)
Get a block from relative x y z .
55
9
242,661
def get_blocks_byte_array(self, buffer=False):
    """Return the block ids of this chunk as raw bytes.

    With buffer=True, return a BytesIO with a big-endian length prefix
    followed by the data, ready for insertion into Chunk NBT.
    """
    if buffer:
        length = len(self.blocksList)
        return BytesIO(pack(">i", length) + self.get_blocks_byte_array())
    data = array.array('B', self.blocksList)
    # array.tostring() was removed in Python 3.9; prefer tobytes() but
    # keep a fallback for very old interpreters.
    return data.tobytes() if hasattr(data, 'tobytes') else data.tostring()
Return a list of all blocks in this chunk .
76
10
242,662
def get_data_byte_array(self, buffer=False):
    """Return the 4-bit block-data values of this chunk as raw bytes.

    With buffer=True, return a BytesIO with a big-endian length prefix
    followed by the data, ready for insertion into Chunk NBT.
    """
    if buffer:
        length = len(self.dataList)
        return BytesIO(pack(">i", length) + self.get_data_byte_array())
    data = array.array('B', self.dataList)
    # array.tostring() was removed in Python 3.9; prefer tobytes() but
    # keep a fallback for very old interpreters.
    return data.tobytes() if hasattr(data, 'tobytes') else data.tostring()
Return a list of data for all blocks in this chunk .
76
12
242,663
def generate_heightmap(self, buffer=False, as_array=False):
    """Return a heightmap of the highest solid block in each column.

    With buffer=True, return a BytesIO with a length prefix (ready for
    insertion into Chunk NBT); with as_array=True return a plain list.
    """
    # block ids that do not count as "solid" for heightmap purposes
    non_solids = [0, 8, 9, 10, 11, 38, 37, 32, 31]
    if buffer:
        # Length + Heightmap, ready for insertion into Chunk NBT
        return BytesIO(pack(">i", 256) + self.generate_heightmap())
    # fix: previously this local was named `bytes`, shadowing the builtin
    heights = []
    for z in range(16):
        for x in range(16):
            for y in range(127, -1, -1):
                offset = y + z * 128 + x * 128 * 16
                if self.blocksList[offset] not in non_solids or y == 0:
                    heights.append(y + 1)
                    break
    if as_array:
        return heights
    data = array.array('B', heights)
    # array.tostring() was removed in Python 3.9; prefer tobytes().
    return data.tobytes() if hasattr(data, 'tobytes') else data.tostring()
Return a heightmap representing the highest solid blocks in this chunk .
190
13
242,664
def set_blocks(self, list=None, dict=None, fill_air=False):
    """Set all blocks in this chunk from either a flat `list` (laid out
    like self.blocksList) or a `dict` keyed by (x, y, z) coordinates.

    Blocks absent from the dict keep their current value unless
    fill_air=True, in which case they become air (0).  Returns True on
    success, False if neither argument was given.

    NOTE: the parameter names shadow the `list`/`dict` builtins; they
    are part of the public keyword interface and cannot be renamed.
    """
    if list:
        # Inputting a list like self.blocksList
        self.blocksList = list
    elif dict:
        # Inputting a dictionary like result of self.get_blocks_struct()
        list = []
        for x in range(16):
            for z in range(16):
                for y in range(128):
                    coord = x, y, z
                    offset = y + z * 128 + x * 128 * 16
                    if (coord in dict):
                        list.append(dict[coord])
                    else:
                        if (self.blocksList[offset] and not fill_air):
                            list.append(self.blocksList[offset])
                        else:
                            list.append(0)  # Air
        self.blocksList = list
    else:
        # None of the above...
        return False
    return True
Sets all blocks in this chunk using either a list or dictionary . Blocks not explicitly set can be filled to air by setting fill_air to True .
183
31
242,665
def set_block(self, x, y, z, id, data=0):
    """Set the block at (x, y, z) to `id`, optionally with a 4-bit
    `data` value packed into the nibble array."""
    offset = y + z * 128 + x * 128 * 16
    self.blocksList[offset] = id
    index, odd = divmod(offset, 2)
    packed = self.dataList[index]
    if odd:
        # modify lower bits, leaving higher bits in place
        self.dataList[index] = (packed & 240) + (data & 15)
    else:
        self.dataList[index] = (packed & 15) + (data << 4 & 240)
Sets the block a x y z to the specified id and optionally data .
143
16
242,666
def get_block(self, x, y, z, coord=False):
    """Return the id of the block at (x, y, z).

    Blocks are stored y-fastest: offset = y + z*128 + x*128*16.  If
    `coord` is a tuple it overrides (x, y, z).
    """
    if coord == False:
        offset = y + z * 128 + x * 128 * 16
    else:
        cx, cy, cz = coord
        offset = cy + cz * 128 + cx * 128 * 16
    return self.blocksList[offset]
Return the id of the block at x y z .
205
11
242,667
def tag_info(self):
    """Return a string with the class name, optional tag name, and the
    unnested value."""
    label = self.__class__.__name__
    if self.name:
        label += '(%r)' % self.name
    return label + ": " + self.valuestr()
Return Unicode string with class name and unnested value .
48
11
242,668
def parse_file(self, filename=None, buffer=None, fileobj=None):
    """Completely parse a file, extracting all tags.

    Exactly one of ``filename`` (path to a gzipped NBT file), ``buffer``
    (an already-decompressed file-like object) or ``fileobj`` (a gzipped
    file-like object) should be given.  Raises MalformedFileError when
    the stream does not start with a Compound tag or appears truncated,
    and ValueError when no source was supplied.
    """
    if filename:
        self.file = GzipFile(filename, 'rb')
    elif buffer:
        # remember where the data came from, if the object knows
        if hasattr(buffer, 'name'):
            self.filename = buffer.name
        self.file = buffer
    elif fileobj:
        if hasattr(fileobj, 'name'):
            self.filename = fileobj.name
        self.file = GzipFile(fileobj=fileobj)
    if self.file:
        try:
            # NOTE(review): 'type' shadows the builtin; kept as-is here.
            type = TAG_Byte(buffer=self.file)
            if type.value == self.id:
                name = TAG_String(buffer=self.file).value
                self._parse_buffer(self.file)
                self.name = name
                self.file.close()
            else:
                raise MalformedFileError("First record is not a Compound Tag")
        except StructError as e:
            # struct unpacking ran out of bytes mid-tag
            raise MalformedFileError("Partial File Parse: file possibly truncated.")
    else:
        raise ValueError("NBTFile.parse_file(): Need to specify either a "
                         "filename or a file object")
Completely parse a file extracting all tags .
242
9
242,669
def write_file(self, filename=None, buffer=None, fileobj=None):
    """Write this NBT file, gzip-compressed, to a file.

    Destination priority: ``buffer`` (pre-wrapped, left open for the
    caller) > ``filename`` > ``fileobj`` > previously remembered
    ``self.filename`` > an already-open ``self.file``.  Raises
    ValueError when no destination can be determined.
    """
    closefile = True
    if buffer:
        self.filename = None
        self.file = buffer
        closefile = False  # caller owns the buffer; don't close it
    elif filename:
        self.filename = filename
        self.file = GzipFile(filename, "wb")
    elif fileobj:
        self.filename = None
        self.file = GzipFile(fileobj=fileobj, mode="wb")
    elif self.filename:
        self.file = GzipFile(self.filename, "wb")
    elif not self.file:
        raise ValueError("NBTFile.write_file(): Need to specify either a "
                         "filename or a file object")
    # Render tree to file
    TAG_Byte(self.id)._render_buffer(self.file)
    TAG_String(self.name)._render_buffer(self.file)
    self._render_buffer(self.file)
    # make sure the file is complete
    try:
        self.file.flush()
    except (AttributeError, IOError):
        pass
    if closefile:
        try:
            self.file.close()
        except (AttributeError, IOError):
            pass
Write this NBT file to a file .
254
9
242,670
def loads(self, value):
    """Deserialize ``value`` using ``msgpack.loads``; None passes through.

    ``raw`` is False only when the serializer encoding is utf-8, so
    strings come back decoded in that case.
    """
    if value is None:
        return None
    return msgpack.loads(value, raw=self.encoding != "utf-8", use_list=self.use_list)
Deserialize value using msgpack . loads .
52
10
242,671
def aiocache_enabled(cls, fake_return=None):
    """Decorator factory: when the AIOCACHE_DISABLE environment variable
    is "1", the wrapped coroutine is skipped and ``fake_return`` is
    returned instead of calling it.
    """
    def decorator(coro):
        @functools.wraps(coro)
        async def wrapper(*args, **kwargs):
            disabled = os.getenv("AIOCACHE_DISABLE") == "1"
            if disabled:
                return fake_return
            return await coro(*args, **kwargs)
        return wrapper
    return decorator
Use this decorator to be able to fake the return of the function by setting the AIOCACHE_DISABLE environment variable
91
26
242,672
async def add(self, key, value, ttl=SENTINEL, dumps_fn=None, namespace=None, _conn=None):
    """Store ``value`` under ``key`` with an optional ttl; the backend
    raises when the key already exists.  Returns True.
    """
    t0 = time.monotonic()
    serialize = dumps_fn or self._serializer.dumps
    full_key = self.build_key(key, namespace=namespace)
    await self._add(full_key, serialize(value), ttl=self._get_ttl(ttl), _conn=_conn)
    logger.debug("ADD %s %s (%.4f)s", full_key, True, time.monotonic() - t0)
    return True
Stores the value in the given key with ttl if specified . Raises an error if the key already exists .
145
24
242,673
async def get(self, key, default=None, loads_fn=None, namespace=None, _conn=None):
    """Fetch a value from the cache; return ``default`` when missing
    (i.e. when the deserialized value is None).
    """
    t0 = time.monotonic()
    deserialize = loads_fn or self._serializer.loads
    full_key = self.build_key(key, namespace=namespace)
    raw = await self._get(full_key, encoding=self.serializer.encoding, _conn=_conn)
    value = deserialize(raw)
    logger.debug("GET %s %s (%.4f)s", full_key, value is not None, time.monotonic() - t0)
    return default if value is None else value
Get a value from the cache . Returns default if not found .
143
13
242,674
async def multi_get(self, keys, loads_fn=None, namespace=None, _conn=None):
    """Fetch several keys at once; values not found come back as None."""
    t0 = time.monotonic()
    deserialize = loads_fn or self._serializer.loads
    full_keys = [self.build_key(k, namespace=namespace) for k in keys]
    raw = await self._multi_get(full_keys, encoding=self.serializer.encoding, _conn=_conn)
    values = [deserialize(v) for v in raw]
    logger.debug(
        "MULTI_GET %s %d (%.4f)s",
        full_keys,
        sum(1 for v in values if v is not None),
        time.monotonic() - t0,
    )
    return values
Get multiple values from the cache; values not found are None.
164
13
242,675
async def set(self, key, value, ttl=SENTINEL, dumps_fn=None, namespace=None, _cas_token=None, _conn=None):
    """Store ``value`` under ``key``, with an optional ttl and CAS token;
    returns the backend result.
    """
    t0 = time.monotonic()
    serialize = dumps_fn or self._serializer.dumps
    full_key = self.build_key(key, namespace=namespace)
    res = await self._set(
        full_key, serialize(value), ttl=self._get_ttl(ttl), _cas_token=_cas_token, _conn=_conn
    )
    logger.debug("SET %s %d (%.4f)s", full_key, True, time.monotonic() - t0)
    return res
Stores the value in the given key with ttl if specified
164
13
242,676
async def multi_set(self, pairs, ttl=SENTINEL, dumps_fn=None, namespace=None, _conn=None):
    """Store multiple (key, value) pairs at once; returns True."""
    t0 = time.monotonic()
    serialize = dumps_fn or self._serializer.dumps
    prepared = [(self.build_key(k, namespace=namespace), serialize(v)) for k, v in pairs]
    await self._multi_set(prepared, ttl=self._get_ttl(ttl), _conn=_conn)
    logger.debug(
        "MULTI_SET %s %d (%.4f)s",
        [k for k, _ in prepared],
        len(pairs),
        time.monotonic() - t0,
    )
    return True
Stores multiple values in the given keys .
185
9
242,677
async def delete(self, key, namespace=None, _conn=None):
    """Remove ``key`` from the cache; returns the backend result
    (logged as an integer count).
    """
    t0 = time.monotonic()
    full_key = self.build_key(key, namespace=namespace)
    deleted = await self._delete(full_key, _conn=_conn)
    logger.debug("DELETE %s %d (%.4f)s", full_key, deleted, time.monotonic() - t0)
    return deleted
Deletes the given key .
100
6
242,678
async def exists(self, key, namespace=None, _conn=None):
    """Report whether ``key`` is present in the cache."""
    t0 = time.monotonic()
    full_key = self.build_key(key, namespace=namespace)
    found = await self._exists(full_key, _conn=_conn)
    logger.debug("EXISTS %s %d (%.4f)s", full_key, found, time.monotonic() - t0)
    return found
Check key exists in the cache .
101
7
242,679
async def expire(self, key, ttl, namespace=None, _conn=None):
    """Set ``ttl`` on an existing key; a ttl of 0 removes the expiry."""
    t0 = time.monotonic()
    full_key = self.build_key(key, namespace=namespace)
    ok = await self._expire(full_key, ttl, _conn=_conn)
    logger.debug("EXPIRE %s %d (%.4f)s", full_key, ok, time.monotonic() - t0)
    return ok
Set the ttl for the given key. Setting it to 0 disables expiration.
107
18
242,680
async def clear(self, namespace=None, _conn=None):
    """Wipe the cache; when ``namespace`` is given, only that
    namespace is cleared.
    """
    t0 = time.monotonic()
    ok = await self._clear(namespace, _conn=_conn)
    logger.debug("CLEAR %s %d (%.4f)s", namespace, ok, time.monotonic() - t0)
    return ok
Clears the cache in the cache namespace . If an alternative namespace is given it will clear those ones instead .
77
22
242,681
async def raw(self, command, *args, _conn=None, **kwargs):
    """Forward ``command`` verbatim to the underlying client.

    Using this bypasses the backend-agnostic API, so callers lose
    compatibility with other backends.
    """
    t0 = time.monotonic()
    result = await self._raw(command, *args, encoding=self.serializer.encoding, _conn=_conn, **kwargs)
    logger.debug("%s (%.4f)s", command, time.monotonic() - t0)
    return result
Send the raw command to the underlying client . Note that by using this CMD you will lose compatibility with other backends .
95
25
242,682
async def close(self, *args, _conn=None, **kwargs):
    """Release backend resources so the program can exit safely.

    Commands may still be issued after closing, but close must then be
    called again before exiting.
    """
    t0 = time.monotonic()
    result = await self._close(*args, _conn=_conn, **kwargs)
    logger.debug("CLOSE (%.4f)s", time.monotonic() - t0)
    return result
Perform any resource clean-up necessary to exit the program safely. After closing, command execution is still possible, but you will have to close again before exiting.
80
30
242,683
def get(self, alias: str):
    """Return the cache registered under ``alias``.

    Always the same instance per alias: built lazily from the alias
    configuration on first access, then memoized in ``self._caches``.
    """
    if alias not in self._caches:
        config = self.get_alias_config(alias)
        self._caches[alias] = _create_cache(**deepcopy(config))
    return self._caches[alias]
Retrieve cache identified by alias . Will return always the same instance
65
13
242,684
def create(self, alias=None, cache=None, **kwargs):
    """Build a new cache instance.

    Either ``alias`` (resolved against the configured aliases) or
    ``cache`` (an explicit cache class, deprecated) must be given;
    extra kwargs override the resulting configuration.
    :raises TypeError: when neither alias nor cache is supplied.
    """
    if alias:
        config = self.get_alias_config(alias)
    elif cache:
        warnings.warn(
            "Creating a cache with an explicit config is deprecated, use 'aiocache.Cache'",
            DeprecationWarning,
        )
        config = {"cache": cache}
    else:
        raise TypeError("create call needs to receive an alias or a cache")
    return _create_cache(**{**config, **kwargs})
Create a new cache . Either alias or cache params are required . You can use kwargs to pass extra parameters to configure the cache .
120
28
242,685
async def async_poller(client, initial_response, deserialization_callback, polling_method):
    """Async poller for long-running operations.

    :param client: a low-level msrest ServiceClientAsync, or an SDK
        client exposing one as ``_client``.
    :param initial_response: the initial REST call response, either a
        ClientRawResponse wrapper or the raw response itself.
    :param deserialization_callback: callable used to build the final
        resource; an msrest Model subclass is accepted and its
        ``deserialize`` is used.
    :param polling_method: polling strategy driven until completion.
    :raises ValueError: if ``client`` is not a usable service client.
    """
    try:
        client = client if isinstance(client, ServiceClientAsync) else client._client
    except AttributeError:
        raise ValueError("Poller client parameter must be a low-level msrest Service Client or a SDK client.")
    response = initial_response.response if isinstance(initial_response, ClientRawResponse) else initial_response
    if isinstance(deserialization_callback, type) and issubclass(deserialization_callback, Model):
        deserialization_callback = deserialization_callback.deserialize
    # Might raise a CloudError
    polling_method.initialize(client, response, deserialization_callback)
    await polling_method.run()
    return polling_method.resource()
Async Poller for long running operations .
177
8
242,686
def send(self, request, **kwargs):
    """Patch the current session with request-level operation config.

    Honors the deprecated per-operation kwargs ``max_redirects``,
    ``use_env_proxies`` and ``retries`` by temporarily mutating the
    session, delegating to the next policy in the pipeline, and always
    restoring the previous session state afterwards.
    """
    session = request.context.session
    # Sentinel distinguishes "not overridden" from falsy previous values
    # (max_redirects=0, trust_env=False) — the old truthiness checks
    # silently failed to restore those, leaking the per-request override
    # into later requests on the shared session.
    unset = object()

    old_max_redirects = unset
    if 'max_redirects' in kwargs:
        warnings.warn("max_redirects in operation kwargs is deprecated, use config.redirect_policy instead",
                      DeprecationWarning)
        old_max_redirects = session.max_redirects
        session.max_redirects = int(kwargs['max_redirects'])

    old_trust_env = unset
    if 'use_env_proxies' in kwargs:
        warnings.warn("use_env_proxies in operation kwargs is deprecated, use config.proxies instead",
                      DeprecationWarning)
        old_trust_env = session.trust_env
        session.trust_env = bool(kwargs['use_env_proxies'])

    old_retries = {}
    if 'retries' in kwargs:
        warnings.warn("retries in operation kwargs is deprecated, use config.retry_policy instead",
                      DeprecationWarning)
        max_retries = kwargs['retries']
        for protocol in self._protocols:
            old_retries[protocol] = session.adapters[protocol].max_retries
            session.adapters[protocol].max_retries = max_retries
    try:
        return self.next.send(request, **kwargs)
    finally:
        if old_max_redirects is not unset:
            session.max_redirects = old_max_redirects
        if old_trust_env is not unset:
            session.trust_env = old_trust_env
        for protocol, retries in old_retries.items():
            session.adapters[protocol].max_retries = retries
Patch the current session with Request level operation config .
406
10
242,687
def _request(self, method, url, params, headers, content, form_content):
    # type: (str, str, Optional[Dict[str, str]], Optional[Dict[str, str]], Any, Optional[Dict[str, Any]]) -> ClientRequest
    """Create a ClientRequest object.

    :param str method: HTTP verb.
    :param str url: URL, formatted against the client base URL.
    :param dict params: optional query parameters.
    :param dict headers: optional extra headers.
    :param content: optional request body.
    :param dict form_content: optional form-data fields.
    """
    request = ClientRequest(method, self.format_url(url))
    if params:
        request.format_parameters(params)
    if headers:
        request.headers.update(headers)
    # All requests should contain a Accept.
    # This should be done by Autorest, but wasn't in old Autorest
    # Force it for now, but might deprecate it later.
    if "Accept" not in request.headers:
        _LOGGER.debug("Accept header absent and forced to application/json")
        request.headers['Accept'] = 'application/json'
    if content is not None:
        request.add_content(content)
    if form_content:
        request.add_formdata(form_content)
    return request
Create ClientRequest object .
215
5
242,688
def format_url(self, url, **kwargs):
    # type: (str, Any) -> str
    """Format a request URL against the client base URL.

    Both ``url`` and the configured base URL may contain
    ``{placeholders}`` filled from kwargs.  An already-absolute URL
    (scheme and netloc present) is returned as-is after formatting.
    """
    url = url.format(**kwargs)
    parts = urlparse(url)
    if parts.scheme and parts.netloc:
        return url
    base = self.config.base_url.format(**kwargs).rstrip('/')
    return urljoin(base + '/', url.lstrip('/'))
Format request URL with the client base URL unless the supplied URL is already absolute .
106
16
242,689
def get(self, url, params=None, headers=None, content=None, form_content=None):
    # type: (str, Optional[Dict[str, str]], Optional[Dict[str, str]], Any, Optional[Dict[str, Any]]) -> ClientRequest
    """Create a GET request object.

    :param str url: The request URL.
    :param dict params: optional query parameters.
    :param dict headers: optional extra headers.
    :param content: optional request body.
    :param dict form_content: optional form-data fields.
    """
    # _request already stamps the verb on the ClientRequest; the extra
    # ``request.method = 'GET'`` assignment the old code did was
    # redundant (the sibling put/post helpers do not do it either).
    return self._request('GET', url, params, headers, content, form_content)
Create a GET request object .
99
6
242,690
def put(self, url, params=None, headers=None, content=None, form_content=None):
    # type: (str, Optional[Dict[str, str]], Optional[Dict[str, str]], Any, Optional[Dict[str, Any]]) -> ClientRequest
    """Create a PUT request object.

    :param str url: The request URL.
    :param dict params: optional query parameters.
    :param dict headers: optional extra headers.
    :param content: optional request body.
    :param dict form_content: optional form-data fields.
    """
    return self._request('PUT', url, params, headers, content, form_content)
Create a PUT request object .
92
7
242,691
def send_formdata(self, request, headers=None, content=None, **config):
    """Send ``content`` as a multipart form-data request.

    Only file-like objects and strings are handled at this point; the
    request body is not streamed.  ``headers`` replaces the request's
    headers wholesale.
    """
    request.headers = headers
    request.add_formdata(content)
    response = self.send(request, **config)
    return response
Send data as a multipart form - data request . We only deal with file - like objects or strings at this point . The requests is not yet streamed .
48
32
242,692
def add_header(self, header, value):
    # type: (str, str) -> None
    """Add a persistent header applied to every request this session sends.

    Deprecated: mutate ``config.headers`` directly instead.
    """
    warnings.warn(
        "Private attribute _client.add_header is deprecated. Use config.headers instead.",
        DeprecationWarning)
    self.config.headers[header] = value
Add a persistent header - this header will be applied to all requests sent during the current client session .
60
20
242,693
def signed_session(self, session=None):
    # type: (Optional[requests.Session]) -> requests.Session
    """Create a requests session carrying the API key.

    Header keys are merged into ``session.headers`` and query-string
    keys into ``session.params``.  Raises ValueError when
    ``session.params`` is not dict-like (requests allows bytes there).
    """
    session = super(ApiKeyCredentials, self).signed_session(session)
    session.headers.update(self.in_headers)
    try:
        # params is actually Union[bytes, MutableMapping[Text, Text]]
        session.params.update(self.in_query)  # type: ignore
    except AttributeError:  # requests.params can be bytes
        raise ValueError("session.params must be a dict to be used in ApiKeyCredentials")
    return session
Create requests session with ApiKey .
132
8
242,694
def deserialize_from_text(cls, data, content_type=None):
    # type: (Optional[Union[AnyStr, IO]], Optional[str]) -> Any
    """Decode ``data`` according to ``content_type``.

    Streams are read fully; bytes are decoded as UTF-8 (BOM-aware).
    JSON mime-types are parsed with ``json.loads``; XML with
    ElementTree, falling back to a JSON attempt for servers that
    mislabel JSON as XML.  With no content type the (undecoded) data
    is returned as-is.
    :raises DeserializationError: when the declared type fails to parse.
    """
    if hasattr(data, 'read'):
        # Assume a stream
        data = cast(IO, data).read()
    if isinstance(data, bytes):
        data_as_str = data.decode(encoding='utf-8-sig')
    else:
        # Explain to mypy the correct type.
        data_as_str = cast(str, data)
        # Remove Byte Order Mark if present in string
        data_as_str = data_as_str.lstrip(_BOM)
    if content_type is None:
        return data
    if content_type in cls.JSON_MIMETYPES:
        try:
            return json.loads(data_as_str)
        except ValueError as err:
            raise DeserializationError("JSON is invalid: {}".format(err), err)
    elif "xml" in (content_type or []):
        try:
            return ET.fromstring(data_as_str)
        except ET.ParseError:
            # It might be because the server has an issue, and returned JSON with
            # content-type XML....
            # So let's try a JSON load, and if it's still broken
            # let's flow the initial exception
            def _json_attemp(data):
                try:
                    return True, json.loads(data)
                except ValueError:
                    return False, None  # Don't care about this one
            success, json_result = _json_attemp(data)
            if success:
                return json_result
            # If i'm here, it's not JSON, it's not XML, let's scream
            # and raise the last context in this block (the XML exception)
            # The function hack is because Py2.7 messes up with exception
            # context otherwise.
            _LOGGER.critical("Wasn't XML not JSON, failing")
            raise_with_traceback(DeserializationError, "XML is invalid")
    raise DeserializationError("Cannot deserialize content-type: {}".format(content_type))
Decode data according to content - type .
465
9
242,695
def deserialize_from_http_generics(cls, body_bytes, headers):
    # type: (Optional[Union[AnyStr, IO]], Mapping) -> Any
    """Deserialize an HTTP body using the response's content-type header.

    When the server does not declare a content type, JSON is assumed
    (old Autorest also treated an empty body as valid JSON).  Returns
    None for an empty body.
    """
    if 'content-type' in headers:
        # normalise "Type/Subtype; charset=..." -> "type/subtype"
        content_type = headers['content-type'].split(";")[0].strip().lower()
    else:
        content_type = "application/json"
    if not body_bytes:
        return None
    return cls.deserialize_from_text(body_bytes, content_type)
Deserialize from HTTP response .
178
7
242,696
def on_response(self, request, response, **kwargs):
    # type: (Request, Response, Any) -> None
    """Deserialize the REST response body into the response context.

    When the response was requested as a stream (the default here),
    nothing is read and the method returns immediately.
    """
    if kwargs.get("stream", True):
        return
    http_response = response.http_response
    parsed = self.deserialize_from_http_generics(http_response.text(), http_response.headers)
    response.context[self.CONTEXT_NAME] = parsed
Extract data from the body of a REST response object .
108
12
242,697
def add_headers(self, header_dict):
    # type: (Dict[str, str]) -> None
    """Deserialize specific response headers into ``self.headers``.

    ``header_dict`` maps header name -> data type string.  Headers
    missing from the response deserialize from None.  No-op when there
    is no response.
    """
    if not self.response:
        return
    for name, data_type in header_dict.items():
        raw = self.response.headers.get(name)
        self.headers[name] = self._deserialize(data_type, raw)
Deserialize a specific header .
82
7
242,698
def log_request(_, request, *_args, **_kwargs):
    # type: (Any, ClientRequest, str, str) -> None
    """Log a client request at DEBUG level.

    The Authorization header value is masked, and generator bodies
    (file uploads) are summarised rather than dumped.  Never raises:
    logging failures are themselves logged.
    """
    if not _LOGGER.isEnabledFor(logging.DEBUG):
        return
    try:
        _LOGGER.debug("Request URL: %r", request.url)
        _LOGGER.debug("Request method: %r", request.method)
        _LOGGER.debug("Request headers:")
        for header, value in request.headers.items():
            if header.lower() == 'authorization':
                # never write credentials into the log
                value = '*****'
            _LOGGER.debug(" %r: %r", header, value)
        _LOGGER.debug("Request body:")
        # We don't want to log the binary data of a file upload.
        if isinstance(request.body, types.GeneratorType):
            _LOGGER.debug("File upload")
        else:
            _LOGGER.debug(str(request.body))
    except Exception as err:  # pylint: disable=broad-except
        _LOGGER.debug("Failed to log request: %r", err)
Log a client request .
247
5
242,699
def log_response(_, _request, response, *_args, **kwargs):
    # type: (Any, ClientRequest, ClientResponse, str, Any) -> Optional[ClientResponse]
    """Log a server response at DEBUG level and return it.

    Binary payloads (file attachments, octet-stream, images) are
    summarised rather than dumped, and streamable bodies are not
    consumed.  Never raises: logging failures are themselves logged.
    """
    if not _LOGGER.isEnabledFor(logging.DEBUG):
        return None
    try:
        _LOGGER.debug("Response status: %r", response.status_code)
        _LOGGER.debug("Response headers:")
        for res_header, value in response.headers.items():
            _LOGGER.debug(" %r: %r", res_header, value)
        # We don't want to log binary data if the response is a file.
        _LOGGER.debug("Response content:")
        pattern = re.compile(r'attachment; ?filename=["\w.]+', re.IGNORECASE)
        header = response.headers.get('content-disposition')
        if header and pattern.match(header):
            filename = header.partition('=')[2]
            _LOGGER.debug("File attachments: %s", filename)
        elif response.headers.get("content-type", "").endswith("octet-stream"):
            _LOGGER.debug("Body contains binary data.")
        elif response.headers.get("content-type", "").startswith("image"):
            _LOGGER.debug("Body contains image data.")
        else:
            if kwargs.get('stream', False):
                _LOGGER.debug("Body is streamable")
            else:
                _LOGGER.debug(response.text())
        return response
    except Exception as err:  # pylint: disable=broad-except
        _LOGGER.debug("Failed to log response: %s", repr(err))
        return response
Log a server response .
402
5