idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
239,300
def check_meta(meta):
    """Validate a metafile dict and return it unchanged.

    Raises ValueError when the top-level value is not a dict, when the
    announce URL is not a string, or when the nested info dict fails
    `check_info` validation.
    """
    if not isinstance(meta, dict):
        raise ValueError("bad metadata - not a dictionary")
    announce_url = meta.get("announce")
    if not isinstance(announce_url, basestring):
        raise ValueError("bad announce URL - not a string")
    # Delegate validation of the nested info dict
    check_info(meta.get("info"))
    return meta
Validate meta dict .
80
5
239,301
def clean_meta(meta, including_info=False, logger=None):
    """Clean meta dict of any non-standard keys. Optionally log changes using the given logger.

    :param meta: metafile dict to clean in place
    :param including_info: also clean keys inside ``meta["info"]`` and its file entries
    :param logger: optional callable taking a message string
    :return: set of removed key names (dotted for nested keys, e.g. ``"info.foo"``)
    """
    modified = set()

    # NOTE(review): deleting while iterating ``meta.keys()`` is only safe on
    # Python 2, where ``keys()`` returns a list snapshot — TODO confirm target version
    for key in meta.keys():
        # METAFILE_STD_KEYS presumably holds key paths as lists, hence the [key] wrapping
        if [key] not in METAFILE_STD_KEYS:
            if logger:
                logger("Removing key %r..." % (key,))
            del meta[key]
            modified.add(key)

    if including_info:
        for key in meta["info"].keys():
            if ["info", key] not in METAFILE_STD_KEYS:
                if logger:
                    logger("Removing key %r..." % ("info." + key,))
                del meta["info"][key]
                modified.add("info." + key)

        for idx, entry in enumerate(meta["info"].get("files", [])):
            for key in entry.keys():
                if ["info", "files", key] not in METAFILE_STD_KEYS:
                    if logger:
                        logger("Removing key %r from file #%d..." % (key, idx + 1))
                    del entry[key]
                    modified.add("info.files." + key)

            # Remove crap that certain PHP software puts in paths
            entry["path"] = [i for i in entry["path"] if i]

    return modified
Clean meta dict . Optionally log changes using the given logger .
292
13
239,302
def sanitize(meta, diagnostics=False):
    """Try to fix common problems, especially transcoding non-standard string encodings to UTF-8.

    :param meta: metafile dict, modified in place
    :param diagnostics: when True, also return the sets of bad encodings / fields seen
    :return: ``meta`` or ``(meta, bad_encodings, bad_fields)`` depending on ``diagnostics``
    """
    bad_encodings, bad_fields = set(), set()

    def sane_encoding(field, text):
        "Transcoding helper."
        # Try UTF-8 first, then the encoding declared in the metafile, then cp1252
        for encoding in ('utf-8', meta.get('encoding', None), 'cp1252'):
            if encoding:
                try:
                    u8_text = text.decode(encoding).encode("utf-8")
                    if encoding != 'utf-8':
                        bad_encodings.add(encoding)
                        bad_fields.add(field)
                    return u8_text
                except UnicodeError:
                    continue
        else:
            # Broken beyond anything reasonable
            bad_encodings.add('UNKNOWN/EXOTIC')
            bad_fields.add(field)
            # NOTE(review): ``str(text, ...)`` is Python 3 syntax in an otherwise
            # Python 2 code base — verify which interpreter this runs under
            return str(text, 'utf-8', 'replace').replace('\ufffd', '_').encode("utf-8")

    # Go through all string fields and check them
    for field in ("comment", "created by"):
        if field in meta:
            meta[field] = sane_encoding(field, meta[field])

    meta["info"]["name"] = sane_encoding('info name', meta["info"]["name"])

    for entry in meta["info"].get("files", []):
        entry["path"] = [sane_encoding('file path', i) for i in entry["path"]]

    return (meta, bad_encodings, bad_fields) if diagnostics else meta
Try to fix common problems, especially transcoding non-standard string encodings.
355
16
239,303
def add_fast_resume(meta, datapath):
    """Add fast resume data to a metafile dict (rTorrent ``libtorrent_resume`` format).

    :param meta: metafile dict, modified in place
    :param datapath: path to the already-downloaded data on disk
    :return: the modified ``meta`` dict
    :raises OSError: (EINVAL) when an on-disk file size does not match the metafile
    """
    # Get list of files
    files = meta["info"].get("files", None)
    single = files is None
    if single:
        if os.path.isdir(datapath):
            datapath = os.path.join(datapath, meta["info"]["name"])
        # Synthesize a one-entry file list for single-file torrents
        files = [Bunch(
            path=[os.path.abspath(datapath)],
            length=meta["info"]["length"],
        )]

    # Prepare resume data; "pieces" is 20 bytes of SHA1 per piece
    resume = meta.setdefault("libtorrent_resume", {})
    resume["bitfield"] = len(meta["info"]["pieces"]) // 20
    resume["files"] = []
    piece_length = meta["info"]["piece length"]
    offset = 0

    for fileinfo in files:
        # Get the path into the filesystem
        filepath = os.sep.join(fileinfo["path"])
        if not single:
            filepath = os.path.join(datapath, filepath.strip(os.sep))

        # Check file size
        if os.path.getsize(filepath) != fileinfo["length"]:
            raise OSError(errno.EINVAL, "File size mismatch for %r [is %d, expected %d]" % (
                filepath, os.path.getsize(filepath), fileinfo["length"],
            ))

        # Add resume data for this file; "completed" counts the pieces
        # that overlap this file's byte range
        resume["files"].append(dict(
            priority=1,
            mtime=int(os.path.getmtime(filepath)),
            completed=(offset + fileinfo["length"] + piece_length - 1) // piece_length
                      - offset // piece_length,
        ))
        offset += fileinfo["length"]

    return meta
Add fast resume data to a metafile dict .
416
11
239,304
def data_size(metadata):
    """Calculate the total payload size of a torrent based on parsed metadata.

    :param metadata: parsed metafile dict with an ``info`` key
    :return: payload size in bytes
    """
    info = metadata['info']
    if 'length' in info:
        # Single-file torrent
        return info['length']
    # Directory structure: sum the individual file lengths
    return sum(entry['length'] for entry in info['files'])
Calculate the size of a torrent based on parsed metadata .
69
13
239,305
def checked_open(filename, log=None, quiet=False):
    """Open and validate the given metafile.

    Optionally provide diagnostics on the passed logger for invalid metafiles,
    which then just cause a warning but no exception. ``quiet`` can suppress
    that warning.

    :param filename: path of the metafile to read
    :param log: optional logger; when given, validation errors are logged instead of raised
    :param quiet: suppress the warning when ``log`` is given
    :return: bdecoded metafile dict
    :raises ValueError: on invalid data, when no ``log`` is given
    """
    with open(filename, "rb") as handle:
        raw_data = handle.read()
    data = bencode.bdecode(raw_data)

    try:
        check_meta(data)
        # Re-encoding must reproduce the input exactly, else keys were unsorted
        if raw_data != bencode.bencode(data):
            raise ValueError("Bad bencoded data - dict keys out of order?")
    except ValueError as exc:
        if log:
            # Warn about it, unless it's a quiet value query
            if not quiet:
                log.warn("%s: %s" % (filename, exc))
        else:
            raise

    return data
Open and validate the given metafile. Optionally provide diagnostics on the passed logger for invalid metafiles, which then just cause a warning but no exception. quiet can suppress that warning.
136
40
239,306
def format(self, obj, context, maxlevels, level):  # pylint: disable=arguments-differ
    """Mask ``obj`` if it looks like an URL, then pass it to the super class.

    Masking (via ``mask_keys``) presumably hides secret keys embedded in
    announce URLs — confirm against ``mask_keys`` implementation.
    """
    if isinstance(obj, basestring) and "://" in fmt.to_unicode(obj):
        obj = mask_keys(obj)
    return pprint.PrettyPrinter.format(self, obj, context, maxlevels, level)
Mask obj if it looks like an URL then pass it to the super class .
81
16
239,307
def _get_datapath ( self ) : if self . _datapath is None : raise OSError ( errno . ENOENT , "You didn't provide any datapath for %r" % self . filename ) return self . _datapath
Get a valid datapath else raise an exception .
59
11
239,308
def _set_datapath ( self , datapath ) : if datapath : self . _datapath = datapath . rstrip ( os . sep ) self . _fifo = int ( stat . S_ISFIFO ( os . stat ( self . datapath ) . st_mode ) ) else : self . _datapath = None self . _fifo = False
Set a datapath .
88
6
239,309
def walk(self):
    """Generate paths in ``self.datapath``.

    Three modes, depending on what the datapath is:
      * FIFO — read newline-separated relative paths from the pipe (once only)
      * directory — recursively yield non-ignored file paths
      * single file — yield the datapath itself
    """
    # FIFO?
    if self._fifo:
        # A FIFO can only be consumed once per object lifetime
        if self._fifo > 1:
            raise RuntimeError("INTERNAL ERROR: FIFO read twice!")
        self._fifo += 1

        # Read paths relative to directory containing the FIFO
        with open(self.datapath, "r") as fifo:
            while True:
                relpath = fifo.readline().rstrip('\n')
                if not relpath:  # EOF?
                    break
                self.LOG.debug("Read relative path %r from FIFO..." % (relpath,))
                yield os.path.join(os.path.dirname(self.datapath), relpath)

        self.LOG.debug("FIFO %r closed!" % (self.datapath,))

    # Directory?
    elif os.path.isdir(self.datapath):
        # Walk the directory tree
        for dirpath, dirnames, filenames in os.walk(self.datapath):  #, followlinks=True):
            # Don't scan blacklisted directories (prune in place so os.walk skips them)
            for bad in dirnames[:]:
                if any(fnmatch.fnmatch(bad, pattern) for pattern in self.ignore):
                    dirnames.remove(bad)

            # Yield all filenames that aren't blacklisted
            for filename in filenames:
                if not any(fnmatch.fnmatch(filename, pattern) for pattern in self.ignore):
                    #yield os.path.join(dirpath[len(self.datapath)+1:], filename)
                    yield os.path.join(dirpath, filename)

    # Single file
    else:
        # Yield the filename
        yield self.datapath
Generate paths in self . datapath .
374
10
239,310
def _calc_size ( self ) : return sum ( os . path . getsize ( filename ) for filename in self . walk ( ) )
Get total size of self . datapath .
31
10
239,311
def _make_info(self, piece_size, progress, walker, piece_callback=None):
    """Create the torrent info dict by hashing all data in piece-size chunks.

    :param piece_size: piece size in bytes
    :param progress: optional callable ``progress(done_bytes, total_bytes)``
    :param walker: iterable of file paths to hash (order defines piece layout)
    :param piece_callback: optional callable ``(filename, piece_digest)`` per finished piece
    :return: tuple of (validated info dict, total bytes hashed)
    """
    # These collect the file descriptions and piece hashes
    file_list = []
    pieces = []

    # Initialize progress state; total is unknown (-1) when reading from a FIFO
    hashing_secs = time.time()
    totalsize = -1 if self._fifo else self._calc_size()
    totalhashed = 0

    # Start a new piece
    sha1sum = hashlib.sha1()
    done = 0
    filename = None

    # Hash all files
    for filename in walker:
        # Assemble file info; path is stored relative to the data root
        filesize = os.path.getsize(filename)
        filepath = filename[len(os.path.dirname(self.datapath) if self._fifo else self.datapath):].lstrip(os.sep)
        file_list.append({
            "length": filesize,
            "path": [fmt.to_utf8(x) for x in fmt.to_unicode(filepath).replace(os.sep, '/').split('/')],
        })
        self.LOG.debug("Hashing %r, size %d..." % (filename, filesize))

        # Open file and hash it; pieces may span file boundaries
        fileoffset = 0
        handle = open(filename, "rb")
        try:
            while fileoffset < filesize:
                # Read rest of piece or file, whatever is smaller
                chunk = handle.read(min(filesize - fileoffset, piece_size - done))
                sha1sum.update(chunk)  # bogus pylint: disable=E1101
                done += len(chunk)
                fileoffset += len(chunk)
                totalhashed += len(chunk)

                # Piece is done
                if done == piece_size:
                    pieces.append(sha1sum.digest())  # bogus pylint: disable=E1101
                    if piece_callback:
                        piece_callback(filename, pieces[-1])

                    # Start a new piece
                    sha1sum = hashlib.sha1()
                    done = 0

                # Report progress
                if progress:
                    progress(totalhashed, totalsize)
        finally:
            handle.close()

    # Add hash of partial last piece
    if done > 0:
        pieces.append(sha1sum.digest())  # bogus pylint: disable=E1103
        if piece_callback:
            piece_callback(filename, pieces[-1])

    # Build the meta dict
    metainfo = {
        "pieces": b"".join(pieces),
        "piece length": piece_size,
        "name": os.path.basename(self.datapath),
    }

    # Handle directory/FIFO vs. single file
    if self._fifo or os.path.isdir(self.datapath):
        metainfo["files"] = file_list
    else:
        metainfo["length"] = totalhashed

    hashing_secs = time.time() - hashing_secs
    self.LOG.info("Hashing of %s took %.1f secs (%s/s)" % (
        fmt.human_size(totalhashed).strip(), hashing_secs,
        fmt.human_size(totalhashed / hashing_secs).strip(),
    ))

    # Return validated info dict
    return check_info(metainfo), totalhashed
Create info dict .
722
4
239,312
def _make_meta(self, tracker_url, root_name, private, progress):
    """Create the full torrent meta dict.

    :param tracker_url: announce URL
    :param root_name: optional override for the torrent root name
    :param private: when truthy, set the BEP 27 private flag
    :param progress: optional progress callback, passed on to ``_make_info``
    :return: tuple of (validated meta dict, total bytes hashed)
    """
    # Calculate piece size
    if self._fifo:
        # TODO we need to add a (command line) param, probably for total data size
        # for now, always 1MB
        piece_size_exp = 20
    else:
        total_size = self._calc_size()
        if total_size:
            # Roughly total_size / 512, rounded down to a power of two
            piece_size_exp = int(math.log(total_size) / math.log(2)) - 9
        else:
            piece_size_exp = 0

    # Clamp to 32KiB .. 16MiB pieces
    piece_size_exp = min(max(15, piece_size_exp), 24)
    piece_size = 2 ** piece_size_exp

    # Build info hash; directory contents get sorted for a stable piece layout
    info, totalhashed = self._make_info(piece_size, progress,
        self.walk() if self._fifo else sorted(self.walk()))

    # Enforce unique hash per tracker
    info["x_cross_seed"] = hashlib.md5(tracker_url).hexdigest()

    # Set private flag
    if private:
        info["private"] = 1

    # Freely chosen root name (default is basename of the data path)
    if root_name:
        info["name"] = root_name

    # Torrent metadata
    meta = {
        "info": info,
        "announce": tracker_url.strip(),
    }

    #XXX meta["encoding"] = "UTF-8"

    # Return validated meta dict
    return check_meta(meta), totalhashed
Create torrent dict .
332
4
239,313
def check(self, metainfo, datapath, progress=None):
    """Check piece hashes of a metafile against the given datapath.

    :param metainfo: parsed metafile dict
    :param datapath: data location to verify
    :param progress: optional progress callback, passed on to ``_make_info``
    :return: True when all piece hashes match
    """
    if datapath:
        self.datapath = datapath

    def check_piece(filename, piece):
        "Callback for new piece"
        # Compare against the 20-byte SHA1 slice at the current index
        if piece != metainfo["info"]["pieces"][check_piece.piece_index:check_piece.piece_index + 20]:
            self.LOG.warn("Piece #%d: Hashes differ in file %r" % (
                check_piece.piece_index // 20, filename))
        check_piece.piece_index += 20
    # Function attribute keeps state across callback invocations
    check_piece.piece_index = 0

    datameta, _ = self._make_info(int(metainfo["info"]["piece length"]), progress,
        [datapath] if "length" in metainfo["info"] else
        (os.path.join(*([datapath] + i["path"])) for i in metainfo["info"]["files"]),
        piece_callback=check_piece)
    return datameta["pieces"] == metainfo["info"]["pieces"]
Check piece hashes of a metafile against the given datapath .
262
15
239,314
def _start(self, items):
    """Start some items if conditions are met.

    :param items: candidate download items to inspect
    """
    # TODO: Filter by a custom date field, for scheduled downloads starting at a certain time, or after a given delay
    # TODO: Don't start anything more if download BW is used >= config threshold in %

    # Check if anything more is ready to start downloading
    startable = [i for i in items if self.config.startable.match(i)]
    if not startable:
        self.LOG.debug("Checked %d item(s), none startable according to [ %s ]",
            len(items), self.config.startable)
        return

    # Check intermission delay
    now = time.time()
    if now < self.last_start:
        # compensate for summer time and other oddities
        self.last_start = now
    delayed = int(self.last_start + self.config.intermission - now)
    if delayed > 0:
        self.LOG.debug("Delaying start of {:d} item(s),"
            " due to {:d}s intermission with {:d}s left"
            .format(len(startable), self.config.intermission, delayed))
        return

    # TODO: sort by priority, then loaded time

    # Stick to "start_at_once" parameter, unless "downloading_min" is violated
    downloading = [i for i in items if self.config.downloading.match(i)]
    start_now = max(self.config.start_at_once, self.config.downloading_min - len(downloading))
    start_now = min(start_now, len(startable))

    #down_traffic = sum(i.down for i in downloading)
    ##self.LOG.info("%d downloading, down %d" % (len(downloading), down_traffic))

    # Start eligible items
    for idx, item in enumerate(startable):
        # Check if we reached 'start_now' in this run
        if idx >= start_now:
            self.LOG.debug("Only starting %d item(s) in this run, %d more could be downloading" % (
                start_now, len(startable) - idx,))
            break

        # TODO: Prevent start of more torrents that can fit on the drive (taking "off" files into account)
        # (restarts items that were stopped due to the "low_diskspace" schedule, and also avoids triggering it at all)

        # Only check the other conditions when we have `downloading_min` covered
        if len(downloading) < self.config.downloading_min:
            self.LOG.debug("Catching up from %d to a minimum of %d downloading item(s)" % (
                len(downloading), self.config.downloading_min))
        else:
            # Limit to the given maximum of downloading items
            if len(downloading) >= self.config.downloading_max:
                self.LOG.debug("Already downloading %d item(s) out of %d max, %d more could be downloading" % (
                    len(downloading), self.config.downloading_max, len(startable) - idx,))
                break

        # If we made it here, start it!
        self.last_start = now
        downloading.append(item)
        self.LOG.info("%s '%s' [%s, #%s]" % (
            "WOULD start" if self.config.dry_run else "Starting",
            fmt.to_utf8(item.name), item.alias, item.hash))
        if not self.config.dry_run:
            item.start()
            if not self.config.quiet:
                self.proxy.log(xmlrpc.NOHASH, "%s: Started '%s' {%s}" % (
                    self.__class__.__name__, fmt.to_utf8(item.name), item.alias,))
Start some items if conditions are met .
832
8
239,315
def run(self):
    """Queue manager job callback.

    Opens the engine proxy, fetches items from the configured view, sorts
    them when a sort key is set, and hands them to ``_start``. Engine errors
    are only logged on DEBUG level.
    """
    try:
        self.proxy = config_ini.engine.open()

        # Get items from 'pyrotorque' view
        items = list(config_ini.engine.items(self.VIEWNAME, cache=False))

        if self.sort_key:
            items.sort(key=self.sort_key)
            #self.LOG.debug("Sorted: %r" % [i.name for i in items])

        # Handle found items
        self._start(items)
        self.LOG.debug("%s - %s" % (config_ini.engine.engine_id, self.proxy))
    except (error.LoggableError, xmlrpc.ERRORS) as exc:
        # only debug, let the statistics logger do its job
        self.LOG.debug(str(exc))
Queue manager job callback .
177
5
239,316
def print_help_fields():
    """Print help about fields and field formatters."""
    # Mock entries, so they fulfill the expectations towards a field definition
    def custom_manifold():
        "named rTorrent custom attribute, e.g. 'custom_completion_target'"
        return ("custom_KEY", custom_manifold)
    def kind_manifold():
        "file types that contribute at least N% to the item's total size"
        return ("kind_N", kind_manifold)

    print('')
    print("Fields are:")
    # NOTE(review): ``dict.items() + list`` only works on Python 2; on Python 3
    # this raises TypeError — confirm the supported interpreter version
    print("\n".join([" %-21s %s" % (name, field.__doc__)
        for name, field in sorted(engine.FieldDefinition.FIELDS.items() + [
            custom_manifold(), kind_manifold(),
        ])
    ]))
    print('')
    print("Format specifiers are:")
    print("\n".join([" %-21s %s" % (name, doc)
        for name, doc in sorted(formatting.OutputMapping.formatter_help())
    ]))
    print('')
    print("Append format specifiers using a '.' to field names in '-o' lists,\n"
          "e.g. 'size.sz' or 'completed.raw.delta'.")
Print help about fields and field formatters .
294
9
239,317
def add(self, field, val):
    """Add a sample value for the given field to the running statistics.

    Updates total/min/max for the field; non-numeric samples are counted
    in ``self.errors`` instead.
    """
    if engine.FieldDefinition.FIELDS[field]._matcher is matching.TimeFilter:
        # Convert absolute timestamps into ages relative to the base time
        val = self._basetime - val

    try:
        self.total[field] += val
        # First sample for a field initializes the minimum
        self.min[field] = min(self.min[field], val) if field in self.min else val
        self.max[field] = max(self.max[field], val)
    except (ValueError, TypeError):
        self.errors[field] += 1
Add a sample
112
3
239,318
def help_completion_fields(self):
    """Return valid field names (as completion candidates with sample values)."""
    for name, field in sorted(engine.FieldDefinition.FIELDS.items()):
        if issubclass(field._matcher, matching.BoolFilter):
            yield "%s=no" % (name,)
            yield "%s=yes" % (name,)
            continue
        elif issubclass(field._matcher, matching.PatternFilter):
            yield "%s=" % (name,)
            yield "%s=/" % (name,)
            yield "%s=?" % (name,)
            yield "%s=\"'*'\"" % (name,)
            continue
        elif issubclass(field._matcher, matching.NumericFilterBase):
            for i in range(10):
                yield "%s=%d" % (name, i)
        else:
            yield "%s=" % (name,)

        # Reached for numeric and generic fields (bool/pattern `continue` above)
        yield r"%s=+" % (name,)
        yield r"%s=-" % (name,)

    yield "custom_"
    yield "kind_"
Return valid field names .
222
5
239,319
def format_item(self, item, defaults=None, stencil=None):
    """Format an item.

    :param item: item to render (None renders a header line)
    :param defaults: default values for missing fields
    :param stencil: optional column widths to justify against
    :return: formatted text for the item
    """
    from pyrobase.osutil import shell_escape

    try:
        item_text = fmt.to_console(formatting.format_item(self.options.output_format, item, defaults))
    # FIX: use `as exc` instead of the Python-2-only `except (...), exc` form,
    # consistent with the exception handling style used elsewhere in this code base
    except (NameError, ValueError, TypeError) as exc:
        self.fatal("Trouble with formatting item %r\n\n FORMAT = %r\n\n REASON =" % (
            item, self.options.output_format), exc)
        raise  # in --debug mode

    if self.options.shell:
        item_text = '\t'.join(shell_escape(i) for i in item_text.split('\t'))

    # Justify headers according to stencil
    if stencil:
        item_text = '\t'.join(i.ljust(len(s)) for i, s in zip(item_text.split('\t'), stencil))

    return item_text
Format an item .
225
4
239,320
def emit ( self , item , defaults = None , stencil = None , to_log = False , item_formatter = None ) : item_text = self . format_item ( item , defaults , stencil ) # Post-process line? if item_formatter : item_text = item_formatter ( item_text ) # For a header, use configured escape codes on a terminal if item is None and os . isatty ( sys . stdout . fileno ( ) ) : item_text = '' . join ( ( config . output_header_ecma48 , item_text , "\x1B[0m" ) ) # Dump to selected target if to_log : if callable ( to_log ) : to_log ( item_text ) else : self . LOG . info ( item_text ) elif self . options . nul : sys . stdout . write ( item_text + '\0' ) sys . stdout . flush ( ) else : print ( item_text ) return item_text . count ( '\n' ) + 1
Print an item to stdout or the log on INFO level .
234
13
239,321
def validate_output_format ( self , default_format ) : output_format = self . options . output_format # Use default format if none is given if output_format is None : output_format = default_format # Check if it's a custom output format from configuration # (they take precedence over field names, so name them wisely) output_format = config . formats . get ( output_format , output_format ) # Expand plain field list to usable form if re . match ( r"^[,._0-9a-zA-Z]+$" , output_format ) : self . plain_output_format = True output_format = "%%(%s)s" % ")s\t%(" . join ( formatting . validate_field_list ( output_format , allow_fmt_specs = True ) ) # Replace some escape sequences output_format = ( output_format . replace ( r"\\" , "\\" ) . replace ( r"\n" , "\n" ) . replace ( r"\t" , "\t" ) . replace ( r"\$" , "\0" ) # the next 3 allow using $() instead of %() . replace ( "$(" , "%(" ) . replace ( "\0" , "$" ) . replace ( r"\ " , " " ) # to prevent stripping in config file #.replace(r"\", "\") ) self . options . output_format = formatting . preparse ( output_format )
Prepare output format for later use .
322
8
239,322
def get_output_fields(self):
    """Get field names back out of the output template.

    :return: list of known field names referenced by the output format
    """
    # Re-engineer list from output format: format a None item (header mode),
    # then treat every run of upper-case/underscore characters as a field name
    # XXX TODO: Would be better to use a FieldRecorder class to catch the full field names
    emit_fields = list(i.lower() for i in re.sub(r"[^_A-Z]+", ' ', self.format_item(None)).split())

    # Validate result, dropping anything that is not a known field
    result = []
    for name in emit_fields[:]:
        if name not in engine.FieldDefinition.FIELDS:
            self.LOG.warn("Omitted unknown name '%s' from statistics and output format sorting" % name)
        else:
            result.append(name)

    return result
Get field names from output template .
151
7
239,323
def validate_sort_fields(self):
    """Take care of sorting: resolve the sort field specification.

    '*' expands to the fields of the output template; an empty spec falls
    back to the configured default.
    """
    sort_fields = ','.join(self.options.sort_fields)
    if sort_fields == '*':
        # NOTE(review): this leaves a *list* in sort_fields while the normal
        # path passes a string — presumably validate_sort_fields accepts both
        sort_fields = self.get_output_fields()
    return formatting.validate_sort_fields(sort_fields or config.sort_fields)
Take care of sorting .
68
5
239,324
def show_in_view(self, sourceview, matches, targetname=None):
    """Show search result in ncurses view.

    :param sourceview: view the matches were selected from (used for the log message)
    :param matches: matched items to show
    :param targetname: optional explicit target view name
    """
    append = self.options.append_view or self.options.alter_view == 'append'
    remove = self.options.alter_view == 'remove'
    action_name = ', appending to' if append else ', removing from' if remove else ' into'
    targetname = config.engine.show(matches,
        targetname or self.options.to_view or "rtcontrol",
        append=append, disjoin=remove)

    msg = "Filtered %d out of %d torrents using [ %s ]" % (
        len(matches), sourceview.size(), sourceview.matcher)
    self.LOG.info("%s%s rTorrent view %r." % (msg, action_name, targetname))
    # Also log the filter summary into the client itself
    config.engine.log(msg)
Show search result in ncurses view .
187
9
239,325
def heatmap(self, df, imagefile):
    """Create the heat map and save it to ``imagefile``.

    :param df: pandas DataFrame; ``self.args`` supplies the pivot columns,
               with ``self.args[2]`` holding the value column
    :param imagefile: output image path for matplotlib
    """
    import seaborn as sns
    import matplotlib.ticker as tkr
    import matplotlib.pyplot as plt
    from matplotlib.colors import LinearSegmentedColormap

    sns.set()
    with sns.axes_style('whitegrid'):
        fig, ax = plt.subplots(figsize=(5, 11))  # inches
        # Normalize color stops by the data maximum (at least CMAP_MIN_MAX)
        cmax = max(df[self.args[2]].max(), self.CMAP_MIN_MAX)
        csteps = {
            0.0: 'darkred', 0.3 / cmax: 'red', 0.6 / cmax: 'orangered', 0.9 / cmax: 'coral',
            1.0 / cmax: 'skyblue', 1.5 / cmax: 'blue', 1.9 / cmax: 'darkblue',
            2.0 / cmax: 'darkgreen', 3.0 / cmax: 'green',
            (self.CMAP_MIN_MAX - .1) / cmax: 'palegreen', 1.0: 'yellow'}
        cmap = LinearSegmentedColormap.from_list('RdGrYl', sorted(csteps.items()), N=256)
        dataset = df.pivot(*self.args)
        sns.heatmap(dataset, mask=dataset.isnull(), annot=False, linewidths=.5,
                    square=True, ax=ax, cmap=cmap, annot_kws=dict(stretch='condensed'))
        ax.tick_params(axis='y', labelrotation=30, labelsize=8)
        # ax.get_yaxis().set_major_formatter(tkr.FuncFormatter(lambda x, p: x))
        plt.savefig(imagefile)
Create the heat map .
426
5
239,326
def mainloop(self):
    """Manage category views.

    Depending on options: list all category views, switch to the next/previous
    one (with wrap-around), update the current one, or just report it.
    """
    # Get client state
    proxy = config.engine.open()
    views = [x for x in sorted(proxy.view.list()) if x.startswith(self.PREFIX)]

    current_view = real_current_view = proxy.ui.current_view()
    if current_view not in views:
        if views:
            current_view = views[0]
        else:
            raise error.UserError("There are no '{}*' views defined at all!".format(self.PREFIX))

    # Check options
    if self.options.list:
        for name in sorted(views):
            print("{} {:5d} {}".format(
                '*' if name == real_current_view else ' ',
                proxy.view.size(xmlrpc.NOHASH, name),
                name[self.PREFIX_LEN:]))

    elif self.options.next or self.options.prev or self.options.update:
        # Determine next in line; doubling the list makes the +/-1 index wrap around
        if self.options.update:
            new_view = current_view
        else:
            new_view = (views * 2)[views.index(current_view) + (1 if self.options.next else -1)]

        self.LOG.info("{} category view '{}'.".format(
            "Updating" if self.options.update else "Switching to", new_view))

        # Update and switch to filtered view
        proxy.pyro.category.update(xmlrpc.NOHASH, new_view[self.PREFIX_LEN:])
        proxy.ui.current_view.set(new_view)

    else:
        self.LOG.info("Current category view is '{}'.".format(current_view[self.PREFIX_LEN:]))
        self.LOG.info("Use '--help' to get usage information.")
Manage category views .
415
5
239,327
def _custom_fields():
    """Yield custom field definitions."""
    # Import some commonly needed modules
    import os
    from pyrocore.torrent import engine, matching
    from pyrocore.util import fmt

    # PUT CUSTOM FIELD CODE HERE

    # Disk space check (as an example)
    # see https://pyrocore.readthedocs.io/en/latest/custom.html#has-room
    def has_room(obj):
        "Check disk space."
        pathname = obj.path
        # Fall back to the parent dir when the item's path doesn't exist yet
        if pathname and not os.path.exists(pathname):
            pathname = os.path.dirname(pathname)
        if pathname and os.path.exists(pathname):
            stats = os.statvfs(pathname)
            # Free space minus threshold must cover the remaining download
            return (stats.f_bavail * stats.f_frsize - int(diskspace_threshold_mb) * 1024 ** 2
                    > obj.size * (1.0 - obj.done / 100.0))
        else:
            return None

    yield engine.DynamicField(engine.untyped, "has_room",
        "check whether the download will fit on its target device",
        matcher=matching.BoolFilter, accessor=has_room,
        formatter=lambda val: "OK" if val else "??" if val is None else "NO")

    # Allow the config to override the threshold; default to 500 MiB
    globals().setdefault("diskspace_threshold_mb", "500")
Yield custom field definitions .
301
6
239,328
def engine_data(engine):
    """Get important performance data and metadata from rTorrent.

    :param engine: engine wrapper with ``open()`` and identity attributes
    :return: dict with timestamp, engine identity, throttle rates, and view sizes
    """
    views = ("default", "main", "started", "stopped", "complete",
             "incomplete", "seeding", "leeching", "active", "messages")
    methods = [
        "throttle.global_up.rate", "throttle.global_up.max_rate",
        "throttle.global_down.rate", "throttle.global_down.max_rate",
    ]

    # Get data via multicall (throttle methods first, then one view.size per view)
    proxy = engine.open()
    calls = [dict(methodName=method, params=[]) for method in methods] \
          + [dict(methodName="view.size", params=['', view]) for view in views]
    result = proxy.system.multicall(calls, flatten=True)

    # Build result object; result indexes follow the call order above
    data = dict(
        now=time.time(),
        engine_id=engine.engine_id,
        versions=engine.versions,
        uptime=engine.uptime,
        upload=[result[0], result[1]],
        download=[result[2], result[3]],
        views=dict([(name, result[4 + i])
            for i, name in enumerate(views)
        ]),
    )

    return data
Get important performance data and metadata from rTorrent .
283
10
239,329
def _write_pidfile ( pidfile ) : pid = str ( os . getpid ( ) ) handle = open ( pidfile , 'w' ) try : handle . write ( "%s\n" % pid ) finally : handle . close ( )
Write file with current process ID .
54
7
239,330
def guard(pidfile, guardfile=None):
    """Raise an EnvironmentError when the guardfile doesn't exist, or when the
    process with the ID found in ``pidfile`` is still active.

    On success, immediately writes the current process ID to ``pidfile``.
    """
    # Check guard
    if guardfile and not os.path.exists(guardfile):
        raise EnvironmentError("Guard file '%s' not found, won't start!" % guardfile)

    if os.path.exists(pidfile):
        running, pid = check_process(pidfile)
        if running:
            raise EnvironmentError("Daemon process #%d still running, won't start!" % pid)
        else:
            # Stale PID file: previous process is gone, proceed
            logging.getLogger("daemonize").info("Process #%d disappeared, continuing..." % pid)

    # Keep race condition window small, by immediately writing launcher process ID
    _write_pidfile(pidfile)
Raise an EnvironmentError when the guardfile doesn't exist or the process with the ID found in pidfile is still active.
149
26
239,331
def daemonize(pidfile=None, logfile=None, sync=True):
    """Fork the process into the background (classic double-fork daemonization).

    :param pidfile: optional path to write the daemon's PID to
    :param logfile: optional log target — a path string, or an open file object
                    whose descriptor replaces stdout/stderr
    :param sync: when True, the daemon waits (max ~5s) for the launcher to exit
    """
    log = logging.getLogger("daemonize")
    ppid = os.getpid()

    # First fork: detach from the launching process
    try:
        pid = os.fork()
        if pid > 0:
            log.debug("Parent exiting (PID %d, CHILD %d)" % (ppid, pid))
            sys.exit(0)
    except OSError as exc:
        log.critical("fork #1 failed (PID %d): (%d) %s\n" % (os.getpid(), exc.errno, exc.strerror))
        sys.exit(1)

    ##os.chdir("/")
    ##os.umask(0022)
    os.setsid()

    # Second fork: prevent re-acquiring a controlling terminal
    try:
        pid = os.fork()
        if pid > 0:
            log.debug("Session leader exiting (PID %d, PPID %d, DEMON %d)" % (os.getpid(), ppid, pid))
            sys.exit(0)
    except OSError as exc:
        log.critical("fork #2 failed (PID %d): (%d) %s\n" % (os.getpid(), exc.errno, exc.strerror))
        sys.exit(1)

    if pidfile:
        _write_pidfile(pidfile)

    def sig_term(*dummy):
        "Handler for SIGTERM."
        sys.exit(0)

    # Detach stdin and install the TERM handler
    stdin = open("/dev/null", "r")
    os.dup2(stdin.fileno(), sys.stdin.fileno())
    signal.signal(signal.SIGTERM, sig_term)

    if logfile:
        # Duck-type check: string path vs. open file object
        try:
            logfile + ""
        except TypeError:
            # File object: redirect stdout/stderr to its descriptor
            if logfile.fileno() != sys.stdout.fileno():
                os.dup2(logfile.fileno(), sys.stdout.fileno())
            if logfile.fileno() != sys.stderr.fileno():
                os.dup2(logfile.fileno(), sys.stderr.fileno())
        else:
            # Path string: append to that file
            log.debug("Redirecting stdout / stderr to %r" % logfile)
            loghandle = open(logfile, "a+")
            os.dup2(loghandle.fileno(), sys.stdout.fileno())
            os.dup2(loghandle.fileno(), sys.stderr.fileno())
            loghandle.close()

    if sync:
        # Wait for 5 seconds at most, in 10ms steps, until the launcher is gone
        polling = 5, .01
        for _ in range(int(polling[0] * 1 / polling[1])):
            try:
                os.kill(ppid, 0)  # signal 0 = existence check only
            except OSError:
                break
            else:
                time.sleep(polling[1])

    log.debug("Process detached (PID %d)" % os.getpid())
Fork the process into the background .
663
8
239,332
def flatten(nested, containers=(list, tuple)):
    """Flatten a nested sequence into a flat list and return it.

    :param nested: iterable to flatten (iterators/generators are handled too)
    :param containers: types whose contents get spliced inline
    :return: new flat list of all scalar elements, in order
    """
    result = list(nested)  # handle iterators / generators
    pos = 0
    while pos < len(result):
        if isinstance(result[pos], containers):
            # Splice the container's elements in place of the container itself;
            # an empty container simply disappears, and the element now at
            # `pos` gets re-examined on the next pass
            result[pos:pos + 1] = result[pos]
        else:
            # Scalar element, move on
            pos += 1
    return result
Flatten a nested list in - place and return it .
115
12
239,333
def gendocs():
    """create some doc pages automatically

    Builds ``docs/references-cli-usage.rst`` by capturing the ``--help``
    output of every console script, filtering boilerplate lines out.
    """
    helppage = path("docs/references-cli-usage.rst")
    content = [
        ".. automatically generated using 'paver gendocs'.",
        "",
        ".. contents::",
        " :local:",
        "",
        ".. note::",
        "",
        " The help output presented here applies to version ``%s`` of the tools."
        % sh("pyroadmin --version", capture=True).split()[1],
        "",
    ]

    for tool in sorted(project.entry_points["console_scripts"]):
        # Entry point lines look like "name = module:func"; keep the name only
        tool, _ = tool.split(None, 1)
        content.extend([
            ".. _cli-usage-%s:" % tool,
            "",
            tool,
            '^' * len(tool),
            "",
            "::",
            "",
        ])
        help_opt = "--help-fields --config-dir /tmp" if tool == "rtcontrol" else "--help"
        help_txt = sh("%s -q %s" % (tool, help_opt), capture=True, ignore_error=True).splitlines()
        # Indent the captured help and drop version/copyright boilerplate
        content.extend(' ' + i for i in help_txt
            if ' on Python ' not in i
            and 'Copyright (c) 200' not in i
            and 'see the full documentation' not in i
            and ' https://pyrocore.readthedocs.io/' not in i
        )
        content.extend([
            "",
        ])

    content = [line.rstrip() for line in content
        if all(i not in line for i in (", Copyright (c) ", "Total time: ", "Configuration file '/tmp/"))
    ]
    content = [line for line, succ in zip(content, content[1:] + ['']) if line or succ]  # filter twin empty lines

    helppage.write_lines(content)
create some doc pages automatically
410
5
239,334
def watchdog_pid():
    """Get the PID of a running sphinx-autobuild watchdog via netstat, or None."""
    output = sh(
        'netstat -tulpn 2>/dev/null | grep 127.0.0.1:{:d}'.format(SPHINX_AUTOBUILD_PORT),
        capture=True, ignore_error=True,
    ).strip()
    if not output:
        return None
    # Last netstat column is "PID/program"; '-' means the PID is unknown
    owner = output.split()[-1]
    if owner == '-':
        return None
    return owner.split('/', 1)[0]
Get watchdog PID via netstat .
111
7
239,335
def autodocs ( ) : build_dir = path ( 'docs/_build' ) index_html = build_dir / 'html/index.html' if build_dir . exists ( ) : build_dir . rmtree ( ) with pushd ( "docs" ) : print "\n*** Generating API doc ***\n" sh ( "sphinx-apidoc -o apidoc -f -T -M ../src/pyrocore" ) sh ( "sphinx-apidoc -o apidoc -f -T -M $(dirname $(python -c 'import tempita; print(tempita.__file__)'))" ) print "\n*** Generating HTML doc ***\n" sh ( 'nohup %s/Makefile SPHINXBUILD="sphinx-autobuild -p %d' ' -i \'.*\' -i \'*.log\' -i \'*.png\' -i \'*.txt\'" html >autobuild.log 2>&1 &' % ( os . getcwd ( ) , SPHINX_AUTOBUILD_PORT ) ) for i in range ( 25 ) : time . sleep ( 2.5 ) pid = watchdog_pid ( ) if pid : sh ( "touch docs/index.rst" ) sh ( 'ps {}' . format ( pid ) ) url = 'http://localhost:{port:d}/' . format ( port = SPHINX_AUTOBUILD_PORT ) print ( "\n*** Open '{}' in your browser..." . format ( url ) ) break
create Sphinx docs locally and start a watchdog
354
9
239,336
def stopdocs ( ) : for i in range ( 4 ) : pid = watchdog_pid ( ) if pid : if not i : sh ( 'ps {}' . format ( pid ) ) sh ( 'kill {}' . format ( pid ) ) time . sleep ( .5 ) else : break
stop Sphinx watchdog
62
4
239,337
def coverage ( ) : coverage_index = path ( "build/coverage/index.html" ) coverage_index . remove ( ) sh ( "paver test" ) coverage_index . exists ( ) and webbrowser . open ( coverage_index )
generate coverage report and show in browser
54
8
239,338
def lookup_announce_alias ( name ) : for alias , urls in announce . items ( ) : if alias . lower ( ) == name . lower ( ) : return alias , urls raise KeyError ( "Unknown alias %s" % ( name , ) )
Get canonical alias name and announce URL list for the given alias .
57
13
239,339
def map_announce2alias ( url ) : import urlparse # Try to find an exact alias URL match and return its label for alias , urls in announce . items ( ) : if any ( i == url for i in urls ) : return alias # Try to find an alias URL prefix and return its label parts = urlparse . urlparse ( url ) server = urlparse . urlunparse ( ( parts . scheme , parts . netloc , "/" , None , None , None ) ) for alias , urls in announce . items ( ) : if any ( i . startswith ( server ) for i in urls ) : return alias # Return 2nd level domain name if no alias found try : return '.' . join ( parts . netloc . split ( ':' ) [ 0 ] . split ( '.' ) [ - 2 : ] ) except IndexError : return parts . netloc
Get tracker alias for announce URL and if none is defined the 2nd level domain .
190
17
239,340
def validate ( key , val ) : if val and val . startswith ( "~/" ) : return os . path . expanduser ( val ) if key == "output_header_frequency" : return int ( val , 10 ) if key . endswith ( "_ecma48" ) : return eval ( "'%s'" % val . replace ( "'" , r"\'" ) ) # pylint: disable=eval-used return val
Validate a configuration value .
97
6
239,341
def _update_config ( self , namespace ) : # pylint: disable=no-self-use for key , val in namespace . items ( ) : setattr ( config , key , val )
Inject the items from the given dict into the configuration .
43
12
239,342
def _interpolation_escape ( self , namespace ) : for key , val in namespace . items ( ) : if '%' in val : namespace [ key ] = self . INTERPOLATION_ESCAPE . sub ( lambda match : '%' + match . group ( 0 ) , val )
Re - escape interpolation strings .
64
7
239,343
def _validate_namespace ( self , namespace ) : # Update config values (so other code can access them in the bootstrap phase) self . _update_config ( namespace ) # Validate announce URLs for key , val in namespace [ "announce" ] . items ( ) : if isinstance ( val , basestring ) : namespace [ "announce" ] [ key ] = val . split ( ) # Re-escape output formats self . _interpolation_escape ( namespace [ "formats" ] ) # Create objects from module specs for factory in ( "engine" , ) : if isinstance ( namespace [ factory ] , basestring ) : namespace [ factory ] = pymagic . import_name ( namespace [ factory ] ) ( ) if namespace [ factory ] else None # Do some standard type conversions for key in namespace : # Split lists if key . endswith ( "_list" ) and isinstance ( namespace [ key ] , basestring ) : namespace [ key ] = [ i . strip ( ) for i in namespace [ key ] . replace ( ',' , ' ' ) . split ( ) ] # Resolve factory and callback handler lists elif any ( key . endswith ( i ) for i in ( "_factories" , "_callbacks" ) ) and isinstance ( namespace [ key ] , basestring ) : namespace [ key ] = [ pymagic . import_name ( i . strip ( ) ) for i in namespace [ key ] . replace ( ',' , ' ' ) . split ( ) ] # Update config values again self . _update_config ( namespace )
Validate the given namespace . This method is idempotent!
343
14
239,344
def _set_from_ini ( self , namespace , ini_file ) : # Isolate global values global_vars = dict ( ( key , val ) for key , val in namespace . items ( ) if isinstance ( val , basestring ) ) # Copy all sections for section in ini_file . sections ( ) : # Get values set so far if section == "GLOBAL" : raw_vars = global_vars else : raw_vars = namespace . setdefault ( section . lower ( ) , { } ) # Override with values set in this INI file raw_vars . update ( dict ( ini_file . items ( section , raw = True ) ) ) # Interpolate and validate all values if section == "FORMATS" : self . _interpolation_escape ( raw_vars ) raw_vars . update ( dict ( ( key , validate ( key , val ) ) for key , val in ini_file . items ( section , vars = raw_vars ) ) ) # Update global values namespace . update ( global_vars )
Copy values from loaded INI file to namespace .
238
10
239,345
def _set_defaults ( self , namespace , optional_cfg_files ) : # Add current configuration directory namespace [ "config_dir" ] = self . config_dir # Load defaults for idx , cfg_file in enumerate ( [ self . CONFIG_INI ] + optional_cfg_files ) : if any ( i in cfg_file for i in set ( '/' + os . sep ) ) : continue # skip any non-plain filenames try : defaults = pymagic . resource_string ( "pyrocore" , "data/config/" + cfg_file ) #@UndefinedVariable except IOError as exc : if idx and exc . errno == errno . ENOENT : continue raise ini_file = ConfigParser . SafeConfigParser ( ) ini_file . optionxform = str # case-sensitive option names ini_file . readfp ( StringIO . StringIO ( defaults ) , "<defaults>" ) self . _set_from_ini ( namespace , ini_file )
Set default values in the given dict .
226
8
239,346
def _load_ini ( self , namespace , config_file ) : self . LOG . debug ( "Loading %r..." % ( config_file , ) ) ini_file = ConfigParser . SafeConfigParser ( ) ini_file . optionxform = str # case-sensitive option names if ini_file . read ( config_file ) : self . _set_from_ini ( namespace , ini_file ) else : self . LOG . warning ( "Configuration file %r not found," " use the command 'pyroadmin --create-config' to create it!" % ( config_file , ) )
Load INI style configuration .
133
6
239,347
def _load_py ( self , namespace , config_file ) : if config_file and os . path . isfile ( config_file ) : self . LOG . debug ( "Loading %r..." % ( config_file , ) ) exec ( compile ( open ( config_file ) . read ( ) , config_file , 'exec' ) , # pylint: disable=exec-used vars ( config ) , namespace ) else : self . LOG . warning ( "Configuration file %r not found!" % ( config_file , ) )
Load scripted configuration .
117
4
239,348
def load ( self , optional_cfg_files = None ) : optional_cfg_files = optional_cfg_files or [ ] # Guard against coding errors if self . _loaded : raise RuntimeError ( "INTERNAL ERROR: Attempt to load configuration twice!" ) try : # Load configuration namespace = { } self . _set_defaults ( namespace , optional_cfg_files ) self . _load_ini ( namespace , os . path . join ( self . config_dir , self . CONFIG_INI ) ) for cfg_file in optional_cfg_files : if not os . path . isabs ( cfg_file ) : cfg_file = os . path . join ( self . config_dir , cfg_file ) if os . path . exists ( cfg_file ) : self . _load_ini ( namespace , cfg_file ) self . _validate_namespace ( namespace ) self . _load_py ( namespace , namespace [ "config_script" ] ) self . _validate_namespace ( namespace ) for callback in namespace [ "config_validator_callbacks" ] : callback ( ) except ConfigParser . ParsingError as exc : raise error . UserError ( exc ) # Ready to go... self . _loaded = True
Actually load the configuation from either the default location or the given directory .
274
15
239,349
def create ( self , remove_all_rc_files = False ) : # Check and create configuration directory if os . path . exists ( self . config_dir ) : self . LOG . debug ( "Configuration directory %r already exists!" % ( self . config_dir , ) ) else : os . mkdir ( self . config_dir ) if remove_all_rc_files : for subdir in ( '.' , 'rtorrent.d' ) : config_files = list ( glob . glob ( os . path . join ( os . path . abspath ( self . config_dir ) , subdir , '*.rc' ) ) ) config_files += list ( glob . glob ( os . path . join ( os . path . abspath ( self . config_dir ) , subdir , '*.rc.default' ) ) ) for config_file in config_files : self . LOG . info ( "Removing %r!" % ( config_file , ) ) os . remove ( config_file ) # Create default configuration files for filepath in sorted ( walk_resources ( "pyrocore" , "data/config" ) ) : # Load from package data text = pymagic . resource_string ( "pyrocore" , "data/config" + filepath ) # Create missing subdirs config_file = self . config_dir + filepath if not os . path . exists ( os . path . dirname ( config_file ) ) : os . makedirs ( os . path . dirname ( config_file ) ) # Write configuration files config_trail = [ ".default" ] if os . path . exists ( config_file ) : self . LOG . debug ( "Configuration file %r already exists!" % ( config_file , ) ) else : config_trail . append ( '' ) for i in config_trail : with open ( config_file + i , "w" ) as handle : handle . write ( text ) self . LOG . info ( "Configuration file %r written!" % ( config_file + i , ) )
Create default configuration files at either the default location or the given directory .
446
14
239,350
def make_magnet_meta ( self , magnet_uri ) : import cgi import hashlib if magnet_uri . startswith ( "magnet:" ) : magnet_uri = magnet_uri [ 7 : ] meta = { "magnet-uri" : "magnet:" + magnet_uri } magnet_params = cgi . parse_qs ( magnet_uri . lstrip ( '?' ) ) meta_name = magnet_params . get ( "xt" , [ hashlib . sha1 ( magnet_uri ) . hexdigest ( ) ] ) [ 0 ] if "dn" in magnet_params : meta_name = "%s-%s" % ( magnet_params [ "dn" ] [ 0 ] , meta_name ) meta_name = re . sub ( r"[^-_,a-zA-Z0-9]+" , '.' , meta_name ) . strip ( '.' ) . replace ( "urn.btih." , "" ) if not config . magnet_watch : self . fatal ( "You MUST set the 'magnet_watch' config option!" ) meta_path = os . path . join ( config . magnet_watch , "magnet-%s.torrent" % meta_name ) self . LOG . debug ( "Writing magnet-uri metafile %r..." % ( meta_path , ) ) try : bencode . bwrite ( meta_path , meta ) except EnvironmentError as exc : self . fatal ( "Error writing magnet-uri metafile %r (%s)" % ( meta_path , exc , ) ) raise
Create a magnet - uri torrent .
345
8
239,351
def get_class_logger ( obj ) : return logging . getLogger ( obj . __class__ . __module__ + '.' + obj . __class__ . __name__ )
Get a logger specific for the given object s class .
41
11
239,352
def default ( self , o ) : # pylint: disable=method-hidden if isinstance ( o , set ) : return list ( sorted ( o ) ) elif hasattr ( o , 'as_dict' ) : return o . as_dict ( ) else : return super ( JSONEncoder , self ) . default ( o )
Support more object types .
73
5
239,353
def fmt_sz ( intval ) : try : return fmt . human_size ( intval ) except ( ValueError , TypeError ) : return "N/A" . rjust ( len ( fmt . human_size ( 0 ) ) )
Format a byte sized value .
53
6
239,354
def fmt_iso ( timestamp ) : try : return fmt . iso_datetime ( timestamp ) except ( ValueError , TypeError ) : return "N/A" . rjust ( len ( fmt . iso_datetime ( 0 ) ) )
Format a UNIX timestamp to an ISO datetime string .
52
12
239,355
def fmt_duration ( duration ) : try : return fmt . human_duration ( float ( duration ) , 0 , 2 , True ) except ( ValueError , TypeError ) : return "N/A" . rjust ( len ( fmt . human_duration ( 0 , 0 , 2 , True ) ) )
Format a duration value in seconds to a readable form .
65
11
239,356
def fmt_subst ( regex , subst ) : return lambda text : re . sub ( regex , subst , text ) if text else text
Replace regex with string .
29
6
239,357
def preparse ( output_format ) : try : return templating . preparse ( output_format , lambda path : os . path . join ( config . config_dir , "templates" , path ) ) except ImportError as exc : if "tempita" in str ( exc ) : raise error . UserError ( "To be able to use Tempita templates, install the 'tempita' package (%s)\n" " Possibly USING THE FOLLOWING COMMAND:\n" " %s/easy_install tempita" % ( exc , os . path . dirname ( sys . executable ) ) ) raise except IOError as exc : raise error . LoggableError ( "Cannot read template: {}" . format ( exc ) )
Do any special processing of a template and return the result .
160
12
239,358
def validate_field_list ( fields , allow_fmt_specs = False , name_filter = None ) : formats = [ i [ 4 : ] for i in globals ( ) if i . startswith ( "fmt_" ) ] try : fields = [ i . strip ( ) for i in fields . replace ( ',' , ' ' ) . split ( ) ] except AttributeError : # Not a string, expecting an iterable pass if name_filter : fields = [ name_filter ( name ) for name in fields ] for name in fields : if allow_fmt_specs and '.' in name : fullname = name name , fmtspecs = name . split ( '.' , 1 ) for fmtspec in fmtspecs . split ( '.' ) : if fmtspec not in formats and fmtspec != "raw" : raise error . UserError ( "Unknown format specification %r in %r" % ( fmtspec , fullname ) ) if name not in engine . FieldDefinition . FIELDS and not engine . TorrentProxy . add_manifold_attribute ( name ) : raise error . UserError ( "Unknown field name %r" % ( name , ) ) return fields
Make sure the fields in the given list exist .
271
10
239,359
def validate_sort_fields ( sort_fields ) : # Allow descending order per field by prefixing with '-' descending = set ( ) def sort_order_filter ( name ) : "Helper to remove flag and memoize sort order" if name . startswith ( '-' ) : name = name [ 1 : ] descending . add ( name ) return name # Split and validate field list sort_fields = validate_field_list ( sort_fields , name_filter = sort_order_filter ) log . debug ( "Sorting order is: %s" % ", " . join ( [ ( '-' if i in descending else '' ) + i for i in sort_fields ] ) ) # No descending fields? if not descending : return operator . attrgetter ( * tuple ( sort_fields ) ) # Need to provide complex key class Key ( object ) : "Complex sort order key" def __init__ ( self , obj , * args ) : "Remember object to be compared" self . obj = obj def __lt__ ( self , other ) : "Compare to other key" for field in sort_fields : lhs , rhs = getattr ( self . obj , field ) , getattr ( other . obj , field ) if lhs == rhs : continue return rhs < lhs if field in descending else lhs < rhs return False return Key
Make sure the fields in the given list exist and return sorting key .
292
14
239,360
def formatter_help ( cls ) : result = [ ( "raw" , "Switch off the default field formatter." ) ] for name , method in globals ( ) . items ( ) : if name . startswith ( "fmt_" ) : result . append ( ( name [ 4 : ] , method . __doc__ . strip ( ) ) ) return result
Return a list of format specifiers and their documentation .
81
11
239,361
def timeparse ( sval , granularity = 'seconds' ) : match = COMPILED_SIGN . match ( sval ) sign = - 1 if match . groupdict ( ) [ 'sign' ] == '-' else 1 sval = match . groupdict ( ) [ 'unsigned' ] for timefmt in COMPILED_TIMEFORMATS : match = timefmt . match ( sval ) if match and match . group ( 0 ) . strip ( ) : mdict = match . groupdict ( ) if granularity == 'minutes' : mdict = _interpret_as_minutes ( sval , mdict ) # if all of the fields are integer numbers if all ( v . isdigit ( ) for v in list ( mdict . values ( ) ) if v ) : return sign * sum ( [ MULTIPLIERS [ k ] * int ( v , 10 ) for ( k , v ) in list ( mdict . items ( ) ) if v is not None ] ) # if SECS is an integer number elif ( 'secs' not in mdict or mdict [ 'secs' ] is None or mdict [ 'secs' ] . isdigit ( ) ) : # we will return an integer return ( sign * int ( sum ( [ MULTIPLIERS [ k ] * float ( v ) for ( k , v ) in list ( mdict . items ( ) ) if k != 'secs' and v is not None ] ) ) + ( int ( mdict [ 'secs' ] , 10 ) if mdict [ 'secs' ] else 0 ) ) else : # SECS is a float, we will return a float return sign * sum ( [ MULTIPLIERS [ k ] * float ( v ) for ( k , v ) in list ( mdict . items ( ) ) if v is not None ] )
Parse a time expression returning it as a number of seconds . If possible the return value will be an int ; if this is not possible the return will be a float . Returns None if a time expression cannot be parsed from the given string .
403
49
239,362
def get_client ( project_id = None , credentials = None , service_url = None , service_account = None , private_key = None , private_key_file = None , json_key = None , json_key_file = None , readonly = True , swallow_results = True , num_retries = 0 ) : if not credentials : assert ( service_account and ( private_key or private_key_file ) ) or ( json_key or json_key_file ) , 'Must provide AssertionCredentials or service account and P12 key\ or JSON key' if not project_id : assert json_key or json_key_file , 'Must provide project_id unless json_key or json_key_file is\ provided' if service_url is None : service_url = DISCOVERY_URI scope = BIGQUERY_SCOPE_READ_ONLY if readonly else BIGQUERY_SCOPE if private_key_file : credentials = _credentials ( ) . from_p12_keyfile ( service_account , private_key_file , scopes = scope ) if private_key : try : if isinstance ( private_key , basestring ) : private_key = private_key . decode ( 'utf-8' ) except NameError : # python3 -- private_key is already unicode pass credentials = _credentials ( ) . from_p12_keyfile_buffer ( service_account , StringIO ( private_key ) , scopes = scope ) if json_key_file : with open ( json_key_file , 'r' ) as key_file : json_key = json . load ( key_file ) if json_key : credentials = _credentials ( ) . from_json_keyfile_dict ( json_key , scopes = scope ) if not project_id : project_id = json_key [ 'project_id' ] bq_service = _get_bq_service ( credentials = credentials , service_url = service_url ) return BigQueryClient ( bq_service , project_id , swallow_results , num_retries )
Return a singleton instance of BigQueryClient . Either AssertionCredentials or a service account and private key combination need to be provided in order to authenticate requests to BigQuery .
471
39
239,363
def get_projects ( bq_service ) : projects_request = bq_service . projects ( ) . list ( ) . execute ( ) projects = [ ] for project in projects_request . get ( 'projects' , [ ] ) : project_data = { 'id' : project [ 'id' ] , 'name' : project [ 'friendlyName' ] } projects . append ( project_data ) return projects
Given the BigQuery service return data about all projects .
90
11
239,364
def _get_bq_service ( credentials = None , service_url = None ) : assert credentials , 'Must provide ServiceAccountCredentials' http = credentials . authorize ( Http ( ) ) service = build ( 'bigquery' , 'v2' , http = http , discoveryServiceUrl = service_url , cache_discovery = False ) return service
Construct an authorized BigQuery service object .
78
8
239,365
def _submit_query_job ( self , query_data ) : logger . debug ( 'Submitting query job: %s' % query_data ) job_collection = self . bigquery . jobs ( ) try : query_reply = job_collection . query ( projectId = self . project_id , body = query_data ) . execute ( num_retries = self . num_retries ) except HttpError as e : if query_data . get ( "dryRun" , False ) : return None , json . loads ( e . content . decode ( 'utf8' ) ) raise job_id = query_reply [ 'jobReference' ] . get ( 'jobId' ) schema = query_reply . get ( 'schema' , { 'fields' : None } ) [ 'fields' ] rows = query_reply . get ( 'rows' , [ ] ) job_complete = query_reply . get ( 'jobComplete' , False ) # raise exceptions if it's not an async query # and job is not completed after timeout if not job_complete and query_data . get ( "timeoutMs" , False ) : logger . error ( 'BigQuery job %s timeout' % job_id ) raise BigQueryTimeoutException ( ) return job_id , [ self . _transform_row ( row , schema ) for row in rows ]
Submit a query job to BigQuery .
292
8
239,366
def _insert_job ( self , body_object ) : logger . debug ( 'Submitting job: %s' % body_object ) job_collection = self . bigquery . jobs ( ) return job_collection . insert ( projectId = self . project_id , body = body_object ) . execute ( num_retries = self . num_retries )
Submit a job to BigQuery
79
6
239,367
def query ( self , query , max_results = None , timeout = 0 , dry_run = False , use_legacy_sql = None , external_udf_uris = None ) : logger . debug ( 'Executing query: %s' % query ) query_data = { 'query' : query , 'timeoutMs' : timeout * 1000 , 'dryRun' : dry_run , 'maxResults' : max_results } if use_legacy_sql is not None : query_data [ 'useLegacySql' ] = use_legacy_sql if external_udf_uris : query_data [ 'userDefinedFunctionResources' ] = [ { 'resourceUri' : u } for u in external_udf_uris ] return self . _submit_query_job ( query_data )
Submit a query to BigQuery .
183
7
239,368
def get_query_schema ( self , job_id ) : query_reply = self . get_query_results ( job_id , offset = 0 , limit = 0 ) if not query_reply [ 'jobComplete' ] : logger . warning ( 'BigQuery job %s not complete' % job_id ) raise UnfinishedQueryException ( ) return query_reply [ 'schema' ] [ 'fields' ]
Retrieve the schema of a query by job id .
91
11
239,369
def get_table_schema ( self , dataset , table , project_id = None ) : project_id = self . _get_project_id ( project_id ) try : result = self . bigquery . tables ( ) . get ( projectId = project_id , tableId = table , datasetId = dataset ) . execute ( num_retries = self . num_retries ) except HttpError as e : if int ( e . resp [ 'status' ] ) == 404 : logger . warn ( 'Table %s.%s does not exist' , dataset , table ) return None raise return result [ 'schema' ] [ 'fields' ]
Return the table schema .
143
5
239,370
def check_job ( self , job_id ) : query_reply = self . get_query_results ( job_id , offset = 0 , limit = 0 ) return ( query_reply . get ( 'jobComplete' , False ) , int ( query_reply . get ( 'totalRows' , 0 ) ) )
Return the state and number of results of a query by job id .
70
14
239,371
def get_query_rows ( self , job_id , offset = None , limit = None , timeout = 0 ) : # Get query results query_reply = self . get_query_results ( job_id , offset = offset , limit = limit , timeout = timeout ) if not query_reply [ 'jobComplete' ] : logger . warning ( 'BigQuery job %s not complete' % job_id ) raise UnfinishedQueryException ( ) schema = query_reply [ "schema" ] [ "fields" ] rows = query_reply . get ( 'rows' , [ ] ) page_token = query_reply . get ( "pageToken" ) records = [ self . _transform_row ( row , schema ) for row in rows ] # Append to records if there are multiple pages for query results while page_token and ( not limit or len ( records ) < limit ) : query_reply = self . get_query_results ( job_id , offset = offset , limit = limit , page_token = page_token , timeout = timeout ) page_token = query_reply . get ( "pageToken" ) rows = query_reply . get ( 'rows' , [ ] ) records += [ self . _transform_row ( row , schema ) for row in rows ] return records [ : limit ] if limit else records
Retrieve a list of rows from a query table by job id . This method will append results from multiple pages together . If you want to manually page through results you can use get_query_results method directly .
285
43
239,372
def check_dataset ( self , dataset_id , project_id = None ) : dataset = self . get_dataset ( dataset_id , project_id ) return bool ( dataset )
Check to see if a dataset exists .
43
8
239,373
def get_dataset ( self , dataset_id , project_id = None ) : project_id = self . _get_project_id ( project_id ) try : dataset = self . bigquery . datasets ( ) . get ( projectId = project_id , datasetId = dataset_id ) . execute ( num_retries = self . num_retries ) except HttpError : dataset = { } return dataset
Retrieve a dataset if it exists otherwise return an empty dict .
92
13
239,374
def get_table ( self , dataset , table , project_id = None ) : project_id = self . _get_project_id ( project_id ) try : table = self . bigquery . tables ( ) . get ( projectId = project_id , datasetId = dataset , tableId = table ) . execute ( num_retries = self . num_retries ) except HttpError : table = { } return table
Retrieve a table if it exists otherwise return an empty dict .
93
13
239,375
def create_table ( self , dataset , table , schema , expiration_time = None , time_partitioning = False , project_id = None ) : project_id = self . _get_project_id ( project_id ) body = { 'schema' : { 'fields' : schema } , 'tableReference' : { 'tableId' : table , 'projectId' : project_id , 'datasetId' : dataset } } if expiration_time is not None : body [ 'expirationTime' ] = expiration_time if time_partitioning : body [ 'timePartitioning' ] = { 'type' : 'DAY' } try : table = self . bigquery . tables ( ) . insert ( projectId = project_id , datasetId = dataset , body = body ) . execute ( num_retries = self . num_retries ) if self . swallow_results : return True else : return table except HttpError as e : logger . error ( ( 'Cannot create table {0}.{1}.{2}\n' 'Http Error: {3}' ) . format ( project_id , dataset , table , e . content ) ) if self . swallow_results : return False else : return { }
Create a new table in the dataset .
271
8
239,376
def patch_table ( self , dataset , table , schema , project_id = None ) : project_id = self . _get_project_id ( project_id ) body = { 'schema' : { 'fields' : schema } , 'tableReference' : { 'tableId' : table , 'projectId' : project_id , 'datasetId' : dataset } } try : result = self . bigquery . tables ( ) . patch ( projectId = project_id , datasetId = dataset , body = body ) . execute ( num_retries = self . num_retries ) if self . swallow_results : return True else : return result except HttpError as e : logger . error ( ( 'Cannot patch table {0}.{1}.{2}\n' 'Http Error: {3}' ) . format ( project_id , dataset , table , e . content ) ) if self . swallow_results : return False else : return { }
Patch an existing table in the dataset .
211
8
239,377
def create_view ( self , dataset , view , query , use_legacy_sql = None , project_id = None ) : project_id = self . _get_project_id ( project_id ) body = { 'tableReference' : { 'tableId' : view , 'projectId' : project_id , 'datasetId' : dataset } , 'view' : { 'query' : query } } if use_legacy_sql is not None : body [ 'view' ] [ 'useLegacySql' ] = use_legacy_sql try : view = self . bigquery . tables ( ) . insert ( projectId = project_id , datasetId = dataset , body = body ) . execute ( num_retries = self . num_retries ) if self . swallow_results : return True else : return view except HttpError as e : logger . error ( ( 'Cannot create view {0}.{1}\n' 'Http Error: {2}' ) . format ( dataset , view , e . content ) ) if self . swallow_results : return False else : return { }
Create a new view in the dataset .
245
8
239,378
def delete_table ( self , dataset , table , project_id = None ) : project_id = self . _get_project_id ( project_id ) try : response = self . bigquery . tables ( ) . delete ( projectId = project_id , datasetId = dataset , tableId = table ) . execute ( num_retries = self . num_retries ) if self . swallow_results : return True else : return response except HttpError as e : logger . error ( ( 'Cannot delete table {0}.{1}\n' 'Http Error: {2}' ) . format ( dataset , table , e . content ) ) if self . swallow_results : return False else : return { }
Delete a table from the dataset .
155
7
239,379
def get_tables ( self , dataset_id , app_id , start_time , end_time , project_id = None ) : if isinstance ( start_time , datetime ) : start_time = calendar . timegm ( start_time . utctimetuple ( ) ) if isinstance ( end_time , datetime ) : end_time = calendar . timegm ( end_time . utctimetuple ( ) ) every_table = self . _get_all_tables ( dataset_id , project_id ) app_tables = every_table . get ( app_id , { } ) return self . _filter_tables_by_time ( app_tables , start_time , end_time )
Retrieve a list of tables that are related to the given app id and are inside the range of start and end times .
161
25
239,380
def wait_for_job ( self , job , interval = 5 , timeout = 60 ) : complete = False job_id = str ( job if isinstance ( job , ( six . binary_type , six . text_type , int ) ) else job [ 'jobReference' ] [ 'jobId' ] ) job_resource = None start_time = time ( ) elapsed_time = 0 while not ( complete or elapsed_time > timeout ) : sleep ( interval ) request = self . bigquery . jobs ( ) . get ( projectId = self . project_id , jobId = job_id ) job_resource = request . execute ( num_retries = self . num_retries ) self . _raise_executing_exception_if_error ( job_resource ) complete = job_resource . get ( 'status' ) . get ( 'state' ) == u'DONE' elapsed_time = time ( ) - start_time # raise exceptions if timeout if not complete : logger . error ( 'BigQuery job %s timeout' % job_id ) raise BigQueryTimeoutException ( ) return job_resource
Waits until the job indicated by job_resource is done or has failed
241
15
239,381
def push_rows ( self , dataset , table , rows , insert_id_key = None , skip_invalid_rows = None , ignore_unknown_values = None , template_suffix = None , project_id = None ) : project_id = self . _get_project_id ( project_id ) table_data = self . bigquery . tabledata ( ) rows_data = [ ] for row in rows : each_row = { } each_row [ "json" ] = row if insert_id_key is not None : keys = insert_id_key . split ( '.' ) val = reduce ( lambda d , key : d . get ( key ) if d else None , keys , row ) if val is not None : each_row [ "insertId" ] = val rows_data . append ( each_row ) data = { "kind" : "bigquery#tableDataInsertAllRequest" , "rows" : rows_data } if skip_invalid_rows is not None : data [ 'skipInvalidRows' ] = skip_invalid_rows if ignore_unknown_values is not None : data [ 'ignoreUnknownValues' ] = ignore_unknown_values if template_suffix is not None : data [ 'templateSuffix' ] = template_suffix try : response = table_data . insertAll ( projectId = project_id , datasetId = dataset , tableId = table , body = data ) . execute ( num_retries = self . num_retries ) if response . get ( 'insertErrors' ) : logger . error ( 'BigQuery insert errors: %s' % response ) if self . swallow_results : return False else : return response if self . swallow_results : return True else : return response except HttpError as e : logger . exception ( 'Problem with BigQuery insertAll' ) if self . swallow_results : return False else : return { 'insertErrors' : [ { 'errors' : [ { 'reason' : 'httperror' , 'message' : e } ] } ] }
Upload rows to BigQuery table .
452
7
239,382
def get_all_tables ( self , dataset_id , project_id = None ) : tables_data = self . _get_all_tables_for_dataset ( dataset_id , project_id ) tables = [ ] for table in tables_data . get ( 'tables' , [ ] ) : table_name = table . get ( 'tableReference' , { } ) . get ( 'tableId' ) if table_name : tables . append ( table_name ) return tables
Retrieve a list of tables for the dataset .
110
10
239,383
def _get_all_tables_for_dataset ( self , dataset_id , project_id = None ) : project_id = self . _get_project_id ( project_id ) result = self . bigquery . tables ( ) . list ( projectId = project_id , datasetId = dataset_id ) . execute ( num_retries = self . num_retries ) page_token = result . get ( 'nextPageToken' ) while page_token : res = self . bigquery . tables ( ) . list ( projectId = project_id , datasetId = dataset_id , pageToken = page_token ) . execute ( num_retries = self . num_retries ) page_token = res . get ( 'nextPageToken' ) result [ 'tables' ] += res . get ( 'tables' , [ ] ) return result
Retrieve a list of all tables for the dataset .
191
11
239,384
def _parse_table_list_response ( self , list_response ) : tables = defaultdict ( dict ) for table in list_response . get ( 'tables' , [ ] ) : table_ref = table . get ( 'tableReference' ) if not table_ref : continue table_id = table_ref . get ( 'tableId' , '' ) year_month , app_id = self . _parse_table_name ( table_id ) if not year_month : continue table_date = datetime . strptime ( year_month , '%Y-%m' ) unix_seconds = calendar . timegm ( table_date . timetuple ( ) ) tables [ app_id ] . update ( { table_id : unix_seconds } ) # Turn off defualting tables . default_factory = None return tables
Parse the response received from calling list on tables .
185
11
239,385
def _parse_table_name ( self , table_id ) : # Prefix date attributes = table_id . split ( '_' ) year_month = "-" . join ( attributes [ : 2 ] ) app_id = "-" . join ( attributes [ 2 : ] ) # Check if date parsed correctly if year_month . count ( "-" ) == 1 and all ( [ num . isdigit ( ) for num in year_month . split ( '-' ) ] ) : return year_month , app_id # Postfix date attributes = table_id . split ( '_' ) year_month = "-" . join ( attributes [ - 2 : ] ) app_id = "-" . join ( attributes [ : - 2 ] ) # Check if date parsed correctly if year_month . count ( "-" ) == 1 and all ( [ num . isdigit ( ) for num in year_month . split ( '-' ) ] ) and len ( year_month ) == 7 : return year_month , app_id return None , None
Parse a table name in the form of appid_YYYY_MM or YYYY_MM_appid and return a tuple consisting of YYYY - MM and the app id .
224
41
239,386
def _filter_tables_by_time ( self , tables , start_time , end_time ) : return [ table_name for ( table_name , unix_seconds ) in tables . items ( ) if self . _in_range ( start_time , end_time , unix_seconds ) ]
Filter a table dictionary and return table names based on the range of start and end times in unix seconds .
68
22
239,387
def _in_range ( self , start_time , end_time , time ) : ONE_MONTH = 2764800 # 32 days return start_time <= time <= end_time or time <= start_time <= time + ONE_MONTH or time <= end_time <= time + ONE_MONTH
Indicate if the given time falls inside of the given range .
66
13
239,388
def _transform_row ( self , row , schema ) : log = { } # Match each schema column with its associated row value for index , col_dict in enumerate ( schema ) : col_name = col_dict [ 'name' ] row_value = row [ 'f' ] [ index ] [ 'v' ] if row_value is None : log [ col_name ] = None continue # Recurse on nested records if col_dict [ 'type' ] == 'RECORD' : row_value = self . _recurse_on_row ( col_dict , row_value ) # Otherwise just cast the value elif col_dict [ 'type' ] == 'INTEGER' : row_value = int ( row_value ) elif col_dict [ 'type' ] == 'FLOAT' : row_value = float ( row_value ) elif col_dict [ 'type' ] == 'BOOLEAN' : row_value = row_value in ( 'True' , 'true' , 'TRUE' ) elif col_dict [ 'type' ] == 'TIMESTAMP' : row_value = float ( row_value ) log [ col_name ] = row_value return log
Apply the given schema to the given BigQuery data row .
269
12
239,389
def _recurse_on_row ( self , col_dict , nested_value ) : row_value = None # Multiple nested records if col_dict [ 'mode' ] == 'REPEATED' and isinstance ( nested_value , list ) : row_value = [ self . _transform_row ( record [ 'v' ] , col_dict [ 'fields' ] ) for record in nested_value ] # A single nested record else : row_value = self . _transform_row ( nested_value , col_dict [ 'fields' ] ) return row_value
Apply the schema specified by the given dict to the nested value by recursing on it .
125
19
239,390
def _generate_hex_for_uris ( self , uris ) : return sha256 ( ( ":" . join ( uris ) + str ( time ( ) ) ) . encode ( ) ) . hexdigest ( )
Given uris generate and return hex version of it
51
10
239,391
def create_dataset(self, dataset_id, friendly_name=None, description=None,
                   access=None, location=None, project_id=None):
    """Create a new BigQuery dataset.

    Args:
        dataset_id: id for the new dataset.
        friendly_name / description / access / location: optional
            dataset resource fields.
        project_id: optional project override; client default otherwise.

    Returns:
        With self.swallow_results set: True on success, False on
        HttpError. Otherwise: the raw API response, or {} on failure.
    """
    project_id = self._get_project_id(project_id)
    try:
        service = self.bigquery.datasets()
        body = self.dataset_resource(dataset_id,
                                     project_id=project_id,
                                     friendly_name=friendly_name,
                                     description=description,
                                     access=access,
                                     location=location)
        result = service.insert(
            projectId=project_id,
            body=body).execute(num_retries=self.num_retries)
        return True if self.swallow_results else result
    except HttpError as e:
        logger.error('Cannot create dataset {0}, {1}'.format(dataset_id, e))
        return False if self.swallow_results else {}
Create a new BigQuery dataset .
203
7
239,392
def delete_dataset(self, dataset_id, delete_contents=False, project_id=None):
    """Delete a BigQuery dataset.

    Args:
        dataset_id: id of the dataset to remove.
        delete_contents: when True, also delete any tables it contains.
        project_id: optional project override; client default otherwise.

    Returns:
        With self.swallow_results set: True on success, False on
        HttpError. Otherwise: the raw API response, or {} on failure.
    """
    project_id = self._get_project_id(project_id)
    try:
        request = self.bigquery.datasets().delete(
            projectId=project_id,
            datasetId=dataset_id,
            deleteContents=delete_contents)
        result = request.execute(num_retries=self.num_retries)
        return True if self.swallow_results else result
    except HttpError as e:
        logger.error('Cannot delete dataset {0}: {1}'.format(dataset_id, e))
        return False if self.swallow_results else {}
Delete a BigQuery dataset .
162
6
239,393
def update_dataset(self, dataset_id, friendly_name=None, description=None,
                   access=None, project_id=None):
    """Replace an existing dataset's resource with new information.

    Unlike patch, update replaces the entire dataset resource with the
    submitted one.

    Args:
        dataset_id: id of the dataset to update.
        friendly_name / description / access: dataset resource fields.
        project_id: optional project override; client default otherwise.

    Returns:
        With self.swallow_results set: True on success, False on
        HttpError. Otherwise: the raw API response, or {} on failure.
    """
    project_id = self._get_project_id(project_id)
    try:
        service = self.bigquery.datasets()
        body = self.dataset_resource(dataset_id,
                                     friendly_name=friendly_name,
                                     description=description,
                                     access=access,
                                     project_id=project_id)
        request = service.update(projectId=project_id,
                                 datasetId=dataset_id,
                                 body=body)
        result = request.execute(num_retries=self.num_retries)
        return True if self.swallow_results else result
    except HttpError as e:
        logger.error('Cannot update dataset {0}: {1}'.format(dataset_id, e))
        return False if self.swallow_results else {}
Updates information in an existing dataset . The update method replaces the entire dataset resource whereas the patch method only replaces fields that are provided in the submitted dataset resource .
201
32
239,394
def schema_from_record(record, timestamp_parser=default_timestamp_parser):
    """Generate a BigQuery schema from an example record.

    Args:
        record: dict shaped like a row that will be inserted.
        timestamp_parser: callable used to decide whether string values
            are timestamps.

    Returns:
        List of BigQuery schema field descriptions, one per key.
    """
    schema = []
    for field_name, field_value in record.items():
        schema.append(describe_field(field_name, field_value,
                                     timestamp_parser=timestamp_parser))
    return schema
Generate a BigQuery schema given an example of a record that is to be inserted into BigQuery .
53
21
239,395
def describe_field(k, v, timestamp_parser=default_timestamp_parser):
    """Describe one example key/value pair as a BigQuery schema field.

    Args:
        k: column name.
        v: example value stored in the column (a list marks a repeated
           field and is described by its first element).
        timestamp_parser: callable used to detect timestamp strings.

    Returns:
        Dict with 'name', 'type', 'mode' and, for records, 'fields'.

    Raises:
        Exception: if *v* is an empty list (nothing to inspect).
        InvalidTypeException: if a value (possibly nested) has no
            BigQuery equivalent; the key names the full dotted path.
    """
    mode = "nullable"
    if isinstance(v, list):
        if len(v) == 0:
            raise Exception(
                "Can't describe schema because of empty list {0}:[]".format(k))
        # Repeated fields are described by their first element.
        v = v[0]
        mode = "repeated"

    bq_type = bigquery_type(v, timestamp_parser=timestamp_parser)
    if not bq_type:
        raise InvalidTypeException(k, v)

    field = {"name": k, "type": bq_type, "mode": mode}
    if bq_type == "record":
        try:
            field['fields'] = schema_from_record(v, timestamp_parser)
        except InvalidTypeException as e:
            # Prefix the failing key so the error names the full path.
            raise InvalidTypeException("%s.%s" % (k, e.key), e.value)
    return field
Given a key representing a column name and value representing the value stored in the column return a representation of the BigQuery schema element describing that field . Raise errors if invalid value types are provided .
252
38
239,396
def render_query(dataset, tables, select=None, conditions=None,
                 groupings=None, having=None, order_by=None, limit=None):
    """Render a BigQuery SQL string over *tables* in *dataset*.

    Each clause is delegated to its _render_* helper; helpers render
    empty strings for absent parameters. Returns None when either
    dataset or tables is missing.
    """
    if dataset is None or tables is None:
        return None

    clauses = (
        _render_select(select),
        _render_sources(dataset, tables),
        _render_conditions(conditions),
        _render_groupings(groupings),
        _render_having(having),
        _render_order(order_by),
        _render_limit(limit),
    )
    return "%s %s %s %s %s %s %s" % clauses
Render a query that will run over the given tables using the specified parameters .
133
15
239,397
def _render_select ( selections ) : if not selections : return 'SELECT *' rendered_selections = [ ] for name , options in selections . items ( ) : if not isinstance ( options , list ) : options = [ options ] original_name = name for options_dict in options : name = original_name alias = options_dict . get ( 'alias' ) alias = "as %s" % alias if alias else "" formatter = options_dict . get ( 'format' ) if formatter : name = _format_select ( formatter , name ) rendered_selections . append ( "%s %s" % ( name , alias ) ) return "SELECT " + ", " . join ( rendered_selections )
Render the selection part of a query .
156
8
239,398
def _format_select ( formatter , name ) : for caster in formatter . split ( '-' ) : if caster == 'SEC_TO_MICRO' : name = "%s*1000000" % name elif ':' in caster : caster , args = caster . split ( ':' ) name = "%s(%s,%s)" % ( caster , name , args ) else : name = "%s(%s)" % ( caster , name ) return name
Modify the query selector by applying any formatters to it .
101
13
239,399
def _render_sources ( dataset , tables ) : if isinstance ( tables , dict ) : if tables . get ( 'date_range' , False ) : try : dataset_table = '.' . join ( [ dataset , tables [ 'table' ] ] ) return "FROM (TABLE_DATE_RANGE([{}], TIMESTAMP('{}')," " TIMESTAMP('{}'))) " . format ( dataset_table , tables [ 'from_date' ] , tables [ 'to_date' ] ) except KeyError as exp : logger . warn ( 'Missing parameter %s in selecting sources' % ( exp ) ) else : return "FROM " + ", " . join ( [ "[%s.%s]" % ( dataset , table ) for table in tables ] )
Render the source part of a query .
173
8