idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
34,700
def _get_all_relationships ( self ) : relationships_all = set ( ) for goterm in self . go2obj . values ( ) : if goterm . relationship : relationships_all . update ( goterm . relationship ) if goterm . relationship_rev : relationships_all . update ( goterm . relationship_rev ) return relationships_all
Return all relationships seen in GO Dag subset .
34,701
def _init_gos ( self , go_sources_arg , relationships_arg ) : if not go_sources_arg : assert self . go2obj_orig , "go2obj MUST BE PRESENT IF go_sources IS NOT" self . go_sources = set ( self . go2obj_orig ) self . go2obj = self . go2obj_orig sys . stdout . write ( "**NOTE: {N:,} SOURCE GO IDS\n" . format ( N = len ( se...
Initialize GO sources .
34,702
def _add_goterms_kws ( self , go2obj_user , kws_gos ) : if 'go2color' in kws_gos : for goid in kws_gos [ 'go2color' ] . keys ( ) : self . _add_goterms ( go2obj_user , goid )
Add more GOTerms to go2obj_user if requested and relevant .
34,703
def _add_goterms ( self , go2obj_user , goid ) : goterm = self . go2obj_orig [ goid ] if goid != goterm . id and goterm . id in go2obj_user and goid not in go2obj_user : go2obj_user [ goid ] = goterm
Add alt GO IDs to go2obj subset if requested and relevant .
34,704
def _init_go_sources ( self , go_sources_arg , go2obj_arg ) : gos_user = set ( go_sources_arg ) if 'children' in self . kws and self . kws [ 'children' ] : gos_user |= get_leaf_children ( gos_user , go2obj_arg ) gos_godag = set ( go2obj_arg ) gos_source = gos_user . intersection ( gos_godag ) gos_missing = gos_user . d...
Return GO sources which are present in GODag .
34,705
def get_rcntobj(self):
    """Return the user-supplied CountRelatives object, or build a fresh one."""
    candidate = self.kws.get('rcntobj')
    if isinstance(candidate, CountRelatives):
        return candidate
    return CountRelatives(
        self.go2obj,
        self.relationships,
        dcnt='dcnt' in self.kw_elems,
        go2letter=self.kws.get('go2letter'))
Return None or user - provided CountRelatives object .
34,706
def get_prt_fmt ( self , alt = False ) : prt_fmt = [ ] if alt : prt_fmt . append ( '{GO}{alt:1}' ) else : prt_fmt . append ( '{GO}' ) prt_fmt . append ( '# {NS}' ) if 'dcnt' in self . prt_flds : prt_fmt . append ( '{dcnt:5}' ) if 'childcnt' in self . prt_flds : prt_fmt . append ( '{childcnt:3}' ) if 'tcnt' in self . pr...
Return the format for printing GO named tuples and their related information .
34,707
def _init_kwelems ( self ) : ret = set ( ) if 'rcntobj' in self . kws : ret . add ( 'dcnt' ) ret . add ( 'D1' ) if 'tcntobj' in self . kws : ret . add ( 'tcnt' ) ret . add ( 'tfreq' ) ret . add ( 'tinfo' ) return ret
Init set elements .
34,708
def get_relations_cnt(self):
    """Return a Counter over every relation in all extension lists."""
    relations = (ext.relation for exts in self.exts for ext in exts)
    return cx.Counter(relations)
Get the set of all relations .
34,709
def _init_equiv ( self ) : gocolored_all = set ( self . go2color ) go2obj_usr = self . gosubdag . go2obj go2color_add = { } for gocolored_cur , color in self . go2color . items ( ) : if gocolored_cur in go2obj_usr : goobj = go2obj_usr [ gocolored_cur ] goids_equiv = goobj . alt_ids . union ( [ goobj . id ] ) for goid_a...
Add equivalent GO IDs to go2color if necessary .
34,710
def get_pvalue(self):
    """Return the 1st method's p-value if one exists; else the uncorrected one."""
    if not self.method_flds:
        return getattr(self, "p_uncorrected")
    attr = "p_{m}".format(m=self.get_method_name())
    return getattr(self, attr)
Returns pval for 1st method if it exists . Else returns uncorrected pval .
34,711
def set_corrected_pval(self, nt_method, pvalue):
    """Record the method and store its corrected p-value as attr p_<fieldname>."""
    self.method_flds.append(nt_method)
    attr = "".join(["p_", nt_method.fieldname])
    setattr(self, attr, pvalue)
Add object attribute based on method name .
34,712
def _chk_fields ( field_data , field_formatter ) : if len ( field_data ) == len ( field_formatter ) : return len_dat = len ( field_data ) len_fmt = len ( field_formatter ) msg = [ "FIELD DATA({d}) != FORMATTER({f})" . format ( d = len_dat , f = len_fmt ) , "DAT({N}): {D}" . format ( N = len_dat , D = field_data ) , "FM...
Check that expected fields are present .
34,713
def set_goterm(self, go2obj):
    """Copy name, depth, and namespace code from the GOTerm for self.GO."""
    if self.GO not in go2obj:
        return
    goterm = go2obj[self.GO]
    self.goterm = goterm
    self.name = goterm.name
    self.depth = goterm.depth
    self.NS = self.namespace2NS[goterm.namespace]
Set goterm and copy the GOTerm's name and namespace.
34,714
def _init_enrichment ( self ) : if self . study_n : return 'e' if ( ( 1.0 * self . study_count / self . study_n ) > ( 1.0 * self . pop_count / self . pop_n ) ) else 'p' return 'p'
Mark as enriched or purified .
34,715
def get_prtflds_default(self):
    """Return default fields, inserting per-method p-value fields before the last."""
    pval_flds = ["p_{M}".format(M=meth.fieldname) for meth in self.method_flds]
    return self._fldsdefprt[:-1] + pval_flds + [self._fldsdefprt[-1]]
Get default fields .
34,716
def get_prtflds_all ( self ) : flds = [ ] dont_add = set ( [ '_parents' , 'method_flds' , 'relationship_rev' , 'relationship' ] ) self . _flds_append ( flds , self . get_prtflds_default ( ) , dont_add ) self . _flds_append ( flds , vars ( self ) . keys ( ) , dont_add ) self . _flds_append ( flds , vars ( self . goterm ...
When converting to a namedtuple get all possible fields in their original order .
34,717
def _flds_append ( flds , addthese , dont_add ) : for fld in addthese : if fld not in flds and fld not in dont_add : flds . append ( fld )
Retain order of fields as we add them once to the list .
34,718
def get_field_values ( self , fldnames , rpt_fmt = True , itemid2name = None ) : row = [ ] for fld in fldnames : val = getattr ( self , fld , None ) if val is not None : if rpt_fmt : val = self . _get_rpt_fmt ( fld , val , itemid2name ) row . append ( val ) else : val = getattr ( self . goterm , fld , None ) if rpt_fmt...
Get flat namedtuple fields for one GOEnrichmentRecord .
34,719
def _get_rpt_fmt ( fld , val , itemid2name = None ) : if fld . startswith ( "ratio_" ) : return "{N}/{TOT}" . format ( N = val [ 0 ] , TOT = val [ 1 ] ) elif fld in set ( [ 'study_items' , 'pop_items' , 'alt_ids' ] ) : if itemid2name is not None : val = [ itemid2name . get ( v , v ) for v in val ] return ", " . join ( ...
Return values in a format amenable to printing in a table .
34,720
def _err_fld ( self , fld , fldnames ) : msg = [ 'ERROR. UNRECOGNIZED FIELD({F})' . format ( F = fld ) ] actual_flds = set ( self . get_prtflds_default ( ) + self . goterm . __dict__ . keys ( ) ) bad_flds = set ( fldnames ) . difference ( set ( actual_flds ) ) if bad_flds : msg . append ( "\nGOEA RESULT FIELDS: {}" . f...
Unrecognized field . Print detailed Failure message .
34,721
def run_study_nts(self, study, **kws):
    """Run a GOEA on the study IDs; return results as a list of namedtuples."""
    results = self.run_study(study, **kws)
    return MgrNtGOEAs(results).get_goea_nts_all()
Run GOEA on study ids . Return results as a list of namedtuples .
34,722
def get_results_msg ( self , results , study ) : msg = [ ] if results : fmt = "{M:6,} GO terms are associated with {N:6,} of {NT:6,}" stu_items , num_gos_stu = self . get_item_cnt ( results , "study_items" ) pop_items , num_gos_pop = self . get_item_cnt ( results , "pop_items" ) stu_txt = fmt . format ( N = len ( stu_i...
Return summary for GOEA results .
34,723
def get_pval_uncorr ( self , study , log = sys . stdout ) : results = [ ] study_in_pop = self . pop . intersection ( study ) go2studyitems = get_terms ( "study" , study_in_pop , self . assoc , self . obo_dag , log ) pop_n , study_n = self . pop_n , len ( study_in_pop ) allterms = set ( go2studyitems ) . union ( set ( s...
Calculate the uncorrected pvalues for study items .
34,724
def get_study_items(results):
    """Return the union of study_items across all result records."""
    items = set()
    for rec in results:
        items |= set(rec.study_items)
    return items
Return a list of study items associated with the given results .
34,725
def _update_pvalcorr ( ntmt , corrected_pvals ) : if corrected_pvals is None : return for rec , val in zip ( ntmt . results , corrected_pvals ) : rec . set_corrected_pval ( ntmt . nt_method , val )
Add data members to store multiple test corrections .
34,726
def wr_txt ( self , fout_txt , goea_results , prtfmt = None , ** kws ) : if not goea_results : sys . stdout . write ( " 0 GOEA results. NOT WRITING {FOUT}\n" . format ( FOUT = fout_txt ) ) return with open ( fout_txt , 'w' ) as prt : if 'title' in kws : prt . write ( "{TITLE}\n" . format ( TITLE = kws [ 'title' ] ...
Print GOEA results to text file .
34,727
def prt_txt ( prt , goea_results , prtfmt = None , ** kws ) : objprt = PrtFmt ( ) if prtfmt is None : flds = [ 'GO' , 'NS' , 'p_uncorrected' , 'ratio_in_study' , 'ratio_in_pop' , 'depth' , 'name' , 'study_items' ] prtfmt = objprt . get_prtfmt_str ( flds ) prtfmt = objprt . adjust_prtfmt ( prtfmt ) prt_flds = RPT . get_...
Print GOEA results in text format .
34,728
def wr_xlsx ( self , fout_xlsx , goea_results , ** kws ) : objprt = PrtFmt ( ) prt_flds = kws . get ( 'prt_flds' , self . get_prtflds_default ( goea_results ) ) xlsx_data = MgrNtGOEAs ( goea_results ) . get_goea_nts_prt ( prt_flds , ** kws ) if 'fld2col_widths' not in kws : kws [ 'fld2col_widths' ] = { f : objprt . def...
Write a xlsx file .
34,729
def wr_tsv(self, fout_tsv, goea_results, **kws):
    """Write GOEA results to a tab-separated file."""
    prt_flds = kws.get('prt_flds', self.get_prtflds_default(goea_results))
    nts = MgrNtGOEAs(goea_results).get_goea_nts_prt(prt_flds, **kws)
    RPT.wr_tsv(fout_tsv, nts, **kws)
Write tab - separated table data to file
34,730
def prt_tsv(self, prt, goea_results, **kws):
    """Print GOEA results as tab-separated table data to the given stream."""
    prt_flds = kws.get('prt_flds', self.get_prtflds_default(goea_results))
    nts = MgrNtGOEAs(goea_results).get_goea_nts_prt(prt_flds, **kws)
    RPT.prt_tsv(prt, nts, **kws)
Write tab - separated table data
34,731
def get_ns2nts(results, fldnames=None, **kws):
    """Group GOEA result namedtuples by namespace (BP/MF/CC)."""
    ns2nts = cx.defaultdict(list)
    for ntgoea in MgrNtGOEAs(results).get_goea_nts_all(fldnames, **kws):
        ns2nts[ntgoea.NS].append(ntgoea)
    return ns2nts
Get namedtuples of GOEA results split into BP MF CC .
34,732
def print_date(min_ratio=None, pval=0.05):
    """Print the GOATOOLS version, today's date, and the GOEA cutoff parameters."""
    import goatools
    today = datetime.date.today()
    print("# Generated by GOATOOLS v{0} ({1})".format(goatools.__version__, today))
    print("# min_ratio={0} pval={1}".format(min_ratio, pval))
Print GOATOOLS version and the date the GOEA was run .
34,733
def print_results(self, results, min_ratio=None, indent=False, pval=0.05, prt=sys.stdout):
    """Filter GOEA results by ratio and p-value, then print the survivors."""
    adjusted = self.get_adj_records(results, min_ratio, pval)
    self.print_results_adj(adjusted, indent, prt)
Print GOEA results with some additional statistics calculated .
34,734
def get_adj_records(results, min_ratio=None, pval=0.05):
    """Return records passing the p-value cutoff whose ratios actually differ."""
    kept = []
    for rec in results:
        # Update derived statistics before applying any filtering.
        rec.update_remaining_fldsdefprt(min_ratio=min_ratio)
        if pval is not None and rec.p_uncorrected >= pval:
            continue
        if rec.is_ratio_different:
            kept.append(rec)
    return kept
Return GOEA results with some additional statistics calculated .
34,735
def print_results_adj(results, indent=False, prt=sys.stdout):
    """Print a header row, then one formatted line per GOEA record."""
    if not results:
        return
    header = "\t".join(GOEnrichmentStudy.get_prtflds_default(results))
    prt.write("{R}\n".format(R=header))
    for rec in results:
        prt.write("{R}\n".format(R=rec.__str__(indent=indent)))
Print GOEA results .
34,736
def wr_py_goea_results ( self , fout_py , goea_results , ** kws ) : var_name = kws . get ( "var_name" , "goea_results" ) docstring = kws . get ( "docstring" , "" ) sortby = kws . get ( "sortby" , None ) if goea_results : from goatools . nt_utils import wr_py_nts nts_goea = goea_results if hasattr ( goea_results [ 0 ] ,...
Save GOEA results into Python package containing list of namedtuples .
34,737
def _ensure_click ( self ) : script = ( "var viewPortHeight = Math.max(" "document.documentElement.clientHeight, window.innerHeight || 0);" "var elementTop = arguments[0].getBoundingClientRect().top;" "window.scrollBy(0, elementTop-(viewPortHeight/2));" ) self . parent . execute_script ( script , self ) for _ in range ...
Ensures a click gets made because Selenium can be a bit buggy about clicks
34,738
def transfer_session_cookies_to_driver ( self , domain = None ) : if not domain and self . _last_requests_url : domain = tldextract . extract ( self . _last_requests_url ) . registered_domain elif not domain and not self . _last_requests_url : raise Exception ( 'Trying to transfer cookies to selenium without specifying...
Copies the Session's cookies into the webdriver.
34,739
def copy_user_agent_from_driver(self):
    """Mirror the Selenium driver's user agent onto this session's headers."""
    driver_ua = self.driver.execute_script("return navigator.userAgent;")
    self.headers.update({"user-agent": driver_ua})
Updates the requests session's user-agent with the driver's user agent.
34,740
def ensure_add_cookie ( self , cookie , override_domain = None ) : if override_domain : cookie [ 'domain' ] = override_domain cookie_domain = cookie [ 'domain' ] if cookie [ 'domain' ] [ 0 ] != '.' else cookie [ 'domain' ] [ 1 : ] try : browser_domain = tldextract . extract ( self . current_url ) . fqdn except Attribut...
Ensures a cookie gets added to the driver
34,741
def is_cookie_in_driver(self, cookie):
    """Return True when the driver holds a cookie matching name, value, and domain."""
    for driver_cookie in self.get_cookies():
        names_match = cookie['name'] == driver_cookie['name']
        values_match = cookie['value'] == driver_cookie['value']
        # A leading dot on the driver's domain still counts as the same domain.
        domains_match = (cookie['domain'] == driver_cookie['domain'] or
                         '.' + cookie['domain'] == driver_cookie['domain'])
        if names_match and values_match and domains_match:
            return True
    return False
We check that the cookie is correctly added to the driver
34,742
def ensure_element ( self , locator , selector , state = "present" , timeout = None ) : locators = { 'id' : By . ID , 'name' : By . NAME , 'xpath' : By . XPATH , 'link_text' : By . LINK_TEXT , 'partial_link_text' : By . PARTIAL_LINK_TEXT , 'tag_name' : By . TAG_NAME , 'class_name' : By . CLASS_NAME , 'css_selector' : B...
This method allows us to wait till an element appears or disappears in the browser
34,743
def parse_buffer_to_ppm ( data ) : images = [ ] index = 0 while index < len ( data ) : code , size , rgb = tuple ( data [ index : index + 40 ] . split ( b'\n' ) [ 0 : 3 ] ) size_x , size_y = tuple ( size . split ( b' ' ) ) file_size = len ( code ) + len ( size ) + len ( rgb ) + 3 + int ( size_x ) * int ( size_y ) * 3 i...
Parse PPM file bytes to Pillow Image
34,744
def parse_buffer_to_jpeg(data):
    """Split concatenated JPEG bytes on the EOI marker; open each as an Image."""
    images = []
    # The final split element is the (empty) tail after the last EOI marker.
    for chunk in data.split(b'\xff\xd9')[:-1]:
        images.append(Image.open(BytesIO(chunk + b'\xff\xd9')))
    return images
Parse JPEG file bytes to Pillow Image
34,745
def parse_buffer_to_png ( data ) : images = [ ] c1 = 0 c2 = 0 data_len = len ( data ) while c1 < data_len : if data [ c2 : c2 + 4 ] == b'IEND' and ( c2 + 8 == data_len or data [ c2 + 9 : c2 + 12 ] == b'PNG' ) : images . append ( Image . open ( BytesIO ( data [ c1 : c2 + 8 ] ) ) ) c1 = c2 + 8 c2 = c1 c2 += 1 return imag...
Parse PNG file bytes to Pillow Image
34,746
def configure ( * args , ** kwargs ) : assert len ( HANDLERS ) == 0 log_destinations = get_log_destinations ( ) if 'stderr' in log_destinations : HANDLERS . append ( logging . StreamHandler ( ) ) def terrible_log_output ( s ) : import sys print ( s , file = sys . stderr ) places = [ '/dev/log' , '/var/run/log' , '/var/...
Configure logging .
34,747
def get_syslog_facility():
    """Look up the facility named by WALE_SYSLOG_FACILITY (default 'user').

    Returns (facility, valid) where valid is False when the name is unknown
    and the LOG_USER fallback was used.
    """
    name = os.getenv('WALE_SYSLOG_FACILITY', 'user')
    try:
        return handlers.SysLogHandler.facility_names[name.lower()], True
    except KeyError:
        return handlers.SysLogHandler.LOG_USER, False
Get syslog facility from ENV var
34,748
def set_level(level):
    """Set the logging level on every registered handler and the root logger."""
    for handler in HANDLERS:
        handler.setLevel(level)
    logging.root.setLevel(level)
Adjust the logging level of WAL - E
34,749
def format(self, record, *args, **kwargs):
    """Format the record, indenting every continuation line by eight spaces."""
    rendered = logging.Formatter.format(self, record, *args, **kwargs)
    return rendered.replace('\n', '\n' + ' ' * 8)
Format a message in the log
34,750
def _segmentation_guts ( root , file_paths , max_partition_size ) : if not root . endswith ( os . path . sep ) : root += os . path . sep if not os . path . isdir ( root ) : raise TarBadRootError ( root = root ) bogus_tar = None try : bogus_tar = tarfile . TarFile ( os . devnull , 'w' , dereference = False ) partition_n...
Segment a series of file paths into TarPartition values
34,751
def tarfile_extract ( fileobj , dest_path ) : tar = tarfile . open ( mode = 'r|' , fileobj = fileobj , bufsize = pipebuf . PIPE_BUF_BYTES ) dest_path = os . path . realpath ( dest_path ) extracted_files = [ ] for member in tar : assert not member . name . startswith ( '/' ) relpath = os . path . join ( dest_path , memb...
Extract a tarfile described by a file object to a specified path .
34,752
def backup_list ( self , query , detail ) : import csv from wal_e . storage . base import BackupInfo bl = self . _backup_list ( detail ) if query is None : bl_iter = bl else : bl_iter = bl . find_all ( query ) w_csv = csv . writer ( sys . stdout , dialect = 'excel-tab' ) w_csv . writerow ( BackupInfo . _fields ) for bi...
Lists base backups and basic information about them
34,753
def database_backup ( self , data_directory , * args , ** kwargs ) : upload_good = False backup_stop_good = False while_offline = False start_backup_info = None if 'while_offline' in kwargs : while_offline = kwargs . pop ( 'while_offline' ) try : if not while_offline : start_backup_info = PgBackupStatements . run_start...
Uploads a PostgreSQL file cluster to S3 or Windows Azure Blob Service
34,754
def wal_archive ( self , wal_path , concurrency = 1 ) : xlog_dir = os . path . dirname ( wal_path ) segment = WalSegment ( wal_path , explicit = True ) uploader = WalUploader ( self . layout , self . creds , self . gpg_key_id ) group = WalTransferGroup ( uploader ) group . start ( segment ) started = 1 seg_stream = Wal...
Uploads a WAL file to S3 or Windows Azure Blob Service
34,755
def wal_restore ( self , wal_name , wal_destination , prefetch_max ) : url = '{0}://{1}/{2}' . format ( self . layout . scheme , self . layout . store_name ( ) , self . layout . wal_path ( wal_name ) ) if prefetch_max > 0 : base = os . path . dirname ( os . path . realpath ( wal_destination ) ) pd = prefetch . Dirs ( b...
Downloads a WAL file from S3 or Windows Azure Blob Service
34,756
def _upload_pg_cluster_dir ( self , start_backup_info , pg_cluster_dir , version , pool_size , rate_limit = None ) : spec , parts = tar_partition . partition ( pg_cluster_dir ) backup_prefix = '{0}/basebackups_{1}/base_{file_name}_{file_offset}' . format ( self . layout . prefix . rstrip ( '/' ) , FILE_STRUCTURE_VERSIO...
Upload to url_prefix from pg_cluster_dir
34,757
def _exception_gather_guard(self, fn):
    """Wrap *fn* so UserExceptions are collected on self.exceptions, not raised."""
    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        try:
            return fn(*args, **kwargs)
        except UserException as exc:
            self.exceptions.append(exc)
    return wrapper
A higher order function to trap UserExceptions and then log them .
34,758
def create ( self , segment ) : def lackadaisical_mkdir ( place ) : ok = False place = path . realpath ( place ) try : os . makedirs ( place , 0o700 ) ok = True except EnvironmentError as e : if e . errno == errno . EEXIST : ok = True else : logger . warning ( msg = 'could not create prefetch directory' , detail = ( 'P...
A best - effort attempt to create directories .
34,759
def acquire ( self ) : try : pidfile = open ( self . _pidfile , "a" ) except IOError as err : raise SystemExit ( err ) try : fcntl . flock ( pidfile . fileno ( ) , fcntl . LOCK_EX | fcntl . LOCK_NB ) except IOError : raise SystemExit ( 'Already running according to ' + self . _pidfile ) pidfile . seek ( 0 ) pidfile . t...
Acquire the pidfile .
34,760
def release(self):
    """Close and delete the pidfile, tolerating an already-missing file."""
    try:
        self.pidfile.close()
        os.remove(self._pidfile)
    except OSError as err:
        # errno 2 == ENOENT: the pidfile is already gone, which is fine.
        if err.errno != 2:
            raise
Release the pidfile .
34,761
def _configure_buffer_sizes ( ) : global PIPE_BUF_BYTES global OS_PIPE_SZ PIPE_BUF_BYTES = 65536 OS_PIPE_SZ = None if not hasattr ( fcntl , 'F_SETPIPE_SZ' ) : import platform if platform . system ( ) == 'Linux' : fcntl . F_SETPIPE_SZ = 1031 try : with open ( '/proc/sys/fs/pipe-max-size' , 'r' ) as f : OS_PIPE_SZ = min ...
Set up module globals controlling buffer sizes
34,762
def set_buf_size(fd):
    """Grow the pipe buffer of *fd* to OS_PIPE_SZ when the platform allows it."""
    resizable = OS_PIPE_SZ and hasattr(fcntl, 'F_SETPIPE_SZ')
    if resizable:
        fcntl.fcntl(fd, fcntl.F_SETPIPE_SZ, OS_PIPE_SZ)
Set up os pipe buffer size if applicable
34,763
def mark_done ( self ) : if self . explicit : raise UserCritical ( msg = 'unexpected attempt to modify wal metadata detected' , detail = ( 'Segments explicitly passed from postgres should not ' 'engage in archiver metadata manipulation: {0}' . format ( self . path ) ) , hint = 'report a bug' ) try : status_dir = path ....
Mark the archive status of this segment as done .
34,764
def join(self):
    """Wait for the transfer to drain, re-raising the first error seen."""
    self.closed = True
    while self.expect > 0:
        outcome = self.wait_change.get()
        self.expect -= 1
        if outcome is None:
            continue
        # A failure arrived: reap what remains, then propagate it.
        gevent.joinall(list(self.greenlets), timeout=30)
        gevent.killall(list(self.greenlets), block=True, timeout=30)
        raise outcome
Wait for transfer to exit raising errors as necessary .
34,765
def start(self, segment):
    """Launch a greenlet transferring *segment*; refuse once closed."""
    if self.closed:
        raise UserCritical(msg='attempt to transfer wal after closing',
                           hint='report a bug')
    greenlet = gevent.Greenlet(self.transferer, segment)
    greenlet.link(self._complete_execution)
    self.greenlets.add(greenlet)
    self.expect += 1
    greenlet.start()
Begin transfer for an indicated wal segment .
34,766
def _complete_execution ( self , g ) : assert g . ready ( ) self . greenlets . remove ( g ) placed = UserCritical ( msg = 'placeholder bogus exception' , hint = 'report a bug' ) if g . successful ( ) : try : segment = g . get ( ) if not segment . explicit : segment . mark_done ( ) except BaseException as e : placed = e...
Forward any raised exceptions across a channel .
34,767
def pipe ( * args ) : if len ( args ) < 2 : raise ValueError ( "pipe needs at least 2 processes" ) for i in args [ : - 1 ] : i [ "stdout" ] = subprocess . PIPE popens = [ popen_sp ( ** args [ 0 ] ) ] for i in range ( 1 , len ( args ) ) : args [ i ] [ "stdin" ] = popens [ i - 1 ] . stdout popens . append ( popen_sp ( **...
Takes as parameters several dicts each with the same parameters passed to popen .
34,768
def pipe_wait(popens):
    """Wait on each Popen from last to first; return exit codes in input order."""
    pending = copy.copy(popens)
    results = [0] * len(pending)
    while pending:
        proc = pending.pop()
        results[len(pending)] = proc.wait()
    return results
Given an array of Popen objects returned by the pipe method wait for all processes to terminate and return the array with their return values .
34,769
def connect(self, creds):
    """Return an Azure BlockBlobService built from the given credentials."""
    service_kwargs = dict(
        account_name=creds.account_name,
        account_key=creds.account_key,
        sas_token=creds.access_token,
        protocol='https')
    return BlockBlobService(**service_kwargs)
Return an azure BlockBlobService instance .
34,770
def do_lzop_get ( creds , url , path , decrypt , do_retry ) : assert url . endswith ( '.lzo' ) , 'Expect an lzop-compressed file' with files . DeleteOnError ( path ) as decomp_out : key = _uri_to_key ( creds , url ) with get_download_pipeline ( PIPE , decomp_out . f , decrypt ) as pl : g = gevent . spawn ( write_and_re...
Get and decompress a URL
34,771
def psql_csv_run ( sql_command , error_handler = None ) : csv_query = ( 'COPY ({query}) TO STDOUT WITH CSV HEADER;' . format ( query = sql_command ) ) new_env = os . environ . copy ( ) new_env . setdefault ( 'PGOPTIONS' , '' ) new_env [ "PGOPTIONS" ] += ' --statement-timeout=0' psql_proc = popen_nonblock ( [ PSQL_BIN ,...
Runs psql and returns a CSVReader object from the query
34,772
def _wal_name(cls):
    """Return 'wal' or 'xlog' per server version, caching the answer on the class."""
    if cls._WAL_NAME is None:
        row = cls._dict_transform(psql_csv_run(
            "SELECT current_setting('server_version_num')"))
        # PostgreSQL 10+ (version_num >= 100000) renamed xlog to wal.
        is_pg10_plus = int(row['current_setting']) >= 100000
        cls._WAL_NAME = 'wal' if is_pg10_plus else 'xlog'
    return cls._WAL_NAME
Sets and returns _WAL_NAME to wal or xlog depending on version of postgres we are working with .
34,773
def run_start_backup ( cls ) : def handler ( popen ) : assert popen . returncode != 0 raise UserException ( 'Could not start hot backup' ) label = 'freeze_start_' + ( datetime . datetime . utcnow ( ) . replace ( tzinfo = UTC ( ) ) . isoformat ( ) ) return cls . _dict_transform ( psql_csv_run ( "SELECT file_name, " " l...
Connects to a server and attempts to start a hot backup
34,774
def run_stop_backup ( cls ) : def handler ( popen ) : assert popen . returncode != 0 raise UserException ( 'Could not stop hot backup' ) return cls . _dict_transform ( psql_csv_run ( "SELECT file_name, " " lpad(file_offset::text, 8, '0') AS file_offset " "FROM pg_{0}file_name_offset(" " pg_stop_backup())" . format ( ...
Stop a hot backup if it was running or error
34,775
def _is_ipv4_like ( s ) : parts = s . split ( '.' ) if len ( parts ) != 4 : return False for part in parts : try : int ( part ) except ValueError : return False return True
Find if a string superficially looks like an IPv4 address .
34,776
def _is_mostly_subdomain_compatible ( bucket_name ) : return ( bucket_name . lower ( ) == bucket_name and len ( bucket_name ) >= 3 and len ( bucket_name ) <= 63 and '_' not in bucket_name and '..' not in bucket_name and '-.' not in bucket_name and '.-' not in bucket_name and not bucket_name . startswith ( '-' ) and not...
Returns True if SubdomainCallingFormat can be used ... mostly
34,777
def _connect_secureish ( * args , ** kwargs ) : if tuple ( int ( x ) for x in boto . __version__ . split ( '.' ) ) >= ( 2 , 6 , 0 ) : kwargs [ 'validate_certs' ] = True kwargs [ 'is_secure' ] = True auth_region_name = kwargs . pop ( 'auth_region_name' , None ) conn = connection . S3Connection ( * args , ** kwargs ) if ...
Connect using the safest available options .
34,778
def from_store_name ( bucket_name , region = None ) : if region is None : region = os . getenv ( 'AWS_REGION' ) mostly_ok = _is_mostly_subdomain_compatible ( bucket_name ) if not mostly_ok : return CallingInfo ( bucket_name = bucket_name , region = region , calling_format = connection . OrdinaryCallingFormat , ordinary...
Construct a CallingInfo value from a bucket name .
34,779
def connect ( self , creds ) : def _conn_help ( * args , ** kwargs ) : return _connect_secureish ( * args , provider = creds , calling_format = self . calling_format ( ) , auth_region_name = self . region , ** kwargs ) impl = os . getenv ( 'WALE_S3_ENDPOINT' ) if impl : return connection . S3Connection ( ** _s3connecti...
Return a boto S3Connection set up with great care .
34,780
def remove_empty_dirs(path):
    """Remove all empty directories beneath *path* (path itself is kept).

    Walks bottom-up (topdown=False) so that a directory whose only contents
    were empty subdirectories is itself removed once they are gone; a
    top-down walk would leave such parents behind in a single pass.
    """
    for root, dirs, _files in os.walk(path, topdown=False):
        for d in dirs:
            dir_path = os.path.join(root, d)
            if not os.listdir(dir_path):
                os.rmdir(dir_path)
removes empty dirs under a given path
34,781
def ensure_dir_exists(path):
    """Create the parent directory of *path* if it does not already exist.

    Fixes two defects in the naive exists-then-makedirs form: a bare
    filename yields dirname '' (which os.makedirs rejects), and the
    exists/makedirs pair races with concurrent creators. exist_ok=True
    makes the call idempotent and race-free.
    """
    dir_path = os.path.dirname(path)
    if dir_path:
        os.makedirs(dir_path, exist_ok=True)
create a directory if required
34,782
def external_program_check ( to_check = frozenset ( [ PSQL_BIN , LZOP_BIN , PV_BIN ] ) ) : could_not_run = [ ] error_msgs = [ ] def psql_err_handler ( popen ) : assert popen . returncode != 0 error_msgs . append ( textwrap . fill ( 'Could not get a connection to the database: ' 'note that superuser access is required' ...
Validates the existence and basic working - ness of other programs
34,783
def parse_boolean_envvar(val):
    """Map an environment-variable string to a bool; raise ValueError on junk."""
    if not val:
        return False
    lowered = val.lower()
    if lowered in ('false', '0'):
        return False
    if lowered in ('true', '1'):
        return True
    raise ValueError('Invalid boolean environment variable: %s' % val)
Parse a boolean environment variable .
34,784
def _config_hint_generate ( optname , both_env_and_param ) : env = optname . replace ( '-' , '_' ) . upper ( ) if both_env_and_param : option = '--' + optname . lower ( ) return ( 'Pass "{0}" or set the environment variable "{1}".' . format ( option , env ) ) else : return 'Set the environment variable {0}.' . format (...
Generate HINT language for missing configuration
34,785
def render_subcommand(args):
    """Render a human-readable subcommand name; None for WAL operations."""
    if args.subcommand == 'delete':
        return 'delete ' + args.delete_subcommand
    wal_cmds = ('wal-prefetch', 'wal-push', 'wal-fetch')
    return None if args.subcommand in wal_cmds else args.subcommand
Render a subcommand for human - centric viewing
34,786
def do_lzop_put ( creds , url , local_path , gpg_key ) : assert url . endswith ( '.lzo' ) blobstore = get_blobstore ( storage . StorageLayout ( url ) ) with tempfile . NamedTemporaryFile ( mode = 'r+b' , buffering = pipebuf . PIPE_BUF_BYTES ) as tf : with pipeline . get_upload_pipeline ( open ( local_path , 'rb' ) , tf...
Compress and upload a given local path .
34,787
def do_lzop_get(creds, url, path, decrypt, do_retry=True):
    """Fetch and decompress an S3 or WABS URL via the matching blobstore."""
    layout = storage.StorageLayout(url)
    store = get_blobstore(layout)
    return store.do_lzop_get(creds, url, path, decrypt, do_retry=do_retry)
Get and decompress an S3 or WABS URL
34,788
def find_all ( self , query ) : match = re . match ( storage . BASE_BACKUP_REGEXP , query ) if match is not None : for backup in iter ( self ) : if backup . name == query : yield backup elif query == 'LATEST' : all_backups = list ( iter ( self ) ) if not all_backups : return assert len ( all_backups ) > 0 all_backups ....
A procedure to assist in finding or detailing specific backups
34,789
def _delete_wals_before ( self , segment_info ) : wal_key_depth = self . layout . wal_directory ( ) . count ( '/' ) + 1 for key in self . _backup_list ( prefix = self . layout . wal_directory ( ) ) : key_name = self . layout . key_name ( key ) bucket = self . _container_name ( key ) url = '{scm}://{bucket}/{name}' . fo...
Delete all WAL files before segment_info .
34,790
def delete_everything(self):
    """Delete every base-backup key and WAL key in the storage layout."""
    deletions = (
        (self.layout.basebackups(), 'part of a base backup'),
        (self.layout.wal_directory(), 'part of wal logs'),
    )
    for prefix, reason in deletions:
        for key in self._backup_list(prefix=prefix):
            self._maybe_delete_key(key, reason)
    if self.deleter:
        self.deleter.close()
Delete everything in a storage layout
34,791
def delete_before(self, segment_info):
    """Delete all base backups and WAL preceding *segment_info*."""
    self._delete_base_backups_before(segment_info)
    self._delete_wals_before(segment_info)
    if self.deleter:
        self.deleter.close()
Delete all base backups and WAL before a given segment
34,792
def delete_with_retention ( self , num_to_retain ) : base_backup_sentinel_depth = self . layout . basebackups ( ) . count ( '/' ) + 1 completed_basebackups = [ ] for key in self . _backup_list ( prefix = self . layout . basebackups ( ) ) : key_name = self . layout . key_name ( key ) key_parts = key_name . split ( '/' )...
Retain the num_to_retain most recent backups and delete all data before them .
34,793
def connect ( creds ) : return swiftclient . Connection ( authurl = creds . authurl , user = creds . user , key = creds . password , auth_version = creds . auth_version , tenant_name = creds . tenant_name , os_options = { "region_name" : creds . region , "endpoint_type" : creds . endpoint_type , "domain_id" : creds . d...
Construct a connection value from a container
34,794
def connect(creds, max_retries=100):
    """Return a RetryClient for Google Storage using application-default credentials."""
    credentials, project = google.auth.default()
    return RetryClient(max_retries=max_retries,
                       project=project,
                       credentials=credentials)
Construct a connection value to Google Storage API
34,795
def retry ( exception_processor = generic_exception_processor , max_retries = 100 ) : max_retries = int ( os . getenv ( 'WALE_RETRIES' , max_retries ) ) def yield_new_function_from ( f ) : def shim ( * args , ** kwargs ) : exc_processor_cxt = None retries = 0 while True : gevent . sleep ( 0.1 ) try : return f ( * args ...
Generic retry decorator
34,796
def _start(self, tpart):
    """Spawn an uploader greenlet for *tpart* and account for its resources."""
    greenlet = gevent.Greenlet(self.uploader, tpart)
    greenlet.link(self._finish)
    self.concurrency_burden += 1
    self.member_burden += len(tpart)
    greenlet.start()
Start upload and account for resource consumption.
34,797
def _finish ( self , g ) : assert g . ready ( ) if g . successful ( ) : finished_tpart = g . get ( ) self . wait_change . put ( finished_tpart ) else : self . wait_change . put ( g . exception )
Called on completion of an upload greenlet .
34,798
def _wait ( self ) : val = self . wait_change . get ( ) if isinstance ( val , Exception ) : raise val else : self . member_burden -= len ( val ) self . concurrency_burden -= 1
Block until an upload finishes
34,799
def put ( self , tpart ) : if self . closed : raise UserCritical ( msg = 'attempt to upload tar after closing' , hint = 'report a bug' ) while True : too_many = ( self . concurrency_burden + 1 > self . max_concurrency or self . member_burden + len ( tpart ) > self . max_members ) if too_many : if self . concurrency_bur...
Upload a tar volume