idx (int64, 0–251k) | question (string, 53–3.53k chars) | target (string, 5–1.23k chars) | len_question (int64, 20–893) | len_target (int64, 3–238) |
|---|---|---|---|---|
2,600 | def load_file_to_list ( self ) : lst = [ ] try : with open ( self . fullname , 'r' ) as f : for line in f : lst . append ( line ) return lst except IOError : return lst | load a file to a list | 56 | 6 |
2,601 | def get_program_list ( ) : colList = [ 'FileName' , 'FileSize' , 'Functions' , 'Imports' ] txt = '<TABLE width=90% border=0>' txt += format_file_table_header ( colList ) fl = web . GetFileList ( aikif_folder , [ '*.py' ] , 'N' ) for f in fl : if '__init__.py' in f : txt += '<TR><TD colspan=4><HR><H3>' + get_subfolder ( f ) + '</h3></td></tr>\n' else : txt += format_file_to_html_row ( f , colList ) txt += '</TABLE>\n\n' return txt | get a HTML formatted view of all Python programs in all subfolders of AIKIF including imports and lists of functions and classes | 178 | 26 |
2,602 | def get_subfolder ( txt ) : root_folder = os . sep + 'aikif' + os . sep ndx = txt . find ( root_folder , 1 ) return txt [ ndx : ] . replace ( '__init__.py' , '' ) | extracts a displayable subfolder name from full filename | 62 | 12 |
2,603 | def get_functions ( fname ) : txt = '' with open ( fname , 'r' ) as f : for line in f : if line . strip ( ) [ 0 : 4 ] == 'def ' : txt += '<PRE>' + strip_text_after_string ( strip_text_after_string ( line , '#' ) [ 4 : ] , ':' ) + '</PRE>\n' if line [ 0 : 5 ] == 'class' : txt += '<PRE>' + strip_text_after_string ( strip_text_after_string ( line , '#' ) , ':' ) + '</PRE>\n' return txt + '<BR>' | get a list of functions from a Python program | 159 | 9 |
2,604 | def strip_text_after_string ( txt , junk ) : if junk in txt : return txt [ : txt . find ( junk ) ] else : return txt | used to strip any poorly documented comments at the end of function defs | 39 | 14 |
2,605 | def get_imports ( fname ) : txt = '' with open ( fname , 'r' ) as f : for line in f : if line [ 0 : 6 ] == 'import' : txt += '<PRE>' + strip_text_after_string ( line [ 7 : ] , ' as ' ) + '</PRE>\n' return txt + '<BR>' | get a list of imports from a Python program | 88 | 9 |
2,606 | def main ( arg1 = 55 , arg2 = 'test' , arg3 = None ) : print ( 'Starting dummy AI algorithm with :' , arg1 , arg2 , arg3 ) if arg3 is None : arg3 = [ 5 , 6 , 7 , 5 , 4 , ] result = arg1 + arg3 [ 0 ] * 7566.545 # dummy result print ( 'Done - returning ' , result ) return result | This is a sample program to show how a learning agent can be logged using AIKIF . The idea is that this main function is your algorithm which will run until it finds a successful result . The result is returned and the time taken is logged . There can optionally be have additional functions to call to allow for easy logging access | 92 | 65 |
2,607 | def get ( self , key ) : res = self . connection . get ( key ) print ( res ) return res | get a set of keys from redis | 24 | 8 |
2,608 | def creator ( _ , config ) : packer_script = render ( config . script , model = config . model , env = config . env , variables = config . variables , item = config . item ) filename = "packer.dry.run.see.comment" if not config . dry_run : # writing Packer file (JSON) filename = write_temporary_file ( packer_script , 'packer-' , '.json' ) packer_script = '' # rendering the Bash script for generating the Packer image template_file = os . path . join ( os . path . dirname ( __file__ ) , 'templates/packer-image.sh.j2' ) with open ( template_file ) as handle : template = handle . read ( ) config . script = render ( template , debug = config . debug , packer_content = packer_script , packer_filename = filename ) return Packer ( config ) | Creator function for creating an instance of a Packer image script . | 204 | 14 |
2,609 | def process_jpeg_bytes ( bytes_in , quality = DEFAULT_JPEG_QUALITY ) : bytes_out_p = ffi . new ( "char**" ) bytes_out_p_gc = ffi . gc ( bytes_out_p , lib . guetzli_free_bytes ) length = lib . guetzli_process_jpeg_bytes ( bytes_in , len ( bytes_in ) , bytes_out_p_gc , quality ) if length == 0 : raise ValueError ( "Invalid JPEG: Guetzli was not able to decode the image" ) # noqa bytes_out = ffi . cast ( "char*" , bytes_out_p_gc [ 0 ] ) return ffi . unpack ( bytes_out , length ) | Generates an optimized JPEG from JPEG - encoded bytes . | 173 | 11 |
2,610 | def process_rgb_bytes ( bytes_in , width , height , quality = DEFAULT_JPEG_QUALITY ) : if len ( bytes_in ) != width * height * 3 : raise ValueError ( "bytes_in length is not coherent with given width and height" ) # noqa bytes_out_p = ffi . new ( "char**" ) bytes_out_p_gc = ffi . gc ( bytes_out_p , lib . guetzli_free_bytes ) length = lib . guetzli_process_rgb_bytes ( bytes_in , width , height , bytes_out_p_gc , quality ) bytes_out = ffi . cast ( "char*" , bytes_out_p_gc [ 0 ] ) return ffi . unpack ( bytes_out , length ) | Generates an optimized JPEG from RGB bytes . | 182 | 9 |
2,611 | def singleton ( the_class ) : class_instances = { } def get_instance ( * args , * * kwargs ) : """ Creating or just return the one and only class instance. The singleton depends on the parameters used in __init__ @type args: list @param args: positional arguments of the constructor. @type kwargs: dict @param kwargs: named parameters of the constructor. @rtype: decorated class type @return: singleton instance of decorated class. """ key = ( the_class , args , str ( kwargs ) ) if key not in class_instances : class_instances [ key ] = the_class ( * args , * * kwargs ) return class_instances [ key ] return get_instance | Decorator for a class to make a singleton out of it . | 165 | 15 |
2,612 | def build_board_2048 ( ) : grd = Grid ( 4 , 4 , [ 2 , 4 ] ) grd . new_tile ( ) grd . new_tile ( ) print ( grd ) return grd | builds a 2048 starting board Printing Grid 0 0 0 2 0 0 4 0 0 0 0 0 0 0 0 0 | 49 | 24 |
2,613 | def build_board_checkers ( ) : grd = Grid ( 8 , 8 , [ "B" , "W" ] ) for c in range ( 4 ) : grd . set_tile ( 0 , ( c * 2 ) - 1 , "B" ) grd . set_tile ( 1 , ( c * 2 ) - 0 , "B" ) grd . set_tile ( 6 , ( c * 2 ) + 1 , "W" ) grd . set_tile ( 7 , ( c * 2 ) - 0 , "W" ) print ( grd ) return grd | builds a checkers starting board Printing Grid 0 B 0 B 0 B 0 B B 0 B 0 B 0 B 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 W 0 W 0 W 0 W W 0 W 0 W 0 W 0 | 129 | 73 |
2,614 | def TEST ( ) : grd = Grid ( 4 , 4 , [ 2 , 4 ] ) grd . new_tile ( ) grd . new_tile ( ) print ( grd ) print ( "There are " , grd . count_blank_positions ( ) , " blanks in grid 1\n" ) grd2 = Grid ( 5 , 5 , [ 'A' , 'B' ] ) grd2 . new_tile ( 26 ) print ( grd2 ) build_board_checkers ( ) print ( "There are " , grd2 . count_blank_positions ( ) , " blanks in grid 2" ) | tests for this module | 142 | 4 |
2,615 | def url ( self , name ) : key = blobstore . create_gs_key ( '/gs' + name ) return images . get_serving_url ( key ) | Ask blobstore api for an url to directly serve the file | 36 | 12 |
2,616 | def process ( self , stage ) : self . logger . info ( "Processing pipeline stage '%s'" , self . title ) output = [ ] for entry in stage : key = list ( entry . keys ( ) ) [ 0 ] if key == "env" : self . pipeline . data . env_list [ 1 ] . update ( entry [ key ] ) self . logger . debug ( "Updating environment at level 1 with %s" , self . pipeline . data . env_list [ 1 ] ) continue # if not "env" then it must be "tasks" (schema): tasks = Tasks ( self . pipeline , re . match ( r"tasks\(parallel\)" , key ) is not None ) result = tasks . process ( entry [ key ] ) for line in result [ 'output' ] : output . append ( line ) if not result [ 'success' ] : self . event . failed ( ) return { 'success' : False , 'output' : output } self . event . succeeded ( ) return { 'success' : True , 'output' : output } | Processing one stage . | 235 | 5 |
2,617 | def trading_fees ( self ) -> TradingFees : return self . _fetch ( 'trading fees' , self . market . code ) ( self . _trading_fees ) ( ) | Fetch trading fees . | 44 | 5 |
2,618 | def fetch_ticker ( self ) -> Ticker : return self . _fetch ( 'ticker' , self . market . code ) ( self . _ticker ) ( ) | Fetch the market ticker . | 39 | 7 |
2,619 | def fetch_order_book ( self ) -> OrderBook : return self . _fetch ( 'order book' , self . market . code ) ( self . _order_book ) ( ) | Fetch the order book . | 41 | 6 |
2,620 | def fetch_trades_since ( self , since : int ) -> List [ Trade ] : return self . _fetch_since ( 'trades' , self . market . code ) ( self . _trades_since ) ( since ) | Fetch trades since given timestamp . | 52 | 7 |
2,621 | def fetch_deposits ( self , limit : int ) -> List [ Deposit ] : return self . _transactions ( self . _deposits , 'deposits' , limit ) | Fetch latest deposits must provide a limit . | 41 | 9 |
2,622 | def fetch_deposits_since ( self , since : int ) -> List [ Deposit ] : return self . _transactions_since ( self . _deposits_since , 'deposits' , since ) | Fetch all deposits since the given timestamp . | 47 | 9 |
2,623 | def fetch_withdrawals ( self , limit : int ) -> List [ Withdrawal ] : return self . _transactions ( self . _withdrawals , 'withdrawals' , limit ) | Fetch latest withdrawals must provide a limit . | 43 | 9 |
2,624 | def fetch_withdrawals_since ( self , since : int ) -> List [ Withdrawal ] : return self . _transactions_since ( self . _withdrawals_since , 'withdrawals' , since ) | Fetch all withdrawals since the given timestamp . | 49 | 9 |
2,625 | def request_withdrawal ( self , amount : Number , address : str , subtract_fee : bool = False , * * params ) -> Withdrawal : self . log . debug ( f'Requesting {self.currency} withdrawal from {self.name} to {address}' ) amount = self . _parse_money ( amount ) if self . dry_run : withdrawal = Withdrawal . create_default ( TxType . WITHDRAWAL , self . currency , amount , address ) self . log . warning ( f'DRY RUN: Withdrawal requested on {self.name}: {withdrawal}' ) return withdrawal try : withdrawal = self . _withdraw ( amount , address , subtract_fee , * * params ) except Exception as e : msg = f'Failed requesting withdrawal on {self.name}!: amount={amount}, address={address}' raise self . exception ( InvalidWithdrawal , msg , e ) from e self . log . info ( f'Withdrawal requested on {self.name}: {withdrawal}' ) return withdrawal | Request a withdrawal . | 233 | 4 |
2,626 | def fetch_order ( self , order_id : str ) -> Order : return self . _fetch ( f'order id={order_id}' , exc = OrderNotFound ) ( self . _order ) ( order_id ) | Fetch an order by ID . | 51 | 7 |
2,627 | def fetch_open_orders ( self , limit : int ) -> List [ Order ] : return self . _fetch_orders_limit ( self . _open_orders , limit ) | Fetch latest open orders must provide a limit . | 39 | 10 |
2,628 | def fetch_closed_orders ( self , limit : int ) -> List [ Order ] : return self . _fetch_orders_limit ( self . _closed_orders , limit ) | Fetch latest closed orders must provide a limit . | 39 | 10 |
2,629 | def fetch_closed_orders_since ( self , since : int ) -> List [ Order ] : return self . _fetch_orders_since ( self . _closed_orders_since , since ) | Fetch closed orders since the given timestamp . | 43 | 9 |
2,630 | def cancel_order ( self , order_id : str ) -> str : self . log . debug ( f'Canceling order id={order_id} on {self.name}' ) if self . dry_run : # Don't cancel if dry run self . log . warning ( f'DRY RUN: Order cancelled on {self.name}: id={order_id}' ) return order_id try : # Cancel order self . _cancel_order ( order_id ) except Exception as e : raise self . exception ( OrderNotFound , f'Failed to cancel order: id={order_id}' , e ) from e self . log . info ( f'Order cancelled on {self.name}: id={order_id}' ) return order_id | Cancel an order by ID . | 167 | 7 |
2,631 | def cancel_orders ( self , order_ids : List [ str ] ) -> List [ str ] : orders_to_cancel = order_ids self . log . debug ( f'Canceling orders on {self.name}: ids={orders_to_cancel}' ) cancelled_orders = [ ] if self . dry_run : # Don't cancel if dry run self . log . warning ( f'DRY RUN: Orders cancelled on {self.name}: {orders_to_cancel}' ) return orders_to_cancel try : # Iterate and cancel orders if self . has_batch_cancel : self . _cancel_orders ( orders_to_cancel ) cancelled_orders . append ( orders_to_cancel ) orders_to_cancel . clear ( ) else : for i , order_id in enumerate ( orders_to_cancel ) : self . _cancel_order ( order_id ) cancelled_orders . append ( order_id ) orders_to_cancel . pop ( i ) except Exception as e : msg = f'Failed to cancel {len(orders_to_cancel)} orders on {self.name}: ids={orders_to_cancel}' raise self . exception ( OrderNotFound , msg , e ) from e self . log . info ( f'Orders cancelled on {self.name}: ids={cancelled_orders}' ) return cancelled_orders | Cancel multiple orders by a list of IDs . | 316 | 10 |
2,632 | def cancel_all_orders ( self ) -> List [ str ] : order_ids = [ o . id for o in self . fetch_all_open_orders ( ) ] return self . cancel_orders ( order_ids ) | Cancel all open orders . | 49 | 6 |
2,633 | def min_order_amount ( self ) -> Money : return self . _fetch ( 'minimum order amount' , self . market . code ) ( self . _min_order_amount ) ( ) | Minimum amount to place an order . | 43 | 7 |
2,634 | def place_market_order ( self , side : Side , amount : Number ) -> Order : return self . place_order ( side , OrderType . MARKET , amount ) | Place a market order . | 37 | 5 |
2,635 | def main ( ) : parser = argparse . ArgumentParser ( description = main . __doc__ , add_help = True ) parser . add_argument ( '-M' , '--master_key' , dest = 'master_key' , help = 'Path to the master key ' + 'used for the encryption. Data is transferred without encryption if this' + 'is not provided.' , type = str , required = False , default = None ) parser . add_argument ( '-B' , '--bucket' , dest = 'bucket' , help = 'S3 bucket.' , type = str , required = True ) parser . add_argument ( '-R' , '--remote_dir' , dest = 'remote_dir' , help = 'Pseudo directory within ' + 'the bucket to store the file(s). NOTE: Folder structure below ' + 'REMOTE_DIR will be retained.' , type = str , required = False , default = '' ) parser . add_argument ( 'data' , help = 'File(s) or folder(s) to transfer to S3.' , type = str , nargs = '+' ) params = parser . parse_args ( ) # Input handling if params . master_key and not os . path . exists ( params . master_key ) : raise InputParameterError ( 'The master key was not found at ' + params . master_key ) # If the user doesn't have ~/.boto , it doesn't even make sense to go ahead if not os . path . exists ( os . path . expanduser ( '~/.boto' ) ) : raise RuntimeError ( '~/.boto not found' ) # Ensure that the remote directory doesn't start with a / if params . remote_dir . startswith ( '/' ) : raise InputParameterError ( 'The remote dir cannot start with a \'/\'' ) # Process each of the input arguments. for datum in params . data : datum = os . path . abspath ( datum ) if not os . path . exists ( datum ) : print ( 'ERROR: %s could not be found.' % datum , file = sys . stderr ) continue write_to_s3 ( datum , params . master_key , params . bucket , params . remote_dir ) return None | This is the main module for the script . The script will accept a file or a directory and then encrypt it with a provided key before pushing it to S3 into a specified bucket . | 501 | 37 |
2,636 | def _get_bucket_endpoint ( self ) : conn = S3Connection ( ) bucket = conn . lookup ( self . bucket_name ) if not bucket : # TODO: Make the bucket here? raise InputParameterError ( 'The provided bucket %s doesn\'t exist' % self . bucket_name ) endpoint = str ( bucket . get_location ( ) ) return endpoint | Queries S3 to identify the region hosting the provided bucket . | 82 | 13 |
2,637 | def align_rna ( job , fastqs , univ_options , star_options ) : star = job . wrapJobFn ( run_star , fastqs , univ_options , star_options , cores = star_options [ 'n' ] , memory = PromisedRequirement ( lambda x : int ( 1.85 * x . size ) , star_options [ 'index' ] ) , disk = PromisedRequirement ( star_disk , fastqs , star_options [ 'index' ] ) ) s_and_i = job . wrapJobFn ( sort_and_index_star , star . rv ( ) , univ_options , star_options ) . encapsulate ( ) job . addChild ( star ) star . addChild ( s_and_i ) return s_and_i . rv ( ) | A wrapper for the entire rna alignment subgraph . | 183 | 11 |
2,638 | def sort_and_index_star ( job , star_bams , univ_options , star_options ) : star_options [ 'samtools' ] [ 'n' ] = star_options [ 'n' ] sort = job . wrapJobFn ( sort_bamfile , star_bams [ 'rnaAligned.out.bam' ] , 'rna' , univ_options , samtools_options = star_options [ 'samtools' ] , disk = PromisedRequirement ( sort_disk , star_bams [ 'rnaAligned.out.bam' ] ) ) index = job . wrapJobFn ( index_bamfile , sort . rv ( ) , 'rna' , univ_options , samtools_options = star_options [ 'samtools' ] , sample_info = 'genome_sorted' , disk = PromisedRequirement ( index_disk , sort . rv ( ) ) ) job . addChild ( sort ) sort . addChild ( index ) return { 'rna_genome' : index . rv ( ) , 'rna_transcriptome.bam' : star_bams [ 'rnaAligned.toTranscriptome.out.bam' ] , 'rnaChimeric.out.junction' : star_bams [ 'rnaChimeric.out.junction' ] } | A wrapper for sorting and indexing the genomic star bam generated by run_star . It is required since run_star returns a dict of 2 bams | 313 | 32 |
2,639 | def reset ( self ) : self . expr = [ ] self . matcher = None self . last_matcher = None self . description = None | Resets the state of the expression | 31 | 7 |
2,640 | def clone ( self ) : from copy import copy clone = copy ( self ) clone . expr = copy ( self . expr ) clone . factory = False return clone | Clone this expression | 33 | 4 |
2,641 | def resolve ( self , value = None ) : # If we still have an uninitialized matcher init it now if self . matcher : self . _init_matcher ( ) # Evaluate the current set of matchers forming the expression matcher = self . evaluate ( ) try : value = self . _transform ( value ) self . _assertion ( matcher , value ) except AssertionError as ex : # By re-raising here the exception we reset the traceback raise ex finally : # Reset the state of the object so we can use it again if self . deferred : self . reset ( ) | Resolve the current expression against the supplied value | 128 | 9 |
2,642 | def _assertion ( self , matcher , value ) : # To support the syntax `any_of(subject) | should ...` we check if the # value to check is an Expectation object and if it is we use the descriptor # protocol to bind the value's assertion logic to this expectation. if isinstance ( value , Expectation ) : assertion = value . _assertion . __get__ ( self , Expectation ) assertion ( matcher , value . value ) else : hc . assert_that ( value , matcher ) | Perform the actual assertion for the given matcher and value . Override this method to apply a special configuration when performing the assertion . If the assertion fails it should raise an AssertionError . | 113 | 40 |
2,643 | def _transform ( self , value ) : if self . transform : try : value = self . transform ( value ) except : import sys exc_type , exc_obj , exc_tb = sys . exc_info ( ) raise AssertionError ( 'Error applying transformation <{0}>: {2}: {3}' . format ( self . transform . __name__ , value , exc_type . __name__ , exc_obj ) ) return value | Applies any defined transformation to the given value | 98 | 9 |
2,644 | def evaluate ( self ) : # Apply Shunting Yard algorithm to convert the infix expression # into Reverse Polish Notation. Since we have a very limited # set of operators and binding rules, the implementation becomes # really simple. The expression is formed of hamcrest matcher instances # and operators identifiers (ints). ops = [ ] rpn = [ ] for token in self . expr : if isinstance ( token , int ) : while len ( ops ) and token <= ops [ - 1 ] : rpn . append ( ops . pop ( ) ) ops . append ( token ) else : rpn . append ( token ) # Append the remaining operators while len ( ops ) : rpn . append ( ops . pop ( ) ) # Walk the RPN expression to create AllOf/AnyOf matchers stack = [ ] for token in rpn : if isinstance ( token , int ) : # Handle the NOT case in a special way since it's unary if token == OPERATOR . NOT : stack [ - 1 ] = IsNot ( stack [ - 1 ] ) continue # Our operators always need two operands if len ( stack ) < 2 : raise RuntimeError ( 'Unable to build a valid expression. Not enough operands available.' ) # Check what kind of matcher we need to create if token == OPERATOR . OR : matcher = hc . any_of ( * stack [ - 2 : ] ) else : # AND, BUT matcher = hc . all_of ( * stack [ - 2 : ] ) stack [ - 2 : ] = [ matcher ] else : stack . append ( token ) if len ( stack ) != 1 : raise RuntimeError ( 'Unable to build a valid expression. The RPN stack should have just one item.' ) matcher = stack . pop ( ) # If a description has been given include it in the matcher if self . description : matcher = hc . described_as ( self . description , matcher ) return matcher | Converts the current expression into a single matcher applying coordination operators to operands according to their binding rules | 419 | 21 |
2,645 | def _find_matcher ( self , alias ) : matcher = lookup ( alias ) if not matcher : msg = 'Matcher "%s" not found' % alias # Try to find similarly named matchers to help the user similar = suggest ( alias , max = 3 , cutoff = 0.5 ) if len ( similar ) > 1 : last = similar . pop ( ) msg += '. Perhaps you meant to use %s or %s?' % ( ', ' . join ( similar ) , last ) elif len ( similar ) > 0 : msg += '. Perhaps you meant to use %s?' % similar . pop ( ) raise KeyError ( msg ) return matcher | Finds a matcher based on the given alias or raises an error if no matcher could be found . | 141 | 22 |
2,646 | def _init_matcher ( self , * args , * * kwargs ) : # If subject-less expectation are provided as arguments convert them # to plain Hamcrest matchers in order to allow complex compositions fn = lambda x : x . evaluate ( ) if isinstance ( x , Expectation ) else x args = [ fn ( x ) for x in args ] kwargs = dict ( ( k , fn ( v ) ) for k , v in kwargs . items ( ) ) matcher = self . matcher ( * args , * * kwargs ) self . expr . append ( matcher ) self . matcher = None return matcher | Executes the current matcher appending it to the expression | 140 | 12 |
2,647 | def described_as ( self , description , * args ) : if len ( args ) : description = description . format ( * args ) self . description = description return self | Specify a custom message for the matcher | 35 | 9 |
2,648 | def make_dbsource ( * * kwargs ) : if 'spatialite' in connection . settings_dict . get ( 'ENGINE' ) : kwargs . setdefault ( 'file' , connection . settings_dict [ 'NAME' ] ) return mapnik . SQLite ( wkb_format = 'spatialite' , * * kwargs ) names = ( ( 'dbname' , 'NAME' ) , ( 'user' , 'USER' ) , ( 'password' , 'PASSWORD' ) , ( 'host' , 'HOST' ) , ( 'port' , 'PORT' ) ) for mopt , dopt in names : val = connection . settings_dict . get ( dopt ) if val : kwargs . setdefault ( mopt , val ) return mapnik . PostGIS ( * * kwargs ) | Returns a mapnik PostGIS or SQLite Datasource . | 189 | 14 |
2,649 | def layer ( self , queryset , stylename = None ) : cls = RasterLayer if hasattr ( queryset , 'image' ) else VectorLayer layer = cls ( queryset , style = stylename ) try : style = self . map . find_style ( layer . stylename ) except KeyError : self . map . append_style ( layer . stylename , layer . style ( ) ) layer . styles . append ( layer . stylename ) self . map . layers . append ( layer . _layer ) return layer | Returns a map Layer . | 116 | 5 |
2,650 | def zoom_bbox ( self , bbox ) : try : bbox . transform ( self . map . srs ) except gdal . GDALException : pass else : self . map . zoom_to_box ( mapnik . Box2d ( * bbox . extent ) ) | Zoom map to geometry extent . | 61 | 7 |
2,651 | def style ( self ) : style = mapnik . Style ( ) rule = mapnik . Rule ( ) self . _symbolizer = self . symbolizer ( ) rule . symbols . append ( self . _symbolizer ) style . rules . append ( rule ) return style | Returns a default Style . | 58 | 5 |
2,652 | def wrap_fusion ( job , fastqs , star_output , univ_options , star_fusion_options , fusion_inspector_options ) : # Give user option to skip fusion calling if not star_fusion_options [ 'run' ] : job . fileStore . logToMaster ( 'Skipping STAR-Fusion on %s' % univ_options [ 'patient' ] ) return fusion = job . wrapJobFn ( run_fusion , fastqs , star_output [ 'rnaChimeric.out.junction' ] , univ_options , star_fusion_options , fusion_inspector_options , cores = star_fusion_options [ 'n' ] , memory = PromisedRequirement ( lambda x : int ( 1.85 * x . size ) , star_fusion_options [ 'index' ] ) , disk = PromisedRequirement ( fusion_disk , fastqs , star_fusion_options [ 'index' ] ) ) . encapsulate ( ) job . addChild ( fusion ) return fusion . rv ( ) | A wrapper for run_fusion using the results from cutadapt and star as input . | 240 | 18 |
2,653 | def parse_star_fusion ( infile ) : reader = csv . reader ( infile , delimiter = '\t' ) header = reader . next ( ) header = { key : index for index , key in enumerate ( header ) } features = [ 'LeftGene' , 'LeftLocalBreakpoint' , 'LeftBreakpoint' , 'RightGene' , 'RightLocalBreakpoint' , 'RightBreakpoint' , 'LargeAnchorSupport' , 'JunctionReadCount' , 'SpanningFragCount' ] for line in reader : yield Expando ( dict ( ( feature , line [ header [ feature ] ] ) for feature in features ) ) | Parses STAR - Fusion format and returns an Expando object with basic features | 144 | 16 |
2,654 | def get_transcripts ( transcript_file ) : with open ( transcript_file , 'r' ) as fa : transcripts = { } regex_s = r"(?P<ID>TRINITY.*)\s(?P<fusion>.*--.*):(?P<left_start>\d+)-(?P<right_start>\d+)" regex = re . compile ( regex_s ) while True : # Usually the transcript is on one line try : info = fa . next ( ) seq = fa . next ( ) assert info . startswith ( '>' ) m = regex . search ( info ) if m : transcripts [ m . group ( 'ID' ) ] = seq . strip ( ) except StopIteration : break except AssertionError : print ( "WARNING: Malformed fusion transcript file" ) return transcripts | Parses FusionInspector transcript file and returns dictionary of sequences | 184 | 14 |
2,655 | def split_fusion_transcript ( annotation_path , transcripts ) : annotation = collections . defaultdict ( dict ) forward = 'ACGTN' reverse = 'TGCAN' trans = string . maketrans ( forward , reverse ) # Pull in assembled transcript annotation five_pr_splits = collections . defaultdict ( dict ) three_pr_splits = collections . defaultdict ( dict ) regex = re . compile ( r'ID=(?P<ID>.*);Name=(?P<Name>.*);Target=(?P<Target>.*)\s(?P<start>\d+)\s(?P<stop>\d+)' ) with open ( annotation_path , 'r' ) as gff : for line in gff : print ( line ) if line . startswith ( '#' ) : _ , eyd , fusion = line . strip ( ) . split ( ) fusion , start_stop = fusion . split ( ':' ) left_break , right_break = start_stop . split ( '-' ) annotation [ fusion ] [ eyd ] = { } annotation [ fusion ] [ eyd ] [ 'left_break' ] = left_break annotation [ fusion ] [ eyd ] [ 'right_break' ] = right_break else : line = line . strip ( ) . split ( '\t' ) fusion = line [ 0 ] strand = line [ 6 ] block_start = line [ 3 ] block_stop = line [ 4 ] attr = line [ 8 ] m = regex . search ( attr ) if m : transcript_id = m . group ( 'Name' ) rb = any ( [ block_start == annotation [ fusion ] [ transcript_id ] [ 'right_break' ] , block_stop == annotation [ fusion ] [ transcript_id ] [ 'right_break' ] ] ) lb = any ( [ block_start == annotation [ fusion ] [ transcript_id ] [ 'left_break' ] , block_stop == annotation [ fusion ] [ transcript_id ] [ 'left_break' ] ] ) if strand == '-' and rb : transcript_split = int ( m . group ( 'stop' ) ) + 1 # Off by one # Take the reverse complement to orient transcripts from 5' to 3' five_seq = transcripts [ transcript_id ] [ transcript_split : ] five_pr_splits [ fusion ] [ transcript_id ] = five_seq . translate ( trans ) [ : : - 1 ] three_seq = transcripts [ transcript_id ] [ : transcript_split ] three_pr_splits [ fusion ] [ transcript_id ] = three_seq . translate ( trans ) [ : : - 1 ] elif strand == '+' and lb : transcript_split = int ( m . group ( 'stop' ) ) s1 = transcripts [ transcript_id ] [ : transcript_split ] five_pr_splits [ fusion ] [ transcript_id ] = s1 s2 = transcripts [ transcript_id ] [ transcript_split : ] three_pr_splits [ fusion ] [ transcript_id ] = s2 return five_pr_splits , three_pr_splits | Finds the breakpoint in the fusion transcript and splits the 5 donor from the 3 acceptor | 686 | 19 |
2,656 | def get_gene_ids ( fusion_bed ) : with open ( fusion_bed , 'r' ) as f : gene_to_id = { } regex = re . compile ( r'(?P<gene>ENSG\d*)' ) for line in f : line = line . split ( '\t' ) transcript , gene_bit , name = line [ 3 ] . split ( ';' ) m = regex . search ( gene_bit ) if m : gene_to_id [ name ] = m . group ( 'gene' ) return gene_to_id | Parses FusionInspector bed file to ascertain the ENSEMBL gene ids | 129 | 19 |
2,657 | def _add_default_entries ( input_dict , defaults_dict ) : for key , value in defaults_dict . iteritems ( ) : if key == 'patients' : print ( 'Cannot default `patients`.' ) continue if isinstance ( value , dict ) : if key not in input_dict or input_dict [ key ] is None : # User didn't specify anython for the tool, but the entry was still in there so we # just copy over the whole defaults dict input_dict [ key ] = value else : r = _add_default_entries ( input_dict . get ( key , { } ) , value ) input_dict [ key ] = r else : # Only write if not in input_dict if key not in input_dict or input_dict [ key ] is None : # Either the user didn't have the entry, or had it without a value input_dict [ key ] = value return input_dict | Add the entries in defaults dict into input_dict if they don t exist in input_dict | 206 | 19 |
2,658 | def _process_group ( input_group , required_group , groupname , append_subgroups = None ) : if append_subgroups is None : append_subgroups = [ ] tool_options = { } for key in input_group : _ensure_set_contains ( input_group [ key ] , required_group . get ( key , { } ) , groupname + '::' + key ) if key in append_subgroups : continue else : tool_options [ key ] = input_group [ key ] for key in input_group : if key in append_subgroups : continue else : for yek in append_subgroups : tool_options [ key ] . update ( input_group [ yek ] ) return tool_options | Process one group from the input yaml . Ensure it has the required entries . If there is a subgroup that should be processed and then appended to the rest of the subgroups in that group handle it accordingly . | 162 | 44 |
2,659 | def get_fastq_2 ( job , patient_id , sample_type , fastq_1 ) : prefix , extn = fastq_1 , 'temp' final_extn = '' while extn : prefix , extn = os . path . splitext ( prefix ) final_extn = extn + final_extn if prefix . endswith ( '1' ) : prefix = prefix [ : - 1 ] job . fileStore . logToMaster ( '"%s" prefix for "%s" determined to be %s' % ( sample_type , patient_id , prefix ) ) break else : raise ParameterError ( 'Could not determine prefix from provided fastq (%s). Is it ' 'of the form <fastq_prefix>1.[fq/fastq][.gz]?' % fastq_1 ) if final_extn not in [ '.fastq' , '.fastq.gz' , '.fq' , '.fq.gz' ] : raise ParameterError ( 'If and _2 fastq path is not specified, only .fastq, .fq or ' 'their gzippped extensions are accepted. Could not process ' '%s:%s.' % ( patient_id , sample_type + '_fastq_1' ) ) return '' . join ( [ prefix , '2' , final_extn ] ) | For a path to a fastq_1 file return a fastq_2 file with the same prefix and naming scheme . | 301 | 25 |
2,660 | def parse_config_file ( job , config_file , max_cores = None ) : sample_set , univ_options , processed_tool_inputs = _parse_config_file ( job , config_file , max_cores ) # Start a job for each sample in the sample set for patient_id in sample_set . keys ( ) : job . addFollowOnJobFn ( launch_protect , sample_set [ patient_id ] , univ_options , processed_tool_inputs ) return None | Parse the config file and spawn a ProTECT job for every input sample . | 115 | 17 |
2,661 | def get_all_tool_inputs ( job , tools , outer_key = '' , mutation_caller_list = None ) : for tool in tools : for option in tools [ tool ] : if isinstance ( tools [ tool ] [ option ] , dict ) : tools [ tool ] [ option ] = get_all_tool_inputs ( job , { option : tools [ tool ] [ option ] } , outer_key = ':' . join ( [ outer_key , tool ] ) . lstrip ( ':' ) ) [ option ] else : # If a file is of the type file, vcf, tar or fasta, it needs to be downloaded from # S3 if reqd, then written to job store. if option . split ( '_' ) [ - 1 ] in [ 'file' , 'vcf' , 'index' , 'fasta' , 'fai' , 'idx' , 'dict' , 'tbi' , 'beds' , 'gtf' , 'config' ] : tools [ tool ] [ option ] = job . addChildJobFn ( get_pipeline_inputs , ':' . join ( [ outer_key , tool , option ] ) . lstrip ( ':' ) , tools [ tool ] [ option ] ) . rv ( ) elif option == 'version' : tools [ tool ] [ option ] = str ( tools [ tool ] [ option ] ) if mutation_caller_list is not None : # Guaranteed to occur only in the outermost loop indexes = tools . pop ( 'indexes' ) indexes [ 'chromosomes' ] = parse_chromosome_string ( job , indexes [ 'chromosomes' ] ) for mutation_caller in mutation_caller_list : if mutation_caller == 'indexes' : continue tools [ mutation_caller ] . update ( indexes ) return tools | Iterate through all the tool options and download required files from their remote locations . | 410 | 16 |
2,662 | def get_pipeline_inputs ( job , input_flag , input_file , encryption_key = None , per_file_encryption = False , gdc_download_token = None ) : work_dir = os . getcwd ( ) job . fileStore . logToMaster ( 'Obtaining file (%s) to the file job store' % input_flag ) if input_file . startswith ( ( 'http' , 'https' , 'ftp' ) ) : input_file = get_file_from_url ( job , input_file , encryption_key = encryption_key , per_file_encryption = per_file_encryption , write_to_jobstore = True ) elif input_file . startswith ( ( 'S3' , 's3' ) ) : input_file = get_file_from_s3 ( job , input_file , encryption_key = encryption_key , per_file_encryption = per_file_encryption , write_to_jobstore = True ) elif input_file . startswith ( ( 'GDC' , 'gdc' ) ) : input_file = get_file_from_gdc ( job , input_file , gdc_download_token = gdc_download_token , write_to_jobstore = True ) else : assert os . path . exists ( input_file ) , 'Bogus Input : ' + input_file input_file = job . fileStore . writeGlobalFile ( input_file ) return input_file | Get the input file from s3 or disk and write to file store . | 341 | 15 |
2,663 | def prepare_samples ( job , patient_dict , univ_options ) : job . fileStore . logToMaster ( 'Downloading Inputs for %s' % univ_options [ 'patient' ] ) # For each sample type, check if the prefix is an S3 link or a regular file # Download S3 files. output_dict = { } for input_file in patient_dict : if not input_file . endswith ( ( 'bam' , 'bai' , '_1' , '_2' , 'files' , 'vcf' , 'bedpe' ) ) : output_dict [ input_file ] = patient_dict [ input_file ] continue output_dict [ input_file ] = get_pipeline_inputs ( job , ':' . join ( [ univ_options [ 'patient' ] , input_file ] ) , patient_dict [ input_file ] , encryption_key = ( univ_options [ 'sse_key' ] if patient_dict [ 'ssec_encrypted' ] else None ) , per_file_encryption = univ_options [ 'sse_key_is_master' ] , gdc_download_token = univ_options [ 'gdc_download_token' ] ) return output_dict | Obtain the input files for the patient and write them to the file store . | 286 | 16 |
2,664 | def get_patient_bams ( job , patient_dict , sample_type , univ_options , bwa_options , mutect_options ) : output_dict = { } if 'dna' in sample_type : sample_info = 'fix_pg_sorted' prefix = sample_type + '_' + sample_info else : sample_info = 'genome_sorted' prefix = 'rna_' + sample_info if sample_type + '_bam' in patient_dict [ 'gdc_inputs' ] : output_dict [ prefix + '.bam' ] = patient_dict [ sample_type + '_bam' ] [ 0 ] output_dict [ prefix + '.bam.bai' ] = patient_dict [ sample_type + '_bam' ] [ 1 ] elif sample_type + '_bai' in patient_dict : output_dict [ prefix + '.bam' ] = patient_dict [ sample_type + '_bam' ] output_dict [ prefix + '.bam.bai' ] = patient_dict [ sample_type + '_bai' ] else : from protect . alignment . dna import index_bamfile , index_disk output_job = job . wrapJobFn ( index_bamfile , patient_dict [ sample_type + '_bam' ] , 'rna' if sample_type == 'tumor_rna' else sample_type , univ_options , bwa_options [ 'samtools' ] , sample_info = sample_info , export = False , disk = PromisedRequirement ( index_disk , patient_dict [ sample_type + '_bam' ] ) ) job . addChild ( output_job ) output_dict = output_job . rv ( ) if sample_type == 'tumor_rna' : if 'tumor_rna_transcriptome_bam' not in patient_dict : patient_dict [ 'tumor_rna_transcriptome_bam' ] = None return { 'rna_genome' : output_dict , 'rna_transcriptome.bam' : patient_dict [ 'tumor_rna_transcriptome_bam' ] } else : return output_dict | Convenience function to return the bam and its index in the correct format for a sample type . | 515 | 21 |
2,665 | def get_patient_vcf ( job , patient_dict ) : temp = job . fileStore . readGlobalFile ( patient_dict [ 'mutation_vcf' ] , os . path . join ( os . getcwd ( ) , 'temp.gz' ) ) if is_gzipfile ( temp ) : outfile = job . fileStore . writeGlobalFile ( gunzip ( temp ) ) job . fileStore . deleteGlobalFile ( patient_dict [ 'mutation_vcf' ] ) else : outfile = patient_dict [ 'mutation_vcf' ] return outfile | Convenience function to get the vcf from the patient dict | 130 | 13 |
2,666 | def get_patient_mhc_haplotype ( job , patient_dict ) : haplotype_archive = job . fileStore . readGlobalFile ( patient_dict [ 'hla_haplotype_files' ] ) haplotype_archive = untargz ( haplotype_archive , os . getcwd ( ) ) output_dict = { } for filename in 'mhci_alleles.list' , 'mhcii_alleles.list' : output_dict [ filename ] = job . fileStore . writeGlobalFile ( os . path . join ( haplotype_archive , filename ) ) return output_dict | Convenience function to get the mhc haplotype from the patient dict | 138 | 16 |
2,667 | def get_patient_expression ( job , patient_dict ) : expression_archive = job . fileStore . readGlobalFile ( patient_dict [ 'expression_files' ] ) expression_archive = untargz ( expression_archive , os . getcwd ( ) ) output_dict = { } for filename in 'rsem.genes.results' , 'rsem.isoforms.results' : output_dict [ filename ] = job . fileStore . writeGlobalFile ( os . path . join ( expression_archive , filename ) ) return output_dict | Convenience function to get the expression from the patient dict | 120 | 12 |
2,668 | def generate_config_file ( ) : shutil . copy ( os . path . join ( os . path . dirname ( __file__ ) , 'input_parameters.yaml' ) , os . path . join ( os . getcwd ( ) , 'ProTECT_config.yaml' ) ) | Generate a config file for a ProTECT run on hg19 . | 69 | 16 |
2,669 | def main ( ) : parser = argparse . ArgumentParser ( prog = 'ProTECT' , description = 'Prediction of T-Cell Epitopes for Cancer Therapy' , epilog = 'Contact Arjun Rao (aarao@ucsc.edu) if you encounter ' 'any problems while running ProTECT' ) inputs = parser . add_mutually_exclusive_group ( required = True ) inputs . add_argument ( '--config_file' , dest = 'config_file' , help = 'Config file to be used in the ' 'run.' , type = str , default = None ) inputs . add_argument ( '--generate_config' , dest = 'generate_config' , help = 'Generate a config file ' 'in the current directory that is pre-filled with references and flags for ' 'an hg19 run.' , action = 'store_true' , default = False ) parser . add_argument ( '--max-cores-per-job' , dest = 'max_cores' , help = 'Maximum cores to use per ' 'job. Aligners and Haplotypers ask for cores dependent on the machine that ' 'the launchpad gets assigned to -- In a heterogeneous cluster, this can ' 'lead to problems. This value should be set to the number of cpus on the ' 'smallest node in a cluster.' , type = int , required = False , default = None ) # We parse the args once to see if the user has asked for a config file to be generated. In # this case, we don't need a jobstore. To handle the case where Toil arguments are passed to # ProTECT, we parse known args, and if the used specified config_file instead of generate_config # we re-parse the arguments with the added Toil parser. params , others = parser . parse_known_args ( ) if params . generate_config : generate_config_file ( ) else : Job . Runner . addToilOptions ( parser ) params = parser . parse_args ( ) params . config_file = os . path . abspath ( params . config_file ) if params . maxCores : if not params . max_cores : params . max_cores = int ( params . maxCores ) else : if params . max_cores > int ( params . maxCores ) : print ( "The value provided to max-cores-per-job (%s) was greater than that " "provided to maxCores (%s). Setting max-cores-per-job = maxCores." % ( params . max_cores , params . maxCores ) , file = sys . stderr ) params . max_cores = int ( params . maxCores ) start = Job . wrapJobFn ( parse_config_file , params . config_file , params . max_cores ) Job . Runner . startToil ( start , params ) return None | This is the main function for ProTECT . | 640 | 10 |
2,670 | def poll ( self ) : if select . select ( [ self . tn ] , [ ] , [ ] , 0 ) == ( [ self . tn ] , [ ] , [ ] ) : response = urllib . unquote ( self . tn . read_until ( b"\n" ) . decode ( ) ) if self . debug : print "Telnet Poll: %s" % ( response [ : - 1 ] ) # TODO Keep track of which screen is displayed return response else : return None | Poll Check for a non - response string generated by LCDd and return any string read . LCDd generates strings for key presses menu events & screen visibility changes . | 110 | 32 |
2,671 | def module_to_dict ( module , omittable = lambda k : k . startswith ( '_' ) ) : return dict ( [ ( k , repr ( v ) ) for k , v in module . __dict__ . items ( ) if not omittable ( k ) ] ) | Converts a module namespace to a Python dictionary . Used by get_settings_diff . | 64 | 18 |
2,672 | def run_snpeff ( job , merged_mutation_file , univ_options , snpeff_options ) : work_dir = os . getcwd ( ) input_files = { 'merged_mutations.vcf' : merged_mutation_file , 'snpeff_index.tar.gz' : snpeff_options [ 'index' ] } input_files = get_files_from_filestore ( job , input_files , work_dir , docker = False ) input_files [ 'snpeff_index' ] = untargz ( input_files [ 'snpeff_index.tar.gz' ] , work_dir ) input_files = { key : docker_path ( path ) for key , path in input_files . items ( ) } parameters = [ 'eff' , '-dataDir' , input_files [ 'snpeff_index' ] , '-c' , '/' . join ( [ input_files [ 'snpeff_index' ] , 'snpEff_' + univ_options [ 'ref' ] + '_gencode.config' ] ) , '-no-intergenic' , '-no-downstream' , '-no-upstream' , # '-canon', '-noStats' , univ_options [ 'ref' ] + '_gencode' , input_files [ 'merged_mutations.vcf' ] ] xmx = snpeff_options [ 'java_Xmx' ] if snpeff_options [ 'java_Xmx' ] else univ_options [ 'java_Xmx' ] with open ( '/' . join ( [ work_dir , 'mutations.vcf' ] ) , 'w' ) as snpeff_file : docker_call ( tool = 'snpeff' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] , java_xmx = xmx , outfile = snpeff_file , tool_version = snpeff_options [ 'version' ] ) output_file = job . fileStore . writeGlobalFile ( snpeff_file . name ) export_results ( job , output_file , snpeff_file . name , univ_options , subfolder = 'mutations/snpeffed' ) job . fileStore . logToMaster ( 'Ran snpeff on %s successfully' % univ_options [ 'patient' ] ) return output_file | Run snpeff on an input vcf . | 564 | 10 |
2,673 | def paths_in_directory ( input_directory ) : paths = [ ] for base_path , directories , filenames in os . walk ( input_directory ) : relative_path = os . path . relpath ( base_path , input_directory ) path_components = relative_path . split ( os . sep ) if path_components [ 0 ] == "." : path_components = path_components [ 1 : ] if path_components and path_components [ 0 ] . startswith ( "." ) : # hidden dir continue path_components = filter ( bool , path_components ) # remove empty components for filename in filenames : if filename . startswith ( "." ) : # hidden file continue paths . append ( path_components + [ filename ] ) return paths | Generate a list of all files in input_directory each as a list containing path components . | 176 | 19 |
2,674 | def run_car_t_validity_assessment ( job , rsem_files , univ_options , reports_options ) : return job . addChildJobFn ( assess_car_t_validity , rsem_files [ 'rsem.genes.results' ] , univ_options , reports_options ) . rv ( ) | A wrapper for assess_car_t_validity . | 78 | 12 |
2,675 | def align_dna ( job , fastqs , sample_type , univ_options , bwa_options ) : # The mkdup and regroup steps use picard that allots heap space using the Xmx key in the # univ_options dictionary. This should reflect in the job allotment. Since We want all these # jobs to occur on the same node, we ened to give them all the same memory requirements. bwa = job . wrapJobFn ( run_bwa , fastqs , sample_type , univ_options , bwa_options , disk = PromisedRequirement ( bwa_disk , fastqs , bwa_options [ 'index' ] ) , memory = univ_options [ 'java_Xmx' ] , cores = bwa_options [ 'n' ] ) sam2bam = job . wrapJobFn ( bam_conversion , bwa . rv ( ) , sample_type , univ_options , bwa_options [ 'samtools' ] , disk = PromisedRequirement ( sam2bam_disk , bwa . rv ( ) ) , memory = univ_options [ 'java_Xmx' ] ) # reheader takes the same disk as sam2bam so we can serialize this on the same worker. reheader = job . wrapJobFn ( fix_bam_header , sam2bam . rv ( ) , sample_type , univ_options , bwa_options [ 'samtools' ] , disk = PromisedRequirement ( sam2bam_disk , bwa . rv ( ) ) , memory = univ_options [ 'java_Xmx' ] ) regroup = job . wrapJobFn ( add_readgroups , reheader . rv ( ) , sample_type , univ_options , bwa_options [ 'picard' ] , disk = PromisedRequirement ( regroup_disk , reheader . rv ( ) ) , memory = univ_options [ 'java_Xmx' ] ) mkdup = job . wrapJobFn ( mark_duplicates , regroup . rv ( ) , sample_type , univ_options , bwa_options [ 'picard' ] , disk = PromisedRequirement ( mkdup_disk , regroup . rv ( ) ) , memory = univ_options [ 'java_Xmx' ] ) index = job . wrapJobFn ( index_bamfile , mkdup . rv ( ) , sample_type , univ_options , bwa_options [ 'samtools' ] , sample_info = 'fix_pg_sorted' , disk = PromisedRequirement ( index_disk , mkdup . rv ( ) ) , memory = univ_options [ 'java_Xmx' ] ) job . addChild ( bwa ) bwa . addChild ( sam2bam ) sam2bam . addChild ( reheader ) reheader . addChild ( regroup ) regroup . addChild ( mkdup ) mkdup . addChild ( index ) return index . rv ( ) | A wrapper for the entire dna alignment subgraph . | 688 | 11 |
2,676 | def run_bwa ( job , fastqs , sample_type , univ_options , bwa_options ) : work_dir = os . getcwd ( ) input_files = { 'dna_1.fastq' : fastqs [ 0 ] , 'dna_2.fastq' : fastqs [ 1 ] , 'bwa_index.tar.gz' : bwa_options [ 'index' ] } input_files = get_files_from_filestore ( job , input_files , work_dir , docker = False ) # Handle gzipped file gz = '.gz' if is_gzipfile ( input_files [ 'dna_1.fastq' ] ) else '' if gz : for read_file in 'dna_1.fastq' , 'dna_2.fastq' : os . symlink ( read_file , read_file + gz ) input_files [ read_file + gz ] = input_files [ read_file ] + gz # Untar the index input_files [ 'bwa_index' ] = untargz ( input_files [ 'bwa_index.tar.gz' ] , work_dir ) input_files = { key : docker_path ( path ) for key , path in input_files . items ( ) } parameters = [ 'mem' , '-t' , str ( bwa_options [ 'n' ] ) , '-v' , '1' , # Don't print INFO messages to the stderr '/' . join ( [ input_files [ 'bwa_index' ] , univ_options [ 'ref' ] ] ) , input_files [ 'dna_1.fastq' + gz ] , input_files [ 'dna_2.fastq' + gz ] ] with open ( '' . join ( [ work_dir , '/' , sample_type , '.sam' ] ) , 'w' ) as samfile : docker_call ( tool = 'bwa' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] , outfile = samfile , tool_version = bwa_options [ 'version' ] ) # samfile.name retains the path info output_file = job . fileStore . writeGlobalFile ( samfile . name ) job . fileStore . logToMaster ( 'Ran bwa on %s:%s successfully' % ( univ_options [ 'patient' ] , sample_type ) ) return output_file | Align a pair of fastqs with bwa . | 569 | 11 |
2,677 | def bam_conversion ( job , samfile , sample_type , univ_options , samtools_options ) : work_dir = os . getcwd ( ) input_files = { sample_type + '.sam' : samfile } input_files = get_files_from_filestore ( job , input_files , work_dir , docker = True ) bamfile = '/' . join ( [ work_dir , sample_type + '.bam' ] ) parameters = [ 'view' , '-bS' , '-o' , docker_path ( bamfile ) , input_files [ sample_type + '.sam' ] ] docker_call ( tool = 'samtools' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] , tool_version = samtools_options [ 'version' ] ) output_file = job . fileStore . writeGlobalFile ( bamfile ) # The samfile is no longer useful so delete it job . fileStore . deleteGlobalFile ( samfile ) job . fileStore . logToMaster ( 'Ran sam2bam on %s:%s successfully' % ( univ_options [ 'patient' ] , sample_type ) ) return output_file | Convert a sam to a bam . | 285 | 9 |
2,678 | def fix_bam_header ( job , bamfile , sample_type , univ_options , samtools_options , retained_chroms = None ) : if retained_chroms is None : retained_chroms = [ ] work_dir = os . getcwd ( ) input_files = { sample_type + '.bam' : bamfile } input_files = get_files_from_filestore ( job , input_files , work_dir , docker = True ) parameters = [ 'view' , '-H' , input_files [ sample_type + '.bam' ] ] with open ( '/' . join ( [ work_dir , sample_type + '_input_bam.header' ] ) , 'w' ) as headerfile : docker_call ( tool = 'samtools' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] , outfile = headerfile , tool_version = samtools_options [ 'version' ] ) with open ( headerfile . name , 'r' ) as headerfile , open ( '/' . join ( [ work_dir , sample_type + '_output_bam.header' ] ) , 'w' ) as outheaderfile : for line in headerfile : if line . startswith ( '@PG' ) : line = '\t' . join ( [ x for x in line . strip ( ) . split ( '\t' ) if not x . startswith ( 'CL' ) ] ) if retained_chroms and line . startswith ( '@SQ' ) : if line . strip ( ) . split ( ) [ 1 ] . lstrip ( 'SN:' ) not in retained_chroms : continue print ( line . strip ( ) , file = outheaderfile ) parameters = [ 'reheader' , docker_path ( outheaderfile . name ) , input_files [ sample_type + '.bam' ] ] with open ( '/' . join ( [ work_dir , sample_type + '_fixPG.bam' ] ) , 'w' ) as fixpg_bamfile : docker_call ( tool = 'samtools' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] , outfile = fixpg_bamfile , tool_version = samtools_options [ 'version' ] ) output_file = job . fileStore . writeGlobalFile ( fixpg_bamfile . name ) # The old bam file is now useless. job . fileStore . deleteGlobalFile ( bamfile ) job . fileStore . logToMaster ( 'Ran reheader on %s:%s successfully' % ( univ_options [ 'patient' ] , sample_type ) ) return output_file | Fix the bam header to remove the command line call . Failing to do this causes Picard to reject the bam . | 631 | 25 |
2,679 | def add_readgroups ( job , bamfile , sample_type , univ_options , picard_options ) : work_dir = os . getcwd ( ) input_files = { sample_type + '.bam' : bamfile } get_files_from_filestore ( job , input_files , work_dir , docker = True ) parameters = [ 'AddOrReplaceReadGroups' , 'CREATE_INDEX=false' , 'I=/data/' + sample_type + '.bam' , 'O=/data/' + sample_type + '_reheader.bam' , 'SO=coordinate' , 'ID=1' , '' . join ( [ 'LB=' , univ_options [ 'patient' ] ] ) , 'PL=ILLUMINA' , 'PU=12345' , '' . join ( [ 'SM=' , sample_type . rstrip ( '_dna' ) ] ) ] docker_call ( tool = 'picard' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] , java_xmx = univ_options [ 'java_Xmx' ] , tool_version = picard_options [ 'version' ] ) output_file = job . fileStore . writeGlobalFile ( '/' . join ( [ work_dir , sample_type + '_reheader.bam' ] ) ) # Delete the old bam file job . fileStore . deleteGlobalFile ( bamfile ) job . fileStore . logToMaster ( 'Ran add_read_groups on %s:%s successfully' % ( univ_options [ 'patient' ] , sample_type ) ) return output_file | Add read groups to the bam . | 389 | 8 |
2,680 | def weekday ( cls , year , month , day ) : return NepDate . from_bs_date ( year , month , day ) . weekday ( ) | Returns the weekday of the date . 0 = aaitabar | 33 | 13 |
2,681 | def monthrange ( cls , year , month ) : functions . check_valid_bs_range ( NepDate ( year , month , 1 ) ) return values . NEPALI_MONTH_DAY_DATA [ year ] [ month - 1 ] | Returns the number of days in a month | 54 | 8 |
2,682 | def itermonthdays ( cls , year , month ) : for day in NepCal . itermonthdates ( year , month ) : if day . month == month : yield day . day else : yield 0 | Similar to itermonthdates but returns day number instead of NepDate object | 43 | 14 |
2,683 | def itermonthdays2 ( cls , year , month ) : for day in NepCal . itermonthdates ( year , month ) : if day . month == month : yield ( day . day , day . weekday ( ) ) else : yield ( 0 , day . weekday ( ) ) | Similar to itermonthdays2 but returns tuples of day and weekday . | 60 | 15 |
2,684 | def monthdatescalendar ( cls , year , month ) : weeks = [ ] week = [ ] for day in NepCal . itermonthdates ( year , month ) : week . append ( day ) if len ( week ) == 7 : weeks . append ( week ) week = [ ] if len ( week ) > 0 : weeks . append ( week ) return weeks | Returns a list of week in a month . A week is a list of NepDate objects | 77 | 18 |
2,685 | def monthdayscalendar ( cls , year , month ) : weeks = [ ] week = [ ] for day in NepCal . itermonthdays ( year , month ) : week . append ( day ) if len ( week ) == 7 : weeks . append ( week ) week = [ ] if len ( week ) > 0 : weeks . append ( week ) return weeks | Return a list of the weeks in the month month of the year as full weeks . Weeks are lists of seven day numbers . | 77 | 25 |
2,686 | def monthdays2calendar ( cls , year , month ) : weeks = [ ] week = [ ] for day in NepCal . itermonthdays2 ( year , month ) : week . append ( day ) if len ( week ) == 7 : weeks . append ( week ) week = [ ] if len ( week ) > 0 : weeks . append ( week ) return weeks | Return a list of the weeks in the month month of the year as full weeks . Weeks are lists of seven tuples of day numbers and weekday numbers . | 79 | 31 |
2,687 | def run_somaticsniper_with_merge ( job , tumor_bam , normal_bam , univ_options , somaticsniper_options ) : spawn = job . wrapJobFn ( run_somaticsniper , tumor_bam , normal_bam , univ_options , somaticsniper_options , split = False ) . encapsulate ( ) job . addChild ( spawn ) return spawn . rv ( ) | A wrapper for the the entire SomaticSniper sub - graph . | 97 | 14 |
2,688 | def run_somaticsniper ( job , tumor_bam , normal_bam , univ_options , somaticsniper_options , split = True ) : # Get a list of chromosomes to handle if somaticsniper_options [ 'chromosomes' ] : chromosomes = somaticsniper_options [ 'chromosomes' ] else : chromosomes = sample_chromosomes ( job , somaticsniper_options [ 'genome_fai' ] ) perchrom_somaticsniper = defaultdict ( ) snipe = job . wrapJobFn ( run_somaticsniper_full , tumor_bam , normal_bam , univ_options , somaticsniper_options , disk = PromisedRequirement ( sniper_disk , tumor_bam [ 'tumor_dna_fix_pg_sorted.bam' ] , normal_bam [ 'normal_dna_fix_pg_sorted.bam' ] , somaticsniper_options [ 'genome_fasta' ] ) , memory = '6G' ) pileup = job . wrapJobFn ( run_pileup , tumor_bam , univ_options , somaticsniper_options , disk = PromisedRequirement ( pileup_disk , tumor_bam [ 'tumor_dna_fix_pg_sorted.bam' ] , somaticsniper_options [ 'genome_fasta' ] ) , memory = '6G' ) filtersnipes = job . wrapJobFn ( filter_somaticsniper , tumor_bam , snipe . rv ( ) , pileup . rv ( ) , univ_options , somaticsniper_options , disk = PromisedRequirement ( sniper_filter_disk , tumor_bam [ 'tumor_dna_fix_pg_sorted.bam' ] , somaticsniper_options [ 'genome_fasta' ] ) , memory = '6G' ) job . addChild ( snipe ) job . addChild ( pileup ) snipe . addChild ( filtersnipes ) pileup . addChild ( filtersnipes ) if split : unmerge_snipes = job . wrapJobFn ( unmerge , filtersnipes . rv ( ) , 'somaticsniper' , chromosomes , somaticsniper_options , univ_options ) filtersnipes . addChild ( unmerge_snipes ) return unmerge_snipes . rv ( ) else : return filtersnipes . rv ( ) | Run the SomaticSniper subgraph on the DNA bams . Optionally split the results into per - chromosome vcfs . | 559 | 27 |
2,689 | def run_somaticsniper_full ( job , tumor_bam , normal_bam , univ_options , somaticsniper_options ) : work_dir = os . getcwd ( ) input_files = { 'tumor.bam' : tumor_bam [ 'tumor_dna_fix_pg_sorted.bam' ] , 'tumor.bam.bai' : tumor_bam [ 'tumor_dna_fix_pg_sorted.bam.bai' ] , 'normal.bam' : normal_bam [ 'normal_dna_fix_pg_sorted.bam' ] , 'normal.bam.bai' : normal_bam [ 'normal_dna_fix_pg_sorted.bam.bai' ] , 'genome.fa.tar.gz' : somaticsniper_options [ 'genome_fasta' ] , 'genome.fa.fai.tar.gz' : somaticsniper_options [ 'genome_fai' ] } input_files = get_files_from_filestore ( job , input_files , work_dir , docker = False ) for key in ( 'genome.fa' , 'genome.fa.fai' ) : input_files [ key ] = untargz ( input_files [ key + '.tar.gz' ] , work_dir ) input_files = { key : docker_path ( path ) for key , path in input_files . items ( ) } output_file = os . path . join ( work_dir , 'somatic-sniper_full.vcf' ) parameters = [ '-f' , input_files [ 'genome.fa' ] , '-F' , 'vcf' , '-G' , '-L' , '-q' , '1' , '-Q' , '15' , input_files [ 'tumor.bam' ] , input_files [ 'normal.bam' ] , docker_path ( output_file ) ] docker_call ( tool = 'somaticsniper' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] , tool_version = somaticsniper_options [ 'version' ] ) outfile = job . fileStore . writeGlobalFile ( output_file ) job . fileStore . logToMaster ( 'Ran SomaticSniper on %s successfully' % univ_options [ 'patient' ] ) return outfile | Run SomaticSniper on the DNA bams . | 577 | 11 |
2,690 | def run_pileup ( job , tumor_bam , univ_options , somaticsniper_options ) : work_dir = os . getcwd ( ) input_files = { 'tumor.bam' : tumor_bam [ 'tumor_dna_fix_pg_sorted.bam' ] , 'tumor.bam.bai' : tumor_bam [ 'tumor_dna_fix_pg_sorted.bam.bai' ] , 'genome.fa.tar.gz' : somaticsniper_options [ 'genome_fasta' ] , 'genome.fa.fai.tar.gz' : somaticsniper_options [ 'genome_fai' ] } input_files = get_files_from_filestore ( job , input_files , work_dir , docker = False ) for key in ( 'genome.fa' , 'genome.fa.fai' ) : input_files [ key ] = untargz ( input_files [ key + '.tar.gz' ] , work_dir ) input_files = { key : docker_path ( path ) for key , path in input_files . items ( ) } parameters = [ 'pileup' , '-cvi' , '-f' , docker_path ( input_files [ 'genome.fa' ] ) , docker_path ( input_files [ 'tumor.bam' ] ) ] with open ( os . path . join ( work_dir , 'pileup.txt' ) , 'w' ) as pileup_file : docker_call ( tool = 'samtools' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] , outfile = pileup_file , tool_version = somaticsniper_options [ 'samtools' ] [ 'version' ] ) outfile = job . fileStore . writeGlobalFile ( pileup_file . name ) job . fileStore . logToMaster ( 'Ran samtools pileup on %s successfully' % univ_options [ 'patient' ] ) return outfile | Runs a samtools pileup on the tumor bam . | 486 | 13 |
2,691 | def get_action_cache_key ( name , argument ) : tokens = [ str ( name ) ] if argument : tokens . append ( str ( argument ) ) return '::' . join ( tokens ) | Get an action cache key string . | 43 | 7 |
2,692 | def removed_or_inserted_action ( mapper , connection , target ) : current_access . delete_action_cache ( get_action_cache_key ( target . action , target . argument ) ) | Remove the action from cache when an item is inserted or deleted . | 45 | 13 |
2,693 | def changed_action ( mapper , connection , target ) : action_history = get_history ( target , 'action' ) argument_history = get_history ( target , 'argument' ) owner_history = get_history ( target , 'user' if isinstance ( target , ActionUsers ) else 'role' if isinstance ( target , ActionRoles ) else 'role_name' ) if action_history . has_changes ( ) or argument_history . has_changes ( ) or owner_history . has_changes ( ) : current_access . delete_action_cache ( get_action_cache_key ( target . action , target . argument ) ) current_access . delete_action_cache ( get_action_cache_key ( action_history . deleted [ 0 ] if action_history . deleted else target . action , argument_history . deleted [ 0 ] if argument_history . deleted else target . argument ) ) | Remove the action from cache when an item is updated . | 199 | 11 |
2,694 | def allow ( cls , action , * * kwargs ) : return cls . create ( action , exclude = False , * * kwargs ) | Allow the given action need . | 33 | 6 |
2,695 | def deny ( cls , action , * * kwargs ) : return cls . create ( action , exclude = True , * * kwargs ) | Deny the given action need . | 33 | 7 |
2,696 | def query_by_action ( cls , action , argument = None ) : query = cls . query . filter_by ( action = action . value ) argument = argument or getattr ( action , 'argument' , None ) if argument is not None : query = query . filter ( db . or_ ( cls . argument == str ( argument ) , cls . argument . is_ ( None ) , ) ) else : query = query . filter ( cls . argument . is_ ( None ) ) return query | Prepare query object with filtered action . | 110 | 8 |
2,697 | def predict_mhci_binding ( job , peptfile , allele , peplen , univ_options , mhci_options ) : work_dir = os . getcwd ( ) input_files = { 'peptfile.faa' : peptfile } input_files = get_files_from_filestore ( job , input_files , work_dir , docker = True ) peptides = read_peptide_file ( os . path . join ( os . getcwd ( ) , 'peptfile.faa' ) ) if not peptides : return job . fileStore . writeGlobalFile ( job . fileStore . getLocalTempFile ( ) ) parameters = [ mhci_options [ 'pred' ] , allele , peplen , input_files [ 'peptfile.faa' ] ] with open ( '/' . join ( [ work_dir , 'predictions.tsv' ] ) , 'w' ) as predfile : docker_call ( tool = 'mhci' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] , outfile = predfile , interactive = True , tool_version = mhci_options [ 'version' ] ) output_file = job . fileStore . writeGlobalFile ( predfile . name ) job . fileStore . logToMaster ( 'Ran mhci on %s:%s:%s successfully' % ( univ_options [ 'patient' ] , allele , peplen ) ) return output_file | Predict binding for each peptide in peptfile to allele using the IEDB mhci binding prediction tool . | 348 | 24 |
2,698 | def iter_and_close ( file_like , block_size ) : while 1 : try : block = file_like . read ( block_size ) if block : yield block else : raise StopIteration except StopIteration : file_like . close ( ) return | Yield file contents by block then close the file . | 57 | 11 |
2,699 | def cling_wrap ( package_name , dir_name , * * kw ) : resource = Requirement . parse ( package_name ) return Cling ( resource_filename ( resource , dir_name ) , * * kw ) | Return a Cling that serves from the given package and dir_name . | 50 | 15 |
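
The rows above follow a simple schema: an integer `idx`, a tokenized Python function (`question`), its docstring (`target`), and the token counts `len_question` and `len_target`. Below is a minimal sketch of iterating and filtering rows of this schema, assuming the data has been exported locally as JSON Lines; the filename is hypothetical, since this dump does not name the dataset or its location.

```python
import json

# Hypothetical local export of the rows shown above; the dump
# does not give the actual dataset name or download location.
DATA_PATH = "code_docstring_pairs.jsonl"

def iter_rows(path):
    """Yield one dict per dataset row: idx, question, target, len_question, len_target."""
    with open(path, "r", encoding="utf-8") as f:
        for line in f:
            yield json.loads(line)

# Example: keep short functions that still have reasonably detailed docstrings.
short_well_documented = [
    row for row in iter_rows(DATA_PATH)
    if row["len_question"] < 100 and row["len_target"] >= 10
]
print(f"{len(short_well_documented)} rows match the filter")
```

Because `question` holds whitespace-tokenized source, a simple `row["question"].split()` recovers the token stream used for the length columns.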