idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
29,000
def nextline ( self ) : linebuf = b'' linepos = self . bufpos + self . charpos eol = False while 1 : self . fillbuf ( ) if eol : c = self . buf [ self . charpos ] if c == b'\n' : linebuf += c self . charpos += 1 break m = EOL . search ( self . buf , self . charpos ) if m : linebuf += self . buf [ self . charpos : m . e...
Fetches a next line that ends either with \\ r or \\ n .
29,001
def revreadlines ( self ) : self . fp . seek ( 0 , 2 ) pos = self . fp . tell ( ) buf = b'' while 0 < pos : prevpos = pos pos = max ( 0 , pos - self . BUFSIZ ) self . fp . seek ( pos ) s = self . fp . read ( prevpos - pos ) if not s : break while 1 : n = max ( s . rfind ( b'\r' ) , s . rfind ( b'\n' ) ) if n == - 1 : b...
Fetches a next line backward .
29,002
def nextobject ( self ) : while not self . results : ( pos , token ) = self . nexttoken ( ) if isinstance ( token , ( int , long , float , bool , str , PSLiteral ) ) : self . push ( ( pos , token ) ) elif token == KEYWORD_ARRAY_BEGIN : self . start_type ( pos , 'a' ) elif token == KEYWORD_ARRAY_END : try : self . push ...
Yields a list of objects .
29,003
def name2unicode ( name ) : if name in glyphname2unicode : return glyphname2unicode [ name ] m = STRIP_NAME . search ( name ) if not m : raise KeyError ( name ) return unichr ( int ( m . group ( 0 ) ) )
Converts Adobe glyph names to Unicode numbers .
29,004
def resolve1 ( x , default = None ) : while isinstance ( x , PDFObjRef ) : x = x . resolve ( default = default ) return x
Resolves an object .
29,005
def resolve_all ( x , default = None ) : while isinstance ( x , PDFObjRef ) : x = x . resolve ( default = default ) if isinstance ( x , list ) : x = [ resolve_all ( v , default = default ) for v in x ] elif isinstance ( x , dict ) : for ( k , v ) in x . iteritems ( ) : x [ k ] = resolve_all ( v , default = default ) re...
Recursively resolves the given object and all the internals .
29,006
def decipher_all ( decipher , objid , genno , x ) : if isinstance ( x , str ) : return decipher ( objid , genno , x ) if isinstance ( x , list ) : x = [ decipher_all ( decipher , objid , genno , v ) for v in x ] elif isinstance ( x , dict ) : for ( k , v ) in x . iteritems ( ) : x [ k ] = decipher_all ( decipher , obji...
Recursively deciphers the given object .
29,007
def find_xref ( self , parser ) : prev = None for line in parser . revreadlines ( ) : line = line . strip ( ) if self . debug : logging . debug ( 'find_xref: %r' % line ) if line == b'startxref' : break if line : prev = line else : raise PDFNoValidXRef ( 'Unexpected EOF' ) if self . debug : logging . info ( 'xref found...
Internal function used to locate the first XRef .
29,008
def read_xref_from ( self , parser , start , xrefs ) : parser . seek ( start ) parser . reset ( ) try : ( pos , token ) = parser . nexttoken ( ) except PSEOF : raise PDFNoValidXRef ( 'Unexpected EOF' ) if self . debug : logging . info ( 'read_xref_from: start=%d, token=%r' % ( start , token ) ) if isinstance ( token , ...
Reads XRefs from the given location .
29,009
def mult_matrix ( m1 , m0 ) : ( a1 , b1 , c1 , d1 , e1 , f1 ) = m1 ( a0 , b0 , c0 , d0 , e0 , f0 ) = m0 return ( a0 * a1 + c0 * b1 , b0 * a1 + d0 * b1 , a0 * c1 + c0 * d1 , b0 * c1 + d0 * d1 , a0 * e1 + c0 * f1 + e0 , b0 * e1 + d0 * f1 + f0 )
Returns the multiplication of two matrices .
29,010
def uniq ( objs ) : done = set ( ) for obj in objs : if obj in done : continue done . add ( obj ) yield obj return
Eliminates duplicated elements .
29,011
def csort ( objs , key ) : idxs = dict ( ( obj , i ) for ( i , obj ) in enumerate ( objs ) ) return sorted ( objs , key = lambda obj : ( key ( obj ) , idxs [ obj ] ) )
Order - preserving sorting function .
29,012
def fsplit ( pred , objs ) : t = [ ] f = [ ] for obj in objs : if pred ( obj ) : t . append ( obj ) else : f . append ( obj ) return ( t , f )
Split a list into two classes according to the predicate .
29,013
def drange ( v0 , v1 , d ) : assert v0 < v1 return xrange ( int ( v0 ) // d , int ( v1 + d ) // d )
Returns a discrete range .
29,014
def get_bound ( pts ) : ( x0 , y0 , x1 , y1 ) = ( INF , INF , - INF , - INF ) for ( x , y ) in pts : x0 = min ( x0 , x ) y0 = min ( y0 , y ) x1 = max ( x1 , x ) y1 = max ( y1 , y ) return ( x0 , y0 , x1 , y1 )
Compute a minimal rectangle that covers all the points .
29,015
def choplist ( n , seq ) : r = [ ] for x in seq : r . append ( x ) if len ( r ) == n : yield tuple ( r ) r = [ ] return
Groups every n elements of the list .
29,016
def decode_text ( s ) : if s . startswith ( b'\xfe\xff' ) : return unicode ( s [ 2 : ] , 'utf-16be' , 'ignore' ) else : return '' . join ( PDFDocEncoding [ ord ( c ) ] for c in s )
Decodes a PDFDocEncoding string to Unicode .
29,017
def do_keyword ( self , pos , token ) : if token in ( self . KEYWORD_XREF , self . KEYWORD_STARTXREF ) : self . add_results ( * self . pop ( 1 ) ) elif token is self . KEYWORD_ENDOBJ : self . add_results ( * self . pop ( 4 ) ) elif token is self . KEYWORD_NULL : self . push ( ( pos , None ) ) elif token is self . KEYWO...
Handles PDF - related keywords .
29,018
def generate_help_text ( ) : def generate_cmds_with_explanations ( summary , cmds ) : text = '{0}:\n' . format ( summary ) for cmd , explanation in cmds : text += '\t{0:<10}\t{1:<20}\n' . format ( cmd , explanation ) return text + '\n' text = generate_cmds_with_explanations ( 'Commands' , ROOT_COMMANDS . items ( ) ) te...
Return a formatted string listing commands HTTPie options and HTTP actions .
29,019
def colformat ( strings , num_sep_spaces = 1 , terminal_width = None ) : if terminal_width is None : terminal_width = get_terminal_size ( ) . columns if not strings : return num_items = len ( strings ) max_len = max ( [ len ( strip_ansi_escapes ( s ) ) for s in strings ] ) num_columns = min ( int ( ( terminal_width + n...
Format a list of strings like ls does multi - column output .
29,020
def load_context ( context , file_path = None ) : if not file_path : file_path = _get_context_filepath ( ) if os . path . exists ( file_path ) : with io . open ( file_path , encoding = 'utf-8' ) as f : for line in f : execute ( line , context )
Load a Context object in place from user data directory .
29,021
def save_context ( context ) : file_path = _get_context_filepath ( ) content = format_to_http_prompt ( context , excluded_options = EXCLUDED_OPTIONS ) with io . open ( file_path , 'w' , encoding = 'utf-8' ) as f : f . write ( content )
Save a Context object to user data directory .
29,022
def extract_args_for_httpie_main ( context , method = None ) : args = _extract_httpie_options ( context ) if method : args . append ( method . upper ( ) ) args . append ( context . url ) args += _extract_httpie_request_items ( context ) return args
Transform a Context object to a list of arguments that can be passed to HTTPie main function .
29,023
def format_to_httpie ( context , method = None ) : cmd = [ 'http' ] + _extract_httpie_options ( context , quote = True , join_key_value = True ) if method : cmd . append ( method . upper ( ) ) cmd . append ( context . url ) cmd += _extract_httpie_request_items ( context , quote = True ) return ' ' . join ( cmd ) + '\n'
Format a Context object to an HTTPie command .
29,024
def format_to_http_prompt ( context , excluded_options = None ) : cmds = _extract_httpie_options ( context , quote = True , join_key_value = True , excluded_keys = excluded_options ) cmds . append ( 'cd ' + smart_quote ( context . url ) ) cmds += _extract_httpie_request_items ( context , quote = True ) return '\n' . jo...
Format a Context object to HTTP Prompt commands .
29,025
def initialize ( ) : dst_path = get_user_config_path ( ) copied = False if not os . path . exists ( dst_path ) : src_path = os . path . join ( os . path . dirname ( __file__ ) , 'defaultconfig.py' ) shutil . copyfile ( src_path , dst_path ) copied = True return copied , dst_path
Initialize a default config file if it doesn t exist yet .
29,026
def load_user ( ) : config_path = get_user_config_path ( ) config = { } with open ( config_path ) as f : code = compile ( f . read ( ) , config_path , 'exec' ) exec ( code , config ) keys = list ( six . iterkeys ( config ) ) for k in keys : if k . startswith ( '_' ) : del config [ k ] return config
Read user config file and return it as a dict .
29,027
def filter_single_reads_by_length ( in_file , quality_format , min_length = 20 , out_file = None ) : logger . info ( "Removing reads in %s thare are less than %d bases." % ( in_file , min_length ) ) in_iterator = SeqIO . parse ( in_file , quality_format ) out_iterator = ( record for record in in_iterator if len ( recor...
removes reads from a fastq file which are shorter than a minimum length
29,028
def filter_reads_by_length ( fq1 , fq2 , quality_format , min_length = 20 ) : logger . info ( "Removing reads in %s and %s that " "are less than %d bases." % ( fq1 , fq2 , min_length ) ) fq1_out = utils . append_stem ( fq1 , ".fixed" ) fq2_out = utils . append_stem ( fq2 , ".fixed" ) fq1_single = utils . append_stem ( ...
removes reads from a pair of fastq files that are shorter than a minimum length . removes both ends of a read if one end falls below the threshold while maintaining the order of the reads
29,029
def rstrip_extra ( fname ) : to_strip = ( "_R" , ".R" , "-R" , "_" , "fastq" , "." , "-" ) while fname . endswith ( to_strip ) : for x in to_strip : if fname . endswith ( x ) : fname = fname [ : len ( fname ) - len ( x ) ] break return fname
Strip extraneous non - discriminative filename info from the end of a file .
29,030
def fast_combine_pairs ( files , force_single , full_name , separators ) : files = sort_filenames ( files ) chunks = tz . sliding_window ( 10 , files ) pairs = [ combine_pairs ( chunk , force_single , full_name , separators ) for chunk in chunks ] pairs = [ y for x in pairs for y in x ] longest = defaultdict ( list ) f...
assume files that need to be paired are within 10 entries of each other once the list is sorted
29,031
def open_fastq ( in_file ) : if objectstore . is_remote ( in_file ) : return objectstore . open_file ( in_file ) else : return utils . open_gzipsafe ( in_file )
open a fastq file using gzip if it is gzipped
29,032
def get_region_bed ( region , items , out_file , want_gzip = True ) : variant_regions = bedutils . population_variant_regions ( items , merged = True ) target = shared . subset_variant_regions ( variant_regions , region , out_file , items ) if not target : raise ValueError ( "Need BED input for strelka2 regions: %s %s"...
Retrieve BED file of regions to analyze either single or multi - region .
29,033
def coverage_interval_from_bed ( bed_file , per_chrom = True ) : total_starts = { } total_ends = { } bed_bases = collections . defaultdict ( int ) with utils . open_gzipsafe ( bed_file ) as in_handle : for line in in_handle : parts = line . split ( ) if len ( parts ) >= 3 : chrom , start , end = parts [ : 3 ] if chromh...
Calculate a coverage interval for the current region BED .
29,034
def _is_targeted_region ( cur_bed , data ) : cores = dd . get_num_cores ( data ) if cores > 0 : return dd . get_coverage_interval ( data ) not in [ "genome" ] else : return coverage_interval_from_bed ( cur_bed , per_chrom = False ) == "targeted"
Calculate if we should process region as a targeted or WGS .
29,035
def _postprocess_somatic ( in_file , paired ) : out_file = in_file . replace ( ".vcf.gz" , "-fixed.vcf" ) if not utils . file_exists ( out_file ) and not utils . file_exists ( out_file + ".gz" ) : with file_transaction ( paired . tumor_data , out_file ) as tx_out_file : with utils . open_gzipsafe ( in_file ) as in_hand...
Post - process somatic calls to provide standard output .
29,036
def _run_workflow ( data , workflow_file , work_dir ) : utils . remove_safe ( os . path . join ( work_dir , "workspace" ) ) cmd = [ utils . get_program_python ( "configureStrelkaGermlineWorkflow.py" ) , workflow_file , "-m" , "local" , "-j" , dd . get_num_cores ( data ) , "--quiet" ] do . run ( cmd , "Run Strelka2: %s"...
Run Strelka2 analysis inside prepared workflow directory .
29,037
def run_gvcfgenotyper ( data , orig_region , vrn_files , out_file ) : if not utils . file_exists ( out_file ) : with file_transaction ( data , out_file ) as tx_out_file : regions = _find_gvcf_blocks ( vrn_files [ 0 ] , bamprep . region_to_gatk ( orig_region ) , os . path . dirname ( tx_out_file ) ) if len ( regions ) =...
Merge strelka2 and Illumina compatible gVCFs with gvcfgenotyper .
29,038
def _run_gvcfgenotyper ( data , region , vrn_files , out_file ) : if not utils . file_exists ( out_file ) : with file_transaction ( data , out_file ) as tx_out_file : input_file = "%s-inputs.txt" % utils . splitext_plus ( tx_out_file ) [ 0 ] with open ( input_file , "w" ) as out_handle : out_handle . write ( "%s\n" % "...
Run gvcfgenotyper on a single gVCF region in input file .
29,039
def _find_gvcf_blocks ( vcf_file , region , tmp_dir ) : region_file = os . path . join ( tmp_dir , "cur_region.bed" ) with open ( region_file , "w" ) as out_handle : chrom , coords = region . split ( ":" ) start , end = coords . split ( "-" ) out_handle . write ( "\t" . join ( [ chrom , start , end ] ) + "\n" ) final_f...
Retrieve gVCF blocks within our current evaluation region .
29,040
def run ( samples , run_parallel ) : to_process = [ ] extras = [ ] for data in ( xs [ 0 ] for xs in samples ) : hlacaller = tz . get_in ( [ "config" , "algorithm" , "hlacaller" ] , data ) if hlacaller : to_process . append ( data ) else : extras . append ( [ data ] ) processed = run_parallel ( "call_hla" , ( [ x ] for ...
Run HLA detection on the input samples .
29,041
def align_bam ( in_bam , ref_file , names , align_dir , data ) : config = data [ "config" ] out_file = os . path . join ( align_dir , "{0}-sort.bam" . format ( names [ "lane" ] ) ) samtools = config_utils . get_program ( "samtools" , config ) bedtools = config_utils . get_program ( "bedtools" , config ) resources = con...
Perform direct alignment of an input BAM file with BWA using pipes .
29,042
def _get_bwa_mem_cmd ( data , out_file , ref_file , fastq1 , fastq2 = "" ) : alt_file = ref_file + ".alt" if utils . file_exists ( alt_file ) and dd . get_hlacaller ( data ) : bwakit_dir = os . path . dirname ( os . path . realpath ( utils . which ( "run-bwamem" ) ) ) hla_base = os . path . join ( utils . safe_makedir ...
Perform piped bwa mem mapping potentially with alternative alleles in GRCh38 + HLA typing .
29,043
def _align_mem ( fastq_file , pair_file , ref_file , out_file , names , rg_info , data ) : with postalign . tobam_cl ( data , out_file , pair_file != "" ) as ( tobam_cl , tx_out_file ) : cmd = ( "unset JAVA_HOME && " "%s | %s" % ( _get_bwa_mem_cmd ( data , out_file , ref_file , fastq_file , pair_file ) , tobam_cl ) ) d...
Perform bwa - mem alignment on supported read lengths .
29,044
def _align_backtrack ( fastq_file , pair_file , ref_file , out_file , names , rg_info , data ) : bwa = config_utils . get_program ( "bwa" , data [ "config" ] ) config = data [ "config" ] sai1_file = "%s_1.sai" % os . path . splitext ( out_file ) [ 0 ] sai2_file = "%s_2.sai" % os . path . splitext ( out_file ) [ 0 ] if ...
Perform a BWA alignment using aln backtrack algorithm .
29,045
def run_main ( workdir , config_file = None , fc_dir = None , run_info_yaml = None , parallel = None , workflow = None ) : os . environ [ "LC_ALL" ] = "C" os . environ [ "LC" ] = "C" os . environ [ "LANG" ] = "C" workdir = utils . safe_makedir ( os . path . abspath ( workdir ) ) os . chdir ( workdir ) config , config_f...
Run variant analysis handling command line options .
29,046
def _setup_resources ( ) : target_procs = 10240 cur_proc , max_proc = resource . getrlimit ( resource . RLIMIT_NPROC ) target_proc = min ( max_proc , target_procs ) if max_proc > 0 else target_procs resource . setrlimit ( resource . RLIMIT_NPROC , ( max ( cur_proc , target_proc ) , max_proc ) ) cur_hdls , max_hdls = re...
Attempt to increase resource limits up to hard limits .
29,047
def _wres ( parallel , progs , fresources = None , ensure_mem = None ) : parallel = copy . deepcopy ( parallel ) parallel [ "progs" ] = progs if fresources : parallel [ "fresources" ] = fresources if ensure_mem : parallel [ "ensure_mem" ] = ensure_mem return parallel
Add resource information to the parallel environment on required programs and files .
29,048
def rnaseq_prep_samples ( config , run_info_yaml , parallel , dirs , samples ) : pipeline = dd . get_in_samples ( samples , dd . get_analysis ) trim_reads_set = any ( [ tz . get_in ( [ "algorithm" , "trim_reads" ] , d ) for d in dd . sample_data_iterator ( samples ) ] ) resources = [ "picard" ] needs_trimming = ( _is_s...
organizes RNA - seq and small - RNAseq samples converting from BAM if necessary and trimming if necessary
29,049
def _pair_samples_with_pipelines ( run_info_yaml , config ) : samples = config_utils . load_config ( run_info_yaml ) if isinstance ( samples , dict ) : resources = samples . pop ( "resources" ) samples = samples [ "details" ] else : resources = { } ready_samples = [ ] for sample in samples : if "files" in sample : del ...
Map samples defined in input file to pipelines to run .
29,050
def run ( data ) : bwakit_dir = os . path . dirname ( os . path . realpath ( utils . which ( "run-bwamem" ) ) ) hla_fqs = tz . get_in ( [ "hla" , "fastq" ] , data , [ ] ) if len ( hla_fqs ) > 0 : hla_base = os . path . commonprefix ( hla_fqs ) while hla_base . endswith ( "." ) : hla_base = hla_base [ : - 1 ] out_file =...
HLA typing with bwakit parsing output from called genotype files .
29,051
def _organize_calls ( out_file , hla_base , data ) : hla_truth = get_hla_truthset ( data ) sample = dd . get_sample_name ( data ) with file_transaction ( data , out_file ) as tx_out_file : with open ( tx_out_file , "w" ) as out_handle : writer = csv . writer ( out_handle ) writer . writerow ( [ "sample" , "locus" , "mi...
Prepare genotype calls reporting best call along with quality metrics .
29,052
def matches_truth ( call_alleles , truth_alleles , data ) : if not truth_alleles : return "" else : def _remove_p ( x ) : return x [ : - 1 ] if x . endswith ( "P" ) else x t_cmp = set ( [ _remove_p ( hla_groups . hla_protein ( x , data ) ) for x in truth_alleles ] ) c_cmp = set ( [ _remove_p ( hla_groups . hla_protein ...
Flexibly check if truth and call alleles match using p - groups .
29,053
def get_hla_truthset ( data ) : val_csv = tz . get_in ( [ "config" , "algorithm" , "hlavalidate" ] , data ) out = { } if val_csv and utils . file_exists ( val_csv ) : with open ( val_csv ) as in_handle : reader = csv . reader ( in_handle ) next ( reader ) for sample , locus , alleles in ( l for l in reader if l ) : out...
Retrieve expected truth calls for annotating HLA called output .
29,054
def bam_to_fastq_pair ( in_file , target_region , pair ) : space , start , end = target_region bam_file = pysam . Samfile ( in_file , "rb" ) for read in bam_file : if ( not read . is_unmapped and not read . mate_is_unmapped and bam_file . getrname ( read . tid ) == space and bam_file . getrname ( read . mrnm ) == space...
Generator to convert BAM files into name seq qual in a region .
29,055
def sample_callable_bed ( bam_file , ref_file , data ) : from bcbio . heterogeneity import chromhacks CovInfo = collections . namedtuple ( "CovInfo" , "callable, raw_callable, depth_files" ) noalt_calling = "noalt_calling" in dd . get_tools_on ( data ) or "altcontigs" in dd . get_exclude_regions ( data ) def callable_c...
Retrieve callable regions for a sample subset by defined analysis regions .
29,056
def get_ref_bedtool ( ref_file , config , chrom = None ) : broad_runner = broad . runner_from_path ( "picard" , config ) ref_dict = broad_runner . run_fn ( "picard_index_ref" , ref_file ) ref_lines = [ ] with pysam . Samfile ( ref_dict , "r" ) as ref_sam : for sq in ref_sam . header [ "SQ" ] : if not chrom or sq [ "SN"...
Retrieve a pybedtool BedTool object with reference sizes from input reference .
29,057
def _get_nblock_regions ( in_file , min_n_size , ref_regions ) : out_lines = [ ] called_contigs = set ( [ ] ) with utils . open_gzipsafe ( in_file ) as in_handle : for line in in_handle : contig , start , end , ctype = line . rstrip ( ) . split ( ) called_contigs . add ( contig ) if ( ctype in [ "REF_N" , "NO_COVERAGE"...
Retrieve coordinates of regions in reference genome with no mapping . These are potential breakpoints for parallelizing analysis .
29,058
def _combine_regions ( all_regions , ref_regions ) : chrom_order = { } for i , x in enumerate ( ref_regions ) : chrom_order [ x . chrom ] = i def wchrom_key ( x ) : chrom , start , end = x return ( chrom_order [ chrom ] , start , end ) all_intervals = [ ] for region_group in all_regions : for region in region_group : a...
Combine multiple BEDtools regions of regions into sorted final BEDtool .
29,059
def _add_config_regions ( nblock_regions , ref_regions , data ) : input_regions_bed = dd . get_variant_regions ( data ) if input_regions_bed : input_regions = pybedtools . BedTool ( input_regions_bed ) if len ( input_regions ) == 1 : str_regions = str ( input_regions [ 0 ] ) . strip ( ) input_regions = pybedtools . Bed...
Add additional nblock regions based on configured regions to call . Identifies user defined regions which we should not be analyzing .
29,060
def block_regions ( callable_bed , in_bam , ref_file , data ) : min_n_size = int ( data [ "config" ] [ "algorithm" ] . get ( "nomap_split_size" , 250 ) ) with shared . bedtools_tmpdir ( data ) : nblock_bed = "%s-nblocks.bed" % utils . splitext_plus ( callable_bed ) [ 0 ] callblock_bed = "%s-callableblocks.bed" % utils ...
Find blocks of regions for analysis from mapped input BAM file .
29,061
def _analysis_block_stats ( regions , samples ) : prev = None between_sizes = [ ] region_sizes = [ ] for region in regions : if prev and prev . chrom == region . chrom : between_sizes . append ( region . start - prev . end ) region_sizes . append ( region . end - region . start ) prev = region def descriptive_stats ( x...
Provide statistics on sizes and number of analysis blocks .
29,062
def _needs_region_update ( out_file , samples ) : nblock_files = [ x [ "regions" ] [ "nblock" ] for x in samples if "regions" in x ] for nblock_file in nblock_files : test_old = nblock_file . replace ( "-nblocks" , "-analysisblocks" ) if os . path . exists ( test_old ) : return False for noblock_file in nblock_files : ...
Check if we need to update BED file of regions supporting back compatibility .
29,063
def combine_sample_regions ( * samples ) : samples = utils . unpack_worlds ( samples ) samples = cwlutils . unpack_tarballs ( samples , samples [ 0 ] ) global_analysis_file = os . path . join ( samples [ 0 ] [ "dirs" ] [ "work" ] , "analysis_blocks.bed" ) if utils . file_exists ( global_analysis_file ) and not _needs_r...
Create batch - level sets of callable regions for multi - sample calling .
29,064
def _combine_sample_regions_batch ( batch , items ) : config = items [ 0 ] [ "config" ] work_dir = utils . safe_makedir ( os . path . join ( items [ 0 ] [ "dirs" ] [ "work" ] , "regions" ) ) analysis_file = os . path . join ( work_dir , "%s-analysis_blocks.bed" % batch ) no_analysis_file = os . path . join ( work_dir ,...
Combine sample regions within a group of batched samples .
29,065
def get_split_regions ( bed_file , data ) : out_file = "%s-analysis_blocks.bed" % utils . splitext_plus ( bed_file ) [ 0 ] with shared . bedtools_tmpdir ( data ) : if not utils . file_uptodate ( out_file , bed_file ) : ref_regions = get_ref_bedtool ( dd . get_ref_file ( data ) , data [ "config" ] ) nblock_regions = ref...
Retrieve a set of split regions using the input BED for callable regions .
29,066
def include_block ( self , x ) : last_pos = self . _chr_last_blocks . get ( x . chrom , 0 ) if last_pos <= self . _end_buffer and x . stop >= self . _ref_sizes . get ( x . chrom , 0 ) - self . _end_buffer : return True elif self . _ref_sizes . get ( x . chrom , 0 ) <= self . _target_size : return False elif ( x . start...
Check for inclusion of block based on distance from previous .
29,067
def expand_block ( self , feat ) : chrom_end = self . _ref_sizes . get ( feat . chrom ) if chrom_end : if feat . start < self . _end_buffer : feat . start = 0 if feat . stop >= chrom_end - self . _end_buffer : feat . stop = chrom_end return feat
Expand any blocks which are near the start or end of a contig .
29,068
def _keep_assembled_chrom ( bam_file , genome , config ) : fai = "%s.fai" % genome chrom = [ ] with open ( fai ) as inh : for line in inh : c = line . split ( "\t" ) [ 0 ] if c . find ( "_" ) < 0 : chrom . append ( c ) chroms = " " . join ( chrom ) out_file = utils . append_stem ( bam_file , '_chrom' ) samtools = confi...
Remove contigs from the BAM file
29,069
def _prepare_bam ( bam_file , bed_file , config ) : if not bam_file or not bed_file : return bam_file out_file = utils . append_stem ( bam_file , '_filter' ) bedtools = config_utils . get_program ( "bedtools" , config ) if not utils . file_exists ( out_file ) : with file_transaction ( out_file ) as tx_out : cmd = "{bed...
Remove regions from bed files
29,070
def _bam_coverage ( name , bam_input , data ) : cmd = ( "{bam_coverage} -b {bam_input} -o {bw_output} " "--binSize 20 --effectiveGenomeSize {size} " "--smoothLength 60 --extendReads 150 --centerReads -p {cores}" ) size = bam . fasta . total_sequence_length ( dd . get_ref_file ( data ) ) cores = dd . get_num_cores ( dat...
Run bamCoverage from deeptools
29,071
def _get_out_file ( work_dir , paired ) : if paired : if paired . normal_bam : base_file = "somaticSV.vcf.gz" else : base_file = "tumorSV.vcf.gz" else : base_file = "diploidSV.vcf.gz" return os . path . join ( work_dir , "results" , "variants" , base_file )
Retrieve manta output variant file depending on analysis .
29,072
def _get_evidence_bam ( work_dir , data ) : evidence_bam = glob . glob ( os . path . join ( work_dir , "results" , "evidence" , "evidence_*.%s*.bam" % ( dd . get_sample_name ( data ) ) ) ) if evidence_bam : return evidence_bam [ 0 ]
Retrieve evidence BAM for the sample if it exists
29,073
def _run_workflow ( items , paired , workflow_file , work_dir ) : utils . remove_safe ( os . path . join ( work_dir , "workspace" ) ) data = paired . tumor_data if paired else items [ 0 ] cmd = [ utils . get_program_python ( "configManta.py" ) , workflow_file , "-m" , "local" , "-j" , dd . get_num_cores ( data ) ] do ....
Run manta analysis inside prepared workflow directory .
29,074
def _prep_config ( items , paired , work_dir ) : assert utils . which ( "configManta.py" ) , "Could not find installed configManta.py" out_file = os . path . join ( work_dir , "runWorkflow.py" ) if not utils . file_exists ( out_file ) or _out_of_date ( out_file ) : config_script = os . path . realpath ( utils . which (...
Run initial configuration generating a run directory for Manta .
29,075
def _prep_streamlined_config ( config_script , work_dir ) : new_min_size = 100 in_file = config_script + ".ini" out_file = os . path . join ( work_dir , os . path . basename ( in_file ) ) with open ( in_file ) as in_handle : with open ( out_file , "w" ) as out_handle : for line in in_handle : if line . startswith ( "mi...
Create manta INI file without steps that potentially increase runtimes .
29,076
def _maybe_limit_chromosomes ( data ) : std_chroms = [ ] prob_chroms = [ ] noalt_calling = "noalt_calling" in dd . get_tools_on ( data ) or "altcontigs" in dd . get_exclude_regions ( data ) for contig in ref . file_contigs ( dd . get_ref_file ( data ) ) : if contig . name . find ( ":" ) > 0 or ( noalt_calling and not c...
Potentially limit chromosomes to avoid problematically named HLA contigs .
29,077
def _out_of_date ( rw_file ) : with open ( rw_file ) as in_handle : for line in in_handle : if line . startswith ( "sys.path.append" ) : file_version = line . split ( "/lib/python" ) [ 0 ] . split ( "Cellar/manta/" ) [ - 1 ] if file_version != programs . get_version_manifest ( "manta" ) : return True return False
Check if a run workflow file points to an older version of manta and needs a refresh .
29,078
def _freebayes_options_from_config ( items , config , out_file , region = None ) : opts = [ "--genotype-qualities" , "--strict-vcf" ] cur_ploidy = ploidy . get_ploidy ( items , region ) base_ploidy = ploidy . get_ploidy ( items ) opts += [ "--ploidy" , str ( cur_ploidy ) ] if ( isinstance ( region , ( list , tuple ) ) ...
Prepare standard options from configuration input .
29,079
def _add_somatic_opts ( opts , paired ) : if "--min-alternate-fraction" not in opts and "-F" not in opts : min_af = float ( utils . get_in ( paired . tumor_config , ( "algorithm" , "min_allele_fraction" ) , 10 ) ) / 100.0 opts += " --min-alternate-fraction %s" % min_af opts += ( " --pooled-discrete --pooled-continuous ...
Add somatic options to current set . See _run_freebayes_paired for references .
29,080
def _run_freebayes_caller ( align_bams , items , ref_file , assoc_files , region = None , out_file = None , somatic = None ) : config = items [ 0 ] [ "config" ] if out_file is None : out_file = "%s-variants.vcf.gz" % os . path . splitext ( align_bams [ 0 ] ) [ 0 ] if not utils . file_exists ( out_file ) : if not utils ...
Detect SNPs and indels with FreeBayes .
29,081
def _check_lods ( parts , tumor_thresh , normal_thresh , indexes ) : try : gl_index = parts [ 8 ] . split ( ":" ) . index ( "GL" ) except ValueError : return True try : tumor_gls = [ float ( x ) for x in parts [ indexes [ "tumor" ] ] . strip ( ) . split ( ":" ) [ gl_index ] . split ( "," ) if x != "." ] if tumor_gls : ...
Ensure likelihoods for tumor and normal pass thresholds .
29,082
def _check_freqs ( parts , indexes ) : thresh_ratio = 2.7 try : ao_index = parts [ 8 ] . split ( ":" ) . index ( "AO" ) ro_index = parts [ 8 ] . split ( ":" ) . index ( "RO" ) except ValueError : ao_index , ro_index = None , None try : af_index = parts [ 8 ] . split ( ":" ) . index ( "AF" ) except ValueError : af_index...
Ensure frequency of tumor to normal passes a reasonable threshold .
29,083
def _clean_freebayes_output ( line ) : if line . startswith ( "#" ) : line = line . replace ( "Type=Int,D" , "Type=Integer,D" ) return line else : parts = line . split ( "\t" ) alleles = [ x . strip ( ) for x in parts [ 4 ] . split ( "," ) ] + [ parts [ 3 ] . strip ( ) ] if len ( alleles ) == len ( set ( alleles ) ) : ...
Clean FreeBayes output to make post - processing with GATK happy .
29,084
def clean_vcf_output ( orig_file , clean_fn , config , name = "clean" ) : base , ext = utils . splitext_plus ( orig_file ) out_file = "{0}-{1}{2}" . format ( base , name , ext ) if not utils . file_exists ( out_file ) : with open ( orig_file ) as in_handle : with file_transaction ( config , out_file ) as tx_out_file : ...
Provide framework to clean a file in - place with the specified clean function .
29,085
def get_type ( data ) : if data [ "analysis" ] . lower ( ) . startswith ( "var" ) or dd . get_variantcaller ( data ) : return tz . get_in ( ( "config" , "algorithm" , "effects" ) , data , "snpeff" )
Retrieve the type of effects calculation to do .
29,086
def prep_vep_cache ( dbkey , ref_file , tooldir = None , config = None ) : if config is None : config = { } resource_file = os . path . join ( os . path . dirname ( ref_file ) , "%s-resources.yaml" % dbkey ) if os . path . exists ( resource_file ) : with open ( resource_file ) as in_handle : resources = yaml . safe_loa...
Ensure correct installation of VEP cache file .
29,087
def _get_G2P ( data ) : G2P_file = os . path . realpath ( tz . get_in ( ( "genome_resources" , "variation" , "genotype2phenotype" ) , data ) ) args = [ "--plugin" , "G2P,file:%s" % ( G2P_file ) ] if G2P_file : return args else : return [ ]
A VEP plugin that uses G2P allelic requirements to assess variants in genes for potential phenotype involvement .
29,088
def _snpeff_args_from_config ( data ) : config = data [ "config" ] args = [ "-hgvs" ] resources = config_utils . get_resources ( "snpeff" , config ) if resources . get ( "options" ) : args += [ str ( x ) for x in resources . get ( "options" , [ ] ) ] if vcfutils . get_paired_phenotype ( data ) : args += [ "-cancer" ] e...
Retrieve snpEff arguments supplied through input configuration .
29,089
def get_db ( data ) : snpeff_db = utils . get_in ( data , ( "genome_resources" , "aliases" , "snpeff" ) ) snpeff_base_dir = None if snpeff_db : snpeff_base_dir = utils . get_in ( data , ( "reference" , "snpeff" ) ) if not ( isinstance ( snpeff_base_dir , six . string_types ) and os . path . isdir ( snpeff_base_dir ) ) ...
Retrieve a snpEff database name and location relative to reference file .
29,090
def _get_snpeff_cmd ( cmd_name , datadir , data , out_file ) : resources = config_utils . get_resources ( "snpeff" , data [ "config" ] ) jvm_opts = resources . get ( "jvm_opts" , [ "-Xms750m" , "-Xmx3g" ] ) jvm_opts = config_utils . adjust_opts ( jvm_opts , { "algorithm" : { "memory_adjust" : { "direction" : "increase"...
Retrieve snpEff base command line .
29,091
def _run_snpeff ( snp_in , out_format , data ) : snpeff_db , datadir = get_db ( data ) if not snpeff_db : return None , None assert os . path . exists ( os . path . join ( datadir , snpeff_db ) ) , "Did not find %s snpEff genome data in %s" % ( snpeff_db , datadir ) ext = utils . splitext_plus ( snp_in ) [ 1 ] if out_f...
Run effects prediction with snpEff skipping if snpEff database not present .
29,092
def _installed_snpeff_genome ( base_name , config ) : snpeff_config_file = os . path . join ( config_utils . get_program ( "snpeff" , config , "dir" ) , "snpEff.config" ) if os . path . exists ( snpeff_config_file ) : data_dir = _find_snpeff_datadir ( snpeff_config_file ) dbs = [ d for d in sorted ( glob . glob ( os . ...
Find the most recent installed genome for snpEff with the given name .
29,093
def remap_index_fn(ref_file):
    """minimap2 can build indexes on the fly but will also store common ones.

    Prefer a pre-built ``minimap2`` index directory that sits next to the
    reference's parent directory; otherwise fall back to the directory
    holding the reference file itself.
    """
    ref_dir = os.path.dirname(ref_file)
    candidate = os.path.join(ref_dir, os.pardir, "minimap2")
    # os.path.isdir is False for non-existent paths, so this covers the
    # original exists-and-isdir pair in one call.
    return candidate if os.path.isdir(candidate) else ref_dir
minimap2 can build indexes on the fly but will also store commons ones .
29,094
def create_new_csv ( samples , args ) : out_fn = os . path . splitext ( args . csv ) [ 0 ] + "-merged.csv" logger . info ( "Preparing new csv: %s" % out_fn ) with file_transaction ( out_fn ) as tx_out : with open ( tx_out , 'w' ) as handle : handle . write ( _header ( args . csv ) ) for s in samples : sample_name = s [...
Create a csv file that can be used with the bcbio -w template command .
29,095
def _get_samples_to_process ( fn , out_dir , config , force_single , separators ) : out_dir = os . path . abspath ( out_dir ) samples = defaultdict ( list ) with open ( fn ) as handle : for l in handle : if l . find ( "description" ) > 0 : logger . info ( "Skipping header." ) continue cols = l . strip ( ) . split ( ","...
parse csv file with one line per file . It will merge all files that have the same description name
29,096
def _check_stems ( files ) : used = set ( ) for fn in files : if os . path . basename ( fn ) in used : logger . warning ( "%s stem is multiple times in your file list, " "so we don't know " "how to assign it to the sample data in the CSV. " "We are gonna use full path to make a difference, " "that means paired files sh...
Check whether any file stem names collide and fall back to full paths when they do .
29,097
def get_cluster_view ( p ) : from cluster_helper import cluster as ipc return ipc . cluster_view ( p [ 'scheduler' ] , p [ 'queue' ] , p [ 'num_jobs' ] , p [ 'cores_per_job' ] , start_wait = p [ 'timeout' ] , extra_params = { "resources" : p [ 'resources' ] , "mem" : p [ 'mem' ] , "tag" : p [ 'tag' ] , "run_local" : Fa...
Start an IPython parallel cluster view via cluster_helper using the supplied scheduler parameters .
29,098
def from_sample(sample):
    """Upload results of processing from an analysis pipeline sample.

    Looks up the upload approach named in the sample's ``upload``
    configuration (default ``"filesystem"``) and pushes each output file
    through it. Samples without an upload section are passed through
    untouched.
    """
    upload_config = sample.get("upload")
    if not upload_config:
        return [[sample]]
    uploader = _approaches[upload_config.get("method", "filesystem")]
    for finfo in _get_files(sample):
        uploader.update_file(finfo, sample, upload_config)
    return [[sample]]
Upload results of processing from an analysis pipeline sample .
29,099
def _get_files ( sample ) : analysis = sample . get ( "analysis" ) if analysis . lower ( ) in [ "variant" , "snp calling" , "variant2" , "standard" ] : return _get_files_variantcall ( sample ) elif analysis . lower ( ) in [ "rna-seq" , "fastrna-seq" ] : return _get_files_rnaseq ( sample ) elif analysis . lower ( ) in [...
Retrieve files for the sample dispatching by analysis type .