idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
29,800
def get_fastq_dir ( fc_dir ) : full_goat_bc = glob . glob ( os . path . join ( fc_dir , "Data" , "*Firecrest*" , "Bustard*" ) ) bustard_bc = glob . glob ( os . path . join ( fc_dir , "Data" , "Intensities" , "*Bustard*" ) ) machine_bc = os . path . join ( fc_dir , "Data" , "Intensities" , "BaseCalls" ) if os . path . e...
Retrieve the fastq directory within Solexa flowcell output .
29,801
def run_details ( self , run ) : run_data = dict ( run = run ) req = urllib . request . Request ( "%s/nglims/api_run_details" % self . _base_url , urllib . parse . urlencode ( run_data ) ) response = urllib . request . urlopen ( req ) info = json . loads ( response . read ( ) ) if "error" in info : raise ValueError ( "...
Retrieve sequencing run details as a dictionary .
29,802
def _mosaik_args_from_config ( config ) : multi_mappers = config [ "algorithm" ] . get ( "multiple_mappers" , True ) multi_flags = [ "-m" , "all" ] if multi_mappers else [ "-m" , "unique" ] error_flags = [ "-mm" , "2" ] num_cores = config [ "algorithm" ] . get ( "num_cores" , 1 ) core_flags = [ "-p" , str ( num_cores )...
Configurable high level options for mosaik .
29,803
def _convert_fastq ( fastq_file , pair_file , rg_name , out_file , config ) : out_file = "{0}-fq.mkb" . format ( os . path . splitext ( out_file ) [ 0 ] ) if not file_exists ( out_file ) : with file_transaction ( config , out_file ) as tx_out_file : cl = [ config_utils . get_program ( "mosaik" , config , default = "Mos...
Convert fastq inputs into internal Mosaik representation .
29,804
def _get_mosaik_nn_args ( out_file ) : base_nn_url = "https://raw.github.com/wanpinglee/MOSAIK/master/src/networkFile/" out = [ ] for arg , fname in [ ( "-annse" , "2.1.26.se.100.005.ann" ) , ( "-annpe" , "2.1.26.pe.100.0065.ann" ) ] : arg_fname = os . path . join ( os . path . dirname ( out_file ) , fname ) if not fil...
Retrieve default neural network files from GitHub to pass to Mosaik .
29,805
def align ( fastq_file , pair_file , ref_file , names , align_dir , data , extra_args = None ) : config = data [ "config" ] rg_name = names . get ( "rg" , None ) if names else None out_file = os . path . join ( align_dir , "%s-align.bam" % names [ "lane" ] ) if not file_exists ( out_file ) : with file_transaction ( dat...
Alignment with MosaikAligner .
29,806
def get_bcbio_timings ( path ) : with open ( path , 'r' ) as file_handle : steps = { } for line in file_handle : matches = re . search ( r'^\[([^\]]+)\] ([^:]+: .*)' , line ) if not matches : continue tstamp = matches . group ( 1 ) msg = matches . group ( 2 ) when = datetime . strptime ( tstamp , '%Y-%m-%dT%H:%MZ' ) . ...
Fetch timing information from a bcbio log file .
29,807
def this_and_prev ( iterable ) : try : item = next ( iterable ) while True : next_item = next ( iterable ) yield item , next_item item = next_item except StopIteration : return
Walk an iterable returning the current and previous items as a two - tuple .
29,808
def remove_outliers ( series , stddev ) : return series [ ( series - series . mean ( ) ) . abs ( ) < stddev * series . std ( ) ]
Remove the outliers from a series .
29,809
def prep_for_graph ( data_frame , series = None , delta_series = None , smoothing = None , outlier_stddev = None ) : series = series or [ ] delta_series = delta_series or [ ] graph = calc_deltas ( data_frame , delta_series ) for s in series + delta_series : if smoothing : graph [ s ] = graph [ s ] . resample ( smoothin...
Prepare a dataframe for graphing by calculating deltas for series that need them , resampling , and removing outliers .
29,810
def add_common_plot_features ( plot , steps ) : _setup_matplotlib ( ) plot . yaxis . set_tick_params ( labelright = True ) plot . set_xlabel ( '' ) ymax = plot . get_ylim ( ) [ 1 ] ticks = { } for tstamp , step in steps . items ( ) : if step == 'finished' : continue plot . vlines ( tstamp , 0 , ymax , linestyles = 'das...
Add plot features common to all plots such as bcbio step information .
29,811
def log_time_frame ( bcbio_log ) : output = collections . namedtuple ( "Time" , [ "start" , "end" , "steps" ] ) bcbio_timings = get_bcbio_timings ( bcbio_log ) return output ( min ( bcbio_timings ) , max ( bcbio_timings ) , bcbio_timings )
The bcbio running time frame .
29,812
def resource_usage ( bcbio_log , cluster , rawdir , verbose ) : data_frames = { } hardware_info = { } time_frame = log_time_frame ( bcbio_log ) for collectl_file in sorted ( os . listdir ( rawdir ) ) : if not collectl_file . endswith ( '.raw.gz' ) : continue if rawfile_within_timeframe ( collectl_file , time_frame ) : ...
Generate system statistics from bcbio runs .
29,813
def generate_graphs ( data_frames , hardware_info , steps , outdir , verbose = False ) : _setup_matplotlib ( ) collectl_info = collections . defaultdict ( dict ) for host , data_frame in data_frames . items ( ) : if verbose : print ( 'Generating CPU graph for {}...' . format ( host ) ) graph , data_cpu = graph_cpu ( da...
Generate all graphs for a bcbio run .
29,814
def get_ploidy ( items , region = None ) : chrom = chromosome_special_cases ( region [ 0 ] if isinstance ( region , ( list , tuple ) ) else None ) ploidy = _configured_ploidy ( items ) sexes = _configured_genders ( items ) if chrom == "mitochondrial" : return ploidy . get ( "mitochondrial" , 1 ) elif chrom == "X" : if ...
Retrieve ploidy of a region handling special cases .
29,815
def filter_vcf_by_sex ( vcf_file , items ) : out_file = "%s-ploidyfix%s" % utils . splitext_plus ( vcf_file ) if not utils . file_exists ( out_file ) : genders = list ( _configured_genders ( items ) ) is_female = len ( genders ) == 1 and genders [ 0 ] and genders [ 0 ] in [ "female" , "f" ] if is_female : orig_out_file...
Post - filter a single sample VCF handling sex chromosomes .
29,816
def variant_filtration ( call_file , ref_file , vrn_files , data , items ) : caller = data [ "config" ] [ "algorithm" ] . get ( "variantcaller" ) if "gvcf" not in dd . get_tools_on ( data ) : call_file = ploidy . filter_vcf_by_sex ( call_file , items ) if caller in [ "freebayes" ] : return vfilter . freebayes ( call_fi...
Filter variant calls using Variant Quality Score Recalibration .
29,817
def _split_by_ready_regions ( ext , file_key , dir_ext_fn ) : def _sort_by_size ( region_w_bams ) : region , _ = region_w_bams _ , start , end = region return end - start def _assign_bams_to_regions ( data ) : for i , region in enumerate ( data [ "region" ] ) : work_bams = [ ] for xs in data [ "region_bams" ] : if len ...
Organize splits based on regions generated by parallel_prep_region .
29,818
def _collapse_by_bam_variantcaller ( samples ) : by_bam = collections . OrderedDict ( ) for data in ( x [ 0 ] for x in samples ) : work_bam = utils . get_in ( data , ( "combine" , "work_bam" , "out" ) , data . get ( "align_bam" ) ) variantcaller = get_variantcaller ( data ) if isinstance ( work_bam , list ) : work_bam ...
Collapse regions to a single representative by BAM input variant caller and batch .
29,819
def _dup_samples_by_variantcaller ( samples , require_bam = True ) : samples = [ utils . to_single_data ( x ) for x in samples ] samples = germline . split_somatic ( samples ) to_process = [ ] extras = [ ] for data in samples : added = False for i , add in enumerate ( handle_multiple_callers ( data , "variantcaller" , ...
Prepare samples by variant callers duplicating any with multiple callers .
29,820
def parallel_variantcall_region ( samples , run_parallel ) : to_process , extras = _dup_samples_by_variantcaller ( samples ) split_fn = _split_by_ready_regions ( ".vcf.gz" , "work_bam" , get_variantcaller ) samples = _collapse_by_bam_variantcaller ( grouped_parallel_split_combine ( to_process , split_fn , multi . group...
Perform variant calling and post - analysis on samples by region .
29,821
def vc_output_record ( samples ) : shared_keys = [ [ "vrn_file" ] , [ "validate" , "summary" ] , [ "validate" , "tp" ] , [ "validate" , "fp" ] , [ "validate" , "fn" ] ] raw = cwlutils . samples_to_records ( [ utils . to_single_data ( x ) for x in samples ] ) shared = { } for key in shared_keys : cur = list ( set ( [ x ...
Prepare output record from variant calling to feed into downstream analysis .
29,822
def batch_for_variantcall ( samples ) : sample_order = [ dd . get_sample_name ( utils . to_single_data ( x ) ) for x in samples ] to_process , extras = _dup_samples_by_variantcaller ( samples , require_bam = False ) batch_groups = collections . defaultdict ( list ) to_process = [ utils . to_single_data ( x ) for x in t...
Prepare a set of samples for parallel variant calling .
29,823
def _handle_precalled ( data ) : if data . get ( "vrn_file" ) and not cwlutils . is_cwl_run ( data ) : vrn_file = data [ "vrn_file" ] if isinstance ( vrn_file , ( list , tuple ) ) : assert len ( vrn_file ) == 1 vrn_file = vrn_file [ 0 ] precalled_dir = utils . safe_makedir ( os . path . join ( dd . get_work_dir ( data ...
Copy in external pre - called variants fed into analysis .
29,824
def handle_multiple_callers ( data , key , default = None , require_bam = True ) : callers = get_variantcaller ( data , key , default , require_bam = require_bam ) if isinstance ( callers , six . string_types ) : return [ data ] elif not callers : return [ ] else : out = [ ] for caller in callers : base = copy . deepco...
Split samples that potentially require multiple variant calling approaches .
29,825
def variantcall_sample ( data , region = None , align_bams = None , out_file = None ) : if out_file is None or not os . path . exists ( out_file ) or not os . path . lexists ( out_file ) : utils . safe_makedir ( os . path . dirname ( out_file ) ) ref_file = dd . get_ref_file ( data ) config = data [ "config" ] caller_f...
Parallel entry point for doing genotyping of a region of a sample .
29,826
def _get_batch_name ( items , skip_jointcheck = False ) : batch_names = collections . defaultdict ( int ) has_joint = any ( [ is_joint ( d ) for d in items ] ) for data in items : if has_joint and not skip_jointcheck : batches = dd . get_sample_name ( data ) else : batches = dd . get_batches ( data ) or dd . get_sample...
Retrieve the shared batch name for a group of items .
29,827
def _run_variantcall_batch_multicore ( items , regions , final_file ) : batch_name = _get_batch_name ( items ) variantcaller = _get_batch_variantcaller ( items ) work_bams = [ dd . get_work_bam ( d ) or dd . get_align_bam ( d ) for d in items ] def split_fn ( data ) : out = [ ] for region in regions : region = _region_...
Run variant calling on a batch of items using multiple cores .
29,828
def create ( parallel , dirs , config ) : profile_dir = utils . safe_makedir ( os . path . join ( dirs [ "work" ] , get_log_dir ( config ) , "ipython" ) ) has_mincores = any ( x . startswith ( "mincores=" ) for x in parallel [ "resources" ] ) cores = min ( _get_common_cores ( config [ "resources" ] ) , parallel [ "syst...
Create a cluster based on the provided parallel arguments .
29,829
def per_machine_target_cores ( cores , num_jobs ) : if cores >= 32 and num_jobs == 1 : cores = cores - 2 elif cores >= 16 and num_jobs in [ 1 , 2 ] : cores = cores - 1 return cores
Select target cores on larger machines to leave room for batch script and controller .
29,830
def _get_common_cores ( resources ) : all_cores = [ ] for vs in resources . values ( ) : cores = vs . get ( "cores" ) if cores : all_cores . append ( int ( vs [ "cores" ] ) ) return collections . Counter ( all_cores ) . most_common ( 1 ) [ 0 ] [ 0 ]
Retrieve the most common configured number of cores in the input file .
29,831
def zip_args ( args , config = None ) : if msgpack : return [ msgpack . packb ( x , use_single_float = True , use_bin_type = True ) for x in args ] else : return args
Compress arguments using msgpack .
29,832
def runner ( view , parallel , dirs , config ) : def run ( fn_name , items ) : setpath . prepend_bcbiopath ( ) out = [ ] fn , fn_name = ( fn_name , fn_name . __name__ ) if callable ( fn_name ) else ( _get_ipython_fn ( fn_name , parallel ) , fn_name ) items = [ x for x in items if x is not None ] items = diagnostics . t...
Run a task on an ipython parallel cluster allowing alternative queue types .
29,833
def peakcall_prepare ( data , run_parallel ) : caller_fns = get_callers ( ) to_process = [ ] for sample in data : mimic = copy . copy ( sample [ 0 ] ) callers = dd . get_peakcaller ( sample [ 0 ] ) if not isinstance ( callers , list ) : callers = [ callers ] for caller in callers : if caller in caller_fns : mimic [ "pe...
Entry point for doing peak calling
29,834
def calling ( data ) : chip_bam = data . get ( "work_bam" ) input_bam = data . get ( "work_bam_input" , None ) caller_fn = get_callers ( ) [ data [ "peak_fn" ] ] name = dd . get_sample_name ( data ) out_dir = utils . safe_makedir ( os . path . join ( dd . get_work_dir ( data ) , data [ "peak_fn" ] , name ) ) out_files ...
Main function to parallelize peak calling .
29,835
def _sync ( original , processed ) : for original_sample in original : original_sample [ 0 ] [ "peaks_files" ] = { } for process_sample in processed : if dd . get_sample_name ( original_sample [ 0 ] ) == dd . get_sample_name ( process_sample [ 0 ] ) : for key in [ "peaks_files" ] : if process_sample [ 0 ] . get ( key )...
Add output to data if run successfully . For now only macs2 is available so no need to consider multiple callers .
29,836
def _check ( sample , data ) : if dd . get_chip_method ( sample ) . lower ( ) == "atac" : return [ sample ] if dd . get_phenotype ( sample ) == "input" : return None for origin in data : if dd . get_batch ( sample ) in ( dd . get_batches ( origin [ 0 ] ) or [ ] ) and dd . get_phenotype ( origin [ 0 ] ) == "input" : sam...
Get input sample for each chip bam file .
29,837
def _get_multiplier ( samples ) : to_process = 1.0 to_skip = 0 for sample in samples : if dd . get_phenotype ( sample [ 0 ] ) == "chip" : to_process += 1.0 elif dd . get_chip_method ( sample [ 0 ] ) . lower ( ) == "atac" : to_process += 1.0 else : to_skip += 1.0 mult = ( to_process - to_skip ) / len ( samples ) if mult...
Get multiplier to get jobs only for samples that have input
29,838
def greylisting ( data ) : input_bam = data . get ( "work_bam_input" , None ) if not input_bam : logger . info ( "No input BAM file detected, skipping greylisting." ) return None try : greylister = config_utils . get_program ( "chipseq-greylist" , data ) except config_utils . CmdNotFound : logger . info ( "No greyliste...
Run ChIP - seq greylisting
29,839
def to_parallel ( args , module = "bcbio.distributed" ) : ptype , cores = _get_cores_and_type ( args . numcores , getattr ( args , "paralleltype" , None ) , args . scheduler ) local_controller = getattr ( args , "local_controller" , False ) parallel = { "type" : ptype , "cores" : cores , "scheduler" : args . scheduler ...
Convert input arguments into a parallel dictionary for passing to processing .
29,840
def _get_cores_and_type ( numcores , paralleltype , scheduler ) : if scheduler is not None : paralleltype = "ipython" if paralleltype is None : paralleltype = "local" if not numcores or int ( numcores ) < 1 : numcores = 1 return paralleltype , int ( numcores )
Return core and parallelization approach from command line providing sane defaults .
29,841
def _fix_mates ( orig_file , out_file , ref_file , config ) : if not file_exists ( out_file ) : with file_transaction ( config , out_file ) as tx_out_file : samtools = config_utils . get_program ( "samtools" , config ) cmd = "{samtools} view -bS -h -t {ref_file}.fai -F 8 {orig_file} > {tx_out_file}" do . run ( cmd . fo...
Fix problematic unmapped mate pairs in TopHat output .
29,842
def _add_rg ( unmapped_file , config , names ) : picard = broad . runner_from_path ( "picard" , config ) rg_fixed = picard . run_fn ( "picard_fix_rgs" , unmapped_file , names ) return rg_fixed
Add the missing RG header .
29,843
def _estimate_paired_innerdist ( fastq_file , pair_file , ref_file , out_base , out_dir , data ) : mean , stdev = _bowtie_for_innerdist ( "100000" , fastq_file , pair_file , ref_file , out_base , out_dir , data , True ) if not mean or not stdev : mean , stdev = _bowtie_for_innerdist ( "1" , fastq_file , pair_file , ref...
Use Bowtie to estimate the inner distance of paired reads .
29,844
def fix_insert_size ( in_bam , config ) : fixed_file = os . path . splitext ( in_bam ) [ 0 ] + ".pi_fixed.bam" if file_exists ( fixed_file ) : return fixed_file header_file = os . path . splitext ( in_bam ) [ 0 ] + ".header.sam" read_length = bam . estimate_read_length ( in_bam ) bam_handle = bam . open_samfile ( in_ba...
Tophat sets PI in the RG to be the inner distance size but the SAM spec states it should be the insert size . This fixes the RG in the header of the alignment file generated by Tophat to match the spec .
29,845
def _filter_to_info ( in_file , data ) : header = ( ) out_file = "%s-ann.vcf" % utils . splitext_plus ( in_file ) [ 0 ] if not utils . file_uptodate ( out_file , in_file ) and not utils . file_uptodate ( out_file + ".gz" , in_file ) : with file_transaction ( data , out_file ) as tx_out_file : with utils . open_gzipsafe...
Move DKFZ filter information into INFO field .
29,846
def _rec_filter_to_info ( line ) : parts = line . rstrip ( ) . split ( "\t" ) move_filters = { "bSeq" : "strand" , "bPcr" : "damage" } new_filters = [ ] bias_info = [ ] for f in parts [ 6 ] . split ( ";" ) : if f in move_filters : bias_info . append ( move_filters [ f ] ) elif f not in [ "." ] : new_filters . append ( ...
Move a DKFZBias filter to the INFO field for a record .
29,847
def should_filter ( items ) : return ( vcfutils . get_paired ( items ) is not None and any ( "damage_filter" in dd . get_tools_on ( d ) for d in items ) )
Check if we should do damage filtering on somatic calling with low frequency events .
29,848
def start_cmd ( cmd , descr , data ) : if data and "provenance" in data : entity_id = tz . get_in ( [ "provenance" , "entity" ] , data )
Retain details about starting a command returning a command identifier .
29,849
def initialize ( dirs ) : if biolite and dirs . get ( "work" ) : base_dir = utils . safe_makedir ( os . path . join ( dirs [ "work" ] , "provenance" ) ) p_db = os . path . join ( base_dir , "biolite.db" ) biolite . config . resources [ "database" ] = p_db biolite . database . connect ( )
Initialize the biolite database to load provenance information .
29,850
def track_parallel ( items , sub_type ) : out = [ ] for i , args in enumerate ( items ) : item_i , item = _get_provitem_from_args ( args ) if item : sub_entity = "%s.%s.%s" % ( item [ "provenance" ] [ "entity" ] , sub_type , i ) item [ "provenance" ] [ "entity" ] = sub_entity args = list ( args ) args [ item_i ] = item...
Create entity identifiers to trace the given items in sub - commands .
29,851
def _get_provitem_from_args ( xs ) : for i , x in enumerate ( xs ) : if _has_provenance ( x ) : return i , x return - 1 , None
Retrieve processed item from list of input arguments .
29,852
def handle_vcf_calls ( vcf_file , data , orig_items ) : if not _do_prioritize ( orig_items ) : return vcf_file else : ann_vcf = population . run_vcfanno ( vcf_file , data ) if ann_vcf : priority_file = _prep_priority_filter_vcfanno ( ann_vcf , data ) return _apply_priority_filter ( ann_vcf , priority_file , data ) else...
Prioritize VCF calls based on external annotations supplied through GEMINI .
29,853
def _apply_priority_filter ( in_file , priority_file , data ) : out_file = "%s-priority%s" % utils . splitext_plus ( in_file ) if not utils . file_exists ( out_file ) : with file_transaction ( data , out_file ) as tx_out_file : header = ( '##INFO=<ID=EPR,Number=.,Type=String,' 'Description="Somatic prioritization based...
Annotate variants with priority information and use to apply filters .
29,854
def _prep_priority_filter_vcfanno ( in_vcf , data ) : pops = [ 'af_adj_exac_afr' , 'af_adj_exac_amr' , 'af_adj_exac_eas' , 'af_adj_exac_fin' , 'af_adj_exac_nfe' , 'af_adj_exac_oth' , 'af_adj_exac_sas' , 'af_exac_all' , 'max_aaf_all' , "af_esp_ea" , "af_esp_aa" , "af_esp_all" , "af_1kg_amr" , "af_1kg_eas" , "af_1kg_sas"...
Prepare tabix file with priority filters based on vcfanno annotations .
29,855
def _get_impact_info ( vcf_reader ) : ImpactInfo = collections . namedtuple ( "ImpactInfo" , "header, gclass, id" ) KEY_2_CLASS = { 'CSQ' : geneimpacts . VEP , 'ANN' : geneimpacts . SnpEff , 'BCSQ' : geneimpacts . BCFT } for l in ( x . strip ( ) for x in _from_bytes ( vcf_reader . raw_header ) . split ( "\n" ) ) : if l...
Retrieve impact parsing information from INFO header .
29,856
def _parse_impact_header ( hdr_dict ) : desc = hdr_dict [ "Description" ] if hdr_dict [ "ID" ] == "ANN" : parts = [ x . strip ( "\"'" ) for x in re . split ( "\s*\|\s*" , desc . split ( ":" , 1 ) [ 1 ] . strip ( '" ' ) ) ] elif hdr_dict [ "ID" ] == "EFF" : parts = [ x . strip ( " [])'(\"" ) for x in re . split ( "\||\(...
Parse fields for impact taken from vcf2db
29,857
def _prepare_vcf_rec ( rec , pops , known , impact_info ) : out = { } for k in pops + known : out [ k ] = rec . INFO . get ( k ) if impact_info : cur_info = rec . INFO . get ( impact_info . id ) if cur_info : cur_impacts = [ impact_info . gclass ( e , impact_info . header ) for e in _from_bytes ( cur_info ) . split ( "...
Parse a vcfanno output into a dictionary of useful attributes .
29,858
def _calc_priority_filter ( row , pops ) : filters = [ ] passes = [ ] passes . extend ( _find_known ( row ) ) filters . extend ( _known_populations ( row , pops ) ) if len ( filters ) == 0 or ( len ( passes ) > 0 and len ( filters ) < 2 ) : passes . insert ( 0 , "pass" ) return "," . join ( passes + filters )
Calculate the priority filter based on external associated data .
29,859
def _known_populations ( row , pops ) : cutoff = 0.01 out = set ( [ ] ) for pop , base in [ ( "esp" , "af_esp_all" ) , ( "1000g" , "af_1kg_all" ) , ( "exac" , "af_exac_all" ) , ( "anypop" , "max_aaf_all" ) ] : for key in [ x for x in pops if x . startswith ( base ) ] : val = row [ key ] if val and val > cutoff : out . ...
Find variants present in substantial frequency in population databases .
29,860
def _find_known ( row ) : out = [ ] clinvar_no = set ( [ "unknown" , "untested" , "non-pathogenic" , "probable-non-pathogenic" , "uncertain_significance" , "uncertain_significance" , "not_provided" , "benign" , "likely_benign" ] ) if row [ "cosmic_ids" ] or row [ "cosmic_id" ] : out . append ( "cosmic" ) if row [ "clin...
Find variant present in known pathogenic databases .
29,861
def _do_prioritize ( items ) : if not any ( "tumoronly-prioritization" in dd . get_tools_off ( d ) for d in items ) : if vcfutils . get_paired_phenotype ( items [ 0 ] ) : has_tumor = False has_normal = False for sub_data in items : if vcfutils . get_paired_phenotype ( sub_data ) == "tumor" : has_tumor = True elif vcfut...
Determine if we should perform prioritization .
29,862
def run_cortex ( align_bams , items , ref_file , assoc_files , region = None , out_file = None ) : raise NotImplementedError ( "Cortex currently out of date and needs reworking." ) if len ( align_bams ) == 1 : align_bam = align_bams [ 0 ] config = items [ 0 ] [ "config" ] else : raise NotImplementedError ( "Need to add...
Top level entry to regional de - novo based variant calling with cortex_var .
29,863
def _passes_cortex_depth ( line , min_depth ) : parts = line . split ( "\t" ) cov_index = parts [ 8 ] . split ( ":" ) . index ( "COV" ) passes_depth = False for gt in parts [ 9 : ] : cur_cov = gt . split ( ":" ) [ cov_index ] cur_depth = sum ( int ( x ) for x in cur_cov . split ( "," ) ) if cur_depth >= min_depth : pas...
Do any genotypes in the cortex_var VCF line passes the minimum depth requirement?
29,864
def _select_final_variants ( base_vcf , out_vcf , config ) : min_depth = int ( config [ "algorithm" ] . get ( "min_depth" , 4 ) ) with file_transaction ( out_vcf ) as tx_out_file : with open ( base_vcf ) as in_handle : with open ( tx_out_file , "w" ) as out_handle : for line in in_handle : if line . startswith ( "#" ) ...
Filter input file removing items with low depth of support .
29,865
def _combine_variants ( in_vcfs , out_file , ref_file , config ) : in_vcfs . sort ( ) wrote_header = False with open ( out_file , "w" ) as out_handle : for in_vcf in ( x [ - 1 ] for x in in_vcfs ) : with open ( in_vcf ) as in_handle : header = list ( itertools . takewhile ( lambda x : x . startswith ( "#" ) , in_handle...
Combine variant files writing the header from the first non - empty input .
29,866
def _remap_cortex_out ( cortex_out , region , out_file ) : def _remap_vcf_line ( line , contig , start ) : parts = line . split ( "\t" ) if parts [ 0 ] == "" or parts [ 1 ] == "" : return None parts [ 0 ] = contig try : parts [ 1 ] = str ( int ( parts [ 1 ] ) + start ) except ValueError : raise ValueError ( "Problem in...
Remap coordinates in local cortex variant calls to the original global region .
29,867
def _run_cortex ( fastq , indexes , params , out_base , dirs , config ) : print ( out_base ) fastaq_index = "{0}.fastaq_index" . format ( out_base ) se_fastq_index = "{0}.se_fastq" . format ( out_base ) pe_fastq_index = "{0}.pe_fastq" . format ( out_base ) reffasta_index = "{0}.list_ref_fasta" . format ( out_base ) wit...
Run cortex_var run_calls . pl producing a VCF variant file .
29,868
def _index_local_ref ( fasta_file , cortex_dir , stampy_dir , kmers ) : base_out = os . path . splitext ( fasta_file ) [ 0 ] cindexes = [ ] for kmer in kmers : out_file = "{0}.k{1}.ctx" . format ( base_out , kmer ) if not file_exists ( out_file ) : file_list = "{0}.se_list" . format ( base_out ) with open ( file_list ,...
Pre - index a generated local reference sequence with cortex_var and stampy .
29,869
def _get_local_ref ( region , ref_file , out_vcf_base ) : out_file = "{0}.fa" . format ( out_vcf_base ) if not file_exists ( out_file ) : with pysam . Fastafile ( ref_file ) as in_pysam : contig , start , end = region seq = in_pysam . fetch ( contig , int ( start ) , int ( end ) ) with open ( out_file , "w" ) as out_ha...
Retrieve a local FASTA file corresponding to the specified region .
29,870
def _get_fastq_in_region ( region , align_bam , out_base ) : out_file = "{0}.fastq" . format ( out_base ) if not file_exists ( out_file ) : with pysam . Samfile ( align_bam , "rb" ) as in_pysam : with file_transaction ( out_file ) as tx_out_file : with open ( tx_out_file , "w" ) as out_handle : contig , start , end = r...
Retrieve fastq files in region as single end . Paired end is more complicated since pairs can map off the region so focus on local only assembly since we've previously used paired information for mapping .
29,871
def _count_fastq_reads ( in_fastq , min_reads ) : with open ( in_fastq ) as in_handle : items = list ( itertools . takewhile ( lambda i : i <= min_reads , ( i for i , _ in enumerate ( FastqGeneralIterator ( in_handle ) ) ) ) ) return len ( items )
Count the number of fastq reads in a file stopping after reaching min_reads .
29,872
def _move_file_with_sizecheck ( tx_file , final_file ) : tmp_file = final_file + ".bcbiotmp" open ( tmp_file , 'wb' ) . close ( ) want_size = utils . get_size ( tx_file ) shutil . move ( tx_file , final_file ) transfer_size = utils . get_size ( final_file ) assert want_size == transfer_size , ( 'distributed.transaction...
Move transaction file to final location with size checks avoiding failed transfers .
29,873
def _gatk_extract_reads_cl ( data , region , prep_params , tmp_dir ) : args = [ "PrintReads" , "-L" , region_to_gatk ( region ) , "-R" , dd . get_ref_file ( data ) , "-I" , data [ "work_bam" ] ] if "gatk4" in dd . get_tools_off ( data ) : args = [ "--analysis_type" ] + args runner = broad . runner_from_config ( data [ ...
Use GATK to extract reads from full BAM file .
29,874
def _piped_input_cl ( data , region , tmp_dir , out_base_file , prep_params ) : return data [ "work_bam" ] , _gatk_extract_reads_cl ( data , region , prep_params , tmp_dir )
Retrieve the commandline for streaming input into preparation step .
29,875
def _piped_realign_gatk ( data , region , cl , out_base_file , tmp_dir , prep_params ) : broad_runner = broad . runner_from_config ( data [ "config" ] ) pa_bam = "%s-prealign%s" % os . path . splitext ( out_base_file ) if not utils . file_exists ( pa_bam ) : with file_transaction ( data , pa_bam ) as tx_out_file : cmd ...
Perform realignment with GATK using input commandline . GATK requires writing to disk and indexing before realignment .
29,876
def _get_prep_params ( data ) : realign_param = dd . get_realign ( data ) realign_param = "gatk" if realign_param is True else realign_param return { "realign" : realign_param }
Retrieve configuration parameters with defaults for preparing BAM files .
29,877
def _piped_bamprep_region ( data , region , out_file , tmp_dir ) : if _need_prep ( data ) : prep_params = _get_prep_params ( data ) _piped_bamprep_region_gatk ( data , region , prep_params , out_file , tmp_dir ) else : raise ValueError ( "No realignment specified" )
Do work of preparing BAM input file on the selected region .
29,878
def piped_bamprep ( data , region = None , out_file = None ) : data [ "region" ] = region if not _need_prep ( data ) : return [ data ] else : utils . safe_makedir ( os . path . dirname ( out_file ) ) if region [ 0 ] == "nochrom" : prep_bam = shared . write_nochr_reads ( data [ "work_bam" ] , out_file , data [ "config" ...
Perform full BAM preparation using pipes to avoid intermediate disk IO .
29,879
def update_file ( finfo , sample_info , config ) : if GalaxyInstance is None : raise ImportError ( "Could not import bioblend.galaxy" ) if "dir" not in config : raise ValueError ( "Galaxy upload requires `dir` parameter in config specifying the " "shared filesystem path to move files to." ) if "outputs" in config : _ga...
Update file in Galaxy data libraries .
29,880
def _galaxy_tool_copy ( finfo , outputs ) : tool_map = { "align" : "bam" , "variants" : "vcf.gz" } for galaxy_key , finfo_type in tool_map . items ( ) : if galaxy_key in outputs and finfo . get ( "type" ) == finfo_type : shutil . copy ( finfo [ "path" ] , outputs [ galaxy_key ] )
Copy information directly to pre - defined outputs from a Galaxy tool .
29,881
def _galaxy_library_upload ( finfo , sample_info , config ) : folder_name = "%s_%s" % ( config [ "fc_date" ] , config [ "fc_name" ] ) storage_dir = utils . safe_makedir ( os . path . join ( config [ "dir" ] , folder_name ) ) if finfo . get ( "type" ) == "directory" : storage_file = None if finfo . get ( "ext" ) == "qc"...
Upload results to galaxy library .
29,882
def _to_datalibrary_safe ( fname , gi , folder_name , sample_info , config ) : num_tries = 0 max_tries = 5 while 1 : try : _to_datalibrary ( fname , gi , folder_name , sample_info , config ) break except ( simplejson . scanner . JSONDecodeError , bioblend . galaxy . client . ConnectionError ) as e : num_tries += 1 if n...
Upload with retries for intermittent JSON failures .
29,883
def _to_datalibrary ( fname , gi , folder_name , sample_info , config ) : library = _get_library ( gi , sample_info , config ) libitems = gi . libraries . show_library ( library . id , contents = True ) folder = _get_folder ( gi , folder_name , library , libitems ) _file_to_folder ( gi , fname , sample_info , libitems ...
Upload a file to a Galaxy data library in a project specific folder .
29,884
def _file_to_folder ( gi , fname , sample_info , libitems , library , folder ) : full_name = os . path . join ( folder [ "name" ] , os . path . basename ( fname ) ) file_type = "vcf_bgzip" if full_name . endswith ( ".vcf.gz" ) else "auto" if full_name . endswith ( ".vcf.gz" ) : full_name = full_name . replace ( ".vcf.g...
Check if file exists on Galaxy if not upload to specified folder .
29,885
def _get_folder ( gi , folder_name , library , libitems ) : for item in libitems : if item [ "type" ] == "folder" and item [ "name" ] == "/%s" % folder_name : return item return gi . libraries . create_folder ( library . id , folder_name ) [ 0 ]
Retrieve or create a folder inside the library with the specified name .
29,886
def _get_library ( gi , sample_info , config ) : galaxy_lib = sample_info . get ( "galaxy_library" , config . get ( "galaxy_library" ) ) role = sample_info . get ( "galaxy_role" , config . get ( "galaxy_role" ) ) if galaxy_lib : return _get_library_from_name ( gi , galaxy_lib , role , sample_info , create = True ) elif...
Retrieve the appropriate data library for the current user .
29,887
def _library_from_nglims ( gi , sample_info , config ) : names = [ config . get ( x , "" ) . strip ( ) for x in [ "lab_association" , "researcher" ] if config . get ( x ) ] for name in names : for ext in [ "sequencing" , "lab" ] : check_name = "%s %s" % ( name . split ( ) [ 0 ] , ext ) try : return _get_library_from_na...
Retrieve upload library from nglims specified user libraries .
29,888
def prepare_input_data ( config ) : if not dd . get_disambiguate ( config ) : return dd . get_input_sequence_files ( config ) work_bam = dd . get_work_bam ( config ) logger . info ( "Converting disambiguated reads to fastq..." ) fq_files = convert_bam_to_fastq ( work_bam , dd . get_work_dir ( config ) , None , None , c...
In case of disambiguation we want to run fusion calling on the disambiguated reads which are in the work_bam file . As EricScript accepts 2 fastq files as input we need to convert the . bam to 2 . fq files .
29,889
def get_run_command ( self , tx_output_dir , input_files ) : logger . debug ( "Input data: %s" % ', ' . join ( input_files ) ) cmd = [ self . EXECUTABLE , '-db' , self . _db_location , '-name' , self . _sample_name , '-o' , tx_output_dir , ] + list ( input_files ) return "export PATH=%s:%s:\"$PATH\"; %s;" % ( self . _g...
Constructs a command to run EricScript via do . run function .
29,890
def _get_ericscript_path(self):
    """Retrieve PATH to the isolated ericscript anaconda environment.

    Resolves the executable through any symlinks so the returned
    directory is the real install location rather than the bcbio bin
    directory the symlink lives in.
    """
    executable = utils.which(os.path.join(utils.get_bcbio_bin(), self.EXECUTABLE))
    resolved = os.path.realpath(executable)
    return os.path.dirname(resolved)
Retrieve PATH to the isolated ericscript anaconda environment .
29,891
def _get_samtools0_path ( self ) : samtools_path = os . path . realpath ( os . path . join ( self . _get_ericscript_path ( ) , ".." , ".." , "bin" ) ) return samtools_path
Retrieve PATH to the samtools version specific to ericscript .
29,892
def output_dir(self):
    """Absolute path to the permanent location in the working directory
    where EricScript output will be stored.

    Computed lazily on first access and cached on the instance.
    """
    cached = self._output_dir
    if cached is None:
        cached = self._get_output_dir()
        self._output_dir = cached
    return cached
Absolute path to permanent location in working directory where EricScript output will be stored .
29,893
def reference_index(self):
    """Absolute path to the BWA index for EricScript reference data.

    Looks for <db_location>/<genome>/<index name>. Returns None when the
    database location is unset or no index is found. Matches are sorted
    so the choice is deterministic when multiple genome directories
    exist (bare glob order is arbitrary and OS-dependent).
    """
    if not self._db_location:
        return None
    matches = sorted(glob.glob(
        os.path.join(self._db_location, "*", self._REF_INDEX)))
    return matches[0] if matches else None
Absolute path to the BWA index for EricScript reference data .
29,894
def reference_fasta(self):
    """Absolute path to the fasta file with EricScript reference data.

    Looks for <db_location>/<genome>/<fasta name>. Returns None when the
    database location is unset or no fasta is found. Matches are sorted
    so the choice is deterministic when multiple genome directories
    exist (bare glob order is arbitrary and OS-dependent).
    """
    if not self._db_location:
        return None
    matches = sorted(glob.glob(
        os.path.join(self._db_location, "*", self._REF_FASTA)))
    return matches[0] if matches else None
Absolute path to the fasta file with EricScript reference data .
29,895
def _get_input_args(bam_file, data, out_base, background):
    """Retrieve input args depending on genome build.

    hg19 runs go through a pileup conversion step (for hg19 -> GRCh37
    chromosome mapping); all other builds pass the BAM file directly.
    """
    build = dd.get_genome_build(data)
    if build == "hg19":
        pileup = _create_pileup(bam_file, data, out_base, background)
        return ["--PileupFile", pileup]
    return ["--BamFile", bam_file]
Retrieve input args depending on genome build .
29,896
def _create_pileup ( bam_file , data , out_base , background ) : out_file = "%s-mpileup.txt" % out_base if not utils . file_exists ( out_file ) : with file_transaction ( data , out_file ) as tx_out_file : background_bed = os . path . normpath ( os . path . join ( os . path . dirname ( os . path . realpath ( utils . whi...
Create pileup calls in the regions of interest for hg19 - > GRCh37 chromosome mapping .
29,897
def _cnvbed_to_bed(in_file, caller, out_file):
    """Convert cn_mops CNV-based BED files into a flattened BED file.

    Output columns: chrom, start, end and a name field of the form
    cnv<score>_<caller>.
    """
    with open(out_file, "w") as out_handle:
        for feat in pybedtools.BedTool(in_file):
            name = "cnv%s_%s" % (feat.score, caller)
            fields = [feat.chrom, str(feat.start), str(feat.end), name]
            out_handle.write("\t".join(fields) + "\n")
Convert cn_mops CNV-based BED files into a flattened BED file .
29,898
def to_bed ( call , sample , work_dir , calls , data ) : out_file = os . path . join ( work_dir , "%s-%s-flat.bed" % ( sample , call [ "variantcaller" ] ) ) if call . get ( "vrn_file" ) and not utils . file_uptodate ( out_file , call [ "vrn_file" ] ) : with file_transaction ( data , out_file ) as tx_out_file : convert_...
Create a simplified BED file from caller specific input .
29,899
def _subset_by_support ( orig_vcf , cmp_calls , data ) : cmp_vcfs = [ x [ "vrn_file" ] for x in cmp_calls ] out_file = "%s-inensemble.vcf.gz" % utils . splitext_plus ( orig_vcf ) [ 0 ] if not utils . file_uptodate ( out_file , orig_vcf ) : with file_transaction ( data , out_file ) as tx_out_file : cmd = "bedtools inter...
Subset orig_vcf to calls also present in any of the comparison callers .