idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
29,100
def _add_meta ( xs , sample = None , config = None ) : out = [ ] for x in xs : if not isinstance ( x [ "path" ] , six . string_types ) or not os . path . exists ( x [ "path" ] ) : raise ValueError ( "Unexpected path for upload: %s" % x ) x [ "mtime" ] = shared . get_file_timestamp ( x [ "path" ] ) if sample : sample_na...
Add top level information about the sample or flowcell to output .
29,101
def _get_files_variantcall ( sample ) : out = [ ] algorithm = sample [ "config" ] [ "algorithm" ] out = _maybe_add_summary ( algorithm , sample , out ) out = _maybe_add_alignment ( algorithm , sample , out ) out = _maybe_add_callable ( sample , out ) out = _maybe_add_disambiguate ( algorithm , sample , out ) out = _may...
Return output files for the variant calling pipeline .
29,102
def _maybe_add_callable ( data , out ) : callable_bed = dd . get_sample_callable ( data ) if callable_bed : out . append ( { "path" : callable_bed , "type" : "bed" , "ext" : "callable" } ) perbase_bed = tz . get_in ( [ "depth" , "variant_regions" , "per_base" ] , data ) if perbase_bed : out . append ( { "path" : perbas...
Add callable and depth regions to output folder .
29,103
def _get_batch_name(sample):
    """Retrieve batch name for use in SV calling outputs.

    Falls back to the sample name when the sample belongs to multiple
    batches, since a single output name is needed.
    """
    batch = dd.get_batch(sample) or dd.get_sample_name(sample)
    is_multi_batch = isinstance(batch, (list, tuple)) and len(batch) > 1
    return dd.get_sample_name(sample) if is_multi_batch else batch
Retrieve batch name for use in SV calling outputs .
29,104
def _sample_variant_file_in_population ( x ) : if "population" in x : a = _get_project_vcf ( x ) b = _get_variant_file ( x , ( "vrn_file" , ) ) decomposed = tz . get_in ( ( "population" , "decomposed" ) , x ) if ( a and b and not decomposed and len ( a ) > 0 and len ( b ) > 0 and vcfutils . get_samples ( a [ 0 ] [ "pat...
Check if a sample file is the same as the population file .
29,105
def _get_variant_file ( x , key , suffix = "" , sample = None , ignore_do_upload = False ) : out = [ ] fname = utils . get_in ( x , key ) upload_key = list ( key ) upload_key [ - 1 ] = "do_upload" do_upload = tz . get_in ( tuple ( upload_key ) , x , True ) if fname and ( ignore_do_upload or do_upload ) : if fname . end...
Retrieve VCF file with the given key if it exists handling bgzipped .
29,106
def _add_batch ( x , sample ) : added = False for batch in sorted ( dd . get_batches ( sample ) or [ ] , key = len , reverse = True ) : if batch and os . path . basename ( x [ "path" ] ) . startswith ( ( "%s-" % batch , "%s.vcf" % batch ) ) : x [ "batch" ] = batch added = True break if not added : x [ "batch" ] = dd . ...
Potentially add batch name to an upload file .
29,107
def _get_project_vcf ( x , suffix = "" ) : vcfs = _get_variant_file ( x , ( "population" , "vcf" ) , suffix = suffix ) if not vcfs : vcfs = _get_variant_file ( x , ( "vrn_file_batch" , ) , suffix = suffix , ignore_do_upload = True ) if not vcfs and x . get ( "variantcaller" ) == "ensemble" : vcfs = _get_variant_file ( ...
Get our project VCF either from the population or the variant batch file .
29,108
def _id_remapper ( orig , new ) : new_chrom_to_index = { } for i_n , ( chr_n , _ ) in enumerate ( new ) : new_chrom_to_index [ chr_n ] = i_n remap_indexes = { } for i_o , ( chr_o , _ ) in enumerate ( orig ) : if chr_o in new_chrom_to_index . keys ( ) : remap_indexes [ i_o ] = new_chrom_to_index [ chr_o ] remap_indexes ...
Provide a dictionary remapping original read indexes to new indexes .
29,109
def _clean_rec_name(rec):
    """Clean illegal characters in input fasta file which cause problems downstream.

    Replaces any character not in ALLOWED_CONTIG_NAME_CHARS with an
    underscore and blanks the record description.
    """
    rec.id = "".join(c if c in ALLOWED_CONTIG_NAME_CHARS else "_" for c in rec.id)
    rec.description = ""
    return rec
Clean illegal characters in input fasta file which cause problems downstream .
29,110
def run ( _ , data , out_dir ) : logger . info ( "Running kraken to determine contaminant: %s" % dd . get_sample_name ( data ) ) out = out_stats = None db = tz . get_in ( [ "config" , "algorithm" , "kraken" ] , data ) if db and isinstance ( db , ( list , tuple ) ) : db = db [ 0 ] kraken_cmd = config_utils . get_program...
Run kraken generating report in specified directory and parsing metrics . Using only first paired reads .
29,111
def _parse_kraken_output ( out_dir , db , data ) : in_file = os . path . join ( out_dir , "kraken_out" ) stat_file = os . path . join ( out_dir , "kraken_stats" ) out_file = os . path . join ( out_dir , "kraken_summary" ) kraken_cmd = config_utils . get_program ( "kraken-report" , data [ "config" ] ) classify = unclass...
Parse kraken stat info coming from stderr, generating report with kraken-report.
29,112
def _summarize_kraken ( fn ) : kraken = { } list_sp , list_value = [ ] , [ ] with open ( fn ) as handle : for line in handle : cols = line . strip ( ) . split ( "\t" ) sp = cols [ 5 ] . strip ( ) if len ( sp . split ( " " ) ) > 1 and not sp . startswith ( "cellular" ) : list_sp . append ( sp ) list_value . append ( col...
Get the value at species level.
29,113
def _get_main_and_json ( directory ) : directory = os . path . normpath ( os . path . abspath ( directory ) ) checker_main = os . path . normpath ( os . path . join ( directory , os . path . pardir , "checker-workflow-wrapping-tool.cwl" ) ) if checker_main and os . path . exists ( checker_main ) : main_cwl = [ checker_...
Retrieve the main CWL and sample JSON files from a bcbio generated directory .
29,114
def _run_tool ( cmd , use_container = True , work_dir = None , log_file = None ) : if isinstance ( cmd , ( list , tuple ) ) : cmd = " " . join ( [ str ( x ) for x in cmd ] ) cmd = utils . local_path_export ( at_start = use_container ) + cmd if log_file : cmd += " 2>&1 | tee -a %s" % log_file try : print ( "Running: %s"...
Run with injection of bcbio path .
29,115
def _pack_cwl(unpacked_cwl):
    """Pack CWL into a single document for submission."""
    base, ext = os.path.splitext(unpacked_cwl)
    out_file = "%s-pack%s" % (base, ext)
    _run_tool("cwltool --pack {unpacked_cwl} > {out_file}".format(
        unpacked_cwl=unpacked_cwl, out_file=out_file))
    return out_file
Pack CWL into a single document for submission .
29,116
def _remove_bcbiovm_path ( ) : cur_path = os . path . dirname ( os . path . realpath ( sys . executable ) ) paths = os . environ [ "PATH" ] . split ( ":" ) if cur_path in paths : paths . remove ( cur_path ) os . environ [ "PATH" ] = ":" . join ( paths )
Avoid referencing minimal bcbio_nextgen in bcbio_vm installation .
29,117
def _run_arvados ( args ) : assert not args . no_container , "Arvados runs require containers" assert "ARVADOS_API_TOKEN" in os . environ and "ARVADOS_API_HOST" in os . environ , "Need to set ARVADOS_API_TOKEN and ARVADOS_API_HOST in environment to run" main_file , json_file , project_name = _get_main_and_json ( args ....
Run CWL on Arvados .
29,118
def _run_toil ( args ) : main_file , json_file , project_name = _get_main_and_json ( args . directory ) work_dir = utils . safe_makedir ( os . path . join ( os . getcwd ( ) , "toil_work" ) ) tmp_dir = utils . safe_makedir ( os . path . join ( work_dir , "tmpdir" ) ) os . environ [ "TMPDIR" ] = tmp_dir log_file = os . p...
Run CWL with Toil .
29,119
def _run_bunny ( args ) : main_file , json_file , project_name = _get_main_and_json ( args . directory ) work_dir = utils . safe_makedir ( os . path . join ( os . getcwd ( ) , "bunny_work" ) ) flags = [ "-b" , work_dir ] log_file = os . path . join ( work_dir , "%s-bunny.log" % project_name ) if os . path . exists ( wo...
Run CWL with rabix bunny .
29,120
def _run_wes_stratus ( args , main_file , json_file ) : import requests base_url = args . host if not base_url . startswith ( "http" ) : base_url = "https://%s" % base_url with open ( main_file ) as in_handle : r = requests . post ( "%s/v1/workflows" % base_url , headers = { "Content-Type" : "application/json" , "Autho...
Run WES on Illumina stratus endpoint server, which wes-client doesn't support.
29,121
def _estimate_runner_memory ( json_file ) : with open ( json_file ) as in_handle : sinfo = json . load ( in_handle ) num_parallel = 1 for key in [ "config__algorithm__variantcaller" , "description" ] : item_counts = [ ] n = 0 for val in ( sinfo . get ( key ) or [ ] ) : n += 1 if val : if isinstance ( val , ( list , tup...
Estimate Java memory requirements based on number of samples .
29,122
def _run_cromwell ( args ) : main_file , json_file , project_name = _get_main_and_json ( args . directory ) work_dir = utils . safe_makedir ( os . path . join ( os . getcwd ( ) , "cromwell_work" ) ) final_dir = utils . safe_makedir ( os . path . join ( work_dir , "final" ) ) if args . no_container : _remove_bcbiovm_pat...
Run CWL with Cromwell .
29,123
def _cromwell_debug ( metadata ) : def get_failed_calls ( cur , key = None ) : if key is None : key = [ ] out = [ ] if isinstance ( cur , dict ) and "failures" in cur and "callRoot" in cur : out . append ( ( key , cur ) ) elif isinstance ( cur , dict ) : for k , v in cur . items ( ) : out . extend ( get_failed_calls ( ...
Format Cromwell failures to make debugging easier .
29,124
def _cromwell_move_outputs ( metadata , final_dir ) : sample_key = [ k for k in metadata [ "outputs" ] . keys ( ) if k . endswith ( ( "rgnames__sample" , "rgnames__sample_out" ) ) ] [ 0 ] project_dir = utils . safe_makedir ( os . path . join ( final_dir , "project" ) ) samples = metadata [ "outputs" ] [ sample_key ] de...
Move Cromwell outputs to the final upload directory .
29,125
def _run_sbgenomics(args):
    """Run CWL on SevenBridges platform and Cancer Genomics Cloud."""
    assert not args.no_container, "Seven Bridges runs require containers"
    main_file, json_file, project_name = _get_main_and_json(args.directory)
    # No extra runner flags needed beyond user-provided tool arguments.
    cmd = ["sbg-cwl-runner"] + args.toolargs + [main_file, json_file]
    _run_tool(cmd)
Run CWL on SevenBridges platform and Cancer Genomics Cloud .
29,126
def _run_funnel ( args ) : host = "localhost" port = "8088" main_file , json_file , project_name = _get_main_and_json ( args . directory ) work_dir = utils . safe_makedir ( os . path . join ( os . getcwd ( ) , "funnel_work" ) ) log_file = os . path . join ( work_dir , "%s-funnel.log" % project_name ) orig_config_dir = ...
Run funnel TES server with rabix bunny for CWL .
29,127
def _parse_qualimap_globals_inregion(table):
    """Retrieve metrics from the global targeted region table."""
    out = {}
    for row in table.find_all("tr"):
        texts = [cell.text for cell in row.find_all("td")]
        col, val = texts
        if col == "Mapped reads":
            out.update(_parse_num_pct("%s (in regions)" % col, val))
    return out
Retrieve metrics from the global targeted region table .
29,128
def _parse_qualimap_coverage ( table ) : out = { } for row in table . find_all ( "tr" ) : col , val = [ x . text for x in row . find_all ( "td" ) ] if col == "Mean" : out [ "Coverage (Mean)" ] = val return out
Parse summary qualimap coverage metrics .
29,129
def _bed_to_bed6 ( orig_file , out_dir ) : bed6_file = os . path . join ( out_dir , "%s-bed6%s" % os . path . splitext ( os . path . basename ( orig_file ) ) ) if not utils . file_exists ( bed6_file ) : with open ( bed6_file , "w" ) as out_handle : for i , region in enumerate ( list ( x ) for x in pybedtools . BedTool ...
Convert bed to required bed6 inputs .
29,130
def _detect_duplicates ( bam_file , out_dir , data ) : out_file = os . path . join ( out_dir , "dup_metrics.txt" ) if not utils . file_exists ( out_file ) : dup_align_bam = postalign . dedup_bam ( bam_file , data ) logger . info ( "Detecting duplicates in %s." % dup_align_bam ) dup_count = readstats . number_of_mapped_...
Count duplicate percentage.
29,131
def run_rnaseq ( bam_file , data , out_dir ) : strandedness = { "firststrand" : "strand-specific-reverse" , "secondstrand" : "strand-specific-forward" , "unstranded" : "non-strand-specific" } results_dir = os . path . join ( out_dir , dd . get_sample_name ( data ) ) results_file = os . path . join ( results_dir , "rnas...
Run qualimap for a rnaseq bam file and parse results
29,132
def _rnaseq_qualimap_cmd ( data , bam_file , out_dir , gtf_file = None , library = "non-strand-specific" ) : config = data [ "config" ] qualimap = config_utils . get_program ( "qualimap" , config ) resources = config_utils . get_resources ( "qualimap" , config ) num_cores = resources . get ( "cores" , dd . get_num_core...
Create command lines for qualimap
29,133
def _find_qualimap_secondary_files ( results_dir , base_file ) : def not_dup ( x ) : is_dup = ( os . path . basename ( x ) == os . path . basename ( base_file ) and os . path . getsize ( x ) == os . path . getsize ( base_file ) ) return not is_dup def is_problem_file ( x ) : return x . find ( "(" ) >= 0 or x . find ( "...
Retrieve additional files avoiding double uploading the base file .
29,134
def classifyplot_from_plotfiles ( plot_files , out_csv , outtype = "png" , title = None , size = None ) : dfs = [ pd . read_csv ( x ) for x in plot_files ] samples = [ ] for df in dfs : for sample in df [ "sample" ] . unique ( ) : if sample not in samples : samples . append ( sample ) df = pd . concat ( dfs ) df . to_c...
Create a plot from individual summary csv files with classification metrics .
29,135
def classifyplot_from_valfile ( val_file , outtype = "png" , title = None , size = None , samples = None , callers = None ) : mpl . use ( 'Agg' , force = True ) df = pd . read_csv ( val_file ) grouped = df . groupby ( [ "sample" , "caller" , "vtype" ] ) df = grouped . apply ( _calculate_fnr_fdr ) df = df . reset_index ...
Create a plot from a summarized validation file .
29,136
def create ( plot_data , header , ploti , sample_config , out_file_base , outtype = "png" , title = None , size = None ) : if mpl is None or plt is None or sns is None : not_found = ", " . join ( [ x for x in [ 'mpl' , 'plt' , 'sns' ] if eval ( x ) is None ] ) logger . info ( "No validation plot. Missing imports: %s" %...
Create plots of validation results for a sample labeling prep strategies .
29,137
def plot_prep_methods ( df , prep , prepi , out_file_base , outtype , title = None , size = None ) : samples = df [ ( df [ "bamprep" ] == prep ) ] [ "sample" ] . unique ( ) assert len ( samples ) >= 1 , samples out_file = "%s-%s.%s" % ( out_file_base , samples [ 0 ] , outtype ) df = df [ df [ "category" ] . isin ( cat_...
Plot comparison between BAM preparation methods .
29,138
def _seaborn ( df , prep , prepi , out_file , title = None , size = None ) : plt . ioff ( ) sns . set ( style = 'dark' ) vtypes = df [ "variant.type" ] . unique ( ) callers = sorted ( df [ "caller" ] . unique ( ) ) cats = _check_cats ( [ "concordant" , "discordant-missing-total" , "discordant-extra-total" , "discordant...
Plot using seaborn wrapper around matplotlib .
29,139
def _check_cats ( cats , vtypes , df , prep , callers ) : out = [ ] for cat in cats : all_vals = [ ] for vtype in vtypes : vals , labels , maxval = _get_chart_info ( df , vtype , cat , prep , callers ) all_vals . extend ( vals ) if sum ( all_vals ) / float ( len ( all_vals ) ) > 2 : out . append ( cat ) if len ( out ) ...
Only include categories in the final output if they have values .
29,140
def _get_chart_info ( df , vtype , cat , prep , callers ) : maxval_raw = max ( list ( df [ "value.floor" ] ) ) curdf = df [ ( df [ "variant.type" ] == vtype ) & ( df [ "category" ] == cat ) & ( df [ "bamprep" ] == prep ) ] vals = [ ] labels = [ ] for c in callers : row = curdf [ df [ "caller" ] == c ] if len ( row ) > ...
Retrieve values for a specific variant type category and prep method .
29,141
def _annotate ( ax , annotate , height , left , width ) : annotate_yrange_factor = 0.010 xticks = np . array ( left ) + width / 2.0 ymin , ymax = ax . get_ylim ( ) yrange = ymax - ymin if ymax > 0 : ymax += yrange * 0.15 if ymin < 0 : ymin -= yrange * 0.15 ax . set_ylim ( ymin , ymax ) yrange = ymax - ymin offset_ = yr...
Annotate axis with labels .
29,142
def _ggplot ( df , out_file ) : import ggplot as gg df [ "variant.type" ] = [ vtype_labels [ x ] for x in df [ "variant.type" ] ] df [ "category" ] = [ cat_labels [ x ] for x in df [ "category" ] ] df [ "caller" ] = [ caller_labels . get ( x , None ) for x in df [ "caller" ] ] p = ( gg . ggplot ( df , gg . aes ( x = "c...
Plot faceted items with ggplot wrapper on top of matplotlib . XXX Not yet functional
29,143
def get_floor_value(x, cat, vartype, floors):
    """Modify values so all have the same relative scale for differences.

    Shifts x down by the difference between the category-specific floor
    and the overall variant-type floor, clamping at 1.
    """
    overall_floor = floors[vartype]
    cat_floor = floors[(cat, vartype)]
    if cat_floor <= overall_floor:
        return x
    return max(1, x - (cat_floor - overall_floor))
Modify values so all have the same relative scale for differences .
29,144
def get_group_floors ( df , cat_labels ) : group_maxes = collections . defaultdict ( list ) group_diffs = collections . defaultdict ( list ) diff_pad = 0.1 for name , group in df . groupby ( [ "category" , "variant.type" ] ) : label , stype = name if label in cat_labels : diff = max ( group [ "value" ] ) - min ( group ...
Retrieve the floor for a given row of comparisons creating a normalized set of differences .
29,145
def _sanity_check_args ( args ) : if "scheduler" in args and "queue" in args : if args . scheduler and not args . queue : if args . scheduler != "sge" : return "IPython parallel scheduler (-s) specified. This also requires a queue (-q)." elif args . queue and not args . scheduler : return "IPython parallel queue (-q) s...
Ensure dependent arguments are correctly specified
29,146
def _add_inputs_to_kwargs ( args , kwargs , parser ) : inputs = [ x for x in [ args . global_config , args . fc_dir ] + args . run_config if x is not None ] global_config = "bcbio_system.yaml" if kwargs . get ( "workflow" , "" ) == "template" : if args . only_metadata : inputs . append ( "--only-metadata" ) if args . f...
Convert input system config flow cell directory and sample yaml to kwargs .
29,147
def _add_commas ( s , sep = ',' ) : if len ( s ) <= 3 : return s return _add_commas ( s [ : - 3 ] , sep ) + sep + s [ - 3 : ]
Add commas to output counts .
29,148
def bed_to_interval ( orig_bed , bam_file ) : with open ( orig_bed ) as in_handle : line = in_handle . readline ( ) if line . startswith ( "@" ) : yield orig_bed else : with pysam . Samfile ( bam_file , "rb" ) as bam_handle : header = bam_handle . text with tmpfile ( dir = os . path . dirname ( orig_bed ) , prefix = "p...
Add header and format BED bait and target files for Picard if necessary .
29,149
def get_summary_metrics ( self , align_metrics , dup_metrics , insert_metrics = None , hybrid_metrics = None , vrn_vals = None , rnaseq_metrics = None ) : with open ( align_metrics ) as in_handle : align_vals = self . _parse_align_metrics ( in_handle ) if dup_metrics : with open ( dup_metrics ) as in_handle : dup_vals ...
Retrieve a high level summary of interesting metrics .
29,150
def extract_metrics ( self , metrics_files ) : extension_maps = dict ( align_metrics = ( self . _parse_align_metrics , "AL" ) , dup_metrics = ( self . _parse_dup_metrics , "DUP" ) , hs_metrics = ( self . _parse_hybrid_metrics , "HS" ) , insert_metrics = ( self . _parse_insert_metrics , "INS" ) , rnaseq_metrics = ( self...
Return summary information for a lane of metrics files .
29,151
def report ( self , align_bam , ref_file , is_paired , bait_file , target_file , variant_region_file , config ) : dup_metrics = self . _get_current_dup_metrics ( align_bam ) align_metrics = self . _collect_align_metrics ( align_bam , ref_file ) gc_graph = None insert_graph , insert_metrics , hybrid_metrics = ( None , N...
Produce report metrics using Picard with sorted aligned BAM file .
29,152
def _get_current_dup_metrics ( self , align_bam ) : metrics_file = "%s.dup_metrics" % os . path . splitext ( align_bam ) [ 0 ] if not file_exists ( metrics_file ) : dups = 0 with pysam . Samfile ( align_bam , "rb" ) as bam_handle : for read in bam_handle : if ( read . is_paired and read . is_read1 ) or not read . is_pa...
Retrieve duplicate information from input BAM file .
29,153
def _check_metrics_file ( self , bam_name , metrics_ext ) : base , _ = os . path . splitext ( bam_name ) try : int ( base [ - 1 ] ) can_glob = False except ValueError : can_glob = True check_fname = "{base}{maybe_glob}.{ext}" . format ( base = base , maybe_glob = "*" if can_glob else "" , ext = metrics_ext ) glob_fname...
Check for an existing metrics file for the given BAM .
29,154
def _hybrid_select_metrics ( self , dup_bam , bait_file , target_file ) : metrics = self . _check_metrics_file ( dup_bam , "hs_metrics" ) if not file_exists ( metrics ) : with bed_to_interval ( bait_file , dup_bam ) as ready_bait : with bed_to_interval ( target_file , dup_bam ) as ready_target : with file_transaction (...
Generate metrics for hybrid selection efficiency .
29,155
def _variant_eval_metrics ( self , dup_bam ) : base , ext = os . path . splitext ( dup_bam ) end_strip = "-dup" base = base [ : - len ( end_strip ) ] if base . endswith ( end_strip ) else base mfiles = glob . glob ( "%s*eval_metrics" % base ) if len ( mfiles ) > 0 : with open ( mfiles [ 0 ] ) as in_handle : for line in...
Find metrics for evaluating variant effectiveness .
29,156
def report ( self , align_bam , ref_file , gtf_file , is_paired = False , rrna_file = "null" ) : dup_metrics = self . _get_current_dup_metrics ( align_bam ) align_metrics = self . _collect_align_metrics ( align_bam , ref_file ) insert_graph , insert_metrics = ( None , None ) if is_paired : insert_graph , insert_metrics...
Produce report metrics for a RNASeq experiment using Picard with a sorted aligned BAM file .
29,157
def standard_cl_params ( items ) : out = [ ] def _skip_duplicates ( data ) : return ( dd . get_coverage_interval ( data ) == "amplicon" or ( dd . get_aligner ( data ) and not dd . get_mark_duplicates ( data ) ) ) if any ( _skip_duplicates ( d ) for d in items ) : broad_runner = broad . runner_from_config ( items [ 0 ] ...
Shared command line parameters for GATK programs .
29,158
def _shared_gatk_call_prep ( align_bams , items , ref_file , region , out_file , num_cores = 1 ) : data = items [ 0 ] config = data [ "config" ] broad_runner = broad . runner_from_config ( config ) gatk_type = broad_runner . gatk_type ( ) for x in align_bams : bam . index ( x , config ) picard_runner = broad . runner_f...
Shared preparation work for GATK variant calling .
29,159
def unified_genotyper ( align_bams , items , ref_file , assoc_files , region = None , out_file = None ) : if out_file is None : out_file = "%s-variants.vcf.gz" % utils . splitext_plus ( align_bams [ 0 ] ) [ 0 ] if not utils . file_exists ( out_file ) : broad_runner , params = _shared_gatk_call_prep ( align_bams , items...
Perform SNP genotyping on the given alignment file .
29,160
def _joint_calling ( items ) : jointcaller = tz . get_in ( ( "config" , "algorithm" , "jointcaller" ) , items [ 0 ] ) if jointcaller : assert len ( items ) == 1 , "Can only do joint calling preparation with GATK with single samples" assert tz . get_in ( ( "metadata" , "batch" ) , items [ 0 ] ) is not None , "Joint call...
Determine if this call feeds downstream into joint calls .
29,161
def _supports_avx():
    """Check for support for Intel AVX acceleration.

    Returns True when the avx flag is present in /proc/cpuinfo; returns
    None (falsy) when the file is missing or the flag is absent.
    """
    if not os.path.exists("/proc/cpuinfo"):
        return
    with open("/proc/cpuinfo") as in_handle:
        for line in in_handle:
            # "flags" lines list CPU capabilities; avx cannot be at
            # position 0 because the line starts with "flags".
            if line.startswith("flags") and line.find("avx") > 0:
                return True
Check for support for Intel AVX acceleration .
29,162
def jar_versioner ( program_name , jar_name ) : def get_version ( config ) : try : pdir = config_utils . get_program ( program_name , config , "dir" ) except ValueError : return "" jar = os . path . basename ( config_utils . get_jar ( jar_name , pdir ) ) for to_remove in [ jar_name , ".jar" , "-standalone" ] : jar = ja...
Retrieve version information based on jar file .
29,163
def _get_cl_version ( p , config ) : if not p . get ( "has_cl_version" , True ) : return "" try : prog = config_utils . get_program ( p [ "cmd" ] , config ) except config_utils . CmdNotFound : localpy_cmd = os . path . join ( os . path . dirname ( sys . executable ) , p [ "cmd" ] ) if os . path . exists ( localpy_cmd )...
Retrieve version of a single commandline program .
29,164
def _get_brew_versions ( ) : from bcbio import install tooldir = install . get_defaults ( ) . get ( "tooldir" ) brew_cmd = os . path . join ( tooldir , "bin" , "brew" ) if tooldir else "brew" try : vout = subprocess . check_output ( [ brew_cmd , "list" , "--versions" ] ) except OSError : vout = "" out = { } for vstr in...
Retrieve versions of tools installed via brew .
29,165
def _get_versions ( config = None ) : try : from bcbio . pipeline import version if hasattr ( version , "__version__" ) : bcbio_version = ( "%s-%s" % ( version . __version__ , version . __git_revision__ ) if version . __git_revision__ else version . __version__ ) else : bcbio_version = "" except ImportError : bcbio_ver...
Retrieve details on all programs available on the system .
29,166
def _get_versions_manifest ( manifest_dir ) : all_pkgs = _manifest_progs + [ p . get ( "name" , p [ "cmd" ] ) for p in _cl_progs ] + [ p [ "name" ] for p in _alt_progs ] if os . path . exists ( manifest_dir ) : out = [ ] for plist in [ "toolplus" , "python" , "r" , "debian" , "custom" ] : pkg_file = os . path . join ( ...
Retrieve versions from a pre - existing manifest of installed software .
29,167
def write_versions ( dirs , config = None , is_wrapper = False ) : out_file = _get_program_file ( dirs ) if is_wrapper : assert utils . file_exists ( out_file ) , "Failed to create program versions from VM" elif out_file is None : for p in _get_versions ( config ) : print ( "{program},{version}" . format ( ** p ) ) els...
Write CSV file with versions used in analysis pipeline .
29,168
def get_version_manifest ( name , data = None , required = False ) : manifest_dir = _get_manifest_dir ( data , name ) manifest_vs = _get_versions_manifest ( manifest_dir ) or [ ] for x in manifest_vs : if x [ "program" ] == name : v = x . get ( "version" , "" ) if v : return v if required : raise ValueError ( "Did not ...
Retrieve a version from the currently installed manifest .
29,169
def add_subparser(subparsers):
    """Add command line option for exporting version information."""
    parser = subparsers.add_parser(
        "version",
        help="Export versions of used software to stdout or a file ")
    parser.add_argument(
        "--workdir", default=None,
        help="Directory export programs to in workdir/provenance/programs.txt")
Add command line option for exporting version information .
29,170
def get_version ( name , dirs = None , config = None ) : if dirs : p = _get_program_file ( dirs ) else : p = tz . get_in ( [ "resources" , "program_versions" ] , config ) if p : with open ( p ) as in_handle : for line in in_handle : prog , version = line . rstrip ( ) . split ( "," ) if prog == name and version : return...
Retrieve the current version of the given program from cached names .
29,171
def hla_choices(orig_hla, min_parts=2):
    """Provide a range of options for HLA type with decreasing resolution.

    Yields the original name, then (if it ends in a non-digit suffix such
    as an expression marker) the name without that suffix, then versions
    with trailing colon-separated resolution fields progressively dropped
    down to min_parts fields.
    """
    yield orig_hla
    try:
        int(orig_hla[-1])
    except ValueError:
        yield orig_hla[:-1]
    parts = orig_hla.split(":")
    for n_drop in range(len(parts) - min_parts + 1):
        yield ":".join(parts[:len(parts) - n_drop])
Provide a range of options for HLA type with decreasing resolution .
29,172
def read_pgroups(in_file):
    """Read HLAs and the pgroups they fall in.

    Input is a semicolon-delimited file of locus;allele-list;group with
    '#' comment lines; returns a dict of HLA allele name -> pgroup.
    """
    out = {}
    with open(in_file) as in_handle:
        for line in in_handle:
            if line.startswith("#"):
                continue
            locus, alleles, group = line.strip().split(";")
            for allele in alleles.split("/"):
                out["HLA-%s%s" % (locus, allele)] = group
    return out
Read HLAs and the pgroups they fall in .
29,173
def read_hlas(fasta_fai):
    """Get HLA alleles from the hg38 fasta fai file.

    Returns the first whitespace-delimited field of every line whose
    contig name starts with "HLA".
    """
    with open(fasta_fai) as in_handle:
        return [line.split()[0] for line in in_handle if line.startswith("HLA")]
Get HLA alleles from the hg38 fasta fai file .
29,174
def split_vcf ( in_file , ref_file , config , out_dir = None ) : if out_dir is None : out_dir = os . path . join ( os . path . dirname ( in_file ) , "split" ) out_files = [ ] with open ( ref . fasta_idx ( ref_file , config ) ) as in_handle : for line in in_handle : chrom , size = line . split ( ) [ : 2 ] out_file = os ...
Split a VCF file into separate files by chromosome .
29,175
def subset_vcf ( in_file , region , out_file , config ) : work_file = vcfutils . bgzip_and_index ( in_file , config ) if not file_exists ( out_file ) : with file_transaction ( config , out_file ) as tx_out_file : bcftools = config_utils . get_program ( "bcftools" , config ) region_str = bamprep . region_to_gatk ( regio...
Subset VCF in the given region handling bgzip and indexing of input .
29,176
def prep_recal ( data ) : if dd . get_recalibrate ( data ) in [ True , "gatk" ] : logger . info ( "Prepare BQSR tables with GATK: %s " % str ( dd . get_sample_name ( data ) ) ) dbsnp_file = tz . get_in ( ( "genome_resources" , "variation" , "dbsnp" ) , data ) if not dbsnp_file : logger . info ( "Skipping GATK BaseRecal...
Do pre - BQSR recalibration calculation of recalibration tables .
29,177
def apply_recal ( data ) : orig_bam = dd . get_align_bam ( data ) or dd . get_work_bam ( data ) had_work_bam = "work_bam" in data if dd . get_recalibrate ( data ) in [ True , "gatk" ] : if data . get ( "prep_recal" ) : logger . info ( "Applying BQSR recalibration with GATK: %s " % str ( dd . get_sample_name ( data ) ) ...
Apply recalibration tables to the sorted aligned BAM producing recalibrated BAM .
29,178
def _gatk_base_recalibrator ( broad_runner , dup_align_bam , ref_file , platform , dbsnp_file , intervals , data ) : target_counts = 1e8 out_file = os . path . join ( dd . get_work_dir ( data ) , "align" , dd . get_sample_name ( data ) , "%s-recal.grp" % utils . splitext_plus ( os . path . basename ( dup_align_bam ) ) ...
Step 1 of GATK recalibration process producing table of covariates .
29,179
def _gatk_apply_bqsr ( data ) : in_file = dd . get_align_bam ( data ) or dd . get_work_bam ( data ) out_file = os . path . join ( dd . get_work_dir ( data ) , "align" , dd . get_sample_name ( data ) , "%s-recal.bam" % utils . splitext_plus ( os . path . basename ( in_file ) ) [ 0 ] ) if not utils . file_uptodate ( out_...
Parallel BQSR support for GATK4 .
29,180
def create_base_logger ( config = None , parallel = None ) : if parallel is None : parallel = { } parallel_type = parallel . get ( "type" , "local" ) cores = parallel . get ( "cores" , 1 ) if parallel_type == "ipython" : from bcbio . log import logbook_zmqpush fqdn_ip = socket . gethostbyname ( socket . getfqdn ( ) ) i...
Setup base logging configuration also handling remote logging .
29,181
def setup_local_logging ( config = None , parallel = None ) : if config is None : config = { } if parallel is None : parallel = { } parallel_type = parallel . get ( "type" , "local" ) cores = parallel . get ( "cores" , 1 ) wrapper = parallel . get ( "wrapper" , None ) if parallel_type == "ipython" : from bcbio . log im...
Setup logging for a local context directing messages to appropriate base loggers .
29,182
def setup_script_logging():
    """Set up logging for standalone scripts or script-like subcommands.

    Pushes a DEBUG-level logbook StreamHandler writing to stderr and
    returns it so callers can pop it when finished.
    """
    # The original built an unused `handlers = [logbook.NullHandler()]`
    # list; it was never pushed or referenced, so it has been removed.
    format_str = ("[{record.time:%Y-%m-%dT%H:%MZ}] "
                  "{record.level_name}: {record.message}")
    handler = logbook.StreamHandler(sys.stderr, format_string=format_str,
                                    level="DEBUG")
    handler.push_thread()
    return handler
Use this logger for standalone scripts or script-like subcommands, such as bcbio_prepare_samples and `bcbio_nextgen.py -w template`.
29,183
def _validate(wdl_file):
    """Run validation on the generated WDL output using wdltool.

    Temporarily changes into the WDL file's directory for validation.
    Raises subprocess.CalledProcessError when wdltool reports problems.
    """
    start_dir = os.getcwd()
    os.chdir(os.path.dirname(wdl_file))
    try:
        print("Validating", wdl_file)
        subprocess.check_call(["wdltool", "validate", wdl_file])
    finally:
        # Restore the working directory even when validation fails so
        # later pipeline steps are not left in the WDL output directory.
        os.chdir(start_dir)
Run validation on the generated WDL output using wdltool .
29,184
def _wf_to_dict ( wf , records ) : inputs , outputs , records = _get_wf_inout ( wf , records ) out = { "name" : _id_to_name ( _clean_id ( wf . tool [ "id" ] ) ) , "inputs" : inputs , "outputs" : outputs , "steps" : [ ] , "subworkflows" : [ ] , "requirements" : [ ] } for step in wf . steps : is_subworkflow = isinstance ...
Parse a workflow into cwl2wdl style dictionaries for base and sub - workflows .
29,185
def _organize_step_scatter ( step , inputs , remapped ) : def extract_scatter_id ( inp ) : _ , ns_var = inp . split ( "#" ) _ , var = ns_var . split ( "/" ) return var scatter_local = { } if "scatter" in step . tool : assert step . tool [ "scatterMethod" ] == "dotproduct" , "Only support dotproduct scattering in conver...
Add scattering information from inputs remapping input variables .
29,186
def _variable_type_to_read_fn ( vartype , records ) : fn_map = { "String" : "read_string" , "Array[String]" : "read_lines" , "Array[Array[String]]" : "read_tsv" , "Object" : "read_object" , "Array[Object]" : "read_objects" , "Array[Array[Object]]" : "read_objects" , "Int" : "read_int" , "Float" : "read_float" } for rec...
Convert variable types into corresponding WDL standard library read functions.
29,187
def _requirements_to_dict ( rs ) : out = [ ] added = set ( [ ] ) for r in rs : if r [ "class" ] == "DockerRequirement" and "docker" not in added : added . add ( "docker" ) out . append ( { "requirement_type" : "docker" , "value" : r [ "dockerImageId" ] } ) elif r [ "class" ] == "ResourceRequirement" : if "coresMin" in ...
Convert supported requirements into dictionary for output .
29,188
def _get_jvm_opts ( out_file , data ) : resources = config_utils . get_resources ( "purple" , data [ "config" ] ) jvm_opts = resources . get ( "jvm_opts" , [ "-Xms750m" , "-Xmx3500m" ] ) jvm_opts = config_utils . adjust_opts ( jvm_opts , { "algorithm" : { "memory_adjust" : { "direction" : "increase" , "maximum" : "3000...
Retrieve Java options adjusting memory for available cores .
29,189
def _counts_to_amber ( t_vals , n_vals ) : t_depth = int ( t_vals [ "REF_COUNT" ] ) + int ( t_vals [ "ALT_COUNT" ] ) n_depth = int ( n_vals [ "REF_COUNT" ] ) + int ( n_vals [ "ALT_COUNT" ] ) if n_depth > 0 and t_depth > 0 : t_baf = float ( t_vals [ "ALT_COUNT" ] ) / float ( t_depth ) n_baf = float ( n_vals [ "ALT_COUNT...
Converts a line of CollectAllelicCounts into AMBER line .
29,190
def _count_files_to_amber ( tumor_counts , normal_counts , work_dir , data ) : amber_dir = utils . safe_makedir ( os . path . join ( work_dir , "amber" ) ) out_file = os . path . join ( amber_dir , "%s.amber.baf" % dd . get_sample_name ( data ) ) if not utils . file_uptodate ( out_file , tumor_counts ) : with file_tran...
Converts tumor and normal counts from GATK CollectAllelicCounts into Amber format .
29,191
def _amber_het_file ( method , vrn_files , work_dir , paired ) : assert vrn_files , "Did not find compatible variant calling files for PURPLE inputs" from bcbio . heterogeneity import bubbletree if method == "variants" : amber_dir = utils . safe_makedir ( os . path . join ( work_dir , "amber" ) ) out_file = os . path ....
Create file of BAFs in normal heterozygous positions compatible with AMBER .
29,192
def _run_cobalt ( paired , work_dir ) : cobalt_dir = utils . safe_makedir ( os . path . join ( work_dir , "cobalt" ) ) out_file = os . path . join ( cobalt_dir , "%s.cobalt" % dd . get_sample_name ( paired . tumor_data ) ) if not utils . file_exists ( out_file ) : with file_transaction ( paired . tumor_data , out_file ...
Run Cobalt for counting read depth across genomic windows .
29,193
def _cobalt_ratio_file ( paired , work_dir ) : cobalt_dir = utils . safe_makedir ( os . path . join ( work_dir , "cobalt" ) ) out_file = os . path . join ( cobalt_dir , "%s.cobalt" % dd . get_sample_name ( paired . tumor_data ) ) if not utils . file_exists ( out_file ) : cnr_file = tz . get_in ( [ "depth" , "bins" , "n...
Convert CNVkit binning counts into cobalt ratio output .
29,194
def _export_to_vcf ( cur ) : if float ( cur [ "copyNumber" ] ) > 2.0 : svtype = "DUP" elif float ( cur [ "copyNumber" ] ) < 2.0 : svtype = "DEL" else : svtype = None if svtype : info = [ "END=%s" % cur [ "end" ] , "SVLEN=%s" % ( int ( cur [ "end" ] ) - int ( cur [ "start" ] ) ) , "SVTYPE=%s" % svtype , "CN=%s" % cur [ ...
Convert PURPLE custom output into VCF .
29,195
def make_pizzly_gtf ( gtf_file , out_file , data ) : if file_exists ( out_file ) : return out_file db = gtf . get_gtf_db ( gtf_file ) with file_transaction ( data , out_file ) as tx_out_file : with open ( tx_out_file , "w" ) as out_handle : for gene in db . features_of_type ( "gene" ) : children = [ x for x in db . chi...
pizzly needs the GTF to be in gene -> transcript -> exon order for each gene. It also wants the gene biotype set as the source.
29,196
def _validate_caller_vcf ( call_vcf , truth_vcf , callable_bed , svcaller , work_dir , data ) : stats = _calculate_comparison_stats ( truth_vcf ) call_vcf = _prep_vcf ( call_vcf , callable_bed , dd . get_sample_name ( data ) , dd . get_sample_name ( data ) , stats , work_dir , data ) truth_vcf = _prep_vcf ( truth_vcf ,...
Validate a caller VCF against truth within callable regions using SURVIVOR .
29,197
def _survivor_merge ( call_vcf , truth_vcf , stats , work_dir , data ) : out_file = os . path . join ( work_dir , "eval-merge.vcf" ) if not utils . file_uptodate ( out_file , call_vcf ) : in_call_vcf = call_vcf . replace ( ".vcf.gz" , ".vcf" ) if not utils . file_exists ( in_call_vcf ) : with file_transaction ( data , ...
Perform a merge of two callsets using SURVIVOR
29,198
def _calculate_comparison_stats ( truth_vcf ) : min_stat_size = 50 min_median_size = 250 sizes = [ ] svtypes = set ( [ ] ) with utils . open_gzipsafe ( truth_vcf ) as in_handle : for call in ( l . rstrip ( ) . split ( "\t" ) for l in in_handle if not l . startswith ( "#" ) ) : stats = _summarize_call ( call ) if stats ...
Identify calls to validate from the input truth VCF .
29,199
def _get_start_end ( parts , index = 7 ) : start = parts [ 1 ] end = [ x . split ( "=" ) [ - 1 ] for x in parts [ index ] . split ( ";" ) if x . startswith ( "END=" ) ] if end : end = end [ 0 ] return start , end return None , None
Retrieve start and end coordinates for a VCF record; skips BNDs without END coords.