idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
30,200
def _run_concat_variant_files_gatk4 ( input_file_list , out_file , config ) : if not utils . file_exists ( out_file ) : with file_transaction ( config , out_file ) as tx_out_file : params = [ "-T" , "GatherVcfs" , "-I" , input_file_list , "-O" , tx_out_file ] config = utils . deepish_copy ( config ) if "gatk4" in dd . ...
Use GATK4 GatherVcfs for concatenation of scattered VCFs .
30,201
def _get_file_list ( orig_files , out_file , regions , ref_file , config ) : sorted_files = _sort_by_region ( orig_files , regions , ref_file , config ) exist_files = [ ( c , x ) for c , x in sorted_files if os . path . exists ( x ) and vcf_has_variants ( x ) ] if len ( exist_files ) == 0 : exist_files = [ x for c , x ...
Create file with region sorted list of non - empty VCFs for concatenating .
30,202
def _fix_gatk_header ( exist_files , out_file , config ) : from bcbio . variation import ploidy c , base_file = exist_files [ 0 ] replace_file = base_file items = [ { "config" : config } ] if ploidy . get_ploidy ( items , region = ( c , 1 , 2 ) ) == 1 : for c , x in exist_files [ 1 : ] : if ploidy . get_ploidy ( items ...
Ensure consistent headers for VCF concatenation .
30,203
def _run_concat_variant_files_bcftools ( in_list , out_file , config , naive = False ) : if not utils . file_exists ( out_file ) : with file_transaction ( config , out_file ) as tx_out_file : bcftools = config_utils . get_program ( "bcftools" , config ) output_type = "z" if out_file . endswith ( ".gz" ) else "v" if nai...
Concatenate variant files using bcftools concat potentially using the fast naive option .
30,204
def combine_variant_files ( orig_files , out_file , ref_file , config , quiet_out = True , region = None ) : in_pipeline = False if isinstance ( orig_files , dict ) : file_key = config [ "file_key" ] in_pipeline = True orig_files = orig_files [ file_key ] if not utils . file_exists ( out_file ) : with file_transaction ...
Combine VCF files from the same sample into a single output file .
30,205
def sort_by_ref ( vcf_file , data ) : out_file = "%s-prep.vcf.gz" % utils . splitext_plus ( vcf_file ) [ 0 ] if not utils . file_uptodate ( out_file , vcf_file ) : with file_transaction ( data , out_file ) as tx_out_file : header_file = "%s-header.txt" % utils . splitext_plus ( tx_out_file ) [ 0 ] with open ( header_fi...
Sort a VCF file by genome reference and position adding contig information .
30,206
def add_contig_to_header ( line , ref_file ) : if line . startswith ( "##fileformat=VCF" ) : out = [ line ] for region in ref . file_contigs ( ref_file ) : out . append ( "##contig=<ID=%s,length=%s>" % ( region . name , region . size ) ) return "\n" . join ( out ) else : return line
Streaming target to add contigs to a VCF file header .
30,207
def parallel_combine_variants ( orig_files , out_file , ref_file , config , run_parallel ) : file_key = "vcf_files" def split_by_region ( data ) : base , ext = utils . splitext_plus ( os . path . basename ( out_file ) ) args = [ ] for region in [ x . name for x in ref . file_contigs ( ref_file , config ) ] : region_out...
Combine variants in parallel by chromosome concatenating final outputs .
30,208
def move_vcf ( orig_file , new_file ) : for ext in [ "" , ".idx" , ".tbi" ] : to_move = orig_file + ext if os . path . exists ( to_move ) : shutil . move ( to_move , new_file + ext )
Move a VCF file with associated index .
30,209
def bgzip_and_index ( in_file , config = None , remove_orig = True , prep_cmd = "" , tabix_args = None , out_dir = None ) : if config is None : config = { } out_file = in_file if in_file . endswith ( ".gz" ) else in_file + ".gz" if out_dir : remove_orig = False out_file = os . path . join ( out_dir , os . path . basena...
bgzip and tabix index an input file handling VCF and BED .
30,210
def tabix_index ( in_file , config , preset = None , tabix_args = None ) : in_file = os . path . abspath ( in_file ) out_file = in_file + ".tbi" if not utils . file_exists ( out_file ) or not utils . file_uptodate ( out_file , in_file ) : utils . remove_safe ( out_file ) with file_transaction ( config , out_file ) as t...
Index a file using tabix .
30,211
def is_gvcf_file ( in_file ) : to_check = 100 n = 0 with utils . open_gzipsafe ( in_file ) as in_handle : for line in in_handle : if not line . startswith ( "##" ) : if n > to_check : break n += 1 parts = line . split ( "\t" ) if parts [ 4 ] == "<NON_REF>" : return True if parts [ 4 ] == "." and parts [ 7 ] . startswit...
Check if an input file is raw gVCF
30,212
def cyvcf_add_filter ( rec , name ) : if rec . FILTER : filters = rec . FILTER . split ( ";" ) else : filters = [ ] if name not in filters : filters . append ( name ) rec . FILTER = filters return rec
Add a FILTER value to a cyvcf2 record
30,213
def cyvcf_remove_filter ( rec , name ) : if rec . FILTER : filters = rec . FILTER . split ( ";" ) else : filters = [ ] new_filters = [ x for x in filters if not str ( x ) == name ] if len ( new_filters ) == 0 : new_filters = [ "PASS" ] rec . FILTER = new_filters return rec
Remove filter with the given name from a cyvcf2 record
30,214
def organize_noalign ( data ) : data = utils . to_single_data ( data [ 0 ] ) work_dir = utils . safe_makedir ( os . path . join ( dd . get_work_dir ( data ) , "align" , dd . get_sample_name ( data ) ) ) work_bam = os . path . join ( work_dir , "%s-input.bam" % dd . get_sample_name ( data ) ) if data . get ( "files" ) :...
CWL target to skip alignment and organize input data .
30,215
def align_to_sort_bam ( fastq1 , fastq2 , aligner , data ) : names = data [ "rgnames" ] align_dir_parts = [ data [ "dirs" ] [ "work" ] , "align" , names [ "sample" ] ] if data . get ( "disambiguate" ) : align_dir_parts . append ( data [ "disambiguate" ] [ "genome_build" ] ) aligner_index = _get_aligner_index ( aligner ...
Align to the named genome build returning a sorted BAM file .
30,216
def get_aligner_with_aliases ( aligner , data ) : aligner_aliases = { "sentieon-bwa" : "bwa" } from bcbio import structural if not aligner and "gridss" in structural . get_svcallers ( data ) : aligner = "bwa" return aligner_aliases . get ( aligner ) or aligner
Retrieve aligner index retriever including aliases for shared .
30,217
def _get_aligner_index ( aligner , data ) : aligner_indexes = tz . get_in ( ( "reference" , get_aligner_with_aliases ( aligner , data ) , "indexes" ) , data ) if aligner_indexes and isinstance ( aligner_indexes , ( list , tuple ) ) : aligner_index = os . path . commonprefix ( aligner_indexes ) if aligner_index . endswi...
Handle multiple specifications of aligner indexes returning value to pass to aligner .
30,218
def _align_from_fastq ( fastq1 , fastq2 , aligner , align_ref , sam_ref , names , align_dir , data ) : config = data [ "config" ] align_fn = TOOLS [ aligner ] . align_fn out = align_fn ( fastq1 , fastq2 , align_ref , names , align_dir , data ) if isinstance ( out , dict ) : assert out . get ( "work_bam" ) , ( dd . get_...
Align from fastq inputs producing sorted BAM output .
30,219
def _finalize_memory ( jvm_opts ) : avoid_min = 32 avoid_max = 48 out_opts = [ ] for opt in jvm_opts : if opt . startswith ( "-Xmx" ) : spec = opt [ 4 : ] val = int ( spec [ : - 1 ] ) mod = spec [ - 1 ] if mod . upper ( ) == "M" : adjust = 1024 min_val = avoid_min * 1024 max_val = avoid_max * 1024 else : adjust = 1 min...
GRIDSS does not recommend setting memory between 32 and 48Gb .
30,220
def _setup_reference_files ( data , tx_out_dir ) : aligner = dd . get_aligner ( data ) or "bwa" out_dir = utils . safe_makedir ( os . path . join ( tx_out_dir , aligner ) ) ref_fasta = dd . get_ref_file ( data ) ref_files = [ "%s%s" % ( utils . splitext_plus ( ref_fasta ) [ 0 ] , ext ) for ext in [ ".fa" , ".fa.fai" , ...
Create a reference directory with fasta and bwa indices .
30,221
def _add_versions ( samples ) : samples [ 0 ] [ "versions" ] = { "tools" : programs . write_versions ( samples [ 0 ] [ "dirs" ] , samples [ 0 ] [ "config" ] ) , "data" : provenancedata . write_versions ( samples [ 0 ] [ "dirs" ] , samples ) } return samples
Add tool and data versions to the summary .
30,222
def _summarize_inputs ( samples , out_dir ) : logger . info ( "summarize target information" ) if samples [ 0 ] . get ( "analysis" , "" ) . lower ( ) in [ "variant" , "variant2" ] : metrics_dir = utils . safe_makedir ( os . path . join ( out_dir , "report" , "metrics" ) ) samples = _merge_target_information ( samples ,...
Summarize inputs for MultiQC reporting in display .
30,223
def _work_path_to_rel_final_path ( path , upload_path_mapping , upload_base_dir ) : if not path or not isinstance ( path , str ) : return path upload_path = None if upload_path_mapping . get ( path ) is not None and os . path . isfile ( path ) : upload_path = upload_path_mapping [ path ] else : paths_to_check = [ key f...
Check if path is a work - rooted path and convert to a relative final - rooted path
30,224
def _one_exists ( input_files ) : for f in input_files : if os . path . exists ( f ) : return True return False
at least one file must exist for multiqc to run properly
30,225
def _get_input_files ( samples , base_dir , tx_out_dir ) : in_files = collections . defaultdict ( list ) for data in samples : sum_qc = tz . get_in ( [ "summary" , "qc" ] , data , { } ) if sum_qc in [ None , "None" ] : sum_qc = { } elif isinstance ( sum_qc , six . string_types ) : sum_qc = { dd . get_algorithm_qc ( dat...
Retrieve input files keyed by sample and QC method name .
30,226
def _group_by_sample_and_batch ( samples ) : out = collections . defaultdict ( list ) for data in samples : out [ ( dd . get_sample_name ( data ) , dd . get_align_bam ( data ) , tuple ( _get_batches ( data ) ) ) ] . append ( data ) return [ xs [ 0 ] for xs in out . values ( ) ]
Group samples split by QC method back one per sample - batch .
30,227
def _has_bcftools_germline_stats ( data ) : stats_file = tz . get_in ( [ "summary" , "qc" ] , data ) if isinstance ( stats_file , dict ) : stats_file = tz . get_in ( [ "variants" , "base" ] , stats_file ) if not stats_file : stats_file = "" return stats_file . find ( "bcftools_stats_germline" ) > 0
Check for the presence of a germline stats file CWL compatible .
30,228
def _is_good_file_for_multiqc ( fpath ) : ( ftype , encoding ) = mimetypes . guess_type ( fpath ) if encoding is not None : return False if ftype is not None and ftype . startswith ( 'image' ) : return False return True
Returns False if the file is binary or image .
30,229
def _parse_disambiguate ( disambiguatestatsfilename ) : disambig_stats = [ 0 , 0 , 0 ] with open ( disambiguatestatsfilename , "r" ) as in_handle : for i , line in enumerate ( in_handle ) : fields = line . strip ( ) . split ( "\t" ) if i == 0 : assert fields == [ 'sample' , 'unique species A pairs' , 'unique species B ...
Parse disambiguation stats from given file .
30,230
def _merge_metrics ( samples , out_dir ) : logger . info ( "summarize metrics" ) out_dir = utils . safe_makedir ( os . path . join ( out_dir , "report" , "metrics" ) ) sample_metrics = collections . defaultdict ( dict ) for s in samples : s = _add_disambiguate ( s ) m = tz . get_in ( [ 'summary' , 'metrics' ] , s ) if ...
Merge metrics from multiple QC steps
30,231
def _merge_fastqc ( samples ) : fastqc_list = collections . defaultdict ( list ) seen = set ( ) for data in samples : name = dd . get_sample_name ( data ) if name in seen : continue seen . add ( name ) fns = glob . glob ( os . path . join ( dd . get_work_dir ( data ) , "qc" , dd . get_sample_name ( data ) , "fastqc" ) ...
merge all fastqc samples into one by module
30,232
def _create_plot ( tumor , in_glob , out_ext , page = 1 ) : out_dir = utils . safe_makedir ( "images" ) out_name = os . path . join ( out_dir , "%s-%s" % ( tumor , out_ext ) ) in_file = glob . glob ( in_glob ) [ 0 ] cmd = [ "pdftoppm" , in_file , out_name , "-png" , "-f" , page , "-singlefile" ] if not os . path . exis...
Create an output plot for the given PDF in the images directory .
30,233
def _get_cromwell_execution_dir ( base_dir , target_glob ) : cur_dir = glob . glob ( os . path . join ( base_dir , target_glob ) ) [ 0 ] if os . path . exists ( os . path . join ( cur_dir , "cwl.output.json" ) ) : return base_dir else : symlink_dir = os . path . dirname ( os . path . realpath ( os . path . join ( cur_d...
Retrieve the baseline directory with cromwell output files .
30,234
def prep_bam_inputs ( out_dir , sample , call_file , bam_file ) : base = utils . splitext_plus ( os . path . basename ( bam_file ) ) [ 0 ] with open ( call_file ) as in_handle : for cur_hla in ( x . strip ( ) for x in in_handle ) : out_file = os . path . join ( utils . safe_makedir ( os . path . join ( out_dir , base )...
Prepare expected input BAM files from pre - aligned .
30,235
def get_hla ( sample , cromwell_dir , hla_glob ) : hla_dir = glob . glob ( os . path . join ( cromwell_dir , hla_glob , "align" , sample , "hla" ) ) [ 0 ] fastq = os . path . join ( hla_dir , "OptiType-HLA-A_B_C-input.fq" ) calls = os . path . join ( hla_dir , "%s-optitype.csv" % sample ) return fastq , calls
Retrieve HLA calls and input fastqs for a sample .
30,236
def name_to_absolute ( x ) : for c in [ "-" , "*" , ":" ] : x = x . replace ( c , "_" ) x = x . lower ( ) return x
Convert standard hg38 HLA name into ABSOLUTE naming .
30,237
def get_hla_choice ( h , hlas , normal_bam , tumor_bam ) : def get_counts ( bam_file ) : counts = { } for line in subprocess . check_output ( [ "samtools" , "idxstats" , bam_file ] ) . split ( "\n" ) : if line . startswith ( h ) : name , _ , count , _ = line . split ( ) counts [ name ] = int ( count ) return counts tco...
Retrieve matching HLA with best read support in both tumor and normal
30,238
def prep_hla ( work_dir , sample , calls , hlas , normal_bam , tumor_bam ) : work_dir = utils . safe_makedir ( os . path . join ( work_dir , sample , "inputs" ) ) hla_file = os . path . join ( work_dir , "%s-hlas.txt" % sample ) with open ( calls ) as in_handle : with open ( hla_file , "w" ) as out_handle : next ( in_h...
Convert HLAs into ABSOLUTE format for use with LOHHLA .
30,239
def prep_ploidy ( work_dir , sample , bam_file , cromwell_dir , sv_glob ) : purecn_file = _get_cromwell_file ( cromwell_dir , sv_glob , dict ( sample = sample , method = "purecn" , ext = "purecn.csv" ) ) work_dir = utils . safe_makedir ( os . path . join ( work_dir , sample , "inputs" ) ) out_file = os . path . join ( ...
Create LOHHLA compatible input ploidy file from PureCN output .
30,240
def _bowtie_args_from_config ( data ) : config = data [ 'config' ] qual_format = config [ "algorithm" ] . get ( "quality_format" , "" ) if qual_format . lower ( ) == "illumina" : qual_flags = [ "--phred64-quals" ] else : qual_flags = [ ] multi_mappers = config [ "algorithm" ] . get ( "multiple_mappers" , True ) multi_f...
Configurable high level options for bowtie .
30,241
def align ( fastq_file , pair_file , ref_file , names , align_dir , data , extra_args = None ) : num_hits = 1 if data [ "analysis" ] . lower ( ) . startswith ( "smallrna-seq" ) : num_hits = 1000 config = data [ 'config' ] out_file = os . path . join ( align_dir , "{0}-sort.bam" . format ( dd . get_sample_name ( data ) ...
Do standard or paired end alignment with bowtie .
30,242
def subset_by_supported ( input_file , get_coords , calls_by_name , work_dir , data , headers = ( "#" , ) ) : support_files = [ ( c , tz . get_in ( [ c , "vrn_file" ] , calls_by_name ) ) for c in convert . SUBSET_BY_SUPPORT [ "cnvkit" ] ] support_files = [ ( c , f ) for ( c , f ) in support_files if f and vcfutils . vc...
Limit CNVkit input to calls with support from another caller .
30,243
def _input_to_bed ( theta_input , work_dir , get_coords , headers ) : theta_bed = os . path . join ( work_dir , "%s.bed" % os . path . splitext ( os . path . basename ( theta_input ) ) [ 0 ] ) with open ( theta_input ) as in_handle : with open ( theta_bed , "w" ) as out_handle : for line in in_handle : if not line . st...
Convert input file to a BED file for comparisons
30,244
def _run_theta ( cnv_info , data , work_dir , run_n3 = True ) : out = { "caller" : "theta" } max_normal = "0.9" opts = [ "-m" , max_normal ] n2_result = _safe_run_theta ( cnv_info [ "theta_input" ] , os . path . join ( work_dir , "n2" ) , ".n2.results" , [ "-n" , "2" ] + opts , data ) if n2_result : out [ "estimate" ] ...
Run theta calculating subpopulations and normal contamination .
30,245
def _update_with_calls ( result_file , cnv_file ) : results = { } with open ( result_file ) as in_handle : in_handle . readline ( ) _ , _ , cs , ps = in_handle . readline ( ) . strip ( ) . split ( ) for i , ( c , p ) in enumerate ( zip ( cs . split ( ":" ) , ps . split ( "," ) ) ) : results [ i ] = ( c , p ) cnvs = { }...
Update bounds with calls from CNVkit inferred copy numbers and p - values from THetA .
30,246
def _merge_theta_calls ( bounds_file , result_file , cnv_file , data ) : out_file = "%s-merged.txt" % ( result_file . replace ( ".BEST.results" , "" ) ) if not utils . file_uptodate ( out_file , result_file ) : with file_transaction ( data , out_file ) as tx_out_file : updater = _update_with_calls ( result_file , cnv_f...
Create a final output file with merged CNVkit and THetA copy and population estimates .
30,247
def _select_model ( n2_bounds , n2_result , n3_result , out_dir , data ) : n2_out_file = n2_result . replace ( ".n2.results" , ".BEST.results" ) n3_out_file = n3_result . replace ( ".n3.results" , ".BEST.results" ) if not utils . file_exists ( n2_out_file ) and not utils . file_exists ( n3_out_file ) : cmd = _get_cmd (...
Run final model selection from n = 2 and n = 3 options .
30,248
def _safe_run_theta ( input_file , out_dir , output_ext , args , data ) : out_file = os . path . join ( out_dir , _split_theta_ext ( input_file ) + output_ext ) skip_file = out_file + ".skipped" if utils . file_exists ( skip_file ) : return None if not utils . file_exists ( out_file ) : with file_transaction ( data , o...
Run THetA catching and continuing on any errors .
30,249
def _get_cmd ( cmd ) : check_cmd = "RunTHetA.py" try : local_cmd = subprocess . check_output ( [ "which" , check_cmd ] ) . strip ( ) except subprocess . CalledProcessError : return None return [ sys . executable , "%s/%s" % ( os . path . dirname ( os . path . realpath ( local_cmd ) ) , cmd ) ]
Retrieve required commands for running THetA with our local bcbio python .
30,250
def run ( data ) : sample = data [ 0 ] [ 0 ] work_dir = dd . get_work_dir ( sample ) out_dir = os . path . join ( work_dir , "mirge" ) lib = _find_lib ( sample ) mirge = _find_mirge ( sample ) bowtie = _find_bowtie ( sample ) sps = dd . get_species ( sample ) species = SPS . get ( sps , "" ) if not species : raise Valu...
Proxy function to run the tool
30,251
def _create_sample_file ( data , out_dir ) : sample_file = os . path . join ( out_dir , "sample_file.txt" ) with open ( sample_file , 'w' ) as outh : for sample in data : outh . write ( sample [ 0 ] [ "clean_fastq" ] + "\n" ) return sample_file
from data list all the fastq files in a file
30,252
def _find_lib ( data ) : options = " " . join ( data . get ( 'resources' , { } ) . get ( 'mirge' , { } ) . get ( "options" , "" ) ) if options . find ( "-lib" ) > - 1 and utils . file_exists ( options . split ( ) [ 1 ] ) : return options if not options : logger . warning ( "miRge libraries not found. Follow these instr...
Find mirge libs
30,253
def get_input_sequence_files ( data , default = None ) : if "files" not in data or data . get ( "files" ) is None : file1 , file2 = None , None elif len ( data [ "files" ] ) == 2 : file1 , file2 = data [ "files" ] else : assert len ( data [ "files" ] ) == 1 , data [ "files" ] file1 , file2 = data [ "files" ] [ 0 ] , No...
returns the input sequencing files these can be single or paired FASTQ files or BAM files
30,254
def get_umi_consensus ( data ) : consensus_choices = ( [ "fastq_name" ] ) umi = tz . get_in ( [ "config" , "algorithm" , "umi_type" ] , data ) if tz . get_in ( [ "analysis" ] , data , "" ) . lower ( ) == "scrna-seq" : return False if umi and ( umi in consensus_choices or os . path . exists ( umi ) ) : assert tz . get_i...
Retrieve UMI for consensus based preparation .
30,255
def get_dexseq_gff ( config , default = None ) : dexseq_gff = tz . get_in ( tz . get_in ( [ 'dexseq_gff' , 'keys' ] , LOOKUPS , { } ) , config , None ) if not dexseq_gff : return None gtf_file = get_gtf_file ( config ) if gtf_file : base_dir = os . path . dirname ( gtf_file ) else : base_dir = os . path . dirname ( dex...
some older versions of the genomes have the DEXseq gff file as gff instead of gff3 so this handles that by looking for either one
30,256
def get_in_samples ( samples , fn ) : for sample in samples : sample = to_single_data ( sample ) if fn ( sample , None ) : return fn ( sample ) return None
for a list of samples return the value of a global option
30,257
def update_summary_qc ( data , key , base = None , secondary = None ) : summary = get_summary_qc ( data , { } ) if base and secondary : summary [ key ] = { "base" : base , "secondary" : secondary } elif base : summary [ key ] = { "base" : base } elif secondary : summary [ key ] = { "secondary" : secondary } data = set_...
updates summary_qc with a new section keyed by key . stick files into summary_qc if you want them propagated forward and available for multiqc
30,258
def has_variantcalls ( data ) : analysis = get_analysis ( data ) . lower ( ) variant_pipeline = analysis . startswith ( ( "standard" , "variant" , "variant2" ) ) variantcaller = get_variantcaller ( data ) return variant_pipeline or variantcaller
returns True if the data dictionary is configured for variant calling
30,259
def estimate_library_complexity ( df , algorithm = "RNA-seq" ) : DEFAULT_CUTOFFS = { "RNA-seq" : ( 0.25 , 0.40 ) } cutoffs = DEFAULT_CUTOFFS [ algorithm ] if len ( df ) < 5 : return { "unique_starts_per_read" : 'nan' , "complexity" : "NA" } model = sm . ols ( formula = "starts ~ reads" , data = df ) fitted = model . fi...
estimate library complexity from the number of reads vs . number of unique start sites . returns NA if there are not enough data points to fit the line
30,260
def run_details ( self , run_bc , run_date = None ) : try : details = self . _get ( "/nglims/api_run_details" , dict ( run = run_bc ) ) except ValueError : raise ValueError ( "Could not find information in Galaxy for run: %s" % run_bc ) if "error" in details and run_date is not None : try : details = self . _get ( "/ng...
Next Gen LIMS specific API functionality .
30,261
def fixrg ( in_bam , names , ref_file , dirs , data ) : work_dir = utils . safe_makedir ( os . path . join ( dd . get_work_dir ( data ) , "bamclean" , dd . get_sample_name ( data ) ) ) out_file = os . path . join ( work_dir , "%s-fixrg.bam" % utils . splitext_plus ( os . path . basename ( in_bam ) ) [ 0 ] ) if not util...
Fix read group in a file using samtools addreplacerg .
30,262
def _target_chroms_and_header ( bam_file , data ) : special_remaps = { "chrM" : "MT" , "MT" : "chrM" } target_chroms = dict ( [ ( x . name , i ) for i , x in enumerate ( ref . file_contigs ( dd . get_ref_file ( data ) ) ) if chromhacks . is_autosomal_or_sex ( x . name ) ] ) out_chroms = [ ] with pysam . Samfile ( bam_f...
Get a list of chromosomes to target and new updated ref_file header .
30,263
def picard_prep ( in_bam , names , ref_file , dirs , data ) : runner = broad . runner_from_path ( "picard" , data [ "config" ] ) work_dir = utils . safe_makedir ( os . path . join ( dirs [ "work" ] , "bamclean" , names [ "sample" ] ) ) runner . run_fn ( "picard_index_ref" , ref_file ) reorder_bam = os . path . join ( w...
Prepare input BAM using Picard and GATK cleaning tools .
30,264
def _filter_bad_reads ( in_bam , ref_file , data ) : bam . index ( in_bam , data [ "config" ] ) out_file = "%s-gatkfilter.bam" % os . path . splitext ( in_bam ) [ 0 ] if not utils . file_exists ( out_file ) : with tx_tmpdir ( data ) as tmp_dir : with file_transaction ( data , out_file ) as tx_out_file : params = [ ( "F...
Use GATK filter to remove problem reads which choke GATK and Picard .
30,265
def generate_parallel ( samples , run_parallel ) : to_analyze , extras = _split_samples_by_qc ( samples ) qced = run_parallel ( "pipeline_summary" , to_analyze ) samples = _combine_qc_samples ( qced ) + extras qsign_info = run_parallel ( "qsignature_summary" , [ samples ] ) metadata_file = _merge_metadata ( [ samples ]...
Provide parallel preparation of summary information for alignment and variant calling .
30,266
def pipeline_summary ( data ) : data = utils . to_single_data ( data ) work_bam = dd . get_align_bam ( data ) or dd . get_work_bam ( data ) if not work_bam or not work_bam . endswith ( ".bam" ) : work_bam = None if dd . get_ref_file ( data ) : if work_bam or ( tz . get_in ( [ "config" , "algorithm" , "kraken" ] , data ...
Provide summary information on processing sample .
30,267
def get_qc_tools ( data ) : if dd . get_algorithm_qc ( data ) : return dd . get_algorithm_qc ( data ) analysis = data [ "analysis" ] . lower ( ) to_run = [ ] if tz . get_in ( [ "config" , "algorithm" , "kraken" ] , data ) : to_run . append ( "kraken" ) if "fastqc" not in dd . get_tools_off ( data ) : to_run . append ( ...
Retrieve a list of QC tools to use based on configuration and analysis type .
30,268
def _run_qc_tools ( bam_file , data ) : from bcbio . qc import ( atropos , contamination , coverage , damage , fastqc , kraken , qsignature , qualimap , samtools , picard , srna , umi , variant , viral , preseq , chipseq ) tools = { "fastqc" : fastqc . run , "atropos" : atropos . run , "small-rna" : srna . run , "samto...
Run a set of third party quality control tools returning QC directory and metrics .
30,269
def _organize_qc_files ( program , qc_dir ) : base_files = { "fastqc" : "fastqc_report.html" , "qualimap_rnaseq" : "qualimapReport.html" , "qualimap" : "qualimapReport.html" } if os . path . exists ( qc_dir ) : out_files = [ ] for fname in [ os . path . join ( qc_dir , x ) for x in os . listdir ( qc_dir ) ] : if os . p...
Organize outputs from quality control runs into a base file and secondary outputs .
30,270
def _split_samples_by_qc ( samples ) : to_process = [ ] extras = [ ] for data in [ utils . to_single_data ( x ) for x in samples ] : qcs = dd . get_algorithm_qc ( data ) if qcs and ( dd . get_align_bam ( data ) or dd . get_work_bam ( data ) or tz . get_in ( [ "config" , "algorithm" , "kraken" ] , data ) ) : for qc in q...
Split data into individual quality control steps for a run .
30,271
def _combine_qc_samples ( samples ) : by_bam = collections . defaultdict ( list ) for data in [ utils . to_single_data ( x ) for x in samples ] : batch = dd . get_batch ( data ) or dd . get_sample_name ( data ) if not isinstance ( batch , ( list , tuple ) ) : batch = [ batch ] batch = tuple ( batch ) by_bam [ ( dd . ge...
Combine split QC analyses into single samples based on BAM files .
30,272
def write_project_summary ( samples , qsign_info = None ) : work_dir = samples [ 0 ] [ 0 ] [ "dirs" ] [ "work" ] out_file = os . path . join ( work_dir , "project-summary.yaml" ) upload_dir = ( os . path . join ( work_dir , samples [ 0 ] [ 0 ] [ "upload" ] [ "dir" ] ) if "dir" in samples [ 0 ] [ 0 ] [ "upload" ] else "...
Write project summary information on the provided samples . write out dirs genome resources
30,273
def _merge_metadata ( samples ) : samples = list ( utils . flatten ( samples ) ) out_dir = dd . get_work_dir ( samples [ 0 ] ) logger . info ( "summarize metadata" ) out_file = os . path . join ( out_dir , "metadata.csv" ) sample_metrics = collections . defaultdict ( dict ) for s in samples : m = tz . get_in ( [ 'metad...
Merge all metadata into CSV file
30,274
def _other_pipeline_samples ( summary_file , cur_samples ) : cur_descriptions = set ( [ s [ 0 ] [ "description" ] for s in cur_samples ] ) out = [ ] if utils . file_exists ( summary_file ) : with open ( summary_file ) as in_handle : for s in yaml . safe_load ( in_handle ) . get ( "samples" , [ ] ) : if s [ "description...
Retrieve samples produced previously by another pipeline in the summary output .
30,275
def _add_researcher_summary ( samples , summary_yaml ) : by_researcher = collections . defaultdict ( list ) for data in ( x [ 0 ] for x in samples ) : researcher = utils . get_in ( data , ( "upload" , "researcher" ) ) if researcher : by_researcher [ researcher ] . append ( data [ "description" ] ) out_by_researcher = {...
Generate summary files per researcher if organized via a LIMS .
30,276
def _summary_csv_by_researcher ( summary_yaml , researcher , descrs , data ) : out_file = os . path . join ( utils . safe_makedir ( os . path . join ( data [ "dirs" ] [ "work" ] , "researcher" ) ) , "%s-summary.tsv" % run_info . clean_name ( researcher ) ) metrics = [ "Total_reads" , "Mapped_reads" , "Mapped_reads_pct"...
Generate a CSV file with summary information for a researcher on this project .
30,277
def prep_pdf ( qc_dir , config ) : html_file = os . path . join ( qc_dir , "fastqc" , "fastqc_report.html" ) html_fixed = "%s-fixed%s" % os . path . splitext ( html_file ) try : topdf = config_utils . get_program ( "wkhtmltopdf" , config ) except config_utils . CmdNotFound : topdf = None if topdf and utils . file_exist...
Create PDF from HTML summary outputs in QC directory .
30,278
def _run_purecn_dx ( out , paired ) : out_base , out , all_files = _get_purecn_dx_files ( paired , out ) if not utils . file_uptodate ( out [ "mutation_burden" ] , out [ "rds" ] ) : with file_transaction ( paired . tumor_data , out_base ) as tx_out_base : cmd = [ "PureCN_Dx.R" , "--rds" , out [ "rds" ] , "--callable" ,...
Extract signatures and mutational burdens from PureCN rds file .
30,279
def _get_purecn_dx_files ( paired , out ) : out_base = "%s-dx" % utils . splitext_plus ( out [ "rds" ] ) [ 0 ] all_files = [ ] for key , ext in [ [ ( "mutation_burden" , ) , "_mutation_burden.csv" ] , [ ( "plot" , "signatures" ) , "_signatures.pdf" ] , [ ( "signatures" , ) , "_signatures.csv" ] ] : cur_file = "%s%s" % ...
Retrieve files generated by PureCN_Dx
30,280
def _run_purecn ( paired , work_dir ) : segfns = { "cnvkit" : _segment_normalized_cnvkit , "gatk-cnv" : _segment_normalized_gatk } out_base , out , all_files = _get_purecn_files ( paired , work_dir ) failed_file = out_base + "-failed.log" cnr_file = tz . get_in ( [ "depth" , "bins" , "normalized" ] , paired . tumor_dat...
Run PureCN . R wrapper with pre - segmented CNVkit or GATK4 inputs .
30,281
def _segment_normalized_gatk ( cnr_file , work_dir , paired ) : work_dir = utils . safe_makedir ( os . path . join ( work_dir , "gatk-cnv" ) ) seg_file = gatkcnv . model_segments ( cnr_file , work_dir , paired ) [ "seg" ] std_seg_file = seg_file . replace ( ".cr.seg" , ".seg" ) if not utils . file_uptodate ( std_seg_fi...
Segmentation of normalized inputs using GATK4 converting into standard input formats .
30,282
def _segment_normalized_cnvkit ( cnr_file , work_dir , paired ) : cnvkit_base = os . path . join ( utils . safe_makedir ( os . path . join ( work_dir , "cnvkit" ) ) , dd . get_sample_name ( paired . tumor_data ) ) cnr_file = chromhacks . bed_to_standardonly ( cnr_file , paired . tumor_data , headers = "chromosome" , in...
Segmentation of normalized inputs using CNVkit .
30,283
def _remove_overlaps ( in_file , out_dir , data ) : out_file = os . path . join ( out_dir , "%s-nooverlaps%s" % utils . splitext_plus ( os . path . basename ( in_file ) ) ) if not utils . file_uptodate ( out_file , in_file ) : with file_transaction ( data , out_file ) as tx_out_file : with open ( in_file ) as in_handle...
Remove regions that overlap with next region these result in issues with PureCN .
30,284
def _get_purecn_files ( paired , work_dir , require_exist = False ) : out_base = os . path . join ( work_dir , "%s-purecn" % ( dd . get_sample_name ( paired . tumor_data ) ) ) out = { "plot" : { } } all_files = [ ] for plot in [ "chromosomes" , "local_optima" , "segmentation" , "summary" ] : if plot == "summary" : cur_...
Retrieve organized structure of PureCN output files .
30,285
def _loh_to_vcf ( cur ) : cn = int ( float ( cur [ "C" ] ) ) minor_cn = int ( float ( cur [ "M" ] ) ) if cur [ "type" ] . find ( "LOH" ) : svtype = "LOH" elif cn > 2 : svtype = "DUP" elif cn < 1 : svtype = "DEL" else : svtype = None if svtype : info = [ "SVTYPE=%s" % svtype , "END=%s" % cur [ "end" ] , "SVLEN=%s" % ( i...
Convert LOH output into standardized VCF .
30,286
def _generate_metrics ( bam_fname , config_file , ref_file , bait_file , target_file ) : with open ( config_file ) as in_handle : config = yaml . safe_load ( in_handle ) broad_runner = broad . runner_from_config ( config ) bam_fname = os . path . abspath ( bam_fname ) path = os . path . dirname ( bam_fname ) out_dir = ...
Run Picard commands to generate metrics files when missing .
30,287
def run ( items , background = None ) : if not background : background = [ ] paired = vcfutils . get_paired ( items + background ) if paired : out = _run_paired ( paired ) else : out = items logger . warn ( "GATK4 CNV calling currently only available for somatic samples: %s" % ", " . join ( [ dd . get_sample_name ( d )...
Detect copy number variations from a batched set of samples using GATK4 CNV calling .
30,288
def _run_paired ( paired ) : from bcbio . structural import titancna work_dir = _sv_workdir ( paired . tumor_data ) seg_files = model_segments ( tz . get_in ( [ "depth" , "bins" , "normalized" ] , paired . tumor_data ) , work_dir , paired ) call_file = call_copy_numbers ( seg_files [ "seg" ] , work_dir , paired . tumor...
Run somatic variant calling pipeline .
30,289
def call_copy_numbers ( seg_file , work_dir , data ) : out_file = os . path . join ( work_dir , "%s-call.seg" % dd . get_sample_name ( data ) ) if not utils . file_exists ( out_file ) : with file_transaction ( data , out_file ) as tx_out_file : params = [ "-T" , "CallCopyRatioSegments" , "-I" , seg_file , "-O" , tx_out...
Call copy numbers from a normalized and segmented input file .
30,290
def plot_model_segments ( seg_files , work_dir , data ) : from bcbio . heterogeneity import chromhacks out_file = os . path . join ( work_dir , "%s.modeled.png" % dd . get_sample_name ( data ) ) if not utils . file_exists ( out_file ) : with file_transaction ( data , out_file ) as tx_out_file : dict_file = utils . spli...
Diagnostic plots of segmentation and inputs .
30,291
def model_segments ( copy_file , work_dir , paired ) : out_file = os . path . join ( work_dir , "%s.cr.seg" % dd . get_sample_name ( paired . tumor_data ) ) tumor_counts , normal_counts = heterogzygote_counts ( paired ) if not utils . file_exists ( out_file ) : with file_transaction ( paired . tumor_data , out_file ) a...
Perform segmentation on input copy number log2 ratio file .
30,292
def create_panel_of_normals ( items , group_id , work_dir ) : out_file = os . path . join ( work_dir , "%s-%s-pon.hdf5" % ( dd . get_sample_name ( items [ 0 ] ) , group_id ) ) if not utils . file_exists ( out_file ) : with file_transaction ( items [ 0 ] , out_file ) as tx_out_file : params = [ "-T" , "CreateReadCountPa...
Create a panel of normals from one or more background read counts .
30,293
def pon_to_bed ( pon_file , out_dir , data ) : out_file = os . path . join ( out_dir , "%s-intervals.bed" % ( utils . splitext_plus ( os . path . basename ( pon_file ) ) [ 0 ] ) ) if not utils . file_uptodate ( out_file , pon_file ) : import h5py with file_transaction ( data , out_file ) as tx_out_file : with h5py . Fi...
Extract BED intervals from a GATK4 hdf5 panel of normal file .
30,294
def prepare_intervals ( data , region_file , work_dir ) : target_file = os . path . join ( work_dir , "%s-target.interval_list" % dd . get_sample_name ( data ) ) if not utils . file_uptodate ( target_file , region_file ) : with file_transaction ( data , target_file ) as tx_out_file : params = [ "-T" , "PreprocessInterv...
Prepare interval regions for targeted and gene based regions .
30,295
def annotate_intervals ( target_file , data ) : out_file = "%s-gcannotated.tsv" % utils . splitext_plus ( target_file ) [ 0 ] if not utils . file_uptodate ( out_file , target_file ) : with file_transaction ( data , out_file ) as tx_out_file : params = [ "-T" , "AnnotateIntervals" , "-R" , dd . get_ref_file ( data ) , "...
Provide GC annotated intervals for error correction during panels and denoising .
30,296
def collect_read_counts ( data , work_dir ) : out_file = os . path . join ( work_dir , "%s-target-coverage.hdf5" % dd . get_sample_name ( data ) ) if not utils . file_exists ( out_file ) : with file_transaction ( data , out_file ) as tx_out_file : params = [ "-T" , "CollectReadCounts" , "-I" , dd . get_align_bam ( data...
Count reads in defined bins using CollectReadCounts .
30,297
def _filter_by_normal ( tumor_counts , normal_counts , data ) : from bcbio . heterogeneity import bubbletree fparams = bubbletree . NORMAL_FILTER_PARAMS tumor_out = "%s-normfilter%s" % utils . splitext_plus ( tumor_counts ) normal_out = "%s-normfilter%s" % utils . splitext_plus ( normal_counts ) if not utils . file_upt...
Filter count files based on normal frequency and median depth , avoiding high depth regions .
30,298
def _run_collect_allelic_counts ( pos_file , pos_name , work_dir , data ) : out_dir = utils . safe_makedir ( os . path . join ( dd . get_work_dir ( data ) , "structural" , "counts" ) ) out_file = os . path . join ( out_dir , "%s-%s-counts.tsv" % ( dd . get_sample_name ( data ) , pos_name ) ) if not utils . file_exists ...
Counts by alleles for a specific sample and set of positions .
30,299
def _seg_to_vcf ( vals ) : call_to_cn = { "+" : 3 , "-" : 1 } call_to_type = { "+" : "DUP" , "-" : "DEL" } if vals [ "CALL" ] not in [ "0" ] : info = [ "FOLD_CHANGE_LOG=%s" % vals [ "MEAN_LOG2_COPY_RATIO" ] , "PROBES=%s" % vals [ "NUM_POINTS_COPY_RATIO" ] , "SVTYPE=%s" % call_to_type [ vals [ "CALL" ] ] , "SVLEN=%s" % ...
Convert GATK CNV calls seg output to a VCF line .