idx int64 0 63k | question stringlengths 61 4.03k | target stringlengths 6 1.23k |
|---|---|---|
29,900 | def run ( bam_file , data , out_dir ) : out = dict ( ) out_dir = utils . safe_makedir ( out_dir ) if dd . get_coverage ( data ) and dd . get_coverage ( data ) not in [ "None" ] : merged_bed_file = bedutils . clean_file ( dd . get_coverage_merged ( data ) , data , prefix = "cov-" , simple = True ) target_name = "coverag... | Run coverage QC analysis |
29,901 | def _goleft_indexcov ( bam_file , data , out_dir ) : if not dd . get_coverage_interval ( data ) == "genome" : return [ ] out_dir = utils . safe_makedir ( os . path . join ( out_dir , "indexcov" ) ) out_files = [ os . path . join ( out_dir , "%s-indexcov.%s" % ( dd . get_sample_name ( data ) , ext ) ) for ext in [ "roc"... | Use goleft indexcov to estimate coverage distributions using BAM index . |
29,902 | def picard_sort ( picard , align_bam , sort_order = "coordinate" , out_file = None , compression_level = None , pipe = False ) : base , ext = os . path . splitext ( align_bam ) if out_file is None : out_file = "%s-sort%s" % ( base , ext ) if not file_exists ( out_file ) : with tx_tmpdir ( picard . _config ) as tmp_dir ... | Sort a BAM file by coordinates . |
29,903 | def picard_merge ( picard , in_files , out_file = None , merge_seq_dicts = False ) : if out_file is None : out_file = "%smerge.bam" % os . path . commonprefix ( in_files ) if not file_exists ( out_file ) : with tx_tmpdir ( picard . _config ) as tmp_dir : with file_transaction ( picard . _config , out_file ) as tx_out_f... | Merge multiple BAM files together with Picard . |
29,904 | def picard_reorder ( picard , in_bam , ref_file , out_file ) : if not file_exists ( out_file ) : with tx_tmpdir ( picard . _config ) as tmp_dir : with file_transaction ( picard . _config , out_file ) as tx_out_file : opts = [ ( "INPUT" , in_bam ) , ( "OUTPUT" , tx_out_file ) , ( "REFERENCE" , ref_file ) , ( "ALLOW_INCO... | Reorder BAM file to match reference file ordering . |
29,905 | def picard_fix_rgs ( picard , in_bam , names ) : out_file = "%s-fixrgs.bam" % os . path . splitext ( in_bam ) [ 0 ] if not file_exists ( out_file ) : with tx_tmpdir ( picard . _config ) as tmp_dir : with file_transaction ( picard . _config , out_file ) as tx_out_file : opts = [ ( "INPUT" , in_bam ) , ( "OUTPUT" , tx_ou... | Add read group information to BAM files and coordinate sort . |
29,906 | def picard_index_ref ( picard , ref_file ) : dict_file = "%s.dict" % os . path . splitext ( ref_file ) [ 0 ] if not file_exists ( dict_file ) : with file_transaction ( picard . _config , dict_file ) as tx_dict_file : opts = [ ( "REFERENCE" , ref_file ) , ( "OUTPUT" , tx_dict_file ) ] picard . run ( "CreateSequenceDicti... | Provide a Picard style dict index file for a reference genome . |
29,907 | def picard_bam_to_fastq ( picard , in_bam , fastq_one , fastq_two = None ) : if not file_exists ( fastq_one ) : with tx_tmpdir ( picard . _config ) as tmp_dir : with file_transaction ( picard . _config , fastq_one ) as tx_out1 : opts = [ ( "INPUT" , in_bam ) , ( "FASTQ" , tx_out1 ) , ( "TMP_DIR" , tmp_dir ) ] if fastq_... | Convert BAM file to fastq . |
29,908 | def picard_sam_to_bam ( picard , align_sam , fastq_bam , ref_file , is_paired = False ) : to_retain = [ "XS" , "XG" , "XM" , "XN" , "XO" , "YT" ] if align_sam . endswith ( ".sam" ) : out_bam = "%s.bam" % os . path . splitext ( align_sam ) [ 0 ] elif align_sam . endswith ( "-align.bam" ) : out_bam = "%s.bam" % align_sam... | Convert SAM to BAM including unmapped reads from fastq BAM file . |
29,909 | def picard_formatconverter ( picard , align_sam ) : out_bam = "%s.bam" % os . path . splitext ( align_sam ) [ 0 ] if not file_exists ( out_bam ) : with tx_tmpdir ( picard . _config ) as tmp_dir : with file_transaction ( picard . _config , out_bam ) as tx_out_bam : opts = [ ( "INPUT" , align_sam ) , ( "OUTPUT" , tx_out_... | Convert aligned SAM file to BAM format . |
29,910 | def picard_fixmate ( picard , align_bam ) : base , ext = os . path . splitext ( align_bam ) out_file = "%s-sort%s" % ( base , ext ) if not file_exists ( out_file ) : with tx_tmpdir ( picard . _config ) as tmp_dir : with file_transaction ( picard . _config , out_file ) as tx_out_file : opts = [ ( "INPUT" , align_bam ) ,... | Run Picard s FixMateInformation generating an aligned output file . |
29,911 | def picard_idxstats ( picard , align_bam ) : opts = [ ( "INPUT" , align_bam ) ] stdout = picard . run ( "BamIndexStats" , opts , get_stdout = True ) out = [ ] AlignInfo = collections . namedtuple ( "AlignInfo" , [ "contig" , "length" , "aligned" , "unaligned" ] ) for line in stdout . split ( "\n" ) : if line : parts = ... | Retrieve alignment stats from picard using BamIndexStats . |
29,912 | def bed2interval ( align_file , bed , out_file = None ) : import pysam base , ext = os . path . splitext ( align_file ) if out_file is None : out_file = base + ".interval" with pysam . Samfile ( align_file , "r" if ext . endswith ( ".sam" ) else "rb" ) as in_bam : header = in_bam . text def reorder_line ( line ) : spli... | Converts a bed file to an interval file for use with some of the Picard tools by grabbing the header from the alignment file reordering the bed file columns and gluing them together . |
29,913 | def _enforce_max_region_size ( in_file , data ) : max_size = 20000 overlap_size = 250 def _has_larger_regions ( f ) : return any ( r . stop - r . start > max_size for r in pybedtools . BedTool ( f ) ) out_file = "%s-regionlimit%s" % utils . splitext_plus ( in_file ) if not utils . file_exists ( out_file ) : if _has_lar... | Ensure we don t have any chunks in the region greater than 20kb . |
29,914 | def run_vardict ( align_bams , items , ref_file , assoc_files , region = None , out_file = None ) : items = shared . add_highdepth_genome_exclusion ( items ) if vcfutils . is_paired_analysis ( align_bams , items ) : call_file = _run_vardict_paired ( align_bams , items , ref_file , assoc_files , region , out_file ) else... | Run VarDict variant calling . |
29,915 | def _get_jvm_opts ( data , out_file ) : if get_vardict_command ( data ) == "vardict-java" : resources = config_utils . get_resources ( "vardict" , data [ "config" ] ) jvm_opts = resources . get ( "jvm_opts" , [ "-Xms750m" , "-Xmx4g" ] ) jvm_opts += broad . get_default_jvm_opts ( os . path . dirname ( out_file ) ) retur... | Retrieve JVM options when running the Java version of VarDict . |
29,916 | def _run_vardict_caller ( align_bams , items , ref_file , assoc_files , region = None , out_file = None ) : config = items [ 0 ] [ "config" ] if out_file is None : out_file = "%s-variants.vcf.gz" % os . path . splitext ( align_bams [ 0 ] ) [ 0 ] if not utils . file_exists ( out_file ) : with file_transaction ( items [ ... | Detect SNPs and indels with VarDict . |
29,917 | def _lowfreq_linear_filter ( tumor_index , is_paired ) : if is_paired : sbf = "FORMAT/SBF[%s]" % tumor_index nm = "FORMAT/NM[%s]" % tumor_index else : sbf = "INFO/SBF" nm = "INFO/NM" cmd = ( ) return cmd . format ( ** locals ( ) ) | Linear classifier for removing low frequency false positives . |
29,918 | def add_db_germline_flag ( line ) : if line . startswith ( "#CHROM" ) : headers = [ '##INFO=<ID=DB,Number=0,Type=Flag,Description="Likely germline variant">' ] return "\n" . join ( headers ) + "\n" + line elif line . startswith ( "#" ) : return line else : parts = line . split ( "\t" ) if parts [ 7 ] . find ( "STATUS=G... | Adds a DB flag for Germline filters allowing downstream compatibility with PureCN . |
29,919 | def depth_freq_filter ( line , tumor_index , aligner ) : if line . startswith ( "#CHROM" ) : headers = [ ( '##FILTER=<ID=LowAlleleDepth,Description="Low depth per allele frequency ' 'along with poor depth, quality, mapping quality and read mismatches.">' ) , ( '##FILTER=<ID=LowFreqQuality,Description="Low frequency rea... | Command line to filter VarDict calls based on depth frequency and quality . |
29,920 | def get_vardict_command ( data ) : vcaller = dd . get_variantcaller ( data ) if isinstance ( vcaller , list ) : vardict = [ x for x in vcaller if "vardict" in x ] if not vardict : return None vardict = vardict [ 0 ] elif not vcaller : return None else : vardict = vcaller vardict = "vardict-java" if not vardict . endswi... | convert variantcaller specification to proper vardict command handling string or list specification |
29,921 | def run ( vrn_info , calls_by_name , somatic_info , do_plots = True , handle_failures = True ) : if "seq2c" in calls_by_name : cnv_info = calls_by_name [ "seq2c" ] elif "cnvkit" in calls_by_name : cnv_info = calls_by_name [ "cnvkit" ] else : raise ValueError ( "BubbleTree only currently support CNVkit and Seq2c: %s" % ... | Run BubbleTree given variant calls CNVs and somatic |
29,922 | def _run_bubbletree ( vcf_csv , cnv_csv , data , wide_lrr = False , do_plots = True , handle_failures = True ) : lrr_scale = 10.0 if wide_lrr else 1.0 local_sitelib = utils . R_sitelib ( ) base = utils . splitext_plus ( vcf_csv ) [ 0 ] r_file = "%s-run.R" % base bubbleplot_out = "%s-bubbleplot.pdf" % base trackplot_out... | Create R script and run on input data |
29,923 | def _prep_cnv_file ( cns_file , svcaller , work_dir , data ) : in_file = cns_file out_file = os . path . join ( work_dir , "%s-%s-prep.csv" % ( utils . splitext_plus ( os . path . basename ( in_file ) ) [ 0 ] , svcaller ) ) if not utils . file_uptodate ( out_file , in_file ) : with file_transaction ( data , out_file ) ... | Create a CSV file of CNV calls with log2 and number of marks . |
29,924 | def prep_vrn_file ( in_file , vcaller , work_dir , somatic_info , writer_class , seg_file = None , params = None ) : data = somatic_info . tumor_data if not params : params = PARAMS out_file = os . path . join ( work_dir , "%s-%s-prep.csv" % ( utils . splitext_plus ( os . path . basename ( in_file ) ) [ 0 ] , vcaller )... | Select heterozygous variants in the normal sample with sufficient depth . |
29,925 | def max_normal_germline_depth ( in_file , params , somatic_info ) : bcf_in = pysam . VariantFile ( in_file ) depths = [ ] for rec in bcf_in : stats = _is_possible_loh ( rec , bcf_in , params , somatic_info ) if tz . get_in ( [ "normal" , "depth" ] , stats ) : depths . append ( tz . get_in ( [ "normal" , "depth" ] , sta... | Calculate threshold for excluding potential heterozygotes based on normal depth . |
29,926 | def _identify_heterogeneity_blocks_hmm ( in_file , params , work_dir , somatic_info ) : def _segment_by_hmm ( chrom , freqs , coords ) : cur_coords = [ ] for j , state in enumerate ( _predict_states ( freqs ) ) : if state == 0 : if len ( cur_coords ) == 0 : num_misses = 0 cur_coords . append ( coords [ j ] ) else : num... | Use a HMM to identify blocks of heterogeneity to use for calculating allele frequencies . |
29,927 | def _predict_states ( freqs ) : from hmmlearn import hmm freqs = np . column_stack ( [ np . array ( freqs ) ] ) model = hmm . GaussianHMM ( 2 , covariance_type = "full" ) model . fit ( freqs ) states = model . predict ( freqs ) freqs_by_state = collections . defaultdict ( list ) for i , state in enumerate ( states ) : ... | Use frequencies to predict states across a chromosome . |
29,928 | def _freqs_by_chromosome ( in_file , params , somatic_info ) : freqs = [ ] coords = [ ] cur_chrom = None with pysam . VariantFile ( in_file ) as bcf_in : for rec in bcf_in : if _is_biallelic_snp ( rec ) and _passes_plus_germline ( rec ) and chromhacks . is_autosomal ( rec . chrom ) : if cur_chrom is None or rec . chrom... | Retrieve frequencies across each chromosome as inputs to HMM . |
29,929 | def _create_subset_file ( in_file , het_region_bed , work_dir , data ) : cnv_regions = shared . get_base_cnv_regions ( data , work_dir ) region_bed = bedutils . intersect_two ( het_region_bed , cnv_regions , work_dir , data ) out_file = os . path . join ( work_dir , "%s-origsubset.bcf" % utils . splitext_plus ( os . pa... | Subset the VCF to a set of pre - calculated smaller regions . |
29,930 | def is_info_germline ( rec ) : if hasattr ( rec , "INFO" ) : status = rec . INFO . get ( "STATUS" , "" ) . lower ( ) else : status = rec . info . get ( "STATUS" , "" ) . lower ( ) return status == "germline" or status . find ( "loh" ) >= 0 | Check if a variant record is germline based on INFO attributes . |
29,931 | def _tumor_normal_stats ( rec , somatic_info , vcf_rec ) : out = { "normal" : { "alt" : None , "depth" : None , "freq" : None } , "tumor" : { "alt" : 0 , "depth" : 0 , "freq" : None } } if hasattr ( vcf_rec , "samples" ) : samples = [ ( s , { } ) for s in vcf_rec . samples ] for fkey in [ "AD" , "AO" , "RO" , "AF" , "D... | Retrieve depth and frequency of tumor and normal samples . |
29,932 | def _is_possible_loh ( rec , vcf_rec , params , somatic_info , use_status = False , max_normal_depth = None ) : if _is_biallelic_snp ( rec ) and _passes_plus_germline ( rec , use_status = use_status ) : stats = _tumor_normal_stats ( rec , somatic_info , vcf_rec ) depths = [ tz . get_in ( [ x , "depth" ] , stats ) for x... | Check if the VCF record is a het in the normal with sufficient support . |
29,933 | def _has_population_germline ( rec ) : for k in population_keys : if k in rec . header . info : return True return False | Check if header defines population annotated germline samples for tumor only . |
29,934 | def is_population_germline ( rec ) : min_count = 50 for k in population_keys : if k in rec . info : val = rec . info . get ( k ) if "," in val : val = val . split ( "," ) [ 0 ] if isinstance ( val , ( list , tuple ) ) : val = max ( val ) if int ( val ) > min_count : return True return False | Identify germline calls based on annotations with ExAC or other population databases . |
29,935 | def sample_alt_and_depth ( rec , sample ) : if sample and "AD" in sample : all_counts = [ int ( x ) for x in sample [ "AD" ] ] alt_counts = sum ( all_counts [ 1 : ] ) depth = sum ( all_counts ) elif sample and "AO" in sample and sample . get ( "RO" ) is not None : alts = sample [ "AO" ] if not isinstance ( alts , ( lis... | Flexibly get ALT allele and depth counts handling FreeBayes MuTect and other cases . |
29,936 | def fasta_idx ( in_file , config = None ) : fasta_index = in_file + ".fai" if not utils . file_exists ( fasta_index ) : samtools = config_utils . get_program ( "samtools" , config ) if config else "samtools" cmd = "{samtools} faidx {in_file}" do . run ( cmd . format ( ** locals ( ) ) , "samtools faidx" ) return fasta_i... | Retrieve samtools style fasta index . |
29,937 | def file_contigs ( ref_file , config = None ) : ContigInfo = collections . namedtuple ( "ContigInfo" , "name size" ) with open ( fasta_idx ( ref_file , config ) ) as in_handle : for line in ( l for l in in_handle if l . strip ( ) ) : name , size = line . split ( ) [ : 2 ] yield ContigInfo ( name , int ( size ) ) | Iterator of reference contigs and lengths from a reference file . |
29,938 | def run ( align_bams , items , ref_file , assoc_files , region = None , out_file = None ) : paired = vcfutils . get_paired_bams ( align_bams , items ) assert paired and not paired . normal_bam , ( "smCounter2 supports tumor-only variant calling: %s" % ( "," . join ( [ dd . get_sample_name ( d ) for d in items ] ) ) ) v... | Run tumor only smCounter2 calling . |
29,939 | def number_of_mapped_reads ( data , bam_file , keep_dups = True , bed_file = None , target_name = None ) : callable_flags = [ "not unmapped" , "not mate_is_unmapped" , "not secondary_alignment" , "not failed_quality_control" ] if keep_dups : query_flags = callable_flags flag = 780 else : query_flags = callable_flags + ... | Count mapped reads allow adjustment for duplicates and BED regions . |
29,940 | def _simple_lock ( f ) : lock_file = f + ".lock" timeout = 20 curtime = 0 interval = 2 while os . path . exists ( lock_file ) : time . sleep ( interval ) curtime += interval if curtime > timeout : os . remove ( lock_file ) with open ( lock_file , "w" ) as out_handle : out_handle . write ( "locked" ) yield if os . path ... | Simple file lock times out after 20 seconds assuming lock is stale |
29,941 | def get_max_counts ( samples ) : counts = [ ] for data in ( x [ 0 ] for x in samples ) : count = tz . get_in ( [ "config" , "algorithm" , "callable_count" ] , data , 1 ) vcs = tz . get_in ( [ "config" , "algorithm" , "variantcaller" ] , data , [ ] ) if isinstance ( vcs , six . string_types ) : vcs = [ vcs ] if vcs : co... | Retrieve number of regions that can be processed in parallel from current samples . |
29,942 | def _split_by_regions ( dirname , out_ext , in_key ) : def _do_work ( data ) : regions = _get_parallel_regions ( data ) def _sort_by_size ( region ) : _ , start , end = region return end - start regions . sort ( key = _sort_by_size , reverse = True ) bam_file = data [ in_key ] if bam_file is None : return None , [ ] pa... | Split a BAM file data analysis into chromosomal regions . |
29,943 | def _get_parallel_regions ( data ) : callable_regions = tz . get_in ( [ "config" , "algorithm" , "callable_regions" ] , data ) if not callable_regions : raise ValueError ( "Did not find any callable regions for sample: %s\n" "Check 'align/%s/*-callableblocks.bed' and 'regions' to examine callable regions" % ( dd . get_... | Retrieve regions to run in parallel putting longest intervals first . |
29,944 | def get_parallel_regions ( batch ) : samples = [ utils . to_single_data ( d ) for d in batch ] regions = _get_parallel_regions ( samples [ 0 ] ) return [ { "region" : "%s:%s-%s" % ( c , s , e ) } for c , s , e in regions ] | CWL target to retrieve a list of callable regions for parallelization . |
29,945 | def get_parallel_regions_block ( batch ) : samples = [ utils . to_single_data ( d ) for d in batch ] regions = _get_parallel_regions ( samples [ 0 ] ) out = [ ] n = 10 for region_block in tz . partition_all ( n , regions ) : out . append ( { "region_block" : [ "%s:%s-%s" % ( c , s , e ) for c , s , e in region_block ] ... | CWL target to retrieve block group of callable regions for parallelization . |
29,946 | def _add_combine_info ( output , combine_map , file_key ) : files_per_output = collections . defaultdict ( list ) for part_file , out_file in combine_map . items ( ) : files_per_output [ out_file ] . append ( part_file ) out_by_file = collections . defaultdict ( list ) out = [ ] for data in output : if data [ "region" ... | Do not actually combine but add details for later combining work . |
29,947 | def parallel_prep_region ( samples , run_parallel ) : file_key = "work_bam" split_fn = _split_by_regions ( "bamprep" , "-prep.bam" , file_key ) extras = [ ] torun = [ ] for data in [ x [ 0 ] for x in samples ] : if data . get ( "work_bam" ) : data [ "align_bam" ] = data [ "work_bam" ] if ( not dd . get_realign ( data )... | Perform full pre - variant calling BAM prep work on regions . |
29,948 | def delayed_bamprep_merge ( samples , run_parallel ) : if any ( "combine" in data [ 0 ] for data in samples ) : return run_parallel ( "delayed_bam_merge" , samples ) else : return samples | Perform a delayed merge on regional prepared BAM files . |
29,949 | def clean_sample_data ( samples ) : out = [ ] for data in ( utils . to_single_data ( x ) for x in samples ) : if "dirs" in data : data [ "dirs" ] = { "work" : data [ "dirs" ] [ "work" ] , "galaxy" : data [ "dirs" ] [ "galaxy" ] , "fastq" : data [ "dirs" ] . get ( "fastq" ) } data [ "config" ] = { "algorithm" : data [ "... | Clean unnecessary information from sample data reducing size for message passing . |
29,950 | def _add_sj_index_commands ( fq1 , ref_file , gtf_file ) : if _has_sj_index ( ref_file ) : return "" else : rlength = fastq . estimate_maximum_read_length ( fq1 ) cmd = " --sjdbGTFfile %s " % gtf_file cmd += " --sjdbOverhang %s " % str ( rlength - 1 ) return cmd | newer versions of STAR can generate splice junction databases on the fly this is preferable since we can tailor it to the read lengths |
29,951 | def _has_sj_index ( ref_file ) : return ( file_exists ( os . path . join ( ref_file , "sjdbInfo.txt" ) ) and ( file_exists ( os . path . join ( ref_file , "transcriptInfo.tab" ) ) ) ) | this file won t exist if we can do on the fly splice junction indexing |
29,952 | def remap_index_fn ( ref_file ) : return os . path . join ( os . path . dirname ( os . path . dirname ( ref_file ) ) , "star" ) | Map sequence references to equivalent star indexes |
29,953 | def index ( ref_file , out_dir , data ) : ( ref_dir , local_file ) = os . path . split ( ref_file ) gtf_file = dd . get_gtf_file ( data ) if not utils . file_exists ( gtf_file ) : raise ValueError ( "%s not found, could not create a star index." % ( gtf_file ) ) if not utils . file_exists ( out_dir ) : with tx_tmpdir (... | Create a STAR index in the defined reference directory . |
29,954 | def get_splicejunction_file ( out_dir , data ) : samplename = dd . get_sample_name ( data ) sjfile = os . path . join ( out_dir , os . pardir , "{0}SJ.out.tab" ) . format ( samplename ) if file_exists ( sjfile ) : return sjfile else : return None | locate the splicejunction file starting from the alignment directory |
29,955 | def junction2bed ( junction_file ) : base , _ = os . path . splitext ( junction_file ) out_file = base + "-minimized.bed" if file_exists ( out_file ) : return out_file if not file_exists ( junction_file ) : return None with file_transaction ( out_file ) as tx_out_file : with open ( junction_file ) as in_handle : with o... | reformat the STAR junction file to BED3 format one end of the splice junction per line |
29,956 | def run ( data ) : hlas = [ ] for hla_fq in tz . get_in ( [ "hla" , "fastq" ] , data , [ ] ) : hla_type = re . search ( "[.-](?P<hlatype>HLA-[\w-]+).fq" , hla_fq ) . group ( "hlatype" ) if hla_type in SUPPORTED_HLAS : if utils . file_exists ( hla_fq ) : hlas . append ( ( hla_type , hla_fq ) ) if len ( hlas ) > 0 : out_... | HLA typing with OptiType parsing output from called genotype files . |
29,957 | def combine_hla_fqs ( hlas , out_file , data ) : if not utils . file_exists ( out_file ) : with file_transaction ( data , out_file ) as tx_out_file : with open ( tx_out_file , "w" ) as out_handle : for hla_type , hla_fq in hlas : if utils . file_exists ( hla_fq ) : with open ( hla_fq ) as in_handle : shutil . copyfileo... | OptiType performs best on a combination of all extracted HLAs . |
29,958 | def _prepare_calls ( result_file , out_dir , data ) : sample = dd . get_sample_name ( data ) out_file = os . path . join ( out_dir , "%s-optitype.csv" % ( sample ) ) if not utils . file_uptodate ( out_file , result_file ) : hla_truth = bwakit . get_hla_truthset ( data ) with file_transaction ( data , out_file ) as tx_o... | Write summary file of results of HLA typing by allele . |
29,959 | def _call_hla ( hla_fq , out_dir , data ) : bin_dir = os . path . dirname ( os . path . realpath ( sys . executable ) ) out_dir = utils . safe_makedir ( out_dir ) with tx_tmpdir ( data , os . path . dirname ( out_dir ) ) as tx_out_dir : config_file = os . path . join ( tx_out_dir , "config.ini" ) with open ( config_fil... | Run OptiType HLA calling for a specific fastq input . |
29,960 | def is_autosomal ( chrom ) : try : int ( chrom ) return True except ValueError : try : int ( str ( chrom . lower ( ) . replace ( "chr" , "" ) . replace ( "_" , "" ) . replace ( "-" , "" ) ) ) return True except ValueError : return False | Keep chromosomes that are a digit 1 - 22 or chr prefixed digit chr1 - chr22 |
29,961 | def _bcftools_stats ( data , out_dir , vcf_file_key = None , germline = False ) : vcinfo = get_active_vcinfo ( data ) if vcinfo : out_dir = utils . safe_makedir ( out_dir ) vcf_file = vcinfo [ vcf_file_key or "vrn_file" ] if dd . get_jointcaller ( data ) or "gvcf" in dd . get_tools_on ( data ) : opts = "" else : opts =... | Run bcftools stats . |
29,962 | def _add_filename_details ( full_f ) : out = { "vrn_file" : full_f } f = os . path . basename ( full_f ) for vc in list ( genotype . get_variantcallers ( ) . keys ( ) ) + [ "ensemble" ] : if f . find ( "-%s.vcf" % vc ) > 0 : out [ "variantcaller" ] = vc if f . find ( "-germline-" ) >= 0 : out [ "germline" ] = full_f re... | Add variant callers and germline information standard CWL filenames . |
29,963 | def _get_variants ( data ) : active_vs = [ ] if "variants" in data : variants = data [ "variants" ] if isinstance ( variants , dict ) and "samples" in variants : variants = variants [ "samples" ] for v in variants : if isinstance ( v , six . string_types ) and os . path . exists ( v ) : active_vs . append ( _add_filena... | Retrieve variants from CWL and standard inputs for organizing variants . |
29,964 | def get_active_vcinfo ( data , use_ensemble = True ) : active_vs = _get_variants ( data ) if len ( active_vs ) > 0 : e_active_vs = [ ] if use_ensemble : e_active_vs = [ v for v in active_vs if v . get ( "variantcaller" ) == "ensemble" ] if len ( e_active_vs ) == 0 : e_active_vs = [ v for v in active_vs if v . get ( "va... | Use first caller if ensemble is not active |
29,965 | def extract_germline_vcinfo ( data , out_dir ) : supported_germline = set ( [ "vardict" , "octopus" , "freebayes" ] ) if dd . get_phenotype ( data ) in [ "tumor" ] : for v in _get_variants ( data ) : if v . get ( "variantcaller" ) in supported_germline : if v . get ( "germline" ) : return v else : d = utils . deepish_c... | Extract germline VCFs from existing tumor inputs . |
29,966 | def merge_bam_files ( bam_files , work_dir , data , out_file = None , batch = None ) : out_file = _merge_outfile_fname ( out_file , bam_files , work_dir , batch ) if not utils . file_exists ( out_file ) : if len ( bam_files ) == 1 and bam . bam_already_sorted ( bam_files [ 0 ] , data [ "config" ] , "coordinate" ) : wit... | Merge multiple BAM files from a sample into a single BAM for processing . |
29,967 | def _create_merge_filelist ( bam_files , base_file , config ) : bam_file_list = "%s.list" % os . path . splitext ( base_file ) [ 0 ] samtools = config_utils . get_program ( "samtools" , config ) with open ( bam_file_list , "w" ) as out_handle : for f in sorted ( bam_files ) : do . run ( '{} quickcheck -v {}' . format (... | Create list of input files for merge ensuring all files are valid . |
29,968 | def _merge_outfile_fname ( out_file , bam_files , work_dir , batch ) : if out_file is None : out_file = os . path . join ( work_dir , os . path . basename ( sorted ( bam_files ) [ 0 ] ) ) if batch is not None : base , ext = os . path . splitext ( out_file ) out_file = "%s-b%s%s" % ( base , batch , ext ) return out_file | Derive correct name of BAM file based on batching . |
29,969 | def _finalize_merge ( out_file , bam_files , config ) : for ext in [ "" , ".bai" ] : if os . path . exists ( out_file + ext ) : subprocess . check_call ( [ "touch" , out_file + ext ] ) for b in bam_files : utils . save_diskspace ( b , "BAM merged to %s" % out_file , config ) | Handle indexes and cleanups of merged BAM and input files . |
29,970 | def _cwl_workflow_template ( inputs , top_level = False ) : ready_inputs = [ ] for inp in inputs : cur_inp = copy . deepcopy ( inp ) for attr in [ "source" , "valueFrom" , "wf_duplicate" ] : cur_inp . pop ( attr , None ) if top_level : cur_inp = workflow . _flatten_nested_input ( cur_inp ) cur_inp = _clean_record ( cur... | Retrieve CWL inputs shared amongst different workflows . |
29,971 | def _get_disk_estimates ( name , parallel , inputs , file_estimates , samples , disk , cur_remotes , no_files ) : tmp_disk , out_disk , in_disk = 0 , 0 , 0 if file_estimates : if disk : for key , multiplier in disk . items ( ) : if key in file_estimates : out_disk += int ( multiplier * file_estimates [ key ] ) for inp ... | Retrieve disk usage estimates as CWL ResourceRequirement and hint . |
29,972 | def _add_current_quay_tag ( repo , container_tags ) : if ':' in repo : return repo , container_tags try : latest_tag = container_tags [ repo ] except KeyError : repo_id = repo [ repo . find ( '/' ) + 1 : ] tags = requests . request ( "GET" , "https://quay.io/api/v1/repository/" + repo_id ) . json ( ) [ "tags" ] latest_... | Lookup the current quay tag for the repository adding to repo string . |
29,973 | def _write_expressiontool ( step_dir , name , inputs , outputs , expression , parallel ) : out_file = os . path . join ( step_dir , "%s.cwl" % name ) out = { "class" : "ExpressionTool" , "cwlVersion" : "v1.0" , "requirements" : [ { "class" : "InlineJavascriptRequirement" } ] , "inputs" : [ ] , "outputs" : [ ] , "expres... | Create an ExpressionTool output for the given inputs |
29,974 | def _clean_record ( rec ) : if workflow . is_cwl_record ( rec ) : def _clean_fields ( d ) : if isinstance ( d , dict ) : if "fields" in d : out = [ ] for f in d [ "fields" ] : f = utils . deepish_copy ( f ) f . pop ( "secondaryFiles" , None ) out . append ( f ) d [ "fields" ] = out return d else : out = { } for k , v i... | Remove secondary files from record fields which are currently not supported . |
29,975 | def _get_record_fields ( d ) : if isinstance ( d , dict ) : if "fields" in d : return d [ "fields" ] else : for v in d . values ( ) : fields = _get_record_fields ( v ) if fields : return fields | Get field names from a potentially nested record . |
29,976 | def _get_sentinel_val ( v ) : out = workflow . get_base_id ( v [ "id" ] ) if workflow . is_cwl_record ( v ) : out += ":%s" % ";" . join ( [ x [ "name" ] for x in _get_record_fields ( v ) ] ) return out | Retrieve expected sentinel value for an output expanding records . |
29,977 | def _place_input_binding ( inp_tool , inp_binding , parallel ) : if ( parallel in [ "multi-combined" , "multi-batch" , "batch-split" , "batch-parallel" , "batch-merge" , "batch-single" ] and tz . get_in ( [ "type" , "type" ] , inp_tool ) == "array" ) : inp_tool [ "type" ] [ "inputBinding" ] = inp_binding else : inp_too... | Check nesting of variables to determine where to place the input binding . |
29,978 | def _place_secondary_files ( inp_tool , inp_binding = None ) : def _is_file ( val ) : return ( val == "File" or ( isinstance ( val , ( list , tuple ) ) and ( "File" in val or any ( isinstance ( x , dict ) and _is_file ( val ) ) for x in val ) ) ) secondary_files = inp_tool . pop ( "secondaryFiles" , None ) if secondary... | Put secondaryFiles at the level of the File item to ensure indexes get passed . |
29,979 | def _do_scatter_var ( v , parallel ) : if parallel . startswith ( "batch" ) and workflow . is_cwl_record ( v ) : return ( tz . get_in ( [ "type" , "type" ] , v ) == "array" and tz . get_in ( [ "type" , "type" , "type" ] , v ) == "array" ) else : return ( tz . get_in ( [ "type" , "type" ] , v ) == "array" ) | Logic for scattering a variable . |
29,980 | def _step_template ( name , run_file , inputs , outputs , parallel , step_parallelism , scatter = None ) : scatter_inputs = [ ] sinputs = [ ] for inp in inputs : step_inp = { "id" : workflow . get_base_id ( inp [ "id" ] ) , "source" : inp [ "id" ] } if inp . get ( "wf_duplicate" ) : step_inp [ "id" ] += "_toolinput" fo... | Templating function for writing a step to avoid repeating namespaces . |
29,981 | def _get_cur_remotes ( path ) : cur_remotes = set ( [ ] ) if isinstance ( path , ( list , tuple ) ) : for v in path : cur_remotes |= _get_cur_remotes ( v ) elif isinstance ( path , dict ) : for v in path . values ( ) : cur_remotes |= _get_cur_remotes ( v ) elif path and isinstance ( path , six . string_types ) : if pat... | Retrieve remote references defined in the CWL . |
29,982 | def _flatten_samples ( samples , base_file , get_retriever ) : flat_data = [ ] for data in samples : data [ "reference" ] = _indexes_to_secondary_files ( data [ "reference" ] , data [ "genome_build" ] ) cur_flat = { } for key_path in [ [ "analysis" ] , [ "description" ] , [ "rgnames" ] , [ "config" , "algorithm" ] , [ ... | Create a flattened JSON representation of data from the bcbio world map . |
29,983 | def _indexes_to_secondary_files ( gresources , genome_build ) : out = { } for refname , val in gresources . items ( ) : if isinstance ( val , dict ) and "indexes" in val : if len ( val . keys ( ) ) == 1 : indexes = sorted ( val [ "indexes" ] ) if len ( indexes ) == 0 : if refname not in alignment . allow_noindices ( ) ... | Convert a list of genome indexes into a single file plus secondary files . |
29,984 | def _add_suppl_info ( inp , val ) : inp [ "type" ] = _get_avro_type ( val ) secondary = _get_secondary_files ( val ) if secondary : inp [ "secondaryFiles" ] = secondary return inp | Add supplementary information to inputs from file values . |
29,985 | def _get_secondary_files ( val ) : out = [ ] if isinstance ( val , ( tuple , list ) ) : s_counts = collections . defaultdict ( int ) for x in val : for s in _get_secondary_files ( x ) : s_counts [ s ] += 1 for s , count in s_counts . items ( ) : if s and s not in out and count == len ( [ x for x in val if x ] ) : out .... | Retrieve associated secondary files . |
29,986 | def _get_relative_ext ( of , sf ) : def half_finished_trim ( orig , prefix ) : return ( os . path . basename ( prefix ) . count ( "." ) > 0 and os . path . basename ( orig ) . count ( "." ) == os . path . basename ( prefix ) . count ( "." ) ) if of . find ( ":" ) > 0 : of = os . path . basename ( of . split ( ":" ) [ -... | Retrieve relative extension given the original and secondary files . |
29,987 | def _get_avro_type ( val ) : if isinstance ( val , dict ) : assert val . get ( "class" ) == "File" or "File" in val . get ( "class" ) return "File" elif isinstance ( val , ( tuple , list ) ) : types = [ ] for ctype in [ _get_avro_type ( v ) for v in val ] : if isinstance ( ctype , dict ) : nested_types = [ x [ "items" ... | Infer avro type for the current input . |
29,988 | def _avoid_duplicate_arrays ( types ) : arrays = [ t for t in types if isinstance ( t , dict ) and t [ "type" ] == "array" ] others = [ t for t in types if not ( isinstance ( t , dict ) and t [ "type" ] == "array" ) ] if arrays : items = set ( [ ] ) for t in arrays : if isinstance ( t [ "items" ] , ( list , tuple ) ) :... | Collapse arrays when we have multiple types . |
29,989 | def _samplejson_to_inputs ( svals ) : out = [ ] for key , val in svals . items ( ) : out . append ( _add_suppl_info ( { "id" : "%s" % key } , val ) ) return out | Convert sample output into inputs for CWL configuration files with types . |
29,990 | def _to_cwldata ( key , val , get_retriever ) : out = [ ] if isinstance ( val , dict ) : if len ( val ) == 2 and "base" in val and "indexes" in val : if len ( val [ "indexes" ] ) > 0 and val [ "base" ] == val [ "indexes" ] [ 0 ] : out . append ( ( "%s__indexes" % key , _item_to_cwldata ( val [ "base" ] , get_retriever ... | Convert nested dictionary into CWL data flatening and marking up files . |
29,991 | def _to_cwlfile_with_indexes ( val , get_retriever ) : val [ "indexes" ] = _index_blacklist ( val [ "indexes" ] ) tval = { "base" : _remove_remote_prefix ( val [ "base" ] ) , "indexes" : [ _remove_remote_prefix ( f ) for f in val [ "indexes" ] ] } cp_dir , cp_base = os . path . split ( os . path . commonprefix ( [ tval... | Convert reads with ready to go indexes into the right CWL object . |
29,992 | def _add_secondary_if_exists ( secondary , out , get_retriever ) : secondary = [ _file_local_or_remote ( y , get_retriever ) for y in secondary ] secondary = [ z for z in secondary if z ] if secondary : out [ "secondaryFiles" ] = [ { "class" : "File" , "path" : f } for f in secondary ] return out | Add secondary files only if present locally or remotely . |
29,993 | def _item_to_cwldata ( x , get_retriever , indexes = None ) : if isinstance ( x , ( list , tuple ) ) : return [ _item_to_cwldata ( subx , get_retriever ) for subx in x ] elif ( x and isinstance ( x , six . string_types ) and ( ( ( os . path . isfile ( x ) or os . path . isdir ( x ) ) and os . path . exists ( x ) ) or o... | Markup an item with CWL specific metadata . |
29,994 | def _file_local_or_remote ( f , get_retriever ) : if os . path . exists ( f ) : return f integration , config = get_retriever . integration_and_config ( f ) if integration : return integration . file_exists ( f , config ) | Check for presence of a local or remote file . |
29,995 | def directory_tarball ( dirname ) : assert os . path . isdir ( dirname ) , dirname base_dir , tarball_dir = os . path . split ( dirname ) while not os . path . exists ( os . path . join ( base_dir , "seq" ) ) and base_dir and base_dir != "/" : base_dir , extra_tarball = os . path . split ( base_dir ) tarball_dir = os .... | Create a tarball of a complex directory avoiding complex secondaryFiles . |
29,996 | def _calc_input_estimates ( keyvals , get_retriever ) : out = { } for key , val in keyvals . items ( ) : size = _calc_file_size ( val , 0 , get_retriever ) if size : out [ key ] = size return out | Calculate estimations of input file sizes for disk usage approximation . |
29,997 | def _get_file_size ( path , get_retriever ) : integration , config = get_retriever . integration_and_config ( path ) if integration : return integration . file_size ( path , config ) elif os . path . exists ( path ) : return os . path . getsize ( path ) / ( 1024.0 * 1024.0 ) | Return file size in megabytes including querying remote integrations |
29,998 | def integration_and_config ( self , path ) : if path . startswith ( tuple ( INTEGRATION_MAP . keys ( ) ) ) : key = INTEGRATION_MAP [ path . split ( ":" ) [ 0 ] + ":" ] integration = self . _integrations . get ( key ) config = { } for sample in self . _samples : config = tz . get_in ( [ "config" , key ] , sample ) if co... | Get a retriever and configuration for the given file path . |
29,999 | def make_scrnaseq_object ( samples ) : local_sitelib = R_sitelib ( ) counts_dir = os . path . dirname ( dd . get_in_samples ( samples , dd . get_combined_counts ) ) gtf_file = dd . get_in_samples ( samples , dd . get_transcriptome_gtf ) if not gtf_file : gtf_file = dd . get_in_samples ( samples , dd . get_gtf_file ) rd... | load the initial se . rda object using sinclecell - experiment |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.