idx int64 0 63k | question stringlengths 61 4.03k | target stringlengths 6 1.23k |
|---|---|---|
29,700 | def rbind ( dfs ) : if len ( dfs ) == 1 : return dfs [ 0 ] df = dfs [ 0 ] for d in dfs [ 1 : ] : df = df . append ( d ) return df | acts like rbind for pandas dataframes |
29,701 | def sort_filenames ( filenames ) : basenames = [ os . path . basename ( x ) for x in filenames ] indexes = [ i [ 0 ] for i in sorted ( enumerate ( basenames ) , key = lambda x : x [ 1 ] ) ] return [ filenames [ x ] for x in indexes ] | sort a list of files by filename only ignoring the directory names |
29,702 | def walk_json ( d , func ) : if isinstance ( d , Mapping ) : return OrderedDict ( ( k , walk_json ( v , func ) ) for k , v in d . items ( ) ) elif isinstance ( d , list ) : return [ walk_json ( v , func ) for v in d ] else : return func ( d ) | Walk over a parsed JSON nested structure d apply func to each leaf element and replace it with result |
29,703 | def _link_bam_file ( in_file , new_dir , data ) : new_dir = utils . safe_makedir ( new_dir ) out_file = os . path . join ( new_dir , os . path . basename ( in_file ) ) if not utils . file_exists ( out_file ) : out_file = os . path . join ( new_dir , "%s-prealign.bam" % dd . get_sample_name ( data ) ) if data . get ( "c... | Provide symlinks of BAM file and existing indexes if needed . |
29,704 | def _add_supplemental_bams ( data ) : file_key = "work_bam" if data . get ( file_key ) : for supext in [ "disc" , "sr" ] : base , ext = os . path . splitext ( data [ file_key ] ) test_file = "%s-%s%s" % ( base , supext , ext ) if os . path . exists ( test_file ) : sup_key = file_key + "_plus" if sup_key not in data : d... | Add supplemental files produced by alignment useful for structural variant calling . |
29,705 | def _add_hla_files ( data ) : if "hla" not in data : data [ "hla" ] = { } align_file = dd . get_align_bam ( data ) hla_dir = os . path . join ( os . path . dirname ( align_file ) , "hla" ) if not os . path . exists ( hla_dir ) : hla_files = None else : hla_files = sorted ( list ( glob . glob ( os . path . join ( hla_di... | Add extracted fastq files of HLA alleles for typing . |
29,706 | def prep_samples ( * items ) : out = [ ] for data in ( utils . to_single_data ( x ) for x in items ) : data = cwlutils . normalize_missing ( data ) data = cwlutils . unpack_tarballs ( data , data ) data = clean_inputs ( data ) out . append ( [ data ] ) return out | Handle any global preparatory steps for samples with potentially shared data . |
29,707 | def clean_inputs ( data ) : if not utils . get_in ( data , ( "config" , "algorithm" , "variant_regions_orig" ) ) : data [ "config" ] [ "algorithm" ] [ "variant_regions_orig" ] = dd . get_variant_regions ( data ) clean_vr = clean_file ( dd . get_variant_regions ( data ) , data , prefix = "cleaned-" ) merged_vr = merge_o... | Clean BED input files to avoid overlapping segments that cause downstream issues . |
29,708 | def postprocess_alignment ( data ) : data = cwlutils . normalize_missing ( utils . to_single_data ( data ) ) data = cwlutils . unpack_tarballs ( data , data ) bam_file = data . get ( "align_bam" ) or data . get ( "work_bam" ) ref_file = dd . get_ref_file ( data ) if vmulti . bam_needs_processing ( data ) and bam_file a... | Perform post - processing steps required on full BAM files . Prepares list of callable genome regions allowing subsequent parallelization . |
29,709 | def _merge_out_from_infiles ( in_files ) : fname = os . path . commonprefix ( [ os . path . basename ( f ) for f in in_files ] ) while fname . endswith ( ( "-" , "_" , "." ) ) : fname = fname [ : - 1 ] ext = os . path . splitext ( in_files [ 0 ] ) [ - 1 ] dirname = os . path . dirname ( in_files [ 0 ] ) while dirname .... | Generate output merged file name from set of input files . |
29,710 | def delayed_bam_merge ( data ) : if data . get ( "combine" ) : assert len ( data [ "combine" ] . keys ( ) ) == 1 file_key = list ( data [ "combine" ] . keys ( ) ) [ 0 ] extras = [ ] for x in data [ "combine" ] [ file_key ] . get ( "extras" , [ ] ) : if isinstance ( x , ( list , tuple ) ) : extras . extend ( x ) else : ... | Perform a merge on previously prepped files delayed in processing . |
29,711 | def merge_split_alignments ( data ) : data = utils . to_single_data ( data ) data = _merge_align_bams ( data ) data = _merge_hla_fastq_inputs ( data ) return [ [ data ] ] | Merge split BAM inputs generated by common workflow language runs . |
29,712 | def _merge_align_bams ( data ) : for key in ( [ "work_bam" ] , [ "work_bam_plus" , "disc" ] , [ "work_bam_plus" , "sr" ] , [ "umi_bam" ] ) : in_files = tz . get_in ( key , data , [ ] ) if not isinstance ( in_files , ( list , tuple ) ) : in_files = [ in_files ] in_files = [ x for x in in_files if x and x != "None" ] if ... | Merge multiple alignment BAMs including split and discordant reads . |
29,713 | def _merge_hla_fastq_inputs ( data ) : hla_key = [ "hla" , "fastq" ] hla_sample_files = [ x for x in ( tz . get_in ( hla_key , data ) or [ ] ) if x and x != "None" ] merged_hlas = None if hla_sample_files : out_files = collections . defaultdict ( list ) for hla_file in utils . flatten ( hla_sample_files ) : rehla = re ... | Merge HLA inputs from a split initial alignment . |
29,714 | def prepare_bcbio_samples ( sample ) : logger . info ( "Preparing %s files %s to merge into %s." % ( sample [ 'name' ] , sample [ 'files' ] , sample [ 'out_file' ] ) ) if sample [ 'fn' ] == "fq_merge" : out_file = fq_merge ( sample [ 'files' ] , sample [ 'out_file' ] , sample [ 'config' ] ) elif sample [ 'fn' ] == "bam... | Function that will use specific function to merge input files |
29,715 | def _get_calls ( data , cnv_only = False ) : cnvs_supported = set ( [ "cnvkit" , "battenberg" ] ) out = { } for sv in data . get ( "sv" , [ ] ) : if not cnv_only or sv [ "variantcaller" ] in cnvs_supported : out [ sv [ "variantcaller" ] ] = sv return out | Retrieve calls organized by name to use for heterogeneity analysis . |
29,716 | def get_variants ( data , include_germline = False ) : data = utils . deepish_copy ( data ) supported = [ "precalled" , "vardict" , "vardict-java" , "vardict-perl" , "freebayes" , "octopus" , "strelka2" ] if include_germline : supported . insert ( 1 , "gatk-haplotype" ) out = [ ] if isinstance ( data . get ( "variants"... | Retrieve set of variant calls to use for heterogeneity analysis . |
29,717 | def _ready_for_het_analysis ( items ) : paired = vcfutils . get_paired_bams ( [ dd . get_align_bam ( d ) for d in items ] , items ) has_het = any ( dd . get_hetcaller ( d ) for d in items ) if has_het and paired : return get_variants ( paired . tumor_data ) and _get_calls ( paired . tumor_data , cnv_only = True ) | Check if a sample has input information for heterogeneity analysis . |
29,718 | def run ( items , run_parallel ) : to_process = [ ] extras = [ ] for batch , cur_items in _group_by_batches ( items ) . items ( ) : if _ready_for_het_analysis ( cur_items ) : to_process . append ( ( batch , cur_items ) ) else : for data in cur_items : extras . append ( [ data ] ) processed = run_parallel ( "heterogenei... | Top level entry point for calculating heterogeneity handles organization and job distribution . |
29,719 | def create_inputs ( data ) : from bcbio . pipeline import sample data = cwlutils . normalize_missing ( data ) aligner = tz . get_in ( ( "config" , "algorithm" , "aligner" ) , data ) if not ( "files" in data and data [ "files" ] and aligner and ( _is_cram_input ( data [ "files" ] ) or objectstore . is_remote ( data [ "f... | Index input reads and prepare groups of reads to process concurrently . |
29,720 | def _set_align_split_size ( data ) : if cwlutils . is_cwl_run ( data ) : target_size = 20 target_size_reads = 80 else : target_size = 5 target_size_reads = 20 max_splits = 100 val = dd . get_align_split_size ( data ) umi_consensus = dd . get_umi_consensus ( data ) if val is None : if not umi_consensus : total_size = 0 ... | Set useful align_split_size generating an estimate if it doesn't exist . |
29,721 | def _pick_align_split_size ( total_size , target_size , target_size_reads , max_splits ) : if total_size // target_size > max_splits : piece_size = total_size // max_splits return int ( piece_size * target_size_reads / target_size ) else : return int ( target_size_reads ) | Do the work of picking an alignment split size for the given criteria . |
29,722 | def split_namedpipe_cls ( pair1_file , pair2_file , data ) : if "align_split" in data : start , end = [ int ( x ) for x in data [ "align_split" ] . split ( "-" ) ] else : start , end = None , None if pair1_file . endswith ( ".sdf" ) : assert not pair2_file , pair2_file return rtg . to_fastq_apipe_cl ( pair1_file , star... | Create a commandline suitable for use as a named pipe with reads in a given region . |
29,723 | def _seqtk_fastq_prep_cl ( data , in_file = None , read_num = 0 ) : needs_convert = dd . get_quality_format ( data ) . lower ( ) == "illumina" trim_ends = dd . get_trim_ends ( data ) seqtk = config_utils . get_program ( "seqtk" , data [ "config" ] ) if in_file : in_file = objectstore . cl_input ( in_file ) else : in_fi... | Provide a commandline for prep of fastq inputs with seqtk . |
29,724 | def fastq_convert_pipe_cl ( in_file , data ) : cmd = _seqtk_fastq_prep_cl ( data , in_file ) if not cmd : cat_cmd = "zcat" if in_file . endswith ( ".gz" ) else "cat" cmd = cat_cmd + " " + in_file return "<(%s)" % cmd | Create an anonymous pipe converting Illumina 1 . 3 - 1 . 7 to Sanger . |
29,725 | def parallel_multiplier ( items ) : multiplier = 1 for data in ( x [ 0 ] for x in items ) : if ( tz . get_in ( [ "config" , "algorithm" , "align_split_size" ] , data ) is not False and tz . get_in ( [ "algorithm" , "align_split_size" ] , data ) is not False ) : multiplier += 50 return multiplier | Determine if we will be parallelizing items during processing . |
29,726 | def setup_combine ( final_file , data ) : if "align_split" not in data : return final_file , data align_dir = os . path . dirname ( final_file ) base , ext = os . path . splitext ( os . path . basename ( final_file ) ) start , end = [ int ( x ) for x in data [ "align_split" ] . split ( "-" ) ] out_file = os . path . jo... | Setup the data and outputs to allow merging data back together . |
29,727 | def merge_split_alignments ( samples , run_parallel ) : ready = [ ] file_key = "work_bam" to_merge = collections . defaultdict ( list ) for data in ( xs [ 0 ] for xs in samples ) : if data . get ( "combine" ) : out_key = tz . get_in ( [ "combine" , file_key , "out" ] , data ) if not out_key : out_key = data [ "rgnames"... | Manage merging split alignments back into a final working BAM file . |
29,728 | def _save_fastq_space ( items ) : to_cleanup = { } for data in ( utils . to_single_data ( x ) for x in items ) : for fname in data . get ( "files" , [ ] ) : if os . path . realpath ( fname ) . startswith ( dd . get_work_dir ( data ) ) : to_cleanup [ fname ] = data [ "config" ] for fname , config in to_cleanup . items (... | Potentially save fastq space prior to merging since alignments done . |
29,729 | def total_reads_from_grabix ( in_file ) : gbi_file = _get_grabix_index ( in_file ) if gbi_file : with open ( gbi_file ) as in_handle : next ( in_handle ) num_lines = int ( next ( in_handle ) . strip ( ) ) assert num_lines % 4 == 0 , "Expected lines to be multiple of 4" return num_lines // 4 else : return 0 | Retrieve total reads in a fastq file from grabix index . |
29,730 | def _find_read_splits ( in_file , split_size ) : num_lines = total_reads_from_grabix ( in_file ) * 4 assert num_lines and num_lines > 0 , "Did not find grabix index reads: %s %s" % ( in_file , num_lines ) split_lines = split_size * 4 chunks = [ ] last = 1 for chunki in range ( num_lines // split_lines + min ( 1 , num_l... | Determine sections of fastq files to process in splits . |
29,731 | def _ready_gzip_fastq ( in_files , data , require_bgzip = False ) : all_gzipped = all ( [ not x or x . endswith ( ".gz" ) for x in in_files ] ) if require_bgzip and all_gzipped : all_gzipped = all ( [ not x or not _check_gzipped_input ( x , data ) [ 0 ] for x in in_files ] ) needs_convert = dd . get_quality_format ( da... | Check if we have gzipped fastq and don't need format conversion or splitting . |
29,732 | def prep_fastq_inputs ( in_files , data ) : if len ( in_files ) == 1 and _is_bam_input ( in_files ) : out = _bgzip_from_bam ( in_files [ 0 ] , data [ "dirs" ] , data ) elif len ( in_files ) == 1 and _is_cram_input ( in_files ) : out = _bgzip_from_cram ( in_files [ 0 ] , data [ "dirs" ] , data ) elif len ( in_files ) in... | Prepare bgzipped fastq inputs |
29,733 | def _symlink_or_copy_grabix ( in_file , out_file , data ) : if cwlutils . is_cwl_run ( data ) : if utils . file_exists ( in_file + ".gbi" ) : out_file = in_file else : utils . copy_plus ( in_file , out_file ) else : utils . symlink_plus ( in_file , out_file ) return out_file | We cannot symlink in CWL but may be able to use inputs or copy |
29,734 | def _prep_grabix_indexes ( in_files , data ) : if _ready_gzip_fastq ( in_files , data ) and ( not _ready_gzip_fastq ( in_files , data , require_bgzip = True ) or dd . get_align_split_size ( data ) is False ) : for in_file in in_files : if not utils . file_exists ( in_file + ".gbi" ) : with file_transaction ( data , in_... | Parallel preparation of grabix indexes for files . |
29,735 | def _bgzip_from_cram ( cram_file , dirs , data ) : import pybedtools region_file = ( tz . get_in ( [ "config" , "algorithm" , "variant_regions" ] , data ) if tz . get_in ( [ "config" , "algorithm" , "coverage_interval" ] , data ) in [ "regional" , "exome" , "amplicon" ] else None ) if region_file : regions = [ "%s:%s-%... | Create bgzipped fastq files from an input CRAM file in regions of interest . |
29,736 | def _bgzip_from_cram_sambamba ( cram_file , dirs , data ) : raise NotImplementedError ( "sambamba doesn't yet support retrieval from CRAM by BED file" ) region_file = ( tz . get_in ( [ "config" , "algorithm" , "variant_regions" ] , data ) if tz . get_in ( [ "config" , "algorithm" , "coverage_interval" ] , data ) in [ "... | Use sambamba to extract from CRAM via regions . |
29,737 | def _cram_to_fastq_regions ( regions , cram_file , dirs , data ) : base_name = utils . splitext_plus ( os . path . basename ( cram_file ) ) [ 0 ] work_dir = utils . safe_makedir ( os . path . join ( dirs [ "work" ] , "align_prep" , "%s-parts" % base_name ) ) fnames = run_multicore ( _cram_to_fastq_region , [ ( cram_fil... | Convert CRAM files to fastq potentially within sub regions . |
29,738 | def _cram_to_fastq_region ( cram_file , work_dir , base_name , region , data ) : ref_file = tz . get_in ( [ "reference" , "fasta" , "base" ] , data ) resources = config_utils . get_resources ( "bamtofastq" , data [ "config" ] ) cores = tz . get_in ( [ "config" , "algorithm" , "num_cores" ] , data , 1 ) max_mem = config... | Convert CRAM to fastq in a specified region . |
29,739 | def _bgzip_from_bam ( bam_file , dirs , data , is_retry = False , output_infix = '' ) : config = data [ "config" ] bamtofastq = config_utils . get_program ( "bamtofastq" , config ) resources = config_utils . get_resources ( "bamtofastq" , config ) cores = config [ "algorithm" ] . get ( "num_cores" , 1 ) max_mem = confi... | Create bgzipped fastq files from an input BAM file . |
29,740 | def _grabix_index ( data ) : in_file = data [ "bgzip_file" ] config = data [ "config" ] grabix = config_utils . get_program ( "grabix" , config ) gbi_file = _get_grabix_index ( in_file ) if not gbi_file or _is_partial_index ( gbi_file ) : if gbi_file : utils . remove_safe ( gbi_file ) else : gbi_file = in_file + ".gbi"... | Create grabix index of bgzip input file . |
29,741 | def _is_partial_index ( gbi_file ) : with open ( gbi_file ) as in_handle : for i , _ in enumerate ( in_handle ) : if i > 2 : return False return True | Check for truncated output since grabix doesn't write to a transactional directory . |
29,742 | def _bgzip_file ( finput , config , work_dir , needs_bgzip , needs_gunzip , needs_convert , data ) : if isinstance ( finput , six . string_types ) : in_file = finput else : assert not needs_convert , "Do not yet handle quality conversion with multiple inputs" return _bgzip_multiple_files ( finput , work_dir , data ) ou... | Handle bgzip of input file potentially gunzipping an existing file . |
29,743 | def _check_gzipped_input ( in_file , data ) : grabix = config_utils . get_program ( "grabix" , data [ "config" ] ) is_bgzip = subprocess . check_output ( [ grabix , "check" , in_file ] ) if is_bgzip . strip ( ) == "yes" : return False , False else : return True , True | Determine if a gzipped input file is blocked gzip or standard . |
29,744 | def run ( bam_file , data , fastqc_out ) : sentry_file = os . path . join ( fastqc_out , "fastqc_report.html" ) if not os . path . exists ( sentry_file ) : work_dir = os . path . dirname ( fastqc_out ) utils . safe_makedir ( work_dir ) ds_file = ( bam . downsample ( bam_file , data , 1e7 , work_dir = work_dir ) if data... | Run fastqc generating report in specified directory and parsing metrics . |
29,745 | def _get_module ( self , parser , module ) : dt = [ ] lines = parser . clean_data ( module ) header = lines [ 0 ] for data in lines [ 1 : ] : if data [ 0 ] . startswith ( "#" ) : header = data continue if data [ 0 ] . find ( "-" ) > - 1 : f , s = map ( int , data [ 0 ] . split ( "-" ) ) for pos in range ( f , s ) : dt ... | Get module using fadapa package |
29,746 | def start ( self ) : if not self . pipe is None : raise RuntimeError ( "Cannot start task twice" ) self . ioloop = tornado . ioloop . IOLoop . instance ( ) if self . timeout > 0 : self . expiration = self . ioloop . add_timeout ( time . time ( ) + self . timeout , self . on_timeout ) self . pipe = subprocess . Popen ( ... | Spawn the task . |
29,747 | def get_fc_date ( out_config_file ) : if os . path . exists ( out_config_file ) : with open ( out_config_file ) as in_handle : old_config = yaml . safe_load ( in_handle ) fc_date = old_config [ "fc_date" ] else : fc_date = datetime . datetime . now ( ) . strftime ( "%y%m%d" ) return fc_date | Retrieve flowcell date reusing older dates if refreshing a present workflow . |
29,748 | def draw_quality_plot ( db_file , plot_file , position_select , title ) : robjects . r . assign ( 'db.file' , db_file ) robjects . r . assign ( 'plot.file' , plot_file ) robjects . r . assign ( 'position.select' , position_select ) robjects . r . assign ( 'title' , title ) robjects . r ( ) | Draw a plot of remapped qualities using ggplot2 . |
29,749 | def _positions_to_examine ( db_file ) : conn = sqlite3 . connect ( db_file ) cursor = conn . cursor ( ) cursor . execute ( ) position = cursor . fetchone ( ) [ 0 ] if position is not None : position = int ( position ) cursor . close ( ) split_at = 50 if position is None : return [ ] elif position < split_at : return [ ... | Determine how to sub - divide recalibration analysis based on read length . |
29,750 | def _organize_by_position ( orig_file , cmp_file , chunk_size ) : with open ( orig_file ) as in_handle : reader1 = csv . reader ( in_handle ) positions = len ( next ( reader1 ) ) - 1 for positions in _chunks ( range ( positions ) , chunk_size ) : with open ( orig_file ) as orig_handle : with open ( cmp_file ) as cmp_ha... | Read two CSV files of qualities organizing values by position . |
29,751 | def _counts_at_position ( positions , orig_reader , cmp_reader ) : pos_counts = collections . defaultdict ( lambda : collections . defaultdict ( lambda : collections . defaultdict ( int ) ) ) for orig_parts in orig_reader : cmp_parts = next ( cmp_reader ) for pos in positions : try : pos_counts [ pos ] [ int ( orig_par... | Combine original and new qualities at each position generating counts . |
29,752 | def sort_csv ( in_file ) : out_file = "%s.sort" % in_file if not ( os . path . exists ( out_file ) and os . path . getsize ( out_file ) > 0 ) : cl = [ "sort" , "-k" , "1,1" , in_file ] with open ( out_file , "w" ) as out_handle : child = subprocess . Popen ( cl , stdout = out_handle ) child . wait ( ) return out_file | Sort a CSV file by read name allowing direct comparison . |
29,753 | def fastq_to_csv ( in_file , fastq_format , work_dir ) : out_file = "%s.csv" % ( os . path . splitext ( os . path . basename ( in_file ) ) [ 0 ] ) out_file = os . path . join ( work_dir , out_file ) if not ( os . path . exists ( out_file ) and os . path . getsize ( out_file ) > 0 ) : with open ( in_file ) as in_handle ... | Convert a fastq file into a CSV of phred quality scores . |
29,754 | def bam_to_fastq ( bam_file , is_paired ) : out_files , out_handles = _get_fastq_handles ( bam_file , is_paired ) if len ( out_handles ) > 0 : in_bam = pysam . Samfile ( bam_file , mode = 'rb' ) for read in in_bam : num = 1 if ( not read . is_paired or read . is_read1 ) else 2 if read . is_reverse : seq = str ( Seq . r... | Convert a BAM file to fastq files . |
29,755 | def run_latex_report ( base , report_dir , section_info ) : out_name = "%s_recal_plots.tex" % base out = os . path . join ( report_dir , out_name ) with open ( out , "w" ) as out_handle : out_tmpl = Template ( out_template ) out_handle . write ( out_tmpl . render ( sections = section_info ) ) start_dir = os . getcwd ( ... | Generate a pdf report with plots using latex . |
29,756 | def bam_needs_processing ( data ) : return ( ( data . get ( "work_bam" ) or data . get ( "align_bam" ) ) and ( any ( tz . get_in ( [ "config" , "algorithm" , x ] , data ) for x in [ "variantcaller" , "mark_duplicates" , "recalibrate" , "realign" , "svcaller" , "jointcaller" , "variant_regions" ] ) or any ( k in data fo... | Check if a work input needs processing for parallelization . |
29,757 | def get_batch_for_key ( data ) : batches = _get_batches ( data , require_bam = False ) if len ( batches ) == 1 : return batches [ 0 ] else : return tuple ( batches ) | Retrieve batch information useful as a unique key for the sample . |
29,758 | def _find_all_groups ( items , require_bam = True ) : all_groups = [ ] for data in items : batches = _get_batches ( data , require_bam ) all_groups . append ( batches ) return all_groups | Find all groups |
29,759 | def _get_representative_batch ( merged ) : out = { } for mgroup in merged : mgroup = sorted ( list ( mgroup ) ) for x in mgroup : out [ x ] = mgroup [ 0 ] return out | Prepare dictionary matching batch items to a representative within a group . |
29,760 | def _group_batches_shared ( xs , caller_batch_fn , prep_data_fn ) : singles = [ ] batch_groups = collections . defaultdict ( list ) for args in xs : data = utils . to_single_data ( args ) caller , batch = caller_batch_fn ( data ) region = _list_to_tuple ( data [ "region" ] ) if "region" in data else ( ) if batch is not... | Shared functionality for grouping by batches for variant calling and joint calling . |
29,761 | def group_batches ( xs ) : def _caller_batches ( data ) : caller = tz . get_in ( ( "config" , "algorithm" , "variantcaller" ) , data ) jointcaller = tz . get_in ( ( "config" , "algorithm" , "jointcaller" ) , data ) batch = tz . get_in ( ( "metadata" , "batch" ) , data ) if not jointcaller else None return caller , batc... | Group samples into batches for simultaneous variant calling . |
29,762 | def _collapse_subitems ( base , items ) : out = [ ] for d in items : newd = _diff_dict ( base , d ) out . append ( newd ) return out | Collapse full data representations relative to a standard base . |
29,763 | def _pick_lead_item ( items ) : if vcfutils . is_paired_analysis ( [ dd . get_align_bam ( x ) for x in items ] , items ) : for data in items : if vcfutils . get_paired_phenotype ( data ) == "tumor" : return data raise ValueError ( "Did not find tumor sample in paired tumor/normal calling" ) else : return items [ 0 ] | Pick single representative sample for batch calling to attach calls to . |
29,764 | def get_orig_items ( base ) : assert "group_orig" in base out = [ ] for data_diff in base [ "group_orig" ] : new = utils . deepish_copy ( base ) new . pop ( "group_orig" ) out . append ( _patch_dict ( data_diff , new ) ) return out | Retrieve original items from a diffed set of nested samples . |
29,765 | def _patch_dict ( diff , base ) : for k , v in diff . items ( ) : if isinstance ( v , dict ) : base [ k ] = _patch_dict ( v , base . get ( k , { } ) ) elif not v : base . pop ( k , None ) else : base [ k ] = v return base | Patch a dictionary substituting in changed items from the nested diff . |
29,766 | def split_variants_by_sample ( data ) : if "group_orig" not in data : return [ [ data ] ] elif ( vcfutils . get_paired_phenotype ( data ) and "tumor" in [ vcfutils . get_paired_phenotype ( d ) for d in get_orig_items ( data ) ] ) : out = [ ] for i , sub_data in enumerate ( get_orig_items ( data ) ) : if vcfutils . get_... | Split a multi - sample call file into inputs for individual samples . |
29,767 | def run ( call_file , ref_file , vrn_files , data ) : algs = [ data [ "config" ] [ "algorithm" ] ] * len ( data . get ( "vrn_files" , [ 1 ] ) ) if includes_missingalt ( data ) : logger . info ( "Removing variants with missing alts from %s." % call_file ) call_file = gatk_remove_missingalt ( call_file , data ) if "gatkc... | Run filtering on the input call file handling SNPs and indels separately . |
29,768 | def _cnn_filter ( in_file , vrn_files , data ) : tensor_type = "read_tensor" score_file = _cnn_score_variants ( in_file , tensor_type , data ) return _cnn_tranch_filtering ( score_file , vrn_files , tensor_type , data ) | Perform CNN filtering on input VCF using pre - trained models . |
29,769 | def _cnn_tranch_filtering ( in_file , vrn_files , tensor_type , data ) : out_file = "%s-filter.vcf.gz" % utils . splitext_plus ( in_file ) [ 0 ] if not utils . file_uptodate ( out_file , in_file ) : runner = broad . runner_from_config ( data [ "config" ] ) gatk_type = runner . gatk_type ( ) assert gatk_type == "gatk4" ... | Filter CNN scored VCFs in tranches using standard SNP and Indel truth sets . |
29,770 | def _cnn_score_variants ( in_file , tensor_type , data ) : out_file = "%s-cnnscore.vcf.gz" % utils . splitext_plus ( in_file ) [ 0 ] if not utils . file_uptodate ( out_file , in_file ) : runner = broad . runner_from_config ( data [ "config" ] ) gatk_type = runner . gatk_type ( ) assert gatk_type == "gatk4" , "CNN filte... | Score variants with pre - trained CNN models . |
29,771 | def _apply_vqsr ( in_file , ref_file , recal_file , tranch_file , sensitivity_cutoff , filter_type , data ) : base , ext = utils . splitext_plus ( in_file ) out_file = "{base}-{filter}filter{ext}" . format ( base = base , ext = ext , filter = filter_type ) if not utils . file_exists ( out_file ) : with file_transaction... | Apply VQSR based on the specified tranche returning a filtered VCF file . |
29,772 | def _get_training_data ( vrn_files ) : out = { "SNP" : [ ] , "INDEL" : [ ] } for name , train_info in [ ( "train_hapmap" , "known=false,training=true,truth=true,prior=15.0" ) , ( "train_omni" , "known=false,training=true,truth=true,prior=12.0" ) , ( "train_1000g" , "known=false,training=true,truth=false,prior=10.0" ) ,... | Retrieve training data returning an empty set of information if not available . |
29,773 | def _get_vqsr_training ( filter_type , vrn_files , gatk_type ) : params = [ ] for name , train_info , fname in _get_training_data ( vrn_files ) [ filter_type ] : if gatk_type == "gatk4" : params . extend ( [ "--resource:%s,%s" % ( name , train_info ) , fname ] ) if filter_type == "INDEL" : params . extend ( [ "--max-ga... | Return parameters for VQSR training handling SNPs and Indels . |
29,774 | def _get_vqsr_annotations ( filter_type , data ) : if filter_type == "SNP" : anns = [ "QD" , "FS" , "ReadPosRankSum" , "SOR" ] else : assert filter_type == "INDEL" anns = [ "QD" , "FS" , "ReadPosRankSum" , "SOR" ] if dd . get_coverage_interval ( data ) == "genome" : anns += [ "DP" ] return anns | Retrieve appropriate annotations to use for VQSR based on filter type . |
29,775 | def _run_vqsr ( in_file , ref_file , vrn_files , sensitivity_cutoff , filter_type , data ) : cutoffs = [ "100.0" , "99.99" , "99.98" , "99.97" , "99.96" , "99.95" , "99.94" , "99.93" , "99.92" , "99.91" , "99.9" , "99.8" , "99.7" , "99.6" , "99.5" , "99.0" , "98.0" , "90.0" ] if sensitivity_cutoff not in cutoffs : cuto... | Run variant quality score recalibration . |
29,776 | def _already_cutoff_filtered ( in_file , filter_type ) : filter_file = "%s-filter%s.vcf.gz" % ( utils . splitext_plus ( in_file ) [ 0 ] , filter_type ) return utils . file_exists ( filter_file ) | Check if we have a pre - existing cutoff - based filter file from previous VQSR failure . |
29,777 | def _variant_filtration ( in_file , ref_file , vrn_files , data , filter_type , hard_filter_fn ) : algs = [ data [ "config" ] [ "algorithm" ] ] * len ( data . get ( "vrn_files" , [ 1 ] ) ) if ( not config_utils . use_vqsr ( algs , in_file ) or _already_cutoff_filtered ( in_file , filter_type ) ) : logger . info ( "Skip... | Filter SNP and indel variant calls using GATK best practice recommendations . |
29,778 | def gatk_remove_missingalt ( in_file , data ) : base = in_file . split ( '.vcf.gz' ) [ 0 ] out_file = "%s-nomissingalt%s" % ( base , '.vcf.gz' ) if utils . file_exists ( out_file ) : return out_file no_gzip_out = out_file . replace ( ".vcf.gz" , ".vcf" ) with file_transaction ( no_gzip_out ) as tx_out_file : with utils... | GATK 4 . 1 . 0 . 0 outputs variants that have missing ALTs which breaks downstream tools this filters those out . |
29,779 | def strand_unknown ( db , transcript ) : features = list ( db . children ( transcript ) ) strand = features [ 0 ] . strand if strand == "." : return True else : return False | for unstranded data with novel transcripts single exon genes will have no strand information . single exon novel genes are also a source of noise in the Cufflinks assembly so this removes them |
29,780 | def fix_cufflinks_attributes ( ref_gtf , merged_gtf , data , out_file = None ) : base , ext = os . path . splitext ( merged_gtf ) fixed = out_file if out_file else base + ".clean.fixed" + ext if file_exists ( fixed ) : return fixed ref_db = gtf . get_gtf_db ( ref_gtf ) merged_db = gtf . get_gtf_db ( merged_gtf , in_mem... | replace the cufflinks gene_id and transcript_id with the gene_id and transcript_id from ref_gtf where available |
29,781 | def merge ( assembled_gtfs , ref_file , gtf_file , num_cores , data ) : assembled_file = tempfile . NamedTemporaryFile ( delete = False ) . name with open ( assembled_file , "w" ) as temp_handle : for assembled in assembled_gtfs : temp_handle . write ( assembled + "\n" ) out_dir = os . path . join ( "assembly" , "cuffm... | run cuffmerge on a set of assembled GTF files |
29,782 | def _vcf_info ( start , end , mate_id , info = None ) : out = "SVTYPE=BND;MATEID={mate};IMPRECISE;CIPOS=0,{size}" . format ( mate = mate_id , size = end - start ) if info is not None : extra_info = ";" . join ( "{0}={1}" . format ( k , v ) for k , v in info . iteritems ( ) ) out = "{0};{1}" . format ( out , extra_info ... | Return breakend information line with mate and imprecise location . |
29,783 | def _vcf_alt ( base , other_chr , other_pos , isrc , is_first ) : if is_first : pipe = "[" if isrc else "]" out_str = "{base}{pipe}{chr}:{pos}{pipe}" else : pipe = "]" if isrc else "[" out_str = "{pipe}{chr}:{pos}{pipe}{base}" return out_str . format ( pipe = pipe , chr = other_chr , pos = other_pos + 1 , base = base ) | Create ALT allele line in VCF 4 . 1 format associating with other paired end . |
29,784 | def _breakend_orientation ( strand1 , strand2 ) : EndOrientation = namedtuple ( "EndOrientation" , [ "is_first1" , "is_rc1" , "is_first2" , "is_rc2" ] ) if strand1 == "+" and strand2 == "-" : return EndOrientation ( True , True , False , True ) elif strand1 == "+" and strand2 == "+" : return EndOrientation ( True , Fal... | Convert BEDPE strand representation of breakpoints into VCF . |
29,785 | def build_vcf_parts ( feature , genome_2bit , info = None ) : base1 = genome_2bit [ feature . chrom1 ] . get ( feature . start1 , feature . start1 + 1 ) . upper ( ) id1 = "hydra{0}a" . format ( feature . name ) base2 = genome_2bit [ feature . chrom2 ] . get ( feature . start2 , feature . start2 + 1 ) . upper ( ) id2 = ... | Convert BedPe feature information into VCF part representation . |
def build_vcf_deletion(x, genome_2bit):
    """Provide a VCF representation of a deletion from BedPE breakpoints.

    Args:
        x: BedPe record describing the deletion via its first breakend.
        genome_2bit: 2bit genome accessor used to fetch the reference base.

    Returns:
        VcfLine for the deletion using the symbolic ``<DEL>`` ALT allele.
    """
    del_id = "hydra{0}".format(x.name)
    # Single reference base at the breakpoint start, upper-cased for VCF.
    ref_base = genome_2bit[x.chrom1].get(x.start1, x.start1 + 1).upper()
    return VcfLine(x.chrom1, x.start1, del_id, ref_base, "<DEL>",
                   _vcf_single_end_info(x, "DEL", True))
29,787 | def build_vcf_inversion ( x1 , x2 , genome_2bit ) : id1 = "hydra{0}" . format ( x1 . name ) start_coords = sorted ( [ x1 . start1 , x1 . end1 , x2 . start1 , x2 . end1 ] ) end_coords = sorted ( [ x1 . start2 , x1 . end2 , x2 . start2 , x2 . start2 ] ) start_pos = ( start_coords [ 1 ] + start_coords [ 2 ] ) // 2 end_pos... | Provide representation of inversion from BedPE breakpoints . |
29,788 | def hydra_parser ( in_file , options = None ) : if options is None : options = { } BedPe = namedtuple ( 'BedPe' , [ "chrom1" , "start1" , "end1" , "chrom2" , "start2" , "end2" , "name" , "strand1" , "strand2" , "support" ] ) with open ( in_file ) as in_handle : reader = csv . reader ( in_handle , dialect = "excel-tab" ... | Parse hydra input file into namedtuple of values . |
29,789 | def _cluster_by ( end_iter , attr1 , attr2 , cluster_distance ) : ClusterInfo = namedtuple ( "ClusterInfo" , [ "chroms" , "clusters" , "lookup" ] ) chr_clusters = { } chroms = [ ] brends_by_id = { } for brend in end_iter : if not chr_clusters . has_key ( brend . chrom1 ) : chroms . append ( brend . chrom1 ) chr_cluster... | Cluster breakends by specified attributes . |
29,790 | def _calculate_cluster_distance ( end_iter ) : out = [ ] sizes = [ ] for x in end_iter : out . append ( x ) sizes . append ( x . end1 - x . start1 ) sizes . append ( x . end2 - x . start2 ) distance = sum ( sizes ) // len ( sizes ) return distance , out | Compute allowed distance for clustering based on end confidence intervals . |
29,791 | def group_hydra_breakends ( end_iter ) : cluster_distance , all_ends = _calculate_cluster_distance ( end_iter ) first_cluster = _cluster_by ( all_ends , "start1" , "end1" , cluster_distance ) for chrom in first_cluster . chroms : for _ , _ , brends in first_cluster . clusters [ chrom ] . getregions ( ) : if len ( brend... | Group together hydra breakends with overlapping ends . |
29,792 | def _write_vcf_header ( out_handle ) : def w ( line ) : out_handle . write ( "{0}\n" . format ( line ) ) w ( '##fileformat=VCFv4.1' ) w ( '##INFO=<ID=IMPRECISE,Number=0,Type=Flag,Description="Imprecise structural variation">' ) w ( '##INFO=<ID=END,Number=1,Type=Integer,' 'Description="End position of the variant descri... | Write VCF header information for Hydra structural variant . |
29,793 | def _write_vcf_breakend ( brend , out_handle ) : out_handle . write ( "{0}\n" . format ( "\t" . join ( str ( x ) for x in [ brend . chrom , brend . pos + 1 , brend . id , brend . ref , brend . alt , "." , "PASS" , brend . info ] ) ) ) | Write out a single VCF line with breakpoint information . |
29,794 | def _get_vcf_breakends ( hydra_file , genome_2bit , options = None ) : if options is None : options = { } for features in group_hydra_breakends ( hydra_parser ( hydra_file , options ) ) : if len ( features ) == 1 and is_deletion ( features [ 0 ] , options ) : yield build_vcf_deletion ( features [ 0 ] , genome_2bit ) el... | Parse BEDPE input yielding VCF ready breakends . |
def hydra_to_vcf_writer(hydra_file, genome_2bit, options, out_handle):
    """Write hydra structural variant output as a sorted VCF file.

    Args:
        hydra_file: path to hydra BEDPE output.
        genome_2bit: 2bit genome accessor for reference base lookups.
        options: dict of parsing options passed to the breakend generator.
        out_handle: open writable handle receiving the VCF text.
    """
    _write_vcf_header(out_handle)
    # Materialize and order records by coordinate for a valid sorted VCF.
    ordered = sorted(_get_vcf_breakends(hydra_file, genome_2bit, options),
                     key=lambda b: (b.chrom, b.pos))
    for record in ordered:
        _write_vcf_breakend(record, out_handle)
29,796 | def kallisto_table ( kallisto_dir , index ) : quant_dir = os . path . join ( kallisto_dir , "quant" ) out_file = os . path . join ( quant_dir , "matrix.csv" ) if file_exists ( out_file ) : return out_file tsvfile = os . path . join ( quant_dir , "matrix.tsv" ) ecfile = os . path . join ( quant_dir , "matrix.ec" ) cells... | convert kallisto output to a count table where the rows are equivalence classes and the columns are cells |
29,797 | def get_ec_names ( ecfile , fasta_names ) : df = pd . read_table ( ecfile , header = None , names = [ "ec" , "transcripts" ] ) transcript_groups = [ x . split ( "," ) for x in df [ "transcripts" ] ] transcripts = [ ] for group in transcript_groups : transcripts . append ( ":" . join ( [ fasta_names [ int ( x ) ] for x ... | convert equivalence classes to their set of transcripts |
29,798 | def parse_dirname ( fc_dir ) : ( _ , fc_dir ) = os . path . split ( fc_dir ) parts = fc_dir . split ( "_" ) name = None date = None for p in parts : if p . endswith ( ( "XX" , "xx" , "XY" , "X2" ) ) : name = p elif len ( p ) == 6 : try : int ( p ) date = p except ValueError : pass if name is None or date is None : rais... | Parse the flow cell ID and date from a flow cell directory . |
def get_qseq_dir(fc_dir):
    """Retrieve the qseq directory within Solexa flowcell output.

    Falls back to the flowcell directory itself when the standard
    ``Data/Intensities/BaseCalls`` layout is not present.

    Args:
        fc_dir: path to the flowcell directory.

    Returns:
        Path to the BaseCalls directory if it exists, otherwise ``fc_dir``.
    """
    basecalls_dir = os.path.join(fc_dir, "Data", "Intensities", "BaseCalls")
    return basecalls_dir if os.path.exists(basecalls_dir) else fc_dir
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.