idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
30,300
def make_bcbiornaseq_object ( data ) : if "bcbiornaseq" not in dd . get_tools_on ( data ) : return data upload_dir = tz . get_in ( ( "upload" , "dir" ) , data ) report_dir = os . path . join ( upload_dir , "bcbioRNASeq" ) safe_makedir ( report_dir ) organism = dd . get_bcbiornaseq ( data ) . get ( "organism" , None ) g...
load the initial bcb . rda object using bcbioRNASeq
30,301
def make_quality_report ( data ) : if "bcbiornaseq" not in dd . get_tools_on ( data ) : return data upload_dir = tz . get_in ( ( "upload" , "dir" ) , data ) report_dir = os . path . join ( upload_dir , "bcbioRNASeq" ) safe_makedir ( report_dir ) quality_rmd = os . path . join ( report_dir , "quality_control.Rmd" ) qual...
create and render the bcbioRNASeq quality report
30,302
def rmarkdown_draft ( filename , template , package ) : if file_exists ( filename ) : return filename draft_template = Template ( 'rmarkdown::draft("$filename", template="$template", package="$package", edit=FALSE)' ) draft_string = draft_template . substitute ( filename = filename , template = template , package = pac...
create a draft rmarkdown file from an installed template
30,303
def render_rmarkdown_file ( filename ) : render_template = Template ( 'rmarkdown::render("$filename")' ) render_string = render_template . substitute ( filename = filename ) report_dir = os . path . dirname ( filename ) rcmd = Rscript_cmd ( ) with chdir ( report_dir ) : do . run ( [ rcmd , "--no-environ" , "-e" , rende...
render a rmarkdown file using the rmarkdown library
30,304
def create_load_string ( upload_dir , groups = None , organism = None ) : libraryline = 'library(bcbioRNASeq)' load_template = Template ( ( 'bcb <- bcbioRNASeq(uploadDir="$upload_dir",' 'interestingGroups=$groups,' 'organism="$organism")' ) ) load_noorganism_template = Template ( ( 'bcb <- bcbioRNASeq(uploadDir="$uploa...
create the code necessary to load the bcbioRNAseq object
30,305
def _list2Rlist(xs):
    """Convert a python list (or a single string) to an R c() vector literal."""
    items = [xs] if isinstance(xs, six.string_types) else xs
    quoted = [_quotestring(item) for item in items]
    return "c(" + ",".join(quoted) + ")"
convert a python list to an R list
30,306
def _run_qsnp_paired ( align_bams , items , ref_file , assoc_files , region = None , out_file = None ) : config = items [ 0 ] [ "config" ] if out_file is None : out_file = "%s-paired-variants.vcf" % os . path . splitext ( align_bams [ 0 ] ) [ 0 ] if not utils . file_exists ( out_file ) : out_file = out_file . replace (...
Detect somatic mutations with qSNP .
30,307
def _clean_regions ( items , region ) : variant_regions = bedutils . population_variant_regions ( items , merged = True ) with utils . tmpfile ( ) as tx_out_file : target = subset_variant_regions ( variant_regions , region , tx_out_file , items ) if target : if isinstance ( target , six . string_types ) and os . path ....
Intersect region with target file if it exists
30,308
def _load_regions ( target ) : regions = [ ] with open ( target ) as in_handle : for line in in_handle : if not line . startswith ( "#" ) : c , s , e = line . strip ( ) . split ( "\t" ) regions . append ( ( c , s , e ) ) return regions
Get a list of (chrom, start, end) tuples from a BED file
30,309
def _slice_bam ( in_bam , region , tmp_dir , config ) : name_file = os . path . splitext ( os . path . basename ( in_bam ) ) [ 0 ] out_file = os . path . join ( tmp_dir , os . path . join ( tmp_dir , name_file + _to_str ( region ) + ".bam" ) ) sambamba = config_utils . get_program ( "sambamba" , config ) region = _to_s...
Use sambamba to slice a bam region
30,310
def _create_input ( paired , out_file , ref_file , snp_file , qsnp_file ) : ini_file [ "[inputFiles]" ] [ "dbSNP" ] = snp_file ini_file [ "[inputFiles]" ] [ "ref" ] = ref_file ini_file [ "[inputFiles]" ] [ "normalBam" ] = paired . normal_bam ini_file [ "[inputFiles]" ] [ "tumourBam" ] = paired . tumor_bam ini_file [ "[...
Create INI input for qSNP
30,311
def _filter_vcf ( out_file ) : in_file = out_file . replace ( ".vcf" , "-ori.vcf" ) FILTER_line = ( '##FILTER=<ID=SBIAS,Description="Due to bias">\n' '##FILTER=<ID=5BP,Description="Due to 5BP">\n' '##FILTER=<ID=REJECT,Description="Not somatic due to qSNP filters">\n' ) SOMATIC_line = '##INFO=<ID=SOMATIC,Number=0,Type=F...
Fix sample names FILTER and FORMAT fields . Remove lines with ambiguous reference .
30,312
def _set_reject ( line ) : if line . startswith ( "#" ) : return line parts = line . split ( "\t" ) if parts [ 6 ] == "PASS" : parts [ 6 ] = "REJECT" else : parts [ 6 ] += ";REJECT" return "\t" . join ( parts )
Set REJECT in VCF line or add it if there is something else .
30,313
def svevent_reader ( in_file ) : with open ( in_file ) as in_handle : while 1 : line = next ( in_handle ) if line . startswith ( ">" ) : break header = line [ 1 : ] . rstrip ( ) . split ( "\t" ) reader = csv . reader ( in_handle , dialect = "excel-tab" ) for parts in reader : out = { } for h , p in zip ( header , parts...
Lazy generator of SV events returned as dictionary of parts .
30,314
def initialize_watcher(samples):
    """Build a WorldWatcher over the work directory, active when any sample requests CWL reporting."""
    work_dir = dd.get_in_samples(samples, dd.get_work_dir)
    wants_reporting = any(dd.get_cwl_reporting(d[0]) for d in samples)
    watcher = WorldWatcher(work_dir, is_on=wants_reporting)
    watcher.initialize(samples)
    return watcher
check to see if cwl_reporting is set for any samples and if so initialize a WorldWatcher object from a set of samples
30,315
def guess_infer_extent ( gtf_file ) : _ , ext = os . path . splitext ( gtf_file ) tmp_out = tempfile . NamedTemporaryFile ( suffix = ".gtf" , delete = False ) . name with open ( tmp_out , "w" ) as out_handle : count = 0 in_handle = utils . open_gzipsafe ( gtf_file ) for line in in_handle : if count > 1000 : break out_h...
guess if we need to use the gene extent option when making a gffutils database by making a tiny database of 1000 lines from the original GTF and looking for all of the features
30,316
def get_gtf_db ( gtf , in_memory = False ) : db_file = gtf + ".db" if file_exists ( db_file ) : return gffutils . FeatureDB ( db_file ) if not os . access ( os . path . dirname ( db_file ) , os . W_OK | os . X_OK ) : in_memory = True db_file = ":memory:" if in_memory else db_file if in_memory or not file_exists ( db_fi...
create a gffutils DB, in memory if we don't have write permissions
30,317
def partition_gtf ( gtf , coding = False , out_file = False ) : if out_file and file_exists ( out_file ) : return out_file if not out_file : out_file = tempfile . NamedTemporaryFile ( delete = False , suffix = ".gtf" ) . name if coding : pred = lambda biotype : biotype and biotype == "protein_coding" else : pred = lamb...
return a GTF file of all non - coding or coding transcripts . the GTF must be annotated with gene_biotype = protein_coding or to have the source column set to the biotype for all coding transcripts . set coding to True to get only the coding false to get only the non - coding
30,318
def split_gtf ( gtf , sample_size = None , out_dir = None ) : if out_dir : part1_fn = os . path . basename ( os . path . splitext ( gtf ) [ 0 ] ) + ".part1.gtf" part2_fn = os . path . basename ( os . path . splitext ( gtf ) [ 0 ] ) + ".part2.gtf" part1 = os . path . join ( out_dir , part1_fn ) part2 = os . path . join ...
split a GTF file into two equal parts randomly selecting genes . sample_size will select up to sample_size genes in total
30,319
def get_coding_noncoding_transcript_ids ( gtf ) : coding_gtf = partition_gtf ( gtf , coding = True ) coding_db = get_gtf_db ( coding_gtf ) coding_ids = set ( [ x [ 'transcript_id' ] [ 0 ] for x in coding_db . all_features ( ) if 'transcript_id' in x . attributes ] ) noncoding_gtf = partition_gtf ( gtf ) noncoding_db = ...
return a set of coding and non - coding transcript_ids from a GTF
30,320
def get_gene_source_set(gtf):
    """Map each gene_id in a GTF to the set of source column values seen for it."""
    gene_to_source = {}
    for feature in complete_features(get_gtf_db(gtf)):
        gene_id = feature['gene_id'][0]
        # accumulate sources per gene across all of its features
        gene_to_source.setdefault(gene_id, set()).add(feature.source)
    return gene_to_source
get a dictionary of the set of all sources for a gene
30,321
def get_transcript_source_set ( gtf ) : gene_to_source = get_gene_source_set ( gtf ) transcript_to_source = { } db = get_gtf_db ( gtf ) for feature in complete_features ( db ) : gene_id = feature [ 'gene_id' ] [ 0 ] transcript_to_source [ feature [ 'transcript_id' ] [ 0 ] ] = gene_to_source [ gene_id ] return transcrip...
get a dictionary of the set of all sources of the gene for a given transcript
30,322
def get_rRNA ( gtf ) : rRNA_biotypes = [ "rRNA" , "Mt_rRNA" , "tRNA" , "MT_tRNA" ] features = set ( ) with open_gzipsafe ( gtf ) as in_handle : for line in in_handle : if not "gene_id" in line or not "transcript_id" in line : continue if any ( x in line for x in rRNA_biotypes ) : geneid = line . split ( "gene_id" ) [ 1...
extract rRNA genes and transcripts from a gtf file
30,323
def _biotype_lookup_fn ( gtf ) : db = get_gtf_db ( gtf ) sources = set ( [ feature . source for feature in db . all_features ( ) ] ) gene_biotypes = set ( [ feature . attributes . get ( "gene_biotype" , [ None ] ) [ 0 ] for feature in db . all_features ( ) ] ) biotypes = set ( [ feature . attributes . get ( "biotype" ,...
return a function that will look up the biotype of a feature this checks for either gene_biotype or biotype being set or for the source column to have biotype information
30,324
def tx2genedict ( gtf , keep_version = False ) : d = { } with open_gzipsafe ( gtf ) as in_handle : for line in in_handle : if "gene_id" not in line or "transcript_id" not in line : continue geneid = line . split ( "gene_id" ) [ 1 ] . split ( " " ) [ 1 ] geneid = _strip_non_alphanumeric ( geneid ) txid = line . split ( ...
produce a tx2gene dictionary from a GTF file
30,325
def _strip_feature_version ( featureid ) : version_detector = re . compile ( r"(?P<featureid>.*)(?P<version>\.\d+)" ) match = version_detector . match ( featureid ) if match : return match . groupdict ( ) [ "featureid" ] else : return featureid
some feature ids are encoded as featureid.version; this strips the version off if it exists
30,326
def tx2genefile ( gtf , out_file = None , data = None , tsv = True , keep_version = False ) : if tsv : extension = ".tsv" sep = "\t" else : extension = ".csv" sep = "," if file_exists ( out_file ) : return out_file with file_transaction ( data , out_file ) as tx_out_file : with open ( tx_out_file , "w" ) as out_handle ...
write out a file of transcript - > gene mappings .
30,327
def is_qualimap_compatible ( gtf ) : if not gtf : return False db = get_gtf_db ( gtf ) def qualimap_compatible ( feature ) : gene_id = feature . attributes . get ( 'gene_id' , [ None ] ) [ 0 ] transcript_id = feature . attributes . get ( 'transcript_id' , [ None ] ) [ 0 ] gene_biotype = feature . attributes . get ( 'ge...
Qualimap needs a very specific GTF format or it fails so skip it if the GTF is not in that format
30,328
def is_cpat_compatible ( gtf ) : if not gtf : return False db = get_gtf_db ( gtf ) pred = lambda biotype : biotype and biotype == "protein_coding" biotype_lookup = _biotype_lookup_fn ( gtf ) if not biotype_lookup : return False db = get_gtf_db ( gtf ) for feature in db . all_features ( ) : biotype = biotype_lookup ( fe...
CPAT needs some transcripts annotated with protein coding status to work properly
30,329
def organize ( dirs , config , run_info_yaml , sample_names = None , is_cwl = False , integrations = None ) : from bcbio . pipeline import qcsummary if integrations is None : integrations = { } logger . info ( "Using input YAML configuration: %s" % run_info_yaml ) assert run_info_yaml and os . path . exists ( run_info_...
Organize run information from a passed YAML file or the Galaxy API .
30,330
def _get_full_paths ( fastq_dir , config , config_file ) : if fastq_dir : fastq_dir = utils . add_full_path ( fastq_dir ) config_dir = utils . add_full_path ( os . path . dirname ( config_file ) ) galaxy_config_file = utils . add_full_path ( config . get ( "galaxy_config" , "universe_wsgi.ini" ) , config_dir ) return f...
Retrieve full paths for directories in the case of relative locations .
30,331
def add_reference_resources ( data , remote_retriever = None ) : aligner = data [ "config" ] [ "algorithm" ] . get ( "aligner" , None ) if remote_retriever : data [ "reference" ] = remote_retriever . get_refs ( data [ "genome_build" ] , alignment . get_aligner_with_aliases ( aligner , data ) , data [ "config" ] ) else ...
Add genome reference information to the item to process .
30,332
def _get_data_versions(data):
    """Locate the versions.csv file with reference data versions, or None if unavailable."""
    genome_dir = install.get_genome_dir(data["genome_build"], data["dirs"].get("galaxy"), data)
    if not genome_dir:
        return None
    version_file = os.path.join(genome_dir, "versions.csv")
    return version_file if os.path.exists(version_file) else None
Retrieve CSV file with version information for reference data .
30,333
def _fill_validation_targets ( data ) : ref_file = dd . get_ref_file ( data ) sv_truth = tz . get_in ( [ "config" , "algorithm" , "svvalidate" ] , data , { } ) sv_targets = ( zip ( itertools . repeat ( "svvalidate" ) , sv_truth . keys ( ) ) if isinstance ( sv_truth , dict ) else [ [ "svvalidate" ] ] ) for vtarget in [ ...
Fill validation targets pointing to globally installed truth sets .
30,334
def _fill_capture_regions ( data ) : special_targets = { "sv_regions" : ( "exons" , "transcripts" ) } ref_file = dd . get_ref_file ( data ) for target in [ "variant_regions" , "sv_regions" , "coverage" ] : val = tz . get_in ( [ "config" , "algorithm" , target ] , data ) if val and not os . path . exists ( val ) and not...
Fill short - hand specification of BED capture regions .
30,335
def _fill_prioritization_targets ( data ) : ref_file = dd . get_ref_file ( data ) for target in [ "svprioritize" , "coverage" ] : val = tz . get_in ( [ "config" , "algorithm" , target ] , data ) if val and not os . path . exists ( val ) and not objectstore . is_remote ( val ) : installed_vals = [ ] for ext in [ ".bed" ...
Fill in globally installed files for prioritization .
30,336
def _clean_algorithm ( data ) : for key in [ "variantcaller" , "jointcaller" , "svcaller" ] : val = tz . get_in ( [ "algorithm" , key ] , data ) if val : if not isinstance ( val , ( list , tuple ) ) and isinstance ( val , six . string_types ) : val = [ val ] if isinstance ( val , ( list , tuple ) ) : if len ( val ) == ...
Clean algorithm keys handling items that can be specified as lists or single items .
30,337
def _organize_tools_on ( data , is_cwl ) : if is_cwl : if tz . get_in ( [ "algorithm" , "jointcaller" ] , data ) : val = tz . get_in ( [ "algorithm" , "tools_on" ] , data ) if not val : val = [ ] if not isinstance ( val , ( list , tuple ) ) : val = [ val ] if "gvcf" not in val : val . append ( "gvcf" ) data [ "algorith...
Ensure tools_on inputs match items specified elsewhere .
30,338
def _clean_background ( data ) : allowed_keys = set ( [ "variant" , "cnv_reference" ] ) val = tz . get_in ( [ "algorithm" , "background" ] , data ) errors = [ ] if val : out = { } if isinstance ( val , six . string_types ) : out [ "variant" ] = _file_to_abs ( val , [ os . getcwd ( ) ] ) elif isinstance ( val , dict ) :...
Clean up background specification remaining back compatible .
30,339
def _clean_characters ( x ) : if not isinstance ( x , six . string_types ) : x = str ( x ) else : if not all ( ord ( char ) < 128 for char in x ) : msg = "Found unicode character in input YAML (%s)" % ( x ) raise ValueError ( repr ( msg ) ) for problem in [ " " , "." , "/" , "\\" , "[" , "]" , "&" , ";" , "#" , "+" , "...
Clean problem characters in sample lane or descriptions .
30,340
def prep_rg_names ( item , config , fc_name , fc_date ) : if fc_name and fc_date : lane_name = "%s_%s_%s" % ( item [ "lane" ] , fc_date , fc_name ) else : lane_name = item [ "description" ] return { "rg" : item [ "description" ] , "sample" : item [ "description" ] , "lane" : lane_name , "pl" : ( tz . get_in ( [ "algori...
Generate read group names from item inputs .
30,341
def _check_for_duplicates ( xs , attr , check_fn = None ) : dups = [ ] for key , vals in itertools . groupby ( x [ attr ] for x in xs ) : if len ( list ( vals ) ) > 1 : dups . append ( key ) if len ( dups ) > 0 : psamples = [ ] for x in xs : if x [ attr ] in dups : psamples . append ( x ) if check_fn and check_fn ( psa...
Identify and raise errors on duplicate items .
30,342
def _check_for_batch_clashes ( xs ) : names = set ( [ x [ "description" ] for x in xs ] ) dups = set ( [ ] ) for x in xs : batches = tz . get_in ( ( "metadata" , "batch" ) , x ) if batches : if not isinstance ( batches , ( list , tuple ) ) : batches = [ batches ] for batch in batches : if batch in names : dups . add ( ...
Check that batch names do not overlap with sample names .
30,343
def _check_for_problem_somatic_batches ( items , config ) : to_check = [ ] for data in items : data = copy . deepcopy ( data ) data [ "config" ] = config_utils . update_w_custom ( config , data ) to_check . append ( data ) data_by_batches = collections . defaultdict ( list ) for data in to_check : batches = dd . get_ba...
Identify problem batch setups for somatic calling .
30,344
def _check_for_misplaced ( xs , subkey , other_keys ) : problems = [ ] for x in xs : check_dict = x . get ( subkey , { } ) for to_check in other_keys : if to_check in check_dict : problems . append ( ( x [ "description" ] , to_check , subkey ) ) if len ( problems ) > 0 : raise ValueError ( "\n" . join ( [ "Incorrectly ...
Ensure configuration keys are not incorrectly nested under other keys .
30,345
def _check_for_degenerate_interesting_groups ( items ) : igkey = ( "algorithm" , "bcbiornaseq" , "interesting_groups" ) interesting_groups = tz . get_in ( igkey , items [ 0 ] , [ ] ) if isinstance ( interesting_groups , str ) : interesting_groups = [ interesting_groups ] for group in interesting_groups : values = [ tz ...
Make sure interesting_groups specify existing metadata and that the interesting_group is not all of the same for all of the samples
30,346
def _check_algorithm_keys ( item ) : problem_keys = [ k for k in item [ "algorithm" ] . keys ( ) if k not in ALGORITHM_KEYS ] if len ( problem_keys ) > 0 : raise ValueError ( "Unexpected configuration keyword in 'algorithm' section: %s\n" "See configuration documentation for supported options:\n%s\n" % ( problem_keys ,...
Check for unexpected keys in the algorithm section .
30,347
def _check_algorithm_values ( item ) : problems = [ ] for k , v in item . get ( "algorithm" , { } ) . items ( ) : if v is True and k not in ALG_ALLOW_BOOLEANS : problems . append ( "%s set as true" % k ) elif v is False and ( k not in ALG_ALLOW_BOOLEANS and k not in ALG_ALLOW_FALSE ) : problems . append ( "%s set as fa...
Check for misplaced inputs in the algorithms .
30,348
def _check_toplevel_misplaced ( item ) : problem_keys = [ k for k in item . keys ( ) if k in ALGORITHM_KEYS ] if len ( problem_keys ) > 0 : raise ValueError ( "Unexpected configuration keywords found in top level of %s: %s\n" "This should be placed in the 'algorithm' section." % ( item [ "description" ] , problem_keys ...
Check for algorithm keys accidentally placed at the top level .
30,349
def _check_quality_format ( items ) : SAMPLE_FORMAT = { "illumina_1.3+" : "illumina" , "illumina_1.5+" : "illumina" , "illumina_1.8+" : "standard" , "solexa" : "solexa" , "sanger" : "standard" } fastq_extensions = [ "fq.gz" , "fastq.gz" , ".fastq" , ".fq" ] for item in items : specified_format = item [ "algorithm" ] . ...
Check if quality_format = standard and fastq_format is not sanger
30,350
def _check_aligner ( item ) : allowed = set ( list ( alignment . TOOLS . keys ( ) ) + [ None , False ] ) if item [ "algorithm" ] . get ( "aligner" ) not in allowed : raise ValueError ( "Unexpected algorithm 'aligner' parameter: %s\n" "Supported options: %s\n" % ( item [ "algorithm" ] . get ( "aligner" ) , sorted ( list...
Ensure specified aligner is valid choice .
30,351
def _check_variantcaller ( item ) : allowed = set ( list ( genotype . get_variantcallers ( ) . keys ( ) ) + [ None , False ] ) vcs = item [ "algorithm" ] . get ( "variantcaller" ) if not isinstance ( vcs , dict ) : vcs = { "variantcaller" : vcs } for vc_set in vcs . values ( ) : if not isinstance ( vc_set , ( tuple , l...
Ensure specified variantcaller is a valid choice .
30,352
def _check_svcaller ( item ) : allowed = set ( reduce ( operator . add , [ list ( d . keys ( ) ) for d in structural . _CALLERS . values ( ) ] ) + [ None , False ] ) svs = item [ "algorithm" ] . get ( "svcaller" ) if not isinstance ( svs , ( list , tuple ) ) : svs = [ svs ] problem = [ x for x in svs if x not in allowe...
Ensure the provide structural variant caller is valid .
30,353
def _check_hetcaller ( item ) : svs = _get_as_list ( item , "svcaller" ) hets = _get_as_list ( item , "hetcaller" ) if hets or any ( [ x in svs for x in [ "titancna" , "purecn" ] ] ) : if not any ( [ x in svs for x in [ "cnvkit" , "gatk-cnv" ] ] ) : raise ValueError ( "Heterogeneity caller used but need CNV calls. Add ...
Ensure the upstream SV callers required for heterogeneity analysis are available.
30,354
def _check_jointcaller ( data ) : allowed = set ( joint . get_callers ( ) + [ None , False ] ) cs = data [ "algorithm" ] . get ( "jointcaller" , [ ] ) if not isinstance ( cs , ( tuple , list ) ) : cs = [ cs ] problem = [ x for x in cs if x not in allowed ] if len ( problem ) > 0 : raise ValueError ( "Unexpected algorit...
Ensure specified jointcaller is valid .
30,355
def _check_realign ( data ) : if "gatk4" not in data [ "algorithm" ] . get ( "tools_off" , [ ] ) and not "gatk4" == data [ "algorithm" ] . get ( "tools_off" ) : if data [ "algorithm" ] . get ( "realign" ) : raise ValueError ( "In sample %s, realign specified but it is not supported for GATK4. " "Realignment is generall...
Check for realignment which is not supported in GATK4
30,356
def _check_trim ( data ) : trim = data [ "algorithm" ] . get ( "trim_reads" ) if trim : if trim == "fastp" and data [ "algorithm" ] . get ( "align_split_size" ) is not False : raise ValueError ( "In sample %s, `trim_reads: fastp` currently requires `align_split_size: false`" % ( dd . get_sample_name ( data ) ) )
Check for valid values for trim_reads .
30,357
def _check_sample_config ( items , in_file , config ) : logger . info ( "Checking sample YAML configuration: %s" % in_file ) _check_quality_format ( items ) _check_for_duplicates ( items , "lane" ) _check_for_duplicates ( items , "description" ) _check_for_degenerate_interesting_groups ( items ) _check_for_batch_clashe...
Identify common problems in input sample configuration files .
30,358
def _file_to_abs ( x , dnames , makedir = False ) : if x is None or os . path . isabs ( x ) : return x elif isinstance ( x , six . string_types ) and objectstore . is_remote ( x ) : return x elif isinstance ( x , six . string_types ) and x . lower ( ) == "none" : return None else : for dname in dnames : if dname : norm...
Make a file absolute using the supplied base directory choices .
30,359
def _normalize_files ( item , fc_dir = None ) : files = item . get ( "files" ) if files : if isinstance ( files , six . string_types ) : files = [ files ] fastq_dir = flowcell . get_fastq_dir ( fc_dir ) if fc_dir else os . getcwd ( ) files = [ _file_to_abs ( x , [ os . getcwd ( ) , fc_dir , fastq_dir ] ) for x in files...
Ensure the files argument is a list of absolute file names . Handles BAM single and paired end fastq as well as split inputs .
30,360
def _sanity_check_files ( item , files ) : msg = None file_types = set ( [ ( "bam" if x . endswith ( ".bam" ) else "fastq" ) for x in files if x ] ) if len ( file_types ) > 1 : msg = "Found multiple file types (BAM and fastq)" file_type = file_types . pop ( ) if file_type == "bam" : if len ( files ) != 1 : msg = "Expec...
Ensure input files correspond with supported approaches .
30,361
def add_metadata_defaults(md):
    """Fill in default metadata keys (batch, phenotype) when not supplied.

    Mutates and returns the input dictionary; existing values are kept.
    """
    defaults = {"batch": None, "phenotype": ""}
    for key, default in defaults.items():
        md.setdefault(key, default)
    return md
Central location for defaults for algorithm inputs .
30,362
def _add_algorithm_defaults ( algorithm , analysis , is_cwl ) : if not algorithm : algorithm = { } defaults = { "archive" : None , "tools_off" : [ ] , "tools_on" : [ ] , "qc" : [ ] , "trim_reads" : False , "adapters" : [ ] , "effects" : "snpeff" , "quality_format" : "standard" , "expression_caller" : [ "salmon" ] if an...
Central location specifying defaults for algorithm inputs .
30,363
def _replace_global_vars ( xs , global_vars ) : if isinstance ( xs , ( list , tuple ) ) : return [ _replace_global_vars ( x ) for x in xs ] elif isinstance ( xs , dict ) : final = { } for k , v in xs . items ( ) : if isinstance ( v , six . string_types ) and v in global_vars : v = global_vars [ v ] final [ k ] = v retu...
Replace globally shared names from input header with value .
30,364
def prep_system ( run_info_yaml , bcbio_system = None ) : work_dir = os . getcwd ( ) config , config_file = config_utils . load_system_config ( bcbio_system , work_dir ) dirs = setup_directories ( work_dir , os . path . normpath ( os . path . dirname ( os . path . dirname ( run_info_yaml ) ) ) , config , config_file ) ...
Prepare system configuration information from an input configuration file .
30,365
def run ( align_bams , items , ref_file , assoc_files , region , out_file ) : assert out_file . endswith ( ".vcf.gz" ) if not utils . file_exists ( out_file ) : with file_transaction ( items [ 0 ] , out_file ) as tx_out_file : for align_bam in align_bams : bam . index ( align_bam , items [ 0 ] [ "config" ] ) cmd = [ "p...
Run platypus variant calling germline whole genome or exome .
30,366
def run(bam_file, data, out_dir):
    """Create small RNA QC log files from mirbase and seqcluster stats.

    Fix: the original built the metrics dictionary and then dropped it
    (no return), so callers always received None; it also appended the raw
    result of _seqcluster_stats, which can be None when no stat file exists.
    Return the dictionary and only record a seqcluster entry when present.
    """
    m = {"base": None, "secondary": []}
    m.update(_mirbase_stats(data, out_dir))
    seqcluster = _seqcluster_stats(data, out_dir)
    if seqcluster:
        m["secondary"].append(seqcluster)
    return m
Create several log files
30,367
def _mirbase_stats ( data , out_dir ) : utils . safe_makedir ( out_dir ) out_file = os . path . join ( out_dir , "%s_bcbio_mirbase.txt" % dd . get_sample_name ( data ) ) out_file_novel = os . path . join ( out_dir , "%s_bcbio_mirdeeep2.txt" % dd . get_sample_name ( data ) ) mirbase_fn = data . get ( "seqbuster" , None ...
Create stats from miraligner
30,368
def _seqcluster_stats ( data , out_dir ) : name = dd . get_sample_name ( data ) fn = data . get ( "seqcluster" , { } ) . get ( "stat_file" , None ) if not fn : return None out_file = os . path . join ( out_dir , "%s.txt" % name ) df = pd . read_csv ( fn , sep = "\t" , names = [ "reads" , "sample" , "type" ] ) df_sample...
Parse seqcluster output
30,369
def from_flowcell ( run_folder , lane_details , out_dir = None ) : fcid = os . path . basename ( run_folder ) if out_dir is None : out_dir = run_folder out_file = os . path . join ( out_dir , "%s.csv" % fcid ) with open ( out_file , "w" ) as out_handle : writer = csv . writer ( out_handle ) writer . writerow ( [ "FCID"...
Convert a flowcell into a samplesheet for demultiplexing .
30,370
def _lane_detail_to_ss ( fcid , ldetail ) : return [ fcid , ldetail [ "lane" ] , ldetail [ "name" ] , ldetail [ "genome_build" ] , ldetail [ "bc_index" ] , ldetail [ "description" ] . encode ( "ascii" , "ignore" ) , "N" , "" , "" , ldetail [ "project_name" ] ]
Convert information about a lane into Illumina samplesheet output .
30,371
def _organize_lanes ( info_iter , barcode_ids ) : all_lanes = [ ] for ( fcid , lane , sampleref ) , info in itertools . groupby ( info_iter , lambda x : ( x [ 0 ] , x [ 1 ] , x [ 1 ] ) ) : info = list ( info ) cur_lane = dict ( flowcell_id = fcid , lane = lane , genome_build = info [ 0 ] [ 3 ] , analysis = "Standard" )...
Organize flat lane information into nested YAML structure .
30,372
def _generate_barcode_ids ( info_iter ) : bc_type = "SampleSheet" barcodes = list ( set ( [ x [ - 1 ] for x in info_iter ] ) ) barcodes . sort ( ) barcode_ids = { } for i , bc in enumerate ( barcodes ) : barcode_ids [ bc ] = ( bc_type , i + 1 ) return barcode_ids
Create unique barcode IDs assigned to sequences
30,373
def _read_input_csv ( in_file ) : with io . open ( in_file , newline = None ) as in_handle : reader = csv . reader ( in_handle ) next ( reader ) for line in reader : if line : ( fc_id , lane , sample_id , genome , barcode ) = line [ : 5 ] yield fc_id , lane , sample_id , genome , barcode
Parse useful details from SampleSheet CSV file .
30,374
def _get_flowcell_id(in_file, require_single=True):
    """Collect the flowcell id(s) present in a SampleSheet.

    Raises ValueError when require_single is set and more than one FCID is
    found; otherwise returns the set of ids.
    """
    fc_ids = {row[0] for row in _read_input_csv(in_file)}
    if require_single and len(fc_ids) > 1:
        raise ValueError("There are several FCIDs in the same samplesheet file: %s" % in_file)
    return fc_ids
Retrieve the unique flowcell id represented in the SampleSheet .
30,375
def csv2yaml ( in_file , out_file = None ) : if out_file is None : out_file = "%s.yaml" % os . path . splitext ( in_file ) [ 0 ] barcode_ids = _generate_barcode_ids ( _read_input_csv ( in_file ) ) lanes = _organize_lanes ( _read_input_csv ( in_file ) , barcode_ids ) with open ( out_file , "w" ) as out_handle : out_hand...
Convert a CSV SampleSheet to YAML run_info format .
30,376
def run_has_samplesheet ( fc_dir , config , require_single = True ) : fc_name , _ = flowcell . parse_dirname ( fc_dir ) sheet_dirs = config . get ( "samplesheet_directories" , [ ] ) fcid_sheet = { } for ss_dir in ( s for s in sheet_dirs if os . path . exists ( s ) ) : with utils . chdir ( ss_dir ) : for ss in glob . gl...
Checks if there's a suitable SampleSheet.csv present for the run
30,377
def combine_bam(in_files, out_file, config):
    """Merge multiple BAM files into one with Picard, then index and reclaim space."""
    picard = broad.runner_from_path("picard", config)
    picard.run_fn("picard_merge", in_files, out_file)
    # inputs are now redundant; allow disk space cleanup per configuration
    for merged_input in in_files:
        save_diskspace(merged_input, "Merged into {0}".format(out_file), config)
    bam.index(out_file, config)
    return out_file
Parallel target to combine multiple BAM files .
30,378
def write_nochr_reads ( in_file , out_file , config ) : if not file_exists ( out_file ) : with file_transaction ( config , out_file ) as tx_out_file : samtools = config_utils . get_program ( "samtools" , config ) cmd = "{samtools} view -b -f 4 {in_file} > {tx_out_file}" do . run ( cmd . format ( ** locals ( ) ) , "Sele...
Write a BAM file of reads that are not mapped on a reference chromosome .
30,379
def write_noanalysis_reads ( in_file , region_file , out_file , config ) : if not file_exists ( out_file ) : with file_transaction ( config , out_file ) as tx_out_file : bedtools = config_utils . get_program ( "bedtools" , config ) sambamba = config_utils . get_program ( "sambamba" , config ) cl = ( "{sambamba} view -f...
Write a BAM file of reads in the specified region file that are not analyzed .
30,380
def subset_bam_by_region ( in_file , region , config , out_file_base = None ) : if out_file_base is not None : base , ext = os . path . splitext ( out_file_base ) else : base , ext = os . path . splitext ( in_file ) out_file = "%s-subset%s%s" % ( base , region , ext ) if not file_exists ( out_file ) : with pysam . Samf...
Subset BAM files based on specified chromosome region .
30,381
def subset_bed_by_chrom ( in_file , chrom , data , out_dir = None ) : if out_dir is None : out_dir = os . path . dirname ( in_file ) base , ext = os . path . splitext ( os . path . basename ( in_file ) ) out_file = os . path . join ( out_dir , "%s-%s%s" % ( base , chrom , ext ) ) if not utils . file_uptodate ( out_file...
Subset a BED file to only have items from the specified chromosome .
30,382
def remove_lcr_regions(orig_bed, items):
    """Remove low complexity regions from a BED file when configured and available."""
    lcr_bed = tz.get_in(["genome_resources", "variation", "lcr"], items[0])
    usable = lcr_bed and os.path.exists(lcr_bed) and "lcr" in get_exclude_regions(items)
    if usable:
        return _remove_regions(orig_bed, [lcr_bed], "nolcr", items[0])
    return orig_bed
If configured and available update a BED file to remove low complexity regions .
30,383
def remove_polyx_regions(in_file, items):
    """Drop polyX stretches, which contribute to long variant calling runtimes.

    Returns the input BED unchanged unless a polyX reference BED is both
    configured and present on disk.
    """
    polyx_bed = tz.get_in(["genome_resources", "variation", "polyx"], items[0])
    if not (polyx_bed and os.path.exists(polyx_bed)):
        return in_file
    return _remove_regions(in_file, [polyx_bed], "nopolyx", items[0])
Remove polyX stretches contributing to long variant runtimes .
30,384
def add_highdepth_genome_exclusion(items):
    """Tag whole-genome samples with a "highdepth" exclusion region.

    Returns copies of the input items; those with genome coverage interval
    gain "highdepth" in their exclude regions (if not already present) to
    avoid slow runtimes on whole genomes.
    """
    def _with_exclusion(data):
        # work on a copy so the caller's items are never mutated
        data = utils.deepish_copy(data)
        if dd.get_coverage_interval(data) != "genome":
            return data
        excludes = dd.get_exclude_regions(data)
        if "highdepth" not in excludes:
            excludes.append("highdepth")
        return dd.set_exclude_regions(data, excludes)
    return [_with_exclusion(d) for d in items]
Add exclusions to input items to avoid slow runtimes on whole genomes .
30,385
def remove_highdepth_regions(in_file, items):
    """Filter ENCODE blacklist (high depth) regions out of a BED file of calls.

    Falls back to returning the input file unchanged when no blacklist BED is
    configured or the configured file is missing on disk.
    """
    blacklist_bed = tz.get_in(["genome_resources", "variation", "encode_blacklist"],
                              items[0])
    if blacklist_bed and os.path.exists(blacklist_bed):
        return _remove_regions(in_file, [blacklist_bed], "glimit", items[0])
    return in_file
Remove high depth regions from a BED file for analyzing a set of calls .
30,386
def _remove_regions ( in_file , remove_beds , ext , data ) : from bcbio . variation import bedutils out_file = "%s-%s.bed" % ( utils . splitext_plus ( in_file ) [ 0 ] , ext ) if not utils . file_uptodate ( out_file , in_file ) : with file_transaction ( data , out_file ) as tx_out_file : with bedtools_tmpdir ( data ) : ...
Subtract a list of BED files from an input BED .
30,387
def get_exclude_regions ( items ) : def _get_sample_excludes ( d ) : excludes = dd . get_exclude_regions ( d ) if tz . get_in ( ( "config" , "algorithm" , "remove_lcr" ) , d , False ) : excludes . append ( "lcr" ) return excludes out = reduce ( operator . add , [ _get_sample_excludes ( d ) for d in items ] ) return sor...
Retrieve regions to exclude from a set of items .
30,388
def to_multiregion(region):
    """Normalize a region specification into a multiregion list.

    Accepts either a single (chrom, start, end) region or an already-nested
    sequence of such regions. A single region is wrapped into a one-element
    list of tuples; a nested specification is returned as-is.
    """
    assert isinstance(region, (list, tuple)), region
    if not isinstance(region[0], (list, tuple)):
        # single flat region: must be exactly (chrom, start, end)
        assert len(region) == 3
        return [tuple(region)]
    return region
Convert a single region or multiple region specification into a multiregion list .
30,389
def subset_variant_regions ( variant_regions , region , out_file , items = None , do_merge = True , data = None ) : if region is None : return variant_regions elif variant_regions is None : return region elif not isinstance ( region , ( list , tuple ) ) and region . find ( ":" ) > 0 : raise ValueError ( "Partial chromo...
Return BED file subset by a specified chromosome region .
30,390
def _delly_exclude_file ( items , base_file , chrom ) : base_exclude = sshared . prepare_exclude_file ( items , base_file , chrom ) out_file = "%s-delly%s" % utils . splitext_plus ( base_exclude ) with file_transaction ( items [ 0 ] , out_file ) as tx_out_file : with open ( tx_out_file , "w" ) as out_handle : with open...
Prepare a delly-specific exclude file eliminating chromosomes . Delly wants excluded chromosomes listed as just the chromosome with no coordinates .
30,391
def _run_delly ( bam_files , chrom , ref_file , work_dir , items ) : batch = sshared . get_cur_batch ( items ) ext = "-%s-svs" % batch if batch else "-svs" out_file = os . path . join ( work_dir , "%s%s-%s.bcf" % ( os . path . splitext ( os . path . basename ( bam_files [ 0 ] ) ) [ 0 ] , ext , chrom ) ) final_file = "%...
Run delly calling structural variations for the specified type .
30,392
def _bgzip_and_clean ( bcf_file , items ) : out_file = "%s.vcf.gz" % ( utils . splitext_plus ( bcf_file ) [ 0 ] ) if not utils . file_exists ( out_file ) : with file_transaction ( items [ 0 ] , out_file ) as tx_out_file : if not utils . file_exists ( bcf_file ) : vcfutils . write_empty_vcf ( tx_out_file , samples = [ d...
Create a clean bgzipped VCF output file from bcf for downstream processing .
30,393
def _prep_subsampled_bams ( data , work_dir ) : sr_bam , disc_bam = sshared . get_split_discordants ( data , work_dir ) ds_bam = bam . downsample ( dd . get_align_bam ( data ) , data , 1e8 , read_filter = "-F 'not secondary_alignment and proper_pair'" , always_run = True , work_dir = work_dir ) out_bam = "%s-final%s" %...
Prepare a subsampled BAM file with discordants from samblaster and minimal correct pairs .
30,394
def run ( items ) : work_dir = utils . safe_makedir ( os . path . join ( items [ 0 ] [ "dirs" ] [ "work" ] , "structural" , dd . get_sample_name ( items [ 0 ] ) , "delly" ) ) config = copy . deepcopy ( items [ 0 ] [ "config" ] ) delly_config = utils . get_in ( config , ( "resources" , "delly" ) , { } ) delly_config [ "...
Perform detection of structural variations with delly .
30,395
def _disambiguate_star_fusion_junctions ( star_junction_file , contamination_bam , disambig_out_file , data ) : out_file = disambig_out_file fusiondict = { } with open ( star_junction_file , "r" ) as in_handle : for my_line in in_handle : my_line_split = my_line . strip ( ) . split ( "\t" ) if len ( my_line_split ) < 1...
Disambiguate detected fusions based on alignments to another species .
30,396
def get_desktop_env ( ) : desktop = os . environ . get ( "XDG_CURRENT_DESKTOP" ) if desktop : return desktop desktop = os . environ . get ( "DESKTOP_SESSION" ) if desktop : return desktop desktop = os . environ . get ( "GNOME_DESKTOP_SESSION_ID" ) if desktop : return "GNOME" desktop = os . environ . get ( "MATE_DESKTOP...
Identify the current running desktop environment .
30,397
def set_wm_wallpaper ( img ) : if shutil . which ( "feh" ) : util . disown ( [ "feh" , "--bg-fill" , img ] ) elif shutil . which ( "nitrogen" ) : util . disown ( [ "nitrogen" , "--set-zoom-fill" , img ] ) elif shutil . which ( "bgs" ) : util . disown ( [ "bgs" , "-z" , img ] ) elif shutil . which ( "hsetroot" ) : util ...
Set the wallpaper for non desktop environments .
30,398
def set_desktop_wallpaper ( desktop , img ) : desktop = str ( desktop ) . lower ( ) if "xfce" in desktop or "xubuntu" in desktop : xfconf ( "/backdrop/screen0/monitor0/image-path" , img ) xfconf ( "/backdrop/screen0/monitor0/workspace0/last-image" , img ) elif "muffin" in desktop or "cinnamon" in desktop : util . disow...
Set the wallpaper for the desktop environment .
30,399
def set_mac_wallpaper ( img ) : db_file = "Library/Application Support/Dock/desktoppicture.db" db_path = os . path . join ( HOME , db_file ) img_dir , _ = os . path . split ( img ) sql = "delete from data; " sql += "insert into data values(\"%s\"); " % img_dir sql += "insert into data values(\"%s\"); " % img sql += "up...
Set the wallpaper on macOS .