def genotype_and_filter(job, gvcfs, config):
"""
Genotypes one or more GVCF files and runs either the VQSR or hard filtering pipeline. Uploads the genotyped VCF file
to the config output directory.
:param JobFunctionWrappingJob job: passed automatically by Toil
:param dict gvcfs: Dictionary of GVCFs {Sample ID: FileStoreID}
:param Namespace config: Input parameters and shared FileStoreIDs
Requires the following config attributes:
config.genome_fasta FileStoreID for reference genome fasta file
config.genome_fai FileStoreID for reference genome fasta index file
config.genome_dict FileStoreID for reference genome sequence dictionary file
config.suffix Suffix added to output filename
config.output_dir URL or local path to output directory
config.ssec Path to key file for SSE-C encryption
config.cores Number of cores for each job
config.xmx Java heap size in bytes
config.unsafe_mode If True, then run GATK tools in UNSAFE mode
:return: FileStoreID for genotyped and filtered VCF file
:rtype: str
"""
# Get the total size of the genome reference
genome_ref_size = config.genome_fasta.size + config.genome_fai.size + config.genome_dict.size
# GenotypeGVCFs disk requirement depends on the input GVCFs, the genome reference files, and
# the output VCF file. The output VCF is smaller than the input GVCF.
genotype_gvcf_disk = PromisedRequirement(lambda gvcf_ids, ref_size:
2 * sum(gvcf_.size for gvcf_ in gvcf_ids) + ref_size,
gvcfs.values(),
genome_ref_size)
genotype_gvcf = job.addChildJobFn(gatk_genotype_gvcfs,
gvcfs,
config.genome_fasta,
config.genome_fai,
config.genome_dict,
annotations=config.annotations,
unsafe_mode=config.unsafe_mode,
cores=config.cores,
disk=genotype_gvcf_disk,
memory=config.xmx)
# Determine whether the output VCF contains a single sample or multiple samples
if len(gvcfs) == 1:
uuid = gvcfs.keys()[0]
else:
uuid = 'joint_genotyped'
genotyped_filename = '%s.genotyped%s.vcf' % (uuid, config.suffix)
genotype_gvcf.addChildJobFn(output_file_job,
genotyped_filename,
genotype_gvcf.rv(),
os.path.join(config.output_dir, uuid),
s3_key_path=config.ssec,
disk=PromisedRequirement(lambda x: x.size, genotype_gvcf.rv()))
if config.run_vqsr:
if not config.joint_genotype:
job.fileStore.logToMaster('WARNING: Running VQSR without joint genotyping.')
joint_genotype_vcf = genotype_gvcf.addFollowOnJobFn(vqsr_pipeline,
uuid,
genotype_gvcf.rv(),
config)
else:
joint_genotype_vcf = genotype_gvcf.addFollowOnJobFn(hard_filter_pipeline,
uuid,
genotype_gvcf.rv(),
config)
return joint_genotype_vcf.rv() |
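As a rough, illustrative sketch of the disk-sizing heuristic above (the byte counts are made up; the real pipeline reads sizes from the promised FileStoreIDs at run time):
gvcf_sizes = [1024 ** 3, 2 * 1024 ** 3]  # two hypothetical input GVCFs, in bytes
ref_size = 3 * 1024 ** 3                 # fasta + fai + dict, in bytes
# Same formula as the PromisedRequirement above: twice the GVCF payload plus the
# reference files, since the genotyped VCF is smaller than the input GVCFs.
disk = 2 * sum(gvcf_sizes) + ref_size
print(disk)  # 9663676416, i.e. roughly 9 GiB of scratch space for GenotypeGVCFs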
def annotate_vcfs(job, vcfs, config):
"""
Runs Oncotator for a group of VCF files. Each sample is annotated individually.
:param JobFunctionWrappingJob job: passed automatically by Toil
:param dict vcfs: Dictionary of VCF FileStoreIDs {Sample identifier: FileStoreID}
:param Namespace config: Input parameters and shared FileStoreIDs
Requires the following config attributes:
config.oncotator_db FileStoreID to Oncotator database
config.suffix Suffix added to output filename
config.output_dir URL or local path to output directory
config.ssec Path to key file for SSE-C encryption
config.cores Number of cores for each job
config.xmx Java heap size in bytes
"""
job.fileStore.logToMaster('Running Oncotator on the following samples:\n%s' % '\n'.join(vcfs.keys()))
for uuid, vcf_id in vcfs.iteritems():
# The Oncotator disk requirement depends on the input VCF, the Oncotator database
# and the output VCF. The annotated VCF will be significantly larger than the input VCF.
onco_disk = PromisedRequirement(lambda vcf, db: 3 * vcf.size + db.size,
vcf_id,
config.oncotator_db)
annotated_vcf = job.addChildJobFn(run_oncotator,
vcf_id,
config.oncotator_db,
disk=onco_disk,
cores=config.cores,
memory=config.xmx)
output_dir = os.path.join(config.output_dir, uuid)
filename = '{}.oncotator{}.vcf'.format(uuid, config.suffix)
annotated_vcf.addChildJobFn(output_file_job,
filename,
annotated_vcf.rv(),
output_dir,
s3_key_path=config.ssec,
disk=PromisedRequirement(lambda x: x.size, annotated_vcf.rv())) |
def parse_manifest(path_to_manifest):
"""
Parses manifest file for Toil Germline Pipeline
:param str path_to_manifest: Path to sample manifest file
:return: List of GermlineSample namedtuples
:rtype: list[GermlineSample]
"""
bam_re = r"^(?P<uuid>\S+)\s(?P<url>\S+[bsc][r]?am)"
fq_re = r"^(?P<uuid>\S+)\s(?P<url>\S+)\s(?P<paired_url>\S+)?\s?(?P<rg_line>@RG\S+)"
samples = []
with open(path_to_manifest, 'r') as f:
for line in f.readlines():
line = line.strip()
if line.startswith('#'):
continue
bam_match = re.match(bam_re, line)
fastq_match = re.match(fq_re, line)
if bam_match:
uuid = bam_match.group('uuid')
url = bam_match.group('url')
paired_url = None
rg_line = None
require('.bam' in url.lower(),
'Expected .bam extension:\n{}:\t{}'.format(uuid, url))
elif fastq_match:
uuid = fastq_match.group('uuid')
url = fastq_match.group('url')
paired_url = fastq_match.group('paired_url')
rg_line = fastq_match.group('rg_line')
require('.fq' in url.lower() or '.fastq' in url.lower(),
'Expected .fq extension:\n{}:\t{}'.format(uuid, url))
else:
raise ValueError('Could not parse entry in manifest: %s\n%s' % (f.name, line))
# Checks that URL has a scheme
require(urlparse(url).scheme, 'Invalid URL passed for {}'.format(url))
samples.append(GermlineSample(uuid, url, paired_url, rg_line))
return samples |
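For illustration only, here is how the two regular expressions above classify a couple of hypothetical manifest rows (tab-separated UUID, URL(s), and an optional read-group line written with literal \t separators); the sample names and URLs are placeholders:
import re

bam_re = r"^(?P<uuid>\S+)\s(?P<url>\S+[bsc][r]?am)"
fq_re = r"^(?P<uuid>\S+)\s(?P<url>\S+)\s(?P<paired_url>\S+)?\s?(?P<rg_line>@RG\S+)"

bam_line = "sample-1\ts3://example-bucket/sample-1.bam"
fq_line = ("sample-2\ts3://example-bucket/sample-2_R1.fastq.gz\t"
           "s3://example-bucket/sample-2_R2.fastq.gz\t"
           "@RG\\tID:sample-2\\tSM:sample-2")

assert re.match(bam_re, bam_line).group('url').endswith('.bam')
fastq_match = re.match(fq_re, fq_line)
assert fastq_match.group('paired_url').endswith('_R2.fastq.gz')
assert fastq_match.group('rg_line').startswith('@RG')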
def download_shared_files(job, config):
"""
Downloads shared reference files for Toil Germline pipeline
:param JobFunctionWrappingJob job: passed automatically by Toil
:param Namespace config: Pipeline configuration options
:return: Updated config with shared fileStoreIDS
:rtype: Namespace
"""
job.fileStore.logToMaster('Downloading shared reference files')
shared_files = {'genome_fasta', 'genome_fai', 'genome_dict'}
nonessential_files = {'genome_fai', 'genome_dict'}
# Download necessary files for pipeline configuration
if config.run_bwa:
shared_files |= {'amb', 'ann', 'bwt', 'pac', 'sa', 'alt'}
nonessential_files.add('alt')
if config.preprocess:
shared_files |= {'g1k_indel', 'mills', 'dbsnp'}
if config.run_vqsr:
shared_files |= {'g1k_snp', 'mills', 'dbsnp', 'hapmap', 'omni'}
if config.run_oncotator:
shared_files.add('oncotator_db')
for name in shared_files:
try:
url = getattr(config, name, None)
if url is None:
continue
setattr(config, name, job.addChildJobFn(download_url_job,
url,
name=name,
s3_key_path=config.ssec,
disk='15G' # Estimated reference file size
).rv())
finally:
if getattr(config, name, None) is None and name not in nonessential_files:
raise ValueError("Necessary configuration parameter is missing:\n{}".format(name))
return job.addFollowOnJobFn(reference_preprocessing, config).rv() |
def reference_preprocessing(job, config):
"""
Creates a genome fasta index and sequence dictionary file if not already present in the pipeline config.
:param JobFunctionWrappingJob job: passed automatically by Toil
:param Namespace config: Pipeline configuration options and shared files.
Requires FileStoreID for genome fasta file as config.genome_fasta
:return: Updated config with reference index files
:rtype: Namespace
"""
job.fileStore.logToMaster('Preparing Reference Files')
genome_id = config.genome_fasta
if getattr(config, 'genome_fai', None) is None:
config.genome_fai = job.addChildJobFn(run_samtools_faidx,
genome_id,
cores=config.cores).rv()
if getattr(config, 'genome_dict', None) is None:
config.genome_dict = job.addChildJobFn(run_picard_create_sequence_dictionary,
genome_id,
cores=config.cores,
memory=config.xmx).rv()
return config |
def prepare_bam(job, uuid, url, config, paired_url=None, rg_line=None):
"""
Prepares BAM file for Toil germline pipeline.
Steps in pipeline
0: Download and align BAM or FASTQ sample
1: Sort BAM
2: Index BAM
3: Run GATK preprocessing pipeline (Optional)
- Uploads preprocessed BAM to output directory
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str uuid: Unique identifier for the sample
:param str url: URL or local path to BAM file or FASTQs
:param Namespace config: Configuration options for pipeline
Requires the following config attributes:
config.genome_fasta FileStoreID for reference genome fasta file
config.genome_fai FileStoreID for reference genome fasta index file
config.genome_dict FileStoreID for reference genome sequence dictionary file
config.g1k_indel FileStoreID for 1000G INDEL resource file
config.mills FileStoreID for Mills resource file
config.dbsnp FileStoreID for dbSNP resource file
config.suffix Suffix added to output filename
config.output_dir URL or local path to output directory
config.ssec Path to key file for SSE-C encryption
config.cores Number of cores for each job
config.xmx Java heap size in bytes
:param str|None paired_url: URL or local path to paired FASTQ file, default is None
:param str|None rg_line: RG line for BWA alignment (e.g. @RG\tID:foo\tSM:bar), default is None
:return: BAM and BAI FileStoreIDs
:rtype: tuple
"""
# 0: Align FASTQ or realign BAM
if config.run_bwa:
get_bam = job.wrapJobFn(setup_and_run_bwakit,
uuid,
url,
rg_line,
config,
paired_url=paired_url).encapsulate()
# 0: Download BAM
elif '.bam' in url.lower():
job.fileStore.logToMaster("Downloading BAM: %s" % uuid)
get_bam = job.wrapJobFn(download_url_job,
url,
name='toil.bam',
s3_key_path=config.ssec,
disk=config.file_size).encapsulate()
else:
raise ValueError('Could not generate BAM file for %s\n'
'Provide a FASTQ URL and set run-bwa or '
'provide a BAM URL that includes .bam extension.' % uuid)
# 1: Sort BAM file if necessary
# Realigning BAM file shuffles read order
if config.sorted and not config.run_bwa:
sorted_bam = get_bam
else:
# The samtools sort disk requirement depends on the input bam, the tmp files, and the
# sorted output bam.
sorted_bam_disk = PromisedRequirement(lambda bam: 3 * bam.size, get_bam.rv())
sorted_bam = get_bam.addChildJobFn(run_samtools_sort,
get_bam.rv(),
cores=config.cores,
disk=sorted_bam_disk)
# 2: Index BAM
# The samtools index disk requirement depends on the input bam and the output bam index
index_bam_disk = PromisedRequirement(lambda bam: bam.size, sorted_bam.rv())
index_bam = job.wrapJobFn(run_samtools_index, sorted_bam.rv(), disk=index_bam_disk)
job.addChild(get_bam)
sorted_bam.addChild(index_bam)
if config.preprocess:
preprocess = job.wrapJobFn(run_gatk_preprocessing,
sorted_bam.rv(),
index_bam.rv(),
config.genome_fasta,
config.genome_dict,
config.genome_fai,
config.g1k_indel,
config.mills,
config.dbsnp,
memory=config.xmx,
cores=config.cores).encapsulate()
sorted_bam.addChild(preprocess)
index_bam.addChild(preprocess)
# Update output BAM promises
output_bam_promise = preprocess.rv(0)
output_bai_promise = preprocess.rv(1)
# Save processed BAM
output_dir = os.path.join(config.output_dir, uuid)
filename = '{}.preprocessed{}.bam'.format(uuid, config.suffix)
output_bam = job.wrapJobFn(output_file_job,
filename,
preprocess.rv(0),
output_dir,
s3_key_path=config.ssec)
preprocess.addChild(output_bam)
else:
output_bam_promise = sorted_bam.rv()
output_bai_promise = index_bam.rv()
return output_bam_promise, output_bai_promise |
def setup_and_run_bwakit(job, uuid, url, rg_line, config, paired_url=None):
"""
Downloads and runs bwakit for BAM or FASTQ files
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str uuid: Unique sample identifier
:param str url: FASTQ or BAM file URL. BAM alignment URL must have .bam extension.
:param Namespace config: Input parameters and shared FileStoreIDs
Requires the following config attributes:
config.genome_fasta FileStoreID for reference genome fasta file
config.genome_fai FileStoreID for reference genome fasta index file
config.cores Number of cores for each job
config.trim If True, trim adapters using bwakit
config.amb FileStoreID for BWA index file prefix.amb
config.ann FileStoreID for BWA index file prefix.ann
config.bwt FileStoreID for BWA index file prefix.bwt
config.pac FileStoreID for BWA index file prefix.pac
config.sa FileStoreID for BWA index file prefix.sa
config.alt FileStoreID for alternate contigs file or None
:param str|None paired_url: URL to paired FASTQ
:param str|None rg_line: Read group line (e.g. @RG\tID:foo\tSM:bar)
:return: BAM FileStoreID
:rtype: str
"""
bwa_config = deepcopy(config)
bwa_config.uuid = uuid
bwa_config.rg_line = rg_line
# bwa_alignment uses a different naming convention
bwa_config.ref = config.genome_fasta
bwa_config.fai = config.genome_fai
# Determine if sample is a FASTQ or BAM file using the file extension
basename, ext = os.path.splitext(url)
ext = ext.lower()
if ext == '.gz':
_, ext = os.path.splitext(basename)
ext = ext.lower()
# The pipeline currently supports FASTQ and BAM files
require(ext in ['.fq', '.fastq', '.bam'],
'Please use .fq, .fastq, or .bam file extensions:\n%s' % url)
# Download fastq files
samples = []
input1 = job.addChildJobFn(download_url_job,
url,
name='file1',
s3_key_path=config.ssec,
disk=config.file_size)
samples.append(input1.rv())
# If the extension is for a BAM file, then configure bwakit to realign the BAM file.
if ext == '.bam':
bwa_config.bam = input1.rv()
else:
bwa_config.r1 = input1.rv()
# Download the paired FASTQ URL
if paired_url:
input2 = job.addChildJobFn(download_url_job,
paired_url,
name='file2',
s3_key_path=config.ssec,
disk=config.file_size)
samples.append(input2.rv())
bwa_config.r2 = input2.rv()
# The bwakit disk requirement depends on the size of the input files and the index
# Take the sum of the input file sizes, scale it by a factor of 4, and add the index size
bwa_index_size = sum([getattr(config, index_file).size
for index_file in ['amb', 'ann', 'bwt', 'pac', 'sa', 'alt']
if getattr(config, index_file, None) is not None])
bwakit_disk = PromisedRequirement(lambda lst, index_size:
int(4 * sum(x.size for x in lst) + index_size),
samples,
bwa_index_size)
return job.addFollowOnJobFn(run_bwakit,
bwa_config,
sort=False, # BAM files are sorted later in the pipeline
trim=config.trim,
cores=config.cores,
disk=bwakit_disk).rv() |
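A small, illustrative example of the extension handling above, showing how a trailing .gz is stripped before the format check (the file names are made up):
import os

for url in ['reads.fastq', 'reads.fq.gz', 'realigned.bam']:
    basename, ext = os.path.splitext(url)
    ext = ext.lower()
    if ext == '.gz':
        _, ext = os.path.splitext(basename)
        ext = ext.lower()
    print('{} -> {}'.format(url, ext))
# reads.fastq -> .fastq
# reads.fq.gz -> .fq
# realigned.bam -> .bam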
def gatk_haplotype_caller(job,
bam, bai,
ref, fai, ref_dict,
annotations=None,
emit_threshold=10.0, call_threshold=30.0,
unsafe_mode=False,
hc_output=None):
"""
Uses GATK HaplotypeCaller to identify SNPs and INDELs. Outputs variants in a Genomic VCF file.
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str bam: FileStoreID for BAM file
:param str bai: FileStoreID for BAM index file
:param str ref: FileStoreID for reference genome fasta file
:param str ref_dict: FileStoreID for reference sequence dictionary file
:param str fai: FileStoreID for reference fasta index file
:param list[str] annotations: List of GATK variant annotations, default is None
:param float emit_threshold: Minimum phred-scale confidence threshold for a variant to be emitted, default is 10.0
:param float call_threshold: Minimum phred-scale confidence threshold for a variant to be called, default is 30.0
:param bool unsafe_mode: If True, runs gatk UNSAFE mode: "-U ALLOW_SEQ_DICT_INCOMPATIBILITY"
:param str hc_output: URL or local path to pre-cooked VCF file, default is None
:return: FileStoreID for GVCF file
:rtype: str
"""
job.fileStore.logToMaster('Running GATK HaplotypeCaller')
inputs = {'genome.fa': ref,
'genome.fa.fai': fai,
'genome.dict': ref_dict,
'input.bam': bam,
'input.bam.bai': bai}
work_dir = job.fileStore.getLocalTempDir()
for name, file_store_id in inputs.iteritems():
job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir, name))
# Call GATK -- HaplotypeCaller with parameters to produce a genomic VCF file:
# https://software.broadinstitute.org/gatk/documentation/article?id=2803
command = ['-T', 'HaplotypeCaller',
'-nct', str(job.cores),
'-R', 'genome.fa',
'-I', 'input.bam',
'-o', 'output.g.vcf',
'-stand_call_conf', str(call_threshold),
'-stand_emit_conf', str(emit_threshold),
'-variant_index_type', 'LINEAR',
'-variant_index_parameter', '128000',
'--genotyping_mode', 'Discovery',
'--emitRefConfidence', 'GVCF']
if unsafe_mode:
command = ['-U', 'ALLOW_SEQ_DICT_INCOMPATIBILITY'] + command
if annotations:
for annotation in annotations:
command.extend(['-A', annotation])
# Uses docker_call mock mode to replace output with hc_output file
outputs = {'output.g.vcf': hc_output}
docker_call(job=job, work_dir=work_dir,
env={'JAVA_OPTS': '-Djava.io.tmpdir=/data/ -Xmx{}'.format(job.memory)},
parameters=command,
tool='quay.io/ucsc_cgl/gatk:3.5--dba6dae49156168a909c43330350c6161dc7ecc2',
inputs=inputs.keys(),
outputs=outputs,
mock=True if outputs['output.g.vcf'] else False)
return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'output.g.vcf')) |
def main():
"""
GATK germline pipeline with variant filtering and annotation.
"""
# Define Parser object and add to jobTree
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter)
# Generate subparsers
subparsers = parser.add_subparsers(dest='command')
subparsers.add_parser('generate-config',
help='Generates an editable config in the current working directory.')
subparsers.add_parser('generate-manifest',
help='Generates an editable manifest in the current working directory.')
subparsers.add_parser('generate',
help='Generates a config and manifest in the current working directory.')
# Run subparser
parser_run = subparsers.add_parser('run', help='Runs the GATK germline pipeline')
parser_run.add_argument('--config',
required=True,
type=str,
help='Path to the (filled in) config file, generated with '
'"generate-config".')
parser_run.add_argument('--manifest',
type=str,
help='Path to the (filled in) manifest file, generated with '
'"generate-manifest".\nDefault value: "%(default)s".')
parser_run.add_argument('--sample',
default=None,
nargs=2,
type=str,
help='Input sample identifier and BAM file URL or local path')
parser_run.add_argument('--output-dir',
default=None,
help='Path/URL to output directory')
parser_run.add_argument('-s', '--suffix',
default=None,
help='Additional suffix to add to the names of the output files')
parser_run.add_argument('--preprocess-only',
action='store_true',
help='Only runs preprocessing steps')
Job.Runner.addToilOptions(parser_run)
options = parser.parse_args()
cwd = os.getcwd()
if options.command == 'generate-config' or options.command == 'generate':
generate_file(os.path.join(cwd, 'config-toil-germline.yaml'), generate_config)
if options.command == 'generate-manifest' or options.command == 'generate':
generate_file(os.path.join(cwd, 'manifest-toil-germline.tsv'), generate_manifest)
elif options.command == 'run':
# Program checks
for program in ['curl', 'docker']:
require(next(which(program)),
'{} must be installed on every node.'.format(program))
require(os.path.exists(options.config), '{} not found. Please run "generate-config"'.format(options.config))
# Read sample manifest
samples = []
if options.manifest:
samples.extend(parse_manifest(options.manifest))
# Add BAM sample from command line
if options.sample:
uuid, url = options.sample
# samples tuple: (uuid, url, paired_url, rg_line)
# BAM samples should not have a paired URL or read group line
samples.append(GermlineSample(uuid, url, None, None))
require(len(samples) > 0,
'No samples were detected in the manifest or on the command line')
# Parse inputs
inputs = {x.replace('-', '_'): y for x, y in
yaml.load(open(options.config).read()).iteritems()}
required_fields = {'genome_fasta',
'output_dir',
'run_bwa',
'sorted',
'snp_filter_annotations',
'indel_filter_annotations',
'preprocess',
'preprocess_only',
'run_vqsr',
'joint_genotype',
'run_oncotator',
'cores',
'file_size',
'xmx',
'suffix'}
input_fields = set(inputs.keys())
require(input_fields >= required_fields,
'Missing config parameters:\n{}'.format(', '.join(required_fields - input_fields)))
if inputs['output_dir'] is None:
inputs['output_dir'] = options.output_dir
require(inputs['output_dir'] is not None,
'Missing output directory PATH/URL')
if inputs['suffix'] is None:
inputs['suffix'] = options.suffix if options.suffix else ''
if inputs['preprocess_only'] is None:
inputs['preprocess_only'] = options.preprocess_only
if inputs['run_vqsr']:
# Check that essential VQSR parameters are present
vqsr_fields = {'g1k_snp', 'mills', 'dbsnp', 'hapmap', 'omni'}
require(input_fields >= vqsr_fields,
'Missing parameters for VQSR:\n{}'.format(', '.join(vqsr_fields - input_fields)))
# Check that hard filtering parameters are present. If only running preprocessing steps, then we do
# not need filtering information.
elif not inputs['preprocess_only']:
hard_filter_fields = {'snp_filter_name', 'snp_filter_expression',
'indel_filter_name', 'indel_filter_expression'}
require(input_fields >= hard_filter_fields,
'Missing parameters for hard filtering:\n{}'.format(', '.join(hard_filter_fields - input_fields)))
# Check for falsey hard filtering parameters
for hard_filter_field in hard_filter_fields:
require(inputs[hard_filter_field], 'Missing %s value for hard filtering, '
'got %s.' % (hard_filter_field, inputs[hard_filter_field]))
# Set resource parameters
inputs['xmx'] = human2bytes(inputs['xmx'])
inputs['file_size'] = human2bytes(inputs['file_size'])
inputs['cores'] = int(inputs['cores'])
inputs['annotations'] = set(inputs['snp_filter_annotations'] + inputs['indel_filter_annotations'])
# HaplotypeCaller test data for testing
inputs['hc_output'] = inputs.get('hc_output', None)
# It is a toil-scripts convention to store input parameters in a Namespace object
config = argparse.Namespace(**inputs)
root = Job.wrapJobFn(run_gatk_germline_pipeline, samples, config)
Job.Runner.startToil(root, options) |
def sample_loop(job, uuid_list, inputs):
"""
Loops over the sample_ids (uuids) in the manifest, creating child jobs to process each
"""
for uuid_rg in uuid_list:
uuid_items = uuid_rg.split(',')
uuid = uuid_items[0]
rg_line = None
if len(uuid_items) > 1:
rg_line = uuid_items[1]
job.addChildJobFn(static_dag, uuid, rg_line, inputs) |
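For illustration, this is how the comma-separated manifest entries consumed by sample_loop split into a UUID and an optional read-group line (the sample names are placeholders):
for uuid_rg in ['sample-1', 'sample-2,@RG\\tID:sample-2\\tSM:sample-2']:
    uuid_items = uuid_rg.split(',')
    uuid = uuid_items[0]
    rg_line = uuid_items[1] if len(uuid_items) > 1 else None
    print('{} -> {}'.format(uuid, rg_line))
# sample-1 -> None
# sample-2 -> @RG\tID:sample-2\tSM:sample-2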
def static_dag(job, uuid, rg_line, inputs):
"""
Wires up the static DAG for the ADAM/GATK pipeline. Defining it here lets us pull job
functions in from other pipelines without re-wrapping them.
bwa_inputs: Input arguments to be passed to BWA.
adam_inputs: Input arguments to be passed to ADAM.
gatk_preprocess_inputs: Input arguments to be passed to GATK preprocessing.
gatk_adam_call_inputs: Input arguments to be passed to GATK haplotype caller for the result of ADAM preprocessing.
gatk_gatk_call_inputs: Input arguments to be passed to GATK haplotype caller for the result of GATK preprocessing.
"""
# get work directory
work_dir = job.fileStore.getLocalTempDir()
inputs.cpu_count = cpu_count()
inputs.maxCores = sys.maxint
args = {'uuid': uuid,
's3_bucket': inputs.s3_bucket,
'sequence_dir': inputs.sequence_dir,
'dir_suffix': inputs.dir_suffix}
# get head BWA alignment job function and encapsulate it
inputs.rg_line = rg_line
inputs.output_dir = 's3://{s3_bucket}/alignment{dir_suffix}'.format(**args)
bwa = job.wrapJobFn(download_reference_files,
inputs,
[[uuid,
['s3://{s3_bucket}/{sequence_dir}/{uuid}_1.fastq.gz'.format(**args),
's3://{s3_bucket}/{sequence_dir}/{uuid}_2.fastq.gz'.format(**args)]]]).encapsulate()
# get head ADAM preprocessing job function and encapsulate it
adam_preprocess = job.wrapJobFn(static_adam_preprocessing_dag,
inputs,
's3://{s3_bucket}/alignment{dir_suffix}/{uuid}.bam'.format(**args),
's3://{s3_bucket}/analysis{dir_suffix}/{uuid}'.format(**args),
suffix='.adam').encapsulate()
# Configure options for Toil Germline pipeline. This function call only runs the preprocessing steps.
gatk_preprocessing_inputs = copy.deepcopy(inputs)
gatk_preprocessing_inputs.suffix = '.gatk'
gatk_preprocessing_inputs.preprocess = True
gatk_preprocessing_inputs.preprocess_only = True
gatk_preprocessing_inputs.output_dir = 's3://{s3_bucket}/analysis{dir_suffix}'.format(**args)
# get head GATK preprocessing job function and encapsulate it
gatk_preprocess = job.wrapJobFn(run_gatk_germline_pipeline,
GermlineSample(uuid,
's3://{s3_bucket}/alignment{dir_suffix}/{uuid}.bam'.format(**args),
None, # Does not require second URL or RG_Line
None),
gatk_preprocessing_inputs).encapsulate()
# Configure options for Toil Germline pipeline for preprocessed ADAM BAM file.
adam_call_inputs = inputs
adam_call_inputs.suffix = '.adam'
adam_call_inputs.sorted = True
adam_call_inputs.preprocess = False
adam_call_inputs.run_vqsr = False
adam_call_inputs.joint_genotype = False
adam_call_inputs.output_dir = 's3://{s3_bucket}/analysis{dir_suffix}'.format(**args)
# get head GATK haplotype caller job function for the result of ADAM preprocessing and encapsulate it
gatk_adam_call = job.wrapJobFn(run_gatk_germline_pipeline,
GermlineSample(uuid,
's3://{s3_bucket}/analysis{dir_suffix}/{uuid}/{uuid}.adam.bam'.format(**args),
None,
None),
adam_call_inputs).encapsulate()
# Configure options for Toil Germline pipeline for preprocessed GATK BAM file.
gatk_call_inputs = copy.deepcopy(inputs)
gatk_call_inputs.sorted = True
gatk_call_inputs.preprocess = False
gatk_call_inputs.run_vqsr = False
gatk_call_inputs.joint_genotype = False
gatk_call_inputs.output_dir = 's3://{s3_bucket}/analysis{dir_suffix}'.format(**args)
# get head GATK haplotype caller job function for the result of GATK preprocessing and encapsulate it
gatk_gatk_call = job.wrapJobFn(run_gatk_germline_pipeline,
GermlineSample(uuid,
's3://{s3_bucket}/analysis{dir_suffix}/{uuid}/{uuid}.gatk.bam'.format(**args),
None, None),
gatk_call_inputs).encapsulate()
# wire up dag
if not inputs.skip_alignment:
job.addChild(bwa)
if (inputs.pipeline_to_run == "adam" or
inputs.pipeline_to_run == "both"):
if inputs.skip_preprocessing:
job.addChild(gatk_adam_call)
else:
if inputs.skip_alignment:
job.addChild(adam_preprocess)
else:
bwa.addChild(adam_preprocess)
adam_preprocess.addChild(gatk_adam_call)
if (inputs.pipeline_to_run == "gatk" or
inputs.pipeline_to_run == "both"):
if inputs.skip_preprocessing:
job.addChild(gatk_gatk_call)
else:
if inputs.skip_alignment:
job.addChild(gatk_preprocess)
else:
bwa.addChild(gatk_preprocess)
gatk_preprocess.addChild(gatk_gatk_call) |
def main():
"""
This is a Toil pipeline used to perform alignment of fastqs.
"""
# Define Parser object and add to Toil
if mock_mode():
usage_msg = 'You have the TOIL_SCRIPTS_MOCK_MODE environment variable set, so this pipeline ' \
'will run in mock mode. To disable mock mode, set TOIL_SCRIPTS_MOCK_MODE=0'
else:
usage_msg = None
parser = argparse.ArgumentParser(usage=usage_msg)
subparsers = parser.add_subparsers(dest='command')
subparsers.add_parser('generate-config', help='Generates an editable config in the current working directory.')
subparsers.add_parser('generate-manifest', help='Generates an editable manifest in the current working directory.')
subparsers.add_parser('generate', help='Generates a config and manifest in the current working directory.')
# Run subparser
parser_run = subparsers.add_parser('run', help='Runs the ADAM/GATK pipeline')
default_config = 'adam-gatk-mock.config' if mock_mode() else 'adam-gatk.config'
default_manifest = 'adam-gatk-mock-manifest.csv' if mock_mode() else 'adam-gatk-manifest.csv'
parser_run.add_argument('--config', default=default_config, type=str,
help='Path to the (filled in) config file, generated with "generate-config".')
parser_run.add_argument('--manifest', default=default_manifest,
type=str, help='Path to the (filled in) manifest file, generated with "generate-manifest". '
'\nDefault value: "%(default)s".')
Job.Runner.addToilOptions(parser_run)
args = parser.parse_args()
cwd = os.getcwd()
if args.command == 'generate-config' or args.command == 'generate':
generate_file(os.path.join(cwd, default_config), generate_config)
if args.command == 'generate-manifest' or args.command == 'generate':
generate_file(os.path.join(cwd, default_manifest), generate_manifest)
# Pipeline execution
elif args.command == 'run':
require(os.path.exists(args.config), '{} not found. Please run '
'generate-config'.format(args.config))
if not hasattr(args, 'sample'):
require(os.path.exists(args.manifest), '{} not found and no samples provided. Please '
'run "generate-manifest"'.format(args.manifest))
# Parse config
parsed_config = {x.replace('-', '_'): y for x, y in yaml.load(open(args.config).read()).iteritems()}
inputs = argparse.Namespace(**parsed_config)
# Parse manifest file
uuid_list = []
with open(args.manifest) as f_manifest:
for line in f_manifest:
if not line.isspace() and not line.startswith('#'):
uuid_list.append(line.strip())
inputs.sort = False
if not inputs.dir_suffix:
inputs.dir_suffix = ''
if not inputs.s3_bucket:
inputs.s3_bucket = ''
if inputs.master_ip and inputs.num_nodes:
raise ValueError("Exactly one of master_ip (%s) and num_nodes (%d) must be provided." %
(inputs.master_ip, inputs.num_nodes))
if not inputs.master_ip and inputs.num_nodes <= 1:
raise ValueError('num_nodes allocates one Spark/HDFS master and n-1 workers, and thus must be greater '
'than 1. %d was passed.' % inputs.num_nodes)
if (inputs.pipeline_to_run != "adam" and
inputs.pipeline_to_run != "gatk" and
inputs.pipeline_to_run != "both"):
raise ValueError("pipeline_to_run must be either 'adam', 'gatk', or 'both'. %s was passed." % inputs.pipeline_to_run)
Job.Runner.startToil(Job.wrapJobFn(sample_loop, uuid_list, inputs), args) |
def generate_unique_key(master_key_path, url):
"""
Input1: Path to the BD2K Master Key (for S3 Encryption)
Input2: S3 URL (e.g. https://s3-us-west-2.amazonaws.com/cgl-driver-projects-encrypted/wcdt/exome_bams/DTB-111-N.bam)
Returns: 32-byte unique key generated for that URL
"""
with open(master_key_path, 'r') as f:
master_key = f.read()
assert len(master_key) == 32, 'Invalid Key! Must be 32 characters. ' \
'Key: {}, Length: {}'.format(master_key, len(master_key))
new_key = hashlib.sha256(master_key + url).digest()
assert len(new_key) == 32, 'New key is invalid and is not 32 characters: {}'.format(new_key)
return new_key |
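A minimal sketch of the key derivation above, using a dummy 32-character master key and a placeholder URL (Python 2 byte strings, as in the pipeline). The point is that the per-file key is deterministic, so the same key can be regenerated later to decrypt the object:
import hashlib

master_key = 'A' * 32  # stand-in for the contents of the master key file
url = 'https://s3-us-west-2.amazonaws.com/example-bucket/sample.bam'
per_file_key = hashlib.sha256(master_key + url).digest()
assert len(per_file_key) == 32
# Re-deriving with the same inputs yields the same key
assert per_file_key == hashlib.sha256(master_key + url).digest()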
def download_encrypted_file(work_dir, url, key_path, name):
"""
Downloads encrypted file from S3
Input1: Working directory
Input2: S3 URL to be downloaded
Input3: Path to key necessary for decryption
Input4: name of file to be downloaded
"""
file_path = os.path.join(work_dir, name)
key = generate_unique_key(key_path, url)
encoded_key = base64.b64encode(key)
encoded_key_md5 = base64.b64encode(hashlib.md5(key).digest())
h1 = 'x-amz-server-side-encryption-customer-algorithm:AES256'
h2 = 'x-amz-server-side-encryption-customer-key:{}'.format(encoded_key)
h3 = 'x-amz-server-side-encryption-customer-key-md5:{}'.format(encoded_key_md5)
try:
subprocess.check_call(['curl', '-fs', '--retry', '5', '-H', h1, '-H', h2, '-H', h3, url, '-o', file_path])
except OSError:
raise RuntimeError('Failed to find "curl". Install via "apt-get install curl"')
assert os.path.exists(file_path) |
def return_input_paths(job, work_dir, ids, *args):
"""
Returns the paths of files from the FileStore
Input1: Toil job instance
Input2: Working directory
Input3: jobstore id dictionary
Input4: names of files to be returned from the jobstore
Returns: path(s) to the file(s) requested -- unpack these!
"""
paths = OrderedDict()
for name in args:
if not os.path.exists(os.path.join(work_dir, name)):
file_path = job.fileStore.readGlobalFile(ids[name], os.path.join(work_dir, name))
else:
file_path = os.path.join(work_dir, name)
paths[name] = file_path
if len(args) == 1:
return file_path
return paths.values() |
def move_to_output_dir(work_dir, output_dir, uuid=None, files=list()):
"""
Moves files from work_dir to output_dir
Input1: Working directory
Input2: Output directory
Input3: UUID to be prepended onto the file name
Input4: list of file names to be moved from working dir to output dir
"""
for fname in files:
if uuid is None:
shutil.move(os.path.join(work_dir, fname), os.path.join(output_dir, fname))
else:
shutil.move(os.path.join(work_dir, fname), os.path.join(output_dir, '{}.{}'.format(uuid, fname))) |
def batch_start(job, input_args):
"""
Downloads shared files that are used by all samples for alignment and places them in the jobstore.
"""
shared_files = ['ref.fa', 'ref.fa.amb', 'ref.fa.ann', 'ref.fa.bwt', 'ref.fa.pac', 'ref.fa.sa', 'ref.fa.fai']
shared_ids = {}
for fname in shared_files:
url = input_args[fname]
shared_ids[fname] = job.addChildJobFn(download_from_url, url, fname).rv()
job.addFollowOnJobFn(spawn_batch_jobs, shared_ids, input_args) |
def spawn_batch_jobs(job, shared_ids, input_args):
"""
Spawns an alignment job for every sample in the input configuration file
"""
samples = []
config = input_args['config']
with open(config, 'r') as f_in:
for line in f_in:
line = line.strip().split(',')
uuid = line[0]
urls = line[1:]
samples.append((uuid, urls))
for sample in samples:
job.addChildJobFn(alignment, shared_ids, input_args, sample, cores=32, memory='20 G', disk='100 G') |
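For illustration, parsing one line of the batch config consumed above into a UUID and its FASTQ URLs (the values are placeholders):
line = 'sample-1,https://example.com/reads_1.fastq.gz,https://example.com/reads_2.fastq.gz\n'
fields = line.strip().split(',')
uuid, urls = fields[0], fields[1:]
print(uuid)  # sample-1
print(urls)  # ['https://example.com/reads_1.fastq.gz', 'https://example.com/reads_2.fastq.gz']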
def alignment(job, ids, input_args, sample):
"""
Runs BWA and then Bamsort on the supplied fastqs for this sample
Input1: Toil Job instance
Input2: jobstore id dictionary
Input3: Input arguments dictionary
Input4: Sample tuple -- contains uuid and urls for the sample
"""
uuid, urls = sample
# ids['bam'] = job.fileStore.getEmptyFileStoreID()
work_dir = job.fileStore.getLocalTempDir()
output_dir = input_args['output_dir']
key_path = input_args['ssec']
cores = multiprocessing.cpu_count()
# I/O
return_input_paths(job, work_dir, ids, 'ref.fa', 'ref.fa.amb', 'ref.fa.ann',
'ref.fa.bwt', 'ref.fa.pac', 'ref.fa.sa', 'ref.fa.fai')
# Get fastqs associated with this sample
for url in urls:
download_encrypted_file(work_dir, url, key_path, os.path.basename(url))
# Parameters for BWA and Bamsort
docker_cmd = ['docker', 'run', '--rm', '-v', '{}:/data'.format(work_dir)]
bwa_command = ["jvivian/bwa",
"mem",
"-R", "@RG\tID:{0}\tPL:Illumina\tSM:{0}\tLB:KapaHyper".format(uuid),
"-T", str(0),
"-t", str(cores),
"/data/ref.fa"] + [os.path.join('/data/', os.path.basename(x)) for x in urls]
bamsort_command = ["jeltje/biobambam",
"/usr/local/bin/bamsort",
"inputformat=sam",
"level=1",
"inputthreads={}".format(cores),
"outputthreads={}".format(cores),
"calmdnm=1",
"calmdnmrecompindetonly=1",
"calmdnmreference=/data/ref.fa",
"I=/data/{}".format(uuid + '.sam')]
# Piping the output to a file handle
with open(os.path.join(work_dir, uuid + '.sam'), 'w') as f_out:
subprocess.check_call(docker_cmd + bwa_command, stdout=f_out)
with open(os.path.join(work_dir, uuid + '.bam'), 'w') as f_out:
subprocess.check_call(docker_cmd + bamsort_command, stdout=f_out)
# Save in JobStore
# job.fileStore.updateGlobalFile(ids['bam'], os.path.join(work_dir, uuid + '.bam'))
ids['bam'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, uuid + '.bam'))
# Copy file to S3
if input_args['s3_dir']:
job.addChildJobFn(upload_bam_to_s3, ids, input_args, sample, cores=32, memory='20 G', disk='30 G')
# Move file in output_dir
if input_args['output_dir']:
move_to_output_dir(work_dir, output_dir, uuid=None, files=[uuid + '.bam']) |
def upload_bam_to_s3(job, ids, input_args, sample):
"""
Uploads output BAM from sample to S3
Input1: Toil Job instance
Input2: jobstore id dictionary
Input3: Input arguments dictionary
Input4: Sample tuple -- contains uuid and urls for the sample
"""
uuid, urls = sample
key_path = input_args['ssec']
work_dir = job.fileStore.getLocalTempDir()
# Parse s3_dir to get bucket and s3 path
s3_dir = input_args['s3_dir']
bucket_name = s3_dir.lstrip('/').split('/')[0]
bucket_dir = '/'.join(s3_dir.lstrip('/').split('/')[1:])
base_url = 'https://s3-us-west-2.amazonaws.com/'
url = os.path.join(base_url, bucket_name, bucket_dir, uuid + '.bam')
#I/O
job.fileStore.readGlobalFile(ids['bam'], os.path.join(work_dir, uuid + '.bam'))
# Generate keyfile for upload
with open(os.path.join(work_dir, uuid + '.key'), 'wb') as f_out:
f_out.write(generate_unique_key(key_path, url))
# Commands to upload to S3 via S3AM
s3am_command = ['s3am',
'upload',
'--sse-key-file', os.path.join(work_dir, uuid + '.key'),
'file://{}'.format(os.path.join(work_dir, uuid + '.bam')),
bucket_name,
os.path.join(bucket_dir, uuid + '.bam')]
subprocess.check_call(s3am_command) |
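A small illustration of how the s3_dir setting above is split into a bucket name and key prefix (the value shown is a placeholder):
s3_dir = 'example-bucket/alignments/project-1'
bucket_name = s3_dir.lstrip('/').split('/')[0]
bucket_dir = '/'.join(s3_dir.lstrip('/').split('/')[1:])
print(bucket_name)  # example-bucket
print(bucket_dir)   # alignments/project-1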
def vqsr_pipeline(job, uuid, vcf_id, config):
"""
Runs GATK Variant Quality Score Recalibration.
0: Start
1: Recalibrate SNPs
2: Recalibrate INDELs
3: Apply SNP Recalibration
4: Apply INDEL Recalibration
5: Write VCF to output directory
Job DAG: 0 --> 1 --> 3 --> 4 --> 5, with a parallel branch 0 --> 2 --> 4
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str uuid: unique sample identifier
:param str vcf_id: VCF FileStoreID
:param Namespace config: Pipeline configuration options and shared files
Requires the following config attributes:
config.genome_fasta FileStoreID for reference genome fasta file
config.genome_fai FileStoreID for reference genome fasta index file
config.genome_dict FileStoreID for reference genome sequence dictionary file
config.cores Number of cores for each job
config.xmx Java heap size in bytes
config.suffix Suffix for output filename
config.output_dir URL or local path to output directory
config.ssec Path to key file for SSE-C encryption
SNP VQSR attributes:
config.snp_filter_annotations List of GATK variant annotations
config.hapmap FileStoreID for HapMap resource file
config.omni FileStoreID for Omni resource file
config.dbsnp FileStoreID for dbSNP resource file
config.g1k_snp FileStoreID for 1000G SNP resource file
INDEL VQSR attributes:
config.indel_filter_annotations List of GATK variant annotations
config.dbsnp FileStoreID for dbSNP resource file
config.mills FileStoreID for Mills resource file
:return: SNP and INDEL VQSR VCF FileStoreID
:rtype: str
"""
# Get the total size of the genome reference
genome_ref_size = config.genome_fasta.size + config.genome_fai.size + config.genome_dict.size
# The VariantRecalibrator disk requirement depends on the input VCF, the resource files,
# the genome reference files, and the output recalibration table, tranche file, and plots.
# The combined size of these output files is smaller than the input VCF.
snp_resources = ['hapmap', 'omni', 'dbsnp', 'g1k_snp']
snp_resource_size = sum(getattr(config, resource).size for resource in snp_resources)
snp_recal_disk = PromisedRequirement(lambda in_vcf, ref_size, resource_size:
2 * in_vcf.size + ref_size + resource_size,
vcf_id,
genome_ref_size,
snp_resource_size)
snp_recal = job.wrapJobFn(gatk_variant_recalibrator,
'SNP',
vcf_id,
config.genome_fasta,
config.genome_fai,
config.genome_dict,
get_short_annotations(config.snp_filter_annotations),
hapmap=config.hapmap,
omni=config.omni,
phase=config.g1k_snp,
dbsnp=config.dbsnp,
unsafe_mode=config.unsafe_mode,
disk=snp_recal_disk,
cores=config.cores,
memory=config.xmx)
indel_resource_size = config.mills.size + config.dbsnp.size
indel_recal_disk = PromisedRequirement(lambda in_vcf, ref_size, resource_size:
2 * in_vcf.size + ref_size + resource_size,
vcf_id,
genome_ref_size,
indel_resource_size)
indel_recal = job.wrapJobFn(gatk_variant_recalibrator,
'INDEL',
vcf_id,
config.genome_fasta,
config.genome_fai,
config.genome_dict,
get_short_annotations(config.indel_filter_annotations),
dbsnp=config.dbsnp,
mills=config.mills,
unsafe_mode=config.unsafe_mode,
disk=indel_recal_disk,
cores=config.cores,
memory=config.xmx)
# The ApplyRecalibration disk requirement depends on the input VCF size, the variant
# recalibration table, the tranche file, the genome reference file, and the output VCF.
# This step labels variants as filtered, so the output VCF file should be slightly larger
# than the input file. Estimate a 10% increase in the VCF file size.
apply_snp_recal_disk = PromisedRequirement(lambda in_vcf, recal, tranche, ref_size:
int(2.1 * in_vcf.size + recal.size + tranche.size + ref_size),
vcf_id,
snp_recal.rv(0),
snp_recal.rv(1),
genome_ref_size)
apply_snp_recal = job.wrapJobFn(gatk_apply_variant_recalibration,
'SNP',
vcf_id,
snp_recal.rv(0), snp_recal.rv(1),
config.genome_fasta,
config.genome_fai,
config.genome_dict,
unsafe_mode=config.unsafe_mode,
disk=apply_snp_recal_disk,
cores=config.cores,
memory=config.xmx)
apply_indel_recal_disk = PromisedRequirement(lambda in_vcf, recal, tranche, ref_size:
int(2.1 * in_vcf.size + recal.size + tranche.size + ref_size),
vcf_id,
indel_recal.rv(0),
indel_recal.rv(1),
genome_ref_size)
apply_indel_recal = job.wrapJobFn(gatk_apply_variant_recalibration,
'INDEL',
apply_snp_recal.rv(),
indel_recal.rv(0), indel_recal.rv(1),
config.genome_fasta,
config.genome_fai,
config.genome_dict,
unsafe_mode=config.unsafe_mode,
disk=apply_indel_recal_disk,
cores=config.cores,
memory=config.xmx)
job.addChild(snp_recal)
job.addChild(indel_recal)
snp_recal.addChild(apply_snp_recal)
indel_recal.addChild(apply_indel_recal)
apply_snp_recal.addChild(apply_indel_recal)
# Output recalibrated VCF
output_dir = config.output_dir
output_dir = os.path.join(output_dir, uuid)
vqsr_name = '%s.vqsr%s.vcf' % (uuid, config.suffix)
output_vqsr = job.wrapJobFn(output_file_job,
vqsr_name,
apply_indel_recal.rv(),
output_dir,
s3_key_path=config.ssec,
disk=PromisedRequirement(lambda x: x.size, apply_indel_recal.rv()))
apply_indel_recal.addChild(output_vqsr)
return apply_indel_recal.rv() |
def get_short_annotations(annotations):
"""
Converts full GATK annotation names to the shortened versions used in the VCF header
:param list[str] annotations: GATK annotation names
:return: shortened annotation names
:rtype: list[str]
"""
# Annotations need to match VCF header
short_name = {'QualByDepth': 'QD',
'FisherStrand': 'FS',
'StrandOddsRatio': 'SOR',
'ReadPosRankSumTest': 'ReadPosRankSum',
'MappingQualityRankSumTest': 'MQRankSum',
'RMSMappingQuality': 'MQ',
'InbreedingCoeff': 'ID'}
short_annotations = []
for annotation in annotations:
if annotation in short_name:
annotation = short_name[annotation]
short_annotations.append(annotation)
return short_annotations |
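Example usage of get_short_annotations; names without a known short form (here 'DP', which is already a VCF key) pass through unchanged:
annotations = ['QualByDepth', 'FisherStrand', 'ReadPosRankSumTest', 'DP']
print(get_short_annotations(annotations))
# ['QD', 'FS', 'ReadPosRankSum', 'DP']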
def parse_sra(path_to_config):
"""
Parses genetorrent config file. Returns list of samples: [ [id1, id1 ], [id2, id2], ... ]
Returns duplicate of ids to follow UUID/URL standard.
"""
samples = []
with open(path_to_config, 'r') as f:
for line in f.readlines():
if not line.isspace():
samples.append(line.strip())
return samples |
def tarball_files(work_dir, tar_name, uuid=None, files=None):
"""
Tars a group of files together into a tarball
work_dir: str Current Working Directory
tar_name: str Name of tarball
uuid: str UUID to stamp files with
files: str(s) List of filenames to place in the tarball from working directory
"""
with tarfile.open(os.path.join(work_dir, tar_name), 'w:gz') as f_out:
for fname in files:
if uuid:
f_out.add(os.path.join(work_dir, fname), arcname=uuid + '.' + fname)
else:
f_out.add(os.path.join(work_dir, fname), arcname=fname) |
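Example usage of tarball_files, assuming the working directory already contains the listed FASTQ files (all names here are placeholders):
work_dir = '/tmp/example'  # assumed to already contain the two FASTQ files
tarball_files(work_dir,
              tar_name='sample-1.tar.gz',
              uuid='sample-1',
              files=['reads_1.fastq.gz', 'reads_2.fastq.gz'])
# Creates /tmp/example/sample-1.tar.gz containing
# sample-1.reads_1.fastq.gz and sample-1.reads_2.fastq.gz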
def start_batch(job, input_args):
"""
Spawns a child job that recursively splits the sample list until each leaf job handles a single sample
"""
samples = parse_sra(input_args['sra'])
job.addChildJobFn(download_and_transfer_sample, input_args, samples, cores=1, disk='30G')
def download_and_transfer_sample(job, input_args, samples):
"""
Downloads a sample from dbGaP via SRAToolKit, then uses S3AM to transfer it to S3
input_args: dict Dictionary of input arguments
samples: list List of SRA accession IDs, split recursively until one sample remains
"""
if len(samples) > 1:
a = samples[len(samples)/2:]
b = samples[:len(samples)/2]
job.addChildJobFn(download_and_transfer_sample, input_args, a, disk='30G')
job.addChildJobFn(download_and_transfer_sample, input_args, b, disk='30G')
else:
analysis_id = samples[0]
work_dir = job.fileStore.getLocalTempDir()
sudo = input_args['sudo']
# Acquire dbgap_key
shutil.copy(input_args['dbgap_key'], os.path.join(work_dir, 'dbgap.ngc'))
# Call to fastq-dump to pull down SRA files and convert to fastq
if input_args['single_end']:
parameters = [analysis_id]
else:
parameters = ['--split-files', analysis_id]
docker_call(tool='quay.io/ucsc_cgl/fastq-dump:2.5.7--4577a6c1a3c94adaa0c25dd6c03518ee610433d1',
work_dir=work_dir, tool_parameters=parameters, sudo=sudo)
# Collect files and encapsulate into a tarball
shutil.rmtree(os.path.join(work_dir, 'sra'))
sample_name = analysis_id + '.tar.gz'
if input_args['single_end']:
r = [os.path.basename(x) for x in glob.glob(os.path.join(work_dir, '*.f*'))]
tarball_files(work_dir, tar_name=sample_name, files=r)
else:
r1 = [os.path.basename(x) for x in glob.glob(os.path.join(work_dir, '*_1*'))]
r2 = [os.path.basename(x) for x in glob.glob(os.path.join(work_dir, '*_2*'))]
tarball_files(work_dir, tar_name=sample_name, files=r1 + r2)
# Parse s3_dir to get bucket and s3 path
key_path = input_args['ssec']
s3_dir = input_args['s3_dir']
bucket_name = s3_dir.lstrip('/').split('/')[0]
base_url = 'https://s3-us-west-2.amazonaws.com/'
url = os.path.join(base_url, bucket_name, sample_name)
# Generate keyfile for upload
with open(os.path.join(work_dir, 'temp.key'), 'wb') as f_out:
f_out.write(generate_unique_key(key_path, url))
# Upload to S3 via S3AM
s3am_command = ['s3am',
'upload',
'--sse-key-file', os.path.join(work_dir, 'temp.key'),
'file://{}'.format(os.path.join(work_dir, sample_name)),
's3://' + bucket_name + '/']
subprocess.check_call(s3am_command) |
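The divide-and-conquer scheduling above can be pictured on a plain list: each call splits its sample list in half until a single accession remains, which is then processed. A self-contained sketch with made-up accession IDs:
def split_samples(samples):
    if len(samples) > 1:
        return (split_samples(samples[len(samples) // 2:]) +
                split_samples(samples[:len(samples) // 2]))
    return [samples[0]]

print(split_samples(['SRR001', 'SRR002', 'SRR003', 'SRR004', 'SRR005']))
# ['SRR005', 'SRR004', 'SRR003', 'SRR002', 'SRR001']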
def main():
"""
Transfer GTEx data from dbGaP (NCBI) to S3
"""
# Define Parser object and add to toil
parser = build_parser()
Job.Runner.addToilOptions(parser)
args = parser.parse_args()
# Store inputs from argparse
inputs = {'sra': args.sra,
'dbgap_key': args.dbgap_key,
'ssec': args.ssec,
's3_dir': args.s3_dir,
'single_end': args.single_end,
'sudo': args.sudo}
# Sanity checks
if args.ssec:
assert os.path.isfile(args.ssec)
if args.sra:
assert os.path.isfile(args.sra)
if args.dbgap_key:
assert os.path.isfile(args.dbgap_key)
# Start Pipeline
Job.Runner.startToil(Job.wrapJobFn(start_batch, inputs), args) |
def output_file_job(job, filename, file_id, output_dir, s3_key_path=None):
"""
Uploads a file from the FileStore to an output directory on the local filesystem or S3.
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str filename: basename for file
:param str file_id: FileStoreID
:param str output_dir: Amazon S3 URL or local path
:param str s3_key_path: (OPTIONAL) Path to 32-byte key to be used for SSE-C encryption
:return:
"""
job.fileStore.logToMaster('Writing {} to {}'.format(filename, output_dir))
work_dir = job.fileStore.getLocalTempDir()
filepath = job.fileStore.readGlobalFile(file_id, os.path.join(work_dir, filename))
if urlparse(output_dir).scheme == 's3':
s3am_upload(job=job, fpath=os.path.join(work_dir, filepath),
s3_dir=output_dir,
s3_key_path=s3_key_path)
elif os.path.exists(os.path.join(output_dir, filename)):
job.fileStore.logToMaster("File already exists: {}".format(filename))
else:
mkdir_p(output_dir)
copy_files([filepath], output_dir) |
def download_encrypted_file(job, input_args, name):
"""
Downloads encrypted files from S3 via header injection
input_args: dict Input dictionary defined in main()
name: str Symbolic name associated with file
"""
work_dir = job.fileStore.getLocalTempDir()
key_path = input_args['ssec']
file_path = os.path.join(work_dir, name)
url = input_args[name]
with open(key_path, 'r') as f:
key = f.read()
if len(key) != 32:
raise RuntimeError('Invalid Key! Must be 32 bytes: {}'.format(key))
key = generate_unique_key(key_path, url)
encoded_key = base64.b64encode(key)
encoded_key_md5 = base64.b64encode(hashlib.md5(key).digest())
h1 = 'x-amz-server-side-encryption-customer-algorithm:AES256'
h2 = 'x-amz-server-side-encryption-customer-key:{}'.format(encoded_key)
h3 = 'x-amz-server-side-encryption-customer-key-md5:{}'.format(encoded_key_md5)
try:
subprocess.check_call(['curl', '-fs', '--retry', '5', '-H', h1, '-H', h2, '-H', h3, url, '-o', file_path])
except OSError:
raise RuntimeError('Failed to find "curl". Install via "apt-get install curl"')
assert os.path.exists(file_path)
return job.fileStore.writeGlobalFile(file_path) |
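A usage sketch mirroring how the RNA-seq pipeline below schedules this job; the 'sample.tar' key and disk size are illustrative:
# input_args must hold 'ssec' (path to the master key file) and a 'sample.tar' entry with the S3 URL.
ids['sample.tar'] = job.addChildJobFn(download_encrypted_file, input_args, 'sample.tar', disk='25G').rv()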
Simple curl request made for a given url
url: str URL to download
def download_from_url(job, url):
"""
Simple curl request made for a given url
url: str URL to download
"""
work_dir = job.fileStore.getLocalTempDir()
file_path = os.path.join(work_dir, os.path.basename(url))
if not os.path.exists(file_path):
if url.startswith('s3:'):
download_from_s3_url(file_path, url)
else:
try:
                subprocess.check_call(['curl', '-fs', '--retry', '5', '--create-dirs', url, '-o', file_path])
except OSError:
raise RuntimeError('Failed to find "curl". Install via "apt-get install curl"')
assert os.path.exists(file_path)
return job.fileStore.writeGlobalFile(file_path) |
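A usage sketch, matching how the shared-file download below calls it; the URL is a placeholder:
# Schedule the download as a child job and keep a promise for the resulting FileStoreID.
file_id = job.addChildJobFn(download_from_url, 'https://example.com/annotation/unc.bed').rv()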
Makes subprocess call of a command to a docker container.
tool_parameters: list An array of the parameters to be passed to the tool
tool: str Name of the Docker image to be used (e.g. quay.io/ucsc_cgl/samtools)
java_opts: str Optional commands to pass to a java jar execution. (e.g. '-Xmx15G')
outfile: file Filehandle that stdout will be passed to
sudo: bool If the user wants the docker command executed as sudo
def docker_call(work_dir, tool_parameters, tool, java_opts=None, outfile=None, sudo=False):
"""
Makes subprocess call of a command to a docker container.
tool_parameters: list An array of the parameters to be passed to the tool
tool: str Name of the Docker image to be used (e.g. quay.io/ucsc_cgl/samtools)
java_opts: str Optional commands to pass to a java jar execution. (e.g. '-Xmx15G')
    outfile: file Filehandle that stdout will be passed to
sudo: bool If the user wants the docker command executed as sudo
"""
base_docker_call = 'docker run --log-driver=none --rm -v {}:/data'.format(work_dir).split()
if sudo:
base_docker_call = ['sudo'] + base_docker_call
if java_opts:
base_docker_call = base_docker_call + ['-e', 'JAVA_OPTS={}'.format(java_opts)]
try:
if outfile:
subprocess.check_call(base_docker_call + [tool] + tool_parameters, stdout=outfile)
else:
subprocess.check_call(base_docker_call + [tool] + tool_parameters)
except subprocess.CalledProcessError:
raise RuntimeError('docker command returned a non-zero exit status. Check error logs.')
except OSError:
raise RuntimeError('docker not found on system. Install on all nodes.') |
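A usage sketch with the samtools image used elsewhere in this pipeline; the BAM name is illustrative:
# Index /data/sorted.bam inside the container; work_dir is mounted at /data.
docker_call(work_dir=work_dir,
            tool_parameters=['index', '/data/sorted.bam'],
            tool='quay.io/ucsc_cgl/samtools:0.1.19--dd5ac549b95eb3e5d166a5e310417ef13651994e')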
Copies a list of files from work_dir to output_dir, optionally prefixing each with a UUID.
work_dir: str Current working directory
output_dir: str Output directory for files to go
uuid: str UUID to "stamp" onto output files
files: list List of files to iterate through
def copy_to_output_dir(work_dir, output_dir, uuid=None, files=list()):
"""
    Copies a list of files from work_dir to output_dir, optionally prefixing each with a UUID.
work_dir: str Current working directory
output_dir: str Output directory for files to go
uuid: str UUID to "stamp" onto output files
files: list List of files to iterate through
"""
for fname in files:
if uuid is None:
shutil.copy(os.path.join(work_dir, fname), os.path.join(output_dir, fname))
else:
shutil.copy(os.path.join(work_dir, fname), os.path.join(output_dir, '{}.{}'.format(uuid, fname))) |
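A usage sketch mirroring the consolidation step below; the tarball name is illustrative:
# Copy the consolidated tarball to the output directory without stamping a UUID prefix.
copy_to_output_dir(work_dir, output_dir, uuid=None, files=['sample-uuid.tar.gz'])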
Checks that dependency programs are installed.
input_args: dict Dictionary of input arguments (from main())
def program_checks(job, input_args):
"""
Checks that dependency programs are installed.
input_args: dict Dictionary of input arguments (from main())
"""
# Program checks
for program in ['curl', 'docker', 'unzip', 'samtools']:
assert which(program), 'Program "{}" must be installed on every node.'.format(program)
job.addChildJobFn(download_shared_files, input_args) |
Downloads and stores shared input files in the FileStore
input_args: dict Dictionary of input arguments (from main())
def download_shared_files(job, input_args):
"""
    Downloads and stores shared input files in the FileStore
input_args: dict Dictionary of input arguments (from main())
"""
shared_files = ['unc.bed', 'hg19.transcripts.fa', 'composite_exons.bed', 'normalize.pl', 'rsem_ref.zip',
'ebwt.zip', 'chromosomes.zip']
shared_ids = {}
for f in shared_files:
shared_ids[f] = job.addChildJobFn(download_from_url, input_args[f]).rv()
if input_args['config'] or input_args['config_fastq']:
job.addFollowOnJobFn(parse_config_file, shared_ids, input_args)
else:
sample_path = input_args['input']
uuid = os.path.splitext(os.path.basename(sample_path))[0]
sample = (uuid, sample_path)
job.addFollowOnJobFn(download_sample, shared_ids, input_args, sample) |
Launches pipeline for each sample.
ids: dict Dictionary of fileStore IDs
input_args: dict Dictionary of input arguments
def parse_config_file(job, ids, input_args):
"""
Launches pipeline for each sample.
    ids: dict Dictionary of fileStore IDs
input_args: dict Dictionary of input arguments
"""
samples = []
config = input_args['config']
with open(config, 'r') as f:
for line in f.readlines():
if not line.isspace():
sample = line.strip().split(',')
samples.append(sample)
for sample in samples:
job.addChildJobFn(download_sample, ids, input_args, sample) |
Defines variables unique to a sample that are used in the rest of the pipelines
ids: dict Dictionary of fileStore IDs
input_args: dict Dictionary of input arguments
sample: tuple Contains uuid and sample_url
def download_sample(job, ids, input_args, sample):
"""
Defines variables unique to a sample that are used in the rest of the pipelines
    ids: dict Dictionary of fileStore IDs
input_args: dict Dictionary of input arguments
sample: tuple Contains uuid and sample_url
"""
if len(sample) == 2:
uuid, sample_location = sample
url1, url2 = None, None
else:
uuid, url1, url2 = sample
sample_location = None
# Update values unique to sample
sample_input = dict(input_args)
sample_input['uuid'] = uuid
sample_input['sample.tar'] = sample_location
if sample_input['output_dir']:
sample_input['output_dir'] = os.path.join(input_args['output_dir'], uuid)
sample_input['cpu_count'] = multiprocessing.cpu_count()
job_vars = (sample_input, ids)
# Download or locate local file and place in the jobStore
if sample_input['input']:
ids['sample.tar'] = job.fileStore.writeGlobalFile(os.path.abspath(sample_location))
elif sample_input['config_fastq']:
ids['R1.fastq'] = job.fileStore.writeGlobalFile(urlparse(url1).path)
ids['R2.fastq'] = job.fileStore.writeGlobalFile(urlparse(url2).path)
else:
if sample_input['ssec']:
ids['sample.tar'] = job.addChildJobFn(download_encrypted_file, sample_input, 'sample.tar', disk='25G').rv()
else:
ids['sample.tar'] = job.addChildJobFn(download_from_url, sample_input['sample.tar'], disk='25G').rv()
job.addFollowOnJobFn(static_dag_launchpoint, job_vars) |
Statically define jobs in the pipeline
job_vars: tuple Tuple of dictionaries: input_args and ids
def static_dag_launchpoint(job, job_vars):
"""
Statically define jobs in the pipeline
job_vars: tuple Tuple of dictionaries: input_args and ids
"""
input_args, ids = job_vars
if input_args['config_fastq']:
cores = input_args['cpu_count']
a = job.wrapJobFn(mapsplice, job_vars, cores=cores, disk='130G').encapsulate()
else:
a = job.wrapJobFn(merge_fastqs, job_vars, disk='70 G').encapsulate()
b = job.wrapJobFn(consolidate_output, job_vars, a.rv())
# Take advantage of "encapsulate" to simplify pipeline wiring
job.addChild(a)
a.addChild(b) |
Untars the input sample and concatenates the Read 1 and Read 2 groups together.
job_vars: tuple Tuple of dictionaries: input_args and ids
def merge_fastqs(job, job_vars):
"""
    Untars the input sample and concatenates the Read 1 and Read 2 groups together.
job_vars: tuple Tuple of dictionaries: input_args and ids
"""
input_args, ids = job_vars
work_dir = job.fileStore.getLocalTempDir()
cores = input_args['cpu_count']
single_end_reads = input_args['single_end_reads']
# I/O
sample = return_input_paths(job, work_dir, ids, 'sample.tar')
# Untar File
# subprocess.check_call(['unzip', sample, '-d', work_dir])
subprocess.check_call(['tar', '-xvf', sample, '-C', work_dir])
# Remove large files before creating concat versions.
os.remove(os.path.join(work_dir, 'sample.tar'))
# Zcat files in parallel
if single_end_reads:
files = sorted(glob.glob(os.path.join(work_dir, '*')))
with open(os.path.join(work_dir, 'R1.fastq'), 'w') as f1:
subprocess.check_call(['zcat'] + files, stdout=f1)
# FileStore
ids['R1.fastq'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R1.fastq'))
else:
r1_files = sorted(glob.glob(os.path.join(work_dir, '*R1*')))
r2_files = sorted(glob.glob(os.path.join(work_dir, '*R2*')))
with open(os.path.join(work_dir, 'R1.fastq'), 'w') as f1:
p1 = subprocess.Popen(['zcat'] + r1_files, stdout=f1)
with open(os.path.join(work_dir, 'R2.fastq'), 'w') as f2:
p2 = subprocess.Popen(['zcat'] + r2_files, stdout=f2)
p1.wait()
p2.wait()
# FileStore
ids['R1.fastq'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R1.fastq'))
ids['R2.fastq'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R2.fastq'))
job.fileStore.deleteGlobalFile(ids['sample.tar'])
# Spawn child job
return job.addChildJobFn(mapsplice, job_vars, cores=cores, disk='130 G').rv() |
Maps RNA-Seq reads to a reference genome.
job_vars: tuple Tuple of dictionaries: input_args and ids
def mapsplice(job, job_vars):
"""
Maps RNA-Seq reads to a reference genome.
job_vars: tuple Tuple of dictionaries: input_args and ids
"""
# Unpack variables
input_args, ids = job_vars
work_dir = job.fileStore.getLocalTempDir()
cores = input_args['cpu_count']
sudo = input_args['sudo']
single_end_reads = input_args['single_end_reads']
files_to_delete = ['R1.fastq']
# I/O
return_input_paths(job, work_dir, ids, 'ebwt.zip', 'chromosomes.zip')
if single_end_reads:
return_input_paths(job, work_dir, ids, 'R1.fastq')
else:
return_input_paths(job, work_dir, ids, 'R1.fastq', 'R2.fastq')
files_to_delete.extend(['R2.fastq'])
for fname in ['chromosomes.zip', 'ebwt.zip']:
subprocess.check_call(['unzip', '-o', os.path.join(work_dir, fname), '-d', work_dir])
# Command and call
parameters = ['-p', str(cores),
'-s', '25',
'--bam',
'--min-map-len', '50',
'-x', '/data/ebwt',
'-c', '/data/chromosomes',
'-1', '/data/R1.fastq',
'-o', '/data']
if not single_end_reads:
parameters.extend(['-2', '/data/R2.fastq'])
docker_call(tool='quay.io/ucsc_cgl/mapsplice:2.1.8--dd5ac549b95eb3e5d166a5e310417ef13651994e',
tool_parameters=parameters, work_dir=work_dir, sudo=sudo)
# Write to FileStore
for fname in ['alignments.bam', 'stats.txt']:
ids[fname] = job.fileStore.writeGlobalFile(os.path.join(work_dir, fname))
for fname in files_to_delete:
job.fileStore.deleteGlobalFile(ids[fname])
# Run child job
# map_id = job.addChildJobFn(mapping_stats, job_vars).rv()
if input_args['upload_bam_to_s3'] and input_args['s3_dir']:
job.addChildJobFn(upload_bam_to_s3, job_vars)
output_ids = job.addChildJobFn(add_read_groups, job_vars, disk='30 G').rv()
return output_ids |
This function adds read groups to the headers
job_vars: tuple Tuple of dictionaries: input_args and ids
def add_read_groups(job, job_vars):
"""
This function adds read groups to the headers
job_vars: tuple Tuple of dictionaries: input_args and ids
"""
input_args, ids = job_vars
work_dir = job.fileStore.getLocalTempDir()
sudo = input_args['sudo']
# I/O
alignments = return_input_paths(job, work_dir, ids, 'alignments.bam')
output = os.path.join(work_dir, 'rg_alignments.bam')
    # Command and call
parameter = ['AddOrReplaceReadGroups',
'INPUT={}'.format(docker_path(alignments)),
'OUTPUT={}'.format(docker_path(output)),
'RGSM={}'.format(input_args['uuid']),
'RGID={}'.format(input_args['uuid']),
'RGLB=TruSeq',
'RGPL=illumina',
'RGPU=barcode',
'VALIDATION_STRINGENCY=SILENT']
docker_call(tool='quay.io/ucsc_cgl/picardtools:1.95--dd5ac549b95eb3e5d166a5e310417ef13651994e',
tool_parameters=parameter, work_dir=work_dir, sudo=sudo)
# Write to FileStore
ids['rg_alignments.bam'] = job.fileStore.writeGlobalFile(output)
# Run child job
return job.addChildJobFn(bamsort_and_index, job_vars, disk='30 G').rv() |
Sorts bam file and produces index file
job_vars: tuple Tuple of dictionaries: input_args and ids
def bamsort_and_index(job, job_vars):
"""
Sorts bam file and produces index file
job_vars: tuple Tuple of dictionaries: input_args and ids
"""
# Unpack variables
input_args, ids = job_vars
work_dir = job.fileStore.getLocalTempDir()
sudo = input_args['sudo']
# I/O
rg_alignments = return_input_paths(job, work_dir, ids, 'rg_alignments.bam')
output = os.path.join(work_dir, 'sorted.bam')
# Command -- second argument is "Output Prefix"
cmd1 = ['sort', docker_path(rg_alignments), docker_path('sorted')]
cmd2 = ['index', docker_path(output)]
docker_call(tool='quay.io/ucsc_cgl/samtools:0.1.19--dd5ac549b95eb3e5d166a5e310417ef13651994e',
tool_parameters=cmd1, work_dir=work_dir, sudo=sudo)
docker_call(tool='quay.io/ucsc_cgl/samtools:0.1.19--dd5ac549b95eb3e5d166a5e310417ef13651994e',
tool_parameters=cmd2, work_dir=work_dir, sudo=sudo)
# Write to FileStore
ids['sorted.bam'] = job.fileStore.writeGlobalFile(output)
ids['sorted.bam.bai'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'sorted.bam.bai'))
# Run child job
output_ids = job.addChildJobFn(sort_bam_by_reference, job_vars, disk='50 G').rv()
rseq_id = job.addChildJobFn(rseq_qc, job_vars, disk='20 G').rv()
return rseq_id, output_ids |
QC module: contains QC metrics and information about the BAM post alignment
job_vars: tuple Tuple of dictionaries: input_args and ids
def rseq_qc(job, job_vars):
"""
QC module: contains QC metrics and information about the BAM post alignment
job_vars: tuple Tuple of dictionaries: input_args and ids
"""
input_args, ids = job_vars
work_dir = job.fileStore.getLocalTempDir()
uuid = input_args['uuid']
sudo = input_args['sudo']
# I/O
return_input_paths(job, work_dir, ids, 'sorted.bam', 'sorted.bam.bai')
# Command
docker_call(tool='jvivian/qc', tool_parameters=['/opt/cgl-docker-lib/RseqQC_v2.sh', '/data/sorted.bam', uuid],
work_dir=work_dir, sudo=sudo)
# Write to FileStore
output_files = [f for f in glob.glob(os.path.join(work_dir, '*')) if 'sorted.bam' not in f]
tarball_files(work_dir, tar_name='qc.tar.gz', uuid=None, files=output_files)
return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'qc.tar.gz')) |
Sorts the bam by reference
job_vars: tuple Tuple of dictionaries: input_args and ids
def sort_bam_by_reference(job, job_vars):
"""
Sorts the bam by reference
job_vars: tuple Tuple of dictionaries: input_args and ids
"""
# Unpack variables
input_args, ids = job_vars
work_dir = job.fileStore.getLocalTempDir()
# I/O
sorted_bam, sorted_bai = return_input_paths(job, work_dir, ids, 'sorted.bam', 'sorted.bam.bai')
output = os.path.join(work_dir, 'sort_by_ref.bam')
# Call: Samtools
ref_seqs = []
handle = subprocess.Popen(["samtools", "view", "-H", sorted_bam], stdout=subprocess.PIPE).stdout
for line in handle:
if line.startswith("@SQ"):
tmp = line.split("\t")
chrom = tmp[1].split(":")[1]
ref_seqs.append(chrom)
handle.close()
# Iterate through chromosomes to create mini-bams
for chrom in ref_seqs:
# job.addChildJobFn(sbbr_child, chrom, os.path.join(work_dir, chrom), sorted_bam)
cmd_view = ["samtools", "view", "-b", sorted_bam, chrom]
cmd_sort = ["samtools", "sort", "-m", "3000000000", "-n", "-", os.path.join(work_dir, chrom)]
p1 = subprocess.Popen(cmd_view, stdout=subprocess.PIPE)
subprocess.check_call(cmd_sort, stdin=p1.stdout)
sorted_files = [os.path.join(work_dir, chrom) + '.bam' for chrom in ref_seqs]
cmd = ["samtools", "cat", "-o", output] + sorted_files
subprocess.check_call(cmd)
# Write to FileStore
ids['sort_by_ref.bam'] = job.fileStore.writeGlobalFile(output)
rsem_id = job.addChildJobFn(transcriptome, job_vars, disk='30 G', memory='30 G').rv()
exon_id = job.addChildJobFn(exon_count, job_vars, disk='30 G').rv()
return exon_id, rsem_id |
Produces exon counts
job_vars: tuple Tuple of dictionaries: input_args and ids
def exon_count(job, job_vars):
"""
Produces exon counts
job_vars: tuple Tuple of dictionaries: input_args and ids
"""
input_args, ids = job_vars
work_dir = job.fileStore.getLocalTempDir()
uuid = input_args['uuid']
sudo = input_args['sudo']
# I/O
sort_by_ref, normalize_pl, composite_bed = return_input_paths(job, work_dir, ids, 'sort_by_ref.bam',
'normalize.pl', 'composite_exons.bed')
# Command
tool = 'jvivian/bedtools'
cmd_1 = ['coverage',
'-split',
'-abam', docker_path(sort_by_ref),
'-b', docker_path(composite_bed)]
cmd_2 = ['perl',
os.path.join(work_dir, 'normalize.pl'),
sort_by_ref,
composite_bed]
popen_docker = ['docker', 'run', '-v', '{}:/data'.format(work_dir), tool]
if sudo:
popen_docker = ['sudo'] + popen_docker
p = subprocess.Popen(popen_docker + cmd_1, stdout=subprocess.PIPE)
with open(os.path.join(work_dir, 'exon_quant'), 'w') as f:
subprocess.check_call(cmd_2, stdin=p.stdout, stdout=f)
p1 = subprocess.Popen(['cat', os.path.join(work_dir, 'exon_quant')], stdout=subprocess.PIPE)
p2 = subprocess.Popen(['tr', '":"', '"\t"'], stdin=p1.stdout, stdout=subprocess.PIPE)
p3 = subprocess.Popen(['tr', '"-"', '"\t"'], stdin=p2.stdout, stdout=subprocess.PIPE)
with open(os.path.join(work_dir, 'exon_quant.bed'), 'w') as f:
subprocess.check_call(['cut', '-f1-4'], stdin=p3.stdout, stdout=f)
# Create zip, upload to fileStore, and move to output_dir as a backup
output_files = ['exon_quant.bed', 'exon_quant']
tarball_files(work_dir, tar_name='exon.tar.gz', uuid=uuid, files=output_files)
return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'exon.tar.gz')) |
Creates a bam of just the transcriptome
job_vars: tuple Tuple of dictionaries: input_args and ids
def transcriptome(job, job_vars):
"""
Creates a bam of just the transcriptome
job_vars: tuple Tuple of dictionaries: input_args and ids
"""
input_args, ids = job_vars
work_dir = job.fileStore.getLocalTempDir()
sudo = input_args['sudo']
# I/O
sort_by_ref, bed, hg19_fa = return_input_paths(job, work_dir, ids, 'sort_by_ref.bam',
'unc.bed', 'hg19.transcripts.fa')
output = os.path.join(work_dir, 'transcriptome.bam')
# Command
parameters = ['sam-xlate',
'--bed', docker_path(bed),
'--in', docker_path(sort_by_ref),
'--order', docker_path(hg19_fa),
'--out', docker_path(output),
'--xgtag',
'--reverse']
docker_call(tool='quay.io/ucsc_cgl/ubu:1.2--02806964cdf74bf5c39411b236b4c4e36d026843',
tool_parameters=parameters, work_dir=work_dir, java_opts='-Xmx30g', sudo=sudo)
# Write to FileStore
ids['transcriptome.bam'] = job.fileStore.writeGlobalFile(output)
# Run child job
return job.addChildJobFn(filter_bam, job_vars, memory='30G', disk='30G').rv() |
Performs filtering on the transcriptome bam
job_vars: tuple Tuple of dictionaries: input_args and ids
def filter_bam(job, job_vars):
"""
Performs filtering on the transcriptome bam
job_vars: tuple Tuple of dictionaries: input_args and ids
"""
input_args, ids = job_vars
work_dir = job.fileStore.getLocalTempDir()
cores = input_args['cpu_count']
sudo = input_args['sudo']
# I/O
transcriptome_bam = return_input_paths(job, work_dir, ids, 'transcriptome.bam')
output = os.path.join(work_dir, 'filtered.bam')
# Command
parameters = ['sam-filter',
'--strip-indels',
'--max-insert', '1000',
'--mapq', '1',
'--in', docker_path(transcriptome_bam),
'--out', docker_path(output)]
docker_call(tool='quay.io/ucsc_cgl/ubu:1.2--02806964cdf74bf5c39411b236b4c4e36d026843',
tool_parameters=parameters, work_dir=os.path.dirname(output), java_opts='-Xmx30g', sudo=sudo)
# Write to FileStore
ids['filtered.bam'] = job.fileStore.writeGlobalFile(output)
# Run child job
return job.addChildJobFn(rsem, job_vars, cores=cores, disk='30 G').rv() |
Runs RSEM to produce counts
job_vars: tuple Tuple of dictionaries: input_args and ids
def rsem(job, job_vars):
"""
Runs RSEM to produce counts
job_vars: tuple Tuple of dictionaries: input_args and ids
"""
input_args, ids = job_vars
work_dir = job.fileStore.getLocalTempDir()
cpus = input_args['cpu_count']
sudo = input_args['sudo']
single_end_reads = input_args['single_end_reads']
# I/O
filtered_bam, rsem_ref = return_input_paths(job, work_dir, ids, 'filtered.bam', 'rsem_ref.zip')
subprocess.check_call(['unzip', '-o', os.path.join(work_dir, 'rsem_ref.zip'), '-d', work_dir])
output_prefix = 'rsem'
# Make tool call to Docker
parameters = ['--quiet',
'--no-qualities',
'-p', str(cpus),
'--forward-prob', '0.5',
'--seed-length', '25',
'--fragment-length-mean', '-1.0',
'--bam', docker_path(filtered_bam)]
if not single_end_reads:
parameters.extend(['--paired-end'])
parameters.extend(['/data/rsem_ref/hg19_M_rCRS_ref', output_prefix])
docker_call(tool='quay.io/ucsc_cgl/rsem:1.2.25--4e8d1b31d4028f464b3409c6558fb9dfcad73f88',
tool_parameters=parameters, work_dir=work_dir, sudo=sudo)
os.rename(os.path.join(work_dir, output_prefix + '.genes.results'), os.path.join(work_dir, 'rsem_gene.tab'))
os.rename(os.path.join(work_dir, output_prefix + '.isoforms.results'), os.path.join(work_dir, 'rsem_isoform.tab'))
# Write to FileStore
ids['rsem_gene.tab'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'rsem_gene.tab'))
ids['rsem_isoform.tab'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'rsem_isoform.tab'))
# Run child jobs
return job.addChildJobFn(rsem_postprocess, job_vars).rv() |
Combine the contents of separate zipped outputs into one via streaming
job_vars: tuple Tuple of dictionaries: input_args and ids
output_ids: tuple Nested tuple of all the output fileStore IDs
def consolidate_output(job, job_vars, output_ids):
"""
Combine the contents of separate zipped outputs into one via streaming
job_vars: tuple Tuple of dictionaries: input_args and ids
output_ids: tuple Nested tuple of all the output fileStore IDs
"""
input_args, ids = job_vars
work_dir = job.fileStore.getLocalTempDir()
uuid = input_args['uuid']
# Retrieve IDs
rseq_id, exon_id, rsem_id = flatten(output_ids)
# Retrieve output file paths to consolidate
# map_tar = job.fileStore.readGlobalFile(map_id, os.path.join(work_dir, 'map.tar.gz'))
qc_tar = job.fileStore.readGlobalFile(rseq_id, os.path.join(work_dir, 'qc.tar.gz'))
exon_tar = job.fileStore.readGlobalFile(exon_id, os.path.join(work_dir, 'exon.tar.gz'))
rsem_tar = job.fileStore.readGlobalFile(rsem_id, os.path.join(work_dir, 'rsem.tar.gz'))
# I/O
out_tar = os.path.join(work_dir, uuid + '.tar.gz')
# Consolidate separate tarballs
with tarfile.open(os.path.join(work_dir, out_tar), 'w:gz') as f_out:
for tar in [rsem_tar, exon_tar, qc_tar]:
with tarfile.open(tar, 'r') as f_in:
for tarinfo in f_in:
with closing(f_in.extractfile(tarinfo)) as f_in_file:
if tar == qc_tar:
tarinfo.name = os.path.join(uuid, 'rseq_qc', os.path.basename(tarinfo.name))
else:
tarinfo.name = os.path.join(uuid, os.path.basename(tarinfo.name))
f_out.addfile(tarinfo, fileobj=f_in_file)
    # Move to output directory if selected
if input_args['output_dir']:
output_dir = input_args['output_dir']
mkdir_p(output_dir)
copy_to_output_dir(work_dir, output_dir, uuid=None, files=[uuid + '.tar.gz'])
# Write output file to fileStore
ids['uuid.tar.gz'] = job.fileStore.writeGlobalFile(out_tar)
# If S3 bucket argument specified, upload to S3
if input_args['s3_dir']:
job.addChildJobFn(upload_output_to_s3, job_vars) |
If s3_dir is specified in arguments, file will be uploaded to S3 using boto.
WARNING: ~/.boto credentials are necessary for this to succeed!
job_vars: tuple Tuple of dictionaries: input_args and ids
def upload_output_to_s3(job, job_vars):
"""
If s3_dir is specified in arguments, file will be uploaded to S3 using boto.
WARNING: ~/.boto credentials are necessary for this to succeed!
job_vars: tuple Tuple of dictionaries: input_args and ids
"""
import boto
from boto.s3.key import Key
input_args, ids = job_vars
work_dir = job.fileStore.getLocalTempDir()
uuid = input_args['uuid']
# Parse s3_dir
s3_dir = input_args['s3_dir']
bucket_name = s3_dir.split('/')[0]
bucket_dir = '/'.join(s3_dir.split('/')[1:])
# I/O
uuid_tar = return_input_paths(job, work_dir, ids, 'uuid.tar.gz')
# Upload to S3 via boto
conn = boto.connect_s3()
bucket = conn.get_bucket(bucket_name)
k = Key(bucket)
k.key = os.path.join(bucket_dir, uuid + '.tar.gz')
k.set_contents_from_filename(uuid_tar) |
Upload bam to S3. Requires S3AM and a ~/.boto config file.
def upload_bam_to_s3(job, job_vars):
"""
Upload bam to S3. Requires S3AM and a ~/.boto config file.
"""
input_args, ids = job_vars
work_dir = job.fileStore.getLocalTempDir()
uuid = input_args['uuid']
# I/O
job.fileStore.readGlobalFile(ids['alignments.bam'], os.path.join(work_dir, 'alignments.bam'))
bam_path = os.path.join(work_dir, 'alignments.bam')
sample_name = uuid + '.bam'
# Parse s3_dir to get bucket and s3 path
s3_dir = input_args['s3_dir']
bucket_name = s3_dir.split('/')[0]
bucket_dir = os.path.join('/'.join(s3_dir.split('/')[1:]), 'bam_files')
# Upload to S3 via S3AM
s3am_command = ['s3am',
'upload',
'file://{}'.format(bam_path),
os.path.join('s3://', bucket_name, bucket_dir, sample_name)]
subprocess.check_call(s3am_command) |
This is a Toil pipeline for the UNC best practice RNA-Seq analysis.
RNA-seq fastqs are combined, aligned, sorted, filtered, and quantified.
Please read the README.md located in the same directory.
def main():
"""
This is a Toil pipeline for the UNC best practice RNA-Seq analysis.
RNA-seq fastqs are combined, aligned, sorted, filtered, and quantified.
Please read the README.md located in the same directory.
"""
# Define Parser object and add to toil
parser = build_parser()
Job.Runner.addToilOptions(parser)
args = parser.parse_args()
# Store inputs from argparse
inputs = {'config': args.config,
'config_fastq': args.config_fastq,
'input': args.input,
'unc.bed': args.unc,
'hg19.transcripts.fa': args.fasta,
'composite_exons.bed': args.composite_exons,
'normalize.pl': args.normalize,
'output_dir': args.output_dir,
'rsem_ref.zip': args.rsem_ref,
'chromosomes.zip': args.chromosomes,
'ebwt.zip': args.ebwt,
'ssec': args.ssec,
's3_dir': args.s3_dir,
'sudo': args.sudo,
'single_end_reads': args.single_end_reads,
'upload_bam_to_s3': args.upload_bam_to_s3,
'uuid': None,
'sample.tar': None,
'cpu_count': None}
# Launch jobs
Job.Runner.startToil(Job.wrapJobFn(download_shared_files, inputs), args) |
Remove the given file from hdfs with master at the given IP address
:type master_ip: MasterAddress
def remove_file(master_ip, filename, spark_on_toil):
"""
Remove the given file from hdfs with master at the given IP address
    :type master_ip: MasterAddress
"""
master_ip = master_ip.actual
ssh_call = ['ssh', '-o', 'StrictHostKeyChecking=no', master_ip]
if spark_on_toil:
output = check_output(ssh_call + ['docker', 'ps'])
container_id = next(line.split()[0] for line in output.splitlines() if 'apache-hadoop-master' in line)
ssh_call += ['docker', 'exec', container_id]
try:
check_call(ssh_call + ['hdfs', 'dfs', '-rm', '-r', '/' + filename])
except:
pass |
Downloads input data files from S3.
:type master_ip: MasterAddress
def download_data(job, master_ip, inputs, known_snps, bam, hdfs_snps, hdfs_bam):
"""
Downloads input data files from S3.
    :type master_ip: MasterAddress
"""
log.info("Downloading known sites file %s to %s.", known_snps, hdfs_snps)
call_conductor(job, master_ip, known_snps, hdfs_snps, memory=inputs.memory)
log.info("Downloading input BAM %s to %s.", bam, hdfs_bam)
call_conductor(job, master_ip, bam, hdfs_bam, memory=inputs.memory) |
Convert input sam/bam file and known SNPs file into ADAM format
def adam_convert(job, master_ip, inputs, in_file, in_snps, adam_file, adam_snps, spark_on_toil):
"""
Convert input sam/bam file and known SNPs file into ADAM format
"""
log.info("Converting input BAM to ADAM.")
call_adam(job, master_ip,
["transform", in_file, adam_file],
memory=inputs.memory,
run_local=inputs.run_local,
native_adam_path=inputs.native_adam_path)
in_file_name = in_file.split("/")[-1]
remove_file(master_ip, in_file_name, spark_on_toil)
log.info("Converting known sites VCF to ADAM.")
call_adam(job, master_ip,
["vcf2adam", "-only_variants", in_snps, adam_snps],
memory=inputs.memory,
run_local=inputs.run_local,
native_adam_path=inputs.native_adam_path)
in_snps_name = in_snps.split("/")[-1]
remove_file(master_ip, in_snps_name, spark_on_toil) |
Preprocess in_file with known SNPs snp_file:
- mark duplicates
- realign indels
- recalibrate base quality scores
def adam_transform(job, master_ip, inputs, in_file, snp_file, hdfs_dir, out_file, spark_on_toil):
"""
Preprocess in_file with known SNPs snp_file:
- mark duplicates
- realign indels
- recalibrate base quality scores
"""
log.info("Marking duplicate reads.")
call_adam(job, master_ip,
["transform",
in_file, hdfs_dir + "/mkdups.adam",
"-aligned_read_predicate",
"-limit_projection",
"-mark_duplicate_reads"],
memory=inputs.memory,
run_local=inputs.run_local,
native_adam_path=inputs.native_adam_path)
#FIXME
in_file_name = in_file.split("/")[-1]
remove_file(master_ip, in_file_name + "*", spark_on_toil)
log.info("Realigning INDELs.")
call_adam(job, master_ip,
["transform",
hdfs_dir + "/mkdups.adam",
hdfs_dir + "/ri.adam",
"-realign_indels"],
memory=inputs.memory,
run_local=inputs.run_local,
native_adam_path=inputs.native_adam_path)
remove_file(master_ip, hdfs_dir + "/mkdups.adam*", spark_on_toil)
log.info("Recalibrating base quality scores.")
call_adam(job, master_ip,
["transform",
hdfs_dir + "/ri.adam",
hdfs_dir + "/bqsr.adam",
"-recalibrate_base_qualities",
"-known_snps", snp_file],
memory=inputs.memory,
run_local=inputs.run_local,
native_adam_path=inputs.native_adam_path)
remove_file(master_ip, "ri.adam*", spark_on_toil)
log.info("Sorting reads and saving a single BAM file.")
call_adam(job, master_ip,
["transform",
hdfs_dir + "/bqsr.adam",
out_file,
"-sort_reads", "-single"],
memory=inputs.memory,
run_local=inputs.run_local,
native_adam_path=inputs.native_adam_path)
remove_file(master_ip, "bqsr.adam*", spark_on_toil)
return out_file |
Upload file hdfs_name from HDFS to S3
def upload_data(job, master_ip, inputs, hdfs_name, upload_name, spark_on_toil):
"""
    Upload file hdfs_name from HDFS to S3
"""
if mock_mode():
truncate_file(master_ip, hdfs_name, spark_on_toil)
log.info("Uploading output BAM %s to %s.", hdfs_name, upload_name)
call_conductor(job, master_ip, hdfs_name, upload_name, memory=inputs.memory)
remove_file(master_ip, hdfs_name, spark_on_toil) |
Monolithic job that calls data download, conversion, transform, upload.
Previously, this was not monolithic; change came in due to #126/#134.
def download_run_and_upload(job, master_ip, inputs, spark_on_toil):
"""
Monolithic job that calls data download, conversion, transform, upload.
Previously, this was not monolithic; change came in due to #126/#134.
"""
master_ip = MasterAddress(master_ip)
bam_name = inputs.sample.split('://')[-1].split('/')[-1]
sample_name = ".".join(os.path.splitext(bam_name)[:-1])
hdfs_subdir = sample_name + "-dir"
if inputs.run_local:
inputs.local_dir = job.fileStore.getLocalTempDir()
if inputs.native_adam_path is None:
hdfs_dir = "/data/"
else:
hdfs_dir = inputs.local_dir
else:
inputs.local_dir = None
hdfs_dir = "hdfs://{0}:{1}/{2}".format(master_ip, HDFS_MASTER_PORT, hdfs_subdir)
try:
hdfs_prefix = hdfs_dir + "/" + sample_name
hdfs_bam = hdfs_dir + "/" + bam_name
hdfs_snps = hdfs_dir + "/" + inputs.dbsnp.split('://')[-1].split('/')[-1]
if not inputs.run_local:
download_data(job, master_ip, inputs, inputs.dbsnp, inputs.sample, hdfs_snps, hdfs_bam)
else:
copy_files([inputs.sample, inputs.dbsnp], inputs.local_dir)
adam_input = hdfs_prefix + ".adam"
adam_snps = hdfs_dir + "/snps.var.adam"
adam_convert(job, master_ip, inputs, hdfs_bam, hdfs_snps, adam_input, adam_snps, spark_on_toil)
adam_output = hdfs_prefix + ".processed.bam"
adam_transform(job, master_ip, inputs, adam_input, adam_snps, hdfs_dir, adam_output, spark_on_toil)
out_file = inputs.output_dir + "/" + sample_name + inputs.suffix + ".bam"
if not inputs.run_local:
upload_data(job, master_ip, inputs, adam_output, out_file, spark_on_toil)
else:
local_adam_output = "%s/%s.processed.bam" % (inputs.local_dir, sample_name)
move_files([local_adam_output], inputs.output_dir)
remove_file(master_ip, hdfs_subdir, spark_on_toil)
except:
remove_file(master_ip, hdfs_subdir, spark_on_toil)
raise |
A Toil job function performing ADAM preprocessing on a single sample
def static_adam_preprocessing_dag(job, inputs, sample, output_dir, suffix=''):
"""
A Toil job function performing ADAM preprocessing on a single sample
"""
inputs.sample = sample
inputs.output_dir = output_dir
inputs.suffix = suffix
if inputs.master_ip is not None or inputs.run_local:
if not inputs.run_local and inputs.master_ip == 'auto':
# Static, standalone Spark cluster managed by uberscript
spark_on_toil = False
scale_up = job.wrapJobFn(scale_external_spark_cluster, 1)
job.addChild(scale_up)
spark_work = job.wrapJobFn(download_run_and_upload,
inputs.master_ip, inputs, spark_on_toil)
scale_up.addChild(spark_work)
scale_down = job.wrapJobFn(scale_external_spark_cluster, -1)
spark_work.addChild(scale_down)
else:
# Static, external Spark cluster
spark_on_toil = False
spark_work = job.wrapJobFn(download_run_and_upload,
inputs.master_ip, inputs, spark_on_toil)
job.addChild(spark_work)
else:
# Dynamic subclusters, i.e. Spark-on-Toil
spark_on_toil = True
cores = multiprocessing.cpu_count()
master_ip = spawn_spark_cluster(job,
False, # Sudo
inputs.num_nodes-1,
cores=cores,
memory=inputs.memory)
spark_work = job.wrapJobFn(download_run_and_upload,
master_ip, inputs, spark_on_toil)
job.addChild(spark_work) |
Runs GATK Hard Filtering on a Genomic VCF file and uploads the results.
0: Start                    0 --> 1 --> 3 --> 5 --> 6
1: Select SNPs              |                 |
2: Select INDELs            +--> 2 --> 4 -----+
3: Apply SNP Filter
4: Apply INDEL Filter
5: Merge SNP and INDEL VCFs
6: Write filtered VCF to output directory
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str uuid: Unique sample identifier
:param str vcf_id: VCF FileStoreID
:param Namespace config: Pipeline configuration options and shared files
Requires the following config attributes:
config.genome_fasta FileStoreID for reference genome fasta file
config.genome_fai FileStoreID for reference genome fasta index file
config.genome_dict FileStoreID for reference genome sequence dictionary file
config.snp_filter_name Name of SNP filter for VCF header
config.snp_filter_expression SNP JEXL filter expression
config.indel_filter_name Name of INDEL filter for VCF header
config.indel_filter_expression INDEL JEXL filter expression
config.xmx Java heap size in bytes
config.suffix Suffix added to output filename
config.output_dir URL or local path to output directory
config.ssec Path to key file for SSE-C encryption
:return: SNP and INDEL FileStoreIDs
:rtype: tuple
def hard_filter_pipeline(job, uuid, vcf_id, config):
"""
Runs GATK Hard Filtering on a Genomic VCF file and uploads the results.
    0: Start                    0 --> 1 --> 3 --> 5 --> 6
    1: Select SNPs              |                 |
    2: Select INDELs            +--> 2 --> 4 -----+
    3: Apply SNP Filter
    4: Apply INDEL Filter
    5: Merge SNP and INDEL VCFs
    6: Write filtered VCF to output directory
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str uuid: Unique sample identifier
:param str vcf_id: VCF FileStoreID
:param Namespace config: Pipeline configuration options and shared files
Requires the following config attributes:
    config.genome_fasta FileStoreID for reference genome fasta file
    config.genome_fai FileStoreID for reference genome fasta index file
    config.genome_dict FileStoreID for reference genome sequence dictionary file
config.snp_filter_name Name of SNP filter for VCF header
config.snp_filter_expression SNP JEXL filter expression
config.indel_filter_name Name of INDEL filter for VCF header
config.indel_filter_expression INDEL JEXL filter expression
config.xmx Java heap size in bytes
config.suffix Suffix added to output filename
config.output_dir URL or local path to output directory
config.ssec Path to key file for SSE-C encryption
:return: SNP and INDEL FileStoreIDs
:rtype: tuple
"""
job.fileStore.logToMaster('Running Hard Filter on {}'.format(uuid))
# Get the total size of the genome reference
genome_ref_size = config.genome_fasta.size + config.genome_fai.size + config.genome_dict.size
# The SelectVariants disk requirement depends on the input VCF, the genome reference files,
# and the output VCF. The output VCF is smaller than the input VCF. The disk requirement
# is identical for SNPs and INDELs.
select_variants_disk = PromisedRequirement(lambda vcf, ref_size: 2 * vcf.size + ref_size,
vcf_id,
genome_ref_size)
select_snps = job.wrapJobFn(gatk_select_variants,
'SNP',
vcf_id,
config.genome_fasta,
config.genome_fai,
config.genome_dict,
memory=config.xmx,
disk=select_variants_disk)
# The VariantFiltration disk requirement depends on the input VCF, the genome reference files,
# and the output VCF. The filtered VCF is smaller than the input VCF.
snp_filter_disk = PromisedRequirement(lambda vcf, ref_size: 2 * vcf.size + ref_size,
select_snps.rv(),
genome_ref_size)
snp_filter = job.wrapJobFn(gatk_variant_filtration,
select_snps.rv(),
config.snp_filter_name,
config.snp_filter_expression,
config.genome_fasta,
config.genome_fai,
config.genome_dict,
memory=config.xmx,
disk=snp_filter_disk)
select_indels = job.wrapJobFn(gatk_select_variants,
'INDEL',
vcf_id,
config.genome_fasta,
config.genome_fai,
config.genome_dict,
memory=config.xmx,
disk=select_variants_disk)
indel_filter_disk = PromisedRequirement(lambda vcf, ref_size: 2 * vcf.size + ref_size,
select_indels.rv(),
genome_ref_size)
indel_filter = job.wrapJobFn(gatk_variant_filtration,
select_indels.rv(),
config.indel_filter_name,
config.indel_filter_expression,
config.genome_fasta,
config.genome_fai,
config.genome_dict,
memory=config.xmx,
disk=indel_filter_disk)
# The CombineVariants disk requirement depends on the SNP and INDEL input VCFs and the
# genome reference files. The combined VCF is approximately the same size as the input files.
combine_vcfs_disk = PromisedRequirement(lambda vcf1, vcf2, ref_size:
2 * (vcf1.size + vcf2.size) + ref_size,
indel_filter.rv(),
snp_filter.rv(),
genome_ref_size)
combine_vcfs = job.wrapJobFn(gatk_combine_variants,
{'SNPs': snp_filter.rv(), 'INDELs': indel_filter.rv()},
config.genome_fasta,
config.genome_fai,
config.genome_dict,
merge_option='UNSORTED', # Merges variants from a single sample
memory=config.xmx,
disk=combine_vcfs_disk)
job.addChild(select_snps)
job.addChild(select_indels)
select_snps.addChild(snp_filter)
snp_filter.addChild(combine_vcfs)
select_indels.addChild(indel_filter)
indel_filter.addChild(combine_vcfs)
# Output the hard filtered VCF
output_dir = os.path.join(config.output_dir, uuid)
output_filename = '%s.hard_filter%s.vcf' % (uuid, config.suffix)
output_vcf = job.wrapJobFn(output_file_job,
output_filename,
combine_vcfs.rv(),
output_dir,
s3_key_path=config.ssec,
disk=PromisedRequirement(lambda x: x.size, combine_vcfs.rv()))
combine_vcfs.addChild(output_vcf)
return combine_vcfs.rv() |
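A sketch of how this pipeline might be scheduled. The Namespace below only names the attributes listed above; the FileStoreIDs, filter expressions, heap size, and output path are placeholders, not recommended values:
import argparse

config = argparse.Namespace(genome_fasta=fasta_id, genome_fai=fai_id, genome_dict=dict_id,  # FileStoreIDs from earlier jobs
                            snp_filter_name='snp_filter',
                            snp_filter_expression='QD < 2.0',    # placeholder JEXL expression
                            indel_filter_name='indel_filter',
                            indel_filter_expression='QD < 2.0',  # placeholder JEXL expression
                            xmx=10 * 1024 ** 3,                  # Java heap size in bytes
                            suffix='', output_dir='/mnt/output', ssec=None)
filtered_vcf_id = job.addChildJobFn(hard_filter_pipeline, 'sample-uuid', vcf_id, config).rv()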
Downloads a sample from CGHub via GeneTorrent, then uses S3AM to transfer it to S3
sample: tuple/list First element is a CGHub analysis ID
inputs: dict Dictionary of input arguments
def download_and_transfer_sample(job, sample, inputs):
"""
Downloads a sample from CGHub via GeneTorrent, then uses S3AM to transfer it to S3
    sample: tuple/list First element is a CGHub analysis ID
    inputs: dict Dictionary of input arguments
"""
analysis_id = sample[0]
work_dir = job.fileStore.getLocalTempDir()
folder_path = os.path.join(work_dir, os.path.basename(analysis_id))
# Acquire genetorrent key and download sample
shutil.copy(inputs['genetorrent_key'], os.path.join(work_dir, 'cghub.key'))
parameters = ['-vv', '-c', 'cghub.key', '-d', analysis_id]
docker_call(job=job, tool='quay.io/ucsc_cgl/genetorrent:3.8.7--9911761265b6f08bc3ef09f53af05f56848d805b',
work_dir=work_dir, parameters=parameters)
try:
sample = glob.glob(os.path.join(folder_path, '*tar*'))[0]
    except IndexError as e:
        print 'No tarfile found inside of folder: {}'.format(e)
raise
# Upload sample to S3AM
key_path = inputs['ssec']
if sample.endswith('gz'):
sample_name = analysis_id + '.tar.gz'
shutil.move(sample, os.path.join(work_dir, sample_name))
else:
sample_name = analysis_id + '.tar'
shutil.move(sample, os.path.join(work_dir, sample_name))
# Parse s3_dir to get bucket and s3 path
s3_dir = inputs['s3_dir']
bucket_name = s3_dir.lstrip('/').split('/')[0]
base_url = 'https://s3-us-west-2.amazonaws.com/'
url = os.path.join(base_url, bucket_name, sample_name)
# Generate keyfile for upload
with open(os.path.join(work_dir, 'temp.key'), 'wb') as f_out:
f_out.write(generate_unique_key(key_path, url))
# Upload to S3 via S3AM
s3am_command = ['s3am',
'upload',
'--sse-key-file', os.path.join(work_dir, 'temp.key'),
'file://{}'.format(os.path.join(work_dir, sample_name)),
's3://' + bucket_name + '/']
subprocess.check_call(s3am_command) |
This is a Toil pipeline to transfer TCGA data into an S3 Bucket
Data is pulled down with Genetorrent and transferred to S3 via S3AM.
def main():
"""
This is a Toil pipeline to transfer TCGA data into an S3 Bucket
Data is pulled down with Genetorrent and transferred to S3 via S3AM.
"""
# Define Parser object and add to toil
parser = build_parser()
Job.Runner.addToilOptions(parser)
args = parser.parse_args()
# Store inputs from argparse
inputs = {'genetorrent': args.genetorrent,
'genetorrent_key': args.genetorrent_key,
'ssec': args.ssec,
's3_dir': args.s3_dir}
# Sanity checks
if args.ssec:
assert os.path.isfile(args.ssec)
if args.genetorrent:
assert os.path.isfile(args.genetorrent)
if args.genetorrent_key:
assert os.path.isfile(args.genetorrent_key)
samples = parse_genetorrent(args.genetorrent)
# Start pipeline
# map_job accepts a function, an iterable, and *args. The function is launched as a child
# process with one element from the iterable and *args, which in turn spawns a tree of child jobs.
Job.Runner.startToil(Job.wrapJobFn(map_job, download_and_transfer_sample, samples, inputs), args) |
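A rough sketch of what the map_job call fans out to; this only approximates its semantics (the real toil-lib helper batches the children into a tree rather than a flat loop):
def map_job_sketch(job, func, iterable, *args):
    # One child job per element, each receiving (element, *args).
    for item in iterable:
        job.addChildJobFn(func, item, *args)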
Validate a hexadecimal IPv6 ip address.
>>> validate_ip('::')
True
>>> validate_ip('::1')
True
>>> validate_ip('2001:db8:85a3::8a2e:370:7334')
True
>>> validate_ip('2001:db8:85a3:0:0:8a2e:370:7334')
True
>>> validate_ip('2001:0db8:85a3:0000:0000:8a2e:0370:7334')
True
>>> validate_ip('2001:db8::1:0:0:1')
True
>>> validate_ip('ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff')
True
>>> validate_ip('::ffff:192.0.2.128')
True
>>> validate_ip('::ff::ff')
False
>>> validate_ip('::fffff')
False
>>> validate_ip('::ffff:192.0.2.300')
False
>>> validate_ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: expected string or buffer
>>> validate_ip('1080:0:0:0:8:800:200c:417a')
True
:param s: String to validate as a hexadecimal IPv6 ip address.
:type s: str
:returns: ``True`` if a valid hexadecimal IPv6 ip address,
``False`` otherwise.
:raises: TypeError
def validate_ip(s):
    """Validate a hexadecimal IPv6 ip address.
>>> validate_ip('::')
True
>>> validate_ip('::1')
True
>>> validate_ip('2001:db8:85a3::8a2e:370:7334')
True
>>> validate_ip('2001:db8:85a3:0:0:8a2e:370:7334')
True
>>> validate_ip('2001:0db8:85a3:0000:0000:8a2e:0370:7334')
True
>>> validate_ip('2001:db8::1:0:0:1')
True
>>> validate_ip('ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff')
True
>>> validate_ip('::ffff:192.0.2.128')
True
>>> validate_ip('::ff::ff')
False
>>> validate_ip('::fffff')
False
>>> validate_ip('::ffff:192.0.2.300')
False
>>> validate_ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: expected string or buffer
>>> validate_ip('1080:0:0:0:8:800:200c:417a')
True
    :param s: String to validate as a hexadecimal IPv6 ip address.
    :type s: str
    :returns: ``True`` if a valid hexadecimal IPv6 ip address,
``False`` otherwise.
:raises: TypeError
"""
if _HEX_RE.match(s):
return len(s.split('::')) <= 2
if _DOTTED_QUAD_RE.match(s):
halves = s.split('::')
if len(halves) > 2:
return False
hextets = s.split(':')
quads = hextets[-1].split('.')
for q in quads:
if int(q) > 255:
return False
return True
return False |
Convert a hexadecimal IPv6 address to a network byte order 128-bit
integer.
>>> ip2long('::') == 0
True
>>> ip2long('::1') == 1
True
>>> expect = 0x20010db885a3000000008a2e03707334
>>> ip2long('2001:db8:85a3::8a2e:370:7334') == expect
True
>>> ip2long('2001:db8:85a3:0:0:8a2e:370:7334') == expect
True
>>> ip2long('2001:0db8:85a3:0000:0000:8a2e:0370:7334') == expect
True
>>> expect = 0x20010db8000000000001000000000001
>>> ip2long('2001:db8::1:0:0:1') == expect
True
>>> expect = 281473902969472
>>> ip2long('::ffff:192.0.2.128') == expect
True
>>> expect = 0xffffffffffffffffffffffffffffffff
>>> ip2long('ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff') == expect
True
>>> ip2long('ff::ff::ff') == None
True
>>> expect = 21932261930451111902915077091070067066
>>> ip2long('1080:0:0:0:8:800:200C:417A') == expect
True
:param ip: Hexadecimal IPv6 address
:type ip: str
:returns: Network byte order 128-bit integer or ``None`` if ip is invalid.
def ip2long(ip):
    """Convert a hexadecimal IPv6 address to a network byte order 128-bit
integer.
>>> ip2long('::') == 0
True
>>> ip2long('::1') == 1
True
>>> expect = 0x20010db885a3000000008a2e03707334
>>> ip2long('2001:db8:85a3::8a2e:370:7334') == expect
True
>>> ip2long('2001:db8:85a3:0:0:8a2e:370:7334') == expect
True
>>> ip2long('2001:0db8:85a3:0000:0000:8a2e:0370:7334') == expect
True
>>> expect = 0x20010db8000000000001000000000001
>>> ip2long('2001:db8::1:0:0:1') == expect
True
>>> expect = 281473902969472
>>> ip2long('::ffff:192.0.2.128') == expect
True
>>> expect = 0xffffffffffffffffffffffffffffffff
>>> ip2long('ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff') == expect
True
>>> ip2long('ff::ff::ff') == None
True
>>> expect = 21932261930451111902915077091070067066
>>> ip2long('1080:0:0:0:8:800:200C:417A') == expect
True
    :param ip: Hexadecimal IPv6 address
:type ip: str
:returns: Network byte order 128-bit integer or ``None`` if ip is invalid.
"""
if not validate_ip(ip):
return None
if '.' in ip:
# convert IPv4 suffix to hex
chunks = ip.split(':')
v4_int = ipv4.ip2long(chunks.pop())
if v4_int is None:
return None
chunks.append('%x' % ((v4_int >> 16) & 0xffff))
chunks.append('%x' % (v4_int & 0xffff))
ip = ':'.join(chunks)
halves = ip.split('::')
hextets = halves[0].split(':')
if len(halves) == 2:
h2 = halves[1].split(':')
for z in range(8 - (len(hextets) + len(h2))):
hextets.append('0')
for h in h2:
hextets.append(h)
# end if
lngip = 0
for h in hextets:
if '' == h:
h = '0'
lngip = (lngip << 16) | int(h, 16)
return lngip |
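A sketch of the IPv4-suffix step for the '::ffff:192.0.2.128' doctest above, using the module's ipv4 helper that the code already calls:
v4_int = ipv4.ip2long('192.0.2.128')                   # 0xC0000280
high, low = (v4_int >> 16) & 0xffff, v4_int & 0xffff
print '%x:%x' % (high, low)                            # c000:280, so the address is rewritten as '::ffff:c000:280'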
Convert a network byte order 128-bit integer to a canonical IPv6
address.
>>> long2ip(2130706433)
'::7f00:1'
>>> long2ip(42540766411282592856904266426630537217)
'2001:db8::1:0:0:1'
>>> long2ip(MIN_IP)
'::'
>>> long2ip(MAX_IP)
'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff'
>>> long2ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: unsupported operand type(s) for >>: 'NoneType' and 'int'
>>> long2ip(-1) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: expected int between 0 and <really big int> inclusive
>>> long2ip(MAX_IP + 1) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: expected int between 0 and <really big int> inclusive
>>> long2ip(ip2long('1080::8:800:200C:417A'), rfc1924=True)
'4)+k&C#VzJ4br>0wv%Yp'
>>> long2ip(ip2long('::'), rfc1924=True)
'00000000000000000000'
:param l: Network byte order 128-bit integer.
:type l: int
:param rfc1924: Encode in RFC 1924 notation (base 85)
:type rfc1924: bool
:returns: Canonical IPv6 address (eg. '::1').
:raises: TypeError
def long2ip(l, rfc1924=False):
"""Convert a network byte order 128-bit integer to a canonical IPv6
address.
>>> long2ip(2130706433)
'::7f00:1'
>>> long2ip(42540766411282592856904266426630537217)
'2001:db8::1:0:0:1'
>>> long2ip(MIN_IP)
'::'
>>> long2ip(MAX_IP)
'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff'
>>> long2ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: unsupported operand type(s) for >>: 'NoneType' and 'int'
>>> long2ip(-1) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: expected int between 0 and <really big int> inclusive
>>> long2ip(MAX_IP + 1) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: expected int between 0 and <really big int> inclusive
>>> long2ip(ip2long('1080::8:800:200C:417A'), rfc1924=True)
'4)+k&C#VzJ4br>0wv%Yp'
>>> long2ip(ip2long('::'), rfc1924=True)
'00000000000000000000'
:param l: Network byte order 128-bit integer.
:type l: int
:param rfc1924: Encode in RFC 1924 notation (base 85)
:type rfc1924: bool
:returns: Canonical IPv6 address (eg. '::1').
:raises: TypeError
"""
if MAX_IP < l or l < MIN_IP:
raise TypeError(
"expected int between %d and %d inclusive" % (MIN_IP, MAX_IP))
if rfc1924:
return long2rfc1924(l)
# format as one big hex value
hex_str = '%032x' % l
# split into double octet chunks without padding zeros
hextets = ['%x' % int(hex_str[x:x + 4], 16) for x in range(0, 32, 4)]
# find and remove left most longest run of zeros
dc_start, dc_len = (-1, 0)
run_start, run_len = (-1, 0)
for idx, hextet in enumerate(hextets):
if '0' == hextet:
run_len += 1
if -1 == run_start:
run_start = idx
if run_len > dc_len:
dc_len, dc_start = (run_len, run_start)
else:
run_len, run_start = (0, -1)
# end for
if dc_len > 1:
dc_end = dc_start + dc_len
if dc_end == len(hextets):
hextets += ['']
hextets[dc_start:dc_end] = ['']
if dc_start == 0:
hextets = [''] + hextets
# end if
return ':'.join(hextets) |
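A quick check of the hextet split behind the '2001:db8::1:0:0:1' doctest above:
hex_str = '%032x' % 42540766411282592856904266426630537217
hextets = ['%x' % int(hex_str[x:x + 4], 16) for x in range(0, 32, 4)]
print hextets   # ['2001', 'db8', '0', '0', '1', '0', '0', '1']; the leftmost zero run (indices 2-3) collapses to '::'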
Convert a network byte order 128-bit integer to an rfc1924 IPv6
address.
>>> long2rfc1924(ip2long('1080::8:800:200C:417A'))
'4)+k&C#VzJ4br>0wv%Yp'
>>> long2rfc1924(ip2long('::'))
'00000000000000000000'
>>> long2rfc1924(MAX_IP)
'=r54lj&NUUO~Hi%c2ym0'
:param l: Network byte order 128-bit integer.
:type l: int
:returns: RFC 1924 IPv6 address
:raises: TypeError
def long2rfc1924(l):
"""Convert a network byte order 128-bit integer to an rfc1924 IPv6
address.
>>> long2rfc1924(ip2long('1080::8:800:200C:417A'))
'4)+k&C#VzJ4br>0wv%Yp'
>>> long2rfc1924(ip2long('::'))
'00000000000000000000'
>>> long2rfc1924(MAX_IP)
'=r54lj&NUUO~Hi%c2ym0'
:param l: Network byte order 128-bit integer.
:type l: int
:returns: RFC 1924 IPv6 address
:raises: TypeError
"""
if MAX_IP < l or l < MIN_IP:
raise TypeError(
"expected int between %d and %d inclusive" % (MIN_IP, MAX_IP))
o = []
r = l
    while r >= 85:  # >= so that exact multiples of 85 never index past the 85-character alphabet
o.append(_RFC1924_ALPHABET[r % 85])
r = r // 85
o.append(_RFC1924_ALPHABET[r])
return ''.join(reversed(o)).zfill(20) |
Convert an RFC 1924 IPv6 address to a network byte order 128-bit
integer.
>>> expect = 0
>>> rfc19242long('00000000000000000000') == expect
True
>>> expect = 21932261930451111902915077091070067066
>>> rfc19242long('4)+k&C#VzJ4br>0wv%Yp') == expect
True
>>> rfc19242long('pizza') == None
True
>>> rfc19242long('~~~~~~~~~~~~~~~~~~~~') == None
True
>>> rfc19242long('=r54lj&NUUO~Hi%c2ym0') == MAX_IP
True
:param s: RFC 1924 IPv6 address
:type s: str
:returns: Network byte order 128-bit integer or ``None`` if ip is invalid.
def rfc19242long(s):
"""Convert an RFC 1924 IPv6 address to a network byte order 128-bit
integer.
>>> expect = 0
>>> rfc19242long('00000000000000000000') == expect
True
>>> expect = 21932261930451111902915077091070067066
>>> rfc19242long('4)+k&C#VzJ4br>0wv%Yp') == expect
True
>>> rfc19242long('pizza') == None
True
>>> rfc19242long('~~~~~~~~~~~~~~~~~~~~') == None
True
>>> rfc19242long('=r54lj&NUUO~Hi%c2ym0') == MAX_IP
True
    :param s: RFC 1924 IPv6 address
    :type s: str
:returns: Network byte order 128-bit integer or ``None`` if ip is invalid.
"""
global _RFC1924_REV
if not _RFC1924_RE.match(s):
return None
if _RFC1924_REV is None:
_RFC1924_REV = {v: k for k, v in enumerate(_RFC1924_ALPHABET)}
x = 0
for c in s:
x = x * 85 + _RFC1924_REV[c]
if x > MAX_IP:
return None
return x |
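A round-trip check combining the two RFC 1924 helpers, using values from the doctests above:
value = ip2long('1080::8:800:200C:417A')
s = long2rfc1924(value)                  # '4)+k&C#VzJ4br>0wv%Yp'
assert rfc19242long(s) == value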
Validate a CIDR notation ip address.
The string is considered a valid CIDR address if it consists of a valid
IPv6 address in hextet format followed by a forward slash (/) and a bit
mask length (0-128).
>>> validate_cidr('::/128')
True
>>> validate_cidr('::/0')
True
>>> validate_cidr('fc00::/7')
True
>>> validate_cidr('::ffff:0:0/96')
True
>>> validate_cidr('::')
False
>>> validate_cidr('::/129')
False
>>> validate_cidr(None) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: expected string or buffer
:param s: String to validate as a CIDR notation ip address.
:type s: str
:returns: ``True`` if a valid CIDR address, ``False`` otherwise.
:raises: TypeError
def validate_cidr(s):
"""Validate a CIDR notation ip address.
The string is considered a valid CIDR address if it consists of a valid
IPv6 address in hextet format followed by a forward slash (/) and a bit
mask length (0-128).
>>> validate_cidr('::/128')
True
>>> validate_cidr('::/0')
True
>>> validate_cidr('fc00::/7')
True
>>> validate_cidr('::ffff:0:0/96')
True
>>> validate_cidr('::')
False
>>> validate_cidr('::/129')
False
>>> validate_cidr(None) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: expected string or buffer
:param s: String to validate as a CIDR notation ip address.
:type s: str
:returns: ``True`` if a valid CIDR address, ``False`` otherwise.
:raises: TypeError
"""
if _CIDR_RE.match(s):
ip, mask = s.split('/')
if validate_ip(ip):
if int(mask) > 128:
return False
else:
return False
return True
return False |
Convert a CIDR notation ip address into a tuple containing the network
block start and end addresses.
>>> cidr2block('2001:db8::/48')
('2001:db8::', '2001:db8:0:ffff:ffff:ffff:ffff:ffff')
>>> cidr2block('::/0')
('::', 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff')
:param cidr: CIDR notation ip address (eg. '2001:db8::/48').
:type cidr: str
:returns: Tuple of block (start, end) or ``None`` if invalid.
:raises: TypeError
def cidr2block(cidr):
"""Convert a CIDR notation ip address into a tuple containing the network
block start and end addresses.
>>> cidr2block('2001:db8::/48')
('2001:db8::', '2001:db8:0:ffff:ffff:ffff:ffff:ffff')
>>> cidr2block('::/0')
('::', 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff')
    :param cidr: CIDR notation ip address (eg. '2001:db8::/48').
:type cidr: str
:returns: Tuple of block (start, end) or ``None`` if invalid.
:raises: TypeError
"""
if not validate_cidr(cidr):
return None
ip, prefix = cidr.split('/')
prefix = int(prefix)
ip = ip2long(ip)
# keep left most prefix bits of ip
shift = 128 - prefix
block_start = ip >> shift << shift
# expand right most 128 - prefix bits to 1
mask = (1 << shift) - 1
block_end = block_start | mask
return (long2ip(block_start), long2ip(block_end)) |
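The masking arithmetic spelled out for the '2001:db8::/48' doctest above:
prefix = 48
shift = 128 - prefix                                    # 80 host bits
block_start = ip2long('2001:db8::') >> shift << shift   # clear the host bits
block_end = block_start | ((1 << shift) - 1)            # set the host bits
print long2ip(block_start), long2ip(block_end)          # 2001:db8:: 2001:db8:0:ffff:ffff:ffff:ffff:ffff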
Parses config file to pull sample information.
Stores samples as tuples of (uuid, URL)
:param JobFunctionWrappingJob job: passed by Toil automatically
:param Namespace inputs: Stores input arguments (see main)
def parse_input_samples(job, inputs):
"""
Parses config file to pull sample information.
Stores samples as tuples of (uuid, URL)
:param JobFunctionWrappingJob job: passed by Toil automatically
:param Namespace inputs: Stores input arguments (see main)
"""
job.fileStore.logToMaster('Parsing input samples and batching jobs')
samples = []
if inputs.config:
with open(inputs.config, 'r') as f:
for line in f.readlines():
if not line.isspace():
sample = line.strip().split(',')
assert len(sample) == 2, 'Error: Config file is inappropriately formatted.'
samples.append(sample)
job.addChildJobFn(map_job, download_sample, samples, inputs) |
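A hypothetical config file for this parser: one sample per line, exactly two comma-separated fields (uuid, URL), which download_sample below then unpacks:
# Example contents of a hypothetical config file; the UUIDs and URLs are placeholders.
example_config = """sample-1,https://example.com/data/sample-1.tar.gz
sample-2,s3://my-bucket/data/sample-2.tar.gz
"""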
Download the input sample
:param JobFunctionWrappingJob job: passed by Toil automatically
:param tuple sample: Tuple containing (UUID,URL) of a sample
:param Namespace inputs: Stores input arguments (see main)
def download_sample(job, sample, inputs):
"""
Download the input sample
:param JobFunctionWrappingJob job: passed by Toil automatically
:param tuple sample: Tuple containing (UUID,URL) of a sample
:param Namespace inputs: Stores input arguments (see main)
"""
uuid, url = sample
job.fileStore.logToMaster('Downloading sample: {}'.format(uuid))
# Download sample
tar_id = job.addChildJobFn(download_url_job, url, s3_key_path=inputs.ssec, disk='30G').rv()
# Create copy of inputs for each sample
sample_inputs = argparse.Namespace(**vars(inputs))
sample_inputs.uuid = uuid
sample_inputs.cores = multiprocessing.cpu_count()
# Call children and follow-on jobs
job.addFollowOnJobFn(process_sample, sample_inputs, tar_id, cores=2, disk='60G') |
Converts sample.tar(.gz) into two fastq files.
Due to edge conditions... BEWARE: HERE BE DRAGONS
:param JobFunctionWrappingJob job: passed by Toil automatically
:param Namespace inputs: Stores input arguments (see main)
:param str tar_id: FileStore ID of sample tar
def process_sample(job, inputs, tar_id):
"""
Converts sample.tar(.gz) into two fastq files.
Due to edge conditions... BEWARE: HERE BE DRAGONS
:param JobFunctionWrappingJob job: passed by Toil automatically
:param Namespace inputs: Stores input arguments (see main)
:param str tar_id: FileStore ID of sample tar
"""
job.fileStore.logToMaster('Processing sample into read pairs: {}'.format(inputs.uuid))
work_dir = job.fileStore.getLocalTempDir()
# I/O
tar_path = job.fileStore.readGlobalFile(tar_id, os.path.join(work_dir, 'sample.tar'))
# Untar File and concat
subprocess.check_call(['tar', '-xvf', tar_path, '-C', work_dir])
os.remove(os.path.join(work_dir, 'sample.tar'))
# Grab files from tarball
fastqs = []
for root, subdir, files in os.walk(work_dir):
fastqs.extend([os.path.join(root, x) for x in files])
# Check for read 1 and read 2 files
r1 = sorted([x for x in fastqs if 'R1' in x])
r2 = sorted([x for x in fastqs if 'R2' in x])
if not r1 or not r2:
# Check if using a different standard
r1 = sorted([x for x in fastqs if '_1' in x])
r2 = sorted([x for x in fastqs if '_2' in x])
# Prune file name matches from each list
if len(r1) > len(r2):
r1 = [x for x in r1 if x not in r2]
elif len(r2) > len(r1):
r2 = [x for x in r2 if x not in r1]
# Flag if data is single-ended
assert r1 and r2, 'This pipeline does not support single-ended data. R1: {}\nR2:{}'.format(r1, r2)
command = 'zcat' if r1[0].endswith('gz') and r2[0].endswith('gz') else 'cat'
with open(os.path.join(work_dir, 'R1.fastq'), 'w') as f1:
p1 = subprocess.Popen([command] + r1, stdout=f1)
with open(os.path.join(work_dir, 'R2.fastq'), 'w') as f2:
p2 = subprocess.Popen([command] + r2, stdout=f2)
p1.wait()
p2.wait()
# Write to fileStore
r1_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R1.fastq'))
r2_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R2.fastq'))
job.fileStore.deleteGlobalFile(tar_id)
# Start cutadapt step
job.addChildJobFn(cutadapt, inputs, r1_id, r2_id, disk='60G').rv() |
Filters out adapters that may be left in the RNA-seq files
:param JobFunctionWrappingJob job: passed by Toil automatically
:param Namespace inputs: Stores input arguments (see main)
:param str r1_id: FileStore ID of read 1 fastq
:param str r2_id: FileStore ID of read 2 fastq
def cutadapt(job, inputs, r1_id, r2_id):
"""
Filters out adapters that may be left in the RNA-seq files
:param JobFunctionWrappingJob job: passed by Toil automatically
:param Namespace inputs: Stores input arguments (see main)
:param str r1_id: FileStore ID of read 1 fastq
:param str r2_id: FileStore ID of read 2 fastq
"""
job.fileStore.logToMaster('Running CutAdapt: {}'.format(inputs.uuid))
work_dir = job.fileStore.getLocalTempDir()
inputs.improper_pair = None
# Retrieve files
job.fileStore.readGlobalFile(r1_id, os.path.join(work_dir, 'R1.fastq'))
job.fileStore.readGlobalFile(r2_id, os.path.join(work_dir, 'R2.fastq'))
# Cutadapt parameters
parameters = ['-a', inputs.fwd_3pr_adapter,
'-m', '35',
'-A', inputs.rev_3pr_adapter,
'-o', '/data/R1_cutadapt.fastq',
'-p', '/data/R2_cutadapt.fastq',
'/data/R1.fastq', '/data/R2.fastq']
# Call: CutAdapt
base_docker_call = 'docker run --log-driver=none --rm -v {}:/data'.format(work_dir).split()
if inputs.sudo:
base_docker_call = ['sudo'] + base_docker_call
tool = 'quay.io/ucsc_cgl/cutadapt:1.9--6bd44edd2b8f8f17e25c5a268fedaab65fa851d2'
p = subprocess.Popen(base_docker_call + [tool] + parameters, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
if 'improperly paired' in stderr:
inputs.improper_pair = True
shutil.move(os.path.join(work_dir, 'R1.fastq'), os.path.join(work_dir, 'R1_cutadapt.fastq'))
shutil.move(os.path.join(work_dir, 'R2.fastq'), os.path.join(work_dir, 'R2_cutadapt.fastq'))
# Write to fileStore
if inputs.improper_pair:
r1_cutadapt = r1_id
r2_cutadapt = r2_id
else:
r1_cutadapt = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R1_cutadapt.fastq'))
r2_cutadapt = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R2_cutadapt.fastq'))
job.fileStore.deleteGlobalFile(r1_id)
job.fileStore.deleteGlobalFile(r2_id)
# start STAR
cores = min(inputs.cores, 16)
job.addChildJobFn(star, inputs, r1_cutadapt, r2_cutadapt, cores=cores, disk='100G', memory='40G').rv() |
Performs alignment of fastqs to BAM via STAR
:param JobFunctionWrappingJob job: passed by Toil automatically
:param Namespace inputs: Stores input arguments (see main)
:param str r1_cutadapt: FileStore ID of read 1 fastq
:param str r2_cutadapt: FileStore ID of read 2 fastq
def star(job, inputs, r1_cutadapt, r2_cutadapt):
"""
Performs alignment of fastqs to BAM via STAR
:param JobFunctionWrappingJob job: passed by Toil automatically
:param Namespace inputs: Stores input arguments (see main)
:param str r1_cutadapt: FileStore ID of read 1 fastq
:param str r2_cutadapt: FileStore ID of read 2 fastq
"""
job.fileStore.logToMaster('Aligning with STAR: {}'.format(inputs.uuid))
work_dir = job.fileStore.getLocalTempDir()
cores = min(inputs.cores, 16)
# Retrieve files
job.fileStore.readGlobalFile(r1_cutadapt, os.path.join(work_dir, 'R1_cutadapt.fastq'))
job.fileStore.readGlobalFile(r2_cutadapt, os.path.join(work_dir, 'R2_cutadapt.fastq'))
# Get starIndex
download_url(job=job, url=inputs.star_index, work_dir=work_dir, name='starIndex.tar.gz')
subprocess.check_call(['tar', '-xvf', os.path.join(work_dir, 'starIndex.tar.gz'), '-C', work_dir])
# Parameters
parameters = ['--runThreadN', str(cores),
'--genomeDir', '/data/starIndex',
'--outFileNamePrefix', 'rna',
'--outSAMtype', 'BAM', 'SortedByCoordinate',
'--outSAMunmapped', 'Within',
'--quantMode', 'TranscriptomeSAM',
'--outSAMattributes', 'NH', 'HI', 'AS', 'NM', 'MD',
'--outFilterType', 'BySJout',
'--outFilterMultimapNmax', '20',
'--outFilterMismatchNmax', '999',
'--outFilterMismatchNoverReadLmax', '0.04',
'--alignIntronMin', '20',
'--alignIntronMax', '1000000',
'--alignMatesGapMax', '1000000',
'--alignSJoverhangMin', '8',
'--alignSJDBoverhangMin', '1',
'--sjdbScore', '1',
'--readFilesIn', '/data/R1_cutadapt.fastq', '/data/R2_cutadapt.fastq']
# Call: STAR Map
docker_call(job=job, tool='quay.io/ucsc_cgl/star:2.4.2a--bcbd5122b69ff6ac4ef61958e47bde94001cfe80',
work_dir=work_dir, parameters=parameters)
# Call Samtools Index
index_command = ['index', '/data/rnaAligned.sortedByCoord.out.bam']
docker_call(job=job, work_dir=work_dir, parameters=index_command,
tool='quay.io/ucsc_cgl/samtools:1.3--256539928ea162949d8a65ca5c79a72ef557ce7c')
# fileStore
bam_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'rnaAligned.sortedByCoord.out.bam'))
bai_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'rnaAligned.sortedByCoord.out.bam.bai'))
job.fileStore.deleteGlobalFile(r1_cutadapt)
job.fileStore.deleteGlobalFile(r2_cutadapt)
# Launch children and follow-on
vcqc_id = job.addChildJobFn(variant_calling_and_qc, inputs, bam_id, bai_id, cores=2, disk='30G').rv()
spladder_id = job.addChildJobFn(spladder, inputs, bam_id, bai_id, disk='30G').rv()
job.addFollowOnJobFn(consolidate_output_tarballs, inputs, vcqc_id, spladder_id, disk='30G') |
Perform variant calling with samtools and QC with CheckBias
:param JobFunctionWrappingJob job: passed by Toil automatically
:param Namespace inputs: Stores input arguments (see main)
:param str bam_id: FileStore ID of bam
:param str bai_id: FileStore ID of bam index file
:return: FileStore ID of qc tarball
:rtype: str
def variant_calling_and_qc(job, inputs, bam_id, bai_id):
"""
Perform variant calling with samtools and QC with CheckBias
:param JobFunctionWrappingJob job: passed by Toil automatically
:param Namespace inputs: Stores input arguments (see main)
:param str bam_id: FileStore ID of bam
:param str bai_id: FileStore ID of bam index file
:return: FileStore ID of qc tarball
:rtype: str
"""
job.fileStore.logToMaster('Variant calling and QC: {}'.format(inputs.uuid))
work_dir = job.fileStore.getLocalTempDir()
# Pull in alignment.bam from fileStore
job.fileStore.readGlobalFile(bam_id, os.path.join(work_dir, 'alignment.bam'))
job.fileStore.readGlobalFile(bai_id, os.path.join(work_dir, 'alignment.bam.bai'))
# Download input files
input_info = [(inputs.genome, 'genome.fa'), (inputs.positions, 'positions.tsv'),
(inputs.genome_index, 'genome.fa.fai'), (inputs.gtf, 'annotation.gtf'),
(inputs.gtf_m53, 'annotation.m53')]
for url, fname in input_info:
download_url(job=job, url=url, work_dir=work_dir, name=fname)
# Part 1: Variant Calling
variant_command = ['mpileup',
'-f', 'genome.fa',
'-l', 'positions.tsv',
'-v', 'alignment.bam',
                       '-t', 'DP,SP,INFO/AD,INFO/ADF,INFO/ADR,INFO/DPR',
'-o', '/data/output.vcf.gz']
docker_call(job=job, work_dir=work_dir, parameters=variant_command,
tool='quay.io/ucsc_cgl/samtools:1.3--256539928ea162949d8a65ca5c79a72ef557ce7c')
# Part 2: QC
qc_command = ['-o', 'qc',
'-n', 'alignment.bam',
'-a', 'annotation.gtf',
'-m', 'annotation.m53']
docker_call(job=job, work_dir=work_dir, parameters=qc_command,
tool='jvivian/checkbias:612f129--b08a1fb6526a620bbb0304b08356f2ae7c3c0ec3')
# Write output to fileStore and return ids
output_tsv = glob(os.path.join(work_dir, '*counts.tsv*'))[0]
output_vcf = os.path.join(work_dir, 'output.vcf.gz')
tarball_files('vcqc.tar.gz', file_paths=[output_tsv, output_vcf], output_dir=work_dir)
return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'vcqc.tar.gz')) |
Run SplAdder to detect and quantify alternative splicing events
:param JobFunctionWrappingJob job: passed by Toil automatically
:param Namespace inputs: Stores input arguments (see main)
:param str bam_id: FileStore ID of bam
:param str bai_id: FileStore ID of bam index file
:return: FileStore ID of SplAdder tarball
:rtype: str
def spladder(job, inputs, bam_id, bai_id):
"""
Run SplAdder to detect and quantify alternative splicing events
:param JobFunctionWrappingJob job: passed by Toil automatically
:param Namespace inputs: Stores input arguments (see main)
:param str bam_id: FileStore ID of bam
:param str bai_id: FileStore ID of bam index file
:return: FileStore ID of SplAdder tarball
:rtype: str
"""
job.fileStore.logToMaster('SplAdder: {}'.format(inputs.uuid))
work_dir = job.fileStore.getLocalTempDir()
# Pull in alignment.bam from fileStore
job.fileStore.readGlobalFile(bam_id, os.path.join(work_dir, 'alignment.bam'))
job.fileStore.readGlobalFile(bai_id, os.path.join(work_dir, 'alignment.bam.bai'))
# Download input file
download_url(job=job, url=inputs.gtf, work_dir=work_dir, name='annotation.gtf')
download_url(job=job, url=inputs.gtf_pickle, work_dir=work_dir, name='annotation.gtf.pickle')
# Call Spladder
command = ['--insert_ir=y',
'--insert_es=y',
'--insert_ni=y',
'--remove_se=n',
'--validate_sg=n',
'-b', 'alignment.bam',
               '-o', '/data',
'-a', 'annotation.gtf',
'-v', 'y',
'-c', '3',
'-M', 'single',
'-T', 'n',
'-n', '50',
'-P', 'y',
'-p', 'n',
'--sparse_bam', 'y']
docker_call(job=job, work_dir=work_dir, parameters=command, sudo=inputs.sudo, tool='jvivian/spladder:1.0')
# Write output to fileStore and return ids
    output_pickle = os.path.join(work_dir, 'spladder', 'genes_graph_conf3.alignment.pickle')
if not os.path.exists(output_pickle):
matches = []
for root, dirnames, filenames in os.walk(work_dir):
for filename in fnmatch.filter(filenames, '*genes_graph*'):
matches.append(os.path.join(root, filename))
if matches:
output_pickle = matches[0]
else:
raise RuntimeError("Couldn't find genes file!")
output_filt = os.path.join(work_dir, 'alignment.filt.hdf5')
output = os.path.join(work_dir, 'alignment.hdf5')
print os.listdir(work_dir)
tarball_files('spladder.tar.gz', file_paths=[output_pickle, output_filt, output], output_dir=work_dir)
return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'spladder.tar.gz')) |
Combine the contents of separate tarballs into one.
:param JobFunctionWrappingJob job: passed by Toil automatically
:param Namespace inputs: Stores input arguments (see main)
:param str vcqc_id: FileStore ID of variant calling and QC tarball
:param str spladder_id: FileStore ID of spladder tarball
def consolidate_output_tarballs(job, inputs, vcqc_id, spladder_id):
"""
Combine the contents of separate tarballs into one.
:param JobFunctionWrappingJob job: passed by Toil automatically
:param Namespace inputs: Stores input arguments (see main)
:param str vcqc_id: FileStore ID of variant calling and QC tarball
:param str spladder_id: FileStore ID of spladder tarball
"""
job.fileStore.logToMaster('Consolidating files and uploading: {}'.format(inputs.uuid))
work_dir = job.fileStore.getLocalTempDir()
# Retrieve IDs
uuid = inputs.uuid
# Unpack IDs
# Retrieve output file paths to consolidate
vcqc_tar = job.fileStore.readGlobalFile(vcqc_id, os.path.join(work_dir, 'vcqc.tar.gz'))
spladder_tar = job.fileStore.readGlobalFile(spladder_id, os.path.join(work_dir, 'spladder.tar.gz'))
# I/O
fname = uuid + '.tar.gz' if not inputs.improper_pair else 'IMPROPER_PAIR' + uuid + '.tar.gz'
out_tar = os.path.join(work_dir, fname)
# Consolidate separate tarballs into one
with tarfile.open(os.path.join(work_dir, out_tar), 'w:gz') as f_out:
for tar in [vcqc_tar, spladder_tar]:
with tarfile.open(tar, 'r') as f_in:
for tarinfo in f_in:
with closing(f_in.extractfile(tarinfo)) as f_in_file:
if tar == vcqc_tar:
tarinfo.name = os.path.join(uuid, 'variants_and_qc', os.path.basename(tarinfo.name))
else:
tarinfo.name = os.path.join(uuid, 'spladder', os.path.basename(tarinfo.name))
f_out.addfile(tarinfo, fileobj=f_in_file)
# Move to output directory
if inputs.output_dir:
mkdir_p(inputs.output_dir)
shutil.copy(out_tar, os.path.join(inputs.output_dir, os.path.basename(out_tar)))
# Upload to S3
if inputs.output_s3_dir:
out_id = job.fileStore.writeGlobalFile(out_tar)
job.addChildJobFn(s3am_upload_job, file_id=out_id, s3_dir=inputs.output_s3_dir,
file_name=fname, key_path=inputs.ssec, cores=inputs.cores) |
This Toil pipeline aligns reads and performs alternative splicing analysis.
Please read the README.md located in the same directory for run instructions.
def main():
"""
This Toil pipeline aligns reads and performs alternative splicing analysis.
Please read the README.md located in the same directory for run instructions.
"""
# Define Parser object and add to toil
url_prefix = 'https://s3-us-west-2.amazonaws.com/cgl-pipeline-inputs/'
parser = argparse.ArgumentParser(description=main.__doc__, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--config', required=True,
help='Path to configuration file for samples, one per line. UUID,URL_to_bamfile. '
'The URL may be a standard "http://", a "file://<abs_path>", or "s3://<bucket>/<key>"')
parser.add_argument('--gtf', help='URL to annotation GTF file',
default=url_prefix + 'rnaseq_cgl/gencode.v23.annotation.gtf')
parser.add_argument('--gtf-pickle', help='Pickled GTF file',
default=url_prefix + 'spladder/gencode.v23.annotation.gtf.pickle')
parser.add_argument('--gtf-m53', help='M53 preprocessing annotation table',
default=url_prefix + 'spladder/gencode.v23.annotation.gtf.m53')
parser.add_argument('--positions', help='URL to SNP positions over genes file (TSV)',
default=url_prefix + 'spladder/positions_fixed.tsv')
parser.add_argument('--genome', help='URL to Genome fasta',
default=url_prefix + 'rnaseq_cgl/hg38_no_alt.fa')
parser.add_argument('--genome-index', help='Index file (fai) of genome',
default=url_prefix + 'spladder/hg38_no_alt.fa.fai')
parser.add_argument('--ssec', default=None, help='Path to master key used for downloading encrypted files.')
parser.add_argument('--output-s3-dir', default=None, help='S3 Directory of the form: s3://bucket/directory')
parser.add_argument('--output-dir', default=None, help='full path where final results will be output')
parser.add_argument('--sudo', action='store_true', default=False,
help='Set flag if sudo is required to run Docker.')
parser.add_argument('--star-index', help='URL to download STAR Index built from HG38/gencodev23 annotation.',
default=url_prefix + 'rnaseq_cgl/starIndex_hg38_no_alt.tar.gz')
parser.add_argument('--fwd-3pr-adapter', help="Sequence for the FWD 3' Read Adapter.", default='AGATCGGAAGAG')
parser.add_argument('--rev-3pr-adapter', help="Sequence for the REV 3' Read Adapter.", default='AGATCGGAAGAG')
Job.Runner.addToilOptions(parser)
args = parser.parse_args()
# Sanity Checks
if args.config:
assert os.path.isfile(args.config), 'Config not found at: {}'.format(args.config)
if args.ssec:
        assert os.path.isfile(args.ssec), 'Encryption key not found at: {}'.format(args.ssec)
if args.output_s3_dir:
assert args.output_s3_dir.startswith('s3://'), 'Wrong format for output s3 directory'
# Program checks
for program in ['curl', 'docker']:
assert which(program), 'Program "{}" must be installed on every node.'.format(program)
Job.Runner.startToil(Job.wrapJobFn(parse_input_samples, args), args) |
Validate a dotted-quad ip address.
The string is considered a valid dotted-quad address if it consists of
one to four octets (0-255) separated by periods (.).
>>> validate_ip('127.0.0.1')
True
>>> validate_ip('127.0')
True
>>> validate_ip('127.0.0.256')
False
>>> validate_ip(LOCALHOST)
True
>>> validate_ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: expected string or buffer
:param s: String to validate as a dotted-quad ip address.
:type s: str
:returns: ``True`` if a valid dotted-quad ip address, ``False`` otherwise.
:raises: TypeError
def validate_ip(s):
"""Validate a dotted-quad ip address.
The string is considered a valid dotted-quad address if it consists of
one to four octets (0-255) separated by periods (.).
>>> validate_ip('127.0.0.1')
True
>>> validate_ip('127.0')
True
>>> validate_ip('127.0.0.256')
False
>>> validate_ip(LOCALHOST)
True
>>> validate_ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: expected string or buffer
:param s: String to validate as a dotted-quad ip address.
:type s: str
:returns: ``True`` if a valid dotted-quad ip address, ``False`` otherwise.
:raises: TypeError
"""
if _DOTTED_QUAD_RE.match(s):
quads = s.split('.')
for q in quads:
if int(q) > 255:
return False
return True
return False |
Validate that a dotted-quad ip address is a valid netmask.
>>> validate_netmask('0.0.0.0')
True
>>> validate_netmask('128.0.0.0')
True
>>> validate_netmask('255.0.0.0')
True
>>> validate_netmask('255.255.255.255')
True
>>> validate_netmask(BROADCAST)
True
>>> validate_netmask('128.0.0.1')
False
>>> validate_netmask('1.255.255.0')
False
>>> validate_netmask('0.255.255.0')
False
:param s: String to validate as a dotted-quad notation netmask.
:type s: str
:returns: ``True`` if a valid netmask, ``False`` otherwise.
:raises: TypeError
def validate_netmask(s):
"""Validate that a dotted-quad ip address is a valid netmask.
>>> validate_netmask('0.0.0.0')
True
>>> validate_netmask('128.0.0.0')
True
>>> validate_netmask('255.0.0.0')
True
>>> validate_netmask('255.255.255.255')
True
>>> validate_netmask(BROADCAST)
True
>>> validate_netmask('128.0.0.1')
False
>>> validate_netmask('1.255.255.0')
False
>>> validate_netmask('0.255.255.0')
False
:param s: String to validate as a dotted-quad notation netmask.
:type s: str
:returns: ``True`` if a valid netmask, ``False`` otherwise.
:raises: TypeError
"""
if validate_ip(s):
# Convert to binary string, strip '0b' prefix, 0 pad to 32 bits
mask = bin(ip2network(s))[2:].zfill(32)
# all left most bits must be 1, all right most must be 0
seen0 = False
for c in mask:
if '1' == c:
if seen0:
return False
else:
seen0 = True
return True
else:
return False |
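A small hand-worked illustration of the bit-string check above; the two masks and their binary expansions are computed by hand and are not taken from the source doctests:
mask = bin(ip2network('255.255.240.0'))[2:].zfill(32)
# '11111111111111111111000000000000' -> no 1 appears after a 0 -> valid
bad = bin(ip2network('255.0.255.0'))[2:].zfill(32)
# '11111111000000001111111100000000' -> a 1 appears after a 0 -> invalid |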
Validate a dotted-quad ip address including a netmask.
The string is considered a valid dotted-quad address with netmask if it
consists of one to four octets (0-255) separated by periods (.) followed
by a forward slash (/) and a subnet bitmask which is expressed in
dotted-quad format.
>>> validate_subnet('127.0.0.1/255.255.255.255')
True
>>> validate_subnet('127.0/255.0.0.0')
True
>>> validate_subnet('127.0/255')
True
>>> validate_subnet('127.0.0.256/255.255.255.255')
False
>>> validate_subnet('127.0.0.1/255.255.255.256')
False
>>> validate_subnet('127.0.0.0')
False
>>> validate_subnet(None)
Traceback (most recent call last):
...
TypeError: expected string or unicode
:param s: String to validate as a dotted-quad ip address with netmask.
:type s: str
:returns: ``True`` if a valid dotted-quad ip address with netmask,
``False`` otherwise.
:raises: TypeError
def validate_subnet(s):
"""Validate a dotted-quad ip address including a netmask.
The string is considered a valid dotted-quad address with netmask if it
consists of one to four octets (0-255) separated by periods (.) followed
by a forward slash (/) and a subnet bitmask which is expressed in
dotted-quad format.
>>> validate_subnet('127.0.0.1/255.255.255.255')
True
>>> validate_subnet('127.0/255.0.0.0')
True
>>> validate_subnet('127.0/255')
True
>>> validate_subnet('127.0.0.256/255.255.255.255')
False
>>> validate_subnet('127.0.0.1/255.255.255.256')
False
>>> validate_subnet('127.0.0.0')
False
>>> validate_subnet(None)
Traceback (most recent call last):
...
TypeError: expected string or unicode
:param s: String to validate as a dotted-quad ip address with netmask.
:type s: str
:returns: ``True`` if a valid dotted-quad ip address with netmask,
``False`` otherwise.
:raises: TypeError
"""
if isinstance(s, basestring):
if '/' in s:
            start, mask = s.split('/', 1)
return validate_ip(start) and validate_netmask(mask)
else:
return False
raise TypeError("expected string or unicode") |
Convert a dotted-quad ip address to a network byte order 32-bit
integer.
>>> ip2long('127.0.0.1')
2130706433
>>> ip2long('127.1')
2130706433
>>> ip2long('127')
2130706432
>>> ip2long('127.0.0.256') is None
True
:param ip: Dotted-quad ip address (eg. '127.0.0.1').
:type ip: str
:returns: Network byte order 32-bit integer or ``None`` if ip is invalid.
def ip2long(ip):
"""Convert a dotted-quad ip address to a network byte order 32-bit
integer.
>>> ip2long('127.0.0.1')
2130706433
>>> ip2long('127.1')
2130706433
>>> ip2long('127')
2130706432
>>> ip2long('127.0.0.256') is None
True
:param ip: Dotted-quad ip address (eg. '127.0.0.1').
:type ip: str
:returns: Network byte order 32-bit integer or ``None`` if ip is invalid.
"""
if not validate_ip(ip):
return None
quads = ip.split('.')
if len(quads) == 1:
# only a network quad
quads = quads + [0, 0, 0]
elif len(quads) < 4:
# partial form, last supplied quad is host address, rest is network
host = quads[-1:]
quads = quads[:-1] + [0, ] * (4 - len(quads)) + host
lngip = 0
for q in quads:
lngip = (lngip << 8) | int(q)
return lngip |
Convert a dotted-quad ip to base network number.
This differs from :func:`ip2long` in that partial addresses are treated as
all network instead of network plus host (eg. '127.1' expands to
'127.1.0.0')
:param ip: dotted-quad ip address (eg. '127.0.0.1').
:type ip: str
:returns: Network byte order 32-bit integer or ``None`` if ip is invalid.
def ip2network(ip):
"""Convert a dotted-quad ip to base network number.
This differs from :func:`ip2long` in that partial addresses are treated as
all network instead of network plus host (eg. '127.1' expands to
'127.1.0.0')
:param ip: dotted-quad ip address (eg. '127.0.0.1').
:type ip: str
:returns: Network byte order 32-bit integer or ``None`` if ip is invalid.
"""
if not validate_ip(ip):
return None
quads = ip.split('.')
netw = 0
for i in range(4):
netw = (netw << 8) | int(len(quads) > i and quads[i] or 0)
return netw |
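Hypothetical doctest-style examples (not present in the source docstring) contrasting ip2network with ip2long on a partial address; the expected values are computed by hand from the loop above:
>>> ip2network('127.1')       # expands to 127.1.0.0
2130771968
>>> ip2long('127.1')          # expands to 127.0.0.1
2130706433
>>> ip2network('127.0.0.256') is None
True |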
Convert a network byte order 32-bit integer to a dotted quad ip
address.
>>> long2ip(2130706433)
'127.0.0.1'
>>> long2ip(MIN_IP)
'0.0.0.0'
>>> long2ip(MAX_IP)
'255.255.255.255'
>>> long2ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: unsupported operand type(s) for >>: 'NoneType' and 'int'
>>> long2ip(-1) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: expected int between 0 and 4294967295 inclusive
>>> long2ip(374297346592387463875) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: expected int between 0 and 4294967295 inclusive
>>> long2ip(MAX_IP + 1) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: expected int between 0 and 4294967295 inclusive
:param l: Network byte order 32-bit integer.
:type l: int
:returns: Dotted-quad ip address (eg. '127.0.0.1').
:raises: TypeError
def long2ip(l):
"""Convert a network byte order 32-bit integer to a dotted quad ip
address.
>>> long2ip(2130706433)
'127.0.0.1'
>>> long2ip(MIN_IP)
'0.0.0.0'
>>> long2ip(MAX_IP)
'255.255.255.255'
>>> long2ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: unsupported operand type(s) for >>: 'NoneType' and 'int'
>>> long2ip(-1) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: expected int between 0 and 4294967295 inclusive
>>> long2ip(374297346592387463875) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: expected int between 0 and 4294967295 inclusive
>>> long2ip(MAX_IP + 1) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: expected int between 0 and 4294967295 inclusive
:param l: Network byte order 32-bit integer.
:type l: int
:returns: Dotted-quad ip address (eg. '127.0.0.1').
:raises: TypeError
"""
if MAX_IP < l or l < MIN_IP:
raise TypeError(
"expected int between %d and %d inclusive" % (MIN_IP, MAX_IP))
return '%d.%d.%d.%d' % (
l >> 24 & 255, l >> 16 & 255, l >> 8 & 255, l & 255) |
Convert a CIDR notation ip address into a tuple containing the network
block start and end addresses.
>>> cidr2block('127.0.0.1/32')
('127.0.0.1', '127.0.0.1')
>>> cidr2block('127/8')
('127.0.0.0', '127.255.255.255')
>>> cidr2block('127.0.1/16')
('127.0.0.0', '127.0.255.255')
>>> cidr2block('127.1/24')
('127.1.0.0', '127.1.0.255')
>>> cidr2block('127.0.0.3/29')
('127.0.0.0', '127.0.0.7')
>>> cidr2block('127/0')
('0.0.0.0', '255.255.255.255')
:param cidr: CIDR notation ip address (eg. '127.0.0.1/8').
:type cidr: str
:returns: Tuple of block (start, end) or ``None`` if invalid.
:raises: TypeError
def cidr2block(cidr):
"""Convert a CIDR notation ip address into a tuple containing the network
block start and end addresses.
>>> cidr2block('127.0.0.1/32')
('127.0.0.1', '127.0.0.1')
>>> cidr2block('127/8')
('127.0.0.0', '127.255.255.255')
>>> cidr2block('127.0.1/16')
('127.0.0.0', '127.0.255.255')
>>> cidr2block('127.1/24')
('127.1.0.0', '127.1.0.255')
>>> cidr2block('127.0.0.3/29')
('127.0.0.0', '127.0.0.7')
>>> cidr2block('127/0')
('0.0.0.0', '255.255.255.255')
:param cidr: CIDR notation ip address (eg. '127.0.0.1/8').
:type cidr: str
:returns: Tuple of block (start, end) or ``None`` if invalid.
:raises: TypeError
"""
if not validate_cidr(cidr):
return None
ip, prefix = cidr.split('/')
prefix = int(prefix)
# convert dotted-quad ip to base network number
network = ip2network(ip)
return _block_from_ip_and_prefix(network, prefix) |
Convert a dotted-quad ip address including a netmask into a tuple
containing the network block start and end addresses.
>>> subnet2block('127.0.0.1/255.255.255.255')
('127.0.0.1', '127.0.0.1')
>>> subnet2block('127/255')
('127.0.0.0', '127.255.255.255')
>>> subnet2block('127.0.1/255.255')
('127.0.0.0', '127.0.255.255')
>>> subnet2block('127.1/255.255.255.0')
('127.1.0.0', '127.1.0.255')
>>> subnet2block('127.0.0.3/255.255.255.248')
('127.0.0.0', '127.0.0.7')
>>> subnet2block('127/0')
('0.0.0.0', '255.255.255.255')
:param subnet: dotted-quad ip address with netmask
(eg. '127.0.0.1/255.0.0.0').
:type subnet: str
:returns: Tuple of block (start, end) or ``None`` if invalid.
:raises: TypeError
def subnet2block(subnet):
"""Convert a dotted-quad ip address including a netmask into a tuple
containing the network block start and end addresses.
>>> subnet2block('127.0.0.1/255.255.255.255')
('127.0.0.1', '127.0.0.1')
>>> subnet2block('127/255')
('127.0.0.0', '127.255.255.255')
>>> subnet2block('127.0.1/255.255')
('127.0.0.0', '127.0.255.255')
>>> subnet2block('127.1/255.255.255.0')
('127.1.0.0', '127.1.0.255')
>>> subnet2block('127.0.0.3/255.255.255.248')
('127.0.0.0', '127.0.0.7')
>>> subnet2block('127/0')
('0.0.0.0', '255.255.255.255')
:param subnet: dotted-quad ip address with netmask
(eg. '127.0.0.1/255.0.0.0').
:type subnet: str
:returns: Tuple of block (start, end) or ``None`` if invalid.
:raises: TypeError
"""
if not validate_subnet(subnet):
return None
ip, netmask = subnet.split('/')
prefix = netmask2prefix(netmask)
# convert dotted-quad ip to base network number
network = ip2network(ip)
return _block_from_ip_and_prefix(network, prefix) |
Create a tuple of (start, end) dotted-quad addresses from the given
ip address and prefix length.
:param ip: Ip address in block
:type ip: long
:param prefix: Prefix size for block
:type prefix: int
:returns: Tuple of block (start, end)
def _block_from_ip_and_prefix(ip, prefix):
"""Create a tuple of (start, end) dotted-quad addresses from the given
ip address and prefix length.
:param ip: Ip address in block
:type ip: long
:param prefix: Prefix size for block
:type prefix: int
:returns: Tuple of block (start, end)
"""
# keep left most prefix bits of ip
shift = 32 - prefix
block_start = ip >> shift << shift
# expand right most 32 - prefix bits to 1
mask = (1 << shift) - 1
block_end = block_start | mask
return (long2ip(block_start), long2ip(block_end)) |
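As a worked example of the shift arithmetic above, here is the 127.0.0.3/29 case from the cidr2block doctests, with intermediate values computed by hand:
ip = ip2long('127.0.0.3')           # 2130706435
shift = 32 - 29                     # the low 3 bits are the host part
block_start = ip >> shift << shift  # 2130706432 == ip2long('127.0.0.0')
mask = (1 << shift) - 1             # 0b111 == 7
block_end = block_start | mask      # 2130706439 == ip2long('127.0.0.7') |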
Downloads files shared by all samples in the pipeline
:param JobFunctionWrappingJob job: passed automatically by Toil
:param Namespace config: Argparse Namespace object containing argument inputs
:param list[list] samples: A nested list of samples containing sample information
def download_shared_files(job, samples, config):
"""
Downloads files shared by all samples in the pipeline
:param JobFunctionWrappingJob job: passed automatically by Toil
:param Namespace config: Argparse Namespace object containing argument inputs
:param list[list] samples: A nested list of samples containing sample information
"""
job.fileStore.logToMaster('Downloaded shared files')
file_names = ['reference', 'phase', 'mills', 'dbsnp', 'cosmic']
urls = [config.reference, config.phase, config.mills, config.dbsnp, config.cosmic]
for name, url in zip(file_names, urls):
if url:
vars(config)[name] = job.addChildJobFn(download_url_job, url=url).rv()
job.addFollowOnJobFn(reference_preprocessing, samples, config) |
Spawn the jobs that create index and dict file for reference
:param JobFunctionWrappingJob job: passed automatically by Toil
:param Namespace config: Argparse Namespace object containing argument inputs
:param list[list] samples: A nested list of samples containing sample information
def reference_preprocessing(job, samples, config):
"""
Spawn the jobs that create index and dict file for reference
:param JobFunctionWrappingJob job: passed automatically by Toil
:param Namespace config: Argparse Namespace object containing argument inputs
:param list[list] samples: A nested list of samples containing sample information
"""
job.fileStore.logToMaster('Processed reference files')
config.fai = job.addChildJobFn(run_samtools_faidx, config.reference).rv()
config.dict = job.addChildJobFn(run_picard_create_sequence_dictionary, config.reference).rv()
job.addFollowOnJobFn(map_job, download_sample, samples, config) |
Download sample and store sample specific attributes
:param JobFunctionWrappingJob job: passed automatically by Toil
:param list sample: Contains uuid, normal URL, and tumor URL
:param Namespace config: Argparse Namespace object containing argument inputs
def download_sample(job, sample, config):
"""
Download sample and store sample specific attributes
:param JobFunctionWrappingJob job: passed automatically by Toil
:param list sample: Contains uuid, normal URL, and tumor URL
:param Namespace config: Argparse Namespace object containing argument inputs
"""
# Create copy of config that is sample specific
config = argparse.Namespace(**vars(config))
uuid, normal_url, tumor_url = sample
job.fileStore.logToMaster('Downloaded sample: ' + uuid)
config.uuid = uuid
config.normal = normal_url
config.tumor = tumor_url
config.cores = min(config.maxCores, int(multiprocessing.cpu_count()))
disk = '1G' if config.ci_test else '20G'
# Download sample bams and launch pipeline
config.normal_bam = job.addChildJobFn(download_url_job, url=config.normal, s3_key_path=config.ssec,
cghub_key_path=config.gtkey, disk=disk).rv()
config.tumor_bam = job.addChildJobFn(download_url_job, url=config.tumor, s3_key_path=config.ssec,
cghub_key_path=config.gtkey, disk=disk).rv()
job.addFollowOnJobFn(index_bams, config) |
Convenience job for handling bam indexing to make the workflow declaration cleaner
:param JobFunctionWrappingJob job: passed automatically by Toil
:param Namespace config: Argparse Namespace object containing argument inputs
def index_bams(job, config):
"""
Convenience job for handling bam indexing to make the workflow declaration cleaner
:param JobFunctionWrappingJob job: passed automatically by Toil
:param Namespace config: Argparse Namespace object containing argument inputs
"""
job.fileStore.logToMaster('Indexed sample BAMS: ' + config.uuid)
disk = '1G' if config.ci_test else '20G'
config.normal_bai = job.addChildJobFn(run_samtools_index, config.normal_bam, cores=1, disk=disk).rv()
config.tumor_bai = job.addChildJobFn(run_samtools_index, config.tumor_bam, cores=1, disk=disk).rv()
job.addFollowOnJobFn(preprocessing_declaration, config) |
Declare jobs related to preprocessing
:param JobFunctionWrappingJob job: passed automatically by Toil
:param Namespace config: Argparse Namespace object containing argument inputs
def preprocessing_declaration(job, config):
"""
Declare jobs related to preprocessing
:param JobFunctionWrappingJob job: passed automatically by Toil
:param Namespace config: Argparse Namespace object containing argument inputs
"""
if config.preprocessing:
job.fileStore.logToMaster('Ran preprocessing: ' + config.uuid)
disk = '1G' if config.ci_test else '20G'
mem = '2G' if config.ci_test else '10G'
processed_normal = job.wrapJobFn(run_gatk_preprocessing, config.normal_bam, config.normal_bai,
config.reference, config.dict, config.fai, config.phase, config.mills,
config.dbsnp, mem, cores=1, memory=mem, disk=disk)
processed_tumor = job.wrapJobFn(run_gatk_preprocessing, config.tumor_bam, config.tumor_bai,
config.reference, config.dict, config.fai, config.phase, config.mills,
config.dbsnp, mem, cores=1, memory=mem, disk=disk)
static_workflow = job.wrapJobFn(static_workflow_declaration, config, processed_normal.rv(0),
processed_normal.rv(1), processed_tumor.rv(0), processed_tumor.rv(1))
job.addChild(processed_normal)
job.addChild(processed_tumor)
job.addFollowOn(static_workflow)
else:
job.addFollowOnJobFn(static_workflow_declaration, config, config.normal_bam, config.normal_bai,
config.tumor_bam, config.tumor_bai) |
Statically declare workflow so sections can be modularly repurposed
:param JobFunctionWrappingJob job: passed automatically by Toil
:param Namespace config: Argparse Namespace object containing argument inputs
:param str normal_bam: Normal BAM FileStoreID
:param str normal_bai: Normal BAM index FileStoreID
:param str tumor_bam: Tumor BAM FileStoreID
:param str tumor_bai: Tumor BAM Index FileStoreID
def static_workflow_declaration(job, config, normal_bam, normal_bai, tumor_bam, tumor_bai):
"""
Statically declare workflow so sections can be modularly repurposed
:param JobFunctionWrappingJob job: passed automatically by Toil
:param Namespace config: Argparse Namespace object containing argument inputs
:param str normal_bam: Normal BAM FileStoreID
:param str normal_bai: Normal BAM index FileStoreID
:param str tumor_bam: Tumor BAM FileStoreID
:param str tumor_bai: Tumor BAM Index FileStoreID
"""
# Mutation and indel tool wiring
memory = '1G' if config.ci_test else '10G'
disk = '1G' if config.ci_test else '75G'
mutect_results, pindel_results, muse_results = None, None, None
if config.run_mutect:
mutect_results = job.addChildJobFn(run_mutect, normal_bam, normal_bai, tumor_bam, tumor_bai, config.reference,
config.dict, config.fai, config.cosmic, config.dbsnp,
cores=1, memory=memory, disk=disk).rv()
if config.run_pindel:
pindel_results = job.addChildJobFn(run_pindel, normal_bam, normal_bai, tumor_bam, tumor_bai,
config.reference, config.fai,
cores=config.cores, memory=memory, disk=disk).rv()
if config.run_muse:
muse_results = job.addChildJobFn(run_muse, normal_bam, normal_bai, tumor_bam, tumor_bai,
config.reference, config.dict, config.fai, config.dbsnp,
cores=config.cores, memory=memory, disk=disk).rv()
# Pass tool results (whether None or a promised return value) to consolidation step
consolidation = job.wrapJobFn(consolidate_output, config, mutect_results, pindel_results, muse_results)
job.addFollowOn(consolidation) |
Combine the contents of separate tarball outputs into one via streaming
:param JobFunctionWrappingJob job: passed automatically by Toil
:param Namespace config: Argparse Namespace object containing argument inputs
:param str mutect: MuTect tarball FileStoreID
:param str pindel: Pindel tarball FileStoreID
:param str muse: MuSe tarball FileStoreID
def consolidate_output(job, config, mutect, pindel, muse):
"""
Combine the contents of separate tarball outputs into one via streaming
:param JobFunctionWrappingJob job: passed automatically by Toil
:param Namespace config: Argparse Namespace object containing argument inputs
:param str mutect: MuTect tarball FileStoreID
:param str pindel: Pindel tarball FileStoreID
:param str muse: MuSe tarball FileStoreID
"""
work_dir = job.fileStore.getLocalTempDir()
mutect_tar, pindel_tar, muse_tar = None, None, None
if mutect:
mutect_tar = job.fileStore.readGlobalFile(mutect, os.path.join(work_dir, 'mutect.tar.gz'))
if pindel:
pindel_tar = job.fileStore.readGlobalFile(pindel, os.path.join(work_dir, 'pindel.tar.gz'))
if muse:
muse_tar = job.fileStore.readGlobalFile(muse, os.path.join(work_dir, 'muse.tar.gz'))
out_tar = os.path.join(work_dir, config.uuid + '.tar.gz')
    # Consolidate separate tarballs into one as streams (avoids unnecessary untarring)
tar_list = [x for x in [mutect_tar, pindel_tar, muse_tar] if x is not None]
with tarfile.open(os.path.join(work_dir, out_tar), 'w:gz') as f_out:
for tar in tar_list:
with tarfile.open(tar, 'r') as f_in:
for tarinfo in f_in:
with closing(f_in.extractfile(tarinfo)) as f_in_file:
if tar is mutect_tar:
tarinfo.name = os.path.join(config.uuid, 'mutect', os.path.basename(tarinfo.name))
elif tar is pindel_tar:
tarinfo.name = os.path.join(config.uuid, 'pindel', os.path.basename(tarinfo.name))
else:
tarinfo.name = os.path.join(config.uuid, 'muse', os.path.basename(tarinfo.name))
f_out.addfile(tarinfo, fileobj=f_in_file)
# Move to output location
if urlparse(config.output_dir).scheme == 's3':
job.fileStore.logToMaster('Uploading {} to S3: {}'.format(config.uuid, config.output_dir))
s3am_upload(job=job, fpath=out_tar, s3_dir=config.output_dir, num_cores=config.cores)
else:
job.fileStore.logToMaster('Moving {} to output dir: {}'.format(config.uuid, config.output_dir))
mkdir_p(config.output_dir)
copy_files(file_paths=[out_tar], output_dir=config.output_dir) |
Parses samples, specified in either a manifest or listed with --samples
:param str path_to_manifest: Path to configuration file
:return: Samples and their attributes as defined in the manifest
:rtype: list[list]
def parse_manifest(path_to_manifest):
"""
Parses samples, specified in either a manifest or listed with --samples
:param str path_to_manifest: Path to configuration file
:return: Samples and their attributes as defined in the manifest
:rtype: list[list]
"""
samples = []
with open(path_to_manifest, 'r') as f:
for line in f.readlines():
if not line.isspace() and not line.startswith('#'):
sample = line.strip().split('\t')
require(len(sample) == 3, 'Bad manifest format! '
'Expected 3 tab separated columns, got: {}'.format(sample))
uuid, normal, tumor = sample
for url in [normal, tumor]:
require(urlparse(url).scheme and urlparse(url), 'Invalid URL passed for {}'.format(url))
samples.append(sample)
return samples |
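A minimal sketch of the tab-separated manifest that parse_manifest expects; the UUID and URLs are placeholders, and the file name matches the default used by main() below:
with open('manifest-toil-exome.tsv', 'w') as f:
    f.write('# uuid<TAB>normal_url<TAB>tumor_url\n')
    f.write('sample_1\ts3://bucket/normal_1.bam\ts3://bucket/tumor_1.bam\n')
parse_manifest('manifest-toil-exome.tsv')
# -> [['sample_1', 's3://bucket/normal_1.bam', 's3://bucket/tumor_1.bam']] |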
Computational Genomics Lab, Genomics Institute, UC Santa Cruz
Toil exome pipeline
Perform variant / indel analysis given a pair of tumor/normal BAM files.
Samples are optionally preprocessed (indel realignment and base quality score recalibration)
The output of this pipeline is a tarball containing results from MuTect, MuSe, and Pindel.
General usage:
1. Type "toil-exome generate" to create an editable manifest and config in the current working directory.
2. Parameterize the pipeline by editing the config.
3. Fill in the manifest with information pertaining to your samples.
4. Type "toil-exome run [jobStore]" to execute the pipeline.
Please read the README.md located in the source directory or at:
https://github.com/BD2KGenomics/toil-scripts/tree/master/src/toil_scripts/exome_variant_pipeline
Structure of variant pipeline (per sample)
1 2 3 4 14 -------
| | | | | |
0 --------- 5 ----- 15 -------- 17
| | |
--- 16 -------
| |
6 7
| |
8 9
| |
10 11
| |
12 13
0 = Start node
1 = reference index
2 = reference dict
3 = normal bam index
4 = tumor bam index
5 = pre-processing node / DAG declaration
6,7 = RealignerTargetCreator
8,9 = IndelRealigner
10,11 = BaseRecalibration
12,13 = PrintReads
14 = MuTect
15 = Pindel
16 = MuSe
17 = Consolidate Output and move/upload results
==================================================
Dependencies
Curl: apt-get install curl
Docker: wget -qO- https://get.docker.com/ | sh
Toil: pip install toil
Boto: pip install boto (OPTIONAL)
def main():
"""
Computational Genomics Lab, Genomics Institute, UC Santa Cruz
Toil exome pipeline
Perform variant / indel analysis given a pair of tumor/normal BAM files.
Samples are optionally preprocessed (indel realignment and base quality score recalibration)
The output of this pipeline is a tarball containing results from MuTect, MuSe, and Pindel.
General usage:
1. Type "toil-exome generate" to create an editable manifest and config in the current working directory.
2. Parameterize the pipeline by editing the config.
3. Fill in the manifest with information pertaining to your samples.
4. Type "toil-exome run [jobStore]" to execute the pipeline.
Please read the README.md located in the source directory or at:
https://github.com/BD2KGenomics/toil-scripts/tree/master/src/toil_scripts/exome_variant_pipeline
Structure of variant pipeline (per sample)
1 2 3 4 14 -------
| | | | | |
0 --------- 5 ----- 15 -------- 17
| | |
--- 16 -------
| |
6 7
| |
8 9
| |
10 11
| |
12 13
0 = Start node
1 = reference index
2 = reference dict
3 = normal bam index
4 = tumor bam index
5 = pre-processing node / DAG declaration
6,7 = RealignerTargetCreator
8,9 = IndelRealigner
10,11 = BaseRecalibration
12,13 = PrintReads
14 = MuTect
15 = Pindel
16 = MuSe
17 = Consolidate Output and move/upload results
==================================================
Dependencies
Curl: apt-get install curl
Docker: wget -qO- https://get.docker.com/ | sh
Toil: pip install toil
Boto: pip install boto (OPTIONAL)
"""
parser = argparse.ArgumentParser(description=main.__doc__, formatter_class=argparse.RawTextHelpFormatter)
subparsers = parser.add_subparsers(dest='command')
# Generate subparsers
subparsers.add_parser('generate-config', help='Generates an editable config in the current working directory.')
subparsers.add_parser('generate-manifest', help='Generates an editable manifest in the current working directory.')
subparsers.add_parser('generate', help='Generates a config and manifest in the current working directory.')
# Run subparser
parser_run = subparsers.add_parser('run', help='Runs the CGL exome pipeline')
parser_run.add_argument('--config', default='config-toil-exome.yaml', type=str,
help='Path to the (filled in) config file, generated with "generate-config". '
'\nDefault value: "%(default)s"')
parser_run.add_argument('--manifest', default='manifest-toil-exome.tsv', type=str,
help='Path to the (filled in) manifest file, generated with "generate-manifest". '
'\nDefault value: "%(default)s"')
parser_run.add_argument('--normal', default=None, type=str,
help='URL for the normal BAM. URLs can take the form: http://, ftp://, file://, s3://, '
'and gnos://. The UUID for the sample must be given with the "--uuid" flag.')
parser_run.add_argument('--tumor', default=None, type=str,
help='URL for the tumor BAM. URLs can take the form: http://, ftp://, file://, s3://, '
'and gnos://. The UUID for the sample must be given with the "--uuid" flag.')
parser_run.add_argument('--uuid', default=None, type=str, help='Provide the UUID of a sample when using the'
'"--tumor" and "--normal" option')
# If no arguments provided, print full help menu
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
# Add Toil options
Job.Runner.addToilOptions(parser_run)
args = parser.parse_args()
# Parse subparsers related to generation of config and manifest
cwd = os.getcwd()
if args.command == 'generate-config' or args.command == 'generate':
generate_file(os.path.join(cwd, 'config-toil-exome.yaml'), generate_config)
if args.command == 'generate-manifest' or args.command == 'generate':
generate_file(os.path.join(cwd, 'manifest-toil-exome.tsv'), generate_manifest)
# Pipeline execution
elif args.command == 'run':
require(os.path.exists(args.config), '{} not found. Please run '
'"toil-rnaseq generate-config"'.format(args.config))
if args.normal or args.tumor or args.uuid:
require(args.normal and args.tumor and args.uuid, '"--tumor", "--normal" and "--uuid" must all be supplied')
samples = [[args.uuid, args.normal, args.tumor]]
else:
samples = parse_manifest(args.manifest)
# Parse config
parsed_config = {x.replace('-', '_'): y for x, y in yaml.load(open(args.config).read()).iteritems()}
config = argparse.Namespace(**parsed_config)
config.maxCores = int(args.maxCores) if args.maxCores else sys.maxint
# Exome pipeline sanity checks
if config.preprocessing:
require(config.reference and config.phase and config.mills and config.dbsnp,
'Missing inputs for preprocessing, check config file.')
if config.run_mutect:
require(config.reference and config.dbsnp and config.cosmic,
'Missing inputs for MuTect, check config file.')
if config.run_pindel:
require(config.reference, 'Missing input (reference) for Pindel.')
if config.run_muse:
require(config.reference and config.dbsnp,
'Missing inputs for MuSe, check config file.')
require(config.output_dir, 'No output location specified: {}'.format(config.output_dir))
# Program checks
for program in ['curl', 'docker']:
            require(next(which(program), None), program + ' must be installed on every node.')
# Launch Pipeline
Job.Runner.startToil(Job.wrapJobFn(download_shared_files, samples, config), args) |
Downloads shared files that are used by all samples for alignment, or generates them if they were not provided.
:param JobFunctionWrappingJob job: passed automatically by Toil
:param Namespace inputs: Input arguments (see main)
:param list[list[str, list[str, str]]] samples: Samples in the format [UUID, [URL1, URL2]]
def download_reference_files(job, inputs, samples):
"""
Downloads shared files that are used by all samples for alignment, or generates them if they were not provided.
:param JobFunctionWrappingJob job: passed automatically by Toil
:param Namespace inputs: Input arguments (see main)
:param list[list[str, list[str, str]]] samples: Samples in the format [UUID, [URL1, URL2]]
"""
# Create dictionary to store FileStoreIDs of shared input files
shared_ids = {}
urls = [('amb', inputs.amb), ('ann', inputs.ann), ('bwt', inputs.bwt),
('pac', inputs.pac), ('sa', inputs.sa)]
# Alt file is optional and can only be provided, not generated
if inputs.alt:
urls.append(('alt', inputs.alt))
# Download reference
download_ref = job.wrapJobFn(download_url_job, inputs.ref, disk='3G') # Human genomes are typically ~3G
job.addChild(download_ref)
shared_ids['ref'] = download_ref.rv()
# If FAI is provided, download it. Otherwise, generate it
if inputs.fai:
shared_ids['fai'] = job.addChildJobFn(download_url_job, inputs.fai).rv()
else:
faidx = job.wrapJobFn(run_samtools_faidx, download_ref.rv())
shared_ids['fai'] = download_ref.addChild(faidx).rv()
# If all BWA index files are provided, download them. Otherwise, generate them
if all(x[1] for x in urls):
for name, url in urls:
shared_ids[name] = job.addChildJobFn(download_url_job, url).rv()
else:
job.fileStore.logToMaster('BWA index files not provided, creating now')
bwa_index = job.wrapJobFn(run_bwa_index, download_ref.rv())
download_ref.addChild(bwa_index)
for x, name in enumerate(['amb', 'ann', 'bwt', 'pac', 'sa']):
shared_ids[name] = bwa_index.rv(x)
    # map_job distributes each sample in samples to the download_sample_and_align function
job.addFollowOnJobFn(map_job, download_sample_and_align, samples, inputs, shared_ids) |
Downloads the sample and runs BWA-kit
:param JobFunctionWrappingJob job: Passed by Toil automatically
:param tuple(str, list) sample: UUID and URLS for sample
:param Namespace inputs: Contains input arguments
:param dict ids: FileStore IDs for shared inputs
def download_sample_and_align(job, sample, inputs, ids):
"""
Downloads the sample and runs BWA-kit
:param JobFunctionWrappingJob job: Passed by Toil automatically
:param tuple(str, list) sample: UUID and URLS for sample
:param Namespace inputs: Contains input arguments
:param dict ids: FileStore IDs for shared inputs
"""
uuid, urls = sample
r1_url, r2_url = urls if len(urls) == 2 else (urls[0], None)
job.fileStore.logToMaster('Downloaded sample: {0}. R1 {1}\nR2 {2}\nStarting BWA Run'.format(uuid, r1_url, r2_url))
# Read fastq samples from file store
ids['r1'] = job.addChildJobFn(download_url_job, r1_url, s3_key_path=inputs.ssec, disk=inputs.file_size).rv()
if r2_url:
ids['r2'] = job.addChildJobFn(download_url_job, r2_url, s3_key_path=inputs.ssec, disk=inputs.file_size).rv()
else:
ids['r2'] = None
# Create config for bwakit
inputs.cores = min(inputs.maxCores, multiprocessing.cpu_count())
inputs.uuid = uuid
config = dict(**vars(inputs)) # Create config as a copy of inputs since it has values we want
config.update(ids) # Overwrite attributes with the FileStoreIDs from ids
config = argparse.Namespace(**config)
# Define and wire job functions
bam_id = job.wrapJobFn(run_bwakit, config, sort=inputs.sort, trim=inputs.trim,
disk=inputs.file_size, cores=inputs.cores)
job.addFollowOn(bam_id)
output_name = uuid + '.bam' + str(inputs.suffix) if inputs.suffix else uuid + '.bam'
if urlparse(inputs.output_dir).scheme == 's3':
bam_id.addChildJobFn(s3am_upload_job, file_id=bam_id.rv(), file_name=output_name, s3_dir=inputs.output_dir,
s3_key_path=inputs.ssec, cores=inputs.cores, disk=inputs.file_size)
else:
        mkdir_p(inputs.output_dir)
bam_id.addChildJobFn(copy_file_job, name=output_name, file_id=bam_id.rv(), output_dir=inputs.output_dir,
disk=inputs.file_size) |
Parse manifest file
:param str manifest_path: Path to manifest file
:return: samples
:rtype: list[str, list]
def parse_manifest(manifest_path):
"""
Parse manifest file
:param str manifest_path: Path to manifest file
:return: samples
:rtype: list[str, list]
"""
samples = []
with open(manifest_path, 'r') as f:
for line in f:
if not line.isspace() and not line.startswith('#'):
sample = line.strip().split('\t')
require(2 <= len(sample) <= 3, 'Bad manifest format! '
'Expected UUID\tURL1\t[URL2] (tab separated), got: {}'.format(sample))
uuid = sample[0]
urls = sample[1:]
for url in urls:
require(urlparse(url).scheme and urlparse(url), 'Invalid URL passed for {}'.format(url))
samples.append([uuid, urls])
return samples |
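Similarly, a sketch of the two- or three-column BWA manifest (UUID, then one or two fastq URLs); the paths are placeholders and the file name matches the default used by main() below:
lines = ['paired_sample\tfile:///data/reads_R1.fq.gz\tfile:///data/reads_R2.fq.gz',
         'single_sample\tfile:///data/reads.fq.gz']
with open('manifest-toil-bwa.tsv', 'w') as f:
    f.write('\n'.join(lines) + '\n')
parse_manifest('manifest-toil-bwa.tsv')
# -> [['paired_sample', ['file:///data/reads_R1.fq.gz', 'file:///data/reads_R2.fq.gz']],
#     ['single_sample', ['file:///data/reads.fq.gz']]] |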
Computational Genomics Lab, Genomics Institute, UC Santa Cruz
Toil BWA pipeline
Alignment of fastq reads via BWA-kit
General usage:
1. Type "toil-bwa generate" to create an editable manifest and config in the current working directory.
2. Parameterize the pipeline by editing the config.
3. Fill in the manifest with information pertaining to your samples.
4. Type "toil-bwa run [jobStore]" to execute the pipeline.
Please read the README.md located in the source directory or at:
https://github.com/BD2KGenomics/toil-scripts/tree/master/src/toil_scripts/bwa_alignment
Structure of the BWA pipeline (per sample)
0 --> 1
0 = Download sample
1 = Run BWA-kit
===================================================================
:Dependencies:
cURL: apt-get install curl
Toil: pip install toil
Docker: wget -qO- https://get.docker.com/ | sh
Optional:
S3AM: pip install s3am (requires ~/.boto config file)
Boto: pip install boto
def main():
"""
Computational Genomics Lab, Genomics Institute, UC Santa Cruz
Toil BWA pipeline
Alignment of fastq reads via BWA-kit
General usage:
1. Type "toil-bwa generate" to create an editable manifest and config in the current working directory.
2. Parameterize the pipeline by editing the config.
3. Fill in the manifest with information pertaining to your samples.
4. Type "toil-bwa run [jobStore]" to execute the pipeline.
Please read the README.md located in the source directory or at:
https://github.com/BD2KGenomics/toil-scripts/tree/master/src/toil_scripts/bwa_alignment
Structure of the BWA pipeline (per sample)
0 --> 1
0 = Download sample
1 = Run BWA-kit
===================================================================
:Dependencies:
cURL: apt-get install curl
Toil: pip install toil
Docker: wget -qO- https://get.docker.com/ | sh
Optional:
S3AM: pip install s3am (requires ~/.boto config file)
Boto: pip install boto
"""
# Define Parser object and add to Toil
parser = argparse.ArgumentParser(description=main.__doc__, formatter_class=argparse.RawTextHelpFormatter)
subparsers = parser.add_subparsers(dest='command')
# Generate subparsers
subparsers.add_parser('generate-config', help='Generates an editable config in the current working directory.')
subparsers.add_parser('generate-manifest', help='Generates an editable manifest in the current working directory.')
subparsers.add_parser('generate', help='Generates a config and manifest in the current working directory.')
# Run subparser
parser_run = subparsers.add_parser('run', help='Runs the BWA alignment pipeline')
group = parser_run.add_mutually_exclusive_group()
parser_run.add_argument('--config', default='config-toil-bwa.yaml', type=str,
help='Path to the (filled in) config file, generated with "generate-config".')
group.add_argument('--manifest', default='manifest-toil-bwa.tsv', type=str,
help='Path to the (filled in) manifest file, generated with "generate-manifest". '
'\nDefault value: "%(default)s".')
group.add_argument('--sample', nargs='+', action=required_length(2, 3),
help='Space delimited sample UUID and fastq files in the format: uuid url1 [url2].')
# Print docstring help if no arguments provided
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
Job.Runner.addToilOptions(parser_run)
args = parser.parse_args()
# Parse subparsers related to generation of config and manifest
cwd = os.getcwd()
if args.command == 'generate-config' or args.command == 'generate':
generate_file(os.path.join(cwd, 'config-toil-bwa.yaml'), generate_config)
if args.command == 'generate-manifest' or args.command == 'generate':
generate_file(os.path.join(cwd, 'manifest-toil-bwa.tsv'), generate_manifest)
# Pipeline execution
elif args.command == 'run':
require(os.path.exists(args.config), '{} not found. Please run generate-config'.format(args.config))
if not args.sample:
args.sample = None
require(os.path.exists(args.manifest), '{} not found and no sample provided. '
'Please run "generate-manifest"'.format(args.manifest))
# Parse config
parsed_config = {x.replace('-', '_'): y for x, y in yaml.load(open(args.config).read()).iteritems()}
config = argparse.Namespace(**parsed_config)
config.maxCores = int(args.maxCores) if args.maxCores else sys.maxint
        samples = [[args.sample[0], args.sample[1:]]] if args.sample else parse_manifest(args.manifest)
# Sanity checks
require(config.ref, 'Missing URL for reference file: {}'.format(config.ref))
require(config.output_dir, 'No output location specified: {}'.format(config.output_dir))
# Launch Pipeline
Job.Runner.startToil(Job.wrapJobFn(download_reference_files, config, samples), args) |
Convert an address string to a long.
def _address2long(address):
"""
Convert an address string to a long.
"""
parsed = ipv4.ip2long(address)
if parsed is None:
parsed = ipv6.ip2long(address)
return parsed |
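Assumed fallback behaviour, on the understanding that both ip2long implementations return None for addresses they cannot parse:
_address2long('127.0.0.1')    # handled by ipv4.ip2long
_address2long('2001:db8::1')  # ipv4 parse returns None, so ipv6.ip2long is used
_address2long('not an ip')    # neither parser matches -> None |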