text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def vcf_to_df_worker(arg):
    """Convert one CANVAS vcf into a flat dict (single-thread worker).

    `arg` is a (canvasvcf, exonbed, job_index) tuple; returns a dict keyed
    by 'SampleKey' plus per-gene '.avgcn' / '.medcn' entries.
    """
    canvasvcf, exonbed, i = arg
    logging.debug("Working on job {}: {}".format(i, canvasvcf))
    # Sample key = basename without extension, minus any trailing _suffix
    samplekey = op.basename(canvasvcf).split(".")[0].rsplit('_', 1)[0]
    record = {'SampleKey': samplekey}

    segments = parse_segments(canvasvcf)
    overlaps = BedTool(exonbed).intersect(segments, wao=True)
    per_gene = {}
    for feature in overlaps:
        # Example of feature.fields:
        # [u'chr1', u'11868', u'12227', u'ENSG00000223972.5',
        #  u'ENST00000456328.2', u'transcribed_unprocessed_pseudogene',
        #  u'DDX11L1', u'.', u'-1', u'-1', u'.', u'0']
        fields = feature.fields
        gene_name = "|".join((fields[6], fields[3], fields[5]))
        counter = per_gene.setdefault(gene_name, defaultdict(int))
        copy_number = fields[-2]
        if copy_number == ".":
            continue
        # Cap copy number at 10 so extreme calls do not dominate the stats
        overlap_bp = int(fields[-1])
        counter[min(int(copy_number), 10)] += overlap_bp

    for gene, counter in sorted(per_gene.items()):
        v_mean, v_median = counter_mean_and_median(counter)
        record[gene + ".avgcn"] = v_mean
        record[gene + ".medcn"] = v_median
    cleanup()
    return record
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def vcf_to_df(canvasvcfs, exonbed, cpus):
    """Compile a number of vcf files into tsv file for easy manipulation.

    Runs `vcf_to_df_worker` over the vcfs in a process pool and assembles
    the per-sample dicts into a single DataFrame, one row per sample.

    Args:
        canvasvcfs: list of CANVAS vcf paths.
        exonbed: BED file of exons to intersect copy-number segments with.
        cpus: number of worker processes to use.

    Returns:
        pandas.DataFrame with one row per input vcf, in input order.
    """
    args = [(x, exonbed, i) for (i, x) in enumerate(canvasvcfs)]
    p = Pool(processes=cpus)
    # Plain map() preserves order and returns the list of dicts directly.
    # The old map_async(callback=results.append) nested the results one
    # level deep and relied on DataFrame.append, removed in pandas 2.0.
    results = p.map(vcf_to_df_worker, args)
    p.close()
    p.join()
    return pd.DataFrame(results)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def df_to_tsv(df, tsvfile, suffix):
    """Serialize the dataframe as a tsv.

    Keeps the "SampleKey" column plus all columns ending with `suffix`,
    sorts rows by SampleKey, and writes to `tsvfile + suffix`.

    Args:
        df: DataFrame containing a "SampleKey" column.
        tsvfile: output path prefix; `suffix` is appended to it.
        suffix: column-name suffix to select (e.g. ".avgcn").
    """
    tsvfile += suffix
    columns = ["SampleKey"] + sorted(x for x in df.columns
                                     if x.endswith(suffix))
    # reindex() replaces reindex_axis(), which was removed from pandas.
    tf = df.reindex(columns=columns)
    # sort_values() returns a new frame; the old code discarded the result
    # and wrote unsorted rows.
    tf = tf.sort_values("SampleKey")
    tf.to_csv(tsvfile, sep='\t', index=False, float_format='%.4g', na_rep="na")
    print("TSV output written to `{}` (# samples={})"\
          .format(tsvfile, tf.shape[0]), file=sys.stderr)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def plot(args):
    """
    %prog plot workdir sample chr1,chr2

    Plot some chromosomes for visual proof. Separate multiple chromosomes
    with comma. Must contain folder workdir/sample-cn/.
    """
    from jcvi.graphics.base import savefig

    p = OptionParser(plot.__doc__)
    opts, args, iopts = p.set_image_options(args, figsize="8x7", format="png")

    if len(args) != 3:
        sys.exit(not p.print_help())

    workdir, sample_key, chrs = args
    chr_list = chrs.split(",")
    # Render the selected chromosomes for this sample
    hmm = CopyNumberHMM(workdir=workdir)
    hmm.plot(sample_key, chrs=chr_list)
    image_name = sample_key + "_cn." + iopts.format
    savefig(image_name, dpi=iopts.dpi, iopts=iopts)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sweep(args):
    """
    %prog sweep workdir 102340_NA12878

    Write a number of commands to sweep parameter space.
    """
    p = OptionParser(sweep.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    workdir, sample_key = args
    golden_ratio = (1 + 5 ** .5) / 2
    base_cmd = "python -m jcvi.variation.cnv hmm {} {}".format(workdir, sample_key)
    base_cmd += " --mu {:.5f} --sigma {:.3f} --threshold {:.3f}"

    # Geometric grids over each parameter, spaced by the golden ratio
    mus = [.00012 * golden_ratio ** x for x in range(10)]
    sigmas = [.0012 * golden_ratio ** x for x in range(20)]
    thresholds = [.1 * golden_ratio ** x for x in range(10)]
    for grid in (mus, sigmas, thresholds):
        print(grid, file=sys.stderr)

    # One command line per (mu, sigma, threshold) combination
    for mu in mus:
        for sigma in sigmas:
            for threshold in thresholds:
                print(base_cmd.format(mu, sigma, threshold))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cib(args):
    """
    %prog cib bamfile samplekey

    Convert BAM to CIB (a binary storage of int8 per base).
    """
    parser = OptionParser(cib.__doc__)
    parser.add_option("--prefix", help="Report seqids with this prefix only")
    parser.set_cpus()
    opts, args = parser.parse_args(args)

    if len(args) != 2:
        sys.exit(not parser.print_help())

    bamfile, samplekey = args
    mkdir(samplekey)
    bam = pysam.AlignmentFile(bamfile, "rb")
    refs = [x for x in bam.header["SQ"]]
    if opts.prefix:
        # Restrict to the requested chromosomes only
        refs = [x for x in refs if x["SN"].startswith(opts.prefix)]

    task_args = [(bamfile, r, samplekey) for r in refs]
    cpus = min(opts.cpus, len(task_args))
    logging.debug("Use {} cpus".format(cpus))

    pool = Pool(processes=cpus)
    # Drain the iterator so every chromosome actually gets processed
    for _ in pool.imap(bam_to_cib, task_args):
        continue
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def batchcn(args):
    """
    %prog batchcn workdir samples.csv

    Run CNV segmentation caller in batch mode. Scans a workdir.
    """
    p = OptionParser(batchcn.__doc__)
    p.add_option("--upload", default="s3://hli-mv-data-science/htang/ccn",
                 help="Upload cn and seg results to s3")
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    workdir, samples = args
    upload = opts.upload
    # Samples that already have a .seg result in the store are skipped
    store = upload + "/{}/*.seg".format(workdir)
    computed = set(op.basename(x).split(".")[0] for x in glob_s3(store))

    # Generate a bunch of cn commands
    cmd = "python -m jcvi.variation.cnv cn --hmm --cleanup {}".format(workdir)
    nskipped = ntotal = 0
    for row in open(samples):
        samplekey, path = row.strip().split(",")
        ntotal += 1
        if samplekey in computed:
            nskipped += 1
            continue
        print(" ".join((cmd, samplekey, path)))

    logging.debug("Skipped: {}".format(percentage(nskipped, ntotal)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def hmm(args):
    """
    %prog hmm workdir sample_key

    Run CNV segmentation caller. The workdir must contain a subfolder called
    `sample_key-cn` that contains CN for each chromosome. A `beta` directory
    that contains scaler for each bin must also be present in the current
    directory.
    """
    p = OptionParser(hmm.__doc__)
    p.add_option("--mu", default=.003, type="float",
                 help="Transition probability")
    p.add_option("--sigma", default=.1, type="float",
                 help="Standard deviation of Gaussian emission distribution")
    p.add_option("--threshold", default=1, type="float",
                 help="Standard deviation must be < this "
                      "in the baseline population")
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    workdir, sample_key = args
    model = CopyNumberHMM(workdir=workdir, mu=opts.mu, sigma=opts.sigma,
                          threshold=opts.threshold)
    events = model.run(sample_key)

    # Output filename encodes the parameter combination used
    params = ".mu-{}.sigma-{}.threshold-{}"\
            .format(opts.mu, opts.sigma, opts.threshold)
    hmmfile = op.join(workdir, sample_key + params + ".seg")
    nevents = 0
    with open(hmmfile, "w") as fw:
        for mean_cn, rr, event in events:
            # Only aberrant (non-None) events are reported
            if event is None:
                continue
            print(" ".join((event.bedline, sample_key)), file=fw)
            nevents += 1
    logging.debug("A total of {} aberrant events written to `{}`"
                  .format(nevents, hmmfile))
    return hmmfile
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def batchccn(args):
    """
    %prog batchccn test.csv

    Run CCN script in batch. Write makefile.
    """
    p = OptionParser(batchccn.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    csvfile, = args
    mm = MakeManager()
    pf = op.basename(csvfile).split(".")[0]
    mkdir(pf)

    # A header row is assumed unless the first line already looks like data
    # (i.e. ends with a .bam path)
    first_line = next(open(csvfile))
    header = None if first_line.strip().endswith(".bam") else "infer"
    logging.debug("Header={}".format(header))
    df = pd.read_csv(csvfile, header=header)

    cmd = "perl /mnt/software/ccn_gcn_hg38_script/ccn_gcn_hg38.pl"
    cmd += " -n {} -b {}"
    cmd += " -o {} -r hg38".format(pf)
    for i, (sample_key, bam) in df.iterrows():
        outfile = "{}/{}/{}.ccn".format(pf, sample_key, sample_key)
        mm.add(csvfile, outfile, cmd.format(sample_key, bam))
    mm.write()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mergecn(args):
    """
    %prog mergecn FACE.csv

    Compile matrix of GC-corrected copy numbers. Place a bunch of folders in
    csv file. Each folder will be scanned, one chromosomes after another.
    """
    p = OptionParser(mergecn.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    csvfile, = args
    samples = [x.replace("-cn", "").strip().strip("/") for x in open(csvfile)]
    betadir = "beta"
    mkdir(betadir)
    for seqid in allsomes:
        names = [op.join(s + "-cn", "{}.{}.cn".
                 format(op.basename(s), seqid)) for s in samples]
        # np.float was removed in numpy 1.24; builtin float is equivalent
        arrays = [np.fromfile(name, dtype=float) for name in names]
        shapes = [x.shape[0] for x in arrays]
        med_shape = np.median(shapes)
        # Keep only samples that agree with the median number of bins
        arrays = [x for x in arrays if x.shape[0] == med_shape]
        ploidy = 2 if seqid not in ("chrY", "chrM") else 1
        if seqid in sexsomes:
            # Split samples into two coverage clusters and keep only the
            # cluster with the higher median coverage
            chr_med = [np.median([x for x in a if x > 0]) for a in arrays]
            chr_med = np.array(chr_med)
            idx = get_kmeans(chr_med, k=2)
            zero_med = np.median(chr_med[idx == 0])
            one_med = np.median(chr_med[idx == 1])
            logging.debug("K-means with {} c0:{} c1:{}"
                          .format(seqid, zero_med, one_med))
            higher_idx = 1 if one_med > zero_med else 0
            # Use the higher mean coverage component
            arrays = np.array(arrays)[idx == higher_idx]
            arrays = [[x] for x in arrays]
        # vstack always yields a 2-D samples x bins matrix; concatenate on a
        # list of 1-D arrays produced a 1-D array, breaking the shape unpack
        # below for autosomes
        ar = np.vstack(arrays)
        print(seqid, ar.shape)
        rows, columns = ar.shape
        beta = []
        std = []
        # xrange is Python 2 only; range works on both
        for j in range(columns):
            a = ar[:, j]
            beta.append(np.median(a))
            # Coefficient of variation per bin across samples
            std.append(np.std(a) / np.mean(a))
        beta = np.array(beta) / ploidy
        betafile = op.join(betadir, "{}.beta".format(seqid))
        beta.tofile(betafile)
        stdfile = op.join(betadir, "{}.std".format(seqid))
        std = np.array(std)
        std.tofile(stdfile)
        logging.debug("Written to `{}`".format(betafile))
        ar.tofile("{}.bin".format(seqid))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def annotate_segments(self, Z):
    """Report the copy number and start-end segment.

    Z: appears to be a numpy masked array of per-bin copy-number calls
    (the .compressed() call below implies masking) -- TODO confirm at
    the caller. Returns a list of (mean_cn, (start, end)) pairs, with
    indices mapped back to the original (uncompressed) coordinates.
    """
    # We need a way to go from compressed idices to original indices
    P = Z.copy()
    P[~np.isfinite(P)] = -1
    # cumsum of the finite-mask stays flat across non-finite runs; unique's
    # return_index then recovers, per compressed position, an original index
    _, mapping = np.unique(np.cumsum(P >= 0), return_index=True)

    dZ = Z.compressed()
    # Each distinct mean CN level defines one segment class
    uniq, idx = np.unique(dZ, return_inverse=True)
    segments = []
    for i, mean_cn in enumerate(uniq):
        if not np.isfinite(mean_cn):
            continue
        # contiguous_regions yields runs (in compressed coords) where this
        # CN level holds; mapping[rr] translates them to original coords
        for rr in contiguous_regions(idx == i):
            segments.append((mean_cn, mapping[rr]))

    return segments
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def role(args):
""" %prog role htang Change aws role. """ |
src_acct, src_username, dst_acct, dst_role = \
"205134639408 htang 114692162163 mvrad-datasci-role".split()
p = OptionParser(role.__doc__)
p.add_option("--profile", default="mvrad-datasci-role", help="Profile name")
p.add_option('--device',
default="arn:aws:iam::" + src_acct + ":mfa/" + src_username,
metavar='arn:aws:iam::123456788990:mfa/dudeman',
help="The MFA Device ARN. This value can also be "
"provided via the environment variable 'MFA_DEVICE' or"
" the ~/.aws/credentials variable 'aws_mfa_device'.")
p.add_option('--duration',
type=int, default=3600,
help="The duration, in seconds, that the temporary "
"credentials should remain valid. Minimum value: "
"900 (15 minutes). Maximum: 129600 (36 hours). "
"Defaults to 43200 (12 hours), or 3600 (one "
"hour) when using '--assume-role'. This value "
"can also be provided via the environment "
"variable 'MFA_STS_DURATION'. ")
p.add_option('--assume-role', '--assume',
default="arn:aws:iam::" + dst_acct + ":role/" + dst_role,
metavar='arn:aws:iam::123456788990:role/RoleName',
help="The ARN of the AWS IAM Role you would like to "
"assume, if specified. This value can also be provided"
" via the environment variable 'MFA_ASSUME_ROLE'")
p.add_option('--role-session-name',
help="Friendly session name required when using "
"--assume-role",
default=getpass.getuser())
p.add_option('--force',
help="Refresh credentials even if currently valid.",
action="store_true")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
# Use a config to check the expiration of session token
config = get_config(AWS_CREDS_PATH)
validate(opts, config) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def query(args):
    """
    %prog query out.loci contig

    Random access to loci file. This script helps speeding up debugging.
    """
    p = OptionParser(query.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    locifile, contig = args
    idx = build_index(locifile)
    pos = idx[contig]
    logging.debug("Contig {0} found at pos {1}".format(contig, pos))
    fp = open(locifile)
    fp.seek(pos)
    section = []
    while True:
        row = fp.readline()
        # readline() returns "" forever at EOF; without this guard the old
        # loop spun indefinitely when the contig was last in the file
        if not row:
            break
        # A "//" line naming a different contig marks the end of the section
        if row.startswith("//") and row.split()[1] != contig:
            break
        section.append(row)
    fp.close()

    print("".join(section))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def synteny(args):
    """
    %prog synteny mstmap.out novo.final.fasta reference.fasta

    Plot MSTmap against reference genome.
    """
    from jcvi.assembly.geneticmap import bed as geneticmap_bed
    from jcvi.apps.align import blat
    from jcvi.formats.blast import bed as blast_bed, best

    p = OptionParser(synteny.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 3:
        sys.exit(not p.print_help())

    mstmapout, novo, ref = args
    pf = mstmapout.split(".")[0]
    rf = ref.split(".")[0]

    # Strip trailing .suffix from marker names in the genetic-map bed
    mstmapbed = geneticmap_bed([mstmapout])
    tmpbed = mstmapbed + ".tmp"
    sh("cut -d. -f1 {0}".format(mstmapbed), outfile=tmpbed)
    os.rename(tmpbed, pf + ".bed")

    # Collect the unique marker ids and pull their sequences
    idsfile = pf + ".ids"
    sh("cut -f4 {0} | cut -d. -f1 | sort -u".format(mstmapbed), outfile=idsfile)
    fastafile = pf + ".fasta"
    sh("faSomeRecords {0} {1} {2}".format(novo, idsfile, fastafile))

    # Align markers to the reference, keep only the best hit per marker
    blastfile = blat([ref, fastafile])
    bestblastfile = best([blastfile])
    blastbed = blast_bed([bestblastfile])
    os.rename(blastbed, rf + ".bed")

    # Self-self anchors: every id paired with itself
    anchorsfile = "{0}.{1}.anchors".format(pf, rf)
    sh("paste {0} {0}".format(idsfile), outfile=anchorsfile)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mstmap(args):
    """
    %prog mstmap LMD50.snps.genotype.txt

    Convert LMDs to MSTMAP input.
    """
    from jcvi.assembly.geneticmap import MSTMatrix

    p = OptionParser(mstmap.__doc__)
    p.add_option("--population_type", default="RIL6",
                 help="Type of population, possible values are DH and RILd")
    p.add_option("--missing_threshold", default=.5,
                 help="Missing threshold, .25 excludes any marker with >25% missing")
    p.set_outfile()
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    lmd, = args
    fp = open(lmd)
    next(fp)  # Header
    # Genotype code translation: 0=missing, 1=ref, 2=alt, 3=het
    table = {"0": "-", "1": "A", "2": "B", "3": "X"}
    # fp.next() was Python 2 only; next(fp) works on both 2 and 3 and
    # matches the usage one line above
    mh = ["locus_name"] + next(fp).split()[4:]
    genotypes = []
    for row in fp:
        atoms = row.split()
        chr, pos, ref, alt = atoms[:4]
        locus_name = ".".join((chr, pos))
        codes = [table[x] for x in atoms[4:]]
        genotypes.append([locus_name] + codes)

    mm = MSTMatrix(genotypes, mh, opts.population_type, opts.missing_threshold)
    mm.write(opts.outfile, header=True)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def count(args):
    """
    %prog count cdhit.consensus.fasta

    Scan the headers for the consensus clusters and count the number of reads.
    """
    from jcvi.graphics.histogram import stem_leaf_plot
    from jcvi.utils.cbook import SummaryStats

    p = OptionParser(count.__doc__)
    p.add_option("--csv", help="Write depth per contig to file")
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    fastafile, = args
    csv = open(opts.csv, "w") if opts.csv else None

    f = Fasta(fastafile, lazy=True)
    sizes = []
    for desc, rec in f.iterdescriptions_ordered():
        if desc.startswith("singleton"):
            sizes.append(1)
        elif "with" in desc:
            # consensus_for_cluster_0 with 63 sequences
            name, w, size, seqs = desc.split()
            if csv:
                print("\t".join(str(x)
                      for x in (name, size, len(rec))), file=csv)
            assert w == "with"
            sizes.append(int(size))
        else:
            # MRD85:00603:02472;size=167;
            name, size, tail = desc.split(";")
            sizes.append(int(size.replace("size=", "")))

    if csv:
        csv.close()
        logging.debug("File written to `{0}`".format(opts.csv))

    s = SummaryStats(sizes)
    print(s, file=sys.stderr)
    stem_leaf_plot(s.data, 0, 100, 20, title="Cluster size")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def novo(args):
    """
    %prog novo reads.fastq

    Reference-free tGBS pipeline v1.
    """
    from jcvi.assembly.kmer import jellyfish, histogram
    from jcvi.assembly.preprocess import diginorm
    from jcvi.formats.fasta import filter as fasta_filter, format
    from jcvi.apps.cdhit import filter as cdhit_filter

    p = OptionParser(novo.__doc__)
    p.add_option("--technology", choices=("illumina", "454", "iontorrent"),
                 default="iontorrent", help="Sequencing platform")
    p.set_depth(depth=50)
    p.set_align(pctid=96)
    p.set_home("cdhit", default="/usr/local/bin/")
    p.set_home("fiona", default="/usr/local/bin/")
    p.set_home("jellyfish", default="/usr/local/bin/")
    p.set_cpus()
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    fastqfile, = args
    cpus = opts.cpus
    depth = opts.depth
    pf, sf = fastqfile.rsplit(".", 1)

    # Stage 1: digital normalization of read coverage to `depth`; each
    # stage below is guarded by need_update() so reruns resume cheaply
    diginormfile = pf + ".diginorm." + sf
    if need_update(fastqfile, diginormfile):
        diginorm([fastqfile, "--single", "--depth={0}".format(depth)])
        keepabund = fastqfile + ".keep.abundfilt"
        sh("cp -s {0} {1}".format(keepabund, diginormfile))

    # Stage 2: 23-mer histogram, used to estimate the genome size
    jf = pf + "-K23.histogram"
    if need_update(diginormfile, jf):
        jellyfish([diginormfile, "--prefix={0}".format(pf),
                   "--cpus={0}".format(cpus),
                   "--jellyfish_home={0}".format(opts.jellyfish_home)])

    genomesize = histogram([jf, pf, "23"])

    # Stage 3: error correction with fiona, using the genome size estimate
    fiona = pf + ".fiona.fa"
    if need_update(diginormfile, fiona):
        cmd = op.join(opts.fiona_home, "fiona")
        cmd += " -g {0} -nt {1} --sequencing-technology {2}".\
                format(genomesize, cpus, opts.technology)
        cmd += " -vv {0} {1}".format(diginormfile, fiona)
        logfile = pf + ".fiona.log"
        sh(cmd, outfile=logfile, errfile=logfile)

    # Stage 4: cluster reads with cd-hit and call a consensus per cluster
    dedup = "cdhit"
    pctid = opts.pctid
    cons = fiona + ".P{0}.{1}.consensus.fasta".format(pctid, dedup)
    if need_update(fiona, cons):
        deduplicate([fiona, "--consensus", "--reads",
                     "--pctid={0}".format(pctid),
                     "--cdhit_home={0}".format(opts.cdhit_home)])

    # Stage 5: depth filter then length filter (>= 50 bp)
    # NOTE(review): under Python 3, depth / 5 is a float (e.g. 10.0);
    # presumably an integer (depth // 5) was intended -- confirm what
    # cdhit_filter's --minsize accepts
    filteredfile = pf + ".filtered.fasta"
    if need_update(cons, filteredfile):
        covfile = pf + ".cov.fasta"
        cdhit_filter([cons, "--outfile={0}".format(covfile),
                      "--minsize={0}".format(depth / 5)])
        fasta_filter([covfile, "50", "--outfile={0}".format(filteredfile)])

    # Stage 6: renumber sequences sequentially with the project prefix
    finalfile = pf + ".final.fasta"
    if need_update(filteredfile, finalfile):
        format([filteredfile, finalfile, "--sequential=replace",
                "--prefix={0}_".format(pf)])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def novo2(args):
    """
    %prog novo2 trimmed projectname

    Reference-free tGBS pipeline v2.
    """
    p = OptionParser(novo2.__doc__)
    p.set_fastq_names()
    p.set_align(pctid=95)
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    trimmed, pf = args
    pctid = opts.pctid
    reads, samples = scan_read_files(trimmed, opts.names)

    # Set up directory structure
    clustdir = "uclust"
    acdir = "allele_counts"
    for d in (clustdir, acdir):
        mkdir(d)

    mm = MakeManager()

    # Step 0 - clustering within sample
    clustfiles = []
    for s in samples:
        flist = [x for x in reads if op.basename(x).split(".")[0] == s]
        outfile = op.join(clustdir, s + ".P{0}.clustS".format(pctid))
        cmd = "python -m jcvi.apps.uclust cluster --cpus=8"
        cmd += " {0} {1}".format(s, " ".join(flist))
        cmd += " --outdir={0}".format(clustdir)
        cmd += " --pctid={0}".format(pctid)
        mm.add(flist, outfile, cmd)
        clustfiles.append(outfile)

    # Step 1 - make consensus within sample
    allcons = []
    for s, clustfile in zip(samples, clustfiles):
        outfile = op.join(clustdir, s + ".P{0}.consensus".format(pctid))
        cmd = "python -m jcvi.apps.uclust consensus"
        cmd += " {0}".format(clustfile)
        mm.add(clustfile, outfile, cmd)
        allcons.append(outfile)

    # Step 2 - clustering across samples
    clustSfile = pf + ".P{0}.clustS".format(pctid)
    cmd = "python -m jcvi.apps.uclust mcluster {0}".format(" ".join(allcons))
    cmd += " --prefix={0}".format(pf)
    mm.add(allcons, clustSfile, cmd)

    # Step 3 - make consensus across samples
    locifile = pf + ".P{0}.loci".format(pctid)
    cmd = "python -m jcvi.apps.uclust mconsensus {0}".format(" ".join(allcons))
    cmd += " --prefix={0}".format(pf)
    mm.add(allcons + [clustSfile], locifile, cmd)

    mm.write()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def snpplot(args):
    """
    %prog counts.cdt

    Illustrate the histogram per SNP site.
    """
    p = OptionParser(snpplot.__doc__)
    opts, args, iopts = p.set_image_options(args, format="png")

    if len(args) != 1:
        sys.exit(not p.print_help())

    datafile, = args
    # Read in CDT file, skipping the two header rows
    fp = open(datafile)
    next(fp)
    next(fp)
    data = []
    for row in fp:
        atoms = row.split()[4:]
        nval = len(atoms)
        values = [float(x) for x in atoms]
        # normalize each row so the fractions sum to one
        total = sum(values)
        data.append([x * 1. / total for x in values])

    pf = datafile.rsplit(".", 1)[0]
    fig = plt.figure(1, (iopts.w, iopts.h))
    root = fig.add_axes([0, 0, 1, 1])
    xmin, xmax = .1, .9
    ymin, ymax = .1, .9
    yinterval = (ymax - ymin) / len(data)

    # One horizontal stacked bar per SNP site, colored per genotype class
    colors = "rbg" if nval == 3 else ["lightgray"] + list("rbg")
    ystart = ymax
    for d in data:
        xstart = xmin
        for dd, c in zip(d, colors):
            xend = xstart + (xmax - xmin) * dd
            root.plot((xstart, xend), (ystart, ystart), "-", color=c)
            xstart = xend
        ystart -= yinterval

    root.text(.05, .5, "{0} LMD50 SNPs".format(len(data)),
              ha="center", va="center", rotation=90, color="lightslategray")
    for x, t, c in zip((.3, .5, .7), ("REF", "ALT", "HET"), "rbg"):
        root.text(x, .95, t, color=c, ha="center", va="center")
    normalize_axes(root)

    image_name = pf + "." + iopts.format
    savefig(image_name, dpi=iopts.dpi, iopts=iopts)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def filterm4(args):
    """
    %prog filterm4 sample.m4 > filtered.m4

    Filter .m4 file after blasr is run. As blasr takes a long time to run,
    changing -bestn is undesirable. This screens the m4 file to retain top hits.
    """
    p = OptionParser(filterm4.__doc__)
    p.add_option("--best", default=1, type="int", help="Only retain best N hits")
    p.set_outfile()
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    m4file, = args
    best = opts.best
    fw = must_open(opts.outfile, "w")
    seen = defaultdict(int)
    retained = total = 0
    for row in open(m4file):
        total += 1
        if total % 100000 == 0:
            # Periodic progress report on the retention rate
            logging.debug("Retained {0} lines".\
                          format(percentage(retained, total)))
        r = M4Line(row)
        # Keep at most `best` hits per query, in file order
        if seen.get(r.query, 0) < best:
            fw.write(row)
            seen[r.query] += 1
            retained += 1
    fw.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def spancount(args):
    """
    %prog spancount list_of_fillingMetrics

    Count span support for each gap. A file with paths of all fillingMetrics
    can be built with Linux `find`.

    $ (find assembly -name "fillingMetrics.json" -print > list_of_fillMetrics
    2> /dev/null &)
    """
    import json

    p = OptionParser(spancount.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    fof, = args
    flist = [row.strip() for row in open(fof)]

    spanCount = "spanCount"
    avgSpanBases = "avgSpanBases"
    fw = open(spanCount, "w")
    for f in flist:
        # Close each metrics file promptly; the old code opened one handle
        # per listed file and never closed any of them
        with open(f) as fp:
            j = json.load(fp)
        sc = j.get(spanCount, None)
        asb = j.get(avgSpanBases, None)
        print(f, asb, sc, file=fw)
        fw.flush()

    fw.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def patch(args):
    """
    %prog patch reference.fasta reads.fasta

    Run PBJelly with reference and reads.
    """
    from jcvi.formats.base import write_file
    from jcvi.formats.fasta import format

    p = OptionParser(patch.__doc__)
    p.add_option("--cleanfasta", default=False, action="store_true",
                 help="Clean FASTA to remove description [default: %default]")
    p.add_option("--highqual", default=False, action="store_true",
                 help="Reads are of high quality [default: %default]")
    p.set_home("pbjelly")
    p.set_cpus()
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    ref, reads = args
    cpus = opts.cpus
    # Source PBJelly's environment if its tools are not yet on PATH
    cmd = op.join(opts.pbjelly_home, "setup.sh")
    setup = "source {0}".format(cmd)
    if not which("fakeQuals.py"):
        sh(setup)

    pf = ref.rsplit(".", 1)[0]
    pr, px = reads.rsplit(".", 1)
    # Remove description line
    if opts.cleanfasta:
        oref = pf + ".f.fasta"
        oreads = pr + ".f.fasta"
        format([ref, oref])
        format([reads, oreads])
        ref, reads = oref, oreads

    # Check if the FASTA has qual
    ref, refq = fake_quals(ref)
    # fastq-like inputs already carry quality; only fasta reads need fakes
    convert_reads = not px in ("fq", "fastq", "txt")
    if convert_reads:
        reads, readsq = fake_quals(reads)
        readsfiles = " ".join((reads, readsq))
    else:
        readsfiles = reads

    # Make directory structure expected by the PBJelly protocol
    dref, dreads = "data/reference", "data/reads"
    cwd = os.getcwd()
    reference = op.join(cwd, "{0}/{1}".format(dref, ref))
    reads = op.join(cwd, "{0}/{1}".format(dreads, reads))
    if not op.exists(reference):
        sh("mkdir -p {0}".format(dref))
        sh("cp {0} {1}/".format(" ".join((ref, refq)), dref))
    if not op.exists(reads):
        sh("mkdir -p {0}".format(dreads))
        sh("cp {0} {1}/".format(readsfiles, dreads))

    outputDir = cwd
    p = Protocol(outputDir, reference, reads, highqual=opts.highqual)
    p.write_xml()

    # Emit a run.sh that executes the PBJelly stages in order; the assembly
    # stage gets the cpu count via -x "--nproc=N"
    runsh = [setup]
    for action in "setup|mapping|support|extraction".split("|"):
        runsh.append("Jelly.py {0} Protocol.xml".format(action))
    runsh.append('Jelly.py assembly Protocol.xml -x "--nproc={0}"'.format(cpus))
    runsh.append("Jelly.py output Protocol.xml")

    runfile = "run.sh"
    contents = "\n".join(runsh)
    write_file(runfile, contents)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def isPlantOrigin(taxid):
    """Return True if the organism behind `taxid` is a plant.

    Expands the taxonomy tree for the given id and checks whether
    Viridiplantae appears in its lineage. Raises ValueError for ids
    the taxonomy service cannot resolve.
    """
    assert isinstance(taxid, int)

    tree = TaxIDTree(taxid)
    try:
        lineage = str(tree)
    except AttributeError:
        raise ValueError("{0} is not a valid ID".format(taxid))
    return "Viridiplantae" in lineage
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def newick(args):
    """
    %prog newick idslist

    Query a list of IDs to retrieve phylogeny.
    """
    p = OptionParser(newick.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    idsfile, = args
    # Keep only non-blank lines
    ids = [row.strip() for row in open(idsfile) if row.strip()]
    print(get_taxids(ids))
    print(TaxIDTree(ids))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fastq(args):
    """
    %prog fastq bamfile prefix

    Convert BAM files to paired FASTQ files.
    """
    p = OptionParser(fastq.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    bamfile, pf = args
    singletons = pf + ".se.fastq"
    a, b = pf + ".read1.fastq", pf + ".read2.fastq"

    # Group mates together, then split into read1/read2/singleton fastqs
    cmd = "samtools collate -uOn 128 {} tmp-prefix".format(bamfile)
    cmd += " | samtools fastq -s {} -1 {} -2 {} -"\
            .format(singletons, a, b)
    sh(cmd)

    if os.stat(singletons).st_size == 0:  # singleton file is empty
        os.remove(singletons)
    return a, b
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mini(args):
    """
    %prog mini bamfile region

    Extract mini-bam for a single region.
    """
    parser = OptionParser(mini.__doc__)
    opts, args = parser.parse_args(args)

    if len(args) != 2:
        sys.exit(not parser.print_help())

    bamfile, region = args
    # Delegate the actual slicing to the shared helper
    get_minibam(bamfile, region)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def noclip(args):
    """
    %prog noclip bamfile

    Remove clipped reads from BAM.
    """
    p = OptionParser(noclip.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    bamfile, = args
    noclipbam = bamfile.replace(".bam", ".noclip.bam")
    # Drop alignments whose CIGAR contains hard (H) or soft (S) clips
    cmd = "samtools view -h {} | awk -F '\t' '($6 !~ /H|S/)'".format(bamfile)
    cmd += " | samtools view -@ 4 -b -o {}".format(noclipbam)
    sh(cmd)

    sh("samtools index {}".format(noclipbam))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def append(args):
    """
    %prog append bamfile

    Append /1 or /2 to read names. Useful for using the Tophat2 bam file for
    training AUGUSTUS gene models.
    """
    p = OptionParser(append.__doc__)
    p.add_option("--prepend", help="Prepend string to read names")
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    bamfile, = args
    prepend = opts.prepend

    icmd = "samtools view -h {0}".format(bamfile)
    bamfile = bamfile.rsplit(".", 1)[0] + ".append.bam"
    ocmd = "samtools view -b -@ 64 - -o {0}".format(bamfile)
    # NOTE(review): Popen receives a command *string*; on POSIX that needs
    # shell=True unless Popen here is a project wrapper -- confirm. Also
    # p.stdin is never closed, so the child may not see EOF and could hang.
    p = Popen(ocmd, stdin=PIPE)
    for row in popen(icmd):
        if row[0] == '@':
            # SAM header lines pass through untouched
            print(row.strip(), file=p.stdin)
        else:
            s = SamLine(row)
            if prepend:
                s.qname = prepend + "_" + s.qname
            else:
                # presumably appends /1 or /2 based on the flag bits --
                # verify against SamLine.update_readname
                s.update_readname()
            print(s, file=p.stdin)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bed(args):
    """
    %prog bed bedfile bamfiles

    Convert bam files to bed.
    """
    p = OptionParser(bed.__doc__)
    opts, args = p.parse_args(args)

    if len(args) < 2:
        sys.exit(not p.print_help())

    bedfile, bamfiles = args[0], args[1:]
    # Append each bam's intervals into the same output bed
    for bamfile in bamfiles:
        sh("bamToBed -i {0}".format(bamfile), outfile=bedfile, append=True)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def merge(args):
    """
    Merge BAM files. Treat the bams with the same prefix as a set.
    Output the commands first.
    """
    from jcvi.apps.grid import MakeManager

    p = OptionParser(merge.__doc__)
    p.set_sep(sep="_", help="Separator to group per prefix")
    opts, args = p.parse_args(args)

    if len(args) < 2:
        sys.exit(not p.print_help())

    merged_bams, bamdirs = args[0], args[1:]
    mkdir(merged_bams)

    # Collect all bams, dropping name-sorted intermediates
    bams = []
    for d in bamdirs:
        bams += glob(op.join(d, "*.bam"))
    bams = [x for x in bams if "nsorted" not in x]
    logging.debug("Found a total of {0} BAM files.".format(len(bams)))

    sep = opts.sep
    key = lambda x: op.basename(x).split(sep)[0]
    bams.sort(key=key)
    mm = MakeManager()
    for prefix, files in groupby(bams, key=key):
        files = sorted(list(files))
        source = " ".join(files)
        target = op.join(merged_bams, op.basename(files[0]))
        if len(files) == 1:
            # Single member: just symlink it into place
            source = get_abs_path(source)
            mm.add("", target, "ln -s {0} {1}".format(source, target))
        else:
            cmd = "samtools merge -@ 8 {0} {1}".format(target, source)
            mm.add(files, target, cmd, remove=True)
    mm.write()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def count(args):
    """
    %prog count bamfile gtf

    Count the number of reads mapped using `htseq-count`.
    """
    p = OptionParser(count.__doc__)
    p.add_option("--type", default="exon",
                 help="Only count feature type")
    p.set_cpus(cpus=8)
    opts, args = p.parse_args(args)
    if len(args) != 2:
        sys.exit(not p.print_help())

    bamfile, gtf = args
    cpus = opts.cpus
    pf = bamfile.split(".")[0]
    countfile = pf + ".count"
    # Nothing to do if the count file is already up to date
    if not need_update(bamfile, countfile):
        return

    nsorted = pf + "_nsorted"
    nsortedbam, nsortedsam = nsorted + ".bam", nsorted + ".sam"
    # htseq-count expects name-sorted alignments in SAM format
    if need_update(bamfile, nsortedsam):
        cmd = "samtools sort -@ {0} -n {1} {2}".format(cpus, bamfile, nsorted)
        sh(cmd)
        cmd = "samtools view -@ {0} -h {1}".format(cpus, nsortedbam)
        sh(cmd, outfile=nsortedsam)

    if need_update(nsortedsam, countfile):
        cmd = "htseq-count --stranded=no --minaqual=10"
        cmd += " -t {0}".format(opts.type)
        cmd += " {0} {1}".format(nsortedsam, gtf)
        sh(cmd, outfile=countfile)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def coverage(args):
    """
    %prog coverage fastafile bamfile

    Calculate coverage for BAM file. BAM file will be sorted unless with
    --nosort.
    """
    p = OptionParser(coverage.__doc__)
    p.add_option("--format", default="bigwig",
                 choices=("bedgraph", "bigwig", "coverage"),
                 help="Output format")
    p.add_option("--nosort", default=False, action="store_true",
                 help="Do not sort BAM")
    p.set_outfile()
    opts, args = p.parse_args(args)
    if len(args) != 2:
        sys.exit(not p.print_help())

    fastafile, bamfile = args
    format = opts.format
    if opts.nosort:
        logging.debug("BAM sorting skipped")
    else:
        # index() sorts the BAM and returns the sorted file name
        bamfile = index([bamfile, "--fasta={0}".format(fastafile)])

    pf = bamfile.rsplit(".", 2)[0]
    sizesfile = Sizes(fastafile).filename
    cmd = "genomeCoverageBed -ibam {0} -g {1}".format(bamfile, sizesfile)
    if format in ("bedgraph", "bigwig"):
        cmd += " -bg"
        bedgraphfile = pf + ".bedgraph"
        sh(cmd, outfile=bedgraphfile)

        if format == "bedgraph":
            return bedgraphfile

        bigwigfile = pf + ".bigwig"
        cmd = "bedGraphToBigWig {0} {1} {2}".\
                    format(bedgraphfile, sizesfile, bigwigfile)
        sh(cmd)
        return bigwigfile

    # format == "coverage": emit per-seqid mean coverage table
    coveragefile = pf + ".coverage"
    if need_update(fastafile, coveragefile):
        sh(cmd, outfile=coveragefile)

    gcf = GenomeCoverageFile(coveragefile)
    fw = must_open(opts.outfile, "w")
    for seqid, cov in gcf.iter_coverage_seqid():
        print("\t".join((seqid, "{0:.1f}".format(cov))), file=fw)
    fw.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def consensus(args):
    """
    %prog consensus fastafile bamfile

    Convert bam alignments to consensus FASTQ/FASTA.
    """
    p = OptionParser(consensus.__doc__)
    p.add_option("--fasta", default=False, action="store_true",
                 help="Generate consensus FASTA sequences [default: %default]")
    p.add_option("--mask", default=0, type="int",
                 help="Mask bases with quality lower than")
    opts, args = p.parse_args(args)
    if len(args) < 2:
        sys.exit(not p.print_help())

    fastafile, bamfile = args
    fasta = opts.fasta
    suffix = "fasta" if fasta else "fastq"
    pf = bamfile.rsplit(".", 1)[0]
    cnsfile = pf + ".cns.{0}".format(suffix)
    vcfgzfile = pf + ".vcf.gz"
    vcf([fastafile, bamfile, "-o", vcfgzfile])
    # BUG FIX: previously `cmd +=` was used before `cmd` was defined,
    # raising NameError; initialize the pipeline string here.
    cmd = "zcat {0} | vcfutils.pl vcf2fq".format(vcfgzfile)
    if fasta:
        # Convert FASTQ to FASTA, masking low-quality bases
        cmd += " | seqtk seq -q {0} -A -".format(opts.mask)

    sh(cmd, outfile=cnsfile)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def vcf(args):
    """
    %prog vcf fastafile bamfiles > out.vcf.gz

    Call SNPs on bam files.
    """
    from jcvi.apps.grid import Jobs

    valid_callers = ("mpileup", "freebayes")
    p = OptionParser(vcf.__doc__)
    p.set_outfile(outfile="out.vcf.gz")
    p.add_option("--nosort", default=False, action="store_true",
                 help="Do not sort the BAM files")
    p.add_option("--caller", default="mpileup", choices=valid_callers,
                 help="Use variant caller [default: %default]")
    opts, args = p.parse_args(args)
    if len(args) < 2:
        sys.exit(not p.print_help())

    fastafile = args[0]
    bamfiles = args[1:]
    caller = opts.caller

    unsorted = [x for x in bamfiles if ".sorted." not in x]
    if opts.nosort:
        bamfiles = unsorted
    else:
        # Sort the unsorted BAMs in parallel
        jargs = [[[x, "--unique"]] for x in unsorted]
        jobs = Jobs(index, args=jargs)
        jobs.run()
        # Normalize every name to its .sorted.bam counterpart
        bamfiles = [x.replace(".sorted.bam", ".bam") for x in bamfiles]
        bamfiles = [x.replace(".bam", ".sorted.bam") for x in bamfiles]

    if caller == "mpileup":
        cmd = "samtools mpileup -E -uf"
        cmd += " {0} {1}".format(fastafile, " ".join(bamfiles))
        cmd += " | bcftools call -vmO v"
    elif caller == "freebayes":
        cmd = "freebayes -f"
        cmd += " {0} {1}".format(fastafile, " ".join(bamfiles))
    sh(cmd, outfile=opts.outfile)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def chimera(args):
    """
    %prog chimera bamfile

    Parse BAM file from `bwasw` and list multi-hit reads and breakpoints.
    """
    import pysam
    from jcvi.utils.natsort import natsorted

    p = OptionParser(chimera.__doc__)
    p.set_verbose()
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())

    samfile, = args
    samfile = pysam.AlignmentFile(samfile)
    rstore = defaultdict(list)  # read name -> accumulated breakpoints
    hstore = defaultdict(int)   # read name -> number of alignments seen
    for r in samfile.fetch():
        rstore[r.query_name] += list(breakpoint(r))
        hstore[r.query_name] += 1
        if opts.verbose:
            print(r.query_name, "+-"[r.is_reverse], \
                  sum(l for o, l in r.cigartuples), r.cigarstring, list(breakpoint(r)), file=sys.stderr)

    for rn, bps in natsorted(rstore.items()):
        bps = "|".join(str(x) for x in sorted(bps)) if bps else "na"
        print("\t".join((rn, str(hstore[rn]), bps)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pair(args):
    """
    %prog pair samfile

    Parses the sam file and retrieve in pairs format,
    query:pos ref:pos
    """
    p = OptionParser(pair.__doc__)
    opts, args = p.parse_args(args)
    if len(args) != 1:
        # Exit nonzero on usage error (print_help() returns None);
        # consistent with the other commands in this module.
        sys.exit(not p.print_help())

    def callback(s):
        print(s.pairline)
    Sam(args[0], callback=callback)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cigar_to_seq(a, gap='*'):
    """
    Reconstruct the aligned sequence of a pysam row from its CIGAR.

    The cigar is a list of (operation, length) tuples, e.g.
    [(0, 3), (1, 5), (0, 2)] means 3 matches, 5 insertions, 2 matches.

    Op BAM Description
    M  0   alignment match (sequence match or mismatch)
    I  1   insertion to the reference (dropped)
    D  2   deletion from the reference (padded with `gap` chars)
    N  3   skipped region from the reference (filled with 'N')
    S  4   soft clipping (dropped)
    H  5   hard clipping (dropped)

    Returns (converted_seq, npadded), where npadded counts the gap
    characters inserted for deletions. Returns (None, 0) when the row
    carries no CIGAR.
    """
    seq, cigar = a.seq, a.cigar
    npadded = 0
    if cigar is None:
        return None, npadded

    pieces = []
    cursor = 0
    for op, length in cigar:
        # Deletions consume no query bases; every other op advances cursor
        consumed = 0 if op == 2 else length
        if op == 0:            # match/mismatch: copy from the read
            pieces.append(seq[cursor:cursor + length])
        elif op == 1:          # insertion: drop inserted bases
            pass
        elif op == 2:          # deletion: pad with gap characters
            pieces.append(gap * length)
            npadded += length
        elif op == 3:          # skipped region: fill with Ns
            pieces.append('N' * length)
        elif op in (4, 5):     # soft/hard clip: drop clipped bases
            pass
        else:
            raise NotImplementedError
        cursor += consumed

    return "".join(pieces), npadded
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dump(args):
    """
    %prog dump fastbfile

    Export ALLPATHS fastb file to fastq file. Use --dir to indicate a
    previously run allpaths folder.
    """
    p = OptionParser(dump.__doc__)
    p.add_option("--dir",
                 help="Working directory [default: %default]")
    p.add_option("--nosim", default=False, action="store_true",
                 help="Do not simulate qual to 50 [default: %default]")
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())

    fastbfile, = args
    d = opts.dir
    if d:
        # Re-use the fastq export machinery from a finished ALLPATHS run
        from jcvi.assembly.preprocess import export_fastq
        rc = "jump" in fastbfile
        export_fastq(d, fastbfile, rc=rc)
        return

    sim = not opts.nosim
    # Prefix: 'j' for jumping libraries, 'f' for fragment libraries
    pf = "j" if "jump" in fastbfile else "f"

    statsfile = "{0}.lib_stats".format(pf)
    if op.exists(statsfile):
        os.remove(statsfile)

    cmd = "SplitReadsByLibrary READS_IN={0}".format(fastbfile)
    cmd += " READS_OUT={0} QUALS=True".format(pf)
    sh(cmd)

    libs = []
    fp = open(statsfile)
    next(fp); next(fp)  # skip two rows
    for row in fp:
        if row.strip() == "":
            continue

        libname = row.split()[0]
        if libname == "Unpaired":
            continue

        libs.append(libname)

    logging.debug("Found libraries: {0}".format(",".join(libs)))

    cmds = []
    for libname in libs:
        cmd = "FastbQualbToFastq"
        cmd += " HEAD_IN={0}.{1}.AB HEAD_OUT={1}".format(pf, libname)
        cmd += " PAIRED=True PHRED_OFFSET=33"
        if sim:
            cmd += " SIMULATE_QUALS=True"
        if pf == 'j':
            # Jumping reads need flipping back to FR orientation
            cmd += " FLIP=True"

        cmds.append((cmd, ))

    m = Jobs(target=sh, args=cmds)
    m.run()

    # Rename the A/B pair files to the conventional .1/.2 suffixes
    for libname in libs:
        cmd = "mv {0}.A.fastq {0}.1.fastq".format(libname)
        sh(cmd)
        cmd = "mv {0}.B.fastq {0}.2.fastq".format(libname)
        sh(cmd)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fixpairs(args):
    """
    %prog fixpairs pairsfile sep sd

    Fix pairs library stats. This is sometime useful to modify library
    stats, for example, the separation between paired reads after
    importing the data.
    """
    parser = OptionParser(fixpairs.__doc__)
    opts, args = parser.parse_args(args)
    if len(args) != 3:
        sys.exit(not parser.print_help())

    pairsfile, sep, sd = args
    sep, sd = int(sep), int(sd)
    newpairsfile = pairsfile.rsplit(".", 1)[0] + ".new.pairs"

    # Rewrite the pairs file with the corrected separation / stdev
    pairs = PairsFile(pairsfile)
    pairs.fixLibraryStats(sep, sd)
    pairs.write(newpairsfile)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fill(args):
    """
    %prog fill frag_reads_corr.fastb

    Run FillFragments on `frag_reads_corr.fastb`.
    """
    p = OptionParser(fill.__doc__)
    p.add_option("--stretch", default=3, type="int",
                 help="MAX_STRETCH to pass to FillFragments [default: %default]")
    p.set_cpus()
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())

    fastb, = args
    # The ALLPATHS tools below hardcode this input name
    assert fastb == "frag_reads_corr.fastb"

    pcfile = "frag_reads_corr.k28.pc.info"
    nthreads = " NUM_THREADS={0}".format(opts.cpus)
    maxstretch = " MAX_STRETCH={0}".format(opts.stretch)
    if need_update(fastb, pcfile):
        cmd = "PathReads READS_IN=frag_reads_corr"
        cmd += nthreads
        sh(cmd)

    filledfastb = "filled_reads.fastb"
    if need_update(pcfile, filledfastb):
        cmd = "FillFragments PAIRS_OUT=frag_reads_corr_cpd"
        cmd += " PRECORRECT_LIBSTATS=True"
        cmd += maxstretch
        cmd += nthreads
        sh(cmd)

    filledfasta = "filled_reads.fasta"
    if need_update(filledfastb, filledfasta):
        cmd = "Fastb2Fasta IN=filled_reads.fastb OUT=filled_reads.fasta"
        sh(cmd)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def extract_pairs(fastqfile, p1fw, p2fw, fragsfw, p, suffix=False):
    """
    Take fastqfile and array of pair ID, extract adjacent pairs to outfile.
    Perform check on numbers when done. p1fw, p2fw is a list of file handles,
    each for one end. p is a Pairs instance.

    Records are streamed 4 lines (one FASTQ entry) at a time; p.r1 holds
    the read index of the first read of each pair, p.libs its library index.
    """
    fp = open(fastqfile)
    currentID = 0
    npairs = nfrags = 0
    for x, lib in izip(p.r1, p.libs):
        # Copy unpaired records until we reach the first read of this pair
        while currentID != x:
            fragsfw.writelines(islice(fp, 4))  # Exhaust the iterator
            currentID += 1
            nfrags += 1
        a = list(islice(fp, 4))
        b = list(islice(fp, 4))
        if suffix:
            # Disambiguate the two ends with /1 and /2 suffixes
            name = a[0].rstrip()
            a[0] = name + "/1\n"
            b[0] = name + "/2\n"
        else:
            b[0] = a[0]  # Keep same read ID for pairs

        p1fw[lib].writelines(a)
        p2fw[lib].writelines(b)
        currentID += 2
        npairs += 2  # counts reads, not pairs

    # Write the remaining single reads
    while True:
        contents = list(islice(fp, 4))
        if not contents:
            break
        fragsfw.writelines(contents)
        nfrags += 1

    logging.debug("A total of {0} paired reads written to `{1}`.".\
                  format(npairs, ",".join(x.name for x in p1fw + p2fw)))
    logging.debug("A total of {0} single reads written to `{1}`.".\
                  format(nfrags, fragsfw.name))

    # Validate the numbers
    expected_pairs = 2 * p.npairs
    expected_frags = p.nreads - 2 * p.npairs
    assert npairs == expected_pairs, "Expect {0} paired reads, got {1} instead".\
                  format(expected_pairs, npairs)
    assert nfrags == expected_frags, "Expect {0} single reads, got {1} instead".\
                  format(expected_frags, nfrags)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def log(args):
    """
    %prog log logfile

    Prepare a log of created files, ordered by their creation data. The
    purpose for this script is to touch these files sequentially to reflect
    their build order. On the JCVI scratch area, the files are touched
    regularly to avoid getting deleted, losing their respective timestamps.
    However, this created a problem for the make system adopted by ALLPATHS.

    An example block to be extracted ==>
    [PC] Calling PreCorrect to create 2 file(s):
    [PC]
    [PC] $(RUN)/frag_reads_prec.fastb
    [PC] $(RUN)/frag_reads_prec.qualb
    [PC]
    [PC] from 2 file(s):
    [PC]
    [PC] $(RUN)/frag_reads_filt.fastb
    [PC] $(RUN)/frag_reads_filt.qualb
    """
    from jcvi.algorithms.graph import nx, topological_sort

    p = OptionParser(log.__doc__)
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())

    g = nx.DiGraph()  # edge a -> b: file a is an input used to build b

    logfile, = args
    fp = open(logfile)
    row = fp.readline()
    incalling = False  # True while between a "Calling" and its "from" block
    basedb = {}        # basename -> full path, for the final report

    while row:
        atoms = row.split()
        if len(atoms) < 3:
            row = fp.readline()
            continue

        tag, token, trailing = atoms[0], atoms[1], atoms[-1]
        if trailing == 'file(s):':
            numfiles = int(atoms[-2])
            row = fp.readline()
            # The header is followed by a bare "[XX]" spacer line
            assert row.strip() == tag

        if token == "Calling" and not incalling:
            # Collect the output file names of this step
            createfiles = []
            for i in xrange(numfiles):
                row = fp.readline()
                createfiles.append(row.split()[-1])
            incalling = True

        if token == "from" and incalling:
            # Collect the input file names and wire up dependency edges
            fromfiles = []
            for i in xrange(numfiles):
                row = fp.readline()
                fromfiles.append(row.split()[-1])

            for a in fromfiles:
                for b in createfiles:
                    ba, bb = op.basename(a), op.basename(b)
                    basedb[ba] = a
                    basedb[bb] = b
                    g.add_edge(ba, bb)

            incalling = False

        if token == "ln":
            # Symlink creation also implies a dependency
            fromfile, createfile = atoms[-2:]
            ba, bb = op.basename(fromfile), op.basename(createfile)
            #print ba, "-->", bb
            if ba != bb:
                g.add_edge(ba, bb)

        row = fp.readline()

    # Emit full paths in topological (build) order
    ts = [basedb[x] for x in topological_sort(g) if x in basedb]
    print("\n".join(ts))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def join(self, a, *args):
    """
    Merge the sets containing *a* and each element of *args* into one set.

    Elements not seen before start as singleton sets. Uses a size-weighted
    union: the smaller member list is folded into the larger one.
    """
    mapping = self._mapping
    target = mapping.setdefault(a, [a])
    for other in args:
        members = mapping.get(other)
        if members is None:
            # Unseen element: absorb it into a's set directly
            target.append(other)
            mapping[other] = target
        elif members is not target:
            # Union by size: always extend the bigger list
            if len(members) > len(target):
                target, members = members, target
            target.extend(members)
            for elem in members:
                mapping[elem] = target
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def joined(self, a, b):
    """Return True if *a* and *b* are members of the same set."""
    mapping = self._mapping
    if a not in mapping or b not in mapping:
        # Either element was never joined into any set
        return False
    return mapping[a] is mapping[b]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fromcsv(args):
    """
    %prog fromcsv csvfile

    Convert csv file to EXCEL.
    """
    from csv import reader
    from xlwt import Workbook, easyxf
    from jcvi.formats.base import flexible_cast

    p = OptionParser(fromcsv.__doc__)
    p.add_option("--noheader", default=False, action="store_true",
                 help="Do not treat the first row as header")
    p.add_option("--rgb", default=-1, type="int",
                 help="Show RGB color box")
    p.set_sep()
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())

    csvfile, = args
    header = not opts.noheader
    rgb = opts.rgb  # column index whose "R,G,B" value drives font color
    excelfile = csvfile.rsplit(".", 1)[0] + ".xls"

    data = []
    for row in reader(open(csvfile), delimiter=opts.sep):
        data.append(row)

    w = Workbook()
    s = w.add_sheet(op.basename(csvfile))

    header_style = easyxf('font: bold on')
    if header:
        # Freeze the header row so it stays visible while scrolling
        s.panes_frozen = True
        s.horz_split_pos = 1

    cm = ColorMatcher()
    for i, row in enumerate(data):
        for j, cell in enumerate(row):
            # Cast numeric-looking strings to int/float where possible
            cell = flexible_cast(cell)
            if header and i == 0:
                s.write(i, j, cell, header_style)
            else:
                if j == rgb:
                    # Render this column in the nearest xlwt palette color
                    cix = cm.match_color_index(cell)
                    color_style = easyxf('font: color_index {0}'.format(cix))
                    s.write(i, j, cell, color_style)
                else:
                    s.write(i, j, cell)

    w.save(excelfile)
    logging.debug("File written to `{0}`.".format(excelfile))
    return excelfile
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def csv(args):
    """
    %prog csv excelfile

    Convert EXCEL to csv file.
    """
    from xlrd import open_workbook

    p = OptionParser(csv.__doc__)
    p.set_sep(sep=',')
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())

    excelfile, = args
    sep = opts.sep
    csvfile = excelfile.rsplit(".", 1)[0] + ".csv"
    wb = open_workbook(excelfile)
    # FIX: the output handle was previously never closed; a context
    # manager guarantees the file is flushed and closed.
    with open(csvfile, "w") as fw:
        for s in wb.sheets():
            print('Sheet:', s.name, file=sys.stderr)
            for row in range(s.nrows):
                values = []
                for col in range(s.ncols):
                    values.append(s.cell(row, col).value)
                print(sep.join(str(x) for x in values), file=fw)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def match_color_index(self, color):
    """
    Takes an "R,G,B" string or wx.Color and returns a matching xlwt color
    index. An int argument is assumed to already be an xlwt index and is
    returned unchanged.
    """
    from jcvi.utils.webcolors import color_diff

    if isinstance(color, int):
        return color

    if color:
        if isinstance(color, six.string_types):
            # FIX: materialize the map() result. On Python 3 map() is a
            # one-shot iterator, so it would be exhausted by the first
            # color_diff call in the comprehension below; list() is a
            # no-op change on Python 2.
            rgb = list(map(int, color.split(',')))
        else:
            rgb = color.Get()
        # Silence per-comparison debug chatter from color_diff
        logging.disable(logging.DEBUG)
        distances = [color_diff(rgb, x) for x in self.xlwt_colors]
        logging.disable(logging.NOTSET)
        result = distances.index(min(distances))
        # Mark the chosen palette entry as used
        self.unused_colors.discard(self.xlwt_colors[result])
        return result
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_unused_color(self):
    """
    Returns an xlwt color index that has not been previously returned by
    this instance. Attempts to maximize the distance between the color
    and all previously used colors.
    """
    if not self.unused_colors:
        # If we somehow run out of colors, reset the color matcher.
        self.reset()
    used_colors = [c for c in self.xlwt_colors if c not in self.unused_colors]
    # NOTE(review): if no color has been used yet, used_colors is empty and
    # min() below raises ValueError -- confirm callers always consume at
    # least one color (e.g. via match_color_index) first.
    result_color = max(self.unused_colors,
                       key=lambda c: min(self.color_distance(c, c2)
                                         for c2 in used_colors))
    result_index = self.xlwt_colors.index(result_color)
    self.unused_colors.discard(result_color)
    return result_index
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def validate(args):
    """
    %prog validate input.vcf genome.fasta

    Fasta validation of vcf file: for each record, check whether REF (or
    ALT) matches the base at that position in the genome.
    """
    import pyfasta

    p = OptionParser(validate.__doc__)
    p.add_option("--prefix", help="Add prefix to seqid")
    opts, args = p.parse_args(args)
    # FIX: guard the argument count like every other command here;
    # previously a wrong arg count raised a raw unpacking ValueError.
    if len(args) != 2:
        sys.exit(not p.print_help())

    vcffile, fastafile = args
    pf = opts.prefix
    genome = pyfasta.Fasta(fastafile, record_class=pyfasta.MemoryRecord)
    fp = must_open(vcffile)
    match_ref = match_alt = total = 0
    for row in fp:
        if row[0] == '#':
            continue
        seqid, pos, id, ref, alt = row.split()[:5]
        total += 1
        if pf:
            seqid = pf + seqid
        pos = int(pos)
        if seqid not in genome:
            continue
        # VCF positions are 1-based; pyfasta is 0-based
        true_ref = genome[seqid][pos - 1]
        if total % 100000 == 0:
            print(total, "sites parsed", file=sys.stderr)
        if ref == true_ref:
            match_ref += 1
        elif alt == true_ref:
            match_alt += 1

    logging.debug("Match REF: {}".format(percentage(match_ref, total)))
    logging.debug("Match ALT: {}".format(percentage(match_alt, total)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def uniq(args):
    """
    %prog uniq vcffile

    Retain only the first entry in vcf file.
    """
    from six.moves.urllib.parse import parse_qs

    p = OptionParser(uniq.__doc__)
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())

    vcffile, = args
    fp = must_open(vcffile)
    data = []
    # Pass headers straight through; collect records for deduplication
    for row in fp:
        if row[0] == '#':
            print(row.strip())
            continue
        v = VcfLine(row)
        data.append(v)

    # NOTE(review): groupby only merges *consecutive* records with equal
    # pos, and the key ignores seqid -- this assumes the input is
    # position-sorted within each chromosome. Confirm upstream ordering.
    for pos, vv in groupby(data, lambda x: x.pos):
        vv = list(vv)
        if len(vv) == 1:
            print(vv[0])
            continue
        # Among duplicates, keep the record with the highest R2 in INFO
        bestv = max(vv, key=lambda x: float(parse_qs(x.info)["R2"][0]))
        print(bestv)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sample(args):
    """
    %prog sample vcffile 0.9

    Sample subset of vcf file, keeping each record with the given
    probability. Kept and withheld records go to separate files.
    """
    from random import random

    p = OptionParser(sample.__doc__)
    opts, args = p.parse_args(args)
    if len(args) != 2:
        sys.exit(not p.print_help())

    vcffile, ratio = args
    ratio = float(ratio)
    fp = open(vcffile)
    pf = vcffile.rsplit(".", 1)[0]
    kept = pf + ".kept.vcf"
    withheld = pf + ".withheld.vcf"
    fwk = open(kept, "w")
    fww = open(withheld, "w")
    nkept = nwithheld = 0
    for row in fp:
        if row[0] == '#':
            # Header lines go to the kept file only
            print(row.strip(), file=fwk)
            continue
        if random() < ratio:
            nkept += 1
            print(row.strip(), file=fwk)
        else:
            nwithheld += 1
            print(row.strip(), file=fww)
    logging.debug("{0} records kept to `{1}`".format(nkept, kept))
    logging.debug("{0} records withheld to `{1}`".format(nwithheld, withheld))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fromimpute2(args):
    """
    %prog fromimpute2 impute2file fastafile 1

    Convert impute2 output to vcf file. Imputed file looks like:

    --- 1:10177:A:AC 10177 A AC 0.451 0.547 0.002
    """
    p = OptionParser(fromimpute2.__doc__)
    opts, args = p.parse_args(args)
    if len(args) != 3:
        sys.exit(not p.print_help())

    impute2file, fastafile, chr = args
    fasta = Fasta(fastafile)
    print(get_vcfstanza(fastafile, fasta))
    fp = open(impute2file)
    seen = set()
    for row in fp:
        snp_id, rsid, pos, ref, alt, aa, ab, bb = row.split()
        pos = int(pos)
        # Only the first record at a given position is emitted
        if pos in seen:
            continue
        seen.add(pos)
        # Pick the genotype with the highest posterior probability
        code = max((float(aa), "0/0"), (float(ab), "0/1"), (float(bb), "1/1"))[-1]
        # PR = genotyped site (snp_id equals the chromosome), IM = imputed
        tag = "PR" if snp_id == chr else "IM"
        print("\t".join(str(x) for x in \
                (chr, pos, rsid, ref, alt, ".", ".", tag, \
                "GT:GP", code + ":" + ",".join((aa, ab, bb)))))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def refallele(args):
    """
    %prog refallele vcffile > out.refAllele

    Make refAllele file which can be used to convert PLINK file to VCF file.
    """
    p = OptionParser(refallele.__doc__)
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())

    vcffile, = args
    for row in open(vcffile):
        if row[0] == '#':
            continue
        fields = row.split()
        # Emit "chrom:pos<TAB>REF" per record
        marker = "{0}:{1}".format(fields[0], fields[1])
        print("\t".join((marker, fields[3])))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def location(args):
    """
    %prog location bedfile fastafile

    Given SNP locations, summarize the locations in the sequences. For
    example, find out if there are more 3`-SNPs than 5`-SNPs.
    """
    from jcvi.formats.bed import BedLine
    from jcvi.graphics.histogram import stem_leaf_plot

    p = OptionParser(location.__doc__)
    p.add_option("--dist", default=100, type="int",
                 help="Distance cutoff to call 5` and 3` [default: %default]")
    opts, args = p.parse_args(args)
    if len(args) != 2:
        sys.exit(not p.print_help())

    bedfile, fastafile = args
    dist = opts.dist
    sizes = Sizes(fastafile).mapping
    fp = open(bedfile)
    fiveprime = threeprime = total = 0
    percentages = []
    for row in fp:
        b = BedLine(row)
        pos = b.start
        size = sizes[b.seqid]
        if pos < dist:
            fiveprime += 1
        if size - pos < dist:
            threeprime += 1
        total += 1
        # NOTE(review): relies on true division; under Python 2 this
        # truncates to 0/100 unless `division` is imported from
        # __future__ at file level -- confirm.
        percentages.append(100 * pos / size)

    m = "Five prime (within {0}bp of start codon): {1}\n".format(dist, fiveprime)
    m += "Three prime (within {0}bp of stop codon): {1}\n".format(dist, threeprime)
    m += "Total: {0}".format(total)
    print(m, file=sys.stderr)

    bins = 10
    title = "Locations within the gene [0=Five-prime, 100=Three-prime]"
    stem_leaf_plot(percentages, 0, 100, bins, title=title)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def liftover(args):
    """
    %prog liftover old.vcf hg19ToHg38.over.chain.gz new.vcf

    Lift over coordinates in vcf file.
    """
    p = OptionParser(liftover.__doc__)
    p.add_option("--newid", default=False, action="store_true",
                 help="Make new identifiers")
    opts, args = p.parse_args(args)
    if len(args) != 3:
        sys.exit(not p.print_help())

    oldvcf, chainfile, newvcf = args
    ul = UniqueLiftover(chainfile)
    num_excluded = 0
    fp = open(oldvcf)
    fw = open(newvcf, "w")
    for row in fp:
        row = row.strip()
        if row[0] == '#':
            # Rewrite a few header lines; drop stale contig definitions
            if row.startswith("##source="):
                row = "##source={0}".format(__file__)
            elif row.startswith("##reference="):
                row = "##reference=hg38"
            elif row.startswith("##contig="):
                continue
            print(row.strip(), file=fw)
            continue

        v = VcfLine(row)
        # GRCh37.p2 has the same MT sequence as hg38 (but hg19 is different)
        if v.seqid == "MT":
            v.seqid = "chrM"
            print(v, file=fw)
            continue

        try:
            new_chrom, new_pos = ul.liftover_cpra(CM[v.seqid], v.pos)
        except Exception:
            # FIX: was a bare `except:`, which also swallowed SystemExit
            # and KeyboardInterrupt; narrow to Exception.
            num_excluded += 1
            continue

        if new_chrom is not None and new_pos is not None:
            v.seqid, v.pos = new_chrom, new_pos
            if opts.newid:
                v.rsid = "{0}:{1}".format(new_chrom.replace("chr", ""), new_pos)
            print(v, file=fw)
        else:
            num_excluded += 1

    fw.close()
    fp.close()
    logging.debug("Excluded {0}".format(num_excluded))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def multilineplot(args):
    """
    %prog multilineplot fastafile chr1

    Combine multiple line plots in one vertical stack.
    Inputs must be BED-formatted.

    --lines: traditional line plots, useful for plotting feature freq
    """
    p = OptionParser(multilineplot.__doc__)
    p.add_option("--lines",
                 help="Features to plot in lineplot [default: %default]")
    p.add_option("--colors",
                 help="List of colors matching number of input bed files")
    p.add_option("--mode", default="span", choices=("span", "count", "score"),
                 help="Accumulate feature based on [default: %default]")
    p.add_option("--binned", default=False, action="store_true",
                 help="Specify whether the input is already binned; " +
                      "if True, input files are considered to be binfiles")
    p.add_option("--ymax", type="int", help="Set Y-axis max")
    add_window_options(p)
    opts, args, iopts = p.set_image_options(args, figsize="8x5")
    if len(args) != 2:
        sys.exit(not p.print_help())

    fastafile, chr = args
    window, shift, subtract, merge = check_window_options(opts)
    linebeds = []
    # NOTE(review): colors is the raw option string (e.g. one char per
    # track, given the colors[i] indexing below) and defaults to None, in
    # which case len(colors) raises TypeError when --lines is supplied --
    # confirm --colors is effectively mandatory alongside --lines.
    colors = opts.colors
    if opts.lines:
        lines = opts.lines.split(",")
        assert len(colors) == len(lines), "Number of chosen colors must match" + \
                " number of input bed files"
        linebeds = get_beds(lines, binned=opts.binned)

    linebins = get_binfiles(linebeds, fastafile, shift, mode=opts.mode,
                            binned=opts.binned, merge=merge)

    clen = Sizes(fastafile).mapping[chr]
    nbins = get_nbins(clen, shift)

    plt.rcParams["xtick.major.size"] = 0
    plt.rcParams["ytick.major.size"] = 0
    plt.rcParams["figure.figsize"] = iopts.w, iopts.h

    fig, axarr = plt.subplots(nrows=len(lines))
    if len(linebeds) == 1:
        # subplots() returns a bare Axes for nrows=1; normalize to a tuple
        axarr = (axarr, )
    fig.suptitle(latex(chr), color="darkslategray")

    for i, ax in enumerate(axarr):
        lineplot(ax, [linebins[i]], nbins, chr, window, shift, \
                 color="{0}{1}".format(colors[i], 'r'))

    if opts.ymax:
        ax.set_ylim(0, opts.ymax)

    plt.subplots_adjust(hspace=0.5)

    image_name = chr + "." + iopts.format
    savefig(image_name, dpi=iopts.dpi, iopts=iopts)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _needle(fa, fb, needlefile, a, b, results):
    """
    Run single needle job.

    Aligns the sequences in files `fa` and `fb` with EMBOSS needle,
    appends a TSV line (a, b, identity, score) to the shared `results`
    list, and removes the per-pair temporary files.
    """
    from Bio.Emboss.Applications import NeedleCommandline

    needle_cline = NeedleCommandline(asequence=fa, bsequence=fb,
                                     gapopen=10, gapextend=0.5, outfile=needlefile)
    stdout, stderr = needle_cline()
    nh = NeedleHeader(needlefile)
    # Clean up the per-pair temporary inputs and output
    FileShredder([fa, fb, needlefile], verbose=False)
    r = ["\t".join((a, b, nh.identity, nh.score))]
    results.extend(r)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def needle(args):
    """
    %prog needle nw.pairs a.pep.fasta b.pep.fasta

    Take protein pairs and needle them.
    Automatically writes output file `nw.scores`.
    """
    from jcvi.formats.fasta import Fasta, SeqIO

    p = OptionParser(needle.__doc__)
    opts, args = p.parse_args(args)
    if len(args) != 3:
        sys.exit(not p.print_help())

    manager = mp.Manager()
    results = manager.list()  # shared across worker processes
    needle_pool = mp.Pool(processes=mp.cpu_count())

    pairsfile, apep, bpep = args
    afasta, bfasta = Fasta(apep), Fasta(bpep)
    fp = must_open(pairsfile)
    for i, row in enumerate(fp):
        a, b = row.split()
        a, b = afasta[a], bfasta[b]
        # Write each member of the pair to its own temp FASTA for needle
        fa, fb = must_open("{0}_{1}_a.fasta".format(pairsfile, i), "w"), \
                 must_open("{0}_{1}_b.fasta".format(pairsfile, i), "w")
        SeqIO.write([a], fa, "fasta")
        SeqIO.write([b], fb, "fasta")
        fa.close()
        fb.close()

        needlefile = "{0}_{1}_ab.needle".format(pairsfile, i)
        needle_pool.apply_async(_needle, \
                (fa.name, fb.name, needlefile, a.id, b.id, results))

    needle_pool.close()
    needle_pool.join()  # wait for every alignment to finish

    fp.close()

    scoresfile = "{0}.scores".format(pairsfile.rsplit(".")[0])
    fw = must_open(scoresfile, "w")
    for result in results:
        print(result, file=fw)
    fw.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def maker(args):
    """
    %prog maker maker.gff3 genome.fasta

    Prepare EVM inputs by separating tracks from MAKER.
    """
    from jcvi.formats.base import SetFile, FileShredder

    A, T, P = "ABINITIO_PREDICTION", "TRANSCRIPT", "PROTEIN"
    # Stores default weights and types
    Registry = {\
        "maker": (A, 5),
        "augustus_masked": (A, 1),
        "snap_masked": (A, 1),
        "genemark": (A, 1),
        "est2genome": (T, 5),
        "est_gff": (T, 5),
        "protein2genome": (P, 5),
        "blastx": (P, 1)
    }

    p = OptionParser(maker.__doc__)
    opts, args = p.parse_args(args)
    if len(args) != 2:
        sys.exit(not p.print_help())

    gffile, fastafile = args

    # Collect the distinct source names (GFF column 2)
    types = "type.ids"
    if need_update(gffile, types):
        cmd = "cut -f2 -s {0} | sort -u".format(gffile)
        sh(cmd, outfile=types)

    types = SetFile(types)
    reg = defaultdict(list)  # evidence type -> list of source names
    weightsfile = "weights.txt"
    contents = []
    for s in types:
        rs = s.split(":")[0]
        if rs not in Registry:
            continue

        type, weight = Registry[rs]
        reg[type].append(s)
        contents.append("\t".join(str(x) for x in (type, s, weight)))

    contents = "\n".join(sorted(contents))
    write_file(weightsfile, contents)

    evs = [x + ".gff" for x in (A, T, P)]
    # Remove stale evidence files since we append (>>) below
    FileShredder(evs)

    for type, tracks in reg.items():
        for t in tracks:
            cmd = "grep '\t{0}' {1} | grep -v '_match\t' >> {2}.gff".format(t, gffile, type)
            sh(cmd)

    partition(evs)
    runfile = "run.sh"
    contents = EVMRUN.format(*evs)
    write_file(runfile, contents)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def tigrload(args):
    """
    %prog tigrload db ev_type

    Load EVM results into TIGR db. Actually, just write a load.sh script.
    The ev_type should be set, e.g. "EVM1", "EVM2", etc.
    """
    parser = OptionParser(tigrload.__doc__)
    opts, args = parser.parse_args(args)
    if len(args) != 2:
        sys.exit(not parser.print_help())

    db, ev_type = args
    # Emit the loader script rather than running the load directly
    write_file("load.sh", EVMLOAD.format(db, ev_type))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pasa(args):
""" %prog pasa pasa_db fastafile Run EVM in TIGR-only mode. """ |
p = OptionParser(pasa.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
pasa_db, fastafile = args
termexons = "pasa.terminal_exons.gff3"
if need_update(fastafile, termexons):
cmd = "$ANNOT_DEVEL/PASA2/scripts/pasa_asmbls_to_training_set.dbi"
cmd += ' -M "{0}:mysql.tigr.org" -p "access:access"'.format(pasa_db)
cmd += ' -g {0}'.format(fastafile)
sh(cmd)
cmd = "$EVM/PasaUtils/retrieve_terminal_CDS_exons.pl"
cmd += " trainingSetCandidates.fasta trainingSetCandidates.gff"
sh(cmd, outfile=termexons)
return termexons |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def tigrprepare(args):
""" %prog tigrprepare asmbl.fasta asmbl.ids db pasa.terminal_exons.gff3 Run EVM in TIGR-only mode. """ |
# Prepare inputs for running EVM in TIGR-only mode: collect assembly ids,
# build the weights file, dump evidence GFF3s, then partition and write run.sh.
p = OptionParser(tigrprepare.__doc__)
opts, args = p.parse_args(args)

if len(args) != 4:
    sys.exit(not p.print_help())

fastafile, asmbl_id, db, pasa_db = args
if asmbl_id == 'all':
    idsfile = fastafile + ".ids"
    if need_update(fastafile, idsfile):
        ids([fastafile, "-o", idsfile])
else:
    idsfile = asmbl_id

# Fix: `open(idsfile).next()` leaked the file handle and used the
# Python-2-only .next() method; `with` + next() is equivalent and safe.
with open(idsfile) as fh:
    oneid = next(fh).strip()

weightsfile = "weights.txt"
if need_update(idsfile, weightsfile):
    cmd = "$EVM/TIGR-only/create_sample_weights_file.dbi"
    cmd += " {0} {1} | tee weights.txt".format(db, oneid)
    sh(cmd)

evs = ["gene_predictions.gff3", "transcript_alignments.gff3",
       "protein_alignments.gff3"]
if need_update(weightsfile, evs):
    cmd = "$EVM/TIGR-only/write_GFF3_files.dbi"
    cmd += " --db {0} --asmbl_id {1} --weights {2}".\
            format(db, idsfile, weightsfile)
    sh(cmd)

# The transcript alignments need a post-processing fix before partitioning.
evs[1] = fix_transcript()

partition(evs)
runfile = "run.sh"
contents = EVMRUN.format(*evs)
write_file(runfile, contents)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def uniq(args):
""" %prog uniq gffile cdsfasta Remove overlapping gene models. Similar to formats.gff.uniq(), overlapping 'piles' are processed, one by one. Here, we use a different algorithm, that retains the best non-overlapping subset witin each pile, rather than single best model. Scoring function is also different, rather than based on score or span, we optimize for the subset that show the best combined score. Score is defined by: score = (1 - AED) * length """ |
# Parse options; the function docstring doubles as the usage message.
p = OptionParser(uniq.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)

if len(args) != 2:
    sys.exit(not p.print_help())

gffile, cdsfasta = args
gff = Gff(gffile)
sizes = Sizes(cdsfasta).mapping
# Score each gene via its mRNA records: score = (1 - AED) * CDS length,
# so well-supported (low-AED), long models score highest. If a gene has
# multiple mRNAs, the last one seen sets the gene's score.
gene_register = {}
for g in gff:
    if g.type != "mRNA":
        continue
    aed = float(g.attributes["_AED"][0])
    gene_register[g.parent] = (1 - aed) * sizes[g.accn]

# Group genes into overlapping "piles", then within each pile keep the
# non-overlapping subset that maximizes the combined score.
allgenes = import_feats(gffile)
g = get_piles(allgenes)

bestids = set()
for group in g:
    ranges = [to_range(x, score=gene_register[x.accn], id=x.accn) \
                for x in group]
    selected_chain, score = range_chain(ranges)
    bestids |= set(x.id for x in selected_chain)

# Record the discarded gene ids for later inspection.
removed = set(x.accn for x in allgenes) - bestids
fw = open("removed.ids", "w")
print("\n".join(sorted(removed)), file=fw)
fw.close()

# Emit the retained genes, together with their children features.
populate_children(opts.outfile, bestids, gffile, "gene")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def nmd(args):
""" %prog nmd gffile Identify transcript variants which might be candidates for nonsense mediated decay (NMD) A transcript is considered to be a candidate for NMD when the CDS stop codon is located more than 50nt upstream of terminal splice site donor References: http://www.nature.com/horizon/rna/highlights/figures/s2_spec1_f3.html http://www.biomedcentral.com/1741-7007/7/23/figure/F1 """ |
import __builtin__
from jcvi.utils.cbook import enumerate_reversed

p = OptionParser(nmd.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)

if len(args) != 1:
    sys.exit(not p.print_help())

gffile, = args
gff = make_index(gffile)

fw = must_open(opts.outfile, "w")
for gene in gff.features_of_type('gene', order_by=('seqid', 'start')):
    # Iterate exons from the 3' end of the transcript: reversed order for
    # + strand genes, natural order for - strand genes.
    _enumerate = __builtin__.enumerate if gene.strand == "-" else enumerate_reversed
    for mrna in gff.children(gene, featuretype='mRNA', order_by=('start')):
        tracker = dict()
        tracker['exon'] = list(gff.children(mrna, featuretype='exon', order_by=('start')))
        tracker['cds'] = [None] * len(tracker['exon'])

        # The first exon (scanning from the 3' end) that contains a CDS
        # holds the terminal (stop-codon) CDS segment.
        tcds_pos = None
        for i, exon in _enumerate(tracker['exon']):
            for cds in gff.region(region=exon, featuretype='CDS', completely_within=True):
                if mrna.id in cds['Parent']:
                    tracker['cds'][i] = cds
                    tcds_pos = i
                    break
            # Bug fix: `if tcds_pos:` treated index 0 as "not found", so
            # the scan continued and overwrote the terminal CDS position.
            if tcds_pos is not None:
                break

        NMD, distance = False, 0
        # Guard on tcds_pos: an mRNA without any CDS child would otherwise
        # raise TypeError on the tcds_pos arithmetic below.
        if tcds_pos is not None and \
                ((mrna.strand == "+" and tcds_pos + 1 < len(tracker['exon'])) \
                or (mrna.strand == "-" and tcds_pos - 1 >= 0)):
            tcds = tracker['cds'][tcds_pos]
            texon = tracker['exon'][tcds_pos]

            PTC = tcds.end if mrna.strand == '+' else tcds.start
            TDSS = texon.end if mrna.strand == '+' else texon.start

            # NMD candidate if the stop codon (PTC) lies more than 50 nt
            # upstream of the terminal splice donor site (TDSS).
            distance = abs(TDSS - PTC)
            NMD = True if distance > 50 else False

        print("\t".join(str(x) for x in (gene.id, mrna.id, \
            gff.children_bp(mrna, child_featuretype='CDS'), distance, NMD)), file=fw)

fw.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def print_edges(G, bed, families):
""" Instead of going through the graph construction, just print the edges. """ |
# Emit one edge per adjacent gene pair within each seqid, in the form
# "<prev><arrow>--<arrow><node>", where the arrow encodes strand.
arrow = {'+': '>', '-': '<'}
for seqid, sub in bed.sub_beds():
    last_node, last_strand = None, '+'
    for feat in sub:
        node = "=".join(families[feat.accn])
        if last_node:
            print("{}{}--{}{}".format(
                last_node, arrow[last_strand], arrow[feat.strand], node))
        last_node, last_strand = node, feat.strand
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def adjgraph(args):
""" %prog adjgraph adjacency.txt subgraph.txt Construct adjacency graph for graphviz. The file may look like sample below. The lines with numbers are chromosomes with gene order information. genome 0 chr 0 -1 -13 -16 3 4 -6126 -5 17 -6 7 18 5357 8 -5358 5359 -9 -10 -11 5362 5360 chr 1 138 6133 -5387 144 -6132 -139 140 141 146 -147 6134 145 -170 -142 -143 """ |
import pygraphviz as pgv

from jcvi.utils.iter import pairwise
from jcvi.formats.base import SetFile

p = OptionParser(adjgraph.__doc__)
opts, args = p.parse_args(args)

if len(args) != 2:
    sys.exit(not p.print_help())

infile, subgraph = args
# Markers of interest for the subgraph; strip signs so "-13" matches "13".
subgraph = SetFile(subgraph)
subgraph = set(x.strip("-") for x in subgraph)

G = pgv.AGraph(strict=False)  # allow multi-edge
SG = pgv.AGraph(strict=False)

# One color per genome line in the input file.
palette = ("green", "magenta", "tomato", "peachpuff")
fp = open(infile)
genome_id = -1
key = 0
for row in fp:
    if row.strip() == "":
        continue

    atoms = row.split()
    tag = atoms[0]
    # "chr"/"ChrNumber" lines are headers; skip them.
    if tag in ("ChrNumber", "chr"):
        continue

    if tag == "genome":
        genome_id += 1
        gcolor = palette[genome_id]
        continue

    # Each marker p becomes two nodes, pL and pR (its two ends); strand
    # decides the traversal order of the two ends.
    nodeseq = []
    for p in atoms:
        np = p.strip("-")
        nodeL, nodeR = np + "L", np + "R"
        if p[0] == "-":  # negative strand
            nodeseq += [nodeR, nodeL]
        else:
            nodeseq += [nodeL, nodeR]

    # Consecutive ends along the chromosome define adjacency edges; `key`
    # keeps parallel edges distinct in the multigraph.
    for a, b in pairwise(nodeseq):
        G.add_edge(a, b, key, color=gcolor)
        key += 1

        # Keep the edge in the subgraph only if either marker is selected.
        na, nb = a[:-1], b[:-1]
        if na not in subgraph and nb not in subgraph:
            continue
        SG.add_edge(a, b, key, color=gcolor)

G.graph_attr.update(dpi="300")

fw = open("graph.dot", "w")
G.write(fw)
fw.close()

fw = open("subgraph.dot", "w")
SG.write(fw)
fw.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pairs(args):
""" %prog pairs anchorsfile prefix Convert anchorsfile to pairsfile. """ |
# Convert an anchors file into a .pairs file: one line per gene pair,
# tagged with a zero-padded block id.
p = OptionParser(pairs.__doc__)
opts, args = p.parse_args(args)

if len(args) != 2:
    sys.exit(not p.print_help())

anchorfile, prefix = args
outfile = prefix + ".pairs"
fw = open(outfile, "w")

blocks = AnchorFile(anchorfile).blocks
pad = len(str(len(blocks)))
npairs = 0
for bi, block in enumerate(blocks):
    block_id = "{0}{1:0{2}d}".format(prefix, bi + 1, pad)
    rows = ["\t".join((q, s, score.replace('L', ''), block_id))
            for q, s, score in block]
    npairs += len(rows)
    print("\n".join(sorted(rows)), file=fw)
fw.close()

logging.debug("A total of {0} pairs written to `{1}`.".
              format(npairs, outfile))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def zipbed(args):
""" %prog zipbed species.bed collinear.anchors Build ancestral contig from collinear blocks. For example, to build pre-rho order, use `zipbed rice.bed rice.rice.1x1.collinear.anchors`. The algorithms proceeds by interleaving the genes together. """ |
# Build an ancestral-contig bed by interleaving the genes of each
# collinear block.
p = OptionParser(zipbed.__doc__)
p.add_option("--prefix", default="b",
             help="Prefix for the new seqid [default: %default]")
opts, args = p.parse_args(args)

if len(args) != 2:
    sys.exit(not p.print_help())

bedfile, anchorfile = args
prefix = opts.prefix
bed = Bed(bedfile)
order = bed.order
newbedfile = prefix + ".bed"
fw = open(newbedfile, "w")

blocks = AnchorFile(anchorfile).blocks
pad = len(str(len(blocks)))
for bi, block in enumerate(blocks):
    block_id = "{0}{1:0{2}d}".format(prefix, bi + 1, pad)
    # Map the query/subject accessions of each anchor to bed positions.
    index_pairs = []
    for q, s, score in block:
        qi, q = order[q]
        si, s = order[s]
        index_pairs.append((qi, si))
    # Interleave the two gene orders into a single ancestral order.
    for rank, idx in enumerate(interleave_pairs(index_pairs)):
        accn = bed[idx].accn
        print("\t".join(str(x)
                        for x in (block_id, rank, rank + 1, accn)), file=fw)

logging.debug("Reconstructed bedfile written to `{0}`.".format(newbedfile))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def collinear(args):
""" %prog collinear a.b.anchors Reduce synteny blocks to strictly collinear, use dynamic programming in a procedure similar to DAGchainer. """ |
p = OptionParser(collinear.__doc__)
p.set_beds()
opts, args = p.parse_args(args)

if len(args) != 1:
    sys.exit(not p.print_help())

anchorfile, = args
qbed, sbed, qorder, sorder, is_self = check_beds(anchorfile, p, opts)

af = AnchorFile(anchorfile)
newanchorfile = anchorfile.rsplit(".", 1)[0] + ".collinear.anchors"
fw = open(newanchorfile, "w")

blocks = af.blocks
for block in blocks:
    # "###" separates blocks in the anchors format.
    print("#" * 3, file=fw)
    # Translate accessions into positional indices for the DP routine.
    iblock = []
    for q, s, score in block:
        qi, q = qorder[q]
        si, s = sorder[s]
        score = int(long(score))  # Python 2 `long`
        iblock.append([qi, si, score])

    # Reduce the block to a strictly collinear chain (DAGchainer-like DP).
    block = get_collinear(iblock)

    # Map positional indices back to accessions for output.
    for q, s, score in block:
        q = qbed[q].accn
        s = sbed[s].accn
        print("\t".join((q, s, str(score))), file=fw)

fw.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def counts(args):
""" %prog counts vcffile Collect allele counts from RO and AO fields. """ |
# Report reference (RO) and alternate (AO) allele observation counts for
# every valid bi-allelic variant, one line per sample.
p = OptionParser(counts.__doc__)
opts, args = p.parse_args(args)

if len(args) != 1:
    sys.exit(not p.print_help())

vcffile, = args
vcf_reader = vcf.Reader(open(vcffile))
for record in vcf_reader:
    variant = CPRA(record)
    if not variant.is_valid:
        continue
    for sample in record.samples:
        print("\t".join(str(x)
                        for x in (variant, sample["RO"], sample["AO"])))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def prepare(args):
""" %prog prepare vcffile bamfile Convert vcf and bam to variant list. Inputs are: - vcffile: contains the positions of variants - bamfile: contains the reads that hold the variants Outputs: - reads_to_phase: phasing for each read - variants_to_phase: in format of phased vcf """ |
p = OptionParser(prepare.__doc__)
p.add_option("--accuracy", default=.85,
             help="Sequencing per-base accuracy")
opts, args = p.parse_args(args)

if len(args) != 2:
    sys.exit(not p.print_help())

vcffile, bamfile = args
# Probabilities that the observed base is correct (right) versus a
# sequencing error (wrong), derived from --accuracy.
right = "{:.2f}".format(opts.accuracy)
wrong = "{:.2f}".format(1 - opts.accuracy)

# Import only valid bi-allelic SNVs from the VCF.
vcf_reader = vcf.Reader(open(vcffile))
variants = []
for r in vcf_reader:
    v = CPRA(r)
    if not v.is_valid:
        continue
    variants.append(v)

logging.debug("A total of {} bi-allelic SNVs imported from `{}`".\
                format(len(variants), vcffile))

bamfile = pysam.AlignmentFile(bamfile, "rb")
for v in variants:
    pos = v.pos - 1  # pysam pileup coordinates are 0-based
    for column in bamfile.pileup(v.chr, pos, pos + 1, truncate=True):
        for read in column.pileups:
            query_position = read.query_position
            if query_position is None:
                # None means a deletion/refskip at this column
                continue

            read_name = read.alignment.query_name
            query_base = read.alignment.query_sequence[query_position]

            # Only emit reads supporting one of the two alleles; the
            # "other" allele carries the error probability.
            a, b = v.alleles
            if query_base == a:
                other_base = b
            elif query_base == b:
                other_base = a
            else:
                continue

            print(" ".join(str(x) for x in \
                    (v, read_name, query_base, right, other_base, wrong)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_valid(self):
""" Only retain SNPs or single indels, and are bi-allelic """ |
return len(self.ref) == 1 and \
len(self.alt) == 1 and \
len(self.alt[0]) == 1 |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _number_finder(s, regex, numconv):
"""Helper to split numbers""" |
# Split. If there are no splits, return now
s = regex.split(s)
if len(s) == 1:
return tuple(s)
# Now convert the numbers to numbers, and leave strings as strings
s = remove_empty(s)
for i in range(len(s)):
try:
s[i] = numconv(s[i])
except ValueError:
pass
# If the list begins with a number, lead with an empty string.
# This is used to get around the "unorderable types" issue.
if not isinstance(s[0], six.string_types):
return [''] + s
else:
return s |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def index_natsorted(seq, key=lambda x: x, number_type=float, signed=True, exp=True):
"""\ Sorts a sequence naturally, but returns a list of sorted the indeces and not the sorted list. [2, 0, 1] ['num2', 'num3', 'num5'] ['baz', 'foo', 'bar'] [2, 0, 1] """ |
from operator import itemgetter
item1 = itemgetter(1)
# Pair the index and sequence together, then sort by
index_seq_pair = [[x, key(y)] for x, y in zip(range(len(seq)), seq)]
index_seq_pair.sort(key=lambda x: natsort_key(item1(x),
number_type=number_type,
signed=signed, exp=exp))
return [x[0] for x in index_seq_pair] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def batchseeds(args):
""" %prog batchseeds folder Extract seed metrics for each image in a directory. """ |
from jcvi.formats.pdf import cat

# Arguments beyond the folder are forwarded verbatim to seeds() per image.
xargs = args[1:]
p = OptionParser(batchseeds.__doc__)
opts, args, iopts = add_seeds_options(p, args)

if len(args) != 1:
    sys.exit(not p.print_help())

folder, = args
folder = folder.rstrip('/')
outdir = folder + "-debug"
outfile = folder + "-output.tsv"
assert op.isdir(folder)
images = []
# Use an explicit calibration file if supplied, else look in the folder.
jsonfile = opts.calibrate or op.join(folder, "calibrate.json")
if not op.exists(jsonfile):
    jsonfile = None
for im in iglob(folder, "*.jpg,*.JPG,*.png"):
    # Skip intermediate images produced by a previous run.
    if im.endswith((".resize.jpg", ".main.jpg", ".label.jpg")):
        continue
    if op.basename(im).startswith("calibrate"):
        continue
    images.append(im)

fw = must_open(outfile, 'w')
print(Seed.header(calibrate=jsonfile), file=fw)
nseeds = 0
for im in images:
    imargs = [im, "--noheader", "--outdir={0}".format(outdir)] + xargs
    if jsonfile:
        imargs += ["--calibrate={0}".format(jsonfile)]
    objects = seeds(imargs)
    for o in objects:
        print(o, file=fw)
    nseeds += len(objects)
fw.close()
logging.debug("Processed {0} images.".format(len(images)))
logging.debug("A total of {0} objects written to `{1}`.".\
                format(nseeds, outfile))

# Combine the per-image debug PDFs into one report.
pdfs = iglob(outdir, "*.pdf")
outpdf = folder + "-output.pdf"
cat(pdfs + ["--outfile={0}".format(outpdf)])
logging.debug("Debugging information written to `{0}`.".format(outpdf))
return outfile
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def filterbedgraph(args):
""" %prog filterbedgraph a.bedgraph 1 Filter the bedGraph, typically from the gem-mappability pipeline. Unique regions are 1, two copies .5, etc. """ |
# Keep bedGraph intervals whose score meets the cutoff, then merge the
# surviving intervals.
p = OptionParser(filterbedgraph.__doc__)
opts, args = p.parse_args(args)

if len(args) != 2:
    sys.exit(not p.print_help())

bedgraphfile, cutoff = args
c = float(cutoff)
pf = bedgraphfile.rsplit(".", 1)[0]
filteredbed = pf + ".filtered-{}.bed".format(cutoff)
nfiltered = ntotal = 0
# Fix: use context managers so both handles are closed (the input handle
# previously leaked; the output was closed manually).
with open(bedgraphfile) as fp, open(filteredbed, "w") as fw:
    for row in fp:
        b = BedLine(row)
        ntotal += 1
        # The bedGraph score rides in the 4th (accn) column.
        if float(b.accn) >= c:
            print(b, file=fw)
            nfiltered += 1

logging.debug("A total of {} intervals (score >= {}) written to `{}`".\
                format(percentage(nfiltered, ntotal), cutoff, filteredbed))

mergeBed(filteredbed, sorted=True, delim=None)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def tiling(args):
""" %prog tiling bedfile Compute minimum tiling path using as few clones as possible. Implemented with dynamic programming. Greedy algorithm may also work according a stackoverflow source. """ |
p = OptionParser(tiling.__doc__)
p.add_option("--overlap", default=3000, type="int",
             help="Minimum amount of overlaps required")
p.set_verbose()
opts, args = p.parse_args(args)

if len(args) != 1:
    sys.exit(not p.print_help())

bedfile, = args
ov = opts.overlap

bed = Bed(bedfile)
inf = len(bed)  # serves as "infinity" for the DP counts below
selected = Bed()
for seqid, sbed in bed.sub_beds():
    g = Grouper()
    current = sbed[0]
    # Partition connected features
    for a in sbed:
        g.join(a)
        # requires a real overlap
        if a.start < current.end - ov:
            g.join(a, current)
        if a.end > current.end:
            current = a

    # Process per partition
    for gbed in g:
        end = max(x.end for x in gbed)
        gbed.sort(key=lambda x: (x.start, -x.end))
        entries = len(gbed)
        # counts[j]: fewest clones in a chain from the first clone to j;
        # traceback[j]: predecessor clone in that optimal chain.
        counts = [inf] * entries
        counts[0] = 1
        traceback = [-1] * entries
        for i, a in enumerate(gbed):
            for j in xrange(i + 1, entries):
                b = gbed[j]
                # Clones are sorted by start, so once b no longer overlaps
                # a (by at least ov), none of the later ones do either.
                if b.start >= a.end - ov:
                    break
                # Two ranges overlap!
                if counts[i] + 1 < counts[j]:
                    counts[j] = counts[i] + 1
                    traceback[j] = i
        # Among clones reaching the partition end, pick one and follow the
        # back-pointers to recover the tiling chain.
        # NOTE(review): the min() below selects by smallest traceback index
        # rather than smallest counts[i] — verify this is intended.
        endi = [i for i, a in enumerate(gbed) if a.end == end]
        last = min((traceback[i], i) for i in endi)[1]
        chain = []
        while last != -1:
            chain.append(last)
            last = traceback[last]
        chain = chain[::-1]
        selected.extend([gbed[x] for x in chain])

        if opts.verbose:
            print(counts)
            print(traceback)
            print(chain)
            print("\n".join(str(x) for x in gbed))
            print("*" * 30)
            print("\n".join(str(gbed[x]) for x in chain))
            print()

tilingbedfile = bedfile.rsplit(".", 1)[0] + ".tiling.bed"
selected.print_to_file(filename=tilingbedfile, sorted=True)
logging.debug("A total of {} tiling features written to `{}`"\
        .format(len(selected), tilingbedfile))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def chain(args):
""" %prog chain bedfile Chain BED segments together. """ |
p = OptionParser(chain.__doc__)
# Fix: --dist was parsed as a string (no type given), so the comparison
# `b.start - a.end < opts.dist` compared an int against a str whenever the
# user supplied the option; declare it as an int.
p.add_option("--dist", default=100000, type="int", help="Chaining distance")
p.set_outfile()
opts, args = p.parse_args(args)

if len(args) != 1:
    sys.exit(not p.print_help())

bedfile, = args
# Pre-sort by accn, then seqid/start/end, so groupby sees each accn once.
cmd = "sort -k4,4 -k1,1 -k2,2n -k3,3n {0} -o {0}".format(bedfile)
sh(cmd)

bed = Bed(bedfile, sorted=False)
newbed = Bed()
for accn, bb in groupby(bed, key=lambda x: x.accn):
    bb = list(bb)
    g = Grouper()
    for a in bb:
        g.join(a)
    # Chain adjacent segments on the same seqid that are within --dist.
    for a, b in pairwise(bb):
        if a.seqid == b.seqid and b.start - a.end < opts.dist:
            g.join(a, b)

    # Each chained group becomes its bounding interval, scored by the
    # total span; keep only the best-scoring chain per accn.
    # (Loop variable renamed from `p`, which shadowed the OptionParser.)
    data = []
    for chained in g:
        seqid = chained[0].seqid
        start = min(x.start for x in chained)
        end = max(x.end for x in chained)
        score = sum(x.span for x in chained)
        data.append((seqid, start - 1, end, accn, score))

    d = max(data, key=lambda x: x[-1])
    newbed.append(BedLine("\t".join(str(x) for x in d)))

newbed.print_to_file(opts.outfile, sorted=True)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def density(args):
""" %prog density bedfile ref.fasta Calculates density of features per seqid. """ |
# Report the number of features, sequence size, and features-per-Mb for
# every seqid in the bed file.
p = OptionParser(density.__doc__)
opts, args = p.parse_args(args)

if len(args) != 2:
    sys.exit(not p.print_help())

bedfile, fastafile = args
bed = Bed(bedfile)
sizes = Sizes(fastafile).mapping

print("\t".join("seqid features size density_per_Mb".split()))
for seqid, features in bed.sub_beds():
    count = len(features)
    seqsize = sizes[seqid]
    per_mb = count * 1e6 / seqsize
    print("\t".join(str(x) for x in \
            (seqid, count, seqsize, "{0:.1f}".format(per_mb))))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def alignextend(args):
""" %prog alignextend bedpefile ref.fasta Similar idea to alignextend, using mates from BEDPE and FASTA ref. See AMOS script here: https://github.com/nathanhaigh/amos/blob/master/src/Experimental/alignextend.pl """ |
p = OptionParser(alignextend.__doc__)
p.add_option("--len", default=100, type="int",
             help="Extend to this length")
p.add_option("--qv", default=31, type="int",
             help="Dummy qv score for extended bases")
p.add_option("--bedonly", default=False, action="store_true",
             help="Only generate bed files, no FASTA")
p.set_bedpe()
opts, args = p.parse_args(args)

if len(args) != 2:
    sys.exit(not p.print_help())

bedpe, ref = args
# Phred+33 quality character used for all extended bases.
qvchar = chr(opts.qv + 33)
pf = bedpe.split(".")[0]

# Step 1: filter BEDPE records by the orientation/length options.
filtered = bedpe + ".filtered"
if need_update(bedpe, filtered):
    filter_bedpe(bedpe, filtered, ref, rc=opts.rc,
                 minlen=opts.minlen, maxlen=opts.maxlen, rlen=opts.rlen)

# Step 2: remove duplicate pairs (within --dup wiggle).
rmdup = filtered + ".filtered.sorted.rmdup"
if need_update(filtered, rmdup):
    rmdup_bedpe(filtered, rmdup, dupwiggle=opts.dup)

if opts.bedonly:
    return

# Step 3: split the pairs into two per-end BED files (columns 1-3 and 4-6
# plus the shared name/score/strand columns).
bed1, bed2 = pf + ".1e.bed", pf + ".2e.bed"
if need_update(rmdup, (bed1, bed2)):
    sh("cut -f1-3,7-9 {0}".format(rmdup), outfile=bed1)
    sh("cut -f4-6,7-8,10 {0}".format(rmdup), outfile=bed2)

# Step 4: extract stranded sequences per end...
sfa1, sfa2 = pf + ".1e.sfa", pf + ".2e.sfa"
if need_update((bed1, bed2, ref), (sfa1, sfa2)):
    for bed in (bed1, bed2):
        fastaFromBed(bed, ref, name=True, tab=True, stranded=True)

# ...then convert each to FASTQ with the dummy quality character.
fq1, fq2 = pf + ".1e.fq", pf + ".2e.fq"
if need_update((sfa1, sfa2), (fq1, fq2)):
    for sfa in (sfa1, sfa2):
        sfa_to_fq(sfa, qvchar)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def seqids(args):
""" %prog seqids bedfile Print out all seqids on one line. Useful for graphics.karyotype. """ |
# Print up to --maxn seqids, comma-joined, after prefix/exclusion filters.
p = OptionParser(seqids.__doc__)
p.add_option("--maxn", default=100, type="int",
             help="Maximum number of seqids")
p.add_option("--prefix", help="Seqids must start with")
p.add_option("--exclude", default="random", help="Seqids should not contain")
opts, args = p.parse_args(args)

if len(args) < 1:
    sys.exit(not p.print_help())

bedfile, = args
wanted = Bed(bedfile).seqids
if opts.prefix:
    wanted = [s for s in wanted if s.startswith(opts.prefix)]
if opts.exclude:
    wanted = [s for s in wanted if opts.exclude not in s]
print(",".join(wanted[:opts.maxn]))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def random(args):
""" %prog random bedfile number_of_features Extract a random subset of features. Number of features can be an integer number, or a fractional number in which case a random fraction (for example 0.1 = 10% of all features) will be extracted. """ |
from random import sample
from jcvi.formats.base import flexible_cast

p = OptionParser(random.__doc__)
opts, args = p.parse_args(args)

if len(args) != 2:
    sys.exit(not p.print_help())

bedfile, N = args
assert is_number(N)

bed = Bed(bedfile)
nwanted = flexible_cast(N)
if nwanted < 1:
    # A fractional N means a proportion of all features.
    nwanted = int(round(nwanted * len(bed)))

chosen = Bed()
chosen.extend(sample(bed, nwanted))

outfile = bedfile.rsplit(".", 1)[0] + ".{0}.bed".format(N)
chosen.print_to_file(outfile)
logging.debug("Write {0} features to `{1}`".format(nwanted, outfile))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def filter(args):
""" %prog filter bedfile Filter the bedfile to retain records between certain size range. """ |
p = OptionParser(filter.__doc__)
p.add_option("--minsize", default=0, type="int",
             help="Minimum feature length")
# Fix: the --maxsize help text previously read "Minimum feature length",
# duplicated from the option above.
p.add_option("--maxsize", default=1000000000, type="int",
             help="Maximum feature length")
p.add_option("--minaccn", type="int",
             help="Minimum value of accn, useful to filter based on coverage")
p.add_option("--minscore", type="int", help="Minimum score")
p.set_outfile()
opts, args = p.parse_args(args)

if len(args) != 1:
    sys.exit(not p.print_help())

bedfile, = args
fp = must_open(bedfile)
fw = must_open(opts.outfile, "w")
minsize, maxsize = opts.minsize, opts.maxsize
minaccn = opts.minaccn
minscore = opts.minscore
total = []
keep = []
for row in fp:
    try:
        b = BedLine(row)
    except IndexError:
        # Pass non-BED lines (e.g. track headers) through untouched.
        print(row.strip(), file=fw)
        continue
    span = b.span
    total.append(span)
    if not minsize <= span <= maxsize:
        continue
    if minaccn and int(b.accn) < minaccn:
        continue
    if minscore and int(b.score) < minscore:
        continue
    print(b, file=fw)
    keep.append(span)

logging.debug("Stats: {0} features kept.".\
                format(percentage(len(keep), len(total))))
logging.debug("Stats: {0} bases kept.".\
                format(percentage(sum(keep), sum(total))))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mergebydepth(args):
""" %prog mergebydepth reads.bed genome.fasta Similar to mergeBed, but only returns regions beyond certain depth. """ |
# Build a bedGraph of read depth, drop intervals below --mindepth, then
# merge what remains.
p = OptionParser(mergebydepth.__doc__)
p.add_option("--mindepth", default=3, type="int",
             help="Minimum depth required")
opts, args = p.parse_args(args)

if len(args) != 2:
    sys.exit(not p.print_help())

bedfile, fastafile = args
depth_cutoff = opts.mindepth

graph = make_bedgraph(bedfile)
filtered = graph + ".d{0}".format(depth_cutoff)
if need_update(graph, filtered):
    filter([graph,
            "--minaccn={0}".format(depth_cutoff),
            "--outfile={0}".format(filtered)])

target = filtered + ".merge.fasta"
if need_update(filtered, target):
    mergeBed(filtered, sorted=True)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def depth(args):
""" %prog depth reads.bed features.bed Calculate depth depth per feature using coverageBed. """ |
# Compute per-feature mean depth with coverageBed, carrying all original
# feature columns through groupBy.
p = OptionParser(depth.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)

if len(args) != 2:
    sys.exit(not p.print_help())

readsbed, featsbed = args
# Fix: read the first line inside `with` so the handle is closed instead
# of leaked. The column count determines which columns groupBy keeps.
with open(featsbed) as fp:
    nargs = len(fp.readline().split("\t"))
keepcols = ",".join(str(x) for x in range(1, nargs + 1))
cmd = "coverageBed -a {0} -b {1} -d".format(readsbed, featsbed)
cmd += " | groupBy -g {0} -c {1} -o mean".format(keepcols, nargs + 2)
sh(cmd, outfile=opts.outfile)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def remove_isoforms(ids):
""" This is more or less a hack to remove the GMAP multiple mappings. Multiple GMAP mappings can be seen given the names .mrna1, .mrna2, etc. """ |
key = lambda x: x.rsplit(".", 1)[0]
iso_number = lambda x: get_number(x.split(".")[-1])
ids = sorted(ids, key=key)
newids = []
for k, ii in groupby(ids, key=key):
min_i = min(list(ii), key=iso_number)
newids.append(min_i)
return newids |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def longest(args):
""" %prog longest bedfile fastafile Select longest feature within overlapping piles. """ |
from jcvi.formats.sizes import Sizes

p = OptionParser(longest.__doc__)
p.add_option("--maxsize", default=20000, type="int",
             help="Limit max size")
p.add_option("--minsize", default=60, type="int",
             help="Limit min size")
p.add_option("--precedence", default="Medtr",
             help="Accessions with prefix take precedence")
opts, args = p.parse_args(args)

if len(args) != 2:
    sys.exit(not p.print_help())

bedfile, fastafile = args
maxsize = opts.maxsize
minsize = opts.minsize
prec = opts.precedence
# Collapse overlapping features into piles; mergeBed joins accns by ";".
mergedbed = mergeBed(bedfile, nms=True)
sizes = Sizes(fastafile).mapping
bed = Bed(mergedbed)

pf = bedfile.rsplit(".", 1)[0]
ids = set()
for b in bed:
    accns = b.accn.split(";")
    # If any accession carries the preferred prefix, restrict the choice
    # to those.
    prec_accns = [x for x in accns if x.startswith(prec)]
    if prec_accns:
        accns = prec_accns
    # Pick the longest accession within [minsize, maxsize).
    accn_sizes = [(sizes.get(x, 0), x) for x in accns]
    accn_sizes = [(size, x) for size, x in accn_sizes if size < maxsize]
    if not accn_sizes:
        continue
    max_size, max_accn = max(accn_sizes)
    if max_size < minsize:
        continue
    ids.add(max_accn)

# Collapse GMAP multi-mappings (.mrna1, .mrna2, ...) to one per gene.
newids = remove_isoforms(ids)
logging.debug("Remove isoforms: before={0} after={1}".\
                format(len(ids), len(newids)))

# Write the retained ids, then subset the original bed by those ids.
longestidsfile = pf + ".longest.ids"
fw = open(longestidsfile, "w")
print("\n".join(newids), file=fw)
fw.close()
logging.debug("A total of {0} records written to `{1}`.".\
                format(len(newids), longestidsfile))

longestbedfile = pf + ".longest.bed"
some([bedfile, longestidsfile, "--outfile={0}".format(longestbedfile),
        "--no_strip_names"])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def merge(args):
""" %prog merge bedfiles > newbedfile Concatenate bed files together. Performing seqid and name changes to avoid conflicts in the new bed file. """ |
# Concatenate bed files; prefix each seqid with its file's basename so
# seqids from different files never collide.
p = OptionParser(merge.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)

if len(args) < 1:
    sys.exit(not p.print_help())

fw = must_open(opts.outfile, "w")
for bedfile in args:
    prefix = op.basename(bedfile).split(".")[0]
    for feat in Bed(bedfile):
        feat.seqid = "_".join((prefix, feat.seqid))
        print(feat, file=fw)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fix(args):
""" %prog fix bedfile > newbedfile Fix non-standard bed files. One typical problem is start > end. """ |
p = OptionParser(fix.__doc__)
p.add_option("--minspan", default=0, type="int",
             help="Enforce minimum span [default: %default]")
p.set_outfile()
opts, args = p.parse_args(args)

if len(args) != 1:
    sys.exit(not p.print_help())

bedfile, = args
minspan = opts.minspan
fp = open(bedfile)
fw = must_open(opts.outfile, "w")
nfixed = nfiltered = ntotal = 0
for row in fp:
    atoms = row.strip().split("\t")
    assert len(atoms) >= 3, "Must be at least 3 columns"
    seqid, start, end = atoms[:3]
    start, end = int(start), int(end)
    orientation = '+'
    if start > end:
        # Reversed coordinates: swap them and remember the implied strand.
        start, end = end, start
        orientation = '-'
        nfixed += 1

    atoms[1:3] = [str(start), str(end)]
    if len(atoms) > 6:
        # NOTE(review): orientation is written to column 7 (atoms[6]),
        # not the standard BED strand column 6 — verify this dialect.
        atoms[6] = orientation
    line = "\t".join(atoms)
    b = BedLine(line)

    # Only records meeting --minspan are written out.
    if b.span >= minspan:
        print(b, file=fw)
        nfiltered += 1

    ntotal += 1

if nfixed:
    logging.debug("Total fixed: {0}".format(percentage(nfixed, ntotal)))
if nfiltered:
    logging.debug("Total filtered: {0}".format(percentage(nfiltered, ntotal)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def some(args):
""" %prog some bedfile idsfile > newbedfile Retrieve a subset of bed features given a list of ids. """ |
from jcvi.formats.base import SetFile
from jcvi.utils.cbook import gene_name

p = OptionParser(some.__doc__)
p.add_option("-v", dest="inverse", default=False, action="store_true",
             help="Get the inverse, like grep -v [default: %default]")
p.set_outfile()
p.set_stripnames()
opts, args = p.parse_args(args)

if len(args) != 2:
    sys.exit(not p.print_help())

bedfile, idsfile = args
fw = must_open(opts.outfile, "w")
wanted = SetFile(idsfile)
if opts.strip_names:
    wanted = set(gene_name(x) for x in wanted)

ntotal = nkeep = 0
for feat in Bed(bedfile):
    ntotal += 1
    # Inequality against --inverse flips the selection, like `grep -v`.
    if (feat.accn in wanted) != opts.inverse:
        nkeep += 1
        print(feat, file=fw)

fw.close()
logging.debug("Stats: {0} features kept.".\
                format(percentage(nkeep, ntotal)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def uniq(args):
    """
    %prog uniq bedfile

    Remove overlapping features with higher scores.
    """
    from jcvi.formats.sizes import Sizes

    p = OptionParser(uniq.__doc__)
    p.add_option("--sizes", help="Use sequence length as score")
    p.add_option("--mode", default="span", choices=("span", "score"),
                 help="Pile mode")
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())

    bedfile, = args
    uniqbedfile = bedfile.split(".")[0] + ".uniq.bed"
    bed = Bed(bedfile)

    # Build Range tuples whose score drives the chaining: sequence
    # length (--sizes), feature span, or the BED score column.
    if opts.sizes:
        sizes = Sizes(opts.sizes).mapping
        ranges = [Range(x.seqid, x.start, x.end, sizes[x.accn], i)
                  for i, x in enumerate(bed)]
    elif opts.mode == "span":
        ranges = [Range(x.seqid, x.start, x.end, x.end - x.start + 1, i)
                  for i, x in enumerate(bed)]
    else:
        ranges = [Range(x.seqid, x.start, x.end, float(x.score), i)
                  for i, x in enumerate(bed)]

    selected, score = range_chain(ranges)
    keep_idx = set(x.id for x in selected)

    newbed = Bed()
    newbed.extend(bed[x.id] for x in selected)
    newbed.print_to_file(uniqbedfile, sorted=True)

    # Everything not chosen by range_chain goes to a leftover file
    leftovers = [x for i, x in enumerate(bed) if i not in keep_idx]
    if leftovers:
        leftoverfile = bedfile.split(".")[0] + ".leftover.bed"
        leftoverbed = Bed()
        leftoverbed.extend(leftovers)
        leftoverbed.print_to_file(leftoverfile, sorted=True)

    logging.debug("Imported: {0}, Exported: {1}".format(len(bed), len(newbed)))

    return uniqbedfile
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pile(args):
    """
    %prog pile abedfile bbedfile > piles

    Call intersectBed on two bedfiles.
    """
    from jcvi.utils.grouper import Grouper

    p = OptionParser(pile.__doc__)
    p.add_option("--minOverlap", default=0, type="int",
                 help="Minimum overlap required [default: %default]")
    opts, args = p.parse_args(args)
    if len(args) != 2:
        sys.exit(not p.print_help())

    abedfile, bbedfile = args
    iw = intersectBed_wao(abedfile, bbedfile, minOverlap=opts.minOverlap)

    # Cluster mutually overlapping a- and b-features into piles
    groups = Grouper()
    for a, b in iw:
        if b is None:
            # BUGFIX: intersectBed_wao yields (a, None) for features with
            # no overlap partner (see refine); previously this crashed on
            # b.accn with an AttributeError.
            continue
        groups.join(a.accn, b.accn)

    ngroups = 0
    for group in groups:
        if len(group) > 1:
            ngroups += 1
            print("|".join(group))

    logging.debug("A total of {0} piles (>= 2 members)".format(ngroups))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def index(args):
    """
    %prog index bedfile

    Compress and index bedfile using `tabix`. Use --fasta to give a FASTA
    file so that a bedgraph file can be generated and indexed.
    """
    p = OptionParser(index.__doc__)
    p.add_option("--fasta", help="Generate bedgraph and index")
    p.add_option("--query", help="Chromosome location")
    p.set_outfile()

    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())

    bedfile, = args
    if opts.fasta:
        # Convert to a bedgraph first; tabix then indexes the bedgraph
        bedfile = make_bedgraph(bedfile, opts.fasta)

    bedfile = sort([bedfile])

    # bgzip-compress, then build the .tbi index, skipping up-to-date steps
    gzfile = bedfile + ".gz"
    if need_update(bedfile, gzfile):
        sh("bgzip {0}".format(bedfile))

    tbifile = gzfile + ".tbi"
    if need_update(gzfile, tbifile):
        sh("tabix -p bed {0}".format(gzfile))

    if not opts.query:
        return

    # Extract the requested region from the indexed file
    sh("tabix {0} {1}".format(gzfile, opts.query), outfile=opts.outfile)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def evaluate(args):
    """
    %prog evaluate prediction.bed reality.bed fastafile

    Make a truth table like:

                      True   False  --- Reality
           True       TP     FP
           False      FN     TN
            |----Prediction

    Sn = TP / (all true in reality) = TP / (TP + FN)
    Sp = TP / (all true in prediction) = TP / (TP + FP)
    Ac = (TP + TN) / (TP + FP + FN + TN)
    """
    from jcvi.formats.sizes import Sizes

    p = OptionParser(evaluate.__doc__)
    p.add_option("--query",
                 help="Chromosome location [default: %default]")
    opts, args = p.parse_args(args)
    if len(args) != 3:
        sys.exit(not p.print_help())

    prediction, reality, fastafile = args
    query = opts.query
    prediction = mergeBed(prediction)
    reality = mergeBed(reality)
    sizes = Sizes(fastafile)
    sizesfile = sizes.filename

    # The complements supply the "False" rows of the truth table
    prediction_complement = complementBed(prediction, sizesfile)
    reality_complement = complementBed(reality, sizesfile)

    TPbed = intersectBed(prediction, reality)
    FPbed = intersectBed(prediction, reality_complement)
    FNbed = intersectBed(prediction_complement, reality)
    TNbed = intersectBed(prediction_complement, reality_complement)
    beds = (TPbed, FPbed, FNbed, TNbed)

    if query:
        # Restrict each of the four beds to the queried region
        subbeds = []
        rr = query_to_range(query, sizes)
        ce = 'echo "{0}"'.format("\t".join(str(x) for x in rr))
        for b in beds:
            subbed = ".".join((b, query))
            sh(ce + " | intersectBed -a stdin -b {0}".format(b),
               outfile=subbed)
            subbeds.append(subbed)
        beds = subbeds

    be = BedEvaluate(*beds)
    print(be, file=sys.stderr)

    if query:
        # The per-region temp files are no longer needed
        for b in beds:
            os.remove(b)

    return be
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def refine(args):
    """
    %prog refine bedfile1 bedfile2 refinedbed

    Refine bed file using a second bed file. The final bed keeps all the
    intervals in bedfile1, but each one is trimmed to its intersection
    with bedfile2 whenever the two overlap.
    """
    p = OptionParser(refine.__doc__)
    opts, args = p.parse_args(args)
    if len(args) != 3:
        sys.exit(not p.print_help())

    abedfile, bbedfile, refinedbed = args
    fw = open(refinedbed, "w")
    intersected = refined = 0
    for a, b in intersectBed_wao(abedfile, bbedfile):
        if b is None:
            # No overlap partner: pass the a-feature through untouched
            print(a, file=fw)
            continue

        intersected += 1
        span_before = a.span
        # Clip a down to the window it shares with b
        a.start, a.end = range_intersect((a.start, a.end),
                                         (b.start, b.end))
        if a.span < span_before:
            refined += 1
        print(a, file=fw)

    fw.close()
    print("Total intersected: {0}".format(intersected), file=sys.stderr)
    print("Total refined: {0}".format(refined), file=sys.stderr)
    summary([abedfile])
    summary([refinedbed])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def distance(args):
    """
    %prog distance bedfile

    Calculate distance between bed features. The output file is a list of
    distances, which can be used to plot histogram, etc.
    """
    from jcvi.utils.iter import pairwise

    p = OptionParser(distance.__doc__)
    p.add_option("--distmode", default="ss", choices=("ss", "ee"),
            help="Distance mode between paired reads. ss is outer distance, " \
                 "ee is inner distance [default: %default]")
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())

    bedfile, = args
    sortedbedfile = sort([bedfile])
    valid = total = 0
    fp = open(sortedbedfile)
    # Walk consecutive features; strand is forced to '+' so only the
    # coordinates (not orientation) contribute to the distance
    for aline, bline in pairwise(fp):
        a, b = BedLine(aline), BedLine(bline)
        dist, oo = range_distance((a.seqid, a.start, a.end, "+"),
                                  (b.seqid, b.start, b.end, "+"),
                                  distmode=opts.distmode)
        total += 1
        if dist > 0:
            print(dist)
            valid += 1

    logging.debug("Total valid (> 0) distances: {0}.".\
                    format(percentage(valid, total)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bedpe(args):
    """
    %prog bedpe bedfile

    Convert to bedpe format. Use --span to write another bed file that
    contain the span of the read pairs.
    """
    from jcvi.assembly.coverage import bed_to_bedpe

    p = OptionParser(bedpe.__doc__)
    p.add_option("--span", default=False, action="store_true",
                 help="Write span bed file [default: %default]")
    p.add_option("--strand", default=False, action="store_true",
                 help="Write the strand columns [default: %default]")
    p.add_option("--mates", help="Check the library stats from .mates file")
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())

    bedfile, = args
    pf = bedfile.rsplit(".", 1)[0]
    bedpefile = pf + ".bedpe"
    # Only produce the pair-span bed when --span was requested
    bedspanfile = pf + ".spans.bed" if opts.span else None
    bed_to_bedpe(bedfile, bedpefile,
                 pairsbedfile=bedspanfile,
                 matesfile=opts.mates,
                 strand=opts.strand)
    return bedpefile, bedspanfile
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sizes(args):
    """
    %prog sizes bedfile

    Infer the sizes for each seqid. Useful before dot plots.
    """
    p = OptionParser(sizes.__doc__)
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())

    bedfile, = args
    assert op.exists(bedfile)

    sizesfile = bedfile.rsplit(".", 1)[0] + ".sizes"
    # must_open with checkexists/skipcheck presumably returns a falsy
    # handle when the file already exists -- verify; in that case the
    # existing sizes file is reused as-is
    fw = must_open(sizesfile, "w", checkexists=True, skipcheck=True)
    if fw:
        b = Bed(bedfile)
        # The size of a seqid is the rightmost end among its features
        for seqid, sbeds in b.sub_beds():
            print("{0}\t{1}".format(seqid,
                                    max(x.end for x in sbeds)), file=fw)
        logging.debug("Sizes file written to `{0}`.".format(sizesfile))

    return sizesfile
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def analyze_dists(dists, cutoff=1000, alpha=.1):
    """
    Return a representative distance for a possibly bimodal distribution.

    Distances from a mate-pair library may show two peaks; split them at
    `cutoff` and, when the upper component holds at least an `alpha`
    fraction of the data, report the median of that (selected) peak.
    Otherwise treat the data as unimodal and report the overall median.
    """
    below = [x for x in dists if x < cutoff]
    above = [x for x in dists if x >= cutoff]
    n_below, n_above = len(below), len(above)
    logging.debug("Component counts: {0} {1}".format(n_below, n_above))

    # One component empty, or the upper one too small: unimodal
    if n_below == 0 or n_above == 0 or \
            float(n_above) / len(dists) < alpha:
        logging.debug("Single peak identified ({0} / {1} < {2})".\
                    format(n_above, len(dists), alpha))
        return np.median(dists)

    med_below = np.median(below)
    med_above = np.median(above)
    logging.debug("Dual peaks identified: {0}bp ({1}), {2}bp ({3}) (selected)".\
                    format(int(med_below), n_below, int(med_above), n_above))
    # The larger-distance peak is the one of interest for mate pairs
    return med_above
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def summary(args):
    """
    %prog summary bedfile

    Sum the total lengths of the intervals.
    """
    p = OptionParser(summary.__doc__)
    p.add_option("--sizes", default=False, action="store_true",
                 help="Write .sizes file")
    p.add_option("--all", default=False, action="store_true",
                 help="Write summary stats per seqid")
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())

    bedfile, = args
    bed = Bed(bedfile)
    bs = BedSummary(bed)

    if opts.sizes:
        # Dump one span per line; handy as histogram input
        sizesfile = bedfile + ".sizes"
        with open(sizesfile, "w") as fw:
            for span, accn in bs.mspans:
                print(span, file=fw)
        logging.debug("Spans written to `{0}`.".format(sizesfile))
        return bs

    if not opts.all:
        bs.report()
        return bs

    # Per-seqid summary, one tab-separated row each
    for seqid, subbeds in bed.sub_beds():
        print("\t".join((seqid, str(BedSummary(subbeds)))))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.