text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_segments(ranges, extra, minsegment=40):
    """
    Given a list of Range, perform chaining on the ranges and select a
    highest scoring subset and cut based on their boundaries. Let's say the
    projection of the synteny blocks onto one axis look like the following.

    Then the segmentation will yield a block [1, 20), [20, 35), using an
    arbitrary right extension rule. Extra are additional end breaks for
    chromosomes.

    Yields (left, right) coordinate pairs for each segment; segments shorter
    than `minsegment` between consecutive chained starts are merged into the
    following one.
    """
    from jcvi.utils.range import range_chain, LEFT, RIGHT

    NUL = 2  # sentinel tag for chained-range start points (distinct from LEFT/RIGHT)
    selected, score = range_chain(ranges)

    # Tag every breakpoint so the sweep below knows how to treat it:
    # chained starts (NUL), chromosome starts (LEFT), chromosome ends (RIGHT).
    endpoints = [(x.start, NUL) for x in selected]
    endpoints += [(x[0], LEFT) for x in extra]
    endpoints += [(x[1], RIGHT) for x in extra]
    endpoints.sort()

    current_left = 0
    for a, ai in endpoints:
        if ai == LEFT:
            # A chromosome start resets the open segment boundary.
            current_left = a
        if ai == RIGHT:
            # A chromosome end always closes the current segment.
            yield current_left, a
        elif ai == NUL:
            # Skip cut points that would produce a segment < minsegment.
            if a - current_left < minsegment:
                continue
            yield current_left, a - 1
            current_left = a
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def entropy(args):
    """
    %prog entropy kmc_dump.out

    kmc_dump.out contains two columns:
    AAAAAAAAAAAGAAGAAAGAAA 34
    """
    p = OptionParser(entropy.__doc__)
    p.add_option("--threshold", default=0, type="int",
                 help="Complexity needs to be above")
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    kmc_out, = args
    # Stream the dump file, emitting only k-mers whose sequence complexity
    # reaches the requested threshold.
    handle = open(kmc_out)
    for line in handle:
        kmer, count = line.split()
        complexity = entropy_score(kmer)
        if complexity < opts.threshold:
            continue
        print("{} {} {:.2f}".format(kmer, count, complexity))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bed(args):
    """
    %prog bed fastafile kmer.dump.txt

    Map kmers on FASTA.

    Prints a 4-column BED line (seqid, start, end, kmer) for every position
    in the FASTA whose k-mer (forward strand) is present in the dump file,
    counting both the dumped k-mers and their reverse complements.
    """
    from jcvi.formats.fasta import rc, parse_fasta

    p = OptionParser(bed.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    fastafile, dumpfile = args
    fp = open(dumpfile)
    KMERS = set()
    K = 0  # set from the dump records below; 0 if the dump is empty
    for row in fp:
        kmer = row.split()[0]
        kmer_rc = rc(kmer)
        KMERS.add(kmer)
        KMERS.add(kmer_rc)
        # All k-mers in the dump are assumed to share the same length
        K = len(kmer)
    logging.debug("Imported {} {}-mers".format(len(KMERS), K))

    for name, seq in parse_fasta(fastafile):
        name = name.split()[0]
        # FIX: use len(seq) - K + 1 so the final k-mer of each sequence is
        # also examined (the original range stopped one window short).
        for i in range(len(seq) - K + 1):
            if i % 5000000 == 0:
                # Progress heartbeat on long chromosomes
                print("{}:{}".format(name, i), file=sys.stderr)
            kmer = seq[i: i + K]
            if kmer in KMERS:
                print("\t".join(str(x) for x in (name, i, i + K, kmer)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def kmc(args):
    """
    %prog kmc folder

    Run kmc3 on Illumina reads.

    Writes a Makefile (via MakeManager) with one kmc invocation per read
    pair/sample found under `folder`.
    """
    p = OptionParser(kmc.__doc__)
    p.add_option("-k", default=21, type="int", help="Kmer size")
    p.add_option("--ci", default=2, type="int",
                 help="Exclude kmers with less than ci counts")
    p.add_option("--cs", default=2, type="int",
                 help="Maximal value of a counter")
    p.add_option("--cx", default=None, type="int",
                 help="Exclude kmers with more than cx counts")
    p.add_option("--single", default=False, action="store_true",
                 help="Input is single-end data, only one FASTQ/FASTA")
    p.add_option("--fasta", default=False, action="store_true",
                 help="Input is FASTA instead of FASTQ")
    p.set_cpus()
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    folder, = args
    K = opts.k
    n = 1 if opts.single else 2  # expected number of read files per sample
    pattern = "*.fa,*.fa.gz,*.fasta,*.fasta.gz" if opts.fasta else \
              "*.fq,*.fq.gz,*.fastq,*.fastq.gz"

    mm = MakeManager()
    # NOTE: loop variable `p` shadows the OptionParser above; the parser is
    # not used after this point, so behavior is unaffected.
    for p, pf in iter_project(folder, pattern=pattern,
                              n=n, commonprefix=False):
        pf = pf.split("_")[0] + ".ms{}".format(K)
        # kmc reads its input file list from an @-file
        infiles = pf + ".infiles"
        fw = open(infiles, "w")
        print("\n".join(p), file=fw)
        fw.close()

        cmd = "kmc -k{} -m64 -t{}".format(K, opts.cpus)  # -m64: max memory in GB
        cmd += " -ci{} -cs{}".format(opts.ci, opts.cs)
        if opts.cx:
            cmd += " -cx{}".format(opts.cx)
        if opts.fasta:
            cmd += " -fm"  # input is (multi)FASTA
        cmd += " @{} {} .".format(infiles, pf)
        outfile = pf + ".kmc_suf"
        mm.add(p, outfile, cmd)

    mm.write()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def meryl(args):
    """
    %prog meryl folder

    Run meryl on Illumina reads.

    Writes a Makefile (via MakeManager): one meryl count command per read
    file, then a merge of the two per-sample databases and cleanup of the
    intermediates.
    """
    p = OptionParser(meryl.__doc__)
    p.add_option("-k", default=19, type="int", help="Kmer size")
    p.set_cpus()
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    folder, = args
    K = opts.k
    cpus = opts.cpus
    mm = MakeManager()
    # NOTE: loop variable `p` shadows the OptionParser; parser unused beyond here.
    for p, pf in iter_project(folder):
        cmds = []
        mss = []
        for i, ip in enumerate(p):
            ms = "{}{}.ms{}".format(pf, i + 1, K)
            mss.append(ms)
            cmd = "meryl -B -C -m {} -threads {}".format(K, cpus)
            cmd += " -s {} -o {}".format(ip, ms)
            cmds.append(cmd)

        # assumes exactly two read files per sample (paired-end);
        # a different count would raise ValueError here — TODO confirm
        ams, bms = mss
        pms = "{}.ms{}".format(pf, K)
        cmd = "meryl -M add -s {} -s {} -o {}".format(ams, bms, pms)
        cmds.append(cmd)
        # remove the per-file intermediate databases after the merge
        cmd = "rm -f {}.mcdat {}.mcidx {}.mcdat {}.mcidx".\
            format(ams, ams, bms, bms)
        cmds.append(cmd)
        mm.add(p, pms + ".mcdat", cmds)

    mm.write()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def model(args):
    """
    %prog model erate

    Model kmer distribution given error rate. See derivation in FIONA paper:
    <http://bioinformatics.oxfordjournals.org/content/30/17/i356.full>

    Plots P(Y_k = c) for coverage c in [0, 2*cov] as an ASCII chart, where
    the k-mer count distribution is a binomial mixture of Poissons.
    """
    from scipy.stats import binom, poisson

    p = OptionParser(model.__doc__)
    p.add_option("-k", default=23, type="int", help="Kmer size")
    p.add_option("--cov", default=50, type="int", help="Expected coverage")
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    erate, = args
    erate = float(erate)
    cov = opts.cov
    k = opts.k

    xy = []
    # NOTE: xrange indicates this module targets Python 2.
    # Range include c although it is unclear what it means to have c=0
    for c in xrange(0, cov * 2 + 1):
        Prob_Yk = 0
        for i in xrange(k + 1):
            # Probability of having exactly i errors
            pi_i = binom.pmf(i, k, erate)
            # Expected coverage of kmer with exactly i errors
            # (each error lands on one of 3 alternative bases)
            mu_i = cov * (erate / 3) ** i * (1 - erate) ** (k - i)
            # Probability of seeing coverage of c
            Prob_Yk_i = poisson.pmf(c, mu_i)
            # Sum i over 0, 1, ... up to k errors
            Prob_Yk += pi_i * Prob_Yk_i
        xy.append((c, Prob_Yk))

    x, y = zip(*xy)
    asciiplot(x, y, title="Model")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def logodds(args):
    """
    %prog logodds cnt1 cnt2

    Compute log likelihood between two db.
    """
    from math import log
    from jcvi.formats.base import DictFile

    p = OptionParser(logodds.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    cnt1, cnt2 = args
    counts2 = DictFile(cnt2)
    # For each scaffold, print 100 * log-ratio of pseudocounted values
    for line in open(cnt1):
        scf, raw1 = line.split()
        raw2 = counts2[scf]
        # add-one pseudocount guards against log(0)
        a = float(raw1) + 1
        b = float(raw2) + 1
        score = int(100 * (log(a) - log(b)))
        print("{0}\t{1}".format(scf, score))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def count(args):
    """
    %prog count fastafile jf.db

    Run dump - jellyfish - bin - bincount in serial.

    Streams every k-mer of the FASTA through `jellyfish query`, serializes
    the presence/absence answers into a bitarray file, then delegates to
    bincount() to produce per-sequence shared k-mer counts.
    """
    from bitarray import bitarray

    p = OptionParser(count.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    fastafile, jfdb = args
    K = get_K(jfdb)  # k-mer size recorded in the jellyfish db name/metadata
    cmd = "jellyfish query {0} -C | cut -d' ' -f 2".format(jfdb)
    t = must_open("tmp", "w")
    # jellyfish reads k-mers on stdin; its (cut-down) counts go to tmp file t
    proc = Popen(cmd, stdin=PIPE, stdout=t)
    t.flush()

    f = Fasta(fastafile, lazy=True)
    for name, rec in f.iteritems_ordered():
        kmers = list(make_kmers(rec.seq, K))
        print("\n".join(kmers), file=proc.stdin)
    proc.stdin.close()  # EOF lets jellyfish finish
    logging.debug(cmd)
    proc.wait()

    a = bitarray()
    binfile = ".".join((fastafile, jfdb, "bin"))
    fw = open(binfile, "w")
    t.seek(0)
    # one bit per queried k-mer: nonzero count => present
    for row in t:
        c = row.strip()
        a.append(int(c))
    a.tofile(fw)
    logging.debug("Serialize {0} bits to `{1}`.".format(len(a), binfile))
    fw.close()
    sh("rm {0}".format(t.name))

    logging.debug("Shared K-mers (K={0}) between `{1}` and `{2}` written to `{3}`.".\
        format(K, fastafile, jfdb, binfile))
    cntfile = ".".join((fastafile, jfdb, "cnt"))
    bincount([fastafile, binfile, "-o", cntfile, "-K {0}".format(K)])
    logging.debug("Shared K-mer counts written to `{0}`.".format(cntfile))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bincount(args):
    """
    %prog bincount fastafile binfile

    Count K-mers in the bin.

    Walks the serialized presence bitarray in lockstep with the sequence
    sizes of the FASTA, printing per-sequence counts of set bits.
    """
    from bitarray import bitarray
    from jcvi.formats.sizes import Sizes

    p = OptionParser(bincount.__doc__)
    p.add_option("-K", default=23, type="int",
                 help="K-mer size [default: %default]")
    p.set_outfile()
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    fastafile, binfile = args
    K = opts.K

    fp = open(binfile)
    a = bitarray()
    a.fromfile(fp)
    f = Sizes(fastafile)
    tsize = 0
    fw = must_open(opts.outfile, "w")
    for name, seqlen in f.iter_sizes():
        # each sequence of length L contributes L - K + 1 bits
        ksize = seqlen - K + 1
        b = a[tsize: tsize + ksize]
        bcount = b.count()
        print("\t".join(str(x) for x in (name, bcount)), file=fw)
        tsize += ksize
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bin(args):
    """
    %prog bin filename filename.bin

    Serialize counts to bitarrays.
    """
    from bitarray import bitarray

    p = OptionParser(bin.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    inp, outp = args
    src = must_open(inp)
    dst = must_open(outp, "w")
    bits = bitarray()
    # last whitespace-separated column of each row is the count;
    # nonzero counts become 1-bits
    for line in src:
        last_col = line.split()[-1]
        bits.append(int(last_col))
    bits.tofile(dst)
    dst.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dump(args):
    """
    %prog dump fastafile

    Convert FASTA sequences to list of K-mers.
    """
    p = OptionParser(dump.__doc__)
    p.add_option("-K", default=23, type="int",
                 help="K-mer size [default: %default]")
    p.set_outfile()
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    fastafile, = args
    ksize = opts.K
    out = must_open(opts.outfile, "w")
    fastaobj = Fasta(fastafile, lazy=True)
    # Emit one k-mer per line, sequence by sequence in FASTA order
    for name, rec in fastaobj.iteritems_ordered():
        print("\n".join(make_kmers(rec.seq, ksize)), file=out)
    out.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def nucmer(args):
    """
    %prog nucmer ref.fasta query.fasta

    Run NUCMER using query against reference.

    Parallel implementation derived from:
    <https://github.com/fritzsedlazeck/sge_mummer>

    Splits ref and query into chunks and emits a Makefile with one nucmer
    job per (ref chunk, query chunk) pair.
    """
    from itertools import product
    from jcvi.apps.grid import MakeManager
    from jcvi.formats.base import split

    p = OptionParser(nucmer.__doc__)
    p.add_option("--chunks", type="int",
                 help="Split both query and subject into chunks")
    p.set_params(prog="nucmer", params="-l 100 -c 500")
    p.set_cpus()
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    ref, query = args
    cpus = opts.cpus
    # default chunk count: sqrt(cpus), so chunk pairs ~ cpus
    nrefs = nqueries = opts.chunks or int(cpus ** .5)
    refdir = ref.split(".")[0] + "-outdir"
    querydir = query.split(".")[0] + "-outdir"
    reflist = split([ref, refdir, str(nrefs)]).names
    querylist = split([query, querydir, str(nqueries)]).names

    mm = MakeManager()
    for i, (r, q) in enumerate(product(reflist, querylist)):
        pf = "{0:04d}".format(i)  # zero-padded output prefix per pair
        cmd = "nucmer -maxmatch"
        cmd += " {0}".format(opts.extra)
        cmd += " {0} {1} -p {2}".format(r, q, pf)
        deltafile = pf + ".delta"
        mm.add((r, q), deltafile, cmd)
        print(cmd)

    mm.write()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def blasr(args):
    """
    %prog blasr ref.fasta fofn

    Run blasr on a set of PacBio reads. This is based on a divide-and-conquer
    strategy described below.

    Aligns .h5 movie files in chunks of 3, merges/sorts/repacks the cmp.h5
    results, then runs quiver for consensus; all steps go into a Makefile.
    """
    from jcvi.apps.grid import MakeManager
    from jcvi.utils.iter import grouper

    p = OptionParser(blasr.__doc__)
    p.set_cpus(cpus=8)
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    reffasta, fofn = args
    flist = sorted([x.strip() for x in open(fofn)])
    h5list = []
    mm = MakeManager()
    # align input movies three at a time; each chunk gets its own fofn
    for i, fl in enumerate(grouper(flist, 3)):
        chunkname = "chunk{0:03d}".format(i)
        fn = chunkname + ".fofn"
        h5 = chunkname + ".cmp.h5"
        fw = open(fn, "w")
        print("\n".join(fl), file=fw)
        fw.close()
        cmd = "pbalign {0} {1} {2}".format(fn, reffasta, h5)
        cmd += " --nproc {0} --forQuiver --tmpDir .".format(opts.cpus)
        mm.add((fn, reffasta), h5, cmd)
        h5list.append(h5)

    # Merge h5, sort and repack
    allh5 = "all.cmp.h5"
    tmph5 = "tmp.cmp.h5"
    cmd_merge = "cmph5tools.py merge --outFile {0}".format(allh5)
    cmd_merge += " " + " ".join(h5list)
    cmd_sort = "cmph5tools.py sort --deep {0} --tmpDir .".format(allh5)
    cmd_repack = "h5repack -f GZIP=1 {0} {1}".format(allh5, tmph5)
    cmd_repack += " && mv {0} {1}".format(tmph5, allh5)
    mm.add(h5list, allh5, [cmd_merge, cmd_sort, cmd_repack])

    # Quiver
    pf = reffasta.rsplit(".", 1)[0]
    variantsgff = pf + ".variants.gff"
    consensusfasta = pf + ".consensus.fasta"
    cmd_faidx = "samtools faidx {0}".format(reffasta)
    cmd = "quiver -j 32 {0}".format(allh5)
    cmd += " -r {0} -o {1} -o {2}".format(reffasta, variantsgff, consensusfasta)
    mm.add(allh5, consensusfasta, [cmd_faidx, cmd])

    mm.write()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def blat(args):
    """
    %prog blat ref.fasta query.fasta

    Calls blat and filters BLAST hits.
    """
    p = OptionParser(blat.__doc__)
    p.set_align(pctid=95, hitlen=30)
    p.set_cpus()
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    subject_fa, query_fa = args
    # Derive the output filename, then delegate the actual work to run_blat
    blastfile = get_outfile(subject_fa, query_fa, suffix="blat")
    run_blat(infile=query_fa, outfile=blastfile, db=subject_fa,
             pctid=opts.pctid, hitlen=opts.hitlen, cpus=opts.cpus,
             overwrite=False)
    return blastfile
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def blast(args):
    """
    %prog blast ref.fasta query.fasta

    Calls blast and then filter the BLAST hits. Default is megablast.

    Returns the path of the BLAST output file.
    """
    task_choices = ("blastn", "blastn-short", "dc-megablast", \
                    "megablast", "vecscreen")
    p = OptionParser(blast.__doc__)
    p.set_align(pctid=0, evalue=.01)
    p.add_option("--wordsize", type="int", help="Word size [default: %default]")
    p.add_option("--best", default=1, type="int",
                 help="Only look for best N hits [default: %default]")
    p.add_option("--task", default="megablast", choices=task_choices,
                 help="Task of the blastn [default: %default]")
    p.set_cpus()
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    reffasta, queryfasta = args
    blastfile = get_outfile(reffasta, queryfasta)

    run_megablast(infile=queryfasta, outfile=blastfile, db=reffasta,
                  wordsize=opts.wordsize, pctid=opts.pctid, evalue=opts.evalue,
                  hitlen=None, best=opts.best, task=opts.task, cpus=opts.cpus)

    return blastfile
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def lastgenome(args):
    """
    %prog genome_A.fasta genome_B.fasta

    Run LAST by calling LASTDB, LASTAL and LAST-SPLIT. The recipe is based on
    tutorial here:

    <https://github.com/mcfrith/last-genome-alignments>

    The script runs the following steps:
    $ lastdb -P0 -uNEAR -R01 Chr10A-NEAR Chr10A.fa
    $ lastal -E0.05 -C2 Chr10A-NEAR Chr10B.fa | last-split -m1 | maf-swap |
      last-split -m1 -fMAF > Chr10A.Chr10B.1-1.maf
    $ maf-convert -n blasttab Chr10A.Chr10B.1-1.maf > Chr10A.Chr10B.1-1.blast

    Works with LAST v959.
    """
    from jcvi.apps.grid import MakeManager

    p = OptionParser(lastgenome.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    gA, gB = args
    mm = MakeManager()
    bb = lambda x: op.basename(x).rsplit(".", 1)[0]
    gA_pf, gB_pf = bb(gA), bb(gB)

    # Build LASTDB
    dbname = "-".join((gA_pf, "NEAR"))
    dbfile = dbname + ".suf"
    # FIX: lastdb takes the database *prefix* (dbname), not the .suf file;
    # the previous code passed dbfile, so lastal (which queries dbname)
    # could never find the database it built.
    build_db_cmd = "lastdb -P0 -uNEAR -R01 {} {}".format(dbname, gA)
    mm.add(gA, dbfile, build_db_cmd)

    # Run LASTAL
    maffile = "{}.{}.1-1.maf".format(gA_pf, gB_pf)
    lastal_cmd = "lastal -E0.05 -C2 {} {}".format(dbname, gB)
    lastal_cmd += " | last-split -m1"     # one-to-one on query axis
    lastal_cmd += " | maf-swap"
    lastal_cmd += " | last-split -m1 -fMAF > {}".format(maffile)
    mm.add([dbfile, gB], maffile, lastal_cmd)

    # Convert to BLAST format
    blastfile = maffile.replace(".maf", ".blast")
    convert_cmd = "maf-convert -n blasttab {} > {}".format(maffile, blastfile)
    mm.add(maffile, blastfile, convert_cmd)

    mm.write()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def last(args, dbtype=None):
    """
    %prog database.fasta query.fasta

    Run LAST by calling LASTDB and LASTAL. LAST program available:
    <http://last.cbrc.jp>

    Works with LAST-719.
    """
    p = OptionParser(last.__doc__)
    p.add_option("--dbtype", default="nucl",
                 choices=("nucl", "prot"),
                 help="Molecule type of subject database")
    p.add_option("--path", help="Specify LAST path")
    p.add_option("--mask", default=False, action="store_true", help="Invoke -c in lastdb")
    p.add_option("--format", default="BlastTab",
                 choices=("TAB", "MAF", "BlastTab", "BlastTab+"),
                 help="Output format")
    p.add_option("--minlen", default=0, type="int",
                 help="Filter alignments by how many bases match")
    p.add_option("--minid", default=0, type="int", help="Minimum sequence identity")
    p.set_cpus()
    p.set_params()
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    subject, query = args
    path = opts.path
    cpus = opts.cpus
    if not dbtype:
        dbtype = opts.dbtype
    getpath = lambda x: op.join(path, x) if path else x
    lastdb_bin = getpath("lastdb")
    lastal_bin = getpath("lastal")

    subjectdb = subject.rsplit(".", 1)[0]
    run_lastdb(infile=subject, outfile=subjectdb + ".prj", mask=opts.mask, \
               lastdb_bin=lastdb_bin, dbtype=dbtype)

    # -u2 only when the db was built with masking (-c)
    u = 2 if opts.mask else 0
    cmd = "{0} -u {1}".format(lastal_bin, u)
    cmd += " -P {0} -i3G".format(cpus)
    cmd += " -f {0}".format(opts.format)
    cmd += " {0} {1}".format(subjectdb, query)

    minlen = opts.minlen
    minid = opts.minid
    extra = opts.extra
    assert minid != 100, "Perfect match not yet supported"
    # score-matrix ratio derived from minid; NOTE(review): under Python 2 this
    # is integer division, so e.g. minid=90 gives mm=9 — presumably intended,
    # but verify if this module is ported to Python 3.
    mm = minid / (100 - minid)

    if minlen:
        extra += " -e{0}".format(minlen)
    if minid:
        # match reward 1, mismatch/gap penalties scaled by mm
        extra += " -r1 -q{0} -a{0} -b{0}".format(mm)
    if extra:
        cmd += " " + extra.strip()

    lastfile = get_outfile(subject, query, suffix="last")
    sh(cmd, outfile=lastfile)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fillstats(args):
    """
    %prog fillstats genome.fill

    Build stats on .fill file from GapCloser.

    Reports closed vs. remaining gap counts, their total sizes, and summary
    statistics of gap sizes, all to stderr.
    """
    from jcvi.utils.cbook import SummaryStats, percentage, thousands

    p = OptionParser(fillstats.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    fillfile, = args
    fp = open(fillfile)
    scaffolds = 0
    gaps = []
    for row in fp:
        # ">" lines are scaffold headers; all other lines are gap records
        if row[0] == ">":
            scaffolds += 1
            continue
        fl = FillLine(row)
        gaps.append(fl)

    print("{0} scaffolds in total".format(scaffolds), file=sys.stderr)

    closed = [x for x in gaps if x.closed]
    closedbp = sum(x.before for x in closed)
    notClosed = [x for x in gaps if not x.closed]
    notClosedbp = sum(x.before for x in notClosed)

    totalgaps = len(closed) + len(notClosed)

    print("Closed gaps: {0} size: {1} bp".\
        format(percentage(len(closed), totalgaps), thousands(closedbp)), file=sys.stderr)
    ss = SummaryStats([x.after for x in closed])
    print(ss, file=sys.stderr)

    # delta = size change introduced by closing the gap
    ss = SummaryStats([x.delta for x in closed])
    print("Delta:", ss, file=sys.stderr)

    print("Remaining gaps: {0} size: {1} bp".\
        format(percentage(len(notClosed), totalgaps), thousands(notClosedbp)), file=sys.stderr)
    ss = SummaryStats([x.after for x in notClosed])
    print(ss, file=sys.stderr)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bed(args):
    """
    %prog bed pslfile

    Convert to bed format.
    """
    p = OptionParser(bed.__doc__)
    p.set_outfile()
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    pslfile, = args
    out = must_open(opts.outfile, "w")
    # Each PSL record knows how to render itself as a BED12 line
    for rec in Psl(pslfile):
        print(rec.bed12line, file=out)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def gff(args):
    """
    %prog gff pslfile

    Convert to gff format.

    Emits one parent match feature per PSL record plus one child match_part
    feature per alignment block, in GFF3.
    """
    p = OptionParser(gff.__doc__)
    p.add_option("--source", default="GMAP",
                 help="specify GFF source [default: %default]")
    p.add_option("--type", default="EST_match",
                 help="specify GFF feature type [default: %default]")
    p.add_option("--suffix", default=".match",
                 help="match ID suffix [default: \"%default\"]")
    p.add_option("--swap", default=False, action="store_true",
                 help="swap query and target features [default: %default]")
    p.add_option("--simple_score", default=False, action="store_true",
                 help="calculate a simple percent score [default: %default]")
    p.set_outfile()
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    pslfile, = args
    fw = must_open(opts.outfile, "w")

    print("##gff-version 3", file=fw)
    psl = Psl(pslfile)
    for p in psl:
        if opts.swap:
            # `swap` is a property with side effects on p — exchanges
            # query/target coordinates in place
            p.swap

        psl.trackMatches(p.qName)
        # switch from 0-origin to 1-origin
        p.qStart += 1
        p.tStart += 1

        print(p.gffline(source=opts.source, type=opts.type, suffix=opts.suffix, \
                primary_tag="ID", alt_score=opts.simple_score, \
                count=psl.getMatchCount(p.qName)), file=fw)

        # create an empty PslLine() object and load only
        # the targetName, queryName and strand info
        part = PslLine("\t".join(str(x) for x in [0] * p.nargs))
        part.tName, part.qName, part.strand = p.tName, p.qName, p.strand

        nparts = len(p.qStarts)
        for n in xrange(nparts):  # xrange: Python 2 module
            part.qStart, part.tStart, aLen = p.qStarts[n] + 1, p.tStarts[n] + 1, p.blockSizes[n]
            part.qEnd = part.qStart + aLen - 1
            part.tEnd = part.tStart + aLen - 1

            # on the minus strand, qStarts are stored in reverse-complement
            # coordinates; map back to forward-strand positions
            if part.strand == "-":
                part.qStart = p.qSize - (p.qStarts[n] + p.blockSizes[n]) + 1
                part.qEnd = p.qSize - p.qStarts[n]

            print(part.gffline(source=opts.source, suffix=opts.suffix, \
                    count=psl.getMatchCount(part.qName)), file=fw)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _isProtein(self):
    """
    check if blockSizes and scores are in the protein space or not

    Protein PSLs advance the target by 3 bases per aligned unit, so the last
    block's target end is tStarts[last] + 3 * blockSizes[last].
    NOTE(review): in the "-" branch the parentheses group the coordinate test
    and the strand test together inside one comparison's parens, unlike the
    "+" branch — verify against UCSC's pslIsProtein() grouping.
    """
    last = self.blockCount - 1
    return ((self.tEnd == self.tStarts[last] + 3 * self.blockSizes[last]) \
            and self.strand == "+") or \
           ((self.tStart == self.tSize - (self.tStarts[last] + 3 * self.blockSizes[last])\
            and self.strand == "-"))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _milliBad(self, ismRNA=False):
    """
    calculate badness in parts per thousand
    i.e. number of non-identical matches

    Mirrors UCSC's pslCalcMilliBad: mismatches and insert counts are scaled
    into per-mille of the total aligned bases, with a log penalty for the
    size difference between query and target spans.
    """
    sizeMult = self._sizeMult  # 3 for protein alignments, 1 for nucleotide
    qAlnSize, tAlnSize = self.qspan * sizeMult, self.tspan
    alnSize = min(qAlnSize, tAlnSize)
    if alnSize <= 0:
        return 0  # degenerate alignment: no badness

    sizeDiff = qAlnSize - tAlnSize
    if sizeDiff < 0:
        # for mRNA, a target longer than the query (introns) is expected
        sizeDiff = 0 if ismRNA else -sizeDiff

    insertFactor = self.qNumInsert
    if not ismRNA:
        insertFactor += self.tNumInsert

    total = (self.matches + self.repMatches + self.misMatches) * sizeMult
    return (1000 * (self.misMatches * sizeMult + insertFactor + \
           round(3 * math.log(1 + sizeDiff)))) / total if total != 0 else 0
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_prefix(dir="../"):
    """
    Look for prefix.gkpStore in the upper directory.
    """
    # First *.gkpStore match; strip the directory and the .gkpStore suffix
    store = glob(dir + "*.gkpStore")[0]
    return op.basename(store).rsplit(".", 1)[0]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cnsfix(args):
    """
    %prog cnsfix consensus-fix.out.FAILED > blacklist.ids

    Parse consensus-fix.out to extract layouts for fixed unitigs. This will
    mark all the failed fragments detected by utgcnsfix and pop them out of
    the existing unitigs.

    Prints the fragment idents flagged with "save ... for next pass".
    """
    from jcvi.formats.base import read_block

    p = OptionParser(cnsfix.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    cnsfixout, = args
    fp = open(cnsfixout)
    utgs = []
    saves = []
    # blocks are delimited by "Evaluating ..." header lines
    for header, contents in read_block(fp, "Evaluating"):
        contents = list(contents)
        utg = header.split()[2]
        utgs.append(utg)
        # Look for this line:
        # save fragment idx=388 ident=206054426 for next pass
        for c in contents:
            if not c.startswith("save"):
                continue
            ident = c.split()[3].split("=")[-1]
            saves.append(ident)
    print("\n".join(saves))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def error(args):
    """
    %prog error version backup_folder

    Find all errors in ../5-consensus/*.err and pull the error unitigs into
    backup/ folder.

    Writes (partID, unitigID) pairs to errors.log and moves each pulled
    unitig layout into backup_folder.
    """
    p = OptionParser(error.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    version, backup_folder = args
    mkdir(backup_folder)

    fw = open("errors.log", "w")
    seen = set()  # de-duplicate (version, partID, unitigID) triples
    for g in glob("../5-consensus/*.err"):
        if "partitioned" in g:
            continue

        fp = open(g)
        partID = op.basename(g).rsplit(".err", 1)[0]
        partID = int(partID.split("_")[-1])

        # `working` and `failed` are module-level marker strings — the
        # "working on unitig N" line names the unitig; a later "failed"
        # line triggers the pull.
        # NOTE(review): unitigID is unbound if a failure line precedes the
        # first `working` line in a file — presumed impossible in practice.
        for row in fp:
            if row.startswith(working):
                unitigID = row.split("(")[0].split()[-1]
                continue

            if not failed.upper() in row.upper():
                continue

            uu = (version, partID, unitigID)
            if uu in seen:
                continue
            seen.add(uu)

            print("\t".join(str(x) for x in (partID, unitigID)), file=fw)

            s = [str(x) for x in uu]
            unitigfile = pull(s)
            cmd = "mv {0} {1}".format(unitigfile, backup_folder)
            sh(cmd)

        fp.close()

    logging.debug("A total of {0} unitigs saved to {1}.".\
        format(len(seen), backup_folder))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cut(args):
    """
    %prog cut unitigfile fragID

    Cut the unitig at a given fragment. Run `%prog trace unitigfile` first to
    see which fragment breaks the unitig.

    Three mutually exclusive modes: shred everything after fragID (-s),
    pop blacklisted fragments (--blacklist, requires fragID == "0"),
    or cut at fragID (default). The file is rewritten in place and then
    validated unless --notest.
    """
    from jcvi.formats.base import SetFile

    p = OptionParser(cut.__doc__)
    p.add_option("-s", dest="shredafter", default=False, action="store_true",
                 help="Shred fragments after the given fragID [default: %default]")
    p.add_option("--notest", default=False, action="store_true",
                 help="Do not test the unitigfile after edits [default: %default]")
    p.add_option("--blacklist",
                 help="File that contains blacklisted fragments to be popped "
                      "[default: %default]")
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    s, fragID = args
    u = UnitigLayout(s)
    blacklist = opts.blacklist
    black = SetFile(blacklist) if blacklist else None

    if opts.shredafter:
        u.shredafter(fragID)
    elif black:
        assert fragID == "0", "Must set fragID to 0 when --blacklist is on"
        u.pop(black)
    else:
        u.cut(fragID)
    u.print_to_file(inplace=True)
    if not opts.notest:
        test([s])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def shred(args):
    """
    %prog shred unitigfile

    Shred the unitig into one fragment per unitig to fix. This is the last
    resort as a desperate fix.
    """
    p = OptionParser(shred.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    unitigfile, = args
    # Load the layout, shred it, and rewrite the file in place
    layout = UnitigLayout(unitigfile)
    layout.shred()
    layout.print_to_file(inplace=True)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pull(args):
    """
    %prog pull version partID unitigID

    For example, `%prog pull 5 530` will pull the utg530 from partition 5
    The layout is written to `unitig530`
    """
    p = OptionParser(pull.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 3:
        sys.exit(not p.print_help())

    prefix = get_prefix()
    version, partID, unitigID = args

    # Output file carries the full version.partID.unitigID tag
    tag = ".".join(args)
    unitigfile = "unitig" + tag

    cmd = ("tigStore"
           + " -g ../{0}.gkpStore -t ../{0}.tigStore".format(prefix)
           + " {0} -up {1} -d layout -u {2}".format(version, partID, unitigID))
    sh(cmd, outfile=unitigfile)

    return unitigfile
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mtdotplots(args):
    """
    %prog mtdotplots Mt3.5 Mt4.0 medicago.medicago.lifted.1x1.anchors

    Plot Mt3.5 and Mt4.0 side-by-side. This is essentially combined from two
    graphics.dotplot() function calls as panel A and B.
    """
    from jcvi.graphics.dotplot import check_beds, dotplot

    p = OptionParser(mtdotplots.__doc__)
    p.set_beds()
    opts, args, iopts = p.set_image_options(args, figsize="16x8", dpi=90)

    if len(args) != 3:
        sys.exit(not p.print_help())

    a, b, ac = args  # two assembly dirs and the shared anchors filename
    fig = plt.figure(1, (iopts.w, iopts.h))
    root = fig.add_axes([0, 0, 1, 1])
    # left/right halves, each with an inset dotplot axes
    r1 = fig.add_axes([0, 0, .5, 1])
    r2 = fig.add_axes([.5, 0, .5, 1])
    a1 = fig.add_axes([.05, .1, .4, .8])
    a2 = fig.add_axes([.55, .1, .4, .8])

    anchorfile = op.join(a, ac)
    qbed, sbed, qorder, sorder, is_self = check_beds(anchorfile, p, opts)
    dotplot(anchorfile, qbed, sbed, fig, r1, a1, is_self=is_self,
            genomenames="Mt3.5_Mt3.5")

    # reset bed options so check_beds re-resolves them for the second panel
    opts.qbed = opts.sbed = None
    anchorfile = op.join(b, ac)
    qbed, sbed, qorder, sorder, is_self = check_beds(anchorfile, p, opts)
    dotplot(anchorfile, qbed, sbed, fig, r2, a2, is_self=is_self,
            genomenames="Mt4.0_Mt4.0")

    # panel labels
    root.text(.03, .95, "A", ha="center", va="center", size=36)
    root.text(.53, .95, "B", ha="center", va="center", size=36)
    root.set_xlim(0, 1)
    root.set_ylim(0, 1)
    root.set_axis_off()

    pf = "mtdotplots"
    image_name = pf + "." + iopts.format
    savefig(image_name, dpi=iopts.dpi, iopts=iopts)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def oropetium(args):
    """
    %prog oropetium mcscan.out all.bed layout switch.ids

    Build a composite figure that calls graphis.synteny.

    Adds a small grass species tree on the left panel alongside the synteny
    panel.
    """
    p = OptionParser(oropetium.__doc__)
    p.add_option("--extra", help="Extra features in BED format")
    opts, args, iopts = p.set_image_options(args, figsize="9x6")

    if len(args) != 4:
        sys.exit(not p.print_help())

    datafile, bedfile, slayout, switch = args
    fig = plt.figure(1, (iopts.w, iopts.h))
    root = fig.add_axes([0, 0, 1, 1])

    Synteny(fig, root, datafile, bedfile, slayout,
            switch=switch, extra_features=opts.extra)

    # legend showing the orientation of the genes
    draw_gene_legend(root, .4, .57, .74, text=True, repeat=True)

    # On the left panel, make a species tree
    fc = 'lightslategrey'

    coords = {}
    xs, xp = .16, .03  # leaf x-position and per-level x step
    coords["oropetium"] = (xs, .7)
    coords["setaria"] = (xs, .6)
    coords["sorghum"] = (xs, .5)
    coords["rice"] = (xs, .4)
    coords["brachypodium"] = (xs, .3)
    xs -= xp
    # join_nodes draws the connector and returns the parent node position
    coords["Panicoideae"] = join_nodes(root, coords, "setaria", "sorghum", xs)
    xs -= xp
    coords["BEP"] = join_nodes(root, coords, "rice", "brachypodium", xs)
    coords["PACMAD"] = join_nodes(root, coords, "oropetium", "Panicoideae", xs)
    xs -= xp
    coords["Poaceae"] = join_nodes(root, coords, "BEP", "PACMAD", xs)

    # Names of the internal nodes
    for tag in ("BEP", "Poaceae"):
        nx, ny = coords[tag]
        nx, ny = nx - .005, ny - .02
        root.text(nx, ny, tag, rotation=90, ha="right", va="top", color=fc)
    for tag in ("PACMAD",):
        nx, ny = coords[tag]
        nx, ny = nx - .005, ny + .02
        root.text(nx, ny, tag, rotation=90, ha="right", va="bottom", color=fc)

    root.set_xlim(0, 1)
    root.set_ylim(0, 1)
    root.set_axis_off()

    pf = "oropetium"
    image_name = pf + "." + iopts.format
    savefig(image_name, dpi=iopts.dpi, iopts=iopts)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def amborella(args):
""" %prog amborella seqids karyotype.layout mcscan.out all.bed synteny.layout Build a composite figure that calls graphics.karyotype and graphics.synteny. """ |
p = OptionParser(amborella.__doc__)
p.add_option("--tree",
             help="Display trees on the bottom of the figure [default: %default]")
p.add_option("--switch",
             help="Rename the seqid with two-column file [default: %default]")
opts, args, iopts = p.set_image_options(args, figsize="8x7")

if len(args) != 5:
    sys.exit(not p.print_help())

seqidsfile, klayout, datafile, bedfile, slayout = args
switch = opts.switch
tree = opts.tree

# Full-canvas axes; coordinates below are (0, 1) figure fractions
fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1])

# Karyotype ideograms plus synteny ribbons on the same canvas
Karyotype(fig, root, seqidsfile, klayout)
Synteny(fig, root, datafile, bedfile, slayout, switch=switch, tree=tree)

# legend showing the orientation of the genes
draw_gene_legend(root, .5, .68, .5)

# annotate the WGD events (gamma and epsilon duplications)
fc = 'lightslategrey'
x = .05
radius = .012
TextCircle(root, x, .86, '$\gamma$', radius=radius)
TextCircle(root, x, .95, '$\epsilon$', radius=radius)
root.plot([x, x], [.83, .9], ":", color=fc, lw=2)
# Dotted arc drawn around the epsilon marker
pts = plot_cap((x, .95), np.radians(range(-70, 250)), .02)
x, y = zip(*pts)
root.plot(x, y, ":", color=fc, lw=2)

root.set_xlim(0, 1)
root.set_ylim(0, 1)
root.set_axis_off()

pf = "amborella"
image_name = pf + "." + iopts.format
savefig(image_name, dpi=iopts.dpi, iopts=iopts)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def annotate(args):
""" %prog annotate agpfile gaps.linkage.bed assembly.fasta Annotate AGP file with linkage info of `paired-end` or `map`. File `gaps.linkage.bed` is generated by assembly.gaps.estimate(). """ |
from jcvi.formats.agp import AGP, bed, tidy

p = OptionParser(annotate.__doc__)
p.add_option("--minsize", default=200,
             help="Smallest component size [default: %default]")
opts, args = p.parse_args(args)

if len(args) != 3:
    sys.exit(not p.print_help())

agpfile, linkagebed, assemblyfasta = args
linkagebed = Bed(linkagebed)
# Gaps whose BED score is 0 are considered spanned by mate evidence
spannedgaps = set()
for b in linkagebed:
    score = int(b.score)
    if score == 0:
        spannedgaps.add((b.accn, b.start, b.end))

agp = AGP(agpfile)
newagpfile = agpfile.rsplit(".", 1)[0] + ".linkage.agp"
newagp = open(newagpfile, "w")
contig_id = 0
minsize = opts.minsize
for a in agp:
    if not a.is_gap:
        cs = a.component_span
        if cs < minsize:
            # Tiny component: demote to a scaffold gap of the same size,
            # then fall through to the gap-annotation logic below
            a.is_gap = True
            a.component_type = "N"
            a.gap_length = cs
            a.gap_type = "scaffold"
            a.linkage = "yes"
            a.linkage_evidence = []
        else:
            # Keep the component, renumbered as contigNNNN
            contig_id += 1
            a.component_id = "contig{0:04d}".format(contig_id)
            a.component_beg = 1
            a.component_end = cs
            a.component_type = "W"
            print(a, file=newagp)
            continue

    # Gap lines: 100-bp gaps without spanning mates are of unknown size
    # ("U", map evidence); all others are tagged as paired-ends evidence
    gapinfo = (a.object, a.object_beg, a.object_end)
    gaplen = a.gap_length
    if gaplen == 100 and gapinfo not in spannedgaps:
        a.component_type = "U"
        tag = "map"
    else:
        tag = "paired-ends"
    a.linkage_evidence.append(tag)
    print(a, file=newagp)
newagp.close()
logging.debug("Annotated AGP written to `{0}`.".format(newagpfile))

# Rebuild contig BED/FASTA from the new AGP, then tidy the AGP
contigbed = assemblyfasta.rsplit(".", 1)[0] + ".contigs.bed"
bedfile = bed([newagpfile, "--nogaps", "--outfile=" + contigbed])
contigfasta = fastaFromBed(bedfile, assemblyfasta, name=True, stranded=True)
tidy([newagpfile, contigfasta])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def estimate(args):
""" %prog estimate gaps.bed all.spans.bed all.mates Estimate gap sizes based on mate positions and library insert sizes. """ |
from collections import defaultdict
from jcvi.formats.bed import intersectBed_wao
from jcvi.formats.posmap import MatesFile

p = OptionParser(estimate.__doc__)
p.add_option("--minlinks", default=3, type="int",
             help="Minimum number of links to place [default: %default]")
opts, args = p.parse_args(args)

if len(args) != 3:
    sys.exit(not p.print_help())

gapsbed, spansbed, matesfile = args
mf = MatesFile(matesfile)
bed = Bed(gapsbed)
order = bed.order

# Map each placeholder gap to the mate spans overlapping it, and back
gap2mate = defaultdict(set)
mate2gap = defaultdict(set)
for a, b in intersectBed_wao(gapsbed, spansbed):
    gapsize = a.span
    # Only 100-bp gaps are size placeholders needing estimation
    if gapsize != 100:
        continue
    gapname = a.accn
    if b is None:
        # No overlapping mate span for this gap
        gap2mate[gapname] = set()
        continue
    matename = b.accn
    gap2mate[gapname].add(matename)
    mate2gap[matename].add(gapname)

omgapsbed = "gaps.linkage.bed"
fw = open(omgapsbed, "w")
for gapname, mates in sorted(gap2mate.items()):
    i, b = order[gapname]
    nmates = len(mates)
    if nmates < opts.minlinks:
        # Under-supported gap: record its BED line with the link count
        print("{0}\t{1}".format(b, nmates), file=fw)
        continue
    # NOTE(review): well-supported gaps are only echoed to stdout here;
    # presumably the size estimation continues elsewhere — confirm
    print(gapname, mates)
fw.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sizes(args):
""" %prog sizes gaps.bed a.fasta b.fasta Take the flanks of gaps within a.fasta, map them onto b.fasta. Compile the results to the gap size estimates in b. The output is detailed below: Columns are: 1. A scaffold 2. Start position 3. End position 4. Gap identifier 5. Gap size in A (= End - Start) 6. Gap size in B (based on BLAST, see below) For each gap, I extracted the left and right sequence (mostly 2Kb, but can be shorter if it runs into another gap) flanking the gap. The flanker names look like gap.00003L and gap.00003R means the left and right flanker of this particular gap, respectively. The BLAST output is used to calculate the gap size. For each flanker sequence, I took the best hit, and calculate the inner distance between the L match range and R range. The two flankers must map with at least 98% identity, and in the same orientation. NOTE the sixth column in the list file is not always a valid number. Other values are: - na: both flankers are missing in B - Singleton: one flanker is missing - Different chr: flankers map to different scaffolds - Strand +|-: flankers map in different orientations - Negative value: the R flanker map before L flanker """ |
from jcvi.formats.base import DictFile
from jcvi.apps.align import blast

p = OptionParser(sizes.__doc__)
opts, args = p.parse_args(args)

if len(args) != 3:
    sys.exit(not p.print_help())

gapsbed, afasta, bfasta = args
pf = gapsbed.rsplit(".", 1)[0]
extbed = pf + ".ext.bed"
extfasta = pf + ".ext.fasta"

# Extract flanking sequences around each gap (skipped if up to date)
if need_update(gapsbed, extfasta):
    extbed, extfasta = flanks([gapsbed, afasta])

q = op.basename(extfasta).split(".")[0]
r = op.basename(bfasta).split(".")[0]
blastfile = "{0}.{1}.blast".format(q, r)
# Map flankers onto B at high stringency (skipped if up to date)
if need_update([extfasta, bfasta], blastfile):
    blastfile = blast([bfasta, extfasta, "--wordsize=50", "--pctid=98"])

# labels: gap id -> estimated size in B, or a diagnostic label (see doc)
labelsfile = blast_to_twobeds(blastfile)
labels = DictFile(labelsfile, delimiter='\t')
bed = Bed(gapsbed)
for b in bed:
    b.score = b.span  # gap size in A
    accn = b.accn
    print("\t".join((str(x) for x in (b.seqid, b.start - 1, b.end, accn,
                                      b.score, labels.get(accn, "na")))))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def flanks(args):
""" %prog flanks gaps.bed fastafile Create sequences flanking the gaps. """ |
p = OptionParser(flanks.__doc__)
p.add_option("--extend", default=2000, type="int",
             help="Extend seq flanking the gaps [default: %default]")
opts, args = p.parse_args(args)

if len(args) != 2:
    sys.exit(not p.print_help())

gapsbed, fastafile = args
Ext = opts.extend
sizes = Sizes(fastafile).mapping
bed = Bed(gapsbed)
pf = gapsbed.rsplit(".", 1)[0]
extbed = pf + ".ext.bed"
fw = open(extbed, "w")
for i, b in enumerate(bed):
    seqid = b.seqid
    gapname = b.accn
    size = sizes[seqid]

    # Adjacent gaps on the same seqid truncate the flanking windows
    prev_b = bed[i - 1] if i > 0 else None
    next_b = bed[i + 1] if i + 1 < len(bed) else None
    if prev_b and prev_b.seqid != seqid:
        prev_b = None
    if next_b and next_b.seqid != seqid:
        next_b = None

    # Left flank (suffix "L"): at most Ext bp, not crossing the previous gap
    start = prev_b.end + 1 if prev_b else 1
    start, end = max(start, b.start - Ext), b.start - 1
    print("\t".join(str(x) for x in \
        (b.seqid, start - 1, end, gapname + "L")), file=fw)

    # Right flank (suffix "R"): at most Ext bp, not crossing the next gap
    end = next_b.start - 1 if next_b else size
    start, end = b.end + 1, min(end, b.end + Ext)
    print("\t".join(str(x) for x in \
        (b.seqid, start - 1, end, gapname + "R")), file=fw)
fw.close()

extfasta = fastaFromBed(extbed, fastafile, name=True)
return extbed, extfasta
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def index(args):
    """
    %prog index frgscf.sorted

    Compress frgscffile.sorted and index it using `tabix`.
    """
    p = OptionParser(index.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(p.print_help())

    frgscffile, = args

    # bgzip-compress, unless the .gz output already exists
    gzfile = frgscffile + ".gz"
    if not op.exists(gzfile):
        sh("bgzip -c {0}".format(frgscffile), outfile=gzfile)

    # Sequence, begin, end in 2, 3, 4-th column, respectively
    tbifile = gzfile + ".tbi"
    if not op.exists(tbifile):
        sh("tabix -s 2 -b 3 -e 4 {0}".format(gzfile))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def reads(args):
    """
    %prog reads frgscffile

    Report read counts per scaffold (based on frgscf).
    """
    p = OptionParser(reads.__doc__)
    p.add_option("-p", dest="prefix_length", default=4, type="int",
            help="group the reads based on the first N chars [default: %default]")
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(p.print_help())

    frgscffile, = args
    nchars = opts.prefix_length

    # Tally reads per (scaffold, read-name prefix) pair
    counts = defaultdict(lambda: defaultdict(int))
    for line in open(frgscffile):
        rec = FrgScfLine(line)
        counts[rec.scaffoldID][rec.fragmentID[:nchars]] += 1

    # One line per scaffold: "<scaffold>\t<prefix>:<count>, ..."
    for scf, count in sorted(counts.items()):
        breakdown = ", ".join("{0}:{1}".format(*pair)
                              for pair in sorted(count.items()))
        print("{0}\t{1}".format(scf, breakdown))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bed(args):
    """
    %prog bed frgscffile

    Convert the frgscf posmap file to bed format.

    Returns the path of the BED file written.
    """
    p = OptionParser(bed.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    frgscffile, = args
    bedfile = frgscffile.rsplit(".", 1)[0] + ".bed"
    # Use context managers so the output handle is closed and flushed;
    # previously `fw` was never closed.
    with open(frgscffile) as fp, open(bedfile, "w") as fw:
        for row in fp:
            f = FrgScfLine(row)
            print(f.bedline, file=fw)

    logging.debug("File written to `{0}`.".format(bedfile))

    return bedfile
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dup(args):
    """
    %prog dup frgscffile

    Use the frgscf posmap file as an indication of the coverage of the library.
    Large insert libraries are frequently victims of high levels of redundancy.
    """
    p = OptionParser(dup.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(p.print_help())

    frgscffile, = args

    fp = open(frgscffile)
    data = [FrgScfLine(row) for row in fp]
    # we need to separate forward and reverse reads, because the position
    # collisions are handled differently: forward reads pile up on their
    # begin coordinate, reverse reads on their end coordinate
    forward_data = [x for x in data if x.orientation == '+']
    reverse_data = [x for x in data if x.orientation == '-']

    # counts[k] = number of (scaffold, position) piles containing k reads
    counts = defaultdict(int)

    key = lambda x: (x.scaffoldID, x.begin)
    forward_data.sort(key=key)
    for k, pile in groupby(forward_data, key=key):
        counts[len(list(pile))] += 1

    key = lambda x: (x.scaffoldID, x.end)
    reverse_data.sort(key=key)
    # BUGFIX: this loop previously grouped `forward_data` again, so
    # reverse reads were never counted (and forward piles counted twice).
    for k, pile in groupby(reverse_data, key=key):
        counts[len(list(pile))] += 1

    prefix = frgscffile.split(".")[0]
    print("Duplication level in `{0}`".format(prefix), file=sys.stderr)
    print("=" * 40, file=sys.stderr)
    for c, v in sorted(counts.items()):
        if c > 10:  # only report pile sizes up to 10
            break
        label = "unique" if c == 1 else "{0} copies".format(c)
        print("{0}: {1}".format(label, v), file=sys.stderr)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _score(cluster):
""" score of the cluster, in this case, is the number of non-repetitive matches """ |
x, y = zip(*cluster)[:2]
return min(len(set(x)), len(set(y))) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def read_blast(blast_file, qorder, sorder, is_self=False, ostrip=True):
""" Read the blast and convert name into coordinates """ |
filtered_blast = []
seen = set()
bl = Blast(blast_file)
for b in bl:
    query, subject = b.query, b.subject
    if query == subject:  # self-hit
        continue
    if ostrip:
        # strip suffixes so names match the BED gene names
        query, subject = gene_name(query), gene_name(subject)
    if query not in qorder or subject not in sorder:
        continue
    qi, q = qorder[query]
    si, s = sorder[subject]

    if is_self:
        # remove redundant a<->b to one side when doing self-self BLAST
        if qi > si:
            query, subject = subject, query
            qi, si = si, qi
            q, s = s, q
        # Too close to diagonal! possible tandem repeats
        if q.seqid == s.seqid and si - qi < 40:
            continue

    # keep only the first HSP seen for each gene pair
    key = query, subject
    if key in seen:
        continue
    seen.add(key)

    # decorate the BlastLine with seqids and positional indices
    b.qseqid, b.sseqid = q.seqid, s.seqid
    b.qi, b.si = qi, si
    b.query, b.subject = query, subject

    filtered_blast.append(b)

logging.debug("A total of {0} BLAST imported from `{1}`.".\
    format(len(filtered_blast), blast_file))

return filtered_blast
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_options(p, args, dist=10):
    """
    scan and liftover have similar interfaces, so the shared options
    live here; returns (blast_file, anchor_file, dist, opts).
    """
    p.set_beds()
    p.add_option("--dist", default=dist, type="int",
            help="Extent of flanking regions to search [default: %default]")

    opts, arguments = p.parse_args(args)
    if len(arguments) != 2:
        sys.exit(not p.print_help())

    blast_file, anchor_file = arguments
    return blast_file, anchor_file, opts.dist, opts
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def layout(args):
""" %prog layout query.subject.simple query.seqids subject.seqids Compute optimal seqids order in a second genome, based on seqids on one genome, given the pairwise blocks in .simple format. """ |
from jcvi.algorithms.ec import GA_setup, GA_run

p = OptionParser(layout.__doc__)
p.set_beds()
p.set_cpus(cpus=32)
opts, args = p.parse_args(args)

if len(args) != 3:
    sys.exit(not p.print_help())

simplefile, qseqids, sseqids = args
qbed, sbed, qorder, sorder, is_self = check_beds(simplefile, p, opts)

# Comma-separated seqid lists; map each seqid to its position
qseqids = qseqids.strip().split(",")
sseqids = sseqids.strip().split(",")
qseqids_ii = dict((s, i) for i, s in enumerate(qseqids))
sseqids_ii = dict((s, i) for i, s in enumerate(sseqids))

blocks = SimpleFile(simplefile).blocks
# scores[(subject_index, query_seqid)] = summed synteny block scores
scores = defaultdict(int)
for a, b, c, d, score, orientation, hl in blocks:
    qi, q = qorder[a]
    si, s = sorder[c]
    qseqid, sseqid = q.seqid, s.seqid
    if sseqid not in sseqids:
        continue
    scores[sseqids_ii[sseqid], qseqid] += score

# (query_index, weight) pairs ordered by subject position
data = []
for (a, b), score in sorted(scores.items()):
    if b not in qseqids_ii:
        continue
    data.append((qseqids_ii[b], score))

# Optimize the query seqid order with a genetic algorithm so it is
# maximally colinear with the fixed subject order
tour = range(len(qseqids))
toolbox = GA_setup(tour)
toolbox.register("evaluate", colinear_evaluate_weights, data=data)
tour, fitness = GA_run(toolbox, ngen=100, npop=100, cpus=opts.cpus)
tour = [qseqids[x] for x in tour]

print(",".join(tour))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fromaligns(args):
    """
    %prog fromaligns out.aligns

    Convert aligns file (old MCscan output) to anchors file.
    """
    p = OptionParser(fromaligns.__doc__)
    p.set_outfile()
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    alignsfile, = args
    fp = must_open(alignsfile)
    fw = must_open(opts.outfile, "w")
    for line in fp:
        # each "## Alignment" header opens a new anchor block
        if line.startswith("## Alignment"):
            print("###", file=fw)
            continue
        # skip comments and blank lines
        if line[0] == '#' or not line.strip():
            continue
        # the gene pair follows the last ':' on the line
        pair = line.split(':')[-1].split()
        print("\t".join(pair[:2]), file=fw)
    fw.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mcscanq(args):
    """
    %prog mcscanq query.ids blocksfile

    Query multiple synteny blocks to get the closest alignment feature.
    Mostly used for 'highlighting' the lines in the synteny plot, drawn by
    graphics.karyotype and graphics.synteny.
    """
    p = OptionParser(mcscanq.__doc__)
    p.add_option("--color", help="Add color highlight, used in plotting")
    p.add_option("--invert", default=False, action="store_true",
            help="Invert query and subject [default: %default]")
    opts, args = p.parse_args(args)

    if len(args) < 2:
        sys.exit(not p.print_help())

    qids, blocksfile = args
    blocks = BlockFile(blocksfile)
    # one query gene per line; print every matching block line
    for raw in open(qids):
        query = raw.strip()
        for hit in blocks.query_gene(query, color=opts.color,
                                     invert=opts.invert):
            print(hit)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def spa(args):
""" %prog spa spafiles Convert chromosome ordering from SPA to simple lists. First column is the reference order. """ |
from jcvi.algorithms.graph import merge_paths
from jcvi.utils.cbook import uniqify

p = OptionParser(spa.__doc__)
p.add_option("--unmapped", default=False, action="store_true",
             help="Include unmapped scaffolds in the list [default: %default]")
opts, args = p.parse_args(args)

if len(args) < 1:
    sys.exit(not p.print_help())

spafiles = args
paths = []
mappings = []
missings = []
for spafile in spafiles:
    fp = open(spafile)
    path = []      # reference chromosomes, in file order
    mapping = []   # mapped scaffolds, in file order
    missing = []   # scaffolds flagged "unmapped"
    for row in fp:
        if row[0] == '#' or not row.strip():
            continue
        atoms = row.rstrip().split('\t')
        if len(atoms) == 2:
            # two-column rows are "unmapped<TAB>scaffold"
            a, c2 = atoms
            assert a == "unmapped"
            missing.append(c2)
            continue
        # three-column rows: reference chr, scaffold, orientation
        c1, c2, orientation = atoms
        path.append(c1)
        mapping.append(c2)
    paths.append(uniqify(path))
    mappings.append(mapping)
    missings.append(missing)

# Merge the per-file reference orders into one consensus ordering
ref = merge_paths(paths)
print("ref", len(ref), ",".join(ref))
for spafile, mapping, missing in zip(spafiles, mappings, missings):
    mapping = [x for x in mapping if "random" not in x]
    mapping = uniqify(mapping)
    # optionally pad short lists with the unmapped scaffolds
    if len(mapping) < 50 and opts.unmapped:
        mapping = uniqify(mapping + missing)

    print(spafile, len(mapping), ",".join(mapping))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def rebuild(args):
""" %prog rebuild blocksfile blastfile Rebuild anchors file from pre-built blocks file. """ |
p = OptionParser(rebuild.__doc__)
p.add_option("--header", default=False, action="store_true",
             help="First line is header [default: %default]")
p.add_option("--write_blast", default=False, action="store_true",
             help="Get blast records of rebuilt anchors [default: %default]")
p.set_beds()

opts, args = p.parse_args(args)
if len(args) != 2:
    sys.exit(not p.print_help())

blocksfile, blastfile = args
bk = BlockFile(blocksfile, header=opts.header)
# "pairs": every anchor gene pair, one per line
fw = open("pairs", "w")
for a, b, h in bk.iter_all_pairs():
    print("\t".join((a, b)), file=fw)
fw.close()

if opts.write_blast:
    AnchorFile("pairs").blast(blastfile, "pairs.blast")

# "tracks": the column index of each gene in the blocks file
fw = open("tracks", "w")
for g, col in bk.iter_gene_col():
    print("\t".join(str(x) for x in (g, col)), file=fw)
fw.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def coge(args):
""" %prog coge cogefile Convert CoGe file to anchors file. """ |
p = OptionParser(coge.__doc__)
opts, args = p.parse_args(args)

if len(args) != 1:
    sys.exit(not p.print_help())

cogefile, = args
fp = must_open(cogefile)
cogefile = cogefile.replace(".gz", "")
ksfile = cogefile + ".ks"
anchorsfile = cogefile + ".anchors"
fw_ks = must_open(ksfile, "w")
fw_ac = must_open(anchorsfile, "w")

tag = "###"
print(tag, file=fw_ks)  # header line of the Ks file
for header, lines in read_block(fp, tag):
    print(tag, file=fw_ac)  # block separator in the anchors file
    lines = list(lines)
    for line in lines:
        if line[0] == '#':
            continue
        # CoGe columns: ks, ka, then chr/gene/coords for A and B,
        # evalue and score
        ks, ka, achr, a, astart, astop, bchr, \
            b, bstart, bstop, ev, ss = line.split()
        # gene name is the 4th "||"-delimited field
        a = a.split("||")[3]
        b = b.split("||")[3]
        print("\t".join((a, b, ev)), file=fw_ac)
        print(",".join((";".join((a, b)), ks, ka, ks, ka)), file=fw_ks)

fw_ks.close()
fw_ac.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def matrix(args):
""" %prog matrix all.bed anchorfile matrixfile Make oxford grid based on anchors file. """ |
p = OptionParser(matrix.__doc__)
p.add_option("--seqids", help="File with seqids [default: %default]")
opts, args = p.parse_args(args)

if len(args) != 3:
    sys.exit(not p.print_help())

bedfile, anchorfile, matrixfile = args
ac = AnchorFile(anchorfile)
seqidsfile = opts.seqids
if seqidsfile:
    # optional whitelist of seqids to include in the grid
    seqids = SetFile(seqidsfile, delimiter=',')

order = Bed(bedfile).order
blocks = ac.blocks
# m[(a_seqid, b_seqid)] = total anchors between the two seqids
m = defaultdict(int)
fw = open(matrixfile, "w")
aseqids = set()
bseqids = set()
for block in blocks:
    a, b, scores = zip(*block)
    # all genes in a block share a seqid; the first pair suffices
    ai, af = order[a[0]]
    bi, bf = order[b[0]]
    aseqid = af.seqid
    bseqid = bf.seqid
    if seqidsfile:
        if (aseqid not in seqids) or (bseqid not in seqids):
            continue
    m[(aseqid, bseqid)] += len(block)
    aseqids.add(aseqid)
    bseqids.add(bseqid)

# emit the Oxford grid: header row of b-seqids, one row per a-seqid
aseqids = list(aseqids)
bseqids = list(bseqids)
print("\t".join(["o"] + bseqids), file=fw)
for aseqid in aseqids:
    print("\t".join([aseqid] + \
        [str(m[(aseqid, x)]) for x in bseqids]), file=fw)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def summary(args):
""" %prog summary anchorfile Provide statistics for pairwise blocks. """ |
from jcvi.utils.cbook import SummaryStats

p = OptionParser(summary.__doc__)
p.add_option("--prefix", help="Generate per block stats [default: %default]")
opts, args = p.parse_args(args)

if len(args) != 1:
    sys.exit(not p.print_help())

anchorfile, = args
ac = AnchorFile(anchorfile)
clusters = ac.blocks
if clusters == [[]]:
    # empty anchors file — nothing to summarize
    logging.debug("A total of 0 anchor was found. Aborted.")
    raise ValueError("A total of 0 anchor was found. Aborted.")

nclusters = len(clusters)
nanchors = [len(c) for c in clusters]
nranchors = [_score(c) for c in clusters]  # non-redundant anchors
print("A total of {0} (NR:{1}) anchors found in {2} clusters.".\
    format(sum(nanchors), sum(nranchors), nclusters), file=sys.stderr)
print("Stats:", SummaryStats(nanchors), file=sys.stderr)
print("NR stats:", SummaryStats(nranchors), file=sys.stderr)

prefix = opts.prefix
if prefix:
    # per-block stats: "<prefix><zero-padded index><TAB><block size>"
    pad = len(str(nclusters))
    for i, c in enumerate(clusters):
        block_id = "{0}{1:0{2}d}".format(prefix, i + 1, pad)
        print("\t".join((block_id, str(len(c)))))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def stats(args):
""" %prog stats blocksfile Provide statistics for MCscan-style blocks. The count of homologs in each pivot gene is recorded. """ |
from jcvi.utils.cbook import percentage

p = OptionParser(stats.__doc__)
opts, args = p.parse_args(args)

if len(args) != 1:
    sys.exit(not p.print_help())

blocksfile, = args
fp = open(blocksfile)
# counts[k] = number of pivot genes with exactly k homologs ('.' = none)
counts = defaultdict(int)
total = orthologous = 0
for row in fp:
    atoms = row.rstrip().split("\t")
    hits = [x for x in atoms[1:] if x != '.']
    counts[len(hits)] += 1
    total += 1
    # the first column after the pivot holds the orthologous match
    if atoms[1] != '.':
        orthologous += 1

print("Total lines: {0}".format(total), file=sys.stderr)
for i, n in sorted(counts.items()):
    print("Count {0}: {1}".format(i, percentage(n, total)), file=sys.stderr)

print(file=sys.stderr)

# restrict to pivot genes that have at least one homolog
matches = sum(n for i, n in counts.items() if i != 0)
print("Total lines with matches: {0}".\
    format(percentage(matches, total)), file=sys.stderr)
for i, n in sorted(counts.items()):
    if i == 0:
        continue
    print("Count {0}: {1}".format(i, percentage(n, matches)), file=sys.stderr)

print(file=sys.stderr)
print("Orthologous matches: {0}".\
    format(percentage(orthologous, matches)), file=sys.stderr)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write_details(fw, details, bed):
    """
    Write per-gene depth to the file handle `fw`.

    `details` is an iterable of (start_idx, end_idx, depth) tuples
    indexing into `bed`; one "<gene accn><TAB><depth>" line is written
    for every gene in each half-open interval [start_idx, end_idx).
    """
    for a, b, depth in details:
        # range() instead of Python-2-only xrange(); the rest of the
        # module already uses Python 3 print syntax.
        for i in range(a, b):
            gi = bed[i].accn
            print("\t".join((gi, str(depth))), file=fw)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def blast(self, blastfile=None, outfile=None):
    """
    Convert the anchor file to a 12-column BLAST file.

    If `blastfile` is given, each anchor pair is looked up there (in
    either orientation) to recover the original BLAST line; otherwise a
    minimal BLAST line is fabricated from the two IDs alone. Returns
    the path of the file written.
    """
    from jcvi.formats.blast import BlastSlow, BlastLineByConversion

    if not outfile:
        outfile = self.filename + ".blast"

    # Use an empty dict when no BLAST file is supplied; previously this
    # was None, which made the `(a, b) in blasts` membership tests below
    # raise TypeError whenever blastfile was omitted.
    if blastfile is not None:
        blasts = BlastSlow(blastfile).to_dict()
    else:
        blasts = {}

    fw = must_open(outfile, "w", checkexists=True)
    nlines = 0
    for a, b, id in self.iter_pairs():
        if (a, b) in blasts:
            bline = blasts[(a, b)]
        elif (b, a) in blasts:
            bline = blasts[(b, a)]
        else:
            # pair absent from the BLAST file: synthesize a stub line
            line = "\t".join((a, b))
            bline = BlastLineByConversion(line, mode="110000000000")
        print(bline, file=fw)
        nlines += 1
    fw.close()

    logging.debug("A total of {0} BLAST lines written to `{1}`."\
                  .format(nlines, outfile))

    return outfile
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def filter(args):
""" %prog filter test.blast Produce a new blast file and filter based on: - score: >= cutoff - pctid: >= cutoff - hitlen: >= cutoff - evalue: <= cutoff - ids: valid ids Use --inverse to obtain the complementary records for the criteria above. - noself: remove self-self hits """ |
p = OptionParser(filter.__doc__)
p.add_option("--score", dest="score", default=0, type="int",
             help="Score cutoff")
p.set_align(pctid=95, hitlen=100, evalue=.01)
p.add_option("--noself", default=False, action="store_true",
             help="Remove self-self hits")
p.add_option("--ids", help="Path to file with ids to retain")
p.add_option("--inverse", default=False, action="store_true",
             help="Similar to grep -v, inverse")
p.set_outfile(outfile=None)

opts, args = p.parse_args(args)
if len(args) != 1:
    sys.exit(not p.print_help())

if opts.ids:
    # collect ids to retain; entries may be comma- or space-separated
    ids = set()
    for row in must_open(opts.ids):
        if row[0] == "#":
            continue
        row = row.replace(",", "\t")
        ids.update(row.split())
else:
    ids = None

blastfile, = args
inverse = opts.inverse
outfile = opts.outfile
fp = must_open(blastfile)

score, pctid, hitlen, evalue, noself = \
    opts.score, opts.pctid, opts.hitlen, opts.evalue, opts.noself
# default output name encodes the pctid/hitlen cutoffs
newblastfile = blastfile + ".P{0}L{1}".format(int(pctid), hitlen) if \
    outfile is None else outfile
if inverse:
    newblastfile += ".inverse"
fw = must_open(newblastfile, "w")
for row in fp:
    if row[0] == '#':
        continue
    c = BlastLine(row)

    if ids:
        # both ends must be in the id set to pass the filter
        if c.query in ids and c.subject in ids:
            noids = False
        else:
            noids = True
    else:
        noids = None

    remove = c.score < score or \
        c.pctid < pctid or \
        c.hitlen < hitlen or \
        c.evalue > evalue or \
        noids

    if inverse:
        remove = not remove

    # self-self removal applies after --inverse, i.e. unconditionally
    remove = remove or (noself and c.query == c.subject)

    if not remove:
        print(row.rstrip(), file=fw)

fw.close()

return newblastfile
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def collect_gaps(blast, use_subject=False):
    """
    Yield the gap sizes between adjacent HSPs in the BLAST records.

    HSPs are ordered along the query (default) or the subject
    (use_subject=True); only strictly positive gaps are yielded, so
    overlapping or abutting HSPs contribute nothing. Note that `blast`
    is sorted in place.
    """
    if use_subject:
        start_of = lambda rec: rec.sstart
        stop_of = lambda rec: rec.sstop
    else:
        start_of = lambda rec: rec.qstart
        stop_of = lambda rec: rec.qstop

    blast.sort(key=start_of)
    for prev, curr in zip(blast, blast[1:]):
        gap = start_of(curr) - stop_of(prev)
        if gap > 0:
            yield gap
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def gaps(args):
""" %prog gaps A_vs_B.blast Find distribution of gap sizes betwen adjacent HSPs. """ |
p = OptionParser(gaps.__doc__)
opts, args = p.parse_args(args)

if len(args) != 1:
    sys.exit(not p.print_help())

blastfile, = args
blast = BlastSlow(blastfile)
logging.debug("A total of {} records imported".format(len(blast)))
# gap sizes between adjacent HSPs, along query and subject axes
query_gaps = list(collect_gaps(blast))
subject_gaps = list(collect_gaps(blast, use_subject=True))
logging.debug("Query gaps: {} Subject gaps: {}"\
    .format(len(query_gaps), len(subject_gaps)))

# plot the distribution of query-side gap sizes
from jcvi.graphics.base import savefig
import seaborn as sns
sns.distplot(query_gaps)
savefig("query_gaps.pdf")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def rbbh(args):
    """
    %prog rbbh A_vs_B.blast B_vs_A.blast

    Identify the reciprocal best blast hit for each query sequence in set A
    when compared to set B.

    This program assumes that the BLAST results have already been filtered
    based on a combination of %id, %cov, e-value cutoffs. BLAST output should
    be in tabular `-m 8` format.
    """
    p = OptionParser(rbbh.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    abfile, bafile, = args
    ab = Blast(abfile)
    ba = Blast(bafile)
    ab_hits = ab.best_hits
    ba_hits = ba.best_hits

    # a -> b is reciprocal when b's own best hit points back at a
    for aquery in ab_hits:
        forward_best = ab_hits[aquery].subject
        reciprocal = ba_hits.get(forward_best)
        if reciprocal and reciprocal.subject == aquery:
            print("\t".join(str(x) for x in (aquery, forward_best)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def score(args):
""" %prog score blastfile query.fasta A.ids Add up the scores for each query seq. Go through the lines and for each query sequence, add up the scores when subject is in each pile by A.ids. """ |
from jcvi.formats.base import SetFile
from jcvi.formats.fasta import Fasta

p = OptionParser(score.__doc__)
opts, args = p.parse_args(args)

if len(args) != 3:
    sys.exit(not p.print_help())

blastfile, fastafile, idsfile = args
ids = SetFile(idsfile)

blast = Blast(blastfile)
# scores[query] = summed alignment scores against subjects in the id pile
scores = defaultdict(int)
for b in blast:
    query = b.query
    subject = b.subject
    if subject not in ids:
        continue
    scores[query] += b.score

logging.debug("A total of {0} ids loaded.".format(len(ids)))

# report in FASTA order so every query appears, defaulting to 0
f = Fasta(fastafile)
for s in f.iterkeys_ordered():
    sc = scores.get(s, 0)
    print("\t".join((s, str(sc))))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def annotation(args):
    """
    %prog annotation blastfile > annotations

    Create simple two column files from the first two columns in blastfile.
    Use --queryids and --subjectids to switch IDs or descriptions.
    """
    # NOTE: fixed the user-facing help typo "coluns" -> "columns"
    from jcvi.formats.base import DictFile

    p = OptionParser(annotation.__doc__)
    p.add_option("--queryids", help="Query IDS file to switch [default: %default]")
    p.add_option("--subjectids", help="Subject IDS file to switch [default: %default]")
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    blastfile, = args

    d = "\t"
    # optional two-column rename maps for query/subject ids
    qids = DictFile(opts.queryids, delimiter=d) if opts.queryids else None
    sids = DictFile(opts.subjectids, delimiter=d) if opts.subjectids else None
    blast = Blast(blastfile)
    for b in blast:
        query, subject = b.query, b.subject
        if qids:
            query = qids[query]
        if sids:
            subject = sids[subject]
        print("\t".join((query, subject)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def completeness(args):
""" %prog completeness blastfile ref.fasta > outfile Print statistics for each gene, the coverage of the alignment onto the best hit, as an indicator for completeness of the gene model. For example, one might BLAST sugarcane ESTs against sorghum annotations as reference, to find full-length transcripts. """ |
from jcvi.utils.range import range_minmax
from jcvi.utils.cbook import SummaryStats

p = OptionParser(completeness.__doc__)
p.add_option("--ids",
             help="Save ids that are over 50% complete [default: %default]")
opts, args = p.parse_args(args)

if len(args) != 2:
    sys.exit(not p.print_help())

blastfile, fastafile = args
idsfile = opts.ids
f = Sizes(fastafile).mapping
b = BlastSlow(blastfile)

valid = []
data = []
cutoff = 50
# BlastSlow yields hits grouped by query, so groupby works directly
for query, blines in groupby(b, key=lambda x: x.query):
    blines = list(blines)
    ranges = [(x.sstart, x.sstop) for x in blines]
    b = blines[0]  # first hit defines the subject used for reporting
    query, subject = b.query, b.subject

    # extent of all HSPs on the subject, and distance to each terminus
    rmin, rmax = range_minmax(ranges)
    subject_len = f[subject]

    nterminal_dist = rmin - 1
    cterminal_dist = subject_len - rmax
    covered = (rmax - rmin + 1) * 100 / subject_len
    if covered > cutoff:
        valid.append(query)

    data.append((nterminal_dist, cterminal_dist, covered))
    print("\t".join(str(x) for x in (query, subject,
        nterminal_dist, cterminal_dist, covered)))

nd, cd, cv = zip(*data)
m = "Total: {0}, Coverage > {1}%: {2}\n".\
    format(len(data), cutoff, len(valid))
m += "N-terminal: {0}\n".format(SummaryStats(nd))
m += "C-terminal: {0}\n".format(SummaryStats(cd))
m += "Coverage: {0}".format(SummaryStats(cv))
print(m, file=sys.stderr)

if idsfile:
    fw = open(idsfile, "w")
    print("\n".join(valid), file=fw)
    logging.debug("A total of {0} ids (cov > {1} %) written to `{2}`.".\
        format(len(valid), cutoff, idsfile))
    fw.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def annotate(args):
    """
    %prog annotate blastfile query.fasta subject.fasta

    Annotate overlap types (dovetail, contained, etc) in BLAST tabular file.
    """
    from jcvi.assembly.goldenpath import Cutoff, Overlap, Overlap_types

    p = OptionParser(annotate.__doc__)
    p.set_align(pctid=94, hitlen=500)
    p.add_option("--hang", default=500, type="int",
                 help="Maximum overhang length")
    opts, args = p.parse_args(args)

    if len(args) != 3:
        sys.exit(not p.print_help())

    blastfile, afasta, bfasta = args
    qsizes = Sizes(afasta).mapping
    ssizes = Sizes(bfasta).mapping
    cutoff = Cutoff(opts.pctid, opts.hitlen, opts.hang)
    logging.debug(str(cutoff))

    for line in must_open(blastfile):
        b = BlastLine(line)
        if b.query == b.subject:
            continue  # ignore self-hits
        ov = Overlap(b, qsizes[b.query], ssizes[b.subject], cutoff)
        if not ov.otype:
            continue  # hit did not classify as any overlap type
        ov.print_graphic()
        print("{0}\t{1}".format(b, Overlap_types[ov.otype]))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def top10(args):
    """
    %prog top10 blastfile.best

    Count the most frequent 10 hits. Usually the BLASTFILE needs to be screened
    to get the best match. You can also provide an .ids file to query the ids.
    For example the ids file can contain the seqid to species mapping.

    The ids file is two-column, and can sometimes be generated by
    `jcvi.formats.fasta ids --description`.
    """
    from jcvi.formats.base import DictFile

    p = OptionParser(top10.__doc__)
    p.add_option("--top", default=10, type="int",
                 help="Top N taxa to extract [default: %default]")
    p.add_option("--ids", default=None,
                 help="Two column ids file to query seqid [default: %default]")
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    blastfile, = args
    mapping = DictFile(opts.ids, delimiter="\t") if opts.ids else {}

    # Count subject column occurrences via a shell pipeline
    cmd = "cut -f2 {0}".format(blastfile)
    cmd += " | sort | uniq -c | sort -k1,1nr | head -n {0}".format(opts.top)
    for row in popen(cmd):
        count, seqid = row.split()
        print("\t".join((count, mapping.get(seqid, seqid))))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cscore(args):
    """
    %prog cscore blastfile > cscoreOut

    See supplementary info for sea anemone genome paper, C-score formula:

        cscore(A,B) = score(A,B) / max(best score for A, best score for B)

    A C-score of one is the same as reciprocal best hit (RBH).

    Output file will be 3-column (query, subject, cscore). Use --cutoff to
    select a different cutoff.
    """
    from jcvi.utils.cbook import gene_name

    p = OptionParser(cscore.__doc__)
    p.add_option("--cutoff", default=.9999, type="float",
            help="Minimum C-score to report [default: %default]")
    p.add_option("--pct", default=False, action="store_true",
            help="Also include pct as last column [default: %default]")
    p.add_option("--writeblast", default=False, action="store_true",
            help="Also write filtered blast file [default: %default]")
    p.set_stripnames()
    p.set_outfile()
    opts, args = p.parse_args(args)
    ostrip = opts.strip_names
    writeblast = opts.writeblast
    outfile = opts.outfile

    if len(args) != 1:
        sys.exit(not p.print_help())

    blastfile, = args

    # First pass: record the best (highest) BLAST score for every query id
    # and every subject id, in a single shared table
    blast = Blast(blastfile)
    logging.debug("Register best scores ..")
    best_score = defaultdict(float)
    for b in blast:
        query, subject = b.query, b.subject
        if ostrip:
            query, subject = gene_name(query), gene_name(subject)

        score = b.score
        if score > best_score[query]:
            best_score[query] = score
        if score > best_score[subject]:
            best_score[subject] = score

    # Second pass: compute each hit's C-score against those best scores,
    # keeping only the highest-C-score line per (query, subject) pair
    blast = Blast(blastfile)
    pairs = {}
    cutoff = opts.cutoff
    for b in blast:
        query, subject = b.query, b.subject
        if ostrip:
            query, subject = gene_name(query), gene_name(subject)

        score = b.score
        pctid = b.pctid
        s = score / max(best_score[query], best_score[subject])
        if s > cutoff:
            pair = (query, subject)
            if pair not in pairs or s > pairs[pair][0]:
                pairs[pair] = (s, pctid, b)

    fw = must_open(outfile, "w")
    if writeblast:
        fwb = must_open(outfile + ".filtered.blast", "w")
    pct = opts.pct
    for (query, subject), (s, pctid, b) in sorted(pairs.items()):
        args = [query, subject, "{0:.2f}".format(s)]
        if pct:
            args.append("{0:.1f}".format(pctid))
        print("\t".join(args), file=fw)
        if writeblast:
            print(b, file=fwb)
    fw.close()
    if writeblast:
        fwb.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_distance(a, b, xaxis=True):
    """Return the absolute end-to-end distance between two BLAST HSPs.

    The distance is measured along the query axis when `xaxis` is True,
    otherwise along the subject axis. "0" serves as a dummy seqid since
    both ranges are compared on the same axis.
    """
    if xaxis:
        arange = ("0", a.qstart, a.qstop, a.orientation)
        brange = ("0", b.qstart, b.qstop, b.orientation)
    else:
        arange = ("0", a.sstart, a.sstop, a.orientation)
        brange = ("0", b.sstart, b.sstop, b.orientation)
    dist, oo = range_distance(arange, brange, distmode="ee")
    return abs(dist)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def combine_HSPs(a):
    """Merge a list of HSPs (same query/subject pair) into a single BlastLine.

    The first HSP in `a` is mutated in place to absorb the rest: coordinate
    spans are unioned, hit lengths, mismatches, gaps and scores are summed,
    and pctid is recomputed from the pooled mismatch/gap counts.
    """
    merged = a[0]
    if len(a) == 1:
        return merged

    for hsp in a[1:]:
        assert merged.query == hsp.query
        assert merged.subject == hsp.subject
        merged.hitlen += hsp.hitlen
        merged.nmismatch += hsp.nmismatch
        merged.ngaps += hsp.ngaps
        merged.qstart = min(merged.qstart, hsp.qstart)
        merged.qstop = max(merged.qstop, hsp.qstop)
        merged.sstart = min(merged.sstart, hsp.sstart)
        merged.sstop = max(merged.sstop, hsp.sstop)
        if merged.has_score:
            merged.score += hsp.score

    merged.pctid = 100 - (merged.nmismatch + merged.ngaps) * 100. / merged.hitlen
    return merged
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def chain(args):
    """
    %prog chain blastfile

    Chain adjacent HSPs together to form larger HSP.
    """
    p = OptionParser(chain.__doc__)
    p.add_option("--dist", dest="dist", default=100, type="int",
                 help="extent of flanking regions to search [default: %default]")
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    blastfile, = args
    dist = opts.dist
    assert dist > 0

    blast = BlastSlow(blastfile)
    logging.debug("A total of {} records imported".format(len(blast)))
    # Use the same search distance along both query and subject axes
    chained_hsps = chain_HSPs(blast, xdist=dist, ydist=dist)
    logging.debug("A total of {} records after chaining".format(len(chained_hsps)))

    for hsp in chained_hsps:
        print(hsp)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def condense(args):
    """
    %prog condense blastfile > blastfile.condensed

    Condense HSPs that belong to the same query-subject pair into one.
    """
    p = OptionParser(condense.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    blastfile, = args
    blast = BlastSlow(blastfile)
    key = lambda x: x.query
    blast.sort(key=key)

    # For each query, bucket its HSPs by (subject, orientation); each bucket
    # collapses into one combined HSP
    clusters = []
    for q, lines in groupby(blast, key=key):
        condenser = defaultdict(list)
        for b in lines:
            condenser[(b.subject, b.orientation)].append(b)
        clusters.extend(condenser.values())

    chained_hsps = sorted((combine_HSPs(c) for c in clusters),
                          key=lambda x: (x.query, -x.score))
    for b in chained_hsps:
        print(b)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mismatches(args):
    """
    %prog mismatches blastfile

    Print out histogram of mismatches of HSPs, usually for evaluating SNP level.
    """
    from jcvi.utils.cbook import percentage
    from jcvi.graphics.histogram import stem_leaf_plot

    p = OptionParser(mismatches.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    blastfile, = args

    # Mismatch + gap count of the best hit per query (removed the unused
    # `matches = 0` accumulator from the original)
    data = []
    b = Blast(blastfile)
    for query, bline in b.iter_best_hit():
        mm = bline.nmismatch + bline.ngaps
        data.append(mm)

    nonzeros = [x for x in data if x != 0]
    title = "Polymorphic sites: {0}".format(percentage(len(nonzeros), len(data)))
    stem_leaf_plot(data, 0, 20, 20, title=title)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def swap(args):
    """
    %prog swap blastfile

    Write a new blast file (`blastfile.swapped`) with query and subject
    swapped, then sort it.
    """
    p = OptionParser(swap.__doc__)
    opts, args = p.parse_args(args)

    # BUGFIX: the original tested `len(args) < 1`, which allowed extra
    # arguments through only to crash on the unpack below; require exactly one.
    if len(args) != 1:
        sys.exit(not p.print_help())

    blastfile, = args
    swappedblastfile = blastfile + ".swapped"
    fp = must_open(blastfile)
    fw = must_open(swappedblastfile, "w")
    for row in fp:
        b = BlastLine(row)
        print(b.swapped, file=fw)

    fw.close()
    fp.close()  # release the input handle (was leaked in the original)
    sort([swappedblastfile])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bed(args):
    """
    %prog bed blastfile

    Print out bed file based on coordinates in BLAST report. By default,
    write out subject positions. Use --swap to write query positions.
    """
    from jcvi.formats.bed import sort as bed_sort

    p = OptionParser(bed.__doc__)
    p.add_option("--swap", default=False, action="store_true",
                 help="Write query positions [default: %default]")
    p.add_option("--both", default=False, action="store_true",
                 help="Generate one line for each of query and subject")
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(p.print_help())

    blastfile, = args
    # Subject coordinates by default; query coordinates when swapped; both
    # when --both is given
    write_subject = (not opts.swap) or opts.both
    write_query = opts.swap or opts.both

    if blastfile.endswith(".blast"):
        bedfile = "{0}.bed".format(blastfile.rsplit(".", 1)[0])
    else:
        bedfile = "{0}.bed".format(blastfile)

    fw = open(bedfile, "w")
    for row in must_open(blastfile):
        b = BlastLine(row)
        if write_subject:
            print(b.bedline, file=fw)
        if write_query:
            print(b.swapped.bedline, file=fw)

    logging.debug("File written to `{0}`.".format(bedfile))
    fw.close()
    bed_sort([bedfile, "-i"])

    return bedfile
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def best(args):
    """
    %prog best blastfile

    print the best hit for each query in the blastfile
    """
    p = OptionParser(best.__doc__)
    p.add_option("-n", default=1, type="int",
            help="get best N hits [default: %default]")
    p.add_option("--nosort", default=False, action="store_true",
            help="assume BLAST is already sorted [default: %default]")
    p.add_option("--hsps", default=False, action="store_true",
            help="get all HSPs for the best pair [default: %default]")
    p.add_option("--subject", default=False, action="store_true",
            help="get best hit(s) for subject genome instead [default: %default]")
    p.set_tmpdir()
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    blastfile, = args
    n = opts.n
    hsps = opts.hsps
    tmpdir = opts.tmpdir
    # Rank hits per query by default; --subject flips the reference column
    ref = "query" if not opts.subject else "subject"

    if not opts.nosort:
        # Sort the BLAST file in place first (iter_best_hit reads it
        # sequentially); --refscore sorts by the subject side
        sargs = [blastfile]
        if tmpdir:
            sargs += ["-T {0}".format(tmpdir)]
        if ref != "query":
            sargs += ["--refscore"]
        sort(sargs)
    else:
        logging.debug("Assuming sorted BLAST")

    # Output file name records which side was used as reference
    if not opts.subject:
        bestblastfile = blastfile + ".best"
    else:
        bestblastfile = blastfile + ".subject.best"
    fw = open(bestblastfile, "w")

    b = Blast(blastfile)
    for q, bline in b.iter_best_hit(N=n, hsps=hsps, ref=ref):
        print(bline, file=fw)

    return bestblastfile
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def summary(args):
    """
    %prog summary blastfile

    Provide summary on id% and cov%, for both query and reference. Often used
    in comparing genomes (based on NUCMER results).
    """
    p = OptionParser(summary.__doc__)
    p.add_option("--strict", default=False, action="store_true",
                 help="Strict 'gapless' mode. Exclude gaps from covered base.")
    p.add_option("--tabular", default=False, action="store_true",
                 help="Print succint tabular output")
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    blastfile, = args
    stats = get_stats(blastfile, strict=opts.strict)
    # Tabular mode prints one condensed line; otherwise a full report
    if opts.tabular:
        print(str(stats))
    else:
        stats.print_stats()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def subset(args):
    """
    %prog subset blastfile qbedfile sbedfile

    Extract blast hits between given query and subject chrs.

    If --qchrs or --schrs is not given, then all chrs from q/s genome will be
    included. However one of --qchrs and --schrs must be specified. Otherwise
    the script will do nothing.
    """
    p = OptionParser(subset.__doc__)
    p.add_option("--qchrs", default=None,
            help="query chrs to extract, comma sep [default: %default]")
    p.add_option("--schrs", default=None,
            help="subject chrs to extract, comma sep [default: %default]")
    p.add_option("--convert", default=False, action="store_true",
            help="convert accns to chr_rank [default: %default]")
    opts, args = p.parse_args(args)

    if len(args) != 3:
        sys.exit(not p.print_help())

    blastfile, qbedfile, sbedfile = args
    qchrs = opts.qchrs
    schrs = opts.schrs
    assert qchrs or schrs, p.print_help()
    convert = opts.convert

    # The output name encodes the chromosome selection,
    # e.g. file.blast.chr1.chr5.blast
    outfile = blastfile + "."
    if qchrs:
        outfile += qchrs + "."
        qchrs = set(qchrs.split(","))
    else:
        qchrs = set(Bed(qbedfile).seqids)
    if schrs:
        schrs = set(schrs.split(","))
        # Only append subject chrs to the name when they differ from the
        # query-side selection
        if qbedfile != sbedfile or qchrs != schrs:
            outfile += ",".join(schrs) + "."
    else:
        schrs = set(Bed(sbedfile).seqids)
    outfile += "blast"

    qo = Bed(qbedfile).order  # accn -> (rank, bed line)
    so = Bed(sbedfile).order

    fw = must_open(outfile, "w")
    for b in Blast(blastfile):
        q, s = b.query, b.subject
        if qo[q][1].seqid in qchrs and so[s][1].seqid in schrs:
            if convert:
                # Rewrite accessions as seqid_rank, e.g. chr1_00042
                b.query = qo[q][1].seqid + "_" + "{0:05d}".format(qo[q][0])
                b.subject = so[s][1].seqid + "_" + "{0:05d}".format(so[s][0])
            print(b, file=fw)
    fw.close()
    logging.debug("Subset blastfile written to `{0}`".format(outfile))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def flanking(args):
    """
    %prog flanking SI.ids liftover.bed master.txt master-removed.txt

    Extract flanking genes for given SI loci.
    """
    p = OptionParser(flanking.__doc__)
    p.add_option("-N", default=50, type="int",
                 help="How many genes on both directions")
    opts, args = p.parse_args(args)

    if len(args) != 4:
        sys.exit(not p.print_help())

    SI, liftover, master, te = args
    N = opts.N
    SI = SetFile(SI, column=0, delimiter='.')
    liftover = Bed(liftover)
    order = liftover.order

    # Collect accessions within N genes of each SI locus, same chromosome only
    neighbors = set()
    for s in SI:
        si, sline = order[s]  # rank and bed line of the SI locus
        LB = max(si - N, 0)
        # BUGFIX: the original used min(si + N, len(liftover)) as an
        # INCLUSIVE upper bound, indexing one past the end when the SI
        # locus sits within N genes of the last bed entry
        RB = min(si + N, len(liftover) - 1)
        for j in range(LB, RB + 1):
            a = liftover[j]
            if a.seqid != sline.seqid:
                continue
            neighbors.add(a.accn)

    dmain = DictFile(master, keypos=0, valuepos=None, delimiter='\t')
    dte = DictFile(te, keypos=0, valuepos=None, delimiter='\t')
    header = next(open(master))
    print("\t".join(("SI/Neighbor", "Gene/TE", header.strip())))
    # Emit each neighbor row, tagged by SI membership and gene/TE status
    for a in liftover:
        s = a.accn
        if s not in neighbors:
            continue
        tag = "SI" if s in SI else "neighbor"
        if s in dmain:
            print("\t".join([tag, "gene"] + dmain[s]))
        elif s in dte:
            print("\t".join([tag, "TE"] + dte[s]))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def geneinfo(args):
    """
    %prog geneinfo pineapple.20141004.bed liftover.bed pineapple.20150413.bed \
        note.txt interproscan.txt

    Build gene info table from various sources. The three beds contain
    information on the original scaffolds, linkage groups, and final selected
    loci (after removal of TEs and split loci). The final two text files
    contain AHRD and domain data.
    """
    p = OptionParser(geneinfo.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 5:
        sys.exit(not p.print_help())

    scfbed, liftoverbed, lgbed, note, ipr = args
    note = DictFile(note, delimiter="\t")
    scfbed = Bed(scfbed)
    lgorder = Bed(lgbed).order
    liftover = Bed(liftoverbed).order
    header = "Accession Scaffold-position LG-position "\
             "Description Interpro-domain Interpro-description "\
             "GO-term KEGG".split()
    ipr = read_interpro(ipr)

    fw_clean = must_open("master.txt", "w")
    fw_removed = must_open("master-removed.txt", "w")
    for fw in (fw_clean, fw_removed):
        print("\t".join(header), file=fw)

    for b in scfbed:
        accession = b.accn
        scaffold_position = b.tag
        if accession in liftover:
            lg_position = liftover[accession][-1].tag
        else:
            lg_position = "split"
        # Genes absent from the final linkage-group bed go to the removed file
        fw = fw_clean if accession in lgorder else fw_removed
        description = note[accession]
        interpro = interpro_description = go = kegg = ""
        if accession in ipr:
            interpro, interpro_description, go, kegg = ipr[accession]
        print("\t".join((accession, scaffold_position, lg_position,
                description, interpro, interpro_description, go, kegg)), file=fw)

    # BUGFIX: the original called fw.close() once, closing only whichever
    # handle `fw` last pointed to and leaking the other; close both.
    fw_clean.close()
    fw_removed.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ploidy(args):
    """
    %prog ploidy seqids karyotype.layout mcscan.out all.bed synteny.layout

    Build a figure that calls graphics.karyotype to illustrate the high ploidy
    of WGD history of pineapple genome. The script calls both graphics.karyotype
    and graphic.synteny.
    """
    p = OptionParser(ploidy.__doc__)
    p.add_option("--switch", help="Rename the seqid with two-column file")
    opts, args, iopts = p.set_image_options(args, figsize="9x7")

    if len(args) != 5:
        sys.exit(not p.print_help())

    seqidsfile, klayout, datafile, bedfile, slayout = args

    fig = plt.figure(1, (iopts.w, iopts.h))
    root = fig.add_axes([0, 0, 1, 1])

    # Panel A: karyotype track; Panel B: synteny blocks
    Karyotype(fig, root, seqidsfile, klayout)
    Synteny(fig, root, datafile, bedfile, slayout, switch=opts.switch)

    # legend showing the orientation of the genes
    draw_gene_legend(root, .27, .37, .52)

    # annotate the WGD events (tau, sigma, rho), each doubling the genome
    fc = 'lightslategrey'
    x = .09
    radius = .012
    TextCircle(root, x, .825, r'$\tau$', radius=radius, fc=fc)
    TextCircle(root, x, .8, r'$\sigma$', radius=radius, fc=fc)
    TextCircle(root, x, .72, r'$\rho$', radius=radius, fc=fc)
    # "x2" multipliers next to each WGD marker
    for ypos in (.825, .8, .72):
        root.text(.12, ypos, r"$\times2$", color=fc, ha="center", va="center")
    # dotted connectors linking the WGD markers vertically
    root.plot([x, x], [.85, .775], ":", color=fc, lw=2)
    root.plot([x, x], [.75, .675], ":", color=fc, lw=2)

    labels = ((.04, .96, 'A'), (.04, .54, 'B'))
    panel_labels(root, labels)

    root.set_xlim(0, 1)
    root.set_ylim(0, 1)
    root.set_axis_off()

    pf = "pineapple-karyotype"
    image_name = pf + "." + iopts.format
    savefig(image_name, dpi=iopts.dpi, iopts=iopts)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def node_to_edge(edges, directed=True):
    """From a list of edges, record per node the incident edge indices.

    Each edge is a tuple whose first two items are (a, b). Returns
    (outgoing, incoming, nodes) when directed; otherwise (outgoing, nodes)
    where the single mapping collects all incident edges. Mapping values
    are sets of indices into `edges`; `nodes` is sorted.
    """
    outgoing = defaultdict(set)
    # Undirected graphs share one mapping for both endpoints
    incoming = defaultdict(set) if directed else outgoing
    nodes = set()
    for idx, edge in enumerate(edges):
        head, tail = edge[0], edge[1]
        outgoing[head].add(idx)
        incoming[tail].add(idx)
        nodes.update((head, tail))

    nodes = sorted(nodes)
    return (outgoing, incoming, nodes) if directed else (outgoing, nodes)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def resample(args):
    """
    %prog resample yellow-catfish-resample.txt medicago-resample.txt

    Plot ALLMAPS performance across resampled real data.
    """
    p = OptionParser(resample.__doc__)
    opts, args, iopts = p.set_image_options(args, figsize="8x4", dpi=300)

    if len(args) != 2:
        sys.exit(not p.print_help())

    dataA, dataB = args
    fig = plt.figure(1, (iopts.w, iopts.h))
    root = fig.add_axes([0, 0, 1, 1])
    # Two side-by-side panels, one per dataset
    A = fig.add_axes([.1, .18, .32, .64])
    B = fig.add_axes([.6, .18, .32, .64])
    dataA = import_data(dataA)
    dataB = import_data(dataB)
    xlabel = "Fraction of markers"
    ylabels = ("Anchor rate", "Runtime (m)")
    legend = ("anchor rate", "runtime")
    # Each panel has twin y-axes: anchor rate (left) and runtime (right)
    subplot_twinx(A, dataA, xlabel, ylabels,
                  title="Yellow catfish", legend=legend)
    subplot_twinx(B, dataB, xlabel, ylabels,
                  title="Medicago", legend=legend)

    labels = ((.04, .92, "A"), (.54, .92, "B"))
    panel_labels(root, labels)

    normalize_axes(root)
    image_name = "resample." + iopts.format
    savefig(image_name, dpi=iopts.dpi, iopts=iopts)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def resamplestats(args):
    """
    %prog resamplestats prefix run.log

    Prepare resample results table. Ten subsets of original data were generated
    and ALLMAPS were iterated through them, creating `run.log` which contains
    the timing results. The anchor rate can be found in
    `prefix.0.{1-10}.summary.txt`.
    """
    p = OptionParser(resamplestats.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    pf, runlog = args
    fp = open(runlog)
    Real = "real"
    times = []
    # Collect wall-clock times from `time`-style lines, e.g. "real 10m31.513s"
    for row in fp:
        # real    10m31.513s
        if not row.startswith(Real):
            continue
        tag, time = row.split()
        assert tag == Real
        m, s = time.split('m')
        s = s.rstrip('s')
        m, s = float(m), float(s)
        time = m + s / 60  # convert minutes + seconds to fractional minutes
        times.append(time)

    N = len(times)

    # Anchor rates come from the per-subset summary files, whose names encode
    # the resampling ratio as a power of two: 2**i for i = -N+1 .. 0
    rates = []
    for i in xrange(-N + 1, 1, 1):
        summaryfile = "{0}.{1}.summary.txt".format(pf, 2 ** i)
        fp = open(summaryfile)
        lines = fp.readlines()
        # Total bases    580,791,244 (80.8%)    138,298,666 (19.2%)
        # the anchored percentage is the 4th field of the 2nd-to-last line
        pct = float(lines[-2].split()[3].strip("()%"))
        rates.append(pct / 100.)

    assert len(rates) == N

    print("ratio\tanchor-rate\ttime(m)")
    for j, i in enumerate(xrange(-N + 1, 1, 1)):
        print("{0}\t{1:.3f}\t{2:.3f}".format(i, rates[j], times[j]))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def comparebed(args):
    """
    %prog comparebed AP.chr.bed infer.bed

    Compare the scaffold links indicated in two bed files.
    """
    p = OptionParser(comparebed.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    abedfile, bbedfile = args
    abed = Bed(abedfile)
    bbed = Bed(bbedfile)
    # Query the links both ways for a symmetric comparison
    query_links(abed, bbed)
    query_links(bbed, abed)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def simulation(args):
    """
    %prog simulation inversion.txt translocation.txt maps.txt multimaps.txt

    Plot ALLMAPS accuracy across a range of simulated datasets.
    """
    p = OptionParser(simulation.__doc__)
    opts, args, iopts = p.set_image_options(args, dpi=300)

    if len(args) != 4:
        sys.exit(not p.print_help())

    dataA, dataB, dataC, dataD = args
    fig = plt.figure(1, (iopts.w, iopts.h))
    root = fig.add_axes([0, 0, 1, 1])
    # 2x2 grid of panels: A top-left, B top-right, C bottom-left, D bottom-right
    A = fig.add_axes([.12, .62, .35, .35])
    B = fig.add_axes([.62, .62, .35, .35])
    C = fig.add_axes([.12, .12, .35, .35])
    D = fig.add_axes([.62, .12, .35, .35])
    dataA = import_data(dataA)
    dataB = import_data(dataB)
    dataC = import_data(dataC)
    dataD = import_data(dataD)
    subplot(A, dataA, "Inversion error rate", "Accuracy", xlim=.5)
    subplot(B, dataB, "Translocation error rate", "Accuracy", xlim=.5,
            legend=("intra-chromosomal", "inter-chromosomal",
                    "75\% intra + 25\% inter"))
    subplot(C, dataC, "Number of input maps", "Accuracy", xcast=int)
    subplot(D, dataD, "Number of input maps", "Accuracy", xcast=int)

    labels = ((.03, .97, "A"), (.53, .97, "B"),
              (.03, .47, "C"), (.53, .47, "D"))
    panel_labels(root, labels)

    normalize_axes(root)
    image_name = "simulation." + iopts.format
    savefig(image_name, dpi=iopts.dpi, iopts=iopts)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def digest(args):
    """
    %prog digest fastafile NspI,BfuCI

    Digest fasta sequences to map restriction site positions.
    """
    p = OptionParser(digest.__doc__)
    p.set_outfile()
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    fastafile, enzymes = args
    wanted = enzymes.split(",")
    # Resolve the requested names against Biopython's enzyme collection
    enzymes = [e for e in AllEnzymes if str(e) in wanted]

    f = Fasta(fastafile, lazy=True)
    fw = must_open(opts.outfile, "w")

    header = ["Contig", "Length"] + [str(e) for e in enzymes]
    print("\t".join(header), file=fw)
    for name, rec in f.iteritems_ordered():
        row = [name, len(rec)]
        for e in enzymes:
            pos = e.search(rec.seq)
            # "na" marks contigs with no site for this enzyme
            row.append("|".join(str(x) for x in pos) if pos else "na")
        print("\t".join(str(x) for x in row), file=fw)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def extract_full(rec, sites, flank, fw):
    """
    Full extraction of seq flanking the sites.
    """
    seqlen = len(rec)
    for s in sites:
        start = max(s - flank, 0)
        stop = min(s + flank, seqlen)
        frag = rec.seq[start:stop].strip("Nn")  # trim ambiguous end bases
        newrec = SeqRecord(frag, id="{0}:{1}".format(rec.name, s),
                           description="")
        SeqIO.write([newrec], fw, "fasta")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def extract_ends(rec, sites, flank, fw, maxfragsize=800):
    """
    Extraction of ends of fragments above certain size.

    For each cut site, write the left and/or right flanking sequence only
    when the adjacent restriction fragment (to the previous/next site, or
    the contig end) is no longer than `maxfragsize`.
    """
    nsites = len(sites)
    size = len(rec)
    for i, s in enumerate(sites):
        newid = "{0}:{1}".format(rec.name, s)
        recs = []

        # Left flank: keep only if the upstream fragment fits maxfragsize
        if i == 0 or s - sites[i - 1] <= maxfragsize:
            newidL = newid + "L"
            left = max(s - flank, 0)
            right = s
            frag = rec.seq[left:right].strip("Nn")  # trim ambiguous bases
            recL = SeqRecord(frag, id=newidL, description="")
            if i == 0 and s > maxfragsize:  # Contig L-end, fragment too long
                pass
            else:
                recs.append(recL)

        # Right flank: keep only if the downstream fragment fits maxfragsize
        if i == nsites - 1 or sites[i + 1] - s <= maxfragsize:
            newidR = newid + "R"
            left = s
            right = min(s + flank, size)
            frag = rec.seq[left:right].strip("Nn")
            recR = SeqRecord(frag, id=newidR, description="")
            if i == nsites - 1 and size - s > maxfragsize:  # Contig R-end, too long
                pass
            else:
                recs.append(recR)

        SeqIO.write(recs, fw, "fasta")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fragment(args):
    """
    %prog fragment fastafile enzyme

    Cut the fastafile using the specified enzyme, and grab upstream and
    downstream nucleotide sequence along with the cut site. In this case, the
    sequences extracted are:

                |- PstI
    ============|===========
           (-------)

    Sometimes we need to limit the size of the restriction fragments, for
    example the GBS protocol does not allow fragments larger than 800bp.

           |-PstI        |- PstI              |- PstI
    ~~~====|=============|==========~~~~~~~===|============
           (---)     (---)

    In this case, the second fragment is longer than 800bp, therefore the two
    ends are NOT extracted, as in the first fragment.
    """
    p = OptionParser(fragment.__doc__)
    p.add_option("--flank", default=150, type="int",
            help="Extract flanking bases of the cut sites [default: %default]")
    p.add_option("--full", default=False, action="store_true",
            help="The full extraction mode [default: %default]")
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    fastafile, enzyme = args
    flank = opts.flank
    assert flank > 0
    # --full extracts flanks of every site; default keeps only fragment ends
    extract = extract_full if opts.full else extract_ends
    tag = "full" if opts.full else "ends"

    assert enzyme in set(str(x) for x in AllEnzymes)
    fragfastafile = fastafile.split(".")[0] + \
        ".{0}.flank{1}.{2}.fasta".format(enzyme, flank, tag)
    # Swap the name for the actual Biopython enzyme object
    enzyme = [x for x in AllEnzymes if str(x) == enzyme][0]

    f = Fasta(fastafile, lazy=True)
    fw = open(fragfastafile, "w")
    for name, rec in f.iteritems_ordered():
        a = Analysis([enzyme], rec.seq)
        sites = a.full()[enzyme]  # cut-site positions within this record
        extract(rec, sites, flank, fw)

    logging.debug("Fragments written to `{0}`.".format(fragfastafile))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def calculate_A50(ctgsizes, cutoff=0, percent=50):
    """
    Given an array of contig sizes, produce A50, N50, and L50 values
    """
    # Largest-first sizes, dropping anything below the cutoff
    sizes = np.sort(np.array(ctgsizes, dtype="int"))[::-1]
    sizes = sizes[sizes >= cutoff]

    a50 = np.cumsum(sizes)  # running total, useful for A50 curves
    total = np.sum(sizes)

    # First index where the cumulative sum exceeds `percent` of the total
    idx = bisect(a50, total * percent / 100.)
    l50 = sizes[idx]  # contig size at the crossing point
    n50 = idx + 1     # 1-based rank of that contig

    return a50, l50, n50
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def n50(args):
    """
    %prog n50 filename

    Given a file with a list of numbers denoting contig lengths, calculate N50.
    Input file can be both FASTA or a list of sizes.
    """
    from jcvi.graphics.histogram import loghistogram

    p = OptionParser(n50.__doc__)
    p.add_option("--print0", default=False, action="store_true",
                 help="Print size and L50 to stdout [default: %default]")
    opts, args = p.parse_args(args)

    if len(args) < 1:
        sys.exit(not p.print_help())

    ctgsizes = []

    # Guess file format from the first character (">" means FASTA)
    probe = open(args[0]).readline()[0]
    isFasta = (probe == '>')
    if isFasta:
        for filename in args:
            f = Fasta(filename)
            ctgsizes += list(b for a, b in f.itersizes())
    else:
        for row in must_open(args):
            try:
                # take the last whitespace-separated field as the size
                ctgsize = int(float(row.split()[-1]))
            except ValueError:
                continue  # skip non-numeric rows (e.g. headers)
            ctgsizes.append(ctgsize)

    a50, l50, nn50 = calculate_A50(ctgsizes)
    sumsize = sum(ctgsizes)
    minsize = min(ctgsizes)
    maxsize = max(ctgsizes)
    n = len(ctgsizes)
    print(", ".join(args), file=sys.stderr)

    summary = (sumsize, l50, nn50, minsize, maxsize, n)
    # NOTE: `header` is defined at module level (not shown here); its fields
    # are assumed to parallel `summary`
    print(" ".join("{0}={1}".format(a, b) for a, b in \
                zip(header, summary)), file=sys.stderr)
    loghistogram(ctgsizes)

    if opts.print0:
        print("\t".join(str(x) for x in (",".join(args), sumsize, l50)))

    return zip(header, summary)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fromovl(args):
    """
    %prog graph nucmer2ovl.ovl fastafile

    Build overlap graph from ovl file which is converted using NUCMER2OVL.
    """
    p = OptionParser(fromovl.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    ovlfile, fastafile = args
    ovl = OVL(ovlfile)
    g = ovl.graph

    # Record reads fully contained in others; they are excluded from the AGP.
    # BUGFIX: use a context manager so the handle is closed (the original
    # leaked it).
    with open("contained.ids", "w") as fw:
        print("\n".join(sorted(ovl.contained)), file=fw)

    graph_to_agp(g, ovlfile, fastafile, exclude=ovl.contained, verbose=False)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bed(args):
    """
    %prog bed anchorsfile

    Convert ANCHORS file to BED format.
    """
    from collections import defaultdict
    from jcvi.compara.synteny import AnchorFile, check_beds
    from jcvi.formats.bed import Bed
    from jcvi.formats.base import get_number

    p = OptionParser(bed.__doc__)
    p.add_option("--switch", default=False, action="store_true",
                 help="Switch reference and aligned map elements")
    p.add_option("--scale", type="float",
                 help="Scale the aligned map distance by factor")
    p.set_beds()
    p.set_outfile()
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    anchorsfile, = args
    switch = opts.switch
    scale = opts.scale
    ac = AnchorFile(anchorsfile)
    pairs = defaultdict(list)
    for a, b, block_id in ac.iter_pairs():
        pairs[a].append(b)

    qbed, sbed, qorder, sorder, is_self = check_beds(anchorsfile, p, opts)
    bd = Bed()
    for q in qbed:
        if q.accn not in pairs:
            continue
        for s in pairs[q.accn]:
            si, s = sorder[s]
            # BUGFIX: re-read the query fields for every pair. The original
            # assigned them once per query and then mutated them in place
            # under --switch, corrupting all pairs after the first for any
            # query gene with multiple anchors.
            qseqid, qstart, qend, qaccn = q.seqid, q.start, q.end, q.accn
            sseqid, sstart, send, saccn = s.seqid, s.start, s.end, s.accn
            if switch:
                qseqid, sseqid = sseqid, qseqid
                qstart, sstart = sstart, qstart
                qend, send = send, qend
                qaccn, saccn = saccn, qaccn
            if scale:
                sstart /= scale
            try:
                newsseqid = get_number(sseqid)
            except ValueError:
                raise ValueError("`{0}` is on `{1}` with no number to extract".\
                        format(saccn, sseqid))
            # BED is 0-based half-open; the name field encodes seqid:position
            bedline = "\t".join(str(x) for x in (qseqid, qstart - 1, qend,
                    "{0}:{1}".format(newsseqid, sstart)))
            bd.add(bedline)

    bd.print_to_file(filename=opts.outfile, sorted=True)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def happy_edges(row, prefix=None):
""" Convert a row in HAPPY file and yield edges. """ |
trans = maketrans("[](){}", " ")
row = row.strip().strip("+")
row = row.translate(trans)
scfs = [x.strip("+") for x in row.split(":")]
for a, b in pairwise(scfs):
oa = '<' if a.strip()[0] == '-' else '>'
ob = '<' if b.strip()[0] == '-' else '>'
is_uncertain = a[-1] == ' ' or b[0] == ' '
a = a.strip().strip('-')
b = b.strip().strip('-')
if prefix:
a = prefix + a
b = prefix + b
yield (a, b, oa, ob), is_uncertain |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def partition(args):
""" %prog partition happy.txt synteny.graph Select edges from another graph and merge it with the certain edges built from the HAPPY mapping data. """ |
# Body of partition(): for each HAPPY-map line, build a small graph from the
# experimental edges, overlay any synteny edges whose endpoints both lie on
# that line, and draw one image per line with a color-coded evidence legend.
allowed_format = ("png", "ps")
p = OptionParser(partition.__doc__)
p.add_option("--prefix", help="Add prefix to the name [default: %default]")
p.add_option("--namestart", default=0, type="int",
             help="Use a shorter name, starting index [default: %default]")
p.add_option("--format", default="png", choices=allowed_format,
             help="Generate image of format [default: %default]")
opts, args = p.parse_args(args)

if len(args) != 2:
    sys.exit(not p.print_help())

happyfile, graphfile = args
bg = BiGraph()
# Synteny-only edges are drawn red
bg.read(graphfile, color="red")
prefix = opts.prefix
fp = open(happyfile)
for i, row in enumerate(fp):
    nns = happy_nodes(row, prefix=prefix)
    nodes = set(nns)
    edges = happy_edges(row, prefix=prefix)

    small_graph = BiGraph()
    # Experimental edges: gray when the HAPPY order/orientation is uncertain
    for (a, b, oa, ob), is_uncertain in edges:
        color = "gray" if is_uncertain else "black"
        small_graph.add_edge(a, b, oa, ob, color=color)

    for (u, v), e in bg.edges.items():
        # Grab edge if both vertices are on the same line
        if u in nodes and v in nodes:
            uv = (str(u), str(v))
            if uv in small_graph.edges:
                e = small_graph.edges[uv]
                e.color = "blue"  # supported by both evidences
            else:
                small_graph.add_edge(e)

    print(small_graph, file=sys.stderr)

    pngfile = "A{0:02d}.{1}".format(i + 1, opts.format)
    # First and last nodes on the line are the telomeres (drawn as rectangles)
    telomeres = (nns[0], nns[-1])
    small_graph.draw(pngfile, namestart=opts.namestart,
                     nodehighlight=telomeres, dpi=72)

# Print the legend once, after all per-line images are drawn
legend = ["Edge colors:"]
legend.append("[BLUE] Experimental + Synteny")
legend.append("[BLACK] Experimental certain")
legend.append("[GRAY] Experimental uncertain")
legend.append("[RED] Synteny only")
legend.append("Rectangle nodes are telomeres.")
print("\n".join(legend), file=sys.stderr)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def merge(args):
""" %prog merge graphs Merge multiple graphs together and visualize. """ |
# Overlay several graph files into a single BiGraph, coloring the edges of
# each input with its own palette entry, then render the merged picture.
parser = OptionParser(merge.__doc__)
parser.add_option("--colorlist", default="black,red,pink,blue,green",
                  help="The color palette [default: %default]")
opts, args = parser.parse_args(args)

if len(args) < 1:
    sys.exit(not parser.print_help())

palette = opts.colorlist.split(",")
assert len(palette) >= len(args), "Need more colors in --colorlist"

merged = BiGraph()
for graphfile, color in zip(args, palette):
    merged.read(graphfile, color=color)
merged.draw("merged.png")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def happy(args):
""" %prog happy happy.txt Make bi-directed graph from HAPPY mapping data. JCVI encodes uncertainties in the order of the contigs / scaffolds. : separates scaffolds + means telomere (though the telomere repeats may not show because the telomere-adjacent sequence is missing) - means that the scaffold is in reverse orientation to that shown in the 2003 TIGR scaffolds. Ambiguities are represented as follows, using Paul Dear.s description: [ ] means undetermined orientation. error quite possible (70% confidence?) ( ) means uncertain orientation. small chance of error (90% confidence?) { } means uncertain order. Example: +-8254707:8254647:-8254690:{[8254694]:[8254713]:[8254531]:[8254797]}:8254802:8254788+ """ |
# Split the edges of a HAPPY map into two graph files: joins with certain
# order/orientation vs. uncertain ones.
parser = OptionParser(happy.__doc__)
parser.add_option("--prefix", help="Add prefix to the name [default: %default]")
opts, args = parser.parse_args(args)

if len(args) != 1:
    sys.exit(not parser.print_help())

happyfile, = args

certain = "certain.graph"
uncertain = "uncertain.graph"
certain_fh = open(certain, "w")
uncertain_fh = open(uncertain, "w")

for row in open(happyfile):
    for edge, is_uncertain in happy_edges(row, prefix=opts.prefix):
        # Route each edge to the file matching its confidence
        target = uncertain_fh if is_uncertain else certain_fh
        print(edge, file=target)

logging.debug("Edges written to `{0}`".format(",".join((certain, uncertain))))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fromblast(args):
""" %prog fromblast blastfile subject.fasta Generate path from BLAST file. If multiple subjects map to the same query, an edge is constructed between them (with the link provided by the query). The BLAST file MUST be filtered, chained, supermapped. """ |
# Body of fromblast(): sort BLAST hits by query, then for each query link the
# subjects it maps to (pairwise along the query, or all-vs-all with --clique),
# and convert the resulting bidirected graph to an AGP layout.
from jcvi.formats.blast import sort
from jcvi.utils.range import range_distance

p = OptionParser(fromblast.__doc__)
p.add_option("--clique", default=False, action="store_true",
             help="Populate clique instead of linear path [default: %default]")
p.add_option("--maxdist", default=100000, type="int",
             help="Create edge within certain distance [default: %default]")
p.set_verbose(help="Print verbose reports to stdout")
opts, args = p.parse_args(args)

if len(args) != 2:
    sys.exit(not p.print_help())

blastfile, subjectfasta = args
clique = opts.clique
maxdist = opts.maxdist
# Sort in place so hits of the same query are adjacent for groupby
sort([blastfile, "--query"])
blast = BlastSlow(blastfile, sorted=True)
g = BiGraph()
for query, blines in groupby(blast, key=lambda x: x.query):
    blines = list(blines)
    # Clique: connect every subject pair; otherwise only neighbors in order
    iterator = combinations(blines, 2) if clique else pairwise(blines)
    for a, b in iterator:
        asub, bsub = a.subject, b.subject
        if asub == bsub:
            continue
        # Distance measured end-to-end on the shared query coordinates
        arange = (a.query, a.qstart, a.qstop, "+")
        brange = (b.query, b.qstart, b.qstop, "+")
        dist, oo = range_distance(arange, brange, distmode="ee")
        if dist > maxdist:
            continue
        atag = ">" if a.orientation == "+" else "<"
        btag = ">" if b.orientation == "+" else "<"
        g.add_edge(asub, bsub, atag, btag)

graph_to_agp(g, blastfile, subjectfasta, verbose=opts.verbose)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def connect(args):
""" %prog connect assembly.fasta read_mapping.blast Connect contigs using long reads. """ |
# Body of connect(): use long-read alignments to join contigs. Keep only
# alignments near contig ends (--clip), then for each read link the distinct
# contigs it spans, rejecting pairs that overlap too much on the read.
p = OptionParser(connect.__doc__)
p.add_option("--clip", default=2000, type="int",
             help="Only consider end of contigs [default: %default]")
opts, args = p.parse_args(args)

if len(args) != 2:
    sys.exit(not p.print_help())

fastafile, blastfile = args
clip = opts.clip

sizes = Sizes(fastafile).mapping
blast = Blast(blastfile)
blasts = []
for b in blast:
    seqid = b.subject
    size = sizes[seqid]
    start, end = b.sstart, b.sstop
    # Windows of `clip` bp at each contig end; discard alignments that
    # fall entirely in the middle of the contig
    cstart, cend = min(size, clip), max(0, size - clip)
    if start > cstart and end < cend:
        continue
    blasts.append(b)

key = lambda x: x.query
blasts.sort(key=key)
g = BiGraph()
for query, bb in groupby(blasts, key=key):
    # Order this read's alignments along the read
    bb = sorted(bb, key=lambda x: x.qstart)
    nsubjects = len(set(x.subject for x in bb))
    if nsubjects == 1:
        continue
    print("\n".join(str(x) for x in bb))
    for a, b in pairwise(bb):
        astart, astop = a.qstart, a.qstop
        bstart, bstop = b.qstart, b.qstop
        if a.subject == b.subject:
            continue
        arange = astart, astop
        brange = bstart, bstop
        ov = range_intersect(arange, brange)
        alen = astop - astart + 1
        blen = bstop - bstart + 1
        if ov:
            # Convert the intersect interval into an overlap length
            ostart, ostop = ov
            ov = ostop - ostart + 1
            print(ov, alen, blen)
        # Reject joins where the two alignments mostly cover the same
        # stretch of the read (likely repeats, not a true adjacency)
        if ov and (ov > alen / 2 or ov > blen / 2):
            print("Too much overlap ({0})".format(ov))
            continue
        asub = a.subject
        bsub = b.subject
        atag = ">" if a.orientation == "+" else "<"
        btag = ">" if b.orientation == "+" else "<"
        g.add_edge(asub, bsub, atag, btag)

graph_to_agp(g, blastfile, fastafile, verbose=False)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def grasstruth(args):
""" %prog grasstruth james-pan-grass.txt Prepare truth pairs for 4 grasses. """ |
# Build truth ortholog pairs for the 4 grasses: pair every rice ("Os") gene
# with genes from OTHER columns of the same row, then print them sorted.
parser = OptionParser(grasstruth.__doc__)
opts, args = parser.parse_args(args)

if len(args) != 1:
    sys.exit(not parser.print_help())

james, = args
pairs = set()
for line in open(james):
    columns = line.split()
    members = []
    column_of = {}
    # A column may pack several ids joined by "||"; remember each id's
    # originating column so same-column genes are never paired.
    for col, token in enumerate(columns):
        ids = token.split("||")
        for member in ids:
            column_of[member] = col
        members.extend(ids)
    # Drop ids carrying a ":" qualifier
    members = [m for m in members if ":" not in m]
    rice = [m for m in members if m.startswith("Os")]
    for o in rice:
        for m in members:
            if column_of[o] != column_of[m]:
                pairs.add(tuple(sorted((o, m))))

for a, b in sorted(pairs):
    print("\t".join((a, b)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cyntenator(args):
""" %prog cyntenator athaliana.athaliana.last athaliana.bed Prepare input for Cyntenator. """ |
# Body of cyntenator(): prepare Cyntenator input — a filtered 3-column BLAST
# file plus per-genome gene-position .txt files — and write a makefile that
# runs one cyntenator alignment per genome against the reference db.
p = OptionParser(cyntenator.__doc__)
opts, args = p.parse_args(args)

if len(args) < 2:
    sys.exit(not p.print_help())

lastfile = args[0]
fp = open(lastfile)
filteredlastfile = lastfile + ".blast"
fw = open(filteredlastfile, "w")
# Cyntenator wants query/subject/score triples; drop self hits
for row in fp:
    b = BlastLine(row)
    if b.query == b.subject:
        continue
    print("\t".join((b.query, b.subject, str(b.score))), file=fw)
fw.close()

bedfiles = args[1:]
# Peek at the first hit to learn which genome is the reference (db)
fp = open(lastfile)
b = BlastLine(next(fp))
subject = b.subject
txtfiles = []
for bedfile in bedfiles:
    order = Bed(bedfile).order
    if subject in order:
        db = op.basename(bedfile).split(".")[0][:20]
        logging.debug("Found db: {0}".format(db))
    txtfile = write_txt(bedfile)
    txtfiles.append(txtfile)

# NOTE(review): `db` is only bound inside the loop; if `subject` is found in
# none of the bedfiles this raises NameError — confirm inputs guarantee a hit.
db += ".txt"
mm = MakeManager()
for txtfile in txtfiles:
    outfile = txtfile + ".alignment"
    cmd = 'cyntenator -t "({0} {1})" -h blast {2} > {3}'\
            .format(txtfile, db, filteredlastfile, outfile)
    mm.add((txtfile, db, filteredlastfile), outfile, cmd)
mm.write()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def iadhore(args):
""" %prog iadhore athaliana.athaliana.last athaliana.bed Wrap around iADHoRe. """ |
# Body of iadhore(): prepare i-ADHoRe inputs — a deduplicated gene-pair
# blast_table and a config.txt listing per-genome gene lists plus the
# standard colinearity parameters.
p = OptionParser(iadhore.__doc__)
opts, args = p.parse_args(args)

if len(args) < 2:
    sys.exit(not p.print_help())

lastfile = args[0]
bedfiles = args[1:]
blast_table = "blast_table.txt"
fp = open(lastfile)
# Normalize to unordered gene-name pairs so each homology is listed once
seen = set()
for row in fp:
    c = BlastLine(row)
    a, b = c.query, c.subject
    a, b = gene_name(a), gene_name(b)
    if a > b:
        a, b = b, a
    seen.add((a, b))

fw = open(blast_table, "w")
for a, b in seen:
    print("\t".join((a, b)), file=fw)
fw.close()

logging.debug("A total of {0} pairs written to `{1}`"\
              .format(len(seen), blast_table))

fw = open("config.txt", "w")
# One stanza per genome: a name line then "seqid filename" lines
for bedfile in bedfiles:
    pf, stanza = write_lst(bedfile)
    print("genome={0}".format(pf), file=fw)
    for seqid, fname in stanza:
        print(" ".join((seqid, fname)), file=fw)
    print(file=fw)

# Fixed i-ADHoRe run parameters (colinear clustering, gg2 aligner)
print("blast_table={0}".format(blast_table), file=fw)
print("cluster_type=colinear", file=fw)
print("tandem_gap=10", file=fw)
print("prob_cutoff=0.001", file=fw)
print("gap_size=20", file=fw)
print("cluster_gap=20", file=fw)
print("q_value=0.9", file=fw)
print("anchor_points=4", file=fw)
print("alignment_method=gg2", file=fw)
print("max_gaps_in_alignment=20", file=fw)
print("output_path=i-adhore_out", file=fw)
print("number_of_threads=4", file=fw)
fw.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def athalianatruth(args):
""" %prog athalianatruth J_a.txt J_bc.txt Prepare pairs data for At alpha/beta/gamma. """ |
# Build truth data for Arabidopsis alpha/beta/gamma duplications: collect
# pairs and groups from both input files, then write `pairs` and `groups`.
parser = OptionParser(athalianatruth.__doc__)
opts, args = parser.parse_args(args)

if len(args) != 2:
    sys.exit(not parser.print_help())

atxt, bctxt = args

grouper = Grouper()
pairs = set()
for txt in (atxt, bctxt):
    extract_groups(grouper, pairs, txt)

# One tab-separated pair per line, sorted for reproducibility
pair_fh = open("pairs", "w")
for pair in sorted(pairs):
    print("\t".join(pair), file=pair_fh)
pair_fh.close()

# Each connected group as one comma-joined line
group_fh = open("groups", "w")
for group in list(grouper):
    print(",".join(group), file=group_fh)
group_fh.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mcscanx(args):
""" %prog mcscanx athaliana.athaliana.last athaliana.bed Wrap around MCScanX. """ |
# Body of mcscanx(): prepare MCScanX inputs — symlink the BLAST file under a
# combined prefix and merge all bed files into one GFF-like file, tagging
# each genome's chromosomes with letters A, B, C, ...
p = OptionParser(mcscanx.__doc__)
opts, args = p.parse_args(args)

if len(args) < 2:
    sys.exit(not p.print_help())

blastfile = args[0]
bedfiles = args[1:]
prefix = "_".join(op.basename(x)[:2] for x in bedfiles)
symlink(blastfile, prefix + ".blast")

allbedfile = prefix + ".gff"
fw = open(allbedfile, "w")
for i, bedfile in enumerate(bedfiles):
    # `prefix` is deliberately reused here as the per-genome letter tag
    # (A, B, C, ...) required by the MCScanX gff format
    prefix = chr(ord('A') + i)
    make_gff(bedfile, prefix, fw)
fw.close()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.