text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def unitigs(args):
    """
    %prog unitigs best.edges

    Reads Celera Assembler's "best.edges" and extract all unitigs.
    """
    p = OptionParser(unitigs.__doc__)
    p.add_option("--maxerr", default=2, type="int", help="Maximum error rate")
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    bestedges, = args
    G = read_graph(bestedges, maxerr=opts.maxerr, directed=True)
    # Keep only mutual best edges (k -> v and v -> k) in an undirected graph.
    H = nx.Graph()
    intconv = lambda x: int(x.split("-")[0])
    # items() works on both Python 2 and 3; iteritems() is Python-2-only.
    for k, v in G.items():
        if k == G.get(v, None):
            H.add_edge(intconv(k), intconv(v))

    nunitigs = nreads = 0
    for h in nx.connected_component_subgraphs(H, copy=False):
        # A unitig is a simple path: exactly two degree-1 endpoints.
        st = [x for x in h if h.degree(x) == 1]
        if len(st) != 2:
            continue
        src, target = st
        path = list(nx.all_simple_paths(h, src, target))
        assert len(path) == 1
        path, = path
        print("|".join(str(x) for x in path))
        nunitigs += 1
        nreads += len(path)
    logging.debug("A total of {0} unitigs built from {1} reads."\
                  .format(nunitigs, nreads))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def astat(args):
    """
    %prog astat coverage.log

    Create coverage-rho scatter plot.
    """
    p = OptionParser(astat.__doc__)
    p.add_option("--cutoff", default=1000, type="int",
                 help="Length cutoff [default: %default]")
    p.add_option("--genome", default="",
                 help="Genome name [default: %default]")
    p.add_option("--arrDist", default=False, action="store_true",
                 help="Use arrDist instead [default: %default]")
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    covfile, = args
    cutoff = opts.cutoff
    genome = opts.genome
    plot_arrDist = opts.arrDist

    # Cache the cutoff-filtered rows in a sidecar file to speed up reruns.
    suffix = ".{0}".format(cutoff)
    small_covfile = covfile + suffix
    update_covfile = need_update(covfile, small_covfile)
    if update_covfile:
        fw = open(small_covfile, "w")
    else:
        logging.debug("Found `{0}`, will use this one".format(small_covfile))
        covfile = small_covfile

    fp = open(covfile)
    header = next(fp)
    if update_covfile:
        fw.write(header)

    data = []
    msg = "{0} tigs scanned ..."
    for row in fp:
        tigID, rho, covStat, arrDist = row.split()
        tigID = int(tigID)
        if tigID % 1000000 == 0:
            sys.stderr.write(msg.format(tigID) + "\r")

        rho, covStat, arrDist = [float(x) for x in (rho, covStat, arrDist)]
        if rho < cutoff:
            continue

        if update_covfile:
            fw.write(row)
        data.append((tigID, rho, covStat, arrDist))
    fp.close()
    if update_covfile:
        # Bug fix: fw was never closed, so the cache file could be left
        # unflushed (and a later run would read a truncated file).
        fw.close()

    print(msg.format(tigID), file=sys.stderr)

    from jcvi.graphics.base import plt, savefig

    logging.debug("Plotting {0} data points.".format(len(data)))
    tigID, rho, covStat, arrDist = zip(*data)

    y = arrDist if plot_arrDist else covStat
    ytag = "arrDist" if plot_arrDist else "covStat"

    fig = plt.figure(1, (7, 7))
    ax = fig.add_axes([.12, .1, .8, .8])
    ax.plot(rho, y, ".", color="lightslategrey")

    xtag = "rho"
    info = (genome, xtag, ytag)
    title = "{0} {1} vs. {2}".format(*info)
    ax.set_title(title)
    ax.set_xlabel(xtag)
    ax.set_ylabel(ytag)

    if plot_arrDist:
        ax.set_yscale('log')

    imagename = "{0}.png".format(".".join(info))
    savefig(imagename, dpi=150)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def emitFragment(fw, fragID, libID, shredded_seq, clr=None, qvchar='l', fasta=False):
    """
    Write one shredded read to ``fw``, either as a FASTA record or as a
    Celera Assembler FRG record.
    """
    if fasta:
        # FASTA mode: emit a single SeqRecord and stop.
        record = SeqRecord(shredded_seq, id=fragID, description="")
        SeqIO.write([record], fw, "fasta")
        return

    seq = str(shredded_seq)
    # Shredded reads carry one uniform (low) quality character per base.
    qvs = qvchar * len(seq)
    # Default clear range spans the whole read unless one was supplied.
    clr_beg, clr_end = (0, len(seq)) if clr is None else clr

    print(frgTemplate.format(fragID=fragID, libID=libID,
          seq=seq, qvs=qvs, clr_beg=clr_beg, clr_end=clr_end), file=fw)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def make_matepairs(fastafile):
    """
    Derive a .mates file from a FASTA file by pairing consecutive records.

    Assumes the mates are adjacent sequence records.
    """
    assert op.exists(fastafile)

    matefile = fastafile.rsplit(".", 1)[0] + ".mates"
    if op.exists(matefile):
        logging.debug("matepairs file `{0}` found".format(matefile))
        return matefile

    logging.debug("parsing matepairs from `{0}`".format(fastafile))
    matefw = open(matefile, "w")
    records = SeqIO.parse(fastafile, "fasta")
    # zip(it, it) over a single iterator yields consecutive pairs:
    # (r0, r1), (r2, r3), ...
    for fwd, rev in zip(records, records):
        print("{0}\t{1}".format(fwd.id, rev.id), file=matefw)
    matefw.close()

    return matefile
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sff(args):
    """
    %prog sff sffiles

    Convert reads formatted as 454 SFF file, and convert to CA frg file.
    Turn --nodedup on if another deduplication mechanism is used (e.g.
    CD-HIT-454). See assembly.sff.deduplicate().
    """
    p = OptionParser(sff.__doc__)
    p.add_option("--prefix", dest="prefix", default=None,
                 help="Output frg filename prefix")
    p.add_option("--nodedup", default=False, action="store_true",
                 help="Do not remove duplicates [default: %default]")
    p.set_size()
    opts, args = p.parse_args(args)

    if len(args) < 1:
        sys.exit(p.print_help())

    sffiles = args
    plates = [x.split(".")[0].split("_")[-1] for x in sffiles]

    mated = (opts.size != 0)
    mean, sv = get_mean_sv(opts.size)

    if len(plates) > 1:
        # Multiple plates: collapse to a shared prefix plus 'X'.
        plate = plates[0][:-1] + 'X'
    else:
        plate = "_".join(plates)

    if mated:
        # Bug fix: use floor division so the label stays an int under
        # Python 3's true division (e.g. "Titan3Kb", not "Titan3.0Kb").
        libname = "Titan{0}Kb-".format(opts.size // 1000) + plate
    else:
        libname = "TitanFrags-" + plate

    if opts.prefix:
        libname = opts.prefix

    cmd = "sffToCA"
    cmd += " -libraryname {0} -output {0} ".format(libname)
    cmd += " -clear 454 -trim chop "
    if mated:
        cmd += " -linker titanium -insertsize {0} {1} ".format(mean, sv)
    if opts.nodedup:
        cmd += " -nodedup "

    cmd += " ".join(sffiles)

    sh(cmd)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fastq(args):
    """
    %prog fastq fastqfile

    Convert reads formatted as FASTQ file, and convert to CA frg file.
    """
    from jcvi.formats.fastq import guessoffset

    p = OptionParser(fastq.__doc__)
    p.add_option("--outtie", dest="outtie", default=False, action="store_true",
                 help="Are these outie reads? [default: %default]")
    p.set_phred()
    p.set_size()
    opts, args = p.parse_args(args)

    if len(args) < 1:
        sys.exit(p.print_help())

    fastqfiles = [get_abs_path(x) for x in args]
    size, outtie = opts.size, opts.outtie
    # Long-insert libraries are normally outward-facing; warn when not.
    if size > 1000 and (not outtie):
        logging.debug("[warn] long insert size {0} but not outtie".format(size))

    mated = (size != 0)
    libname = op.basename(args[0]).split(".")[0].replace("_1_sequence", "")
    frgfile = libname + ".frg"
    mean, sv = get_mean_sv(opts.size)

    cmd = "fastqToCA"
    cmd += " -libraryname {0} ".format(libname)
    fastqs = " ".join("-reads {0}".format(x) for x in fastqfiles)
    if mated:
        assert len(args) in (1, 2), "you need one or two fastq files for mated library"
        fastqs = "-mates {0}".format(",".join(fastqfiles))
        cmd += "-insertsize {0} {1} ".format(mean, sv)
    cmd += fastqs

    # Offset 64 marks the older Illumina quality encoding.
    offset = int(opts.phred) if opts.phred else guessoffset([fastqfiles[0]])
    if offset == 64:
        cmd += " -type illumina"
    if outtie:
        cmd += " -outtie"

    sh(cmd, outfile=frgfile)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def clr(args):
    """
    %prog blastfile fastafiles

    Calculate the vector clear range file based BLAST to the vectors.
    """
    p = OptionParser(clr.__doc__)
    opts, args = p.parse_args(args)

    if len(args) < 2:
        sys.exit(not p.print_help())

    blastfile, fastafiles = args[0], args[1:]

    # Collect the length of every sequence across all FASTA inputs.
    sizes = {}
    for fa in fastafiles:
        sizes.update(Fasta(fa).itersizes())

    b = Blast(blastfile)
    for query, hits in b.iter_hits():
        qsize = sizes[query]
        vectors = [(x.qstart, x.qstop) for x in hits]
        vmin, vmax = range_minmax(vectors)

        # Keep whichever flank outside the vector span is larger.
        left_size = vmin - 1
        right_size = qsize - vmax
        if left_size > right_size:
            clr_start, clr_end = 0, vmin
        else:
            clr_start, clr_end = vmax, qsize

        print("\t".join(str(x) for x in (query, clr_start, clr_end)))
        del sizes[query]

    # Queries with no vector hit keep their full range.
    for q, size in sorted(sizes.items()):
        print("\t".join(str(x) for x in (q, 0, size)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def truncate_name(name, rule=None):
    """
    shorten taxa names for tree display

    Options of rule. This only affects tree display.
    - headn (eg. head3 truncates first 3 chars)
    - oheadn (eg. ohead3 retains only the first 3 chars)
    - tailn (eg. tail3 truncates last 3 chars)
    - otailn (eg. otail3 retains only the last 3 chars)
    n = 1 ~ 99
    """
    import re

    if rule is None:
        return name

    # Table of (rule prefix, slicer). The anchored lookbehind patterns are
    # mutually exclusive, so probing them in order matches the original
    # nested-if logic exactly.
    slicers = (
        ("head", lambda s, n: s[n:]),
        ("ohead", lambda s, n: s[:n]),
        ("tail", lambda s, n: s[:-n]),
        ("otail", lambda s, n: s[-n:]),
    )
    for prefix, slicer in slicers:
        m = re.search("(?<=^{0})[0-9]{{1,2}}$".format(prefix), rule)
        if m:
            return slicer(name, int(m.group(0)))

    print(truncate_name.__doc__, file=sys.stderr)
    raise ValueError('Wrong rule for truncation!')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run_treefix(input, stree_file, smap_file, a_ext=".fasta",
                o_ext=".dnd", n_ext=".treefix.dnd", **kwargs):
    """
    Run TreeFix to get the ML tree closest to the species tree.

    Returns the new tree filename, or None if TreeFix failed.
    """
    cl = TreeFixCommandline(input=input,
                            stree_file=stree_file, smap_file=smap_file,
                            a_ext=a_ext, o=o_ext, n=n_ext, **kwargs)
    # TreeFix writes its output next to the input, swapping extensions.
    outtreefile = input.rsplit(o_ext, 1)[0] + n_ext
    print("TreeFix:", cl, file=sys.stderr)
    r, e = cl.run()

    if e:
        print("***TreeFix could not run", file=sys.stderr)
        return None
    logging.debug("new tree written to {0}".format(outtreefile))
    return outtreefile
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run_gblocks(align_fasta_file, **kwargs):
    """
    Remove poorly aligned positions and divergent regions with Gblocks.

    Returns the trimmed alignment filename ("<input>-gb"), or None when
    Gblocks fails or retains 10% or fewer of the positions.
    """
    cl = GblocksCommandline(aln_file=align_fasta_file, **kwargs)
    r, e = cl.run()
    print("Gblocks:", cl, file=sys.stderr)
    if e:
        print("***Gblocks could not run", file=sys.stderr)
        return None

    print(r, file=sys.stderr)
    # Extract the "(NN %)" retained-positions figure from Gblocks' report.
    alignp = int(re.sub(r'.*Gblocks alignment:.*\(([0-9]{1,3}) %\).*',
                        r'\1', r, flags=re.DOTALL))
    if alignp <= 10:
        print("** WARNING ** Only %s %% positions retained by Gblocks. " \
            "Results aborted. Using original alignment instead.\n" % alignp, file=sys.stderr)
        return None
    return align_fasta_file+"-gb"
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run_ffitch(distfile, outtreefile, intreefile=None, **kwargs):
    """
    Infer tree branch lengths using ffitch in EMBOSS PHYLIP.

    Returns the output tree filename, or None if ffitch failed.
    """
    cl = FfitchCommandline(datafile=distfile, outtreefile=outtreefile,
                           intreefile=intreefile, **kwargs)
    r, e = cl.run()
    if e:
        print("***ffitch could not run", file=sys.stderr)
        return None
    print("ffitch:", cl, file=sys.stderr)
    return outtreefile
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def smart_reroot(treefile, outgroupfile, outfile, format=0):
    """
    simple function to reroot Newick format tree using ete2

    Tree reading format options see here:
    http://packages.python.org/ete2/tutorial/tutorial_trees.html#reading-newick-trees
    """
    tree = Tree(treefile, format=format)
    leaves = [t.name for t in tree.get_leaves()][::-1]

    # Try each outgroup prefix in file order; keep all leaves matching the
    # first prefix that matches anything.
    outgroup = []
    for o in must_open(outgroupfile):
        o = o.strip()
        outgroup += [leaf for leaf in leaves if leaf[:len(o)] == o]
        if outgroup:
            break

    if not outgroup:
        print("Outgroup not found. Tree {0} cannot be rerooted.".format(treefile), file=sys.stderr)
        return treefile

    try:
        tree.set_outgroup(tree.get_common_ancestor(*outgroup))
    except ValueError:
        # get_common_ancestor rejects a single node; reroot on the leaf itself.
        assert type(outgroup) == list
        outgroup = outgroup[0]
        tree.set_outgroup(outgroup)
    tree.write(outfile=outfile, format=format)

    logging.debug("Rerooted tree printed to {0}".format(outfile))
    return outfile
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def build_ml_phyml(alignment, outfile, work_dir=".", **kwargs):
    """
    Build a maximum likelihood tree of DNA seqs with PhyML.

    Returns (outfile, phy_file) on success, None on failure.
    """
    phy_file = op.join(work_dir, "work", "aln.phy")
    # Bug fix: file() is a Python-2-only builtin and the handle was never
    # closed. open() in a context manager flushes the alignment to disk
    # before PhyML reads it.
    with open(phy_file, "w") as fw:
        AlignIO.write(alignment, fw, "phylip-relaxed")

    phyml_cl = PhymlCommandline(cmd=PHYML_BIN("phyml"), input=phy_file, **kwargs)
    logging.debug("Building ML tree using PhyML: %s" % phyml_cl)
    stdout, stderr = phyml_cl()

    tree_file = phy_file + "_phyml_tree.txt"
    if not op.exists(tree_file):
        print("***PhyML failed.", file=sys.stderr)
        return None
    sh("cp {0} {1}".format(tree_file, outfile), log=False)

    logging.debug("ML tree printed to %s" % outfile)

    return outfile, phy_file
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def build_ml_raxml(alignment, outfile, work_dir=".", **kwargs):
    """
    Build a maximum likelihood tree of DNA seqs with RAxML.

    Returns (outfile, phy_file) on success, None on failure.
    """
    work_dir = op.join(work_dir, "work")
    mkdir(work_dir)
    phy_file = op.join(work_dir, "aln.phy")
    # Bug fix: file() is a Python-2-only builtin and the handle was never
    # closed. open() in a context manager flushes the alignment to disk
    # before RAxML reads it.
    with open(phy_file, "w") as fw:
        AlignIO.write(alignment, fw, "phylip-relaxed")

    raxml_work = op.abspath(op.join(op.dirname(phy_file), "raxml_work"))
    mkdir(raxml_work)
    raxml_cl = RaxmlCommandline(cmd=RAXML_BIN("raxmlHPC"), \
        sequences=phy_file, algorithm="a", model="GTRGAMMA", \
        parsimony_seed=12345, rapid_bootstrap_seed=12345, \
        num_replicates=100, name="aln", \
        working_dir=raxml_work, **kwargs)

    logging.debug("Building ML tree using RAxML: %s" % raxml_cl)
    stdout, stderr = raxml_cl()

    tree_file = "{0}/RAxML_bipartitions.aln".format(raxml_work)
    if not op.exists(tree_file):
        print("***RAxML failed.", file=sys.stderr)
        sh("rm -rf %s" % raxml_work, log=False)
        return None
    sh("cp {0} {1}".format(tree_file, outfile), log=False)

    logging.debug("ML tree printed to %s" % outfile)
    sh("rm -rf %s" % raxml_work)

    return outfile, phy_file
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def SH_raxml(reftree, querytree, phy_file, shout="SH_out.txt"):
    """
    SH test using RAxML.

    querytree can be a single tree or a bunch of trees (eg. from bootstrapping).
    Appends the p-value line to ``shout`` and returns that filename.
    """
    assert op.isfile(reftree)
    shout = must_open(shout, "a")

    raxml_work = op.abspath(op.join(op.dirname(phy_file), "raxml_work"))
    mkdir(raxml_work)
    raxml_cl = RaxmlCommandline(cmd=RAXML_BIN("raxmlHPC"), \
        sequences=phy_file, algorithm="h", model="GTRGAMMA", \
        name="SH", starting_tree=reftree, bipartition_filename=querytree, \
        working_dir=raxml_work)

    logging.debug("Running SH test in RAxML: %s" % raxml_cl)
    o, stderr = raxml_cl()
    # hard coded
    try:
        # re.search returns None when the marker line is absent, so .group
        # raises AttributeError -- catch only that, not a bare except.
        pval = re.search('(Significantly.*:.*)', o).group(0)
    except AttributeError:
        print("SH test failed.", file=sys.stderr)
    else:
        # "\\%" escapes the percent sign (the original "\%" relied on an
        # invalid escape sequence that happened to mean the same thing).
        pval = pval.strip().replace("\t", " ").replace("%", "\\%")
        print("{0}\t{1}".format(op.basename(querytree), pval), file=shout)
        logging.debug("SH p-value appended to %s" % shout.name)

    shout.close()
    return shout.name
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def subalignment(alnfle, subtype, alntype="fasta"):
    """
    Subset synonymous or fourfold degenerate sites from an alignment.

    Input should be a codon alignment. ``subtype`` is "synonymous" or
    "fourfold". Writes the kept codon columns to
    ``<alnfle-stem>_<subtype>.<alntype>`` and returns that filename, or
    None when no sites were selected.
    """
    aln = AlignIO.read(alnfle, alntype)
    alnlen = aln.get_alignment_length()
    nseq = len(aln)
    subaln = None  # accumulates kept 3-column codon slices
    subalnfile = alnfle.rsplit(".", 1)[0] + "_{0}.{1}".format(subtype, alntype)
    if subtype == "synonymous":
        # Walk codon by codon; keep a column only when every sequence's
        # codon translates to the same amino acid. The for/else fires when
        # the inner loop completes without a break (column accepted).
        for j in range( 0, alnlen, 3 ):
            aa = None
            for i in range(nseq):
                codon = str(aln[i, j: j + 3].seq)
                if codon not in CODON_TRANSLATION:
                    break  # untranslatable codon (gap/ambiguity): reject column
                if aa and CODON_TRANSLATION[codon] != aa:
                    break  # amino acid differs between sequences: reject column
                else:
                    aa = CODON_TRANSLATION[codon]
            else:
                if subaln is None:
                    subaln = aln[:, j: j + 3]
                else:
                    subaln += aln[:, j: j + 3]
    if subtype == "fourfold":
        # Keep a codon column only when all sequences carry a codon from
        # the fourfold-degenerate set.
        for j in range( 0, alnlen, 3 ):
            for i in range(nseq):
                codon = str(aln[i, j: j + 3].seq)
                if codon not in FOURFOLD:
                    break
            else:
                if subaln is None:
                    subaln = aln[:, j: j + 3]
                else:
                    subaln += aln[:, j: j + 3]
    if subaln:
        AlignIO.write(subaln, subalnfile, alntype)
        return subalnfile
    else:
        print("No sites {0} selected.".format(subtype), file=sys.stderr)
        return None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def merge_rows_local(filename, ignore=".", colsep="\t", local=10, \
                     fieldcheck=True, fsep=","):
    """
    Merge overlapping rows within a given row-count distance (``local``).

    Two rows are mergeable when, in every column, at least one side is the
    ``ignore`` placeholder or one value contains the other. Writes the
    merged table to ``<filename>.merged`` and returns that filename.
    """
    fw = must_open(filename+".merged", "w")
    # NOTE(review): file() is a Python-2-only builtin; open() elsewhere in
    # this module -- confirm this module still targets Python 2.
    rows = file(filename).readlines()
    rows = [row.strip().split(colsep) for row in rows]
    l = len(rows[0])  # assumes all rows have the same column count as row 0
    for rowi, row in enumerate(rows):
        n = len(rows)  # re-read each pass: rows shrinks as merges happen
        i = rowi+1
        # Compare against the next `local` rows (bounded by the list end).
        while i <= min(rowi+local, n-1):
            merge = 1
            row2 = rows[i]
            for j in range(l):
                a = row[j]
                b = row2[j]
                if fieldcheck:
                    # Canonicalize multi-valued fields (sorted, deduped) so
                    # containment checks are order-insensitive.
                    a = set(a.split(fsep))
                    a = fsep.join(sorted(list(a)))
                    b = set(b.split(fsep))
                    b = fsep.join(sorted(list(b)))
                # Conflict: both real values and neither contains the other.
                if all([a!=ignore, b!=ignore, a not in b, b not in a]):
                    merge = 0
                    i += 1
                    break
            if merge:
                # Fill placeholders / prefer the more specific value, then
                # drop row2. NOTE(review): rows is mutated while the outer
                # enumerate is iterating it -- the skip/shift behavior is
                # intentional-looking but fragile; do not reorder.
                for x in range(l):
                    if row[x] == ignore:
                        rows[rowi][x] = row2[x]
                    elif row[x] in row2[x]:
                        rows[rowi][x] = row2[x]
                    else:
                        rows[rowi][x] = row[x]
                row = rows[rowi]
                rows.remove(row2)
        print(colsep.join(row), file=fw)
    fw.close()
    return fw.name
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_tandems(mcscanfile, tandemfile):
    """
    Add tandem genes to anchor genes in mcscan file.

    ``tandemfile`` has one comma-separated tandem group per line. Each
    anchor found in a group is expanded to the full group; the augmented
    table is then locally merged. Returns the merged filename.
    """
    # Bug fix: file() is a Python-2-only builtin; open() works on both.
    tandems = [f.strip().split(",") for f in open(tandemfile)]
    fw = must_open(mcscanfile+".withtandems", "w")
    fp = must_open(mcscanfile)
    seen = set()
    for i, row in enumerate(fp):
        if row[0] == '#':
            continue
        anchorslist = row.strip().split("\t")
        anchors = set([a.split(",")[0] for a in anchorslist])
        # discard (not remove): a fully-populated row has no "." and
        # remove() would raise KeyError.
        anchors.discard(".")
        if anchors & seen == anchors:
            # Every anchor in this row was already emitted via a tandem group.
            continue

        newanchors = []
        for a in anchorslist:
            if a == ".":
                newanchors.append(a)
                continue
            for t in tandems:
                if a in t:
                    newanchors.append(",".join(t))
                    seen.update(t)
                    break
            else:
                newanchors.append(a)
                seen.add(a)
        print("\t".join(newanchors), file=fw)

    fw.close()
    newmcscanfile = merge_rows_local(fw.name)

    logging.debug("Tandems added to `{0}`. Results in `{1}`".\
            format(mcscanfile, newmcscanfile))
    fp.seek(0)
    logging.debug("{0} rows merged to {1} rows".\
            format(len(fp.readlines()), len(open(newmcscanfile).readlines())))
    sh("rm %s" % fw.name)

    return newmcscanfile
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _draw_trees(trees, nrow=1, ncol=1, rmargin=.3, iopts=None, outdir=".", shfile=None, **kwargs):
    """
    Draw one or multiple trees on one plot.

    Trees are laid out in an nrow x ncol grid, one figure per page of
    nrow*ncol trees. If ``shfile`` is given, SH test p-values are looked up
    per tree and passed to draw_tree.

    NOTE(review): uses xrange and ``trees.keys()[i]`` -- Python-2-only;
    also assumes ``trees`` is a dict-like with a stable key order (e.g.
    an OrderedDict) so keys()[i] lines up with the intended tree.
    """
    from jcvi.graphics.tree import draw_tree

    if shfile:
        SHs = DictFile(shfile, delimiter="\t")

    ntrees = len(trees)
    n = nrow * ncol
    # One figure per page of n trees.
    for x in xrange(int(ceil(float(ntrees)/n))):
        fig = plt.figure(1, (iopts.w, iopts.h)) if iopts \
            else plt.figure(1, (5, 5))
        root = fig.add_axes([0, 0, 1, 1])

        # Grid cell sizes and the per-cell origin coordinates
        # (ystart is row-major, top row first).
        xiv = 1. / ncol
        yiv = 1. / nrow
        xstart = list(np.arange(0, 1, xiv)) * nrow
        ystart = list(chain(*zip(*[list(np.arange(0, 1, yiv))[::-1]] * ncol)))
        for i in xrange(n*x, n*(x+1)):
            if i == ntrees:
                break
            ax = fig.add_axes([xstart[i%n], ystart[i%n], xiv, yiv])
            f = trees.keys()[i]
            tree = trees[f]
            try:
                SH = SHs[f]
            except:
                # No SH entry (or no shfile at all -- SHs undefined): no label.
                SH = None
            draw_tree(ax, tree, rmargin=rmargin, reroot=False, \
                supportcolor="r", SH=SH, **kwargs)

        root.set_xlim(0, 1)
        root.set_ylim(0, 1)
        root.set_axis_off()

        format = iopts.format if iopts else "pdf"
        dpi = iopts.dpi if iopts else 300
        if n == 1:
            # Single-tree page: name the image after the tree file itself
            # (``f`` is the last key drawn above).
            image_name = f.rsplit(".", 1)[0] + "." + format
        else:
            image_name = "trees{0}.{1}".format(x, format)
        image_name = op.join(outdir, image_name)

        savefig(image_name, dpi=dpi, iopts=iopts)
        plt.clf()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sort_layout(thread, listfile, column=0):
    """
    Sort the syntelog table according to chromomomal positions. First orient the
    contents against threadbed, then for contents not in threadbed, insert to
    the nearest neighbor.

    Writes the sorted table to ``<listfile-stem>.sorted.list``.
    """
    from jcvi.formats.base import DictFile

    outfile = listfile.rsplit(".", 1)[0] + ".sorted.list"
    threadorder = thread.order
    fw = open(outfile, "w")
    lt = DictFile(listfile, keypos=column, valuepos=None)
    threaded = []
    imported = set()
    # First pass: pull rows in the order their anchors appear in the thread.
    for t in thread:
        accn = t.accn
        if accn not in lt:
            continue

        imported.add(accn)
        atoms = lt[accn]
        threaded.append(atoms)

    assert len(threaded) == len(imported)

    total = sum(1 for x in open(listfile))
    logging.debug("Total: {0}, currently threaded: {1}".format(total, len(threaded)))
    # Second pass: splice in the remaining rows next to their neighbors.
    fp = open(listfile)
    for row in fp:
        atoms = row.split()
        accn = atoms[0]
        if accn in imported:
            continue
        insert_into_threaded(atoms, threaded, threadorder)

    for atoms in threaded:
        print("\t".join(atoms), file=fw)

    fw.close()
    # Bug fix: the message previously formatted (outfile, thread.filename),
    # i.e. claimed the output file was sorted into the thread bed.
    logging.debug("File `{0}` sorted to `{1}`.".format(listfile, outfile))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def layout(args):
    """
    %prog layout omgfile taxa

    Build column formatted gene lists after omgparse(). Use species list
    separated by comma in place of taxa, e.g. "BR,BO,AN,CN"
    """
    p = OptionParser(layout.__doc__)
    p.add_option("--sort",
                 help="Sort layout file based on bedfile [default: %default]")
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    omgfile, taxa = args
    listfile = omgfile.rsplit(".", 1)[0] + ".list"
    taxa = taxa.split(",")
    ntaxa = len(taxa)
    fw = open(listfile, "w")

    data = []
    fp = open(omgfile)
    for row in fp:
        genes, idxs = row.split()
        # One column per taxon; "." marks a missing member.
        row = ["."] * ntaxa
        genes = genes.split(",")
        ixs = [int(x) for x in idxs.split(",")]
        for gene, idx in zip(genes, ixs):
            row[idx] = gene
        txs = ",".join(taxa[x] for x in ixs)
        print("\t".join(("\t".join(row), txs)), file=fw)
        data.append(row)

    # Bug fix: zip() is a lazy, non-subscriptable iterator on Python 3;
    # materialize it so coldata[i] below works on both 2 and 3.
    coldata = list(zip(*data))
    ngenes = []
    for i, tx in enumerate(taxa):
        genes = [x for x in coldata[i] if x != '.']
        genes = set(x.strip("|") for x in genes)
        ngenes.append((len(genes), tx))

    details = ", ".join("{0} {1}".format(a, b) for a, b in ngenes)
    total = sum(a for a, b in ngenes)
    s = "A list of {0} orthologous families that collectively".format(len(data))
    s += " contain a total of {0} genes ({1})".format(total, details)
    print(s, file=sys.stderr)
    fw.close()

    # Sort the list file in place on its last (taxa signature) column.
    lastcolumn = ntaxa + 1
    cmd = "sort -k{0},{0} {1} -o {1}".format(lastcolumn, listfile)
    sh(cmd)

    logging.debug("List file written to `{0}`.".format(listfile))
    sort = opts.sort
    if sort:
        thread = Bed(sort)
        sort_layout(thread, listfile)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def omgparse(args):
    """
    %prog omgparse work

    Parse the OMG outputs to get gene lists.
    """
    p = OptionParser(omgparse.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    work, = args
    for omgfile in glob(op.join(work, "gf*.out")):
        # Each best partition is a list of (gene, taxon) pairs.
        for bb in OMGFile(omgfile).best():
            genes, taxa = zip(*bb)
            print("\t".join((",".join(genes), ",".join(taxa))))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def group(args):
    """
    %prog group anchorfiles

    Group the anchors into ortho-groups. Can input multiple anchor files.
    """
    p = OptionParser(group.__doc__)
    p.set_outfile()
    opts, args = p.parse_args(args)

    if len(args) < 1:
        sys.exit(not p.print_help())

    groups = Grouper()
    # Union-join every anchored pair into connected components.
    for anchorfile in args:
        for a, b, idx in AnchorFile(anchorfile).iter_pairs():
            groups.join(a, b)

    logging.debug("Created {0} groups with {1} members.".\
                  format(len(groups), groups.num_members))

    outfile = opts.outfile
    fw = must_open(outfile, "w")
    for g in groups:
        print(",".join(sorted(g)), file=fw)
    fw.close()

    return outfile
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def omg(args):
    """
    %prog omg weightsfile

    Run Sankoff's OMG algorithm to get orthologs. Download OMG code at:
    <http://137.122.149.195/IsbraSoftware/OMGMec.html>

    This script only writes the partitions, but not launch OMGMec. You may need to:

    $ parallel "java -cp ~/code/OMGMec TestOMGMec {} 4 > {}.out" ::: work/gf?????

    Then followed by omgparse() to get the gene lists.
    """
    p = OptionParser(omg.__doc__)
    opts, args = p.parse_args(args)

    if len(args) < 1:
        sys.exit(not p.print_help())

    weightsfiles = args
    groupfile = group(weightsfiles + ["--outfile=groups"])

    weights = get_weights(weightsfiles)
    info = get_info()

    fp = open(groupfile)
    work = "work"
    mkdir(work)
    # One OMG partition file per ortho-group.
    for i, row in enumerate(fp):
        gf = op.join(work, "gf{0:05d}".format(i))
        genes = row.rstrip().split(",")

        fw = open(gf, "w")
        payload = ""
        npairs = 0
        for gene in genes:
            # Keep only weighted pairs whose partner is inside this group.
            for a, b, c in weights[gene]:
                if b not in genes:
                    continue
                payload += "weight {0}".format(c) + '\n'
                payload += info[a] + '\n'
                payload += info[b] + '\n\n'
                npairs += 1

        header = "a group of genes :length ={0}".format(npairs)
        print(header, file=fw)
        print(payload, file=fw)
        fw.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def omgprepare(args):
    """
    %prog omgprepare ploidy anchorsfile blastfile

    Prepare to run Sankoff's OMG algorithm to get orthologs.
    """
    from jcvi.formats.blast import cscore
    from jcvi.formats.base import DictFile

    p = OptionParser(omgprepare.__doc__)
    p.add_option("--norbh", action="store_true",
                 help="Disable RBH hits [default: %default]")
    p.add_option("--pctid", default=0, type="int",
                 help="Percent id cutoff for RBH hits [default: %default]")
    p.add_option("--cscore", default=90, type="int",
                 help="C-score cutoff for RBH hits [default: %default]")
    p.set_stripnames()
    p.set_beds()

    opts, args = p.parse_args(args)

    if len(args) != 3:
        sys.exit(not p.print_help())

    ploidy, anchorfile, blastfile = args
    norbh = opts.norbh
    pctid = opts.pctid
    cs = opts.cscore
    qbed, sbed, qorder, sorder, is_self = check_beds(anchorfile, p, opts)

    fp = open(ploidy)
    genomeidx = dict((x.split()[0], i) for i, x in enumerate(fp))
    fp.close()

    ploidy = DictFile(ploidy)

    geneinfo(qbed, qorder, genomeidx, ploidy)
    geneinfo(sbed, sorder, genomeidx, ploidy)

    pf = blastfile.rsplit(".", 1)[0]
    cscorefile = pf + ".cscore"
    cscore([blastfile, "-o", cscorefile, "--cutoff=0", "--pct"])
    ac = AnchorFile(anchorfile)
    pairs = set((a, b) for a, b, i in ac.iter_pairs())
    logging.debug("Imported {0} pairs from `{1}`.".format(len(pairs), anchorfile))

    weightsfile = pf + ".weights"
    fp = open(cscorefile)
    fw = open(weightsfile, "w")
    npairs = 0
    for row in fp:
        a, b, c, pct = row.split()
        c, pct = float(c), float(pct)
        c = int(c * 100)
        if (a, b) not in pairs:
            # Non-syntenic (RBH-only) pair: apply cutoffs, then downweight.
            if norbh:
                continue
            if c < cs:
                continue
            if pct < pctid:
                continue
            # Bug fix: floor division keeps the weight an int on Python 3
            # too (true division would write e.g. "9.5" to the file).
            c //= 10  # This severely penalizes RBH against synteny

        print("\t".join((a, b, str(c))), file=fw)
        npairs += 1
    fw.close()

    logging.debug("Write {0} pairs to `{1}`.".format(npairs, weightsfile))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_qs(qs, keep_blank_values=0, strict_parsing=0, keep_attr_order=True):
    """
    Kind of like urlparse.parse_qs, except returns an ordered dict.
    Also avoids replicating that function's bad habit of overriding the
    built-in 'dict' type.

    Taken from below with modification:
    <https://bitbucket.org/btubbs/thumpy/raw/8cdece404f15/thumpy.py>
    """
    # Lazily pick the container so DefaultOrderedDict is only looked up
    # when attribute order actually needs preserving.
    result = (DefaultOrderedDict if keep_attr_order else defaultdict)(list)
    for key, value in parse_qsl(qs, keep_blank_values, strict_parsing):
        result[key].append(value)
    return result
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def index(self, item):
    'Find the position of an item. Raise ValueError if not found.'
    k = self._key(item)
    lo = bisect_left(self._keys, k)
    hi = bisect_right(self._keys, k)
    # Search only the run of equal keys, then shift back to an absolute index.
    return self._items[lo:hi].index(item) + lo
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def insert(self, item):
    'Insert a new item. If equal keys are found, add to the left'
    k = self._key(item)
    pos = bisect_left(self._keys, k)
    self._keys.insert(pos, k)
    self._items.insert(pos, item)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def insert_right(self, item):
    'Insert a new item. If equal keys are found, add to the right'
    k = self._key(item)
    pos = bisect_right(self._keys, k)
    self._keys.insert(pos, k)
    self._items.insert(pos, item)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def remove(self, item):
    'Remove first occurence of item. Raise ValueError if not found'
    pos = self.index(item)
    # Keep keys and items in lockstep.
    del self._keys[pos]
    del self._items[pos]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_ge(self, item):
    'Return first item with a key >= equal to item. Raise ValueError if not found'
    k = self._key(item)
    pos = bisect_left(self._keys, k)
    if pos == len(self):
        raise ValueError('No item found with key at or above: %r' % (k,))
    return self._items[pos]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_gt(self, item):
    'Return first item with a key > item. Raise ValueError if not found'
    k = self._key(item)
    pos = bisect_right(self._keys, k)
    if pos == len(self):
        raise ValueError('No item found with key above: %r' % (k,))
    return self._items[pos]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def multireport(args):
    """
    %prog multireport layoutfile

    Generate several Ks value distributions in the same figure. If the layout
    file is missing then a template file listing all ks files will be written.

    The layout file contains the Ks file, number of components, colors, and labels:

    # Ks file, ncomponents, label, color, marker
    LAP.sorghum.ks, 1, LAP-sorghum, r, o
    SES.sorghum.ks, 1, SES-sorghum, g, +
    MOL.sorghum.ks, 1, MOL-sorghum, m, ^

    If color or marker is missing, then a random one will be assigned.
    """
    p = OptionParser(multireport.__doc__)
    p.set_outfile(outfile="Ks_plot.pdf")
    add_plot_options(p)
    opts, args, iopts = p.set_image_options(args, figsize="5x5")

    if len(args) != 1:
        sys.exit(not p.print_help())

    layoutfile, = args
    ks_min, ks_max = opts.vmin, opts.vmax
    bins = opts.bins
    fill = opts.fill
    layout = Layout(layoutfile)
    print(layout, file=sys.stderr)

    fig = plt.figure(1, (iopts.w, iopts.h))
    ax = fig.add_axes([.12, .1, .8, .8])
    kp = KsPlot(ax, ks_max, bins, legendp=opts.legendp)
    for lo in layout:
        # Load Ks values and keep only those in the [vmin, vmax] window.
        ks_values = [x.ng_ks for x in KsFile(lo.ksfile)]
        ks_values = [x for x in ks_values if ks_min <= x <= ks_max]
        kp.add_data(ks_values, lo.components, label=lo.label,
                    color=lo.color, marker=lo.marker,
                    fill=fill, fitted=opts.fit)

    kp.draw(title=opts.title, filename=opts.outfile)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fromgroups(args):
""" Flatten the gene families into pairs, the groupsfile is a file with each line containing the members, separated by comma. The commands also require several bed files in order to sort the pairs into different piles (e.g. pairs of species in comparison). """ |
from jcvi.formats.bed import Bed
p = OptionParser(fromgroups.__doc__)
opts, args = p.parse_args(args)
if len(args) < 2:
sys.exit(not p.print_help())
groupsfile = args[0]
bedfiles = args[1:]
beds = [Bed(x) for x in bedfiles]
fp = open(groupsfile)
groups = [row.strip().split(",") for row in fp]
# product(..., repeat=2) emits every ordered bed pair, including
# self-pairs, so each species-vs-species pile is extracted.
for b1, b2 in product(beds, repeat=2):
extract_pairs(b1, b2, groups) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_synonymous(input_file, work_dir):
"""Run yn00 to find the synonymous substitution rate for the alignment. """ |
cwd = os.getcwd()
# yn00 resolves paths relative to the CWD, so run inside work_dir
# and restore the original directory before returning.
os.chdir(work_dir)
# create the .ctl file
ctl_file = "yn-input.ctl"
output_file = "nuc-subs.yn"
ctl_h = open(ctl_file, "w")
ctl_h.write("seqfile = %s\noutfile = %s\nverbose = 0\n" %
(op.basename(input_file), output_file))
ctl_h.write("icode = 0\nweighting = 0\ncommonf3x4 = 0\n")
ctl_h.close()
cl = YnCommandline(ctl_file)
print("\tyn00:", cl, file=sys.stderr)
r, e = cl.run()
ds_value_yn = None
ds_value_ng = None
dn_value_yn = None
dn_value_ng = None
# Nei-Gojobori
output_h = open(output_file)
row = output_h.readline()
while row:
if row.find("Nei & Gojobori") >=0:
# The dN/dS pair sits five lines below the header, inside "(...)".
for x in xrange(5):
row = next(output_h)
dn_value_ng, ds_value_ng = row.split('(')[1].split(')')[0].split()
break
row = output_h.readline()
output_h.close()
# Yang
output_h = open(output_file)
for line in output_h:
# Yang (YN00) estimates appear on "value +- stderr" lines; skip the
# header line that mentions "dS".
if line.find("+-") >= 0 and line.find("dS") == -1:
parts = line.split(" +-")
ds_value_yn = extract_subs_value(parts[1])
dn_value_yn = extract_subs_value(parts[0])
if ds_value_yn is None or ds_value_ng is None:
h = open(output_file)
print("yn00 didn't work: \n%s" % h.read(), file=sys.stderr)
os.chdir(cwd)
return ds_value_yn, dn_value_yn, ds_value_ng, dn_value_ng |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run_mrtrans(align_fasta, recs, work_dir, outfmt="paml"):
"""Align nucleotide sequences with mrtrans and the protein alignment. """ |
align_file = op.join(work_dir, "prot-align.fasta")
nuc_file = op.join(work_dir, "nuc.fasta")
output_file = op.join(work_dir, "nuc-align.mrtrans")
# make the prot_align file and nucleotide file
align_h0 = open(align_file + "0", "w")
align_h0.write(str(align_fasta))
align_h0.close()
prot_seqs = {}
i = 0
for rec in SeqIO.parse(align_h0.name, "fasta"):
prot_seqs[i] = rec.seq
i += 1
align_h = open(align_file, "w")
for i, rec in enumerate(recs):
# Keep long IDs unique after truncation by appending the record index.
if len(rec.id) > 30:
rec.id = rec.id[:28] + "_" + str(i)
rec.description = ""
print(">{0}\n{1}".format(rec.id, prot_seqs[i]), file=align_h)
align_h.close()
# NOTE(review): file() is a Python-2-only builtin; open() under Python 3.
SeqIO.write(recs, file(nuc_file, "w"), "fasta")
# run the program
cl = MrTransCommandline(align_file, nuc_file, output_file, outfmt=outfmt)
r, e = cl.run()
if e is None:
print("\tpal2nal:", cl, file=sys.stderr)
return output_file
elif e.read().find("could not translate") >= 0:
print("***pal2nal could not translate", file=sys.stderr)
return None |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def clustal_align_protein(recs, work_dir, outfmt="fasta"):
""" Align given proteins with clustalw. recs are iterable of Biopython SeqIO objects """ |
fasta_file = op.join(work_dir, "prot-start.fasta")
align_file = op.join(work_dir, "prot.aln")
# NOTE(review): file() is a Python-2-only builtin; open() under Python 3.
SeqIO.write(recs, file(fasta_file, "w"), "fasta")
# outorder="INPUT" keeps the aligned sequences in input order.
clustal_cl = ClustalwCommandline(cmd=CLUSTALW_BIN("clustalw2"),
infile=fasta_file, outfile=align_file, outorder="INPUT",
type="PROTEIN")
stdout, stderr = clustal_cl()
aln_file = file(clustal_cl.outfile)
alignment = AlignIO.read(aln_file, "clustal")
print("\tDoing clustalw alignment: %s" % clustal_cl, file=sys.stderr)
if outfmt == "fasta":
return alignment.format("fasta")
if outfmt == "clustal":
return alignment |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def muscle_align_protein(recs, work_dir, outfmt="fasta", inputorder=True):
""" Align given proteins with muscle. recs are iterable of Biopython SeqIO objects """ |
fasta_file = op.join(work_dir, "prot-start.fasta")
align_file = op.join(work_dir, "prot.aln")
# NOTE(review): file() is a Python-2-only builtin; open() under Python 3.
SeqIO.write(recs, file(fasta_file, "w"), "fasta")
muscle_cl = MuscleCommandline(cmd=MUSCLE_BIN("muscle"),
input=fasta_file, out=align_file, seqtype="protein",
clwstrict=True)
stdout, stderr = muscle_cl()
alignment = AlignIO.read(muscle_cl.out, "clustal")
if inputorder:
# muscle scrambles sequence order; rewrite the output in input order,
# after which the file is FASTA rather than clustal.
try:
muscle_inputorder(muscle_cl.input, muscle_cl.out)
except ValueError:
return ""
alignment = AlignIO.read(muscle_cl.out, "fasta")
print("\tDoing muscle alignment: %s" % muscle_cl, file=sys.stderr)
if outfmt == "fasta":
return alignment.format("fasta")
if outfmt == "clustal":
return alignment.format("clustal") |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def subset(args):
""" Subset some pre-calculated ks ka values (in ksfile) according to pairs in tab delimited pairsfile/anchorfile. """ |
p = OptionParser(subset.__doc__)
p.add_option("--noheader", action="store_true",
help="don't write ksfile header line [default: %default]")
p.add_option("--block", action="store_true",
help="preserve block structure in input [default: %default]")
p.set_stripnames()
p.set_outfile()
opts, args = p.parse_args(args)
# NOTE(review): args[1:] below suggests multiple ks files are intended,
# but this check rejects anything other than exactly two args — verify.
if len(args) != 2:
sys.exit(not p.print_help())
pairsfile, ksfiles = args[0], args[1:]
noheader = opts.noheader
block = opts.block
if block:
noheader = True
outfile = opts.outfile
ksvals = {}
for ksfile in ksfiles:
ksvals.update(dict((line.name, line) for line in \
KsFile(ksfile, strip_names=opts.strip_names)))
fp = open(pairsfile)
fw = must_open(outfile, "w")
if not noheader:
print(fields, file=fw)
i = j = 0
for row in fp:
if row[0] == '#':
if block:
print(row.strip(), file=fw)
continue
a, b = row.split()[:2]
# Pair keys are order-insensitive: try "a;b" then "b;a".
name = ";".join((a, b))
if name not in ksvals:
name = ";".join((b, a))
if name not in ksvals:
# Pair absent from all ks files; emit placeholder columns.
j += 1
print("\t".join((a, b, ".", ".")), file=fw)
continue
ksline = ksvals[name]
if block:
print("\t".join(str(x) for x in (a, b, ksline.ks)), file=fw)
else:
ksline.name = ";".join((a, b))
print(ksline, file=fw)
i += 1
fw.close()
logging.debug("{0} pairs not found in ksfiles".format(j))
logging.debug("{0} ks records written to `{1}`".format(i, outfile))
return outfile |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def report(args):
'''
%prog report ksfile
generate a report given a Ks result file (as produced by synonymous_calc.py).
describe the median Ks, Ka values, as well as the distribution in stem-leaf plot
'''
from jcvi.utils.cbook import SummaryStats
from jcvi.graphics.histogram import stem_leaf_plot
p = OptionParser(report.__doc__)
p.add_option("--pdf", default=False, action="store_true",
help="Generate graphic output for the histogram [default: %default]")
p.add_option("--components", default=1, type="int",
help="Number of components to decompose peaks [default: %default]")
add_plot_options(p)
opts, args, iopts = p.set_image_options(args, figsize="5x5")
if len(args) != 1:
sys.exit(not p.print_help())
ks_file, = args
data = KsFile(ks_file)
ks_min = opts.vmin
ks_max = opts.vmax
bins = opts.bins
# Produce a stem-leaf summary for every ks column in the file.
for f in fields.split(",")[1:]:
columndata = [getattr(x, f) for x in data]
ks = ("ks" in f)
if not ks:
continue
columndata = [x for x in columndata if ks_min <= x <= ks_max]
st = SummaryStats(columndata)
title = "{0} ({1}): ".format(descriptions[f], ks_file)
title += "Median:{0:.3f} (1Q:{1:.3f}|3Q:{2:.3f}||".\
format(st.median, st.firstq, st.thirdq)
title += "Mean:{0:.3f}|Std:{1:.3f}||N:{2})".\
format(st.mean, st.sd, st.size)
tbins = (0, ks_max, bins) if ks else (0, .6, 10)
# One extra digit of precision when bins are narrower than 0.1.
digit = 2 if (ks_max * 1. / bins) < .1 else 1
stem_leaf_plot(columndata, *tbins, digit=digit, title=title)
if not opts.pdf:
return
# Optional graphical output: histogram with fitted mixture components.
components = opts.components
data = [x.ng_ks for x in data]
data = [x for x in data if ks_min <= x <= ks_max]
fig = plt.figure(1, (iopts.w, iopts.h))
ax = fig.add_axes([.12, .1, .8, .8])
kp = KsPlot(ax, ks_max, opts.bins, legendp=opts.legendp)
kp.add_data(data, components, fill=opts.fill, fitted=opts.fit)
kp.draw(title=opts.title) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def passthrough(args):
""" %prog passthrough chrY.vcf chrY.new.vcf Pass through Y and MT vcf. """ |
p = OptionParser(passthrough.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
vcffile, newvcffile = args
fp = open(vcffile)
fw = open(newvcffile, "w")
gg = ["0/0", "0/1", "1/1"]
for row in fp:
if row[0] == "#":
print(row.strip(), file=fw)
continue
v = VcfLine(row)
v.filter = "PASS"
v.format = "GT:GP"
# Fake genotype probabilities: probability 1 for the observed genotype.
probs = [0] * 3
probs[gg.index(v.genotype)] = 1
# Mark as phased ("|") and append the GP field.
v.genotype = v.genotype.replace("/", "|") + \
":{0}".format(",".join("{0:.3f}".format(x) for x in probs))
print(v, file=fw)
fw.close() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def validate(args):
""" %prog validate imputed.vcf withheld.vcf Validate imputation against withheld variants. """ |
p = OptionParser(validate.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
imputed, withheld = args
# Truth set: (chrom, pos) -> genotype from the withheld VCF.
register = {}
fp = open(withheld)
for row in fp:
if row[0] == "#":
continue
v = VcfLine(row)
register[(v.seqid, v.pos)] = v.genotype
logging.debug("Imported {0} records from `{1}`".\
format(len(register), withheld))
fp = must_open(imputed)
hit = concordant = 0
seen = set()
for row in fp:
if row[0] == "#":
continue
v = VcfLine(row)
chr, pos, genotype = v.seqid, v.pos, v.genotype
# Count each imputed site once, and only sites present in the truth set.
if (chr, pos) in seen:
continue
seen.add((chr, pos))
if (chr, pos) not in register:
continue
truth = register[(chr, pos)]
imputed = genotype.split(":")[0]
# Normalize phased calls (e.g. "1|0") to sorted unphased form ("0/1").
if "|" in imputed:
imputed = "/".join(sorted(genotype.split(":")[0].split("|")))
#probs = [float(x) for x in genotype.split(":")[-1].split(",")]
#imputed = max(zip(probs, ["0/0", "0/1", "1/1"]))[-1]
hit += 1
if truth == imputed:
concordant += 1
else:
print(row.strip(), "truth={0}".format(truth), file=sys.stderr)
logging.debug("Total concordant: {0}".\
format(percentage(concordant, hit))) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def minimac(args):
""" %prog batchminimac input.txt Use MINIMAC3 to impute vcf on all chromosomes. """ |
p = OptionParser(minimac.__doc__)
p.set_home("shapeit")
p.set_home("minimac")
p.set_outfile()
p.set_chr()
p.set_ref()
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
txtfile, = args
ref = opts.ref
mm = MakeManager()
pf = txtfile.split(".")[0]
allrawvcf = []
alloutvcf = []
chrs = opts.chr.split(",")
# Build one Makefile pipeline per chromosome: split/convert input,
# liftover, impute (chromosome-specific path), dedupe, liftover again.
for x in chrs:
px = CM[x]
chrvcf = pf + ".{0}.vcf".format(px)
if txtfile.endswith(".vcf"):
cmd = "vcftools --vcf {0} --chr {1}".format(txtfile, x)
cmd += " --out {0}.{1} --recode".format(pf, px)
cmd += " && mv {0}.{1}.recode.vcf {2}".format(pf, px, chrvcf)
else: # 23andme
cmd = "python -m jcvi.formats.vcf from23andme {0} {1}".format(txtfile, x)
cmd += " --ref {0}".format(ref)
mm.add(txtfile, chrvcf, cmd)
chrvcf_hg38 = pf + ".{0}.23andme.hg38.vcf".format(px)
minimac_liftover(mm, chrvcf, chrvcf_hg38, opts)
allrawvcf.append(chrvcf_hg38)
minimacvcf = "{0}.{1}.minimac.dose.vcf".format(pf, px)
# X needs special phasing; Y/MT are haploid and passed through unimputed.
if x == "X":
minimac_X(mm, x, chrvcf, opts)
elif x in ["Y", "MT"]:
cmd = "python -m jcvi.variation.impute passthrough"
cmd += " {0} {1}".format(chrvcf, minimacvcf)
mm.add(chrvcf, minimacvcf, cmd)
else:
minimac_autosome(mm, x, chrvcf, opts)
# keep the best line for multi-allelic markers
uniqvcf= "{0}.{1}.minimac.uniq.vcf".format(pf, px)
cmd = "python -m jcvi.formats.vcf uniq {0} > {1}".\
format(minimacvcf, uniqvcf)
mm.add(minimacvcf, uniqvcf, cmd)
minimacvcf_hg38 = "{0}.{1}.minimac.hg38.vcf".format(pf, px)
minimac_liftover(mm, uniqvcf, minimacvcf_hg38, opts)
alloutvcf.append(minimacvcf_hg38)
# Concatenate per-chromosome outputs when more than one chromosome was run.
if len(allrawvcf) > 1:
rawhg38vcfgz = pf + ".all.23andme.hg38.vcf.gz"
cmd = "vcf-concat {0} | bgzip > {1}".format(" ".join(allrawvcf), rawhg38vcfgz)
mm.add(allrawvcf, rawhg38vcfgz, cmd)
if len(alloutvcf) > 1:
outhg38vcfgz = pf + ".all.minimac.hg38.vcf.gz"
cmd = "vcf-concat {0} | bgzip > {1}".format(" ".join(alloutvcf), outhg38vcfgz)
mm.add(alloutvcf, outhg38vcfgz, cmd)
mm.write() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def beagle(args):
""" %prog beagle input.vcf 1 Use BEAGLE4.1 to impute vcf on chromosome 1. """ |
p = OptionParser(beagle.__doc__)
p.set_home("beagle")
p.set_ref()
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
vcffile, chr = args
pf = vcffile.rsplit(".", 1)[0]
outpf = pf + ".beagle"
outfile = outpf + ".vcf.gz"
mm = MakeManager()
beagle_cmd = opts.beagle_home
# 1000 Genomes Phase 3 reference panel and genetic map for this chromosome.
kg = op.join(opts.ref, "1000GP_Phase3")
cmd = beagle_cmd + " gt={0}".format(vcffile)
cmd += " ref={0}/chr{1}.1kg.phase3.v5a.bref".format(kg, chr)
cmd += " map={0}/plink.chr{1}.GRCh37.map".format(kg, chr)
cmd += " out={0}".format(outpf)
# NOTE(review): nthreads is hard-coded to 16 even though set_cpus() was
# called — presumably should use opts.cpus; verify.
cmd += " nthreads=16 gprobs=true"
mm.add(vcffile, outfile, cmd)
mm.write() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def impute(args):
""" %prog impute input.vcf hs37d5.fa 1 Use IMPUTE2 to impute vcf on chromosome 1. """ |
from pyfaidx import Fasta
p = OptionParser(impute.__doc__)
p.set_home("shapeit")
p.set_home("impute")
p.set_ref()
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
vcffile, fastafile, chr = args
mm = MakeManager()
pf = vcffile.rsplit(".", 1)[0]
hapsfile = pf + ".haps"
kg = op.join(opts.ref, "1000GP_Phase3")
# Pre-phase with SHAPEIT; IMPUTE2 consumes the phased haplotypes.
shapeit_phasing(mm, chr, vcffile, opts)
fasta = Fasta(fastafile)
size = len(fasta[chr])
binsize = 5000000
# NOTE(review): Python-2 integer division; under Python 3 this yields a
# float. Also, bins is already rounded up when there is a remainder, yet
# the loop below runs xrange(bins + 1) — possible empty extra chunk; verify.
bins = size / binsize # 5Mb bins
if size % binsize:
bins += 1
impute_cmd = op.join(opts.impute_home, "impute2")
chunks = []
# One IMPUTE2 job per 5 Mb window; each writes its own chunk file.
for x in xrange(bins + 1):
chunk_start = x * binsize + 1
chunk_end = min(chunk_start + binsize - 1, size)
outfile = pf + ".chunk{0:02d}.impute2".format(x)
mapfile = "{0}/genetic_map_chr{1}_combined_b37.txt".format(kg, chr)
rpf = "{0}/1000GP_Phase3_chr{1}".format(kg, chr)
cmd = impute_cmd + " -m {0}".format(mapfile)
cmd += " -known_haps_g {0}".format(hapsfile)
cmd += " -h {0}.hap.gz -l {0}.legend.gz".format(rpf)
cmd += " -Ne 20000 -int {0} {1}".format(chunk_start, chunk_end)
cmd += " -o {0} -allow_large_regions -seed 367946".format(outfile)
# touch ensures the target exists even if IMPUTE2 wrote nothing.
cmd += " && touch {0}".format(outfile)
mm.add(hapsfile, outfile, cmd)
chunks.append(outfile)
# Combine all the files
imputefile = pf + ".impute2"
cmd = "cat {0} > {1}".format(" ".join(chunks), imputefile)
mm.add(chunks, imputefile, cmd)
# Convert to vcf
vcffile = pf + ".impute2.vcf"
cmd = "python -m jcvi.formats.vcf fromimpute2 {0} {1} {2} > {3}".\
format(imputefile, fastafile, chr, vcffile)
mm.add(imputefile, vcffile, cmd)
mm.write() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def summary(args):
""" %prog summary gffile fastafile Print summary stats, including: - Gene/Exon/Intron - Number - Average size (bp) - Median size (bp) - Total length (Mb) - % of genome - % GC """ |
p = OptionParser(summary.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
gff_file, ref = args
s = Fasta(ref)
g = make_index(gff_file)
geneseqs, exonseqs, intronseqs = [], [], [] # Calc % GC
for f in g.features_of_type("gene"):
fid = f.id
fseq = s.sequence({'chr': f.chrom, 'start': f.start, 'stop': f.stop})
geneseqs.append(fseq)
# Level-2 children of the gene; set() removes exons shared by isoforms.
exons = set((c.chrom, c.start, c.stop) for c in g.children(fid, 2) \
if c.featuretype == "exon")
exons = list(exons)
for chrom, start, stop in exons:
fseq = s.sequence({'chr': chrom, 'start': start, 'stop': stop})
exonseqs.append(fseq)
# Introns are the gaps between the (interleaved) exon ranges.
introns = range_interleave(exons)
for chrom, start, stop in introns:
fseq = s.sequence({'chr': chrom, 'start': start, 'stop': stop})
intronseqs.append(fseq)
r = {} # Report
for t, tseqs in zip(("Gene", "Exon", "Intron"), (geneseqs, exonseqs, intronseqs)):
tsizes = [len(x) for x in tseqs]
tsummary = SummaryStats(tsizes, dtype="int")
r[t, "Number"] = tsummary.size
r[t, "Average size (bp)"] = tsummary.mean
r[t, "Median size (bp)"] = tsummary.median
r[t, "Total length (Mb)"] = human_size(tsummary.sum, precision=0, target="Mb")
r[t, "% of genome"] = percentage(tsummary.sum, s.totalsize, precision=0, mode=-1)
r[t, "% GC"] = gc(tseqs)
print(tabulate(r), file=sys.stderr) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run(self, clean=True):
""" Create a temporary file and run it """ |
template = self.template
parameters = self.parameters
# write to a temporary R script
fw = must_open("tmp", "w")
path = fw.name
# Fill the R template with self.parameters; safe_substitute leaves
# unknown placeholders untouched rather than raising.
fw.write(template.safe_substitute(**parameters))
fw.close()
sh("Rscript %s" % path)
if clean:
os.remove(path)
# I have no idea why using ggsave, there is one extra image
# generated, but here I remove it
rplotspdf = "Rplots.pdf"
if op.exists(rplotspdf):
os.remove(rplotspdf) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def transitive_reduction(G):
""" Returns a transitive reduction of a graph. The original graph is not modified. A transitive reduction H of G has a path from x to y if and only if there was a path from x to y in G. Deleting any edge of H destroys this property. A transitive reduction is not unique in general. A transitive reduction has the same transitive closure as the original graph. A transitive reduction of a complete graph is a tree. A transitive reduction of a tree is itself. [(1, 2), (2, 3), (3, 4)] """ |
H = G.copy()
# NOTE(review): edges_iter is the old (networkx < 2.0) API, and
# add_edge(a, b, w) passes the attribute dict positionally — verify
# against the networkx version in use.
for a, b, w in G.edges_iter(data=True):
# Try deleting the edge, see if we still have a path
# between the vertices
H.remove_edge(a, b)
if not nx.has_path(H, a, b): # we shouldn't have deleted it
H.add_edge(a, b, w)
return H |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def merge_paths(paths, weights=None):
""" Zip together sorted lists. [1, 2, 3, 4, 5] [1, 2, 3, 4] """ |
# Build a (weighted) precedence graph from the paths, then reduce it.
G = make_paths(paths, weights=weights)
G = reduce_paths(G)
return G |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_next(self, tag="<"):
""" This function is tricky and took me a while to figure out. The tag specifies the direction where the current edge came from. tag ntag ---> V >----> U cur next This means the next vertex should follow the outs since this tag is inward '<'. Check if there are multiple branches if len(L) == 1, and also check if the next it finds has multiple incoming edges though if len(B) == 1. """ |
next, ntag = None, None
# Follow outgoing edges when the incoming tag is '<', else incoming.
L = self.outs if tag == "<" else self.ins
if len(L) == 1:
e, = L
# Determine which endpoint of the single edge is the far vertex.
if e.v1.v == self.v:
next, ntag = e.v2, e.o2
ntag = "<" if ntag == ">" else ">" # Flip tag if on other end
else:
next, ntag = e.v1, e.o1
if next: # Validate the next vertex
# Reject the step if the candidate has multiple edges on the
# approaching side (a branch point).
B = next.ins if ntag == "<" else next.outs
if len(B) > 1:
return None, None
return next, ntag |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dust2bed(args):
""" %prog dust2bed fastafile Use dustmasker to find low-complexity regions (LCRs) in the genome. """ |
from jcvi.formats.base import read_block
p = OptionParser(dust2bed.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastafile, = args
interval = fastafile + ".iv"
# Cache the dustmasker interval output; only rerun when FASTA is newer.
if need_update(fastafile, interval):
cmd = "dustmasker -in {0}".format(fastafile)
sh(cmd, outfile=interval)
fp = open(interval)
bedfile = fastafile.rsplit(".", 1)[0] + ".dust.bed"
fw = must_open(bedfile, "w")
nlines = 0
nbases = 0
# Interval format: ">seqid" header followed by "start - end" lines.
for header, block in read_block(fp, ">"):
header = header.strip(">")
for b in block:
start, end = b.split(" - ")
start, end = int(start), int(end)
print("\t".join(str(x) for x in (header, start, end)), file=fw)
nlines += 1
nbases += end - start
logging.debug("A total of {0} DUST intervals ({1} bp) exported to `{2}`".\
format(nlines, nbases, bedfile)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fasta2bed(fastafile):
""" Alternative BED generation from FASTA file. Used for sanity check. """ |
dustfasta = fastafile.rsplit(".", 1)[0] + ".dust.fasta"
for name, seq in parse_fasta(dustfasta):
# dustmasker soft-masks LCRs in lowercase; group consecutive positions
# by case to recover the masked runs.
for islower, ss in groupby(enumerate(seq), key=lambda x: x[-1].islower()):
if not islower:
continue
ss = list(ss)
# min/max over (index, base) tuples: first and last masked positions.
ms, mn = min(ss)
xs, xn = max(ss)
# NOTE(review): xs is the inclusive last index; BED ends are exclusive,
# so this may be off by one relative to dust2bed — verify.
print("\t".join(str(x) for x in (name, ms, xs))) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def circular(args):
""" %prog circular fastafile startpos Make circular genome, startpos is the place to start the sequence. This can be determined by mapping to a reference. Self overlaps are then resolved. Startpos is 1-based. """ |
from jcvi.assembly.goldenpath import overlap
p = OptionParser(circular.__doc__)
p.add_option("--flip", default=False, action="store_true",
help="Reverse complement the sequence")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
fastafile, startpos = args
startpos = int(startpos)
key, seq = next(parse_fasta(fastafile))
# Rotate: the sequence after startpos comes first, the prefix is appended.
aseq = seq[startpos:]
bseq = seq[:startpos]
aseqfile, bseqfile = "a.seq", "b.seq"
for f, s in zip((aseqfile, bseqfile), (aseq, bseq)):
fw = must_open(f, "w")
print(">{0}\n{1}".format(f, s), file=fw)
fw.close()
# Find the self-overlap between the two halves and trim it out.
o = overlap([aseqfile, bseqfile])
seq = aseq[:o.qstop] + bseq[o.sstop:]
seq = Seq(seq)
if opts.flip:
seq = seq.reverse_complement()
for f in (aseqfile, bseqfile):
os.remove(f)
fw = must_open(opts.outfile, "w")
rec = SeqRecord(seq, id=key, description="")
SeqIO.write([rec], fw, "fasta")
fw.close() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dust(args):
""" %prog dust assembly.fasta Remove low-complexity contigs within assembly. """ |
p = OptionParser(dust.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastafile, = args
dustfastafile = fastafile.rsplit(".", 1)[0] + ".dust.fasta"
if need_update(fastafile, dustfastafile):
cmd = "dustmasker -in {0}".format(fastafile)
cmd += " -out {0} -outfmt fasta".format(dustfastafile)
sh(cmd)
# Print the IDs of contigs that are >= 98% soft-masked (lowercase)
# or N; downstream tooling removes them.
for name, seq in parse_fasta(dustfastafile):
nlow = sum(1 for x in seq if x in "acgtnN")
pctlow = nlow * 100. / len(seq)
if pctlow < 98:
continue
#print "{0}\t{1:.1f}".format(name, pctlow)
print(name) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dedup(args):
""" %prog dedup assembly.assembly.blast assembly.fasta Remove duplicate contigs within assembly. """ |
from jcvi.formats.blast import BlastLine
p = OptionParser(dedup.__doc__)
p.set_align(pctid=0, pctcov=98)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
blastfile, fastafile = args
cov = opts.pctcov / 100.
sizes = Sizes(fastafile).mapping
fp = open(blastfile)
removed = set()
for row in fp:
b = BlastLine(row)
query, subject = b.query, b.subject
# Skip trivial self-hits.
if query == subject:
continue
qsize, ssize = sizes[query], sizes[subject]
qspan = abs(b.qstop - b.qstart)
# Require the hit to cover at least pctcov of the query.
if qspan < qsize * cov:
continue
# Of the contained pair, drop the smaller contig (ties broken by name).
if (qsize, query) < (ssize, subject):
removed.add(query)
print("\n".join(sorted(removed))) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def build(args):
""" %prog build current.fasta Bacteria_Virus.fasta prefix Build assembly files after a set of clean-ups: 1. Use cdhit (100%) to remove duplicate scaffolds 2. Screen against the bacteria and virus database (remove scaffolds 95% id, 50% cov) 3. Mask matches to UniVec_Core 4. Sort by decreasing scaffold sizes 5. Rename the scaffolds sequentially 6. Build the contigs by splitting scaffolds at gaps 7. Rename the contigs sequentially """ |
from jcvi.apps.cdhit import deduplicate
from jcvi.apps.vecscreen import mask
from jcvi.formats.fasta import sort
p = OptionParser(build.__doc__)
p.add_option("--nodedup", default=False, action="store_true",
help="Do not deduplicate [default: deduplicate]")
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
fastafile, bacteria, pf = args
# Each step consumes the previous step's output file path.
dd = deduplicate([fastafile, "--pctid=100"]) \
if not opts.nodedup else fastafile
screenfasta = screen([dd, bacteria])
tidyfasta = mask([screenfasta])
sortedfasta = sort([tidyfasta, "--sizes"])
scaffoldfasta = pf + ".assembly.fasta"
format([sortedfasta, scaffoldfasta, "--prefix=scaffold_", "--sequential"])
# Split scaffolds into contigs at gaps of >= 10 bp.
gapsplitfasta = pf + ".gapSplit.fasta"
cmd = "gapSplit -minGap=10 {0} {1}".format(scaffoldfasta, gapsplitfasta)
sh(cmd)
contigsfasta = pf + ".contigs.fasta"
format([gapsplitfasta, contigsfasta, "--prefix=contig_", "--sequential"]) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def screen(args):
""" %prog screen scaffolds.fasta library.fasta Screen sequences against FASTA library. Sequences that have 95% id and 50% cov will be removed by default. """ |
from jcvi.apps.align import blast
from jcvi.formats.blast import covfilter
p = OptionParser(screen.__doc__)
p.set_align(pctid=95, pctcov=50)
p.add_option("--best", default=1, type="int",
help="Get the best N hit [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
scaffolds, library = args
pctidflag = "--pctid={0}".format(opts.pctid)
# BLAST scaffolds against the library, then collect IDs of scaffolds
# exceeding both identity and coverage thresholds.
blastfile = blast([library, scaffolds, pctidflag,
"--best={0}".format(opts.best)])
idsfile = blastfile.rsplit(".", 1)[0] + ".ids"
covfilter([blastfile, scaffolds, "--ids=" + idsfile,
pctidflag, "--pctcov={0}".format(opts.pctcov)])
pf = scaffolds.rsplit(".", 1)[0]
nf = pf + ".screen.fasta"
# faSomeRecords -exclude drops the flagged scaffolds from the FASTA.
cmd = "faSomeRecords {0} -exclude {1} {2}".format(scaffolds, idsfile, nf)
sh(cmd)
logging.debug("Screened FASTA written to `{0}`.".format(nf))
return nf |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def scaffold(args):
""" %prog scaffold ctgfasta agpfile Build scaffolds based on ordering in the AGP file. """ |
from jcvi.formats.agp import bed, order_to_agp, build
from jcvi.formats.bed import Bed
p = OptionParser(scaffold.__doc__)
p.add_option("--prefix", default=False, action="store_true",
help="Keep IDs with same prefix together [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
ctgfasta, agpfile = args
sizes = Sizes(ctgfasta).mapping
pf = ctgfasta.rsplit(".", 1)[0]
phasefile = pf + ".phases"
fwphase = open(phasefile, "w")
newagpfile = pf + ".new.agp"
fwagp = open(newagpfile, "w")
scaffoldbuckets = defaultdict(list)
bedfile = bed([agpfile, "--nogaps", "--outfile=tmp"])
bb = Bed(bedfile)
# Bucket partially-ordered contig runs, either by scaffold name or by
# shared ID prefix (--prefix).
for s, partialorder in bb.sub_beds():
name = partialorder[0].accn
bname = name.rsplit("_", 1)[0] if opts.prefix else s
scaffoldbuckets[bname].append([(b.accn, b.strand) for b in partialorder])
# Now the buckets contain a mixture of singletons and partially resolved
# scaffolds. Print the scaffolds first then remaining singletons.
for bname, scaffolds in sorted(scaffoldbuckets.items()):
ctgorder = []
singletons = set()
for scaf in sorted(scaffolds):
for node, orientation in scaf:
ctgorder.append((node, orientation))
if len(scaf) == 1:
singletons.add(node)
nscaffolds = len(scaffolds)
nsingletons = len(singletons)
# Phase codes: 3 = lone singleton, 2 = single resolved scaffold,
# 1 = mixture.
if nsingletons == 1 and nscaffolds == 0:
phase = 3
elif nsingletons == 0 and nscaffolds == 1:
phase = 2
else:
phase = 1
msg = "{0}: Scaffolds={1} Singletons={2} Phase={3}".\
format(bname, nscaffolds, nsingletons, phase)
print(msg, file=sys.stderr)
print("\t".join((bname, str(phase))), file=fwphase)
order_to_agp(bname, ctgorder, sizes, fwagp)
fwagp.close()
os.remove(bedfile)
# Materialize the scaffold FASTA from the new AGP and tidy it up.
fastafile = "final.fasta"
build([newagpfile, ctgfasta, fastafile])
tidy([fastafile]) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def overlapbatch(args):
""" %prog overlapbatch ctgfasta poolfasta Fish out the sequences in `poolfasta` that overlap with `ctgfasta`. Mix and combine using `minimus2`. """ |
# NOTE(review): uses overlap.__doc__ rather than overlapbatch.__doc__,
# so the help text shown is the single-sequence command's — verify.
p = OptionParser(overlap.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
ctgfasta, poolfasta = args
f = Fasta(ctgfasta)
# Run the overlap pipeline once per contig, each written to its own file.
for k, rec in f.iteritems_ordered():
fastafile = k + ".fasta"
fw = open(fastafile, "w")
SeqIO.write([rec], fw, "fasta")
fw.close()
overlap([fastafile, poolfasta]) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def array(args):
""" %prog array commands.list Parallelize a set of commands on grid using array jobs. """ |
p = OptionParser(array.__doc__)
p.set_grid_opts(array=True)
p.set_params(prog="grid")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
cmds, = args
fp = open(cmds)
# N = number of commands = number of array tasks.
N = sum(1 for x in fp)
fp.close()
pf = cmds.rsplit(".", 1)[0]
runfile = pf + ".sh"
assert runfile != cmds, \
"Commands list file should not have a `.sh` extension"
engine = get_grid_engine()
threaded = opts.threaded or 1
contents = arraysh.format(cmds) if engine == "SGE" \
else arraysh_ua.format(N, threaded, cmds)
write_file(runfile, contents)
# PBS scripts embed their own directives; submission stops here.
if engine == "PBS":
return
# Escape $TASK_ID so the grid engine, not the local shell, expands it.
outfile = "{0}.{1}.out".format(pf, "\$TASK_ID")
errfile = "{0}.{1}.err".format(pf, "\$TASK_ID")
p = GridProcess("sh {0}".format(runfile), outfile=outfile, errfile=errfile,
arr=N, extra_opts=opts.extra, grid_opts=opts)
p.start() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def covlen(args):
""" %prog covlen covfile fastafile Plot coverage vs length. `covfile` is two-column listing contig id and depth of coverage. """ |
import numpy as np
import pandas as pd
import seaborn as sns
from jcvi.formats.base import DictFile
p = OptionParser(covlen.__doc__)
p.add_option("--maxsize", default=1000000, type="int", help="Max contig size")
# NOTE(review): help text appears copy-pasted from --maxsize; this option
# caps coverage, not contig size — verify.
p.add_option("--maxcov", default=100, type="int", help="Max contig size")
p.add_option("--color", default='m', help="Color of the data points")
p.add_option("--kind", default="scatter",
choices=("scatter", "reg", "resid", "kde", "hex"),
help="Kind of plot to draw")
opts, args, iopts = p.set_image_options(args, figsize="8x8")
if len(args) != 2:
sys.exit(not p.print_help())
covfile, fastafile = args
cov = DictFile(covfile, cast=float)
s = Sizes(fastafile)
data = []
maxsize, maxcov = opts.maxsize, opts.maxcov
# Contigs missing from covfile default to zero coverage; outliers beyond
# either cap are dropped rather than clipped.
for ctg, size in s.iter_sizes():
c = cov.get(ctg, 0)
if size > maxsize:
continue
if c > maxcov:
continue
data.append((size, c))
x, y = zip(*data)
x = np.array(x)
y = np.array(y)
logging.debug("X size {0}, Y size {1}".format(x.size, y.size))
df = pd.DataFrame()
xlab, ylab = "Length", "Coverage of depth (X)"
df[xlab] = x
df[ylab] = y
sns.jointplot(xlab, ylab, kind=opts.kind, data=df,
xlim=(0, maxsize), ylim=(0, maxcov),
stat_func=None, edgecolor="w", color=opts.color)
figname = covfile + ".pdf"
savefig(figname, dpi=iopts.dpi, iopts=iopts) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def scaffold(args):
    """ %prog scaffold scaffold.fasta synteny.blast synteny.sizes synteny.bed physicalmap.blast physicalmap.sizes physicalmap.bed As evaluation of scaffolding, visualize external line of evidences: * Plot synteny to an external genome * Plot alignments to physical map * Plot alignments to genetic map (TODO) Each trio defines one panel to be plotted. blastfile defines the matchings between the evidences vs scaffolds. Then the evidence sizes, and evidence bed to plot dot plots. This script will plot a dot in the dot plot in the corresponding location the plots are one contig/scaffold per plot. """ |
from jcvi.utils.iter import grouper
p = OptionParser(scaffold.__doc__)
p.add_option("--cutoff", type="int", default=1000000,
help="Plot scaffolds with size larger than [default: %default]")
p.add_option("--highlights",
help="A set of regions in BED format to highlight [default: %default]")
opts, args, iopts = p.set_image_options(args, figsize="14x8", dpi=150)
# Arguments are one scaffold fasta followed by (blast, sizes, bed) trios
if len(args) < 4 or len(args) % 3 != 1:
sys.exit(not p.print_help())
highlights = opts.highlights
scafsizes = Sizes(args[0])
trios = list(grouper(args[1:], 3))
trios = [(a, Sizes(b), Bed(c)) for a, b, c in trios]
if highlights:
hlbed = Bed(highlights)
for scaffoldID, scafsize in scafsizes.iter_sizes():
if scafsize < opts.cutoff:
continue
logging.debug("Loading {0} (size={1})".format(scaffoldID,
thousands(scafsize)))
# Throwaway one-line sizes file for this scaffold only
tmpname = scaffoldID + ".sizes"
tmp = open(tmpname, "w")
tmp.write("{0}\t{1}".format(scaffoldID, scafsize))
tmp.close()
tmpsizes = Sizes(tmpname)
tmpsizes.close(clean=True)
if highlights:
subhighlights = list(hlbed.sub_bed(scaffoldID))
imagename = ".".join((scaffoldID, opts.format))
# NOTE(review): subhighlights is only bound when --highlights is given;
# if it is omitted, this call would raise NameError -- verify intent.
plot_one_scaffold(scaffoldID, tmpsizes, None, trios, imagename, iopts,
highlights=subhighlights)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def A50(args):
    """ Plots A50 graphics, see blog post (http://blog.malde.org/index.php/a50/) """ |
p = OptionParser(A50.__doc__)
p.add_option("--overwrite", default=False, action="store_true",
help="overwrite .rplot file if exists [default: %default]")
p.add_option("--cutoff", default=0, type="int", dest="cutoff",
help="use contigs above certain size [default: %default]")
p.add_option("--stepsize", default=10, type="int", dest="stepsize",
help="stepsize for the distribution [default: %default]")
opts, args = p.parse_args(args)
if not args:
sys.exit(p.print_help())
import numpy as np
from jcvi.utils.table import loadtable
stepsize = opts.stepsize  # use stepsize to speed up drawing
rplot = "A50.rplot"
# The .rplot data file is cached; rebuild only on --overwrite
if not op.exists(rplot) or opts.overwrite:
fw = open(rplot, "w")
header = "\t".join(("index", "cumsize", "fasta"))
statsheader = ("Fasta", "L50", "N50", "Min", "Max", "Average", "Sum",
"Counts")
statsrows = []
print(header, file=fw)
for fastafile in args:
f = Fasta(fastafile, index=False)
ctgsizes = [length for k, length in f.itersizes()]
ctgsizes = np.array(ctgsizes)
a50, l50, n50 = calculate_A50(ctgsizes, cutoff=opts.cutoff)
cmin, cmax, cmean = min(ctgsizes), max(ctgsizes), np.mean(ctgsizes)
csum, counts = np.sum(ctgsizes), len(ctgsizes)
cmean = int(round(cmean))
statsrows.append((fastafile, l50, n50, cmin, cmax, cmean, csum,
counts))
logging.debug("`{0}` ctgsizes: {1}".format(fastafile, ctgsizes))
tag = "{0} (L50={1})".format(\
op.basename(fastafile).rsplit(".", 1)[0], l50)
logging.debug(tag)
# Downsample the cumulative curve by stepsize; sizes plotted in Mb
# NOTE: xrange is Python 2 only
for i, s in zip(xrange(0, len(a50), stepsize), a50[::stepsize]):
print("\t".join((str(i), str(s / 1000000.), tag)), file=fw)
fw.close()
table = loadtable(statsheader, statsrows)
print(table, file=sys.stderr)
generate_plot(rplot)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fromdelta(args):
    """ %prog fromdelta deltafile Convert deltafile to coordsfile. """
    p = OptionParser(fromdelta.__doc__)
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())

    deltafile, = args
    pf = deltafile.rsplit(".", 1)[0]
    coordsfile = pf + ".coords"
    # show-coords flags: presumably -r sort by ref, -c coverage columns,
    # -l length columns, -H no header (MUMmer convention)
    sh("show-coords -rclH {0}".format(deltafile), outfile=coordsfile)
    return coordsfile
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sort(args):
    """ %prog sort coordsfile Sort coordsfile based on query or ref. """ |
import jcvi.formats.blast
# Delegate to the blast sorter, tagging the input as coords-format
return jcvi.formats.blast.sort(args + ["--coords"])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def coverage(args):
    """ %prog coverage coordsfile Report the coverage per query record, useful to see which query matches reference. The coords file MUST be filtered with supermap:: jcvi.algorithms.supermap --filter query """
    p = OptionParser(coverage.__doc__)
    p.add_option("-c", dest="cutoff", default=0.5, type="float",
            help="only report query with coverage greater than [default: %default]")
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())

    coordsfile, = args
    # Parse every line that is a valid coords record; skip the rest
    records = []
    for line in open(coordsfile):
        try:
            records.append(CoordsLine(line))
        except AssertionError:
            continue

    # Group by query and accumulate per-query coverage
    records.sort(key=lambda rec: rec.query)
    totals = []
    for qry, group in groupby(records, key=lambda rec: rec.query):
        totals.append((qry, sum(rec.querycov for rec in group)))

    # Highest coverage first, ties broken by query name
    totals.sort(key=lambda item: (-item[1], item[0]))
    for qry, qcov in totals:
        if qcov < opts.cutoff:
            break
        print("{0}\t{1:.2f}".format(qry, qcov))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def annotate(args):
    """ %prog annotate coordsfile Annotate coordsfile to append an additional column, with the following overlaps: {0}. """ |
# The docstring placeholder is filled with the known overlap categories
p = OptionParser(annotate.__doc__.format(", ".join(Overlap_types)))
p.add_option("--maxhang", default=100, type="int",
help="Max hang to call dovetail overlap [default: %default]")
p.add_option("--all", default=False, action="store_true",
help="Output all lines [default: terminal/containment]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
coordsfile, = args
fp = open(coordsfile)
for row in fp:
# Non-coords lines (headers etc.) fail the CoordsLine assertions
try:
c = CoordsLine(row)
except AssertionError:
continue
ov = c.overlap(opts.maxhang)
# By default only terminal/containment overlaps (ov != 0) are shown
if not opts.all and ov == 0:
continue
print("{0}\t{1}".format(row.strip(), Overlap_types[ov]))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def summary(args):
    """ %prog summary coordsfile provide summary on id% and cov%, for both query and reference """ |
from jcvi.formats.blast import AlignStats
# NOTE(review): AlignStats is imported but not referenced below;
# presumably get_stats() returns an AlignStats instance -- verify.
p = OptionParser(summary.__doc__)
# NOTE(review): -s/--single is declared but never read in this function.
p.add_option("-s", dest="single", default=False, action="store_true",
help="provide stats per reference seq")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(p.print_help())
coordsfile, = args
alignstats = get_stats(coordsfile)
alignstats.print_stats()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bed(args):
    """ %prog bed coordsfile will produce a bed list of mapped position and orientation (needs to be beyond quality cutoff, say 50) in bed format """
    p = OptionParser(bed.__doc__)
    p.add_option("--query", default=False, action="store_true",
            help="print out query intervals rather than ref [default: %default]")
    p.add_option("--pctid", default=False, action="store_true",
            help="use pctid in score [default: %default]")
    p.add_option("--cutoff", dest="cutoff", default=0, type="float",
            help="get all the alignments with quality above threshold " +\
                 "[default: %default]")
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(p.print_help())

    coordsfile, = args
    use_query = opts.query
    use_pctid = opts.pctid
    minquality = opts.cutoff

    # Emit one BED line per alignment that clears the quality threshold
    for rec in Coords(coordsfile):
        if rec.quality < minquality:
            continue
        if use_query:
            print(rec.qbedline(pctid=use_pctid))
        else:
            print(rec.bedline(pctid=use_pctid))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def hits(self):
    """ returns a dict with query => blastline """ |
# Sort by alignment quality so each group enumerates best hits first
self.quality_sort()
hits = dict((query, list(blines)) for (query, blines) in \
groupby(self, lambda x: x.query))
# Restore the default reference order before returning
self.ref_sort()
return hits
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def best_hits(self):
    """ returns a dict with query => best mapped position """ |
# Sort by quality so next() picks the top-scoring hit of each group
self.quality_sort()
best_hits = dict((query, next(blines)) for (query, blines) in \
groupby(self, lambda x: x.query))
# Restore the default reference order before returning
self.ref_sort()
return best_hits
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sh(cmd, grid=False, infile=None, outfile=None, errfile=None, append=False, background=False, threaded=None, log=True, grid_opts=None, silent=False, shell="/bin/bash", check=False):
    """ simple wrapper for system calls """ |
if not cmd:
return 1
# --silent discards both stdout and stderr
if silent:
outfile = errfile = "/dev/null"
if grid:
# Submit to the grid instead of running locally; returns the job id
from jcvi.apps.grid import GridProcess
pr = GridProcess(cmd, infile=infile, outfile=outfile, errfile=errfile,
threaded=threaded, grid_opts=grid_opts)
pr.start()
return pr.jobid
else:
# Build up a shell pipeline string around the command
if infile:
cat = "cat"
# Transparently decompress gzipped input
if infile.endswith(".gz"):
cat = "zcat"
cmd = "{0} {1} |".format(cat, infile) + cmd
if outfile and outfile != "stdout":
if outfile.endswith(".gz"):
cmd += " | gzip"
tag = ">"
if append:
tag = ">>"
cmd += " {0}{1}".format(tag, outfile)
if errfile:
# Same target as stdout -> redirect stderr into stdout
if errfile == outfile:
errfile = "&1"
cmd += " 2>{0}".format(errfile)
if background:
cmd += " &"
if log:
logging.debug(cmd)
# check=True raises on non-zero exit (check_call); otherwise returns it
call_func = check_call if check else call
return call_func(cmd, shell=True, executable=shell)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def Popen(cmd, stdin=None, stdout=PIPE, debug=False, shell="/bin/bash"):
    """
    Run `cmd` through the shell and return the process handle,
    with stdout captured through a pipe by default.
    """
    from subprocess import Popen as SPopen

    if debug:
        logging.debug(cmd)
    # See: <https://blog.nelhage.com/2010/02/a-very-subtle-bug/>
    return SPopen(cmd, bufsize=1, stdin=stdin, stdout=stdout,
                  shell=True, executable=shell)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_newer_file(a, b):
    """
    Return True if file `a` was modified more recently than file `b`.

    If either path does not exist, the answer is False.
    """
    if not (op.exists(a) and op.exists(b)):
        return False
    return os.stat(a).st_mtime > os.stat(b).st_mtime
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def need_update(a, b):
    """
    Decide whether target(s) `b` must be (re)built from source(s) `a`.

    True when any target is missing, when every target is empty, or when
    any source is newer than any target. Both arguments may be single
    paths or lists of paths.
    """
    sources = listify(a)
    targets = listify(b)
    if any(not op.exists(t) for t in targets):
        return True
    if all(os.stat(t).st_size == 0 for t in targets):
        return True
    return any(is_newer_file(s, t) for s in sources for t in targets)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def debug(level=logging.DEBUG):
    """
    Configure root logging with colored, timestamped output.
    """
    from jcvi.apps.console import magenta, yellow

    # Renamed local `format` -> `fmt` to avoid shadowing the builtin
    fmt = yellow("%(asctime)s [%(module)s]") + magenta(" %(message)s")
    logging.basicConfig(level=level,
                        format=fmt,
                        datefmt="%H:%M:%S")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mdownload(args):
    """ %prog mdownload links.txt Multiple download a list of files. Use formats.html.links() to extract the links file. """
    from jcvi.apps.grid import Jobs

    p = OptionParser(mdownload.__doc__)
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())

    linksfile, = args
    # Each URL becomes a single-argument tuple for the download worker
    tasks = [(url.strip(),) for url in open(linksfile)]
    Jobs(download, tasks).run()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def timestamp(args):
    """ %prog timestamp path > timestamp.info Record the timestamps for all files in the current folder. filename atime mtime This file can be used later to recover previous timestamps through touch(). """
    p = OptionParser(timestamp.__doc__)
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())

    path, = args
    # Walk the tree and emit one "filename atime mtime" line per file
    for root, dirs, files in os.walk(path):
        for name in files:
            fullpath = op.join(root, name)
            atime, mtime = get_times(fullpath)
            print(fullpath, atime, mtime)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def touch(args):
    """ %prog touch timestamp.info Recover timestamps for files in the current folder. CAUTION: you must execute this in the same directory as timestamp(). """
    from time import ctime

    p = OptionParser(touch.__doc__)
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())

    info, = args
    for row in open(info):
        path, atime, mtime = row.split()
        atime, mtime = float(atime), float(mtime)
        cur_atime, cur_mtime = get_times(path)

        # Skip files whose stored times already match (1-second resolution)
        if (int(atime), int(mtime)) == (int(cur_atime), int(cur_mtime)):
            continue

        stamps = [ctime(t) for t in (cur_atime, cur_mtime, atime, mtime)]
        msg = "{0} : ".format(path)
        msg += "({0}, {1}) => ({2}, {3})".format(*stamps)
        print(msg, file=sys.stderr)
        os.utime(path, (atime, mtime))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def less(args):
    """ %prog less filename position | less Enhance the unix `less` command by seeking to a file location first. This is useful to browse big files. Position is relative 0.00 - 1.00, or bytenumber. $ %prog less myfile 0.1 # Go to 10% of the current file and streaming $ %prog less myfile 0.1,0.2 # Stream at several positions $ %prog less myfile 100 # Go to certain byte number and streaming $ %prog less myfile 100,200 # Stream at several positions $ %prog less myfile all # Generate a snapshot every 10% (10%, 20%, ..) """
    from jcvi.formats.base import must_open

    p = OptionParser(less.__doc__)
    opts, args = p.parse_args(args)
    if len(args) != 2:
        sys.exit(not p.print_help())

    filename, pos = args
    fsize = getfilesize(filename)

    # "all" means a snapshot at every 10% of the file
    if pos == "all":
        fracs = [x / 10. for x in range(0, 10)]
    else:
        fracs = [float(x) for x in pos.split(",")]
    # Values above 1 are absolute byte offsets; normalize to fractions
    if fracs[0] > 1:
        fracs = [x / fsize for x in fracs]

    # Multiple positions -> short snapshots; single position -> stream on
    counts = 20 if len(fracs) > 1 else None

    fp = must_open(filename)
    for frac in fracs:
        snapshot(fp, frac, fsize, counts=counts)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pushover(message, token, user, title="JCVI: Job Monitor",
             priority=0, timestamp=None):
    """
    Send a push notification through the pushover.net API.

    <https://pushover.net/faq#library-python>
    """
    assert -1 <= priority <= 2, \
            "Priority should be an int() between -1 and 2"

    # Idiom fix: compare to None with `is`, not `==`
    if timestamp is None:
        from time import time
        timestamp = int(time())

    # Emergency priority (2) requires retry/expire parameters.
    # NOTE(review): when priority != 2, None values are urlencoded as the
    # literal string "None"; presumably the API ignores them -- verify.
    retry, expire = (300, 3600) if priority == 2 \
            else (None, None)
    conn = HTTPSConnection("api.pushover.net:443")
    conn.request("POST", "/1/messages.json",
            urlencode({
                "token": token,
                "user": user,
                "message": message,
                "title": title,
                "priority": priority,
                "timestamp": timestamp,
                "retry": retry,
                "expire": expire,
            }), { "Content-type": "application/x-www-form-urlencoded" })
    conn.getresponse()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def send_email(fromaddr, toaddr, subject, message):
    """
    Send a plain-text email through the local SMTP server.

    `toaddr` is a list of recipient addresses.
    """
    from smtplib import SMTP
    from email.mime.text import MIMEText

    msg = MIMEText(message)
    msg['Subject'] = subject
    msg['From'] = fromaddr
    msg['To'] = ", ".join(toaddr)

    server = SMTP("localhost")
    server.sendmail(fromaddr, toaddr, msg.as_string())
    server.quit()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_email_address(whoami="user"):
    """
    Auto-generate an email address.

    "user" yields the current user's address; anything else yields a
    do-not-reply sender address for notifications.
    """
    if whoami == "user":
        return "{0}@{1}".format(getusername(), getdomainname())
    return "notifier-donotreply@{0}".format(getdomainname())
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def notify(args):
    """ %prog notify "Message to be sent" Send a message via email/push notification. Email notify: Recipient email address is constructed by joining the login `username` and `dnsdomainname` of the server Push notify: Uses available API """ |
from jcvi.utils.iter import flatten
# NOTE(review): this mutates the module-level list on every call;
# repeated calls would append duplicate entries -- verify.
valid_notif_methods.extend(available_push_api.keys())
fromaddr = get_email_address(whoami="notifier")
p = OptionParser(notify.__doc__)
p.add_option("--method", default="email", choices=valid_notif_methods,
help="Specify the mode of notification [default: %default]")
p.add_option("--subject", default="JCVI: job monitor",
help="Specify the subject of the notification message")
p.set_email()
g1 = OptionGroup(p, "Optional `push` parameters")
g1.add_option("--api", default="pushover", \
choices=list(flatten(available_push_api.values())),
help="Specify API used to send the push notification")
g1.add_option("--priority", default=0, type="int",
help="Message priority (-1 <= p <= 2) [default: %default]")
g1.add_option("--timestamp", default=None, type="int", \
dest="timestamp", \
help="Message timestamp in unix format [default: %default]")
p.add_option_group(g1)
opts, args = p.parse_args(args)
if len(args) == 0:
logging.error("Please provide a brief message to be sent")
sys.exit(not p.print_help())
subject = opts.subject
message = " ".join(args).strip()
if opts.method == "email":
toaddr = opts.email.split(",")  # TO address should be in a list
# Validate every recipient before sending anything
for addr in toaddr:
if not is_valid_email(addr):
logging.debug("Email address `{0}` is not valid!".format(addr))
sys.exit()
send_email(fromaddr, toaddr, subject, message)
else:
pushnotify(subject, message, api=opts.api, priority=opts.priority, \
timestamp=opts.timestamp)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_db_opts(self, dbname="mta4", credentials=True):
    """ Add db connection specific attributes """ |
from jcvi.utils.db import valid_dbconn, get_profile
self.add_option("--db", default=dbname, dest="dbname",
help="Specify name of database to query [default: %default]")
# NOTE(review): .keys() as `choices` relies on Python 2 returning a
# list; a py3 dict view may not be accepted by optparse -- verify.
self.add_option("--connector", default="Sybase", dest="dbconn",
choices=valid_dbconn.keys(), help="Specify database connector [default: %default]")
# Profile supplies stored default credentials for this user
hostname, username, password = get_profile()
if credentials:
self.add_option("--hostname", default=hostname,
help="Specify hostname [default: %default]")
self.add_option("--username", default=username,
help="Username to connect to database [default: %default]")
self.add_option("--password", default=password,
help="Password to connect to database [default: %default]")
self.add_option("--port", type="int",
help="Specify port number [default: %default]")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_image_options(self, args=None, figsize="6x6", dpi=300, format="pdf", font="Helvetica", palette="deep", style="darkgrid", cmap="jet"):
    """ Add image format options for given command line programs. """ |
# Registers an "Image options" group, then parses args and returns the
# (opts, args, ImageOptions) trio used by all plotting commands.
from jcvi.graphics.base import ImageOptions, setup_theme
allowed_format = ("emf", "eps", "pdf", "png", "ps", \
"raw", "rgba", "svg", "svgz")
allowed_fonts = ("Helvetica", "Palatino", "Schoolbook", "Arial")
allowed_styles = ("darkgrid", "whitegrid", "dark", "white", "ticks")
allowed_diverge = ("BrBG", "PiYG", "PRGn", "PuOr", "RdBu", \
"RdGy", "RdYlBu", "RdYlGn", "Spectral")
group = OptionGroup(self, "Image options")
self.add_option_group(group)
group.add_option("--figsize", default=figsize,
help="Figure size `width`x`height` in inches [default: %default]")
group.add_option("--dpi", default=dpi, type="int",
help="Physical dot density (dots per inch) [default: %default]")
group.add_option("--format", default=format, choices=allowed_format,
help="Generate image of format [default: %default]")
group.add_option("--font", default=font, choices=allowed_fonts,
help="Font name")
group.add_option("--style", default=style, choices=allowed_styles,
help="Axes background")
group.add_option("--diverge", default="PiYG", choices=allowed_diverge,
help="Contrasting color scheme")
group.add_option("--cmap", default=cmap, help="Use this color map")
group.add_option("--notex", default=False, action="store_true",
help="Do not use tex")
if args is None:
args = sys.argv[1:]
opts, args = self.parse_args(args)
# Sanity-check figure geometry before any drawing happens
assert opts.dpi > 0
assert "x" in opts.figsize
setup_theme(style=opts.style, font=opts.font, usetex=(not opts.notex))
return opts, args, ImageOptions(opts)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dedup(args):
    """ %prog dedup scaffolds.fasta Remove redundant contigs with CD-HIT. This is run prior to assembly.sspace.embed(). """ |
from jcvi.formats.fasta import gaps
from jcvi.apps.cdhit import deduplicate, ids
p = OptionParser(dedup.__doc__)
p.set_align(pctid=GoodPct)
p.set_mingap(default=10)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
scaffolds, = args
mingap = opts.mingap
# Split scaffolds at gaps so CD-HIT clusters contigs, not scaffolds
splitfile, oagpfile, cagpfile = gaps([scaffolds, "--split", "--mingap={0}".format(mingap)])
dd = splitfile + ".cdhit"
clstrfile = dd + ".clstr"
idsfile = dd + ".ids"
# Both steps are cached; rerun only when inputs are newer
if need_update(splitfile, clstrfile):
deduplicate([splitfile, "--pctid={0}".format(opts.pctid)])
if need_update(clstrfile, idsfile):
ids([clstrfile])
agp = AGP(cagpfile)
# Cluster representatives: components to keep
reps = set(x.split()[-1] for x in open(idsfile))
pf = scaffolds.rsplit(".", 1)[0]
dedupagp = pf + ".dedup.agp"
fw = open(dedupagp, "w")
ndropped = ndroppedbases = 0
for a in agp:
# Drop non-representative (duplicate) components; keep gaps
if not a.is_gap and a.component_id not in reps:
span = a.component_span
logging.debug("Drop component {0} ({1})".\
format(a.component_id, span))
ndropped += 1
ndroppedbases += span
continue
print(a, file=fw)
fw.close()
logging.debug("Dropped components: {0}, Dropped bases: {1}".\
format(ndropped, ndroppedbases))
logging.debug("Deduplicated file written to `{0}`.".format(dedupagp))
# Tidy the AGP and rebuild the deduplicated fasta from it
tidyagp = tidy([dedupagp, splitfile])
dedupfasta = pf + ".dedup.fasta"
build([tidyagp, dd, dedupfasta])
return dedupfasta
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def blast(args):
    """ %prog blast allfasta clonename Insert a component into agpfile by aligning to the best hit in pool and see if they have good overlaps. """ |
from jcvi.apps.align import run_megablast
p = OptionParser(blast.__doc__)
p.add_option("-n", type="int", default=2,
help="Take best N hits [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
allfasta, clonename = args
fastadir = "fasta"
# Download the clone sequence from Entrez if not already cached
infile = op.join(fastadir, clonename + ".fasta")
if not op.exists(infile):
entrez([clonename, "--skipcheck", "--outdir=" + fastadir])
outfile = "{0}.{1}.blast".format(clonename, allfasta.split(".")[0])
run_megablast(infile=infile, outfile=outfile, db=allfasta, \
pctid=GoodPct, hitlen=GoodOverlap)
blasts = [BlastLine(x) for x in open(outfile)]
besthits = []
for b in blasts:
# Strip NCBI-style `gi|...|gb|ACC|` headers down to the accession
if b.query.count("|") >= 3:
b.query = b.query.split("|")[3]
if b.subject.count("|") >= 3:
b.subject = b.subject.split("|")[3]
b.query = b.query.rsplit(".", 1)[0]
b.subject = b.subject.rsplit(".", 1)[0]
# Ignore self hits
if b.query == b.subject:
continue
# Keep the first N distinct subjects, preserving hit order
if b.subject not in besthits:
besthits.append(b.subject)
if len(besthits) == opts.n:
break
for b in besthits:
overlap([clonename, b, "--dir=" + fastadir])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bes(args):
    """ %prog bes bacfasta clonename Use the clone name to download BES gss sequences from Genbank, map and then visualize. """ |
from jcvi.apps.align import run_blat
p = OptionParser(bes.__doc__)
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bacfasta, clonename = args
# Fetch the BAC-end sequences (GSS) for this clone from Genbank
entrez([clonename, "--database=nucgss", "--skipcheck"])
besfasta = clonename + ".fasta"
blatfile = clonename + ".bes.blat"
run_blat(infile=besfasta, outfile=blatfile, db=bacfasta, \
pctid=95, hitlen=100, cpus=opts.cpus)
aid, asize = next(Fasta(bacfasta).itersizes())
# Render a fixed-width ASCII diagram of BES placements on the BAC
width = 50
msg = "=" * width
msg += " " + aid
print(msg, file=sys.stderr)
ratio = width * 1. / asize
_ = lambda x: int(round(x * ratio, 0))
blasts = [BlastLine(x) for x in open(blatfile)]
for b in blasts:
# Arrows point in the alignment orientation, scaled to `width` cols
if b.orientation == '+':
msg = " " * _(b.sstart) + "->"
else:
msg = " " * (_(b.sstop) - 2) + "<-"
msg += " " * (width - len(msg) + 2)
msg += b.query
# "hang" = unaligned bases beyond the BES toward the BAC end
if b.orientation == '+':
msg += " (hang={0})".format(b.sstart - 1)
else:
msg += " (hang={0})".format(asize - b.sstop)
print(msg, file=sys.stderr)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def flip(args):
    """ %prog flip fastafile Go through each FASTA record, check against Genbank file and determines whether or not to flip the sequence. This is useful before updates of the sequences to make sure the same orientation is used. """ |
p = OptionParser(flip.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastafile, = args
outfastafile = fastafile.rsplit(".", 1)[0] + ".flipped.fasta"
fo = open(outfastafile, "w")
f = Fasta(fastafile, lazy=True)
for name, rec in f.iteritems_ordered():
# NOTE(review): fixed temp filename -- not safe for concurrent runs
tmpfasta = "a.fasta"
fw = open(tmpfasta, "w")
SeqIO.write([rec], fw, "fasta")
fw.close()
# Align the record against its Genbank version; '-' means reversed
o = overlap([tmpfasta, name])
if o.orientation == '-':
rec.seq = rec.seq.reverse_complement()
SeqIO.write([rec], fo, "fasta")
os.remove(tmpfasta)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def batchoverlap(args):
    """ %prog batchoverlap pairs.txt outdir Check overlaps between pairs of sequences. """ |
p = OptionParser(batchoverlap.__doc__)
# NOTE(review): set_cpus() is declared but opts.cpus is never used here.
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
pairsfile, outdir = args
fp = open(pairsfile)
cmds = []
# Output directory for the overlap results is hard-coded
mkdir("overlaps")
for row in fp:
a, b = row.split()[:2]
oa = op.join(outdir, a + ".fa")
ob = op.join(outdir, b + ".fa")
cmd = "python -m jcvi.assembly.goldenpath overlap {0} {1}".format(oa, ob)
cmd += " -o overlaps/{0}_{1}.ov".format(a, b)
cmds.append(cmd)
# Commands are printed (not executed) for external batch submission
print("\n".join(cmds))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def certificate(args):
    """ %prog certificate tpffile certificatefile Generate certificate file for all overlaps in tpffile. tpffile can be generated by jcvi.formats.agp.tpf(). North chr1 2 0 AC229737.8 telomere 58443 South chr1 2 1 AC229737.8 AC202463.29 58443 37835 58443 + Non-terminal Each line describes a relationship between the current BAC and the north/south BAC. First, "North/South" tag, then the chromosome, phases of the two BACs, ids of the two BACs, the size and the overlap start-stop of the CURRENT BAC, and orientation. Each BAC will have two lines in the certificate file. """ |
p = OptionParser(certificate.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
tpffile, certificatefile = args
fastadir = "fasta"
tpf = TPF(tpffile)
# Previously-computed certificate lines are reused (resume support)
data = check_certificate(certificatefile)
fw = must_open(certificatefile, "w")
for i, a in enumerate(tpf):
if a.is_gap:
continue
aid = a.component_id
af = op.join(fastadir, aid + ".fasta")
if not op.exists(af):  # Check to avoid redownload
entrez([aid, "--skipcheck", "--outdir=" + fastadir])
north, south = tpf.getNorthSouthClone(i)
aphase, asize = phase(aid)
# NOTE(review): loop variable `p` shadows the OptionParser above
for tag, p in (("North", north), ("South", south)):
if not p:  # end of the chromosome
ov = "telomere\t{0}".format(asize)
elif p.isCloneGap:
bphase = "0"
ov = "{0}\t{1}".format(p.gap_type, asize)
else:
bid = p.component_id
bphase, bsize = phase(bid)
key = (tag, aid, bid)
# Cached line from a previous run: emit as-is, skip alignment
if key in data:
print(data[key], file=fw)
continue
ar = [aid, bid, "--dir=" + fastadir]
o = overlap(ar)
ov = o.certificateline if o \
else "{0}\t{1}\tNone".format(bid, asize)
print("\t".join(str(x) for x in \
(tag, a.object, aphase, bphase, aid, ov)), file=fw)
# Flush after each record so progress survives interruption
fw.flush()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def neighbor(args):
    """ %prog neighbor agpfile componentID Check overlaps of a particular component in agpfile. """ |
p = OptionParser(neighbor.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
agpfile, componentID = args
fastadir = "fasta"
# Show the component with two lines of context from the AGP file
cmd = "grep"
cmd += " --color -C2 {0} {1}".format(componentID, agpfile)
sh(cmd)
agp = AGP(agpfile)
aorder = agp.order
if not componentID in aorder:
print("Record {0} not present in `{1}`."\
.format(componentID, agpfile), file=sys.stderr)
return
i, c = aorder[componentID]
north, south = agp.getNorthSouthClone(i)
# Check the overlap against each non-gap neighbor
if not north.isCloneGap:
ar = [north.component_id, componentID, "--dir=" + fastadir]
if north.orientation == '-':
ar += ["--qreverse"]
overlap(ar)
if not south.isCloneGap:
ar = [componentID, south.component_id, "--dir=" + fastadir]
# NOTE(review): this checks c.orientation (the component itself),
# while the north branch checks north.orientation -- verify the
# asymmetry is intentional (query vs subject roles differ).
if c.orientation == '-':
ar += ["--qreverse"]
overlap(ar)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def agp(args):
""" %prog agp tpffile certificatefile agpfile Build agpfile from overlap certificates. Tiling Path File (tpf) is a file that lists the component and the gaps. It is a three-column file similar to below, also see jcvi.formats.agp.tpf():
telomere chr1 na AC229737.8 chr1 + AC202463.29 chr1 + Note: the orientation of the component is only used as a guide. If the orientation is derivable from a terminal overlap, it will use it regardless of what the tpf says. See jcvi.assembly.goldenpath.certificate() which generates a list of certificates based on agpfile. At first, it seems counter-productive to convert first agp to certificates then certificates back to agp. The certificates provide a way to edit the overlap information, so that the agpfile can be corrected (without changing agpfile directly). """ |
from jcvi.formats.base import DictFile
p = OptionParser(agp.__doc__)
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
tpffile, certificatefile, agpfile = args
orientationguide = DictFile(tpffile, valuepos=2)
cert = Certificate(certificatefile)
cert.write_AGP(agpfile, orientationguide=orientationguide) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update_clr(self, aclr, bclr):
""" Zip the two sequences together, using "left-greedy" rule ============= seqA |||| ====(===============) seqB """ |
print(aclr, bclr, file=sys.stderr)
otype = self.otype
if otype == 1:
if aclr.orientation == '+':
aclr.end = self.qstop
else:
aclr.start = self.qstart
if bclr.orientation == '+':
bclr.start = self.sstop + 1
else:
bclr.end = self.sstart - 1
elif otype == 3:
aclr.start = aclr.end
elif otype == 4:
bclr.start = bclr.end
print(aclr, bclr, file=sys.stderr) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def gcdepth(args):
""" %prog gcdepth sample_name tag Plot GC content vs depth vs genomnic bins. Inputs are mosdepth output: - NA12878_S1.mosdepth.global.dist.txt - NA12878_S1.mosdepth.region.dist.txt - NA12878_S1.regions.bed.gz - NA12878_S1.regions.bed.gz.csi - NA12878_S1.regions.gc.bed.gz A sample mosdepth.sh script might look like: ``` #!/bin/bash LD_LIBRARY_PATH=mosdepth/htslib/ mosdepth/mosdepth $1 \\ bams/$1.bam -t 4 -c chr1 -n --by 1000 bedtools nuc -fi GRCh38/WholeGenomeFasta/genome.fa \\ -bed $1.regions.bed.gz \\ | pigz -c > $1.regions.gc.bed.gz ``` """ |
import hashlib
from jcvi.algorithms.formula import MAD_interval as confidence_interval
from jcvi.graphics.base import latex, plt, savefig, set2
p = OptionParser(gcdepth.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
sample_name, tag = args
# The tag is used to add to title, also provide a random (hashed) color
coloridx = int(hashlib.sha1(tag).hexdigest(), 16) % len(set2)
color = set2[coloridx]
# mosdepth outputs a table that we can use to plot relationship
gcbedgz = sample_name + ".regions.gc.bed.gz"
df = pd.read_csv(gcbedgz, delimiter="\t")
mf = df.loc[:, ("4_usercol", "6_pct_gc")]
mf.columns = ["depth", "gc"]
# We discard any bins that are gaps
mf = mf[(mf["depth"] > .001) | (mf["gc"] > .001)]
# Create GC bins
gcbins = defaultdict(list)
for i, row in mf.iterrows():
gcp = int(round(row["gc"] * 100))
gcbins[gcp].append(row["depth"])
gcd = sorted((k * .01, confidence_interval(v))
for (k, v) in gcbins.items())
gcd_x, gcd_y = zip(*gcd)
m, lo, hi = zip(*gcd_y)
# Plot
plt.plot(mf["gc"], mf["depth"], ".", color="lightslategray", ms=2,
mec="lightslategray", alpha=.1)
patch = plt.fill_between(gcd_x, lo, hi,
facecolor=color, alpha=.25, zorder=10,
linewidth=0.0, label="Median +/- MAD band")
plt.plot(gcd_x, m, "-", color=color, lw=2, zorder=20)
ax = plt.gca()
ax.legend(handles=[patch], loc="best")
ax.set_xlim(0, 1)
ax.set_ylim(0, 100)
ax.set_title("{} ({})".format(latex(sample_name), tag))
ax.set_xlabel("GC content")
ax.set_ylabel("Depth")
savefig(sample_name + ".gcdepth.png") |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def exonunion(args):
""" %prog exonunion gencode.v26.annotation.exon.bed Collapse overlapping exons within the same gene. File `gencode.v26.annotation.exon.bed` can be generated by: $ zcat gencode.v26.annotation.gtf.gz | awk 'OFS="\t" {if ($3=="exon") {print $1,$4-1,$5,$10,$12,$14,$16,$7}}' | tr -d '";' """ |
p = OptionParser(exonunion.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
gencodebed, = args
beds = BedTool(gencodebed)
# fields[3] is gene_id; fields[6] is gene_name
for g, gb in groupby(beds, key=lambda x: x.fields[3]):
gb = BedTool(gb)
sys.stdout.write(str(gb.sort().merge(c="4,5,6,7",
o=','.join(['first'] * 4)))) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def summarycanvas(args):
""" %prog summarycanvas output.vcf.gz Generate tag counts (GAIN/LOSS/REF/LOH) of segments in Canvas output. """ |
p = OptionParser(summarycanvas.__doc__)
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
for vcffile in args:
counter = get_gain_loss_summary(vcffile)
pf = op.basename(vcffile).split(".")[0]
print(pf + " " +
" ".join("{}:{}".format(k, v)
for k, v in sorted(counter.items()))) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_segments(vcffile):
""" Extract all copy number segments from a CANVAS file VCF line looks like: chr1 788879 Canvas:GAIN:chr1:788880-821005 N <CNV> 2 q10 SVTYPE=CNV;END=821005;CNVLEN=32126 RC:BC:CN:MCC 157:4:3:2 """ |
from cStringIO import StringIO
from cyvcf2 import VCF
output = StringIO()
for v in VCF(vcffile):
chrom = v.CHROM
start = v.start
end = v.INFO.get('END') - 1
cn, = v.format('CN')[0]
print("\t".join(str(x) for x in (chrom, start, end, cn)), file=output)
beds = BedTool(output.getvalue(), from_string=True)
return beds |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def counter_mean_and_median(counter):
""" Calculate the mean and median value of a counter """ |
if not counter:
return np.nan, np.nan
total = sum(v for k, v in counter.items())
mid = total / 2
weighted_sum = 0
items_seen = 0
median_found = False
for k, v in sorted(counter.items()):
weighted_sum += k * v
items_seen += v
if not median_found and items_seen >= mid:
median = k
median_found = True
mean = weighted_sum * 1. / total
return mean, median |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.