text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def grass(args):
    """
    %prog grass coge_master_table.txt james.txt

    Validate SynFind pan-grass set against James. This set can be generated:

    https://genomevolution.org/r/fhak
    """
    p = OptionParser(grass.__doc__)  # fix: was `grass._doc__` (AttributeError)
    p.set_verbose()
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    master, james = args

    # Parse the SynFind master table: columns 1-5 hold comma-separated
    # ortholog lists, "-" marks a missing entry
    fp = open(master)
    next(fp)  # skip header
    master_store = defaultdict(set)
    for row in fp:
        atoms = row.split()
        s = set()
        for x in atoms[1:6]:
            m = x.split(",")
            s |= set(m)
        if '-' in s:
            s.remove('-')

        a = atoms[1]
        master_store[a] |= set(s)

    # Parse James' table; genes joined by "||" are tandem duplicates,
    # "chr..." entries stand in as proxies, rice genes start with "Os"
    fp = open(james)
    next(fp)  # skip header
    james_store = {}
    tandems = set()
    for row in fp:
        atoms = row.split()
        s = set()
        Os = set()
        for x in atoms[:-1]:
            m = x.split("||")
            if m[0].startswith("Os"):
                Os |= set(m)
            if m[0].startswith("http"):
                continue
            if m[0].startswith("chr"):
                m = ["proxy"]
            if "||" in x:
                tandems |= set(m)
            s |= set(m)

        for x in Os:
            james_store[x] = s

    # Compare the two stores per rice gene via Jaccard similarity; the
    # "corrected" variant discounts differences explained by tandem genes
    jaccards = []
    corr_jaccards = []
    perfect_matches = 0
    corr_perfect_matches = 0
    for k, v in james_store.items():
        if k not in master_store:
            continue
        m = master_store[k]
        jaccard = len(v & m) * 100 / len(v | m)
        jaccards.append(jaccard)
        diff = (v ^ m) - tandems
        corr_jaccard = 100 - len(diff) * 100 / len(v | m)
        corr_jaccards.append(corr_jaccard)
        if opts.verbose:
            print(k)
            print(v)
            print(m)
            print(diff)
            print(jaccard)
        if jaccard > 99:
            perfect_matches += 1
        if corr_jaccard > 99:
            corr_perfect_matches += 1

    logging.debug("Perfect matches: {0}".format(perfect_matches))
    logging.debug("Perfect matches (corrected): {0}".format(corr_perfect_matches))
    print("Jaccards:", SummaryStats(jaccards))
    print("Corrected Jaccards:", SummaryStats(corr_jaccards))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ecoli(args):
    """
    %prog ecoli coge_master_table.txt query.bed

    Perform gene presence / absence analysis in Ecoli master spreadsheet.
    Ecoli spreadsheets can be downloaded below:

    Ecoli K12 MG1655 (K) as query
    Regenerate this analysis: https://genomevolution.org/r/fggo

    Ecoli O157:H7 EDL933 (O) as query
    Regenerate this analysis: https://genomevolution.org/r/fgt7

    Shigella flexneri 2a 301 (S) as query
    Regenerate this analysis: https://genomevolution.org/r/fgte

    Perform a similar analysis as in:
    Jin et al. (2002) Genome sequence of Shigella flexneri 2a: insights
    into pathogenicity through comparison with genomes of Escherichia
    coli K12 and O157. Nucleic Acid Research.
    """
    p = OptionParser(ecoli.__doc__)
    p.set_outfile()
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    master, querybed = args
    fp = open(master)
    header = next(fp)
    assert header[0] == '#'
    # Query organism name comes from the second header column, after ":"
    qorg = header.strip().split("\t")[1]
    qorg = qorg.split(":")[-1].strip()

    store = {}
    MISSING = ("proxy", "-")
    for row in fp:
        # Columns 2-4: query gene accession plus its two subject-genome
        # entries; the gene counts as missing when absent from both
        a, b, c = row.strip().split("\t")[1:4]
        store[a] = b in MISSING and c in MISSING

    # Tag the query genes with missing-status, in chromosomal (BED) order
    bed = Bed(querybed)
    tags = []
    for i, b in enumerate(bed):
        accn = b.accn
        if accn not in store:
            logging.warn("missing {0}".format(accn))
            continue
        tags.append((store[accn], accn))

    # Consecutive runs of missing genes form query-specific "islands"
    large = 4  # threshold for "large" segments
    II = []          # all islands
    II_large = []    # islands with >= `large` genes
    for missing, aa in groupby(tags, key=lambda x: x[0]):
        aa = list(aa)
        if not missing:
            continue
        glist = list(a for missing, a in aa)
        II.append(glist)
        size = len(glist)
        if size >= large:
            II_large.append(glist)

    fw = must_open(opts.outfile, "w")
    # Log island counts for both the full set and the large subset
    for a, t in zip((II, II_large), ("", ">=4 ")):
        nmissing = sum(len(x) for x in a)
        logging.debug("A total of {0} {1}-specific {2}islands found with {3} genes.".\
                format(len(a), qorg, t, nmissing))

    # Write every island as: size, comma-joined gene list
    for x in II:
        print(len(x), ",".join(x), file=fw)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parallel(args):
    """
    %prog parallel genome.fasta N

    Partition the genome into parts and run separately. This is useful if
    MAKER is to be run on the grid.
    """
    from jcvi.formats.base import split

    p = OptionParser(parallel.__doc__)
    p.set_home("maker")
    p.set_tmpdir(tmpdir="tmp")
    p.set_grid_opts(array=True)
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    genome, NN = args
    threaded = opts.threaded or 1
    tmpdir = opts.tmpdir

    mkdir(tmpdir)
    tmpdir = get_abs_path(tmpdir)

    N = int(NN)
    # NOTE(review): message says 1 < N but the check allows N == 1 — confirm intent
    assert 1 <= N < 1000, "Required: 1 < N < 1000!"

    outdir = "outdir"
    # Split the genome FASTA into N parts under outdir/
    fs = split([genome, outdir, NN])

    # Shared MAKER control file; each split directory gets a copy with its
    # own genome path patched in
    c = CTLFile("maker_opts.ctl")
    c.update_abs_path()
    if threaded > 1:
        c.update_tag("cpus", threaded)

    cwd = os.getcwd()
    dirs = []
    for name in fs.names:
        fn = get_abs_path(name)
        bn = op.basename(name)
        dirs.append(bn)
        c.update_tag("genome", fn)
        mkdir(bn)
        sh("cp *.ctl {0}".format(bn))

        os.chdir(bn)
        c.write_file("maker_opts.ctl")
        os.chdir(cwd)

    # One line per split directory; consumed by the array job script
    jobs = "jobs"
    fw = open(jobs, "w")
    print("\n".join(dirs), file=fw)
    fw.close()

    # Submit to grid
    ncmds = len(dirs)
    runfile = "array.sh"
    cmd = op.join(opts.maker_home, "bin/maker")
    if tmpdir:
        cmd += " -TMP {0}".format(tmpdir)

    engine = get_grid_engine()
    contents = arraysh.format(jobs, cmd) if engine == "SGE" \
                else arraysh_ua.format(N, threaded, jobs, cmd)
    write_file(runfile, contents)

    if engine == "PBS":
        return

    # qsub script; "\$" leaves TASK_ID for the grid shell to expand
    outfile = "maker.\$TASK_ID.out"
    p = GridProcess(runfile, outfile=outfile, errfile=outfile,
                    arr=ncmds, grid_opts=opts)
    qsubfile = "qsub.sh"
    qsub = p.build()
    write_file(qsubfile, qsub)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def merge(args):
    """
    %prog merge outdir output.gff

    Follow-up command after grid jobs are completed after parallel().
    """
    from jcvi.formats.gff import merge as gmerge

    p = OptionParser(merge.__doc__)
    p.set_home("maker")
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    outdir, outputgff = args
    fsnames, suffix = get_fsnames(outdir)
    nfs = len(fsnames)
    cmd = op.join(opts.maker_home, "bin/gff3_merge")

    # Wrapper script that runs gff3_merge on one split directory
    outfile = "merge.sh"
    write_file(outfile, mergesh.format(suffix, cmd))

    # Generate per split directory
    # Note that gff3_merge write to /tmp, so I limit processes here to avoid
    # filling up disk space
    sh("parallel -j 8 merge.sh {} ::: " + " ".join(fsnames))

    # One final output
    gffnames = glob("*.all.gff")
    assert len(gffnames) == nfs

    # Again, DO NOT USE gff3_merge to merge with a smallish /tmp/ area
    gfflist = "gfflist"
    fw = open(gfflist, "w")
    print("\n".join(gffnames), file=fw)
    fw.close()

    nlines = sum(1 for x in open(gfflist))
    assert nlines == nfs  # Be extra, extra careful to include all results
    # Use jcvi's own gff merge instead of MAKER's gff3_merge
    gmerge([gfflist, "-o", outputgff])
    logging.debug("Merged GFF file written to `{0}`".format(outputgff))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def validate(args):
    """
    %prog validate outdir genome.fasta

    Validate current folder after MAKER run and check for failures. Failed
    batch will be written to a directory for additional work.
    """
    from jcvi.utils.counter import Counter

    p = OptionParser(validate.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    outdir, genome = args
    counter = Counter()

    fsnames, suffix = get_fsnames(outdir)
    # Path template for each split's MAKER datastore index log
    dsfile = "{0}{1}/{0}.maker.output/{0}_master_datastore_index.log"
    dslogs = [dsfile.format(x, suffix) for x in fsnames]
    all_failed = []
    for f, d in zip(fsnames, dslogs):
        dslog = DatastoreIndexFile(d)
        counter.update(dslog.scaffold_status.values())
        all_failed.extend([(f, x) for x in dslog.failed])

    # Every grid job log should end with "now finished"
    cmd = 'tail maker.*.out | grep -c "now finished"'
    n = int(popen(cmd).read())
    assert len(fsnames) == n
    print("ALL jobs have been finished", file=sys.stderr)

    nfailed = len(all_failed)
    if nfailed == 0:
        print("ALL scaffolds are completed with no errors", file=sys.stderr)
        return

    print("Scaffold status:", file=sys.stderr)
    print(counter, file=sys.stderr)
    # Record (split, scaffold) pairs that failed, one per line
    failed = "FAILED"
    fw = open(failed, "w")
    print("\n".join(["\t".join((f, x)) for f, x in all_failed]), file=fw)
    fw.close()

    nlines = sum(1 for x in open("FAILED"))
    assert nlines == nfailed
    print("FAILED !! {0} instances.".format(nfailed), file=sys.stderr)

    # Rebuild the failed batch: extract the scaffold ids and pull their
    # sequences from the genome FASTA for a re-run
    failed_ids = failed + ".ids"
    failed_fasta = failed + ".fasta"
    cmd = "cut -f2 {0}".format(failed)
    sh(cmd, outfile=failed_ids)
    if need_update((genome, failed_ids), failed_fasta):
        cmd = "faSomeRecords {0} {1} {2}".\
            format(genome, failed_ids, failed_fasta)
        sh(cmd)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def batcheval(args):
    """
    %prog batcheval model.ids gff_file evidences.bed fastafile

    Get the accuracy for a list of models against evidences in the range of
    the genes. For example:

    $ %prog batcheval all.gff3 isoforms.ids proteins.bed scaffolds.fasta

    Outfile contains the scores for the models can be found in models.scores
    """
    from jcvi.formats.bed import evaluate
    from jcvi.formats.gff import make_index

    # fix: was OptionParser(evaluate.__doc__), which printed the wrong
    # command's usage text for --help
    p = OptionParser(batcheval.__doc__)
    p.add_option("--type", default="CDS",
                 help="list of features to extract, use comma to separate (e.g."
                      "'five_prime_UTR,CDS,three_prime_UTR') [default: %default]")
    opts, args = p.parse_args(args)
    if len(args) != 4:
        sys.exit(not p.print_help())

    model_ids, gff_file, evidences_bed, fastafile = args
    # Child feature types to keep when writing the per-model BED
    # (renamed from `type`, which shadowed the builtin)
    ftypes = set(opts.type.split(","))

    g = make_index(gff_file)
    fp = open(model_ids)
    prefix = model_ids.rsplit(".", 1)[0]
    fwscores = open(prefix + ".scores", "w")

    for row in fp:
        cid = row.strip()
        # Use the gene (level-1 parent) span as the evaluation window
        b = next(g.parents(cid, 1))
        query = "{0}:{1}-{2}".format(b.chrom, b.start, b.stop)
        children = [c for c in g.children(cid, 1)]

        cidbed = prefix + ".bed"
        fw = open(cidbed, "w")
        for c in children:
            if c.featuretype not in ftypes:
                continue
            fw.write(c.to_bed())
        fw.close()

        b = evaluate([cidbed, evidences_bed, fastafile,
                      "--query={0}".format(query)])
        print("\t".join((cid, b.score)), file=fwscores)
        fwscores.flush()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_splits(split_bed, gff_file, stype, key):
    """
    Use intersectBed to find the fused gene => split genes mappings.
    """
    feats_bed = get_bed_file(gff_file, stype, key)
    # Overlap split regions with predictor features; keep the fused gene
    # name (col 4) and the overlapping predictor name (col 10)
    cmd = "intersectBed -a {0} -b {1} -wao | cut -f4,10".format(
            split_bed, feats_bed)
    mapping = defaultdict(set)
    for line in popen(cmd):
        fused, part = line.split()
        mapping[fused].add(part)
    return mapping
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def split(args):
    """
    %prog split split.bed evidences.bed predictor1.gff predictor2.gff fastafile

    Split MAKER models by checking against predictors (such as AUGUSTUS and
    FGENESH). For each region covered by a working model. Find out the
    combination of predictors that gives the best accuracy against evidences
    (such as PASA).

    `split.bed` can be generated by pulling out subset from a list of ids
    $ python -m jcvi.formats.base join split.ids working.bed
        --column=0,3 --noheader | cut -f2-7 > split.bed
    """
    from jcvi.formats.bed import Bed

    p = OptionParser(split.__doc__)
    p.add_option("--key", default="Name",
                 help="Key in the attributes to extract predictor.gff [default: %default]")
    p.add_option("--parents", default="match",
                 help="list of features to extract, use comma to separate (e.g."
                      "'gene,mRNA') [default: %default]")
    p.add_option("--children", default="match_part",
                 help="list of features to extract, use comma to separate (e.g."
                      "'five_prime_UTR,CDS,three_prime_UTR') [default: %default]")
    opts, args = p.parse_args(args)

    if len(args) != 5:
        sys.exit(not p.print_help())

    split_bed, evidences_bed, p1_gff, p2_gff, fastafile = args
    parents = opts.parents
    children = opts.children
    key = opts.key

    bed = Bed(split_bed)

    # Fused gene => split genes mappings, one per predictor
    s1 = get_splits(split_bed, p1_gff, parents, key)
    s2 = get_splits(split_bed, p2_gff, parents, key)

    for b in bed:
        query = "{0}:{1}-{2}".format(b.seqid, b.start, b.end)
        # Score both predictors against the evidences within this region
        b1 = get_accuracy(query, p1_gff, evidences_bed, fastafile, children, key)
        b2 = get_accuracy(query, p2_gff, evidences_bed, fastafile, children, key)
        accn = b.accn
        c1 = "|".join(s1[accn])
        c2 = "|".join(s2[accn])
        ac1 = b1.accuracy
        ac2 = b2.accuracy
        # Tag the region with the more accurate predictor (ties go to p1)
        tag = p1_gff if ac1 >= ac2 else p2_gff
        tag = tag.split(".")[0]

        ac1 = "{0:.3f}".format(ac1)
        ac2 = "{0:.3f}".format(ac2)

        print("\t".join((accn, tag, ac1, ac2, c1, c2)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def datastore(args):
    """
    %prog datastore datastore.log > gfflist.log

    Generate a list of gff filenames to merge. The `datastore.log` file can
    be generated by something like:

    $ find /usr/local/scratch/htang/EVM_test/gannotation/maker/1132350111853_default/i1/ -maxdepth 4 -name "*datastore*.log" > datastore.log
    """
    p = OptionParser(datastore.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    ds, = args
    for line in open(ds):
        logpath = line.strip()
        assert op.exists(logpath)
        basedir, logfile = op.split(logpath)
        # Each index log lists (contig, folder, status) triplets; only the
        # FINISHED contigs have a usable gff file next to them
        for entry in open(logpath):
            ctg, folder, status = entry.split()
            if status != "FINISHED":
                continue
            gff_file = op.join(basedir, folder, ctg + ".gff")
            assert op.exists(gff_file)
            print(gff_file)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def libsvm(args):
    """
    %prog libsvm csvfile prefix.ids

    Convert csv file to LIBSVM format. `prefix.ids` contains the prefix
    mapping. Ga -1 Gr 1

    So the feature in the first column of csvfile get scanned with the prefix
    and mapped to different classes. Formatting spec:

    http://svmlight.joachims.org/
    """
    from jcvi.formats.base import DictFile

    p = OptionParser(libsvm.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    csvfile, prefixids = args
    prefix_map = DictFile(prefixids)
    fp = open(csvfile)
    next(fp)  # discard header row
    for line in fp:
        cols = line.split()
        # Map the label prefix (text before "_") to its class; unknown
        # prefixes fall back to class "0"
        label = prefix_map.get(cols[0].split("_")[0], "0")
        # LIBSVM wants 1-based index:value feature pairs
        features = ["{0}:{1}".format(idx + 1, val)
                    for idx, val in enumerate(cols[1:])]
        print(" ".join([label] + features))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def determine_positions(nodes, edges):
    """
    Construct the problem instance to solve the positions of contigs.

    The input for spring_system() is A, K, L, which looks like the following.
    A = np.array([[1, -1, 0], [0, 1, -1], [1, 0, -1]])
    K = np.eye(3, dtype=int)
    L = np.array([1, 2, 3])

    For example, A-B distance 1, B-C distance 2, A-C distance 3, solve
    positions
    array([0, 1, 3])
    """
    nnodes, nedges = len(nodes), len(edges)
    # Signed incidence matrix: +1 at the first endpoint, -1 at the second
    incidence = np.zeros((nedges, nnodes), dtype=int)
    for row, (u, v, _) in enumerate(edges):
        incidence[row, u] = 1
        incidence[row, v] = -1
    stiffness = np.eye(nedges, dtype=int)
    rest_lengths = np.array([e[-1] for e in edges])
    solution = spring_system(incidence, stiffness, rest_lengths)
    # First node is anchored at 0; round the remaining coordinates to ints
    return np.array([0] + [int(round(pos, 0)) for pos in solution])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def determine_signs(nodes, edges, cutoff=1e-10):
    """
    Construct the orientation matrix for the pairs on N molecules.

    array([ 1, 1, -1])
    """
    n = len(nodes)
    # Accumulate pairwise orientation weights, then symmetrize so the
    # matrix is suitable for the eigen-based sign solver
    W = np.zeros((n, n), dtype=float)
    for i, j, weight in edges:
        W[i, j] += weight
    W = symmetrize(W)

    return get_signs(W, cutoff=cutoff, validate=False)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fix(args):
    """
    %prog fix ahrd.csv > ahrd.fixed.csv

    Fix ugly names from Uniprot.
    """
    p = OptionParser(fix.__doc__)
    p.add_option("--ignore_sym_pat", default=False, action="store_true",
                 help="Do not fix names matching symbol patterns i.e." + \
                      " names beginning or ending with gene symbols or a series of numbers." + \
                      " e.g. `ARM repeat superfamily protein`, `beta-hexosaminidase 3`," + \
                      " `CYCLIN A3;4`, `WALL ASSOCIATED KINASE (WAK)-LIKE 10`")
    p.set_outfile()

    opts, args = p.parse_args(args)
    if len(args) < 1:
        sys.exit(not p.print_help())

    csvfile, = args
    fp = open(csvfile)
    fw = must_open(opts.outfile, "w")
    for row in fp:
        if row[0] == '#':      # skip comment lines
            continue
        if row.strip() == "":  # skip blank lines
            continue
        atoms = row.rstrip("\r\n").split("\t")
        # Rows with >= 4 columns unpack (name, hit, ahrd_code, desc)
        # directly; shorter rows carry only (name, ..., description).
        # fix: condition was `len(atoms) > 2`, which crashed on exactly
        # 3-column rows since atoms[:4] cannot unpack into four names
        name, hit, ahrd_code, desc = atoms[:4] \
                if len(atoms) > 3 else \
                (atoms[0], None, None, atoms[-1])
        newdesc = fix_text(desc, ignore_sym_pat=opts.ignore_sym_pat)
        # A hypothetical protein with a real hit is at least "conserved"
        if hit and hit.strip() != "" and newdesc == Hypothetical:
            newdesc = "conserved " + newdesc
        print("\t".join(atoms[:4] + [newdesc] + atoms[4:]), file=fw)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def batch(args):
    """
    %prog batch splits output

    The arguments are two folders.
    Input FASTA sequences are in splits/.
    Output csv files are in output/.

    Must have folders swissprot/, tair/, trembl/ that contains the respective
    BLAST output. Once finished, you can run, for example:

    $ parallel java -Xmx2g -jar ~/code/AHRD/dist/ahrd.jar {} ::: output/*.yml
    """
    p = OptionParser(batch.__doc__)

    # AHRD score_weights (bit_score, db_score, ovl_score) per blast program
    ahrd_weights = { "blastp": [0.5, 0.3, 0.2],
                     "blastx": [0.6, 0.4, 0.0]
                   }
    blast_progs = tuple(ahrd_weights.keys())

    p.add_option("--path", default="~/code/AHRD/",
            help="Path where AHRD is installed [default: %default]")
    p.add_option("--blastprog", default="blastp", choices=blast_progs,
            help="Specify the blast program being run. Based on this option," \
               + " the AHRD parameters (score_weights) will be modified." \
               + " [default: %default]")
    p.add_option("--iprscan", default=None,
            help="Specify path to InterProScan results file if available." \
               + " If specified, the yml conf file will be modified" \
               + " appropriately. [default: %default]")

    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    splits, output = args
    mkdir(output)

    bit_score, db_score, ovl_score = ahrd_weights[opts.blastprog]

    # Emit one AHRD YAML config per input FASTA split
    for f in glob("{0}/*.fa*".format(splits)):
        fb = op.basename(f).rsplit(".", 1)[0]
        fw = open(op.join(output, fb + ".yml"), "w")

        path = op.expanduser(opts.path)
        dir = op.join(path, "test/resources")
        outfile = op.join(output, fb + ".csv")
        # Optional InterProScan section of the YAML template
        interpro = iprscanTemplate.format(opts.iprscan) if opts.iprscan else ""

        print(Template.format(dir, fb, f, outfile, bit_score, db_score, ovl_score, interpro), file=fw)

    if opts.iprscan:
        # InterProScan XML/DTD must be resolvable from the working directory
        if not op.lexists("interpro.xml"):
            symlink(op.join(iprscan_datadir, "interpro.xml"), "interpro.xml")

        if not op.lexists("interpro.dtd"):
            symlink(op.join(iprscan_datadir, "interpro.dtd"), "interpro.dtd")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_range(obj, score=None, id=None, strand=None):
    """
    Given a gffutils object, convert it to a range object
    """
    from jcvi.utils.range import Range

    # When an explicit score or id is supplied (or requested), build a full
    # Range, falling back to the object's own score/id
    if score or id:
        return Range(seqid=obj.seqid, start=obj.start, end=obj.end, \
                     score=(score if score else obj.score),
                     id=(id if id else obj.id))
    # Otherwise return a plain tuple, optionally carrying the strand
    if strand:
        return (obj.seqid, obj.start, obj.end, obj.strand)
    return (obj.seqid, obj.start, obj.end)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sizes(args):
    """
    %prog sizes gffile

    Given a gff file of features, calculate the sizes of chosen parent
    feature based on summation of sizes of child features. For example, for
    parent 'mRNA' and child 'CDS' feature types, calcuate sizes of mRNA by
    summing the sizes of the disjoint CDS parts.
    """
    p = OptionParser(sizes.__doc__)
    p.set_outfile()
    p.add_option("--parents", dest="parents", default="mRNA",
                 help="parent feature(s) for which size is to be calculated")
    p.add_option("--child", dest="child", default="CDS",
                 help="child feature to use for size calculations")
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    gffile, = args
    parent_types = set(opts.parents.split(","))
    child_type = opts.child
    gff = make_index(gffile)

    fw = must_open(opts.outfile, "w")
    for ptype in parent_types:
        for feat in gff.features_of_type(ptype, order_by=('seqid', 'start')):
            # When parent and child types coincide, use the feature's own
            # span; otherwise sum the disjoint child parts (e.g. CDS)
            if child_type == ptype:
                fsize = feat.end - feat.start + 1
            else:
                fsize = gff.children_bp(feat, child_featuretype=child_type)
            print("\t".join(str(x) for x in (feat.id, fsize)), file=fw)
    fw.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def summary(args):
    """
    %prog summary gffile

    Print summary stats for features of different types.
    """
    from jcvi.formats.base import SetFile
    from jcvi.formats.bed import BedSummary
    from jcvi.utils.table import tabulate

    p = OptionParser(summary.__doc__)
    p.add_option("--isoform", default=False, action="store_true",
                 help="Find longest isoform of each id")
    p.add_option("--ids", help="Only include features from certain IDs")
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    gff_file, = args
    ids = opts.ids

    if ids:
        ids = SetFile(ids)
        logging.debug("Total ids loaded: {0}".format(len(ids)))

        if opts.isoform:
            # Keep only the longest isoform per gene: rely on the "longest"
            # attribute when present, otherwise assume the ".1" suffix names
            # the primary isoform
            pids = set()
            gff = Gff(gff_file)
            for g in gff:
                if g.type != "mRNA":
                    continue
                if g.parent not in ids:
                    continue
                if "longest" not in g.attributes:
                    # Attribute absent from this file — fall back wholesale
                    pids = set(x + ".1" for x in ids)
                    break
                if g.attributes["longest"][0] == "0":
                    continue
                pids.add(g.id)
            ids = pids
            logging.debug("After checking longest: {0}".format(len(ids)))

        # Collects aliases
        gff = Gff(gff_file)
        for g in gff:
            if g.name in ids:
                ids.add(g.id)
        logging.debug("Total ids including aliases: {0}".format(len(ids)))

    # Bucket bedlines by feature type, honoring the id filter if any
    gff = Gff(gff_file)
    beds = defaultdict(list)
    for g in gff:
        if ids and not (g.id in ids or g.name in ids or g.parent in ids):
            continue

        beds[g.type].append(g.bedline)

    # Tabulate per-type counts and base coverage
    table = {}
    for type, bb in sorted(beds.items()):
        bs = BedSummary(bb)
        table[(type, "Features")] = bs.nfeats
        table[(type, "Unique bases")] = bs.unique_bases
        table[(type, "Total bases")] = bs.total_bases

    print(tabulate(table), file=sys.stdout)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def gb(args):
    """
    %prog gb gffile fastafile

    Convert GFF3 to Genbank format. Recipe taken from:
    <http://www.biostars.org/p/2492/>
    """
    from Bio.Alphabet import generic_dna
    try:
        from BCBio import GFF
    except ImportError:
        print("You need to install dep first: $ easy_install bcbio-gff", file=sys.stderr)
        # fix: previously fell through and hit a NameError on GFF below
        sys.exit(1)

    p = OptionParser(gb.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    gff_file, fasta_file = args
    pf = op.splitext(gff_file)[0]
    out_file = pf + ".gb"
    # Attach sequences so BCBio.GFF can emit complete Genbank records
    fasta_input = SeqIO.to_dict(SeqIO.parse(fasta_file, "fasta", generic_dna))
    gff_iter = GFF.parse(gff_file, fasta_input)
    SeqIO.write(gff_iter, out_file, "genbank")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def orient(args):
    """
    %prog orient in.gff3 features.fasta > out.gff3

    Change the feature orientations based on translation. This script is often
    needed in fixing the strand information after mapping RNA-seq transcripts.

    You can generate the features.fasta similar to this command:

    $ %prog load --parents=EST_match --children=match_part clc.JCVIv4a.gff
        JCVI.Medtr.v4.fasta -o features.fasta
    """
    from jcvi.formats.fasta import longestorf

    p = OptionParser(orient.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    ingff3, fastafile = args
    idsfile = fastafile.rsplit(".", 1)[0] + ".orf.ids"
    if need_update(fastafile, idsfile):
        # Derive per-feature orientation from the longest ORF
        longestorf([fastafile, "--ids"])

    orientations = DictFile(idsfile)
    gff = Gff(ingff3)
    flipped = 0
    for g in gff:
        id = None
        # Prefer the ID attribute, falling back to Parent
        for tag in ("ID", "Parent"):
            if tag in g.attributes:
                id, = g.attributes[tag]
                break
        assert id

        # Features without an ORF call default to "+" (left unchanged)
        orientation = orientations.get(id, "+")
        if orientation == '-':
            g.strand = {"+": "-", "-": "+"}[g.strand]
            flipped += 1

        print(g)

    logging.debug("A total of {0} features flipped.".format(flipped))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def rename(args):
    """
    %prog rename in.gff3 switch.ids > reindexed.gff3

    Change the IDs within the gff3.
    """
    p = OptionParser(rename.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    ingff3, switch = args
    mapping = DictFile(switch)

    for g in Gff(ingff3):
        # Remap ID; ids absent from the switch file keep their old name
        oldid, = g.attributes["ID"]
        g.attributes["ID"] = [mapping.get(oldid, oldid)]

        # Remap Parent references the same way
        if "Parent" in g.attributes:
            g.attributes["Parent"] = \
                [mapping.get(x, x) for x in g.attributes["Parent"]]

        g.update_attributes()
        print(g)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parents(args):
    """
    %prog parents gffile models.ids

    Find the parents given a list of IDs in "models.ids".
    """
    p = OptionParser(parents.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    gff_file, idsfile = args
    g = make_index(gff_file)

    for line in open(idsfile):
        cid = line.strip()
        # Take the immediate (level-1) parent of each model
        parent = next(g.parents(cid, 1))
        print("\t".join((cid, parent.id)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def liftover(args):
    """
    %prog liftover gffile > liftover.gff

    Adjust gff coordinates based on tile number. For example,
    "gannotation.asmbl.000095.7" is the 8-th tile on asmbl.000095.
    """
    p = OptionParser(liftover.__doc__)
    p.add_option("--tilesize", default=50000, type="int",
                 help="The size for each tile [default: %default]")
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    gffile, = args
    for g in Gff(gffile):
        # The trailing ".N" of the seqid is the zero-based tile number
        base, tile = g.seqid.rsplit(".", 1)
        shift = int(tile) * opts.tilesize
        g.seqid = base
        g.start += shift
        g.end += shift
        print(g)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_piles(allgenes):
    """
    Before running uniq, we need to compute all the piles. The piles are a
    set of redundant features we want to get rid of. Input are a list of
    GffLines features. Output are list of list of features distinct "piles".
    """
    from jcvi.utils.range import Range, range_piles

    # Index-backed ranges so each pile can be mapped back to its features
    ranges = [Range(g.seqid, g.start, g.end, 0, idx)
              for idx, g in enumerate(allgenes)]
    for pile in range_piles(ranges):
        yield [allgenes[idx] for idx in pile]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def match_subfeats(f1, f2, dbx1, dbx2, featuretype=None, slop=False):
    """
    Given 2 gffutils features located in 2 separate gffutils databases,
    iterate through all subfeatures of a certain type and check whether
    they are identical or not

    The `slop` parameter allows for variation in the terminal UTR region
    """
    f1c, f2c = list(dbx1.children(f1, featuretype=featuretype, order_by='start')), \
               list(dbx2.children(f2, featuretype=featuretype, order_by='start'))

    lf1c, lf2c = len(f1c), len(f2c)
    if match_nchildren(f1c, f2c):
        if lf1c > 0 and lf2c > 0:
            # Indices (0-based) of children already matched with slop,
            # to be skipped by the exact-span comparison below
            exclN = set()
            if featuretype.endswith('UTR') or featuretype == 'exon':
                # N holds the 1-based positions of terminal children that
                # may vary: the strand decides which end is the 5'/3' one
                N = []
                if featuretype.startswith('five_prime'):
                    N = [1] if f1.strand == "+" else [lf1c]
                elif featuretype.startswith('three_prime'):
                    N = [lf1c] if f1.strand == "+" else [1]
                else:   # infer UTR from exon collection
                    N = [1] if 1 == lf1c else [1, lf1c]

                for n in N:
                    if match_Nth_child(f1c, f2c, N=n, slop=slop):
                        exclN.add(n-1)
                    else:
                        return False

            # All remaining children must match span exactly
            for i, (cf1, cf2) in enumerate(zip(f1c, f2c)):
                if i in exclN: continue
                if not match_span(cf1, cf2):
                    return False
    else:
        # Child counts differ; with slop, a single missing-vs-present
        # terminal UTR still counts as a match
        if (lf1c, lf2c) in [(0, 1), (1, 0)] and slop \
                and featuretype.endswith('UTR'):
            return True

        return False

    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def uniq(args):
    """
    %prog uniq gffile > uniq.gff

    Remove redundant gene models. For overlapping gene models, take the
    longest gene. A second scan takes only the genes selected.

    --mode controls whether you want larger feature, or higher scoring
    feature. --best controls how many redundant features to keep, e.g. 10 for
    est2genome.
    """
    supported_modes = ("span", "score")
    p = OptionParser(uniq.__doc__)
    p.add_option("--type", default="gene",
                 help="Types of features to non-redundify [default: %default]")
    p.add_option("--mode", default="span", choices=supported_modes,
                 help="Pile mode [default: %default]")
    p.add_option("--best", default=1, type="int",
                 help="Use best N features [default: %default]")
    p.add_option("--name", default=False, action="store_true",
                 help="Non-redundify Name attribute [default: %default]")
    p.add_option("--iter", default="2", choices=("1", "2"),
                 help="Number of iterations to grab children [default: %default]")
    p.set_outfile()

    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())

    gffile, = args
    mode = opts.mode
    bestn = opts.best

    allgenes = import_feats(gffile, opts.type)
    # Group overlapping features into piles of redundant models
    g = get_piles(allgenes)

    bestids = set()
    for group in g:
        # Negate the sort key so larger span / higher score sorts first
        if mode == "span":
            scores_group = [(- x.span, x) for x in group]
        else:
            scores_group = [(- float(x.score), x) for x in group]

        scores_group.sort()
        seen = set()
        for score, x in scores_group:
            # Keep at most bestn distinct features per pile
            if len(seen) >= bestn:
                break

            name = x.attributes["Name"][0] if opts.name else x.accn
            if name in seen:
                continue

            seen.add(name)
            bestids.add(x.accn)

    # Second pass: emit selected features along with their children
    populate_children(opts.outfile, bestids, gffile, iter=opts.iter)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sort(args):
    """
    %prog sort gffile

    Sort gff file using plain old unix sort based on [chromosome, start
    coordinate]. or topologically based on hierarchy of features using the gt
    (genometools) toolkit
    """
    valid_sort_methods = ("unix", "topo")

    p = OptionParser(sort.__doc__)
    p.add_option("--method", default="unix", choices=valid_sort_methods,
                 help="Specify sort method [default: %default]")
    p.add_option("-i", dest="inplace", default=False, action="store_true",
                 help="If doing a unix sort, perform sort inplace [default: %default]")
    p.set_tmpdir()
    p.set_outfile()
    p.set_home("gt")
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    gffile, = args
    sortedgff = opts.outfile
    if opts.inplace:
        # Inplace sort is impossible for topo sort or a stdin stream
        if opts.method == "topo" or (opts.method == "unix" and gffile in ("-", "stdin")):
            logging.error("Cannot perform inplace sort when method is `topo`" + \
                          " or method is `unix` and input is `stdin` stream")
            sys.exit()

    if opts.method == "unix":
        # Sort by chromosome (col 1) then numeric start coordinate (col 4)
        cmd = "sort"
        cmd += " -k1,1 -k4,4n {0}".format(gffile)
        if opts.tmpdir:
            cmd += " -T {0}".format(opts.tmpdir)
        if opts.inplace:
            # fix: was `" -o {0}".gffile`, an AttributeError on str
            cmd += " -o {0}".format(gffile)
            sortedgff = None
        sh(cmd, outfile=sortedgff)
    elif opts.method == "topo":
        GT_HOME = opts.gt_home
        if not op.isdir(GT_HOME):
            logging.error("GT_HOME={0} directory does not exist".format(GT_HOME))
            sys.exit()
        # Topological sort via genometools, retaining feature IDs
        cmd = "{0}".format(op.join(GT_HOME, "bin", "gt"))
        cmd += " gff3 -sort -tidy -retainids -addids no {0}".format(gffile)
        sh(cmd, outfile=sortedgff)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fromgtf(args):
    """
    %prog fromgtf gtffile

    Convert gtf to gff file. In gtf, the "transcript_id" will convert to
    "ID=", the "transcript_id" in exon/CDS feature will be converted to
    "Parent=".
    """
    p = OptionParser(fromgtf.__doc__)
    p.add_option("--transcript_id", default="transcript_id",
                 help="Field name for transcript [default: %default]")
    p.add_option("--gene_id", default="gene_id",
                 help="Field name for gene [default: %default]")
    p.add_option("--augustus", default=False, action="store_true",
                 help="Input is AUGUSTUS gtf [default: %default]")
    p.set_home("augustus")
    p.set_outfile()
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    gtffile, = args
    outfile = opts.outfile
    if opts.augustus:
        # AUGUSTUS ships its own converter script; delegate to it
        ahome = opts.augustus_home
        s = op.join(ahome, "scripts/gtf2gff.pl")
        cmd = "{0} --gff3 < {1} --out={2}".format(s, gtffile, outfile)
        sh(cmd)
        return

    gff = Gff(gtffile)
    fw = must_open(outfile, "w")
    transcript_id = opts.transcript_id
    gene_id = opts.gene_id
    nfeats = 0
    for g in gff:
        if g.type in ("transcript", "mRNA"):
            g.type = "mRNA"
            g.update_tag(transcript_id, "ID")
            g.update_tag("mRNA", "ID")
            g.update_tag(gene_id, "Parent")
            g.update_tag("Gene", "Parent")
        elif g.type in ("exon", "CDS") or "UTR" in g.type:
            # Bugfix: honor --transcript_id here too (was the hardcoded
            # literal "transcript_id", silently ignoring the option)
            g.update_tag(transcript_id, "Parent")
            g.update_tag(g.type, "Parent")
        elif g.type == "gene":
            g.update_tag(gene_id, "ID")
            g.update_tag("Gene", "ID")
        else:
            assert 0, "Don't know how to deal with {0}".format(g.type)

        g.update_attributes()
        print(g, file=fw)
        nfeats += 1

    logging.debug("A total of {0} features written.".format(nfeats))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fromsoap(args):
    """
    %prog fromsoap soapfile > gff_file

    Convert SOAP alignment output to GFF, one `nucleotide_match` feature
    per aligned read.
    """
    p = OptionParser(fromsoap.__doc__)
    p.add_option("--type", default="nucleotide_match",
                 help="GFF feature type [default: %default]")
    p.add_option("--source", default="soap",
                 help="GFF source qualifier [default: %default]")
    p.set_fixchrnames(orgn="maize")
    p.set_outfile()
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    soapfile, = args
    # Zero-pad match IDs to the width of the total line count.
    # Fix: use a context manager so the counting handle is closed.
    with open(soapfile) as countfp:
        pad0 = len(str(sum(1 for line in countfp)))

    fw = must_open(opts.outfile, "w")
    fp = must_open(soapfile)
    if opts.fix_chr_name:
        # Hoisted out of the loop: import once, not per input line
        from jcvi.utils.cbook import fixChromName
    for idx, line in enumerate(fp):
        if opts.fix_chr_name:
            line = fixChromName(line, orgn=opts.fix_chr_name)
        atoms = line.strip().split("\t")
        attributes = "ID=match{0};Name={1}".format(str(idx).zfill(pad0), atoms[0])
        # SOAP columns: [5]=read length, [6]=strand, [7]=seqid, [8]=start
        start, end = int(atoms[8]), int(atoms[5]) + int(atoms[8]) - 1
        seqid = atoms[7]

        print("\t".join(str(x) for x in (seqid, opts.source, opts.type, \
            start, end, ".", atoms[6], ".", attributes)), file=fw)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def gtf(args):
    """
    %prog gtf gffile

    Convert gff to gtf file. In gtf, only exon/CDS features are important.
    The first 8 columns are the same as gff, but in the attributes field, we
    need to specify "gene_id" and "transcript_id".
    """
    p = OptionParser(gtf.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    gffile, = args
    gff = Gff(gffile)
    transcript_info = AutoVivification()
    for g in gff:
        if g.type.endswith(("RNA", "transcript")):
            # Transcript-level feature: remember its gene_id and type so
            # child exon/CDS rows can be tagged with both IDs below
            if "ID" in g.attributes and "Parent" in g.attributes:
                transcript_id = g.get_attr("ID")
                gene_id = g.get_attr("Parent")
            elif "mRNA" in g.attributes and "Gene" in g.attributes:
                # JCVI-style attribute keys
                transcript_id = g.get_attr("mRNA")
                gene_id = g.get_attr("Gene")
            else:
                # Orphan transcript: treat the transcript as its own gene
                transcript_id = g.get_attr("ID")
                gene_id = transcript_id
            transcript_info[transcript_id]["gene_id"] = gene_id
            transcript_info[transcript_id]["gene_type"] = g.type
            continue

        # Only feature types with a GTF equivalent survive the conversion
        if g.type not in valid_gff_to_gtf_type.keys():
            continue

        try:
            transcript_id = g.get_attr("Parent", first=False)
        except IndexError:
            transcript_id = g.get_attr("mRNA", first=False)

        g.type = valid_gff_to_gtf_type[g.type]
        # A feature may have several parents; emit one GTF row per parent
        # transcript that was seen above
        for tid in transcript_id:
            if tid not in transcript_info: continue
            gene_type = transcript_info[tid]["gene_type"]
            if not gene_type.endswith("RNA") and not gene_type.endswith("transcript"):
                continue
            gene_id = transcript_info[tid]["gene_id"]
            # Replace all attributes with the two GTF-mandated keys
            g.attributes = dict(gene_id=[gene_id], transcript_id=[tid])
            g.update_attributes(gtf=True, urlquote=False)

            print(g)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def merge(args):
    """
    %prog merge gffiles

    Merge several gff files into one. When only one file is given, it is
    assumed to be a file with a list of gff files.
    """
    p = OptionParser(merge.__doc__)
    p.add_option("--seq", default=False, action="store_true",
                 help="Print FASTA sequences at the end")
    p.set_outfile()
    opts, args = p.parse_args(args)

    nargs = len(args)
    if nargs < 1:
        sys.exit(not p.print_help())

    if nargs == 1:
        # Single argument: a text file listing the gff files to merge
        listfile, = args
        fp = open(listfile)
        gffiles = [x.strip() for x in fp]
    else:
        gffiles = args
    outfile = opts.outfile

    deflines = set()
    fw = must_open(outfile, "w")
    fastarecs = {}
    # natsorted by basename so chr2 sorts before chr10, etc.
    for gffile in natsorted(gffiles, key=lambda x: op.basename(x)):
        logging.debug(gffile)
        fp = open(gffile)
        for row in fp:
            row = row.rstrip()
            if not row or row[0] == '#':
                if row == FastaTag:
                    # Stop at the "##FASTA" section; sequences are
                    # collected separately below
                    break
                if row in deflines:
                    # Skip header/comment lines already emitted by an
                    # earlier file, to avoid duplicate directives
                    continue
                else:
                    deflines.add(row)

            print(row, file=fw)

        if not opts.seq:
            continue

        # Collect FASTA records for the merged tail; first occurrence of
        # each seqid wins
        f = Fasta(gffile, lazy=True)
        for key, rec in f.iteritems_ordered():
            if key in fastarecs:
                continue
            fastarecs[key] = rec

    if opts.seq:
        print(FastaTag, file=fw)
        SeqIO.write(fastarecs.values(), fw, "fasta")

    fw.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def extract(args):
    """
    %prog extract gffile

    --contigs: Extract particular contig(s) from the gff file. If multiple
    contigs are involved, use "," to separate, e.g. "contig_12,contig_150";
    or provide a file with multiple contig IDs, one per line

    --names: Process particular ID(s) from the gff file. If multiple IDs
    are involved, use "," to separate; or provide a file with multiple IDs,
    one per line
    """
    p = OptionParser(extract.__doc__)
    p.add_option("--contigs",
                help="Extract features from certain contigs [default: %default]")
    p.add_option("--names",
                help="Extract features with certain names [default: %default]")
    p.add_option("--types", type="str", default=None,
                help="Extract features of certain feature types [default: %default]")
    p.add_option("--children", default=0, choices=["1", "2"],
                help="Specify number of iterations: `1` grabs children, " + \
                     "`2` grabs grand-children [default: %default]")
    p.add_option("--tag", default="ID",
                help="Scan the tags for the names [default: %default]")
    p.add_option("--fasta", default=False, action="store_true",
                help="Write FASTA if available [default: %default]")
    p.set_outfile()
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    gffile, = args
    contigfile = opts.contigs
    namesfile = opts.names
    typesfile = opts.types
    nametag = opts.tag

    # Each of these accepts either a comma-separated list or a filename
    # with one value per line; None if the option was not given
    contigID = parse_multi_values(contigfile)
    names = parse_multi_values(namesfile)
    types = parse_multi_values(typesfile)
    outfile = opts.outfile

    if opts.children:
        # Delegate to populate_children: grabs children/grand-children of
        # the selected features, then returns
        assert types is not None or names is not None, "Must set --names or --types"
        if names == None: names = list()
        populate_children(outfile, names, gffile, iter=opts.children, types=types)
        return

    fp = must_open(gffile)
    fw = must_open(opts.outfile, "w")
    for row in fp:
        atoms = row.split()
        if len(atoms) == 0:
            continue
        tag = atoms[0]
        if row[0] == "#":
            if row.strip() == "###":
                continue
            # Keep header lines, except ##sequence-region pragmas for
            # contigs that were not requested
            if not (tag == RegionTag and contigID and atoms[1] not in contigID):
                print(row.rstrip(), file=fw)
            if tag == FastaTag:
                break
            continue

        b = GffLine(row)
        attrib = b.attributes
        if contigID and tag not in contigID:
            continue
        if types and b.type in types:
            # Feature type matched: remember its ID so subsequent rows can
            # also be selected by name (children carry the parent's ID)
            _id = b.accn
            if _id not in names:
                names.append(_id)
        if names is not None:
            if nametag not in attrib:
                continue
            if attrib[nametag][0] not in names:
                continue

        print(row.rstrip(), file=fw)

    if not opts.fasta:
        return

    # Append the FASTA sequences of the selected contigs, if present
    f = Fasta(gffile)
    for s in contigID:
        if s in f:
            SeqIO.write([f[s]], fw, "fasta")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def split(args):
    """
    %prog split gffile outdir

    Split the gff into one contig per file. Will also take sequences if the
    file contains FASTA sequences.
    """
    p = OptionParser(split.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    gffile, outdir = args
    mkdir(outdir)

    # Delegate the per-contig work to extract(), once per seqid
    for seqid in Gff(gffile).seqids:
        contig_gff = op.join(outdir, seqid + ".gff")
        extract([gffile, "--contigs=" + seqid, "--outfile=" + contig_gff])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def note(args):
    """
    %prog note gffile > tabfile

    Extract certain attribute field for each feature.
    """
    p = OptionParser(note.__doc__)
    p.add_option("--type", default=None,
                 help="Only process certain types, multiple types allowed with comma")
    p.add_option("--attribute", default="Parent,Note",
                 help="Attribute field to extract, multiple fields allowed with comma")
    p.add_option("--AED", type="float", help="Only extract lines with AED score <=")
    p.add_option("--exoncount", default=False, action="store_true",
                 help="Get the exon count for each mRNA feat")
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    gffile, = args
    # Avoid shadowing builtin `type`; None means "all feature types"
    types = opts.type.split(",") if opts.type else None

    exoncounts = {}
    if opts.exoncount:
        # Only build the (expensive) sqlite index when exon counts are
        # actually requested; count direct exon children per mRNA
        db = make_index(gffile)
        for feat in db.features_of_type("mRNA"):
            exoncounts[feat.id] = sum(1 for c in db.children(feat.id, 1)
                                      if c.featuretype == "exon")

    attrib = opts.attribute.split(",")

    gff = Gff(gffile)
    seen = set()
    AED = opts.AED
    for g in gff:
        if types and g.type not in types:
            continue
        # _AED is the MAKER annotation edit distance attribute
        if AED is not None and float(g.attributes["_AED"][0]) > AED:
            continue
        keyval = [g.accn] + [",".join(g.attributes[x]) \
                for x in attrib if x in g.attributes]
        if exoncounts:
            nexons = exoncounts.get(g.accn, 0)
            keyval.append(str(nexons))
        keyval = tuple(keyval)
        # Deduplicate identical output rows
        if keyval not in seen:
            print("\t".join(keyval))
            seen.add(keyval)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def make_index(gff_file):
    """
    Make a sqlite database for fast retrieval of features.
    """
    import gffutils
    db_file = gff_file + ".db"

    # Up-to-date index already on disk: just load it
    if not need_update(gff_file, db_file):
        logging.debug("Load index `{0}`".format(gff_file))
        return gffutils.FeatureDB(db_file)

    # Stale or missing index: rebuild it from scratch
    if op.exists(db_file):
        os.remove(db_file)
    logging.debug("Indexing `{0}`".format(gff_file))
    gffutils.create_db(gff_file, db_file, merge_strategy="create_unique")
    return gffutils.FeatureDB(db_file)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def children(args):
    """
    %prog children gff_file

    Get the children that have the same parent.
    """
    p = OptionParser(children.__doc__)
    p.add_option("--parents", default="gene",
                 help="list of features to extract, use comma to separate (e.g."
                      "'gene,mRNA') [default: %default]")
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    gff_file, = args
    db = make_index(gff_file)
    parents = set(opts.parents.split(','))

    for feat in get_parents(gff_file, parents):
        # Report only parents that have two or more direct children
        child_ids = [child.id for child in db.children(feat.id, 1)]
        if len(child_ids) > 1:
            fields = (feat.id, feat.start, feat.stop, "|".join(child_ids))
            print("\t".join(str(x) for x in fields))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bed12(args):
    """
    %prog bed12 gffile > bedfile

    Produce bed12 file for coding features. The exons will be converted to
    blocks. The CDS range will be shown between thickStart to thickEnd. For
    reference, bed format consists of the following fields:

    1. chrom
    2. chromStart
    3. chromEnd
    4. name
    5. score
    6. strand
    7. thickStart
    8. thickEnd
    9. itemRgb
    10. blockCount
    11. blockSizes
    12. blockStarts
    """
    p = OptionParser(bed12.__doc__)
    p.add_option("--parent", default="mRNA",
                 help="Top feature type [default: %default]")
    p.add_option("--block", default="exon",
                 help="Feature type for regular blocks [default: %default]")
    p.add_option("--thick", default="CDS",
                 help="Feature type for thick blocks [default: %default]")
    p.set_outfile()
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    gffile, = args
    parent, block, thick = opts.parent, opts.block, opts.thick
    outfile = opts.outfile

    g = make_index(gffile)
    fw = must_open(outfile, "w")

    for f in g.features_of_type(parent):
        chrom = f.chrom
        chromStart = f.start - 1   # GFF is 1-based inclusive; BED is 0-based half-open
        chromEnd = f.stop
        name = f.id
        score = 0
        strand = f.strand
        # Sentinels: narrowed/widened to the thick (CDS) extent below
        thickStart = 1e15
        thickEnd = 0
        blocks = []

        for c in g.children(name, 1):
            cstart, cend = c.start - 1, c.stop
            if c.featuretype == block:
                # Block coordinates are relative to chromStart, per BED12
                blockStart = cstart - chromStart
                blockSize = cend - cstart
                blocks.append((blockStart, blockSize))
            elif c.featuretype == thick:
                thickStart = min(thickStart, cstart)
                thickEnd = max(thickEnd, cend)

        # NOTE(review): a parent without any `block` children makes
        # zip(*blocks) raise, and without a `thick` child thickStart is
        # printed as the 1e15 float sentinel -- confirm inputs always
        # carry both child types.
        blocks.sort()
        blockStarts, blockSizes = zip(*blocks)
        blockCount = len(blocks)
        blockSizes = ",".join(str(x) for x in blockSizes) + ","
        blockStarts = ",".join(str(x) for x in blockStarts) + ","
        itemRgb = 0

        print("\t".join(str(x) for x in (chrom, chromStart, chromEnd, \
                name, score, strand, thickStart, thickEnd, itemRgb,
                blockCount, blockSizes, blockStarts)), file=fw)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bed(args):
    """
    %prog bed maffiles > out.bed

    Convert a folder of maf alignments to the bed features then useful to
    check coverage, etc.
    """
    p = OptionParser(bed.__doc__)
    opts, args = p.parse_args(args)

    # Accept one or more maf files, as the docstring advertises and the
    # loop below expects (was `!= 1`, which rejected multiple files;
    # sibling blast() uses the same `== 0` check)
    if len(args) == 0:
        sys.exit(p.print_help())

    flist = args
    prefix = flist[0].split(".")[0]

    # j numbers alignment records consecutively across all input files
    j = 0
    for f in flist:
        reader = Maf(f).reader
        for rec in reader:
            # Each record aligns two components; emit one bed line apiece,
            # tagged "a" and "b"
            a, b = rec.components
            for a, tag in zip((a, b), "ab"):
                name = "{0}_{1:07d}{2}".format(prefix, j, tag)
                print("\t".join(str(x) for x in (a.src, a.forward_strand_start, \
                        a.forward_strand_end, name)))
            j += 1
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def blast(args):
    '''
    %prog blast maffiles > out.blast

    From a folder of .maf files, generate .blast file with tabular format.
    '''
    p = OptionParser(blast.__doc__)
    opts, args = p.parse_args(args)

    if len(args) == 0:
        sys.exit(p.print_help())

    # Convert each maf file into blast tabular (blast8) records
    for maffile in args:
        maf_to_blast8(maffile)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def genome_mutation(candidate):
    """
    Return the mutants created by inversion mutation on the candidates.

    This function performs inversion or insertion. It randomly chooses two
    locations along the candidate and reverses the values within that
    slice. Insertion is done by popping one item and insert it back at
    random position.
    """
    size = len(candidate)
    # Coin flip chooses the operator; then draw the two positions.
    # (Random calls happen in the same order as before, so seeded runs
    # produce identical mutants.)
    use_inversion = random.random() > .5
    i = random.randint(0, size-1)
    j = random.randint(0, size-1)

    if use_inversion:
        # Reverse the slice between the two chosen positions (inclusive)
        lo, hi = (j, i) if i > j else (i, j)
        hi += 1
        mutated = candidate[:lo] + candidate[lo:hi][::-1] + candidate[hi:]
        return creator.Individual(mutated),

    # Insertion: pop the item at j and re-insert it at i, in place
    candidate.insert(i, candidate.pop(j))
    return candidate,
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def frombed(args):
    """
    %prog frombed bedfile contigfasta readfasta

    Convert read placement to contig format. This is useful before running
    BAMBUS.
    """
    from jcvi.formats.fasta import Fasta
    from jcvi.formats.bed import Bed
    from jcvi.utils.cbook import fill

    p = OptionParser(frombed.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 3:
        sys.exit(not p.print_help())

    bedfile, contigfasta, readfasta = args
    prefix = bedfile.rsplit(".", 1)[0]
    contigfile = prefix + ".contig"
    idsfile = prefix + ".ids"

    contigfasta = Fasta(contigfasta)
    readfasta = Fasta(readfasta)

    bed = Bed(bedfile)
    # Placeholder checksum field expected by the .contig format
    checksum = "00000000 checksum."
    fw_ids = open(idsfile, "w")
    fw = open(contigfile, "w")

    for ctg, reads in bed.sub_beds():
        ctgseq = contigfasta[ctg]
        # Contig header: "##<ctg> <nreads> <len> bases, <checksum>"
        ctgline = "##{0} {1} {2} bases, {3}".format(\
                ctg, len(reads), len(ctgseq), checksum)

        print(ctg, file=fw_ids)
        print(ctgline, file=fw)
        print(fill(ctgseq.seq), file=fw)

        for b in reads:
            read = b.accn
            strand = b.strand
            readseq = readfasta[read]
            rc = " [RC]" if strand == "-" else ""
            readlen = len(readseq)
            rstart, rend = 1, readlen
            if strand == "-":
                # Reverse-complement placement: swap the read range ends
                rstart, rend = rend, rstart
            readrange = "{{{0} {1}}}".format(rstart, rend)
            conrange = "<{0} {1}>".format(b.start, b.end)
            # Read header: "#<read>(0)[ RC] <len> bases, <checksum> {r} <c>"
            readline = "#{0}(0){1} {2} bases, {3} {4} {5}".format(\
                    read, rc, readlen, checksum, readrange, conrange)
            print(readline, file=fw)
            print(fill(readseq.seq), file=fw)

    logging.debug("Mapped contigs written to `{0}`.".format(contigfile))
    logging.debug("Contig IDs written to `{0}`.".format(idsfile))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bed(args):
    """
    %prog bed contigfile

    Prints out the contigs and their associated reads.
    """
    # Bugfix: was OptionParser(main.__doc__), which displayed the wrong
    # help text for this subcommand
    p = OptionParser(bed.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    contigfile, = args
    bedfile = contigfile.rsplit(".", 1)[0] + ".bed"
    fw = open(bedfile, "w")
    c = ContigFile(contigfile)

    for rec in c.iter_records():
        for r in rec.reads:
            print(r.bedline, file=fw)
    # Close explicitly so the file is flushed before callers use it
    fw.close()
    logging.debug("File written to `{0}`.".format(bedfile))

    return bedfile
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_errors(error_string):
    '''
    Return only the lines of *error_string* that contain "Error"
    (case-sensitive); if no such line exists, return the whole string
    stripped of surrounding whitespace.
    '''
    matching = [ln for ln in error_string.splitlines() if 'Error' in ln]
    if matching:
        return '\n'.join(matching)
    return error_string.strip()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def tempnam():
    ''' returns a temporary file-name '''
    # os.tempnam was removed in Python 3 (and cStringIO with it);
    # tempfile.mktemp has the same contract -- it returns a fresh name
    # without creating the file, which is what the tesseract CLI needs --
    # and emits no warning, so the stderr-suppression dance is gone.
    return tempfile.mktemp(prefix='tess_')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def image_to_string(image, lang=None, boxes=False):
    '''
    Runs tesseract on the specified image. First, the image is written to
    disk, and then the tesseract command is run on the image. Tesseract's
    result is read, and the temporary files are erased.

    image  -- PIL-style image object (anything with a .save(filename))
    lang   -- optional tesseract language code
    boxes  -- if True, read tesseract's .box output instead of .txt
    '''
    input_file_name = '%s.bmp' % tempnam()
    output_file_name_base = tempnam()
    # tesseract writes <base>.txt normally, <base>.box in box mode
    if not boxes:
        output_file_name = '%s.txt' % output_file_name_base
    else:
        output_file_name = '%s.box' % output_file_name_base
    try:
        image.save(input_file_name)
        status, error_string = run_tesseract(input_file_name,
                                             output_file_name_base,
                                             lang=lang,
                                             boxes=boxes)
        if status:
            errors = get_errors(error_string)
            raise TesseractError(status, errors)
        # Bugfix: file() was removed in Python 3; open() as a context
        # manager also guarantees the handle is closed
        with open(output_file_name) as f:
            return f.read().strip()
    finally:
        # Always remove both temp files, even on error
        cleanup(input_file_name)
        cleanup(output_file_name)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mitosomatic(args):
    """
    %prog mitosomatic t.piledriver

    Find mito mosaic somatic mutations in piledriver results.

    Writes a 3-column file (chrom, start, allele fraction) next to the
    input, with suffix `.af`.
    """
    import pandas as pd

    p = OptionParser(mitosomatic.__doc__)
    p.add_option("--minaf", default=.005, type="float",
                 help="Minimum allele fraction")
    p.add_option("--maxaf", default=.1, type="float",
                 help="Maximum allele fraction")
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    df, = args
    af_file = df.rsplit(".", 1)[0] + ".af"
    fw = open(af_file, "w")
    df = pd.read_csv(df, sep="\t")
    for i, row in df.iterrows():
        # Per-base counts from piledriver; the nucleotide counts are kept
        # for the commented-out SNV-based alternative below
        na = row["num_A"]
        nt = row["num_T"]
        nc = row["num_C"]
        ng = row["num_G"]
        nd = row["num_D"]
        ni = row["num_I"]
        depth = row["depth"]
        # Allele fraction from indels (deletions + insertions) over depth
        #major, minor = sorted([na, nt, nc, ng], reverse=True)[:2]
        #af = minor * 1. / (major + minor)
        af = (nd + ni) * 1. / depth
        if not (opts.minaf <= af <= opts.maxaf):
            continue

        print("{}\t{}\t{:.6f}".format(row["chrom"], row["start"], af), file=fw)

    fw.close()
    logging.debug("Allele freq written to `{}`".format(af_file))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bed(args):
    """
    %prog bed del.txt

    Convert `del.txt` to BED format. DELLY manual here:
    <http://www.embl.de/~rausch/delly.html>

    Deletion:
    chr, start, end, size, #supporting_pairs, avg._mapping_quality, deletion_id
    chr1, 10180, 10509, 329, 75, 15.8667, Deletion_Sample_00000000
    """
    p = OptionParser(bed.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    delt, = args
    # Parse the DELLY deletion table and emit it as BED
    Delly(delt).write_bed("del.bed")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mito(args):
    """
    %prog mito chrM.fa input.bam

    Identify mitochondrial deletions.
    """
    p = OptionParser(mito.__doc__)
    p.set_aws_opts(store="hli-mv-data-science/htang/mito-deletions")
    p.add_option("--realignonly", default=False, action="store_true",
                 help="Realign only")
    p.add_option("--svonly", default=False, action="store_true",
                 help="Run Realign => SV calls only")
    p.add_option("--support", default=1, type="int",
                 help="Minimum number of supporting reads")
    p.set_home("speedseq", default="/mnt/software/speedseq/bin")
    p.set_cpus()
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    chrMfa, bamfile = args
    store = opts.output_path
    cleanup = not opts.nocleanup

    if not op.exists(chrMfa):
        logging.debug("File `{}` missing. Exiting.".format(chrMfa))
        return

    chrMfai = chrMfa + ".fai"
    if not op.exists(chrMfai):
        # NOTE(review): this checks for a .fai yet runs `samtools index`;
        # `samtools faidx` is the command that produces .fai for FASTA --
        # confirm intent.
        cmd = "samtools index {}".format(chrMfa)
        sh(cmd)

    if not bamfile.endswith(".bam"):
        # Not a BAM path: treat the argument as a list file of BAM paths
        bamfiles = [x.strip() for x in open(bamfile)]
    else:
        bamfiles = [bamfile]

    if store:
        # Skip samples whose .depth output already exists in the S3 store
        computed = ls_s3(store)
        computed = [op.basename(x).split('.')[0] for x in computed if
                    x.endswith(".depth")]
        remaining_samples = [x for x in bamfiles
                             if op.basename(x).split(".")[0] not in computed]

        logging.debug("Already computed on `{}`: {}".
                      format(store, len(bamfiles) - len(remaining_samples)))
        bamfiles = remaining_samples

    logging.debug("Total samples: {}".format(len(bamfiles)))

    for bamfile in bamfiles:
        run_mito(chrMfa, bamfile, opts,
                 realignonly=opts.realignonly,
                 svonly=opts.svonly,
                 store=store, cleanup=cleanup)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fcs(args):
    """
    %prog fcs fcsfile

    Process the results from Genbank contaminant screen. An example of the
    file looks like:

    contig name, length, span(s), apparent source
    contig0746      11760   1..141  vector
    contig0751      14226   13476..14226    vector
    contig0800      124133  30512..30559    primer/adapter
    """
    p = OptionParser(fcs.__doc__)
    # Bugfix: without type="int", a user-supplied --cutoff arrives as a
    # string and the `ahang < cutoff` comparisons below fail on Python 3
    p.add_option("--cutoff", default=200, type="int",
                 help="Skip small components less than [default: %default]")
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    fcsfile, = args
    cutoff = opts.cutoff
    fp = open(fcsfile)
    for row in fp:
        if row[0] == "#":
            continue

        # Tab-delimited if tabs are present, else any-whitespace; split
        # into at most 4 fields since the span column is optional
        sep = "\t" if "\t" in row else None
        atoms = row.rstrip().split(sep, 3)

        contig, length = atoms[:2]
        length = int(length)
        label = atoms[-1]
        label = label.replace(" ", "_")

        if len(atoms) == 3:
            # No span given: the whole contig is flagged
            ranges = "{0}..{1}".format(1, length)
        else:
            assert len(atoms) == 4
            ranges = atoms[2]

        for ab in ranges.split(","):
            a, b = ab.split("..")
            a, b = int(a), int(b)
            assert a <= b
            # Overhangs to either contig end; spans that nearly touch an
            # end (< cutoff) are extended to the end itself
            ahang = a - 1
            bhang = length - b
            if ahang < cutoff:
                a = 1
            if bhang < cutoff:
                b = length
            # Emit 0-based half-open BED-style coordinates
            print("\t".join(str(x) for x in (contig, a - 1, b, label)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def asn(args):
    """
    %prog asn asnfiles

    Mainly to get this block, and extract `str` field:

        general { db "TIGR" , tag str "mtg2_12952" } ,
        genbank { accession "AC148996" ,
    """
    from jcvi.formats.base import must_open

    p = OptionParser(asn.__doc__)
    p.set_outfile()
    opts, args = p.parse_args(args)

    if len(args) < 1:
        sys.exit(not p.print_help())

    fw = must_open(opts.outfile, "w")
    for asnfile in args:
        fp = open(asnfile)
        # Tiny state machine: a `general` block carries the internal name
        # in its `str` field, a `genbank` block carries the accession
        ingeneralblock = False
        ingenbankblock = False
        gb, name = None, None
        for row in fp:
            if row.strip() == "":
                continue

            tag = row.split()[0]

            if tag == "general":
                ingeneralblock = True
            if ingeneralblock and tag == "str":
                if name is None:  # Only allow first assignment
                    name = row.split("\"")[1]
                ingeneralblock = False

            if tag == "genbank":
                ingenbankblock = True
            if ingenbankblock and tag == "accession":
                if gb is None:  # Only allow first assignment
                    gb = row.split("\"")[1]
                ingenbankblock = False

        # Both fields must be found in every file
        assert gb and name
        print("{0}\t{1}".format(gb, name), file=fw)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def htgnew(args):
    """
    %prog htgnew fastafile phasefile template.sbt

    Prepare sqnfiles for submitting new Genbank HTG records.

    `fastafile` contains the sequences.
    `phasefile` contains the phase information, it is a two column file:

    mth2-45h12  3

    `template.sbt` is the Genbank submission template.

    This function is simpler than htg, since the record names have not been
    assigned yet (so less bookkeeping).
    """
    from jcvi.formats.fasta import sequin

    p = OptionParser(htgnew.__doc__)
    p.add_option("--comment", default="",
                 help="Comments for this submission [default: %default]")
    opts, args = p.parse_args(args)

    if len(args) != 3:
        sys.exit(not p.print_help())

    fastafile, phasefile, sbtfile = args
    comment = opts.comment

    fastadir = "fasta"
    sqndir = "sqn"
    mkdir(fastadir)
    mkdir(sqndir)

    # Split the multi-FASTA into one file per clone, named by record
    cmd = "faSplit byname {0} {1}/".format(fastafile, fastadir)
    sh(cmd, outfile="/dev/null", errfile="/dev/null")

    # tbl2asn command template, filled in per record inside the loop
    acmd = 'tbl2asn -a z -p fasta -r {sqndir}'
    acmd += ' -i {splitfile} -t {sbtfile} -C tigr'
    acmd += ' -j "[tech=htgs {phase}] [organism=Medicago truncatula] [strain=A17]"'
    acmd += ' -o {sqndir}/{accession_nv}.sqn -V Vbr'
    acmd += ' -y "{comment}" -W T -T T'

    nupdated = 0
    for row in open(phasefile):
        name, phase = row.split()[:2]
        fafile = op.join(fastadir, name + ".fa")
        cloneopt = "--clone={0}".format(name)
        # sequin() splits the clone at gaps and returns the split file
        splitfile, gaps = sequin([fafile, cloneopt])
        splitfile = op.basename(splitfile)
        # No accession assigned yet: use the clone name as both
        accession = accession_nv = name

        phase = int(phase)
        assert phase in (1, 2, 3)

        cmd = acmd.format(accession_nv=accession_nv, sqndir=sqndir,
                          sbtfile=sbtfile, splitfile=splitfile, phase=phase,
                          comment=comment)
        sh(cmd)
        verify_sqn(sqndir, accession)
        nupdated += 1

    print("A total of {0} records updated.".format(nupdated), file=sys.stderr)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def convert_96_to_384(c96, quad, nrows=Nrows, ncols=Ncols):
    """
    Convert the 96-well number and quad number to 384-well number

    'C3' 'P18'
    """
    rows, cols = get_rows_cols()
    plate, splate = get_plate()

    # Bugfix: use integer division -- `ncols / 2` is a float on Python 3,
    # which makes the {0:02d} formatting below raise ValueError
    n96 = rows.index(c96[0]) * ncols // 2 + int(c96[1:])
    # Key into the 384-well sub-plate map, e.g. "03A" for well 3, quad 1
    q = "{0:02d}{1}".format(n96, "ABCD"[quad - 1])
    return splate[q]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_description(s):
    """
    Returns a dictionary based on the FASTA header, assuming JCVI data
    """
    # Drop the ID token, join the remaining "/key=value" tokens, and turn
    # the "/" separators into ";" before query-string parsing
    tokens = s.split()[1:]
    query = "".join(tokens).replace("/", ";")
    return parse_qs(query)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def scaffold(args):
    """
    %prog scaffold ctgfasta reads1.fasta mapping1.bed

    Run BAMBUS on set of contigs, reads and read mappings.
    """
    from jcvi.formats.base import FileMerger
    from jcvi.formats.bed import mates
    from jcvi.formats.contig import frombed
    from jcvi.formats.fasta import join
    from jcvi.utils.iter import grouper

    p = OptionParser(scaffold.__doc__)
    p.set_rclip(rclip=1)
    p.add_option("--conf", help="BAMBUS configuration file [default: %default]")
    p.add_option("--prefix", default=False, action="store_true",
                 help="Only keep links between IDs with same prefix [default: %default]")
    opts, args = p.parse_args(args)

    nargs = len(args)
    # Expect ctgfasta followed by (reads.fasta, mapping.bed) pairs
    if nargs < 3 or nargs % 2 != 1:
        sys.exit(not p.print_help())

    rclip = opts.rclip
    ctgfasta = args[0]
    duos = list(grouper(args[1:], 2))
    trios = []
    for fastafile, bedfile in duos:
        prefix = bedfile.rsplit(".", 1)[0]
        matefile = prefix + ".mates"
        matebedfile = matefile + ".bed"
        if need_update(bedfile, [matefile, matebedfile]):
            # NOTE(review): opts.cutoff is used here but no --cutoff option
            # is added above -- presumably a p.set_* helper defines it;
            # confirm, otherwise this raises AttributeError.
            matesopt = [bedfile, "--lib", "--nointra",
                        "--rclip={0}".format(rclip),
                        "--cutoff={0}".format(opts.cutoff)]
            if opts.prefix:
                matesopt += ["--prefix"]
            matefile, matebedfile = mates(matesopt)
        trios.append((fastafile, matebedfile, matefile))

    # Merge the readfasta, bedfile and matefile
    bbfasta, bbbed, bbmate = "bambus.reads.fasta", "bambus.bed", "bambus.mates"

    for files, outfile in zip(zip(*trios), (bbfasta, bbbed, bbmate)):
        FileMerger(files, outfile=outfile).merge(checkexists=True)

    # Convert read placements to the .contig format BAMBUS consumes
    ctgfile = "bambus.contig"
    idsfile = "bambus.ids"
    frombedInputs = [bbbed, ctgfasta, bbfasta]
    if need_update(frombedInputs, ctgfile):
        frombed(frombedInputs)

    # Partition contigs into those with mapped reads vs. singletons
    inputfasta = "bambus.contigs.fasta"
    singletonfasta = "bambus.singletons.fasta"
    cmd = "faSomeRecords {0} {1} ".format(ctgfasta, idsfile)
    sh(cmd + inputfasta)
    sh(cmd + singletonfasta + " -exclude")

    # Run bambus
    prefix = "bambus"
    cmd = "goBambus -c {0} -m {1} -o {2}".format(ctgfile, bbmate, prefix)
    if opts.conf:
        cmd += " -C {0}".format(opts.conf)
    sh(cmd)

    cmd = "untangle -e {0}.evidence.xml -s {0}.out.xml -o {0}.untangle.xml".\
            format(prefix)
    sh(cmd)

    final = "final"
    cmd = "printScaff -e {0}.evidence.xml -s {0}.untangle.xml -l {0}.lib " \
          "-merge -detail -oo -sum -o {1}".format(prefix, final)
    sh(cmd)

    # Join the contigs into scaffolds per the final ordering/orientation
    oofile = final + ".oo"
    join([inputfasta, "--oo={0}".format(oofile)])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def diginorm(args):
    """
    %prog diginorm fastqfile

    Run K-mer based normalization. Based on tutorial:
    <http://ged.msu.edu/angus/diginorm-2012/tutorial.html>

    Assume input is either an interleaved pairs file, or two separate
    files.

    To set up khmer:
    $ git clone git://github.com/ged-lab/screed.git
    $ git clone git://github.com/ged-lab/khmer.git
    $ cd screed
    $ python setup.py install
    $ cd ../khmer
    $ make test
    $ export PYTHONPATH=~/export/khmer
    """
    from jcvi.formats.fastq import shuffle, pairinplace, split
    from jcvi.apps.base import getfilesize

    p = OptionParser(diginorm.__doc__)
    p.add_option("--single", default=False, action="store_true",
                 help="Single end reads")
    p.add_option("--tablesize", help="Memory size")
    p.add_option("--npass", default="1", choices=("1", "2"),
                 help="How many passes of normalization")
    p.set_depth(depth=50)
    p.set_home("khmer", default="/usr/local/bin/")
    opts, args = p.parse_args(args)

    if len(args) not in (1, 2):
        sys.exit(not p.print_help())

    if len(args) == 2:
        # Two files given: interleave them into one tagged pairs file
        fastq = shuffle(args + ["--tag"])
    else:
        fastq, = args

    kh = opts.khmer_home
    depth = opts.depth
    PE = not opts.single
    sys.path.insert(0, op.join(kh, "python"))

    pf = fastq.rsplit(".", 1)[0]
    keepfile = fastq + ".keep"
    hashfile = pf + ".kh"
    mints = 10000000
    # Hash table size: user override, or ~1/16 of the file size rounded up
    # to a multiple of 10M.
    # NOTE(review): "/" yields floats on Python 3, here and in `ts / 2`
    # below -- khmer may reject a non-integer -x; confirm and use "//".
    ts = opts.tablesize or ((getfilesize(fastq) / 16 / mints + 1) * mints)

    norm_cmd = op.join(kh, "normalize-by-median.py")
    filt_cmd = op.join(kh, "filter-abund.py")
    # Pass 1: normalize to the target depth, saving the k-mer hash
    if need_update(fastq, (hashfile, keepfile)):
        cmd = norm_cmd
        cmd += " -C {0} -k 20 -N 4 -x {1}".format(depth, ts)
        if PE:
            cmd += " -p"
        cmd += " -s {0} {1}".format(hashfile, fastq)
        sh(cmd)

    # Filter low-abundance (likely erroneous) k-mers using that hash
    abundfiltfile = keepfile + ".abundfilt"
    if need_update((hashfile, keepfile), abundfiltfile):
        cmd = filt_cmd
        cmd += " {0} {1}".format(hashfile, keepfile)
        sh(cmd)

    if opts.npass == "1":
        seckeepfile = abundfiltfile
    else:
        # Optional pass 2: a second, more stringent normalization
        seckeepfile = abundfiltfile + ".keep"
        if need_update(abundfiltfile, seckeepfile):
            cmd = norm_cmd
            cmd += " -C {0} -k 20 -N 4 -x {1}".format(depth - 10, ts / 2)
            cmd += " {0}".format(abundfiltfile)
            sh(cmd)

    if PE:
        # Re-pair surviving reads and split into /1 and /2 files
        pairsfile = pairinplace([seckeepfile,
                                "--base={0}".format(pf + "_norm"), "--rclip=2"])
        split([pairsfile])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def contamination(args):
    """
    %prog contamination Ecoli.fasta genome.fasta read.fastq

    Check read contamination on a folder of paired reads. Use bowtie2 to
    compare the reads against:
    1. Ecoli.fasta - this will tell us the lower bound of contamination
    2. genome.fasta - this will tell us the upper bound of contamination
    """
    from jcvi.apps.bowtie import BowtieLogFile, align
    p = OptionParser(contamination.__doc__)
    p.set_firstN()
    opts, args = p.parse_args(args)
    if len(args) != 3:
        sys.exit(not p.print_help())
    ecoli, genome, fq = args
    firstN_opt = "--firstN={0}".format(opts.firstN)
    # Reads that align to E. coli are certainly contaminant: lower bound
    samfile, logfile = align([ecoli, fq, firstN_opt])
    bl = BowtieLogFile(logfile)
    lowerbound = bl.rate
    # Reads that fail to align to the target genome may be contaminant or
    # junk: upper bound
    samfile, logfile = align([genome, fq, firstN_opt])
    bl = BowtieLogFile(logfile)
    upperbound = 100 - bl.rate
    median = (lowerbound + upperbound) / 2
    clogfile = fq + ".Ecoli"
    fw = open(clogfile, "w")
    # Format the rates with one decimal place for reporting
    lowerbound = "{0:.1f}".format(lowerbound)
    upperbound = "{0:.1f}".format(upperbound)
    median = "{0:.1f}".format(median)
    print("\t".join((fq, lowerbound, median, upperbound)), file=fw)
    print("{0}: Ecoli contamination rate {1}-{2}".\
            format(fq, lowerbound, upperbound), file=sys.stderr)
    fw.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def alignextend(args):
    """
    %prog alignextend ref.fasta read.1.fastq read.2.fastq

    Wrapper around AMOS alignextend. Builds a single call to the
    alignextend.pl driver script, translating our options to its flags.
    """
    choices = "prepare,align,filter,rmdup,genreads".split(",")
    p = OptionParser(alignextend.__doc__)
    p.add_option("--nosuffix", default=False, action="store_true",
                 help="Do not add /1/2 suffix to the read [default: %default]")
    p.add_option("--rc", default=False, action="store_true",
                 help="Reverse complement the reads before alignment")
    p.add_option("--len", default=100, type="int",
                 help="Extend to this length")
    p.add_option("--stage", default="prepare", choices=choices,
                 help="Start from certain stage")
    p.add_option("--dup", default=10, type="int",
                 help="Filter duplicates with coordinates within this distance")
    p.add_option("--maxdiff", default=1, type="int",
                 help="Maximum number of differences")
    p.set_home("amos")
    p.set_cpus()
    opts, args = p.parse_args(args)
    if len(args) != 3:
        sys.exit(not p.print_help())
    ref, r1, r2 = args
    pf = op.basename(r1).split(".")[0]
    cmd = op.join(opts.amos_home, "src/Experimental/alignextend.pl")
    if not opts.nosuffix:
        cmd += " -suffix"
    # Skip bwa indexing when the existing index is already up-to-date
    bwa_idx = "{0}.ref.fa.sa".format(pf)
    if not need_update(ref, bwa_idx):
        cmd += " -noindex"
    cmd += " -threads {0}".format(opts.cpus)
    # Phred+64 quality encoding requires the -I flag
    offset = guessoffset([r1])
    if offset == 64:
        cmd += " -I"
    if opts.rc:
        cmd += " -rc"
    cmd += " -allow -len {0} -dup {1}".format(opts.len, opts.dup)
    # Insert-size window derived from the extension length
    cmd += " -min {0} -max {1}".format(2 * opts.len, 20 * opts.len)
    cmd += " -maxdiff {0}".format(opts.maxdiff)
    cmd += " -stage {0}".format(opts.stage)
    # Leading "" yields the separating space before positional arguments
    cmd += " ".join(("", pf, ref, r1, r2))
    sh(cmd)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def hetsmooth(args):
    """
    %prog hetsmooth reads_1.fq reads_2.fq jf-23_0

    Wrapper against het-smooth. Below is the command used in het-smooth
    manual.

    $ het-smooth --kmer-len=23 --bottom-threshold=38 --top-threshold=220
        --no-multibase-replacements --jellyfish-hash-file=23-mers.jf
        reads_1.fq reads_2.fq
    """
    p = OptionParser(hetsmooth.__doc__)
    p.add_option("-K", default=23, type="int",
                 help="K-mer size [default: %default]")
    p.add_option("-L", type="int",
                 help="Bottom threshold, first min [default: %default]")
    p.add_option("-U", type="int",
                 help="Top threshold, second min [default: %default]")
    opts, args = p.parse_args(args)
    if len(args) != 3:
        sys.exit(not p.print_help())
    reads1fq, reads2fq, jfdb = args
    assert opts.L is not None and opts.U is not None, "Please specify -L and -U"
    # Assemble the het-smooth invocation piece by piece, then run it
    pieces = ["het-smooth --kmer-len={0}".format(opts.K)]
    pieces.append(" --bottom-threshold={0} --top-threshold={1}".format(opts.L, opts.U))
    pieces.append(" --no-multibase-replacements --jellyfish-hash-file={0}".format(jfdb))
    pieces.append(" --no-reads-log")
    pieces.append(" " + " ".join((reads1fq, reads2fq)))
    sh("".join(pieces))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def range_intersect(a, b, extend=0):
    """
    Returns the intersection between two ranges, as a [start, end] list,
    or None when they do not intersect (allowing `extend` bp of slack).

    >>> range_intersect((30, 45), (48, 55))
    >>> range_intersect((48, 65), (45, 55))
    [48, 55]
    """
    # Normalize each range so its endpoints are in ascending order
    lo_a, hi_a = min(a), max(a)
    lo_b, hi_b = min(b), max(b)
    # Clearly disjoint even after extending by `extend`
    if hi_a + extend < lo_b or hi_b + extend < lo_a:
        return None
    start = max(lo_a, lo_b)
    end = min(hi_a, hi_b)
    if start > end + extend:
        return None
    return [start, end]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def range_overlap(a, b, ratio=False):
    """
    Returns the amount of overlap between two (seqid, start, end) ranges.
    With ratio=True, returns the overlap as a fraction of the shorter of
    the two ranges.

    >>> range_overlap(("1", 30, 45), ("1", 41, 55))
    5
    >>> range_overlap(("1", 30, 45), ("2", 41, 55))
    0
    """
    chr_a, lo_a, hi_a = a
    chr_b, lo_b, hi_b = b
    if lo_a > hi_a:
        lo_a, hi_a = hi_a, lo_a
    if lo_b > hi_b:
        lo_b, hi_b = hi_b, lo_b
    shorter = min(hi_a - lo_a, hi_b - lo_b) + 1
    if chr_a != chr_b:
        # Ranges on different chromosomes never overlap
        overlap = 0
    else:
        overlap = max(0, min(shorter, hi_a - lo_b + 1, hi_b - lo_a + 1))
    if ratio:
        return overlap / float(shorter)
    return overlap
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def range_distance(a, b, distmode='ss'):
    """
    Returns the distance between two (seqid, start, end, strand) ranges,
    together with the combined orientation string.

    distmode selects which endpoints are measured: "ss" (outer span,
    start-to-start) or "ee" (inner gap, end-to-end). Ranges on different
    chromosomes yield a distance of -1.
    """
    assert distmode in ('ss', 'ee')
    chr_a, min_a, max_a, strand_a = a
    chr_b, min_b, max_b, strand_b = b
    if chr_a != chr_b:
        # Distance undefined across chromosomes
        dist = -1
    else:
        if min_a > min_b:
            # Ensure a refers to the leftmost range (swap strands too, so
            # the orientation string follows genomic order)
            min_a, min_b = min_b, min_a
            max_a, max_b = max_b, max_a
            strand_a, strand_b = strand_b, strand_a
        dist = max_b - min_a + 1 if distmode == "ss" else min_b - max_a - 1
    orientation = strand_a + strand_b
    return dist, orientation
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def range_minmax(ranges):
    """
    Returns the span of a collection of ranges: the smallest of all starts
    paired with the largest of all ends.

    >>> range_minmax([(10, 100), (20, 50)])
    (10, 100)
    """
    starts = [r[0] for r in ranges]
    ends = [r[1] for r in ranges]
    return min(starts), max(ends)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def range_interleave(ranges, sizes={}, empty=False):
    """
    Returns the ranges in between the given ranges.

    Input ranges are merged first, so overlapping input is tolerated. If
    `sizes` maps seqid -> sequence length, the flanks before the first range
    and after the last range are also emitted. With empty=True, a None
    placeholder is appended wherever a would-be gap is empty.

    >>> range_interleave([("1", 30, 40), ("1", 45, 50), ("1", 10, 30)])
    [('1', 41, 44)]
    """
    from jcvi.utils.iter import pairwise
    ranges = range_merge(ranges)
    interleaved_ranges = []
    for ch, cranges in groupby(ranges, key=lambda x: x[0]):
        cranges = list(cranges)
        size = sizes.get(ch, None)
        if size:
            # Leading flank: position 1 up to the first range
            ch, astart, aend = cranges[0]
            if astart > 1:
                interleaved_ranges.append((ch, 1, astart - 1))
            elif empty:
                interleaved_ranges.append(None)
        for a, b in pairwise(cranges):
            ch, astart, aend = a
            ch, bstart, bend = b
            # Gap between consecutive merged ranges
            istart, iend = aend + 1, bstart - 1
            if istart <= iend:
                interleaved_ranges.append((ch, istart, iend))
            elif empty:
                # Abutting ranges leave no gap
                interleaved_ranges.append(None)
        if size:
            # Trailing flank: last range up to the sequence end
            ch, astart, aend = cranges[-1]
            if aend < size:
                interleaved_ranges.append((ch, aend + 1, size))
            elif empty:
                interleaved_ranges.append(None)
    return interleaved_ranges
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def range_merge(ranges, dist=0):
    """
    Returns merged ranges: overlapping (seqid, start, end) tuples that are
    within `dist` of each other are collapsed into one. Similar to
    range_union, except this returns new ranges. Sorts `ranges` in place.

    >>> range_merge([("1", 10, 50), ("1", 30, 40)])
    [('1', 10, 50)]
    >>> range_merge([("1", 30, 40), ("1", 45, 50)], dist=5)
    [('1', 30, 50)]
    """
    if not ranges:
        return []
    ranges.sort()
    merged = []
    # `current` keeps any extra trailing fields of the first range of a run
    current = list(ranges[0])
    for r in ranges[1:]:
        # Start a new run on a seqid change or when the gap exceeds `dist`
        if r[0] != current[0] or r[1] - current[2] > dist:
            merged.append(tuple(current))
            current = list(r)
        else:
            current[2] = max(current[2], r[2])
    merged.append(tuple(current))
    return merged
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def range_span(ranges):
    """
    Returns the total span between the left most range to the right most
    range, summed over each seqid. Sorts `ranges` in place.

    >>> range_span([("1", 30, 45), ("1", 40, 50), ("1", 10, 50)])
    41
    >>> range_span([])
    0
    """
    if not ranges:
        return 0
    ranges.sort()
    ans = 0
    for seq, lt in groupby(ranges, key=lambda x: x[0]):
        lt = list(lt)
        # Bug fix: previously this used max(max(lt)[1:]) -- the end of the
        # lexicographically-largest tuple -- which undercounts whenever an
        # earlier-starting range fully contains a later-starting one.
        # Take the true extremes over all coordinate fields instead.
        leftmost = min(min(x[1:]) for x in lt)
        rightmost = max(max(x[1:]) for x in lt)
        ans += rightmost - leftmost + 1
    return ans
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def range_piles(ranges):
    """
    Return piles of intervals that overlap. The piles are only interrupted by
    regions of zero coverage.

    Yields lists of indices into `ranges`; each list is one pile.
    """
    # NOTE(review): assumes _make_endpoints yields events sorted by
    # (seqid, position) -- confirm against its definition
    endpoints = _make_endpoints(ranges)
    for seqid, ends in groupby(endpoints, lambda x: x[0]):
        active = []
        depth = 0
        for seqid, pos, leftright, i, score in ends:
            if leftright == LEFT:
                # Interval i opens: it joins the current pile
                active.append(i)
                depth += 1
            else:
                depth -= 1
            # Coverage dropped to zero: the current pile is complete
            if depth == 0 and active:
                yield active
                active = []
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def range_conflict(ranges, depth=1):
    """
    Find intervals that are overlapping in 1-dimension.
    Return groups of block IDs that are in conflict.

    A conflict is any point where more than `depth` intervals are active at
    once; the sorted tuple of active interval indices at that point is
    yielded (each distinct group only once).
    """
    overlap = set()
    active = set()
    endpoints = _make_endpoints(ranges)
    for seqid, ends in groupby(endpoints, lambda x: x[0]):
        # Reset the sweep state for each seqid
        active.clear()
        for seqid, pos, leftright, i, score in ends:
            if leftright == LEFT:
                active.add(i)
            else:
                active.remove(i)
            if len(active) > depth:
                overlap.add(tuple(sorted(active)))
    for ov in overlap:
        yield ov
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def loadtable(header, rows, major='=', minor='-', thousands=True):
    """
    Print a tabular output, with horizontal separators
    """
    # NOTE(review): major/minor are currently unused; banner() presumably
    # supplies its own separator characters -- confirm
    # Align the columns first, then let banner() add the rules
    formatted = load_csv(header, rows, sep=" ", thousands=thousands)
    header, rows = formatted[0], formatted[1:]
    return banner(header, rows)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write_csv(header, contents, sep=",", filename="stdout", thousands=False, tee=False, align=True, comment=False):
    """
    Write csv that are aligned with the column headers.

    x_value, y_value
    1, 100
    2, 200

    With comment=True the header row is turned into a '#'-prefixed comment;
    with tee=True the output is echoed to stdout in addition to `filename`.
    """
    from jcvi.formats.base import must_open
    formatted = load_csv(header, contents,
                    sep=sep, thousands=thousands, align=align)
    if comment:
        # Replace the first character so column alignment is preserved
        formatted[0] = '#' + formatted[0][1:]
    formatted = "\n".join(formatted)
    fw = must_open(filename, "w")
    print(formatted, file=fw)
    # Avoid printing twice when the destination is already stdout
    if tee and filename != "stdout":
        print(formatted)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def blat(args):
    """
    %prog blat map1.txt ref.fasta

    Make ALLMAPS input csv based on sequences. The tab-delimited txt file
    include: name, LG, position, sequence.
    """
    from jcvi.formats.base import is_number
    from jcvi.formats.blast import best as blast_best, bed as blast_bed
    from jcvi.apps.align import blat as blat_align
    p = OptionParser(blat.__doc__)
    opts, args = p.parse_args(args)
    if len(args) != 2:
        sys.exit(not p.print_help())
    maptxt, ref = args
    pf = maptxt.rsplit(".", 1)[0]
    # NOTE(review): register is populated but never read afterwards
    register = {}
    # Write marker sequences to FASTA for alignment
    fastafile = pf + ".fasta"
    fp = open(maptxt)
    fw = open(fastafile, "w")
    for row in fp:
        name, lg, pos, seq = row.split()
        if not is_number(pos):
            # Skip header/malformed rows where position is not numeric
            continue
        register[name] = (pf + '-' + lg, pos)
        # NOTE(review): trailing "\n" leaves a blank line after each record
        print(">{0}\n{1}\n".format(name, seq), file=fw)
    fw.close()
    # Align markers to the reference; keep only the best hit per marker
    blatfile = blat_align([ref, fastafile])
    bestfile = blast_best([blatfile])
    bedfile = blast_bed([bestfile])
    b = Bed(bedfile).order
    pf = ".".join((op.basename(maptxt).split(".")[0],
                op.basename(ref).split(".")[0]))
    csvfile = pf + ".csv"
    fp = open(maptxt)
    fw = open(csvfile, "w")
    # Emit ALLMAPS rows: scaffold, scaffold position, LG, map position
    for row in fp:
        name, lg, pos, seq = row.split()
        if name not in b:
            continue
        bbi, bb = b[name]
        scaffold, scaffold_pos = bb.seqid, bb.start
        print(",".join(str(x) for x in \
                (scaffold, scaffold_pos, lg, pos)), file=fw)
    fw.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def header(args):
    """
    %prog header map conversion_table

    Rename lines in the map header. The mapping of old names to new names
    are stored in two-column `conversion_table`.
    """
    from jcvi.formats.base import DictFile
    p = OptionParser(header.__doc__)
    p.add_option("--prefix", default="",
                 help="Prepend text to line number [default: %default]")
    p.add_option("--ids", help="Write ids to file [default: %default]")
    opts, args = p.parse_args(args)
    if len(args) != 2:
        sys.exit(not p.print_help())
    mstmap, conversion_table = args
    old_header = MSTMap(mstmap).header
    name_map = DictFile(conversion_table)
    # Names absent from the table are kept unchanged
    new_header = []
    for name in old_header:
        new_header.append(opts.prefix + name_map.get(name, name))
    print("\t".join(old_header))
    print("--->")
    print("\t".join(new_header))
    if opts.ids:
        fw = open(opts.ids, "w")
        print("\n".join(new_header), file=fw)
        fw.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def rename(args):
    """
    %prog rename map markers.bed > renamed.map

    Rename markers according to the new mapping locations.
    """
    p = OptionParser(rename.__doc__)
    p.set_outfile()
    opts, args = p.parse_args(args)
    if len(args) != 2:
        sys.exit(not p.print_help())
    mstmap, bedfile = args
    markersbed = Bed(bedfile)
    markers = markersbed.order
    data = MSTMap(mstmap)
    header = data.header
    # New header carries the physical location columns after the marker id
    header = [header[0]] + ["seqid", "start"] + header[1:]
    renamed = []
    for b in data:
        m, geno = b.id, b.genotype
        om = m
        if m not in markers:
            # Fall back to the marker name with its trailing suffix removed
            m = m.rsplit(".", 1)[0]
            if m not in markers:
                continue
        i, mb = markers[m]
        renamed.append([om, mb.seqid, mb.start, "\t".join(list(geno))])
    # Sort markers by physical position (seqid, then start)
    renamed.sort(key=lambda x: (x[1], x[2]))
    fw = must_open(opts.outfile, "w")
    print("\t".join(header), file=fw)
    for d in renamed:
        print("\t".join(str(x) for x in d), file=fw)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def anchor(args):
    """
    %prog anchor map.bed markers.blast > anchored.bed

    Anchor scaffolds based on map.
    """
    from jcvi.formats.blast import bed
    p = OptionParser(anchor.__doc__)
    opts, args = p.parse_args(args)
    if len(args) != 2:
        sys.exit(not p.print_help())
    mapbed, blastfile = args
    # Convert the blast hits to bed, then index marker -> best placement
    marker_positions = Bed(bed([blastfile])).order
    for line in Bed(mapbed, sorted=False):
        marker = line.accn
        if marker not in marker_positions:
            continue
        _, hit = marker_positions[marker]
        # Rewrite the accession as its anchored genomic interval
        line.accn = "{0}:{1}-{2}".format(hit.seqid, hit.start, hit.end)
        print(line)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bed(args):
    """
    %prog fasta map.out

    Convert MSTMAP output into bed format.
    """
    p = OptionParser(bed.__doc__)
    p.add_option("--switch", default=False, action="store_true",
                 help="Switch reference and aligned map elements [default: %default]")
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())
    mapout, = args
    # Output file shares the map's prefix
    mapbed = mapout.split(".")[0] + ".bed"
    BinMap(mapout).print_to_bed(mapbed, switch=opts.switch)
    return mapbed
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fasta(args):
    """
    %prog fasta map.out scaffolds.fasta

    Extract marker sequences based on map.
    """
    from jcvi.formats.sizes import Sizes
    p = OptionParser(fasta.__doc__)
    p.add_option("--extend", default=1000, type="int",
                 help="Extend seq flanking the gaps [default: %default]")
    opts, args = p.parse_args(args)
    if len(args) != 2:
        sys.exit(not p.print_help())
    mapout, sfasta = args
    Flank = opts.extend
    pf = mapout.split(".")[0]
    mapbed = pf + ".bed"
    bm = BinMap(mapout)
    bm.print_to_bed(mapbed)
    bed = Bed(mapbed, sorted=False)
    markersbed = pf + ".markers.bed"
    fw = open(markersbed, "w")
    sizes = Sizes(sfasta).mapping
    for b in bed:
        # Marker accession encodes "<scaffold>.<position>"
        accn = b.accn
        scf, pos = accn.split(".")
        pos = int(pos)
        # Window of +/- Flank bp around the marker, clamped to the scaffold
        start = max(0, pos - Flank)
        end = min(pos + Flank, sizes[scf])
        print("\t".join(str(x) for x in \
                (scf, start, end, accn)), file=fw)
    fw.close()
    fastaFromBed(markersbed, sfasta, name=True)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def breakpoint(args):
    """
    %prog breakpoint mstmap.input > breakpoints.bed

    Find scaffold breakpoints using genetic map. Use variation.vcf.mstmap()
    to generate the input for this routine.
    """
    from jcvi.utils.iter import pairwise
    p = OptionParser(breakpoint.__doc__)
    p.add_option("--diff", default=.1, type="float",
                 help="Maximum ratio of differences allowed [default: %default]")
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())
    mstmap, = args
    diff = opts.diff
    data = MSTMap(mstmap)
    # Remove singleton markers (avoid double cross-over)
    good = []
    nsingletons = 0
    # NOTE: xrange is Python 2 only
    for i in xrange(1, len(data) - 1):
        a = data[i]
        left_label, left_rr = check_markers(data[i - 1], a, diff)
        right_label, right_rr = check_markers(a, data[i + 1], diff)
        # A marker disagreeing with BOTH neighbors is likely a genotyping
        # error rather than a real breakpoint
        if left_label == BREAK and right_label == BREAK:
            nsingletons += 1
            continue
        good.append(a)
    logging.debug("A total of {0} singleton markers removed.".format(nsingletons))
    # Report breakpoints between the surviving adjacent marker pairs
    for a, b in pairwise(good):
        label, rr = check_markers(a, b, diff)
        if label == BREAK:
            print("\t".join(str(x) for x in rr))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def trimNs(seq, line, newagp):
    """
    Test if the sequences contain dangling N's on both sides. This component
    needs to be adjusted to the 'actual' sequence range.

    Writes the adjusted AGP line(s) to `newagp`; trimmed N runs become
    explicit "fragment" gap lines. Object coordinates are written as 0 --
    presumably fixed by a later reindex step (TODO confirm).
    """
    start, end = line.component_beg, line.component_end
    size = end - start + 1
    leftNs, rightNs = 0, 0
    lid, lo = line.component_id, line.orientation
    # Count leading N's in the sequence
    for s in seq:
        if s in 'nN':
            leftNs += 1
        else:
            break
    # Count trailing N's in the sequence
    for s in seq[::-1]:
        if s in 'nN':
            rightNs += 1
        else:
            break
    # For a '-' oriented component, the sequence's leading N's trim the
    # component's RIGHT end, and vice versa
    if lo == '-':
        trimstart = start + rightNs
        trimend = end - leftNs
    else:
        trimstart = start + leftNs
        trimend = end - rightNs
    trimrange = (trimstart, trimend)
    oldrange = (start, end)
    if trimrange != oldrange:
        logging.debug("{0} trimmed of N's: {1} => {2}".\
                format(lid, oldrange, trimrange))
        if leftNs:
            print("\t".join(str(x) for x in (line.object, 0, 0, 0,
                    'N', leftNs, "fragment", "yes", "")), file=newagp)
        # Only emit the component line when some sequence survives trimming
        if trimend > trimstart:
            print("\t".join(str(x) for x in (line.object, 0, 0, 0,
                    line.component_type, lid, trimstart, trimend, lo)), file=newagp)
        # rightNs == size means the whole component was N's, which has
        # already been covered by the leading-N gap line above
        if rightNs and rightNs != size:
            print("\t".join(str(x) for x in (line.object, 0, 0, 0,
                    'N', rightNs, "fragment", "yes", "")), file=newagp)
    else:
        # No dangling N's: pass the line through unchanged
        print(line, file=newagp)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fromcsv(args):
    """
    %prog fromcsv contigs.fasta map.csv map.agp

    Convert csv which contains list of scaffolds/contigs to AGP file.
    """
    import csv
    from jcvi.formats.sizes import Sizes
    p = OptionParser(fromcsv.__doc__)
    p.add_option("--evidence", default="map",
                 help="Linkage evidence to add in AGP")
    opts, args = p.parse_args(args)
    if len(args) != 3:
        sys.exit(not p.print_help())
    contigsfasta, mapcsv, mapagp = args
    reader = csv.reader(open(mapcsv))
    sizes = Sizes(contigsfasta).mapping
    next(reader)  # Header
    fwagp = must_open(mapagp, "w")
    o = OO()
    for row in reader:
        # Rows are (object, contig) or (object, contig, strand)
        # NOTE(review): rows of any other width silently reuse the previous
        # row's values -- consider validating row length
        if len(row) == 2:
            object, ctg = row
            strand = '?'
        elif len(row) == 3:
            object, ctg, strand = row
        size = sizes[ctg]
        o.add(object, ctg, size, strand)
    o.write_AGP(fwagp, gapsize=100, gaptype="scaffold",
                phases={}, evidence=opts.evidence)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def compress(args):
    """
    %prog compress a.agp b.agp

    Convert coordinates based on multiple AGP files. Useful to simplify
    multiple liftOvers to compress multiple chain files into a single chain
    file, in upgrading locations of genomic features.

    Example: `a.agp` could contain split scaffolds:
    scaffold_0.1 1 600309 1 W scaffold_0 1 600309 +

    `b.agp` could contain mapping to chromosomes:
    LG05 6435690 7035998 53 W scaffold_0.1 1 600309 +

    The final AGP we want is:
    LG05 6435690 7035998 53 W scaffold_0 1 600309 +
    """
    p = OptionParser(compress.__doc__)
    p.set_outfile()
    opts, args = p.parse_args(args)
    if len(args) != 2:
        sys.exit(not p.print_help())
    aagpfile, bagpfile = args
    # First AGP provides the mapping
    store = {}
    agp = AGP(aagpfile)
    for a in agp:
        if a.is_gap:
            continue
        # Ignore '?' in the mapping (treat unknown orientation as '+')
        if a.sign == 0:
            a.sign = 1
        store[(a.object, a.object_beg, a.object_end)] = \
            (a.component_id, a.component_beg, a.component_end, a.sign)
    # Second AGP forms the backbone
    agp = AGP(bagpfile)
    fw = must_open(opts.outfile, "w")
    print("\n".join(agp.header), file=fw)
    for a in agp:
        if a.is_gap:
            # Gap lines are copied through untouched
            print(a, file=fw)
            continue
        # Substitute b's component with the underlying component from a;
        # the final orientation is the product of the two signs
        component_id, component_beg, component_end, sign = \
            store[(a.component_id, a.component_beg, a.component_end)]
        orientation = {1: '+', -1: '-', 0: '?'}.get(sign * a.sign)
        atoms = (a.object, a.object_beg, a.object_end, a.part_number, a.component_type,
                 component_id, component_beg, component_end, orientation)
        a = AGPLine("\t".join(str(x) for x in atoms))
        print(a, file=fw)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def infer(args):
    """
    %prog infer scaffolds.fasta genome.fasta

    Infer where the components are in the genome. This function is rarely
    used, but can be useful when distributor does not ship an AGP file.
    """
    from jcvi.apps.grid import WriteJobs
    from jcvi.formats.bed import sort
    p = OptionParser(infer.__doc__)
    p.set_cpus()
    opts, args = p.parse_args(args)
    if len(args) != 2:
        sys.exit(not p.print_help())
    scaffoldsf, genomef = args
    inferbed = "infer-components.bed"
    # Map every scaffold onto the genome in parallel worker jobs
    if need_update((scaffoldsf, genomef), inferbed):
        scaffolds = Fasta(scaffoldsf, lazy=True)
        genome = Fasta(genomef)
        genome = genome.tostring()
        args = [(scaffold_name, scaffold, genome) \
                for scaffold_name, scaffold in scaffolds.iteritems_ordered()]
        pool = WriteJobs(map_one_scaffold, args, inferbed, cpus=opts.cpus)
        pool.run()
    sort([inferbed, "-i"])
    bed = Bed(inferbed)
    inferagpbed = "infer.bed"
    fw = open(inferagpbed, "w")
    seen = []
    # Keep only non-redundant placements, then convert bed -> AGP
    for b in bed:
        r = (b.seqid, b.start, b.end)
        if check_seen(r, seen):
            continue
        print("\t".join(str(x) for x in \
                (b.accn, 0, b.span, b.seqid, b.score, b.strand)), file=fw)
        seen.append(r)
    fw.close()
    frombed([inferagpbed])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def format(args):
    """
    %prog format oldagpfile newagpfile

    Reformat AGP file. --switchcomponent replaces component ids in the AGP
    file based on a two-column (old, new) mapping file.
    """
    from jcvi.formats.base import DictFile
    p = OptionParser(format.__doc__)
    p.add_option("--switchcomponent",
                 help="Switch component id based on")
    opts, args = p.parse_args(args)
    if len(args) != 2:
        sys.exit(not p.print_help())
    oldagpfile, newagpfile = args
    switchcomponent = opts.switchcomponent
    if switchcomponent:
        switchcomponent = DictFile(switchcomponent)
    else:
        # Bug fix: previously the membership test below was run against
        # None when --switchcomponent was not supplied, raising TypeError.
        # An empty mapping makes the command a plain pass-through reformat.
        switchcomponent = {}
    agp = AGP(oldagpfile)
    fw = open(newagpfile, "w")
    nconverts = 0
    for i, a in enumerate(agp):
        if not a.is_gap and a.component_id in switchcomponent:
            oldid = a.component_id
            newid = switchcomponent[a.component_id]
            a.component_id = newid
            # i is 0-based; report 1-based line numbers
            logging.debug("Convert {0} to {1} on line {2}".\
                            format(oldid, newid, i + 1))
            nconverts += 1
        print(a, file=fw)
    fw.close()
    logging.debug("Total converted records: {0}".format(nconverts))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def frombed(args):
    """
    %prog frombed bedfile

    Generate AGP file based on bed file. The bed file must have at least 6
    columns. With the 4-th column indicating the new object.
    """
    p = OptionParser(frombed.__doc__)
    p.add_option("--gapsize", default=100, type="int",
                 help="Insert gaps of size [default: %default]")
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())
    bedfile, = args
    gapsize = opts.gapsize
    agpfile = bedfile.replace(".bed", ".agp")
    fw = open(agpfile, "w")
    bed = Bed(bedfile, sorted=False)
    # Consecutive bed lines sharing an accn (column 4) are stitched into one
    # object, separated by fixed-size "U" (unknown) gaps
    for object, beds in groupby(bed, key=lambda x: x.accn):
        beds = list(beds)
        for i, b in enumerate(beds):
            if gapsize and i != 0:
                # Object coordinates (fields 2-4) are placeholders, fixed
                # by reindex() below
                print("\t".join(str(x) for x in \
                        (object, 0, 0, 0, "U", \
                        gapsize, "scaffold", "yes", "map")), file=fw)
            print("\t".join(str(x) for x in \
                    (object, 0, 0, 0, "W", \
                    b.seqid, b.start, b.end, b.strand)), file=fw)
    fw.close()
    # Reindex
    return reindex([agpfile, "--inplace"])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def swap(args):
    """
    %prog swap agpfile

    Swap objects and components. Will add gap lines. This is often used in
    conjuction with formats.chain.fromagp() to convert between different
    coordinate systems.
    """
    from jcvi.utils.range import range_interleave
    p = OptionParser(swap.__doc__)
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())
    agpfile, = args
    agp = AGP(agpfile, nogaps=True, validate=False)
    # Group lines by component so each component becomes a new object
    agp.sort(key=lambda x: (x.component_id, x.component_beg))
    newagpfile = agpfile.rsplit(".", 1)[0] + ".swapped.agp"
    fw = open(newagpfile, "w")
    agp.transfer_header(fw)
    for cid, aa in groupby(agp, key=(lambda x: x.component_id)):
        aa = list(aa)
        # Gaps in the new object are the uncovered stretches between the
        # old component ranges
        aranges = [(x.component_id, x.component_beg, x.component_end) \
                for x in aa]
        gaps = range_interleave(aranges)
        for a, g in zip_longest(aa, gaps):
            # Swap object <-> component on each line
            a.object, a.component_id = a.component_id, a.object
            a.component_beg = a.object_beg
            a.component_end = a.object_end
            print(a, file=fw)
            if not g:
                continue
            # Emit an N gap line for the uncovered stretch following `a`
            aline = [cid, 0, 0, 0]
            gseq, ga, gb = g
            cspan = gb - ga + 1
            aline += ["N", cspan, "fragment", "yes"]
            print("\t".join(str(x) for x in aline), file=fw)
    fw.close()
    # Reindex
    # NOTE(review): idxagpfile is unused; the pre-reindex name is returned
    idxagpfile = reindex([newagpfile, "--inplace"])
    return newagpfile
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def stats(args):
    """
    %prog stats agpfile

    Print out a report for length of gaps and components.
    """
    from jcvi.utils.table import tabulate
    p = OptionParser(stats.__doc__)
    p.add_option("--warn", default=False, action="store_true",
                 help="Warnings on small component spans [default: %default]")
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(p.print_help())
    agpfile, = args
    agp = AGP(agpfile)
    gap_lengths = []
    component_lengths = []
    # Collect (span, label) pairs for gaps and components separately
    for a in agp:
        span = a.object_span
        if a.is_gap:
            label = a.gap_type
            gap_lengths.append((span, label))
        else:
            label = "{0}:{1}-{2}".format(a.component_id, a.component_beg, \
                    a.component_end)
            component_lengths.append((span, label))
            if opts.warn and span < 50:
                logging.error("component span too small ({0}):\n{1}".\
                        format(span, a))
    # Build a (row, column) -> value dict for tabulate()
    table = dict()
    for label, lengths in zip(("Gaps", "Components"),
            (gap_lengths, component_lengths)):
        if not lengths:
            table[(label, "Min")] = table[(label, "Max")] \
                    = table[(label, "Sum")] = "n.a."
            continue
        # min/max over (span, label) pairs: smallest/largest span wins
        table[(label, "Min")] = "{0} ({1})".format(*min(lengths))
        table[(label, "Max")] = "{0} ({1})".format(*max(lengths))
        table[(label, "Sum")] = sum(x[0] for x in lengths)
    print(tabulate(table), file=sys.stderr)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cut(args):
    """
    %prog cut agpfile bedfile

    Cut at the boundaries of the ranges in the bedfile.
    """
    p = OptionParser(cut.__doc__)
    p.add_option("--sep", default=".", help="Separator for splits")
    opts, args = p.parse_args(args)
    if len(args) != 2:
        sys.exit(not p.print_help())
    agpfile, bedfile = args
    sep = opts.sep
    agp = AGP(agpfile)
    bed = Bed(bedfile)
    simple_agp = agp.order
    newagpfile = agpfile.replace(".agp", ".cut.agp")
    fw = open(newagpfile, "w")
    agp_fixes = defaultdict(list)
    # Collect the replacement AGP lines for every component that gets cut
    for component, intervals in bed.sub_beds():
        i, a = simple_agp[component]
        object = a.object
        component_span = a.component_span
        orientation = a.orientation
        # NOTE(review): this asserts only component_beg (component_end acts
        # as the failure message) -- possibly intended as two assertions
        assert a.component_beg, a.component_end
        # Build a sorted set of cut positions, bracketed by the component
        # boundaries 0 and component_span
        cuts = set()
        for i in intervals:
            start, end = i.start, i.end
            end -= 1
            assert start <= end
            cuts.add(start)
            cuts.add(end)
        cuts.add(0)
        cuts.add(component_span)
        cuts = list(sorted(cuts))
        sum_of_spans = 0
        # One new object per piece, named object<sep><index>
        for i, (a, b) in enumerate(pairwise(cuts)):
            oid = object + "{0}{1}".format(sep, i + 1)
            aline = [oid, 0, 0, 0]
            cspan = b - a
            aline += ['D', component, a + 1, b, orientation]
            sum_of_spans += cspan
            aline = "\t".join(str(x) for x in aline)
            agp_fixes[component].append(aline)
        # Sanity check: the pieces must tile the entire component
        assert component_span == sum_of_spans
    # Finally write the masked agp
    for a in agp:
        if not a.is_gap and a.component_id in agp_fixes:
            print("\n".join(agp_fixes[a.component_id]), file=fw)
        else:
            print(a, file=fw)
    fw.close()
    # Reindex
    reindex([newagpfile, "--inplace"])
    return newagpfile
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def summary(args):
    """
    %prog summary agpfile

    print a table of scaffold statistics, number of BACs, no of scaffolds,
    scaffold N50, scaffold L50, actual sequence, PSMOL NNNs, PSMOL-length,
    % of PSMOL sequenced.
    """
    from jcvi.utils.table import write_csv
    p = OptionParser(summary.__doc__)
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(p.print_help())
    agpfile, = args
    header = "Chromosome #_Distinct #_Components #_Scaffolds " \
             "Scaff_N50 Scaff_L50 Length".split()
    # One summary row per object, space-separated columns
    rows = [row for row in AGP(agpfile).summary_all()]
    write_csv(header, rows, sep=" ")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def phase(args):
    """ %prog phase genbankfiles Input has to be gb file. Search the `KEYWORDS` section to look for PHASE. Also look for "chromosome" and "clone" in the definition line. """
    p = OptionParser(phase.__doc__)
    p.set_outfile()
    opts, args = p.parse_args(args)

    if len(args) < 1:
        sys.exit(not p.print_help())

    outfh = must_open(opts.outfile, "w")
    for gbfile in args:
        # A single GenBank file may contain multiple records.
        for record in SeqIO.parse(gbfile, "gb"):
            bac_phase, keywords = get_phase(record)
            chromosome, clone = get_clone(record)
            fields = (record.id, str(bac_phase), ";".join(keywords),
                      chromosome, clone)
            print("\t".join(fields), file=outfh)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def tpf(args):
    """ %prog tpf agpfile Print out a list of ids, one per line. Also known as the Tiling Path. AC225490.9 chr6 Can optionally output scaffold gaps. """
    p = OptionParser(tpf.__doc__)
    p.add_option("--noversion", default=False, action="store_true",
                 help="Remove trailing accession versions [default: %default]")
    p.add_option("--gaps", default=False, action="store_true",
                 help="Include gaps in the output [default: %default]")
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    agpfile, = args
    for aline in AGP(agpfile):
        obj = aline.object
        if aline.is_gap:
            # Gap lines are emitted only on request, and only clone gaps.
            if opts.gaps and aline.isCloneGap:
                print("\t".join((aline.gap_type, obj, "na")))
            continue

        accession = aline.component_id
        if opts.noversion:
            # Strip the trailing `.N` accession version.
            accession = accession.rsplit(".", 1)[0]
        print("\t".join((accession, obj, aline.orientation)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bed(args):
    """ %prog bed agpfile print out the tiling paths in bed/gff3 format """
    from jcvi.formats.obo import validate_term

    p = OptionParser(bed.__doc__)
    p.add_option("--gaps", default=False, action="store_true",
            help="Only print bed lines for gaps [default: %default]")
    p.add_option("--nogaps", default=False, action="store_true",
            help="Do not print bed lines for gaps [default: %default]")
    p.add_option("--bed12", default=False, action="store_true",
            help="Produce bed12 formatted output [default: %default]")
    p.add_option("--component", default=False, action="store_true",
            help="Generate bed file for components [default: %default]")
    p.set_outfile()

    g1 = OptionGroup(p, "GFF specific parameters",
            "Note: If not specified, output will be in `bed` format")
    g1.add_option("--gff", default=False, action="store_true",
            help="Produce gff3 formatted output. By default, ignores " +\
                 "AGP gap lines. [default: %default]")
    g1.add_option("--source", default="MGSC",
            help="Specify a gff3 source [default: `%default`]")
    g1.add_option("--feature", default="golden_path_fragment",
            help="Specify a gff3 feature type [default: `%default`]")
    p.add_option_group(g1)
    p.set_SO_opts()

    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())

    # Component mode implies suppressing gap lines.
    if opts.component:
        opts.nogaps = True

    # If output format is gff3 and 'verifySO' option is invoked, validate the SO term
    if opts.gff and opts.verifySO:
        validate_term(opts.feature, method=opts.verifySO)

    agpfile, = args
    agp = AGP(agpfile)
    fw = must_open(opts.outfile, "w")
    if opts.gff:
        print("##gff-version 3", file=fw)

    for a in agp:
        # --nogaps / --gaps select the subset of AGP lines to emit.
        if opts.nogaps and a.is_gap:
            continue
        if opts.gaps and not a.is_gap:
            continue
        if opts.bed12:
            print(a.bed12line, file=fw)
        elif opts.gff:
            print(a.gffline(gff_source=opts.source, gff_feat_type=opts.feature), file=fw)
        elif opts.component:
            # Component mode reports coordinates on the component itself
            # (0-based bed start), with an object-range label as the name.
            name = "{0}:{1}-{2}".\
                    format(a.component_id, a.component_beg, a.component_end)
            print("\t".join(str(x) for x in (a.component_id, a.component_beg - 1,
                    a.component_end, name,
                    a.component_type, a.orientation)), file=fw)
        else:
            print(a.bedline, file=fw)
    fw.close()

    return fw.name
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def extendbed(args):
    """ %prog extend agpfile componentfasta Extend the components to fill the component range. For example, a bed/gff3 file that was converted from the agp will contain only the BAC sequence intervals that are 'represented' - sometimes leaving the 5` and 3` out (those that overlap with adjacent sequences. This script fill up those ranges, potentially to make graphics for tiling path. """
    from jcvi.formats.sizes import Sizes

    p = OptionParser(extendbed.__doc__)
    p.add_option("--nogaps", default=False, action="store_true",
            help="Do not print bed lines for gaps [default: %default]")
    p.add_option("--bed12", default=False, action="store_true",
            help="Produce bed12 formatted output [default: %default]")
    p.add_option("--gff", default=False, action="store_true",
            help="Produce gff3 formatted output. By default, ignores " +\
                 " AGP gap lines. [default: %default]")
    p.set_outfile()

    opts, args = p.parse_args(args)
    if len(args) != 2:
        sys.exit(not p.print_help())

    # If output format is GFF3, ignore AGP gap lines.
    if opts.gff:
        opts.nogaps = True

    agpfile, fastafile = args
    agp = AGP(agpfile)
    fw = must_open(opts.outfile, "w")
    if opts.gff:
        print("##gff-version 3", file=fw)

    ranges = defaultdict(list)  # component_id -> AGP lines placing it
    thickCoords = []  # These are the coordinates before modify ranges
    # Make the first pass to record all the component ranges
    for a in agp:
        thickCoords.append((a.object_beg, a.object_end))
        if a.is_gap:
            continue
        ranges[a.component_id].append(a)

    # Modify the ranges
    # For each component, widen the FIRST placement upstream and the LAST
    # placement downstream, so the full component length is represented.
    # The overhang depends on orientation (in-place mutation of AGP lines).
    sizes = Sizes(fastafile).mapping
    for accn, rr in ranges.items():
        alen = sizes[accn]

        a = rr[0]
        if a.orientation == "+":
            hang = a.component_beg - 1
        else:
            hang = alen - a.component_end
        a.object_beg -= hang

        a = rr[-1]
        if a.orientation == "+":
            hang = alen - a.component_end
        else:
            hang = a.component_beg - 1
        a.object_end += hang

    # Second pass: emit. thickCoords preserves the ORIGINAL (unextended)
    # span as the thickStart/thickEnd columns for bed12 output.
    for a, (ts, te) in zip(agp, thickCoords):
        if opts.nogaps and a.is_gap:
            continue
        if opts.bed12:
            line = a.bedline
            a.object_beg, a.object_end = ts, te
            line += "\t" + a.bedextra
            print(line, file=fw)
        elif opts.gff:
            print(a.gffline(), file=fw)
        else:
            print(a.bedline, file=fw)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def gaps(args):
    """ %prog gaps agpfile Print out the distribution of gapsizes. Option --merge allows merging of adjacent gaps which is used by tidy(). """
    from jcvi.graphics.histogram import loghistogram

    p = OptionParser(gaps.__doc__)
    p.add_option("--merge", dest="merge", default=False, action="store_true",
                 help="Merge adjacent gaps (to conform to AGP specification)")
    p.add_option("--header", default=False, action="store_true",
                 help="Produce an AGP header [default: %default]")

    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())

    merge = opts.merge
    agpfile, = args

    if merge:
        merged_agpfile = agpfile.replace(".agp", ".merged.agp")
        fw = open(merged_agpfile, "w")
    else:
        # Bugfix: `fw` used to be undefined without --merge, so invoking
        # --header alone raised NameError. Header now goes to stdout then.
        merged_agpfile = None
        fw = sys.stdout

    agp = AGP(agpfile)
    sizes = []  # gap sizes, or "centromere"/"telomere" labels, for histogram
    data = []  # store merged AGPLine's
    # Priority order used to pick a representative gap_type when collapsing
    # a run of adjacent gap lines into one.
    priorities = ("centromere", "telomere", "scaffold", "contig",
                  "clone", "fragment")

    # Group consecutive AGP lines of the same object into gap / non-gap runs.
    for is_gap, alines in groupby(agp, key=lambda x: (x.object, x.is_gap)):
        alines = list(alines)
        is_gap = is_gap[1]
        if is_gap:
            gap_size = sum(x.gap_length for x in alines)
            gap_types = set(x.gap_type for x in alines)
            # A centromere/telomere run is recorded by label, not size.
            for gtype in ("centromere", "telomere"):
                if gtype in gap_types:
                    gap_size = gtype

            sizes.append(gap_size)
            # Collapse the run into a single representative gap line.
            b = deepcopy(alines[0])
            b.object_beg = min(x.object_beg for x in alines)
            b.object_end = max(x.object_end for x in alines)
            b.gap_length = sum(x.gap_length for x in alines)

            assert b.gap_length == b.object_end - b.object_beg + 1
            # AGP spec: 100 bp gaps of unknown size get component_type 'U'.
            b.component_type = 'U' if b.gap_length == 100 else 'N'

            gtypes = [x.gap_type for x in alines]
            for gtype in priorities:
                if gtype in gtypes:
                    b.gap_type = gtype
                    break

            # "no" linkage wins over "yes" when the run disagrees.
            linkages = [x.linkage for x in alines]
            for linkage in ("no", "yes"):
                if linkage in linkages:
                    b.linkage = linkage
                    break

            alines = [b]

        data.extend(alines)

    loghistogram(sizes)

    if opts.header:
        AGP.print_header(fw, organism="Medicago truncatula",
                         taxid=3880, source="J. Craig Venter Institute")

    if merge:
        # Renumber part_number per object after the merge.
        for ob, bb in groupby(data, lambda x: x.object):
            for i, b in enumerate(bb):
                b.part_number = i + 1
                print(b, file=fw)
        return merged_agpfile
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def tidy(args):
    """ %prog tidy agpfile componentfasta Given an agp file, run through the following steps: 1. Trim components with dangling N's 2. Merge adjacent gaps 3. Trim gaps at the end of an object 4. Reindex the agp Final output is in `.tidy.agp`. """
    p = OptionParser(tidy.__doc__)
    p.add_option("--nogaps", default=False, action="store_true",
            help="Remove all gap lines [default: %default]")

    opts, args = p.parse_args(args)
    if len(args) != 2:
        sys.exit(p.print_help())

    agpfile, componentfasta = args
    originalagpfile = agpfile

    # Step 1: Trim terminal Ns
    # build() with --newagp writes a `.trimmed.agp`; the fasta it produces
    # is only a side product and is deleted right away.
    tmpfasta = "tmp.fasta"
    trimmed_agpfile = build([agpfile, componentfasta, tmpfasta,
                             "--newagp", "--novalidate"])
    os.remove(tmpfasta)
    agpfile = trimmed_agpfile
    agpfile = reindex([agpfile, "--inplace"])

    # Step 2: Merge adjacent gaps
    merged_agpfile = gaps([agpfile, "--merge"])
    os.remove(agpfile)

    # Step 3: Trim gaps at the end of object
    agpfile = merged_agpfile
    agp = AGP(agpfile)
    newagpfile = agpfile.replace(".agp", ".fixed.agp")
    fw = open(newagpfile, "w")
    for object, a in groupby(agp, key=lambda x: x.object):
        a = list(a)
        # Drop a leading gap line, if any (an object must not start with N's).
        if a[0].is_gap:
            g, a = a[0], a[1:]
            logging.debug("Trim beginning Ns({0}) of {1}".\
                    format(g.gap_length, object))
        # Likewise drop a trailing gap line.
        if a and a[-1].is_gap:
            a, g = a[:-1], a[-1]
            logging.debug("Trim trailing Ns({0}) of {1}".\
                    format(g.gap_length, object))
        print("\n".join(str(x) for x in a), file=fw)
    fw.close()
    os.remove(agpfile)

    # Step 4: Final reindex
    agpfile = newagpfile
    reindex_opts = [agpfile, "--inplace"]
    if opts.nogaps:
        reindex_opts += ["--nogaps"]
    agpfile = reindex(reindex_opts)

    # Name the final product after the ORIGINAL input file.
    tidyagpfile = originalagpfile.replace(".agp", ".tidy.agp")
    shutil.move(agpfile, tidyagpfile)

    logging.debug("File written to `{0}`.".format(tidyagpfile))
    return tidyagpfile
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def build(args):
    """ %prog build agpfile componentfasta targetfasta Build targetfasta based on info from agpfile """
    p = OptionParser(build.__doc__)
    p.add_option("--newagp", dest="newagp", default=False, action="store_true",
                 help="Check components to trim dangling N's [default: %default]")
    p.add_option("--novalidate", dest="novalidate", default=False,
                 action="store_true",
                 help="Don't validate the agpfile [default: %default]")
    opts, args = p.parse_args(args)

    if len(args) != 3:
        sys.exit(not p.print_help())

    agpfile, componentfasta, targetfasta = args

    # With --newagp, build_all() also writes a trimmed copy of the AGP.
    newagpfile = newagp = None
    if opts.newagp:
        assert agpfile.endswith(".agp")
        newagpfile = agpfile.replace(".agp", ".trimmed.agp")
        newagp = open(newagpfile, "w")

    # --novalidate skips per-line coordinate consistency checks.
    agp = AGP(agpfile, validate=(not opts.novalidate), sorted=True)
    agp.build_all(componentfasta=componentfasta, targetfasta=targetfasta,
                  newagp=newagp)
    logging.debug("Target fasta written to `{0}`.".format(targetfasta))

    return newagpfile
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def validate(args):
    """ %prog validate agpfile componentfasta targetfasta validate consistency between agpfile and targetfasta """
    p = OptionParser(validate.__doc__)
    opts, args = p.parse_args(args)

    try:
        agpfile, componentfasta, targetfasta = args
    except Exception as e:
        sys.exit(p.print_help())

    agp = AGP(agpfile)
    # `target` renamed from `build` — the old local shadowed the sibling
    # build() function.
    target = Fasta(targetfasta)
    components = Fasta(componentfasta, index=False)

    # Check every AGP line against the assembled target, one line at a time;
    # mismatches are logged rather than aborting the whole run.
    for aline in agp:
        try:
            objseq = target.sequence(dict(chr=aline.object,
                    start=aline.object_beg, stop=aline.object_end))
            if aline.is_gap:
                # A gap line must correspond to a run of N's of gap_length.
                assert objseq.upper() == aline.gap_length * 'N', \
                    "gap mismatch: %s" % aline
            else:
                compseq = components.sequence(dict(chr=aline.component_id,
                        start=aline.component_beg, stop=aline.component_end,
                        strand=aline.orientation))
                assert objseq.upper() == compseq.upper(), \
                    "sequence mismatch: %s" % aline
            logging.debug("%s:%d-%d verified" % (aline.object,
                    aline.object_beg, aline.object_end))
        except Exception as e:
            logging.error(e)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getNorthSouthClone(self, i):
    """ Returns the adjacent clone name from both sides. """
    # North = previous line (south=False), south = next line (default).
    return self.getAdjacentClone(i, south=False), self.getAdjacentClone(i)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def build_one(self, object, lines, fasta, fw, newagp=None):
    """ Construct molecule using component fasta sequence """
    components = []

    total_bp = 0
    for line in lines:
        if line.is_gap:
            seq = 'N' * line.gap_length
            # In newagp mode, gap lines pass through unchanged.
            if newagp:
                print(line, file=newagp)
        else:
            seq = fasta.sequence(dict(chr=line.component_id,
                    start=line.component_beg,
                    stop=line.component_end,
                    strand=line.orientation))
            # Check for dangling N's
            if newagp:
                trimNs(seq, line, newagp)

        components.append(seq)
        total_bp += len(seq)

        if self.validate:
            # Each line's object_end must equal the running length so far —
            # catches gaps/overlaps between consecutive AGP lines.
            assert total_bp == line.object_end, \
                "cumulative base pairs (%d) does not match (%d)" % \
                (total_bp, line.object_end)

    # Sequence output is only produced when NOT writing a trimmed AGP.
    if not newagp:
        rec = SeqRecord(Seq(''.join(components)), id=object, description="")
        SeqIO.write([rec], fw, "fasta")
        # Only log the larger (>1 Mb) objects to keep output manageable.
        if len(rec) > 1000000:
            logging.debug("Write object %s to `%s`" % (object, fw.name))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getAdjacentClone(self, i, south=True):
    """ Returns adjacent clone name, either the line before or after the current line. """
    # Bugfix: `xrange` is Python-2-only and raised NameError on Python 3;
    # `range` behaves identically here on both.
    # Scan away from i: southwards (increasing index) or northwards.
    rr = range(i + 1, len(self)) if south else range(i - 1, -1, -1)
    a = self[i]
    for ix in rr:
        x = self[ix]
        # Stop at the object boundary: no adjacent clone on this side.
        if x.object != a.object:
            break
        # First neighbor within the same object is the answer.
        return x
    return None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def genemark(args):
    """ %prog genemark species fastafile Train GENEMARK model given fastafile. GENEMARK self-trains so no trainig model gff file is needed. """
    p = OptionParser(genemark.__doc__)
    p.add_option("--junctions", help="Path to `junctions.bed` from Tophat2")
    p.set_home("gmes")
    p.set_cpus(cpus=32)
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    species, fastafile = args
    junctions = opts.junctions
    mhome = opts.gmes_home

    # GeneMark requires a per-user license key to be installed.
    license = op.expanduser("~/.gm_key")
    assert op.exists(license), "License key ({0}) not found!".format(license)

    cmd = "{0}/gmes_petap.pl --sequence {1}".format(mhome, fastafile)
    cmd += " --cores {0}".format(opts.cpus)
    if junctions:
        # With RNA-seq junctions available, run in ET (extrinsic) mode:
        # convert Tophat2 junctions to an introns GFF first.
        intronsgff = "introns.gff"
        if need_update(junctions, intronsgff):
            jcmd = "{0}/bet_to_gff.pl".format(mhome)
            jcmd += " --bed {0} --gff {1} --label Tophat2".\
                    format(junctions, intronsgff)
            sh(jcmd)
        cmd += " --ET {0} --et_score 10".format(intronsgff)
    else:
        # Otherwise fall back to fully self-trained ES mode.
        cmd += " --ES"

    sh(cmd)
    # Bugfix: the log message had an unterminated backtick quote.
    logging.debug("GENEMARK matrix written to `output/gmhmm.mod`")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def snap(args):
    """ %prog snap species gffile fastafile Train SNAP model given gffile and fastafile. Whole procedure taken from: <http://gmod.org/wiki/MAKER_Tutorial_2012> """
    p = OptionParser(snap.__doc__)
    p.set_home("maker")
    opts, args = p.parse_args(args)

    if len(args) != 3:
        sys.exit(not p.print_help())

    species, gffile, fastafile = args
    mhome = opts.maker_home
    # All work happens inside a dedicated `snap/` subdirectory.
    snapdir = "snap"
    mkdir(snapdir)

    cwd = os.getcwd()
    os.chdir(snapdir)

    newgffile = "training.gff3"
    logging.debug("Construct GFF file combined with sequence ...")
    # maker2zff expects a GFF3 with the sequence appended after ##FASTA.
    sh("cat ../{0} > {1}".format(gffile, newgffile))
    sh('echo "##FASTA" >> {0}'.format(newgffile))
    sh("cat ../{0} >> {1}".format(fastafile, newgffile))

    logging.debug("Make models ...")
    # Standard SNAP training pipeline (see MAKER tutorial in docstring):
    # zff export -> categorize -> export unique genes -> forge -> assemble HMM.
    sh("{0}/src/bin/maker2zff training.gff3".format(mhome))
    sh("{0}/exe/snap/fathom -categorize 1000 genome.ann genome.dna".format(mhome))
    sh("{0}/exe/snap/fathom -export 1000 -plus uni.ann uni.dna".format(mhome))
    sh("{0}/exe/snap/forge export.ann export.dna".format(mhome))
    sh("{0}/exe/snap/hmm-assembler.pl {1} . > {1}.hmm".format(mhome, species))

    os.chdir(cwd)
    logging.debug("SNAP matrix written to `{0}/{1}.hmm`".format(snapdir, species))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def augustus(args):
    """ %prog augustus species gffile fastafile Train AUGUSTUS model given gffile and fastafile. Whole procedure taken from: <http://www.molecularevolution.org/molevolfiles/exercises/augustus/training.html> """
    p = OptionParser(augustus.__doc__)
    p.add_option("--autotrain", default=False, action="store_true",
                 help="Run autoAugTrain.pl to iteratively train AUGUSTUS")
    p.set_home("augustus")
    opts, args = p.parse_args(args)

    if len(args) != 3:
        sys.exit(not p.print_help())

    species, gffile, fastafile = args
    mhome = opts.augustus_home
    # All work happens inside a dedicated `augustus/` subdirectory.
    augdir = "augustus"

    cwd = os.getcwd()
    mkdir(augdir)
    os.chdir(augdir)
    # AUGUSTUS stores species parameters under its own config tree; a stale
    # profile must be removed before re-training.
    target = "{0}/config/species/{1}".format(mhome, species)

    if op.exists(target):
        logging.debug("Removing existing target `{0}`".format(target))
        sh("rm -rf {0}".format(target))

    sh("{0}/scripts/new_species.pl --species={1}".format(mhome, species))
    # Convert GFF + FASTA to GenBank with 1kb flanking regions.
    sh("{0}/scripts/gff2gbSmallDNA.pl ../{1} ../{2} 1000 raw.gb".\
            format(mhome, gffile, fastafile))
    # Initial training pass; genes that fail are collected from stderr and
    # filtered out before the real training set is fixed.
    sh("{0}/bin/etraining --species={1} raw.gb 2> train.err".\
            format(mhome, species))
    sh("cat train.err | perl -pe 's/.*in sequence (\S+): .*/$1/' > badgenes.lst")
    sh("{0}/scripts/filterGenes.pl badgenes.lst raw.gb > training.gb".\
            format(mhome))
    # Report how many LOCUS entries survived the filtering.
    sh("grep -c LOCUS raw.gb training.gb")

    # autoAugTrain failed to execute, disable for now
    if opts.autotrain:
        sh("rm -rf {0}".format(target))
        sh("{0}/scripts/autoAugTrain.pl --trainingset=training.gb --species={1}".\
                format(mhome, species))

    os.chdir(cwd)
    # Snapshot the trained species profile next to the working directory.
    sh("cp -r {0} augustus/".format(target))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def merger(args):
    """ %prog merger layout gkpStore contigs.fasta Merge reads into one contig. """
    p = OptionParser(merger.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 3:
        sys.exit(not p.print_help())

    layout, gkpstore, contigs = args
    fp = open(layout)
    pf = "0"  # working prefix; its files are wiped and rebuilt per unitig
    iidfile = pf + ".iids"
    for i, row in enumerate(fp):
        logging.debug("Read unitig {0}".format(i))
        fw = open(iidfile, "w")
        # NOTE(review): this rebinding shadows the `layout` filename argument;
        # each row is a '|'-separated list of read IIDs.
        layout = row.split("|")
        print("\n".join(layout), file=fw)
        fw.close()
        cmd = "gatekeeper -iid {0}.iids -dumpfasta {0} {1}".format(pf, gkpstore)
        sh(cmd)

        fastafile = "{0}.fasta".format(pf)
        newfastafile = "{0}.new.fasta".format(pf)
        # `format` here is the sibling fasta-reformatting command in this
        # module (shadows the builtin): renumber sequence IDs sequentially.
        format([fastafile, newfastafile, "--sequential=replace", \
                    "--sequentialoffset=1", "--nodesc"])
        fasta([newfastafile])
        sh("rm -rf {0}".format(pf))

        # Assemble the reads of this unitig with runCA + pbutgcns consensus.
        cmd = "runCA {0}.frg -p {0} -d {0} consensus=pbutgcns".format(pf)
        cmd += " unitigger=bogart doFragmentCorrection=0 doUnitigSplitting=0"
        sh(cmd)
        outdir = "{0}/9-terminator".format(pf)

        # Append every assembled sequence class to the cumulative output.
        cmd = "cat {0}/{1}.ctg.fasta {0}/{1}.deg.fasta {0}/{1}.singleton.fasta"\
                .format(outdir, pf)
        sh(cmd, outfile=contigs, append=True)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.