text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sort(args):
    """
    %prog sort bedfile

    Sort bed file to have ascending order of seqid, then start.
    Delegates the actual work to the UNIX `sort` command.

    Returns the path of the sorted bed file.
    """
    p = OptionParser(sort.__doc__)
    p.add_option("-i", "--inplace", dest="inplace",
                 default=False, action="store_true",
                 help="Sort bed file in place [default: %default]")
    p.add_option("-u", dest="unique",
                 default=False, action="store_true",
                 help="Uniqify the bed file")
    p.add_option("--accn", default=False, action="store_true",
                 help="Sort based on the accessions [default: %default]")
    p.set_outfile(outfile=None)
    p.set_tmpdir()
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    bedfile, = args
    inplace = opts.inplace
    # A ".sorted." infix is taken as evidence the file is already sorted,
    # unless we were explicitly asked to sort in place anyway.
    if not inplace and ".sorted." in bedfile:
        return bedfile

    sortedbed = opts.outfile
    if inplace:
        sortedbed = bedfile
    elif opts.outfile is None:
        pf, sf = op.basename(bedfile).rsplit(".", 1)
        sortedbed = pf + ".sorted." + sf

    # Default keys: seqid, start (numeric), end (numeric), accession;
    # with --accn, the accession column leads instead.
    sortopt = "-k1,1 -k2,2n -k3,3n -k4,4" if not opts.accn else \
        "-k4,4 -k1,1 -k2,2n -k3,3n"
    cmd = "sort"
    if opts.tmpdir:
        cmd += " -T {0}".format(opts.tmpdir)
    if opts.unique:
        cmd += " -u"
    cmd += " {0} {1} -o {2}".format(sortopt, bedfile, sortedbed)

    # `sort -o` permits output == input, so the in-place case is safe
    if inplace or need_update(bedfile, sortedbed):
        sh(cmd)

    return sortedbed
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mates(args):
    """
    %prog mates bedfile

    Generate the mates file by inferring pairing from the read names.
    Also writes a companion bed file restricted to properly paired reads.

    Returns (matesfile, matesbedfile).
    """
    p = OptionParser(mates.__doc__)
    p.add_option("--lib", default=False, action="store_true",
                 help="Output library information along with pairs [default: %default]")
    p.add_option("--nointra", default=False, action="store_true",
                 help="Remove mates that are intra-scaffold [default: %default]")
    p.add_option("--prefix", default=False, action="store_true",
                 help="Only keep links between IDs with same prefix [default: %default]")
    p.set_mates()
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    bedfile, = args
    rclip = opts.rclip
    # Strip the trailing `rclip` characters (e.g. "/1", "/2") so both
    # reads of a pair map to the same key.
    key = (lambda x: x.accn[:-rclip]) if rclip else (lambda x: x.accn)
    bed = Bed(bedfile, key=key)

    pf = bedfile.rsplit(".", 1)[0]
    matesfile = pf + ".mates"
    lib = pf if opts.lib else None
    fw = open(matesfile, "w")
    if lib:
        # Estimate the insert size range (mean +/- 2 sd) from observed
        # pairs and emit a "library" header line first.
        bedfile, stats = pairs([bedfile, \
            "--rclip={0}".format(rclip),
            "--cutoff={0}".format(opts.cutoff)])
        sv = int(2 * stats.sd)
        mindist = max(stats.mean - sv, 1)
        maxdist = stats.mean + sv
        print("\t".join(str(x) for x in \
            ("library", pf, mindist, maxdist)), file=fw)

    num_fragments = num_pairs = 0
    matesbedfile = matesfile + ".bed"
    fwm = open(matesbedfile, "w")
    for pe, lines in groupby(bed, key=key):
        lines = list(lines)
        # Anything other than exactly two reads per key is an unpaired
        # fragment and is discarded.
        if len(lines) != 2:
            num_fragments += len(lines)
            continue

        a, b = lines
        if opts.nointra and a.seqid == b.seqid:
            continue

        # Use --prefix to limit the links between seqids with the same prefix
        # For example, contigs of the same BAC, mth2-23j10_001, mth-23j10_002
        if opts.prefix:
            aprefix = a.seqid.split("_")[0]
            bprefix = b.seqid.split("_")[0]
            if aprefix != bprefix:
                continue

        num_pairs += 1
        pair = [a.accn, b.accn]
        if lib:
            pair.append(lib)

        print("\t".join(pair), file=fw)
        print(a, file=fwm)
        print(b, file=fwm)

    logging.debug("Discard {0} frags and write {1} pairs to `{2}` and `{3}`.".\
        format(num_fragments, num_pairs, matesfile, matesbedfile))

    fw.close()
    fwm.close()

    return matesfile, matesbedfile
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def swapped(self):
    """
    Return a new BlastLine with query and subject swapped.
    """
    # First 12 slots correspond to the standard BLAST -m8 columns
    args = [getattr(self, attr) for attr in BlastLine.__slots__[:12]]
    args[0:2] = [self.subject, self.query]
    args[6:10] = [self.sstart, self.sstop, self.qstart, self.qstop]
    # For minus-strand hits, flip what are now the subject coordinates
    # (presumably to keep them ascending — TODO confirm against BlastLine)
    if self.orientation == '-':
        args[8], args[9] = args[9], args[8]
    b = "\t".join(str(x) for x in args)
    return BlastLine(b)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def gff(args):
    """
    %prog gff seq.gbk

    Convert Genbank file to GFF and FASTA file.
    The Genbank file can contain multiple records.
    """
    parser = OptionParser(gff.__doc__)
    opts, args = parser.parse_args(args)

    if len(args) != 1:
        sys.exit(not parser.print_help())

    (gbkfile,) = args
    # MultiGenBank performs the conversion as a side effect of parsing
    MultiGenBank(gbkfile)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def tee_lookahead(t, i):
    """
    Inspect the i-th upcoming value from a tee object while leaving the
    tee object at its current position.

    Raises IndexError if the underlying iterator doesn't have enough
    values.
    """
    # Work on a copy so the original tee position is untouched
    peek = islice(t.__copy__(), i, None)
    try:
        return next(peek)
    except StopIteration:
        raise IndexError(i)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def uniq(args):
    """
    %prog uniq fastqfile

    Retain only first instance of duplicate reads. Duplicate is defined
    as having the same read name.
    """
    p = OptionParser(uniq.__doc__)
    p.set_outfile()
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())

    (fastqfile,) = args
    out_fh = must_open(opts.outfile, "w")
    n_dup = n_total = 0
    seen_names = set()
    for rec in iter_fastq(fastqfile):
        n_total += 1
        if rec is None:  # iterator signals EOF with None
            break
        read_name = rec.name
        if read_name in seen_names:
            n_dup += 1
            continue
        seen_names.add(read_name)
        print(rec, file=out_fh)
    logging.debug("Removed duplicate reads: {}".\
        format(percentage(n_dup, n_total)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def suffix(args):
    """
    %prog suffix fastqfile CAG

    Filter reads based on suffix.
    """
    p = OptionParser(suffix.__doc__)
    p.set_outfile()
    opts, args = p.parse_args(args)
    if len(args) != 2:
        sys.exit(not p.print_help())

    fastqfile, sf = args
    out_fh = must_open(opts.outfile, "w")
    n_total = n_kept = 0
    for rec in iter_fastq(fastqfile):
        n_total += 1
        if rec is None:  # EOF sentinel
            break
        if rec.seq.endswith(sf):
            print(rec, file=out_fh)
            n_kept += 1
    logging.debug("Selected reads with suffix {0}: {1}".\
        format(sf, percentage(n_kept, n_total)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def readlen(args):
    """
    %prog readlen fastqfile

    Calculate read length, will only try the first N reads. Output min,
    max, and avg for each file. Returns the max length as an int.
    """
    p = OptionParser(readlen.__doc__)
    p.set_firstN()
    p.add_option("--silent", default=False, action="store_true",
                 help="Do not print read length stats")
    p.add_option("--nocheck", default=False, action="store_true",
                 help="Do not check file type suffix")
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())

    (f,) = args
    # De Morgan of the original check: skip unless --nocheck or a fastq suffix
    if not (opts.nocheck or is_fastq(f)):
        logging.debug("File `{}` does not endswith .fastq or .fq".format(f))
        return 0

    stats = calc_readlen(f, opts.firstN)
    if not opts.silent:
        fields = (f, stats.min, stats.max, stats.mean, stats.median)
        print("\t".join(str(x) for x in fields))

    return int(stats.max)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fasta(args):
    """
    %prog fasta fastqfiles

    Convert fastq to fasta and qual file.

    Returns (fastafile, qualfile); qualfile is None when seqtk is used
    or when the input already looks like FASTA.
    """
    p = OptionParser(fasta.__doc__)
    p.add_option("--seqtk", default=False, action="store_true",
                 help="Use seqtk to convert")
    p.set_outdir()
    p.set_outfile(outfile=None)
    opts, args = p.parse_args(args)

    if len(args) < 1:
        sys.exit(not p.print_help())

    fastqfiles = args
    outdir = opts.outdir
    if outdir and outdir != ".":
        mkdir(outdir)

    # Output names derive from the first input file
    fastqfile = fastqfiles[0]
    pf = op.basename(fastqfile)
    gzinput = pf.endswith(".gz")
    if gzinput:
        pf = pf.rsplit(".", 1)[0]  # drop the .gz layer first

    pf, sf = pf.rsplit(".", 1)
    if sf not in ("fq", "fastq"):
        logging.debug("Assumed FASTA: suffix not `fq` or `fastq`")
        return fastqfile, None

    fastafile, qualfile = pf + ".fasta", pf + ".qual"
    outfile = opts.outfile or fastafile
    outfile = op.join(outdir, outfile)
    if opts.seqtk:
        if need_update(fastqfiles, outfile):
            for i, fastqfile in enumerate(fastqfiles):
                cmd = "seqtk seq -A {0} -L 30 -l 70".format(fastqfile)
                # First one creates file, following ones append to it
                sh(cmd, outfile=outfile, append=i)
        else:
            logging.debug("Outfile `{0}` already exists.".format(outfile))
        return outfile, None

    # NOTE(review): with multiple inputs, each SeqIO.convert call appears
    # to overwrite fastafile/qualfile — presumably single-file use; verify.
    for fastqfile in fastqfiles:
        SeqIO.convert(fastqfile, "fastq", fastafile, "fasta")
        SeqIO.convert(fastqfile, "fastq", qualfile, "qual")

    return fastafile, qualfile
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def filter(args):
    """
    %prog filter paired.fastq

    Filter to get high qv reads. Use interleaved format (one file) or
    paired format (two files) to filter on paired reads.

    Note: the name intentionally mirrors the CLI subcommand and shadows
    the builtin `filter` within this module.
    """
    p = OptionParser(filter.__doc__)
    p.add_option("-q", dest="qv", default=20, type="int",
                 help="Minimum quality score to keep [default: %default]")
    p.add_option("-p", dest="pct", default=95, type="int",
                 help="Minimum percent of bases that have [-q] quality "\
                      "[default: %default]")

    opts, args = p.parse_args(args)
    if len(args) not in (1, 2):
        sys.exit(not p.print_help())

    if len(args) == 1:
        r1 = r2 = args[0]  # interleaved: both mates come from one file
    else:
        r1, r2 = args

    qv = opts.qv
    pct = opts.pct

    offset = guessoffset([r1])
    qvchar = chr(offset + qv)  # ASCII threshold for a "good" base
    logging.debug("Call base qv >= {0} as good.".format(qvchar))
    outfile = r1.rsplit(".", 1)[0] + ".q{0}.paired.fastq".format(qv)
    fw = open(outfile, "w")

    p1fp, p2fp = FastqPairedIterator(r1, r2)
    while True:
        a = list(islice(p1fp, 4))  # one 4-line fastq record
        if not a:
            break
        b = list(islice(p2fp, 4))
        q1 = a[-1].rstrip()
        q2 = b[-1].rstrip()

        # Keep the pair only if BOTH mates pass the quality threshold
        if isHighQv(q1, qvchar, pct=pct) and isHighQv(q2, qvchar, pct=pct):
            fw.writelines(a)
            fw.writelines(b)

    # BUGFIX: close the output handle so the file is fully flushed even
    # when this function is called as a library routine.
    fw.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def shuffle(args):
    """
    %prog shuffle p1.fastq p2.fastq

    Shuffle pairs into interleaved format.
    """
    p = OptionParser(shuffle.__doc__)
    p.set_tag()
    opts, args = p.parse_args(args)
    if len(args) != 2:
        sys.exit(not p.print_help())

    p1, p2 = args
    pairsfastq = pairspf((p1, p2)) + ".fastq"
    tag = opts.tag

    fh1 = must_open(p1)
    fh2 = must_open(p2)
    out_fh = must_open(pairsfastq, "w")
    nreads = 0
    while True:
        rec1 = list(islice(fh1, 4))  # one 4-line fastq record per mate
        if not rec1:
            break
        rec2 = list(islice(fh2, 4))
        if tag:
            # Both mates share the first mate's header, tagged /1 and /2
            header = rec1[0].rstrip()
            rec1[0] = header + "/1\n"
            rec2[0] = header + "/2\n"
        out_fh.writelines(rec1)
        out_fh.writelines(rec2)
        nreads += 2
    out_fh.close()

    # Each /1 or /2 tag adds two characters per read
    extra = nreads * 2 if tag else 0
    checkShuffleSizes(p1, p2, pairsfastq, extra=extra)

    logging.debug("File `{0}` verified after writing {1} reads.".\
        format(pairsfastq, nreads))
    return pairsfastq
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def split(args):
    """
    %prog split pairs.fastq

    Split shuffled pairs into `.1.fastq` and `.2.fastq`, using `sed`.
    Can work on gzipped file.

    <http://seqanswers.com/forums/showthread.php?t=13776>
    """
    from jcvi.apps.grid import Jobs

    p = OptionParser(split.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    pairsfastq, = args
    gz = pairsfastq.endswith(".gz")
    pf = pairsfastq.replace(".gz", "").rsplit(".", 1)[0]
    p1 = pf + ".1.fastq"
    p2 = pf + ".2.fastq"

    cmd = "zcat" if gz else "cat"
    # GNU sed address `1~8` / `5~8` selects every 8th line starting at
    # line 1 / 5; `N;N;N` pulls in the next three lines, so each 4-line
    # record goes alternately to the first-mate and second-mate stream.
    p1cmd = cmd + " {0} | sed -ne '1~8{{N;N;N;p}}'".format(pairsfastq)
    p2cmd = cmd + " {0} | sed -ne '5~8{{N;N;N;p}}'".format(pairsfastq)

    if gz:
        p1cmd += " | gzip"
        p2cmd += " | gzip"
        p1 += ".gz"
        p2 += ".gz"

    p1cmd += " > " + p1
    p2cmd += " > " + p2

    # Run both extraction pipelines in parallel
    args = [(p1cmd, ), (p2cmd, )]
    m = Jobs(target=sh, args=args)
    m.run()

    checkShuffleSizes(p1, p2, pairsfastq)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def guessoffset(args):
    """
    %prog guessoffset fastqfile

    Guess the quality offset of the fastqfile, whether 33 or 64.
    See encoding schemes: <http://en.wikipedia.org/wiki/FASTQ_format>

      S - Sanger        Phred+33, raw reads typically (0, 40)
      X - Solexa        Solexa+64, raw reads typically (-5, 40)
      I - Illumina 1.3+ Phred+64, raw reads typically (0, 40)
      J - Illumina 1.5+ Phred+64, raw reads typically (3, 40)
      L - Illumina 1.8+ Phred+33, raw reads typically (0, 40)
    """
    p = OptionParser(guessoffset.__doc__)
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())

    (fastqfile,) = args
    ai = iter_fastq(fastqfile)
    rec = next(ai)
    offset = 64  # assume Illumina until evidence says otherwise
    while rec:
        quality = rec.quality
        # Values below 59 only occur with offset 33; above 74 only with 64
        lowcounts = sum(1 for x in quality if x < 59)
        highcounts = sum(1 for x in quality if x > 74)
        diff = highcounts - lowcounts
        if diff > 10:
            break
        if diff < -10:
            offset = 33
            break
        rec = next(ai)

    if offset == 33:
        print("Sanger encoding (offset=33)", file=sys.stderr)
    elif offset == 64:
        print("Illumina encoding (offset=64)", file=sys.stderr)

    return offset
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def format(args):
    """
    %prog format fastqfile

    Format FASTQ file. Currently provides option to convert FASTQ
    header from one dialect to another.

    Note: the name intentionally mirrors the CLI subcommand and shadows
    the builtin `format` within this module.
    """
    p = OptionParser(format.__doc__)
    p.add_option("--convert", default=None, choices=[">=1.8", "<1.8", "sra"],
                 help="Convert fastq header to a different format" +
                      " [default: %default]")
    p.set_tag(specify_tag=True)
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())

    fastqfile, = args
    ai = iter_fastq(fastqfile)
    rec = next(ai)  # iter_fastq presumably yields None at EOF — ends loop
    dialect = None
    while rec:
        h = FastqHeader(rec.header)
        if not dialect:
            # Dialect is detected once, from the first record
            dialect = h.dialect
            logging.debug("Input fastq dialect: `{0}`".format(dialect))
            if opts.convert:
                logging.debug("Output fastq dialect: `{0}`".format(opts.convert))

        rec.name = h.format_header(dialect=opts.convert, tag=opts.tag)

        print(rec)
        rec = next(ai)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def trim(args):
    """
    %prog trim fastqfile

    Wraps `fastx_trimmer` to trim from begin or end of reads.
    """
    p = OptionParser(trim.__doc__)
    p.add_option("-f", dest="first", default=0, type="int",
                 help="First base to keep. Default is 1.")
    p.add_option("-l", dest="last", default=0, type="int",
                 help="Last base to keep. Default is entire read.")
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())

    (fastqfile,) = args
    base = op.basename(fastqfile)
    # Gzipped inputs keep their .gz layer on the output name
    if fastqfile.endswith(".gz"):
        fq = base.rsplit(".", 2)[0] + ".ntrimmed.fastq.gz"
    else:
        fq = base.rsplit(".", 1)[0] + ".ntrimmed.fastq"

    cmd = "fastx_trimmer -Q33 "
    if opts.first:
        cmd += "-f {0.first} ".format(opts)
    if opts.last:
        cmd += "-l {0.last} ".format(opts)

    sh(cmd, infile=fastqfile, outfile=fq)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def catread(args):
    """
    %prog catread fastqfile1 fastqfile2

    Concatenate paired end reads into one. Useful for example to do
    single-end mapping and perform filtering on the whole read pair
    level.
    """
    p = OptionParser(catread.__doc__)
    opts, args = p.parse_args(args)
    if len(args) != 2:
        sys.exit(not p.print_help())

    r1, r2 = args
    p1fp, p2fp = FastqPairedIterator(r1, r2)
    outfile = pairspf((r1, r2)) + ".cat.fastq"
    fw = must_open(outfile, "w")
    while True:
        rec1 = list(islice(p1fp, 4))
        if not rec1:
            break
        title1, seq1, _, qual1 = rec1
        title2, seq2, _, qual2 = list(islice(p2fp, 4))
        # Keep the first mate's title; concatenate sequences and quals
        joined = "\n".join((title1.strip(), seq1.strip() + seq2.strip(),
                            "+", qual1.strip() + qual2.strip()))
        print(joined, file=fw)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def splitread(args):
    """
    %prog splitread fastqfile

    Split fastqfile into two read fastqfiles, cut in the middle.
    """
    p = OptionParser(splitread.__doc__)
    p.add_option("-n", dest="n", default=76, type="int",
                 help="Split at N-th base position [default: %default]")
    p.add_option("--rc", default=False, action="store_true",
                 help="Reverse complement second read [default: %default]")
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    pairsfastq, = args

    base = op.basename(pairsfastq).split(".")[0]
    fq1 = base + ".1.fastq"
    fq2 = base + ".2.fastq"
    fw1 = must_open(fq1, "w")
    fw2 = must_open(fq2, "w")

    fp = must_open(pairsfastq)
    n = opts.n
    # Reads shorter than 1.6 * n are considered too short to split.
    # NOTE(review): true division makes this a float under Python 3; the
    # `<` comparison still works, but the threshold differs from the old
    # Python 2 integer division by up to 1 — confirm intended cutoff.
    minsize = n * 8 / 5

    for name, seq, qual in FastqGeneralIterator(fp):
        if len(seq) < minsize:
            logging.error("Skipping read {0}, length={1}".format(name, len(seq)))
            continue

        name = "@" + name
        rec1 = FastqLite(name, seq[:n], qual[:n])
        rec2 = FastqLite(name, seq[n:], qual[n:])
        if opts.rc:
            rec2.rc()

        print(rec1, file=fw1)
        print(rec2, file=fw2)

    logging.debug("Reads split into `{0},{1}`".format(fq1, fq2))
    fw1.close()
    fw2.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def size(args):
    """
    %prog size fastqfile

    Find the total base pairs in a list of fastq files.
    """
    p = OptionParser(size.__doc__)
    opts, args = p.parse_args(args)
    if len(args) < 1:
        sys.exit(not p.print_help())

    grand_size = grand_nrecords = 0
    for f in args:
        nbases = nrecords = 0
        for rec in iter_fastq(f):
            if not rec:  # EOF sentinel
                break
            nrecords += 1
            nbases += len(rec)
        print(" ".join(str(x) for x in \
            (op.basename(f), nrecords, nbases)))
        grand_nrecords += nrecords
        grand_size += nbases

    # Only print the grand total when more than one file was given
    if len(args) > 1:
        print(" ".join(str(x) for x in \
            ("Total", grand_nrecords, grand_size)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def convert(args):
    """
    %prog convert in.fastq

    illumina fastq quality encoding uses offset 64, and sanger uses 33.
    This script creates a new file with the correct encoding. Output
    gzipped file if input is also gzipped.

    Returns the output fastq path.
    """
    p = OptionParser(convert.__doc__)
    p.set_phred()
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    infastq, = args
    # Detect the current offset unless given via --phred
    phred = opts.phred or str(guessoffset([infastq]))
    ophred = {"64": "33", "33": "64"}[phred]  # the target (output) offset

    gz = infastq.endswith(".gz")
    outfastq = infastq.rsplit(".", 1)[0] if gz else infastq
    pf, sf = outfastq.rsplit(".", 1)
    # e.g. reads.fastq -> reads.q33.fastq (output offset in the name)
    outfastq = "{0}.q{1}.{2}".format(pf, ophred, sf)
    if gz:
        outfastq += ".gz"

    fin = "illumina" if phred == "64" else "sanger"
    fout = "sanger" if phred == "64" else "illumina"

    # EMBOSS `seqret` performs the actual re-encoding
    seqret = "seqret"
    if infastq.endswith(".gz"):
        cmd = "zcat {0} | ".format(infastq)
        cmd += seqret + " fastq-{0}::stdin fastq-{1}::stdout".\
                format(fin, fout)
    else:
        cmd = seqret + " fastq-{0}::{1} fastq-{2}::stdout".\
                format(fin, infastq, fout)

    sh(cmd, outfile=outfastq)

    return outfastq
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pairinplace(args):
    """
    %prog pairinplace bulk.fastq

    Pair up the records in bulk.fastq by comparing the names for
    adjancent records. If they match, print to bulk.pairs.fastq, else
    print to bulk.frags.fastq.

    Returns the pairs file path.
    """
    from jcvi.utils.iter import pairwise

    p = OptionParser(pairinplace.__doc__)
    p.set_rclip()
    p.set_tag()
    p.add_option("--base",
                 help="Base name for the output files [default: %default]")
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    fastqfile, = args
    base = opts.base or op.basename(fastqfile).split(".")[0]

    frags = base + ".frags.fastq"
    pairs = base + ".pairs.fastq"
    if fastqfile.endswith(".gz"):
        frags += ".gz"
        pairs += ".gz"

    fragsfw = must_open(frags, "w")
    pairsfw = must_open(pairs, "w")

    # N trailing characters (e.g. "/1") are ignored when comparing names
    N = opts.rclip
    tag = opts.tag
    strip_name = (lambda x: x[:-N]) if N else None

    fh_iter = iter_fastq(fastqfile, key=strip_name)
    skipflag = False  # controls the iterator skip
    for a, b in pairwise(fh_iter):
        if b is None:  # hit the eof
            break

        if skipflag:
            # `b` of the previous window was already written as a mate
            skipflag = False
            continue

        if a.name == b.name:
            if tag:
                a.name += "/1"
                b.name += "/2"
            print(a, file=pairsfw)
            print(b, file=pairsfw)
            skipflag = True
        else:
            print(a, file=fragsfw)

    # don't forget the last one, when b is None
    # NOTE(review): if the input yields no records at all, `a` would be
    # unbound here — presumably iter_fastq always yields at least the
    # EOF sentinel; verify.
    if not skipflag:
        print(a, file=fragsfw)

    logging.debug("Reads paired into `%s` and `%s`" % (pairs, frags))
    return pairs
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fromsra(args):
    """
    %prog fromsra srafile

    Convert sra file to fastq using the sratoolkit `fastq-dump`.
    """
    p = OptionParser(fromsra.__doc__)
    p.add_option("--paired", default=False, action="store_true",
                 help="Specify if library layout is paired-end " + \
                      "[default: %default]")
    p.add_option("--compress", default=None, choices=["gzip", "bzip2"],
                 help="Compress output fastq files [default: %default]")
    p.set_outdir()
    p.set_grid()

    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())

    (srafile,) = args

    script_path = which("fastq-dump")
    if not script_path:
        logging.error("Cannot find `fastq-dump` in the PATH")
        sys.exit()

    # Assemble the fastq-dump command from the selected options
    pieces = [script_path]
    if opts.compress:
        pieces.append("--{0}".format(opts.compress))
    if opts.paired:
        pieces.append("--split-files")
    if opts.outdir:
        pieces.append("--outdir {0}".format(opts.outdir))
    pieces.append(srafile)

    sh(" ".join(pieces), grid=opts.grid)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def blast(args):
    """
    %prog blast btabfile

    Convert to BLAST -m8 format.
    """
    p = OptionParser(blast.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    (btabfile,) = args
    for row in Btab(btabfile):
        print(row.blastline)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bed(args):
    """
    %prog bed btabfile

    Convert btab to bed format.
    """
    from jcvi.formats.blast import BlastLine

    p = OptionParser(bed.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    (btabfile,) = args
    # Round-trip each row through BlastLine to get its bed representation
    for row in Btab(btabfile):
        print(BlastLine(row.blastline).bedline)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def gff(args):
    """
    %prog gff btabfile

    Convert btab file generated by AAT to gff3 format.
    """
    from jcvi.utils.range import range_minmax
    from jcvi.formats.gff import valid_gff_parent_child, valid_gff_type

    p = OptionParser(gff.__doc__)
    p.add_option("--source", default=None, help="Specify GFF source." +
                 " By default, it picks algorithm used to generate btab file." +
                 " [default: %default]")
    p.add_option("--type", default="protein_match", choices=valid_gff_type,
                 help="GFF feature type [default: %default]")
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    btabfile, = args
    btabdict = {}
    btab = Btab(btabfile, aat_dialect=True)
    osource = opts.source or "aat"
    otype = opts.type
    octype = valid_gff_parent_child[otype]  # child type for this parent type
    nargs = 0  # column count of the btab rows; set from the records below
    for b in btab:
        nargs = b.nargs
        # One parent feature per (query, chain); the id embeds the chain number
        id = b.query + "-" + otype + "{0:05d}".format(b.chainNum)
        key = b.key
        if key not in btabdict:
            btabdict[key] = {
                'id': id,
                'method': b.method,
                'query': b.query,
                'subject': b.subject,
                'strand': b.qStrand,
                'sDesc': b.sDesc,
                'coords': [],
                'children': [],
            }

        btabdict[key]['coords'].append((b.qStart, b.qStop))
        btabdict[key]['children'].append(
            b.gffline(source=osource, type=octype, id=id))

    # BUGFIX: dict.itervalues() is Python 2-only and raises AttributeError
    # under Python 3 (this module otherwise uses Python 3 idioms); use
    # values() instead.
    for v in btabdict.values():
        # Build a synthetic parent line spanning all child coordinates
        b = BtabLine("\t".join(str(x) for x in [0] * nargs), aat_dialect=True)

        id = v['id']
        b.query = v['query']
        b.method = v['method']
        b.subject = v['subject']
        b.qStrand = v['strand']
        b.sDesc = v['sDesc']
        b.qStart, b.qStop = range_minmax(v['coords'])

        print(b.gffline(source=osource, type=otype, primary_tag="ID", id=id))
        print("\n".join(v['children']))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def batch_taxonomy(list_of_taxids):
    """
    Convert a list of taxids to Latin (scientific) names, yielding one
    name per taxid via NCBI Entrez taxonomy lookups.
    """
    for taxid in list_of_taxids:
        handle = Entrez.efetch(db='Taxonomy', id=taxid, retmode="xml")
        yield Entrez.read(handle)[0]["ScientificName"]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def batch_entrez(list_of_terms, db="nuccore", retmax=1, rettype="fasta",
                 batchsize=1, email=myEmail):
    """
    Retrieve multiple rather than a single record.

    For each search term, runs an Entrez esearch followed by efetch,
    retrying forever on transient network/parse errors.

    Yields tuples of (comma-joined ids, number of ids, term, fetch_handle).
    """
    for term in list_of_terms:
        logging.debug("Search term %s" % term)
        success = False
        ids = None
        if not term:
            continue

        # Retry the search until it succeeds (network hiccups are common)
        while not success:
            try:
                search_handle = Entrez.esearch(db=db, retmax=retmax, term=term)
                rec = Entrez.read(search_handle)
                success = True
                ids = rec["IdList"]
            except (HTTPError, URLError,
                    RuntimeError, KeyError) as e:
                logging.error(e)
                logging.debug("wait 5 seconds to reconnect...")
                time.sleep(5)

        if not ids:
            logging.error("term {0} not found".format(term))
            continue

        assert ids
        nids = len(ids)
        if nids > 1:
            logging.debug("A total of {0} results found.".format(nids))

        if batchsize != 1:
            logging.debug("Use a batch size of {0}.".format(batchsize))

        # Group ids into fixed-size batches; one efetch per batch
        ids = list(grouper(ids, batchsize))

        for id in ids:
            id = [x for x in id if x]  # drop grouper's fill values
            size = len(id)
            id = ",".join(id)

            success = False
            while not success:
                try:
                    fetch_handle = Entrez.efetch(db=db, id=id, rettype=rettype,
                                                 email=email)
                    success = True
                except (HTTPError, URLError,
                        RuntimeError) as e:
                    logging.error(e)
                    logging.debug("wait 5 seconds to reconnect...")
                    time.sleep(5)

            yield id, size, term, fetch_handle
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ensembl(args):
    """
    %prog ensembl species

    Retrieve genomes and annotations from ensembl FTP. Available
    species listed below. Use comma to give a list of species to
    download. For example:

    $ %prog ensembl danio_rerio,gasterosteus_aculeatus
    """
    p = OptionParser(ensembl.__doc__)
    p.add_option("--version", default="75",
                 help="Ensembl version [default: %default]")
    opts, args = p.parse_args(args)
    version = opts.version

    url = "ftp://ftp.ensembl.org/pub/release-{0}/".format(version)
    fasta_url = url + "fasta/"

    # Species are the FTP directory entries without a dot in the name
    valid_species = [x for x in ls_ftp(fasta_url) if "." not in x]

    # Append the live species list to the usage string
    doc = "\n".join((ensembl.__doc__, tile(valid_species)))
    p.set_usage(doc)

    if len(args) != 1:
        sys.exit(not p.print_help())

    (species,) = args
    for sp in species.split(","):
        download_species_ensembl(sp, valid_species, url)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_first_rec(fastafile):
    """
    Returns the first record in the fastafile.
    """
    recs = list(SeqIO.parse(fastafile, "fasta"))
    if len(recs) > 1:
        logging.debug("{0} records found in {1}, using the first one".
                      format(len(recs), fastafile))
    return recs[0]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bisect(args):
    """
    %prog bisect acc accession.fasta

    determine the version of the accession by querying entrez, based on
    a fasta file. This proceeds by a sequential search from xxxx.1 to
    the latest record.
    """
    p = OptionParser(bisect.__doc__)
    p.set_email()

    opts, args = p.parse_args(args)
    if len(args) != 2:
        sys.exit(not p.print_help())

    acc, fastafile = args
    arec = get_first_rec(fastafile)

    valid = None
    for i in range(1, 100):
        term = "%s.%d" % (acc, i)
        try:
            query = list(batch_entrez([term], email=opts.email))
        except AssertionError:
            # batch_entrez asserts on empty id lists — no such version
            logging.debug("no records found for %s. terminating." % term)
            return

        # BUGFIX: batch_entrez yields 4-tuples (id, size, term, handle);
        # the original 3-name unpacking raised ValueError here.
        id, size, term, handle = query[0]
        brec = next(SeqIO.parse(handle, "fasta"))
        # A match means this version's sequence equals the local fasta
        match = print_first_difference(arec, brec, ignore_case=True,
                                       ignore_N=True, rc=True)
        if match:
            valid = term
            break

    if valid:
        print()
        print(green("%s matches the sequence in `%s`" % (valid, fastafile)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def inspect(item, maxchar=80):
    """
    Inspect the attributes of an item, printing each attribute name and
    its (possibly truncated) string value to stderr.
    """
    for attr_name in dir(item):
        try:
            shown = str(getattr(item, attr_name))
            # Truncate long values when a positive maxchar is given
            if maxchar and len(shown) > maxchar:
                shown = shown[:maxchar] + "..."
        except:
            # Some attributes raise on access or on str(); keep going
            shown = "[ERROR]"
        print("{}: {}".format(attr_name, shown), file=sys.stderr)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def depends(func):
    """
    Decorator to perform check on infile and outfile. When infile is not
    present, issue warning, and when outfile is present, skip function
    calls.

    The decorated function must be called with an `outfile=` keyword
    (and normally `infile=`); both accept a single path or a list.
    """
    from functools import wraps
    from jcvi.apps.base import need_update, listify

    infile = "infile"
    outfile = "outfile"

    # IMPROVEMENT: preserve func's __name__/__doc__ on the wrapper so
    # logging and introspection see the real function, not "wrapper".
    @wraps(func)
    def wrapper(*args, **kwargs):
        assert outfile in kwargs, \
            "You need to specify `outfile=` on function call"
        if infile in kwargs:
            infilename = listify(kwargs[infile])
            for x in infilename:
                assert op.exists(x), \
                    "The specified infile `{0}` does not exist".format(x)

        # NOTE(review): if `infile=` is not passed, `infilename` is
        # unbound on the next line — existing callers apparently always
        # supply it; verify before relying on the infile-less path.
        outfilename = kwargs[outfile]
        if need_update(infilename, outfilename):
            return func(*args, **kwargs)
        else:
            msg = "File `{0}` exists. Computation skipped." \
                .format(outfilename)
            logging.debug(msg)

        outfilename = listify(outfilename)
        for x in outfilename:
            assert op.exists(x), \
                "Something went wrong, `{0}` not found".format(x)

        return outfilename

    return wrapper
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def human_size(size, a_kilobyte_is_1024_bytes=False, precision=1, target=None):
    '''Convert a file size to human-readable form.

    Keyword arguments:
    size -- file size in bytes
    a_kilobyte_is_1024_bytes -- if True, use multiples of 1024;
                                if False (the default), use multiples of 1000
    precision -- number of decimal places in the formatted number
    target -- if given, keep scaling until this suffix is reached,
              instead of stopping once size < multiple

    Returns: string

    Credit: <http://diveintopython3.org/your-first-python-program.html>

    >>> print(human_size(1000000000000, True))
    931.3GiB
    >>> print(human_size(1000000000000))
    1.0Tb
    >>> print(human_size(300))
    300.0
    '''
    if size < 0:
        raise ValueError('number must be non-negative')

    multiple = 1024 if a_kilobyte_is_1024_bytes else 1000
    # SUFFIXES is a module-level map from multiple -> ordered suffix list
    for suffix in SUFFIXES[multiple]:
        if target:
            if suffix == target:
                break
            size /= float(multiple)
        else:
            if size >= multiple:
                size /= float(multiple)
            else:
                break

    return '{0:.{1}f}{2}'.format(size, precision, suffix)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def gene_name(st, exclude=("ev",), sep="."):
    """
    Helper function in BLAST filtering to get rid of alternative
    splicings. Different annotation groups name isoforms
    inconsistently; mostly the suffix after `sep` can be removed,
    except for names starting with a prefix in `exclude`.
    """
    # Names with an excluded prefix are never split on the separator
    if any(st.startswith(prefix) for prefix in exclude):
        sep = None

    st = st.split('|')[0]

    if sep and sep in st:
        name, suffix = st.rsplit(sep, 1)
    else:
        name, suffix = st, ""

    # We only want to remove suffix that are isoforms, longer suffix would
    # suggest that it is part of the right gene name
    return name if len(suffix) == 1 else st
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fixChromName(name, orgn="medicago"):
""" Convert quirky chromosome names encountered in different release files, which are very project specific, into a more general format. For example, in Medicago Convert a seqid like `Mt3.5.1_Chr1` to `chr1` `Mt3.5_Chr3` to `chr3` `chr01_pseudomolecule_IMGAG` to `chr1` Some examples from Maize Convert a seqid like `chromosome:AGPv2:2:1:237068873:1` to `2` Special cases `chromosome:AGPv2:mitochondrion:1:569630:1` to `Mt` `chromosome:AGPv2:chloroplast:1:140384:1` to `Pt` """ |
# Body of fixChromName(): normalize project-specific chromosome names.
import re
# Medicago: e.g. "Mt3.5.1_Chr1" -> capture the "chr1"-like token.
mtr_pat1 = re.compile(r"Mt[0-9]+\.[0-9]+[\.[0-9]+]{0,}_([a-z]+[0-9]+)")
mtr_pat2 = re.compile(r"([A-z0-9]+)_[A-z]+_[A-z]+")
# Maize Ensembl-style: "chromosome:AGPv2:2:1:237068873:1" -> "2".
zmays_pat = re.compile(
    r"[a-z]+:[A-z0-9]+:([A-z0-9]+):[0-9]+:[0-9]+:[0-9]+")
zmays_sub = {'mitochondrion': 'Mt', 'chloroplast': 'Pt'}
if orgn == "medicago":
    for mtr_pat in (mtr_pat1, mtr_pat2):
        match = re.search(mtr_pat, name)
        if match:
            n = match.group(1)
            n = n.replace("0", "")  # drop zero-padding, e.g. chr01 -> chr1
            name = re.sub(mtr_pat, n, name)
elif orgn == "maize":
    match = re.search(zmays_pat, name)
    if match:
        n = match.group(1)
        name = re.sub(zmays_pat, n, name)
        # Organelles get their conventional two-letter names.
        if name in zmays_sub:
            name = zmays_sub[name]
return name
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fill(text, delimiter="", width=70):
""" Wrap text with width per line """ |
# Body of fill(): hard-wrap `text` into `width`-sized chunks, joining the
# elements of each chunk with `delimiter` and the chunks with newlines.
texts = []
for i in xrange(0, len(text), width):
    t = delimiter.join(text[i:i + width])
    texts.append(t)
return "\n".join(texts)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def tile(lt, width=70, gap=1):
""" Pretty print list of items. """ |
# Body of tile(): lay out the strings in `lt` as right-aligned columns.
from jcvi.utils.iter import grouper
max_len = max(len(x) for x in lt) + gap     # column width incl. spacing
items_per_line = max(width // max_len, 1)   # at least one item per row
lt = [x.rjust(max_len) for x in lt]
g = list(grouper(lt, items_per_line, fillvalue=""))
return "\n".join("".join(x) for x in g)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def normalize_lms_axis(ax, xlim=None, ylim=None, xfactor=1e-6, yfactor=1, xlabel=None, ylabel="Map (cM)"):
""" Normalize the axis limits and labels to beautify axis. """ |
# Body of normalize_lms_axis(): tidy matplotlib axis limits, tick labels
# and axis labels; empty label means hide that axis' ticks entirely.
if xlim:
    ax.set_xlim(0, xlim)
if ylim:
    ax.set_ylim(0, ylim)
if xlabel:
    # Rescale tick values (e.g. bp -> Mb via xfactor) before labeling.
    xticklabels = [int(round(x * xfactor)) for x in ax.get_xticks()]
    ax.set_xticklabels(xticklabels, family='Helvetica')
    ax.set_xlabel(xlabel)
else:
    ax.set_xticks([])
if ylabel:
    yticklabels = [int(round(x * yfactor)) for x in ax.get_yticks()]
    ax.set_yticklabels(yticklabels, family='Helvetica')
    ax.set_ylabel(ylabel)
else:
    ax.set_yticks([])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fake(args):
""" %prog fake input.bed Make fake `scaffolds.fasta`. Use case for this is that sometimes I would receive just the csv/bed file and I'd like to use path() out of the box. """ |
# Body of fake(): emit a random-sequence FASTA with one record per seqid
# in input.bed, each long enough to cover all its features.
from math import ceil
from random import choice
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
p = OptionParser(fake.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 1:
    sys.exit(not p.print_help())
inputbed, = args
bed = Bed(inputbed)
recs = []
for seqid, sb in bed.sub_beds():
    # Size each fake scaffold to the furthest feature, rounded up to 1 kb.
    maxend = max(x.end for x in sb)
    size = int(ceil(maxend / 1000.) * 1000)
    seq = "".join([choice("ACGT") for x in xrange(size)])
    rec = SeqRecord(Seq(seq), id=seqid, description="")
    recs.append(rec)
fw = must_open(opts.outfile, "w")
SeqIO.write(recs, fw, "fasta")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def compute_score(markers, bonus, penalty):
""" Compute chain score using dynamic programming. If a marker is the same linkage group as a previous one, we add bonus; otherwise, we penalize the chain switching. """ |
nmarkers = len(markers)
s = [bonus] * nmarkers # score
f = [-1] * nmarkers # from
for i in xrange(1, nmarkers):
for j in xrange(i):
mi, mj = markers[i], markers[j]
t = bonus if mi.mlg == mj.mlg else penalty + bonus
if s[i] < s[j] + t:
s[i] = s[j] + t
f[i] = j
# Recover the highest scoring chain
highest_score = max(s)
si = s.index(highest_score)
onchain = set()
while True:
if si < 0:
break
si = f[si]
onchain.add(si)
return [x for i, x in enumerate(markers) if i in onchain] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def split(args):
""" %prog split input.bed Split suspicious scaffolds. Suspicious scaffolds are those that contain chunks that map to more than one linkage group. The chunk size can be modified through --chunk option. """ |
# Body of split(): print breakpoint ranges (BED-style) on scaffolds whose
# chained markers map to more than one linkage group.
p = OptionParser(split.__doc__)
p.add_option("--chunk", default=4, type="int",
             help="Split chunks of at least N markers")
p.add_option("--splitsingle", default=False, action="store_true",
             help="Split breakpoint range right in the middle")
opts, args = p.parse_args(args)
if len(args) != 1:
    sys.exit(not p.print_help())
inputbed, = args
bonus = 2
nchunk = opts.chunk
nbreaks = 0
# Penalty is tuned so a linkage-group switch only pays off once a run of
# at least `nchunk` same-group markers supports it.
penalty = -(nchunk * bonus - 1)
bed = Bed(inputbed)
for seqid, bb in bed.sub_beds():
    markers = [Marker(x) for x in bb]
    markers = compute_score(markers, bonus, penalty)
    for mi, mj in pairwise(markers):
        if mi.mlg == mj.mlg:
            continue
        # Consecutive chained markers from different linkage groups
        # delimit a candidate mis-join.
        assert mi.seqid == mj.seqid
        start, end = mi.pos, mj.pos
        if start > end:
            start, end = end, start
        if opts.splitsingle:
            start = end = (start + end) / 2  # py2 integer division intended
        print("\t".join(str(x) for x in (mi.seqid, start - 1, end)))
        nbreaks += 1
logging.debug("A total of {} breakpoints inferred (--chunk={})".\
              format(nbreaks, nchunk))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def movie(args):
""" %prog movie input.bed scaffolds.fasta chr1 Visualize history of scaffold OO. The history is contained within the tourfile, generated by path(). For each historical scaffold OO, the program plots a separate PDF file. The plots can be combined to show the progression as a little animation. The third argument limits the plotting to a specific pseudomolecule, for example `chr1`. """ |
# Body of movie(): replay the tour history for one pseudomolecule,
# rendering one PDF per optimization step and collecting frames in the
# "ffmpeg" directory for make_movie().
p = OptionParser(movie.__doc__)
p.add_option("--gapsize", default=100, type="int",
             help="Insert gaps of size between scaffolds")
add_allmaps_plot_options(p)
opts, args = p.parse_args(args)
if len(args) != 3:
    sys.exit(not p.print_help())
inputbed, scaffoldsfasta, seqid = args
gapsize = opts.gapsize
pf = inputbed.rsplit(".", 1)[0]
agpfile = pf + ".chr.agp"
tourfile = pf + ".tour"
fp = open(tourfile)
sizes = Sizes(scaffoldsfasta).mapping
ffmpeg = "ffmpeg"  # frame-collection directory, consumed by make_movie()
mkdir(ffmpeg)
score = cur_score = None
i = 1
for header, block in read_block(fp, ">"):
    s, tag, label = header[1:].split()
    if s != seqid:
        continue
    # Tour line: scaffold names each suffixed with orientation (+/-).
    tour = block[0].split()
    tour = [(x[:-1], x[-1]) for x in tour]
    if label.startswith("GA"):
        cur_score = label.split("-")[-1]
        if cur_score == score:
            i += 1
            continue  # skip GA generations that did not improve the score
        score = cur_score
    image_name = ".".join((seqid, "{0:04d}".format(i), label, "pdf"))
    if need_update(tourfile, image_name):
        fwagp = must_open(agpfile, "w")
        order_to_agp(seqid, tour, sizes, fwagp, gapsize=gapsize,
                     gaptype="map")
        fwagp.close()
        logging.debug("{0} written to `{1}`".format(header, agpfile))
        build([inputbed, scaffoldsfasta, "--cleanup"])
        pdf_name = plot([inputbed, seqid, "--title={0}".format(label)])
        sh("mv {0} {1}".format(pdf_name, image_name))
    if label in ("INIT", "FLIP", "TSP", "FINAL"):
        # Duplicate milestone frames so they linger in the animation.
        for j in xrange(5):  # Delay for 5 frames
            image_delay = image_name.rsplit(".", 1)[0] + \
                ".d{0}.pdf".format(j)
            sh("cp {0} {1}/{2}".format(image_name, ffmpeg, image_delay))
    else:
        sh("cp {0} {1}/".format(image_name, ffmpeg))
    i += 1
make_movie(ffmpeg, pf)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def make_movie(workdir, pf, dpi=120, fps=1, format="pdf", engine="ffmpeg"):
""" Make the movie using either ffmpeg or gifsicle. """ |
# Body of make_movie(): convert per-frame images in `workdir` into an
# animation named after `pf`, via ffmpeg (mp4) or gifsicle (gif).
os.chdir(workdir)
if format != "png":
    # Both engines consume PNGs; rasterize other formats first (via
    # GNU parallel + ImageMagick convert).
    cmd = "parallel convert -density {}".format(dpi)
    cmd += " {} {.}.png ::: " + "*.{}".format(format)
    sh(cmd)
assert engine in ("ffmpeg", "gifsicle"), \
    "Only ffmpeg or gifsicle is currently supported"
if engine == "ffmpeg":
    cmd = "ffmpeg -framerate {} -pattern_type glob -i '*.png' {}.mp4"\
        .format(fps, pf)
elif engine == "gifsicle":
    cmd = "convert *.png gif:- |"
    cmd += " gifsicle --delay {} --loop --optimize=3".format(100 / fps)
    cmd += " --colors=256 --multifile - > {}.gif".format(pf)
sh(cmd)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def estimategaps(args):
""" %prog estimategaps input.bed Estimate sizes of inter-scaffold gaps. The AGP file generated by path() command has unknown gap sizes with a generic number of Ns (often 100 Ns). The AGP file `input.chr.agp` will be modified in-place. """ |
# Body of estimategaps(): replace the generic 100-N gaps in `input.chr.agp`
# with per-gap size estimates derived from the genetic maps, writing the
# result to `<pf>.estimategaps.agp` and reindexing it in place.
p = OptionParser(estimategaps.__doc__)
p.add_option("--minsize", default=100, type="int",
             help="Minimum gap size")
p.add_option("--maxsize", default=500000, type="int",
             help="Maximum gap size")
p.add_option("--links", default=10, type="int",
             # Fixed garbled help text ("linkage grounds with matchings").
             help="Only use linkage groups with at least this many matched markers")
p.set_verbose(help="Print details for each gap calculation")
opts, args = p.parse_args(args)
if len(args) != 1:
    sys.exit(not p.print_help())
inputbed, = args
pf = inputbed.rsplit(".", 1)[0]
agpfile = pf + ".chr.agp"
bedfile = pf + ".lifted.bed"
cc = Map(bedfile, scaffold_info=True)
agp = AGP(agpfile)
minsize, maxsize = opts.minsize, opts.maxsize
links = opts.links
verbose = opts.verbose
outagpfile = pf + ".estimategaps.agp"
fw = must_open(outagpfile, "w")
for ob, components in agp.iter_object():
    components = list(components)
    s = Scaffold(ob, cc)
    mlg_counts = s.mlg_counts
    gaps = [x for x in components if x.is_gap]
    gapsizes = [None] * len(gaps)  # master
    for mlg, count in mlg_counts.items():
        if count < links:
            continue  # too few markers to trust this linkage group
        g = GapEstimator(cc, agp, ob, mlg)
        g.compute_all_gaps(minsize=minsize, maxsize=maxsize, \
                           verbose=verbose)
        # Merge evidence from this mlg into master
        assert len(g.gapsizes) == len(gaps)
        for i, gs in enumerate(gapsizes):
            gg = g.gapsizes[i]
            if gs is None:
                gapsizes[i] = gg
            elif gg:
                # Conflicting estimates: keep the conservative (smaller).
                gapsizes[i] = min(gs, gg)
    print(gapsizes)
    # Modify AGP
    i = 0
    for x in components:
        if x.is_gap:
            x.gap_length = gapsizes[i] or minsize
            # AGP spec: 'U' marks a gap of unknown (default 100 bp) size.
            x.component_type = 'U' if x.gap_length == 100 else 'N'
            i += 1
        print(x, file=fw)
fw.close()
reindex([outagpfile, "--inplace"])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def merge(args):
""" Convert csv maps to bed format. Each input map is csv formatted, for example: ScaffoldID,ScaffoldPosition,LinkageGroup,GeneticPosition scaffold_2707,11508,1,0 scaffold_2707,11525,1,1.2 scaffold_759,81336,1,9.7 """ |
# Body of merge(): concatenate CSV genetic maps into one sorted BED,
# writing per-map weights to the weights file.
p = OptionParser(merge.__doc__)
p.add_option("-w", "--weightsfile", default="weights.txt",
             help="Write weights to file")
p.set_outfile("out.bed")
opts, args = p.parse_args(args)
if len(args) < 1:
    sys.exit(not p.print_help())
maps = args
outfile = opts.outfile
fp = must_open(maps)
b = Bed()
mapnames = set()
for row in fp:
    # Map name derives from whichever input file is currently being read.
    mapname = filename_to_mapname(fp.filename())
    mapnames.add(mapname)
    try:
        m = CSVMapLine(row, mapname=mapname)
        if m.cm < 0:
            logging.error("Ignore marker with negative genetic distance")
            print(row.strip(), file=sys.stderr)
        else:
            b.append(BedLine(m.bedline))
    except (IndexError, ValueError):  # header or mal-formed line
        continue
b.print_to_file(filename=outfile, sorted=True)
logging.debug("A total of {0} markers written to `{1}`.".\
              format(len(b), outfile))
assert len(maps) == len(mapnames), "You have a collision in map names"
write_weightsfile(mapnames, weightsfile=opts.weightsfile)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mergebed(args):
""" Combine bed maps to bed format, adding the map name. """ |
# Body of mergebed(): concatenate bed maps, namespacing each marker
# accession with its map name, then write sorted output plus weights.
p = OptionParser(mergebed.__doc__)
p.add_option("-w", "--weightsfile", default="weights.txt",
             help="Write weights to file")
p.set_outfile("out.bed")
opts, args = p.parse_args(args)
if len(args) < 1:
    sys.exit(not p.print_help())
maps = args
outfile = opts.outfile
fp = must_open(maps)
b = Bed()
mapnames = set()
for row in fp:
    mapname = filename_to_mapname(fp.filename())
    mapnames.add(mapname)
    try:
        m = BedLine(row)
        # Prefix accession with map name; keep original locus in `extra`.
        m.accn = "{0}-{1}".format(mapname, m.accn)
        m.extra = ["{0}:{1}".format(m.seqid, m.start)]
        b.append(m)
    except (IndexError, ValueError):  # header or mal-formed line
        continue
b.print_to_file(filename=outfile, sorted=True)
logging.debug("A total of {0} markers written to `{1}`.".\
              format(len(b), outfile))
assert len(maps) == len(mapnames), "You have a collision in map names"
write_weightsfile(mapnames, weightsfile=opts.weightsfile)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def summary(args):
""" %prog summary input.bed scaffolds.fasta Print out summary statistics per map, followed by consensus summary of scaffold anchoring based on multiple maps. """ |
# Body of summary(): tabulate per-map marker statistics, then consensus
# anchoring/orientation statistics from the chr AGP.
p = OptionParser(summary.__doc__)
p.set_table(sep="|", align=True)
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 2:
    sys.exit(not p.print_help())
inputbed, scaffolds = args
pf = inputbed.rsplit(".", 1)[0]
mapbed = pf + ".bed"
chr_agp = pf + ".chr.agp"
sep = opts.sep
align = opts.align
cc = Map(mapbed)
mapnames = cc.mapnames
s = Sizes(scaffolds)
total, l50, n50 = s.summary
r = {}
maps = []
fw = must_open(opts.outfile, "w")
print("*** Summary for each individual map ***", file=fw)
for mapname in mapnames:
    markers = [x for x in cc if x.mapname == mapname]
    ms = MapSummary(markers, l50, s)
    r["Linkage Groups", mapname] = ms.num_lgs
    ms.export_table(r, mapname, total)
    maps.append(ms)
print(tabulate(r, sep=sep, align=align), file=fw)
r = {}
agp = AGP(chr_agp)
print("*** Summary for consensus map ***", file=fw)
consensus_scaffolds = set(x.component_id for x in agp if not x.is_gap)
# "Oriented" = anchored with a definite (+/-) orientation, not '?'.
oriented_scaffolds = set(x.component_id for x in agp \
                         if (not x.is_gap) and x.orientation != '?')
unplaced_scaffolds = set(s.mapping.keys()) - consensus_scaffolds
for mapname, sc in (("Anchored", consensus_scaffolds),
                    ("Oriented", oriented_scaffolds),
                    ("Unplaced", unplaced_scaffolds)):
    markers = [x for x in cc if x.seqid in sc]
    ms = MapSummary(markers, l50, s, scaffolds=sc)
    ms.export_table(r, mapname, total)
print(tabulate(r, sep=sep, align=align), file=fw)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def build(args):
""" %prog build input.bed scaffolds.fasta Build associated genome FASTA file and CHAIN file that can be used to lift old coordinates to new coordinates. The CHAIN file will be used to lift the original marker positions to new positions in the reconstructed genome. The new positions of the markers will be reported in *.lifted.bed. """ |
# Body of build(): reconstruct the genome FASTA plus a liftOver CHAIN,
# then lift the original marker bed to `*.lifted.bed` coordinates.
p = OptionParser(build.__doc__)
p.add_option("--cleanup", default=False, action="store_true",
             help="Clean up bulky FASTA files, useful for plotting")
opts, args = p.parse_args(args)
if len(args) != 2:
    sys.exit(not p.print_help())
inputbed, scaffolds = args
pf = inputbed.rsplit(".", 1)[0]
mapbed = pf + ".bed"
chr_agp = pf + ".chr.agp"
chr_fasta = pf + ".chr.fasta"
# Every step is guarded by need_update() so the pipeline is restartable.
if need_update((chr_agp, scaffolds), chr_fasta):
    agp_build([chr_agp, scaffolds, chr_fasta])
unplaced_agp = pf + ".unplaced.agp"
if need_update((chr_agp, scaffolds), unplaced_agp):
    write_unplaced_agp(chr_agp, scaffolds, unplaced_agp)
unplaced_fasta = pf + ".unplaced.fasta"
if need_update((unplaced_agp, scaffolds), unplaced_fasta):
    agp_build([unplaced_agp, scaffolds, unplaced_fasta])
combined_agp = pf + ".agp"
if need_update((chr_agp, unplaced_agp), combined_agp):
    FileMerger((chr_agp, unplaced_agp), combined_agp).merge()
combined_fasta = pf + ".fasta"
if need_update((chr_fasta, unplaced_fasta), combined_fasta):
    FileMerger((chr_fasta, unplaced_fasta), combined_fasta).merge()
chainfile = pf + ".chain"
if need_update((combined_agp, scaffolds, combined_fasta), chainfile):
    fromagp([combined_agp, scaffolds, combined_fasta])
liftedbed = mapbed.rsplit(".", 1)[0] + ".lifted.bed"
if need_update((mapbed, chainfile), liftedbed):
    # UCSC liftOver; unliftable features are dumped to `unmapped`.
    cmd = "liftOver -minMatch=1 {0} {1} {2} unmapped".\
        format(mapbed, chainfile, liftedbed)
    sh(cmd, check=True)
if opts.cleanup:
    FileShredder([chr_fasta, unplaced_fasta, combined_fasta,
                  chainfile, unplaced_agp,
                  combined_fasta + ".sizes", "unmapped"])
sort([liftedbed, "-i"])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def plotall(xargs):
""" %prog plotall input.bed Plot the matchings between the reconstructed pseudomolecules and the maps. This command will plot each reconstructed object (non-singleton). """ |
# Body of plotall(): call plot() once for every reconstructed object
# (pseudomolecule) listed in the chr AGP, in natural-sorted order.
p = OptionParser(plotall.__doc__)
add_allmaps_plot_options(p)
opts, args, iopts = p.set_image_options(xargs, figsize="10x6")
if len(args) != 1:
    sys.exit(not p.print_help())
inputbed, = args
pf = inputbed.rsplit(".", 1)[0]
agpfile = pf + ".chr.agp"
agp = AGP(agpfile)
objects = [ob for ob, lines in agp.iter_object()]
for seqid in natsorted(objects):
    # Re-dispatch with the same CLI args plus the target seqid appended.
    plot(xargs + [seqid])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_orientation(self, si, sj):
""" si, sj are two number series. To compute whether these two series have same orientation or not. We combine them in the two orientation configurations and compute length of the longest monotonic series. """ |
# Body of get_orientation(): decide the relative orientation of two number
# series by comparing longest-monotonic-subsequence (lms) lengths of the
# same-orientation vs. flipped concatenations. Positive result favors the
# same orientation; negative favors flipping; 0 is inconclusive.
if not si or not sj:
    return 0
# Same orientation configuration
a = lms(si + sj)
b = lms(sj + si)
# Opposite orientation configuration
c = lms(si + sj[::-1])
d = lms(sj[::-1] + si)
# lms() returns a tuple whose first element is the chain length.
return max(a, b)[0] - max(c, d)[0]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fix_tour(self, tour):
""" Test each scaffold if dropping does not decrease LMS. """ |
# Body of fix_tour(): keep only scaffolds whose presence strictly improves
# the longest-monotonic-subsequence (lms) score in some linkage group.
scaffolds, oos = zip(*tour)
keep = set()
for mlg in self.linkage_groups:
    lg = mlg.lg
    for s, o in tour:
        i = scaffolds.index(s)
        # Marker series to the left (L) and right (U) of scaffold s.
        L = [self.get_series(lg, x, xo) for x, xo in tour[:i]]
        U = [self.get_series(lg, x, xo) for x, xo in tour[i + 1:]]
        L, U = list(flatten(L)), list(flatten(U))
        M = self.get_series(lg, s, o)
        score_with = lms(L + M + U)[0]
        score_without = lms(L + U)[0]
        assert score_with >= score_without
        if score_with > score_without:
            keep.add(s)  # s contributes to at least one linkage group
dropped = len(tour) - len(keep)
logging.debug("Dropped {0} minor scaffolds".format(dropped))
return [(s, o) for (s, o) in tour if s in keep]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fix_orientation(self, tour):
""" Test each scaffold if flipping will increass longest monotonic chain length. """ |
# Body of fix_orientation(): flip a scaffold when flipping lengthens the
# longest monotonic chain, aggregating evidence across maps by weight.
orientations = dict(tour)  # old configuration here
scaffold_oo = defaultdict(list)
scaffolds, oos = zip(*tour)
for mlg in self.linkage_groups:
    lg = mlg.lg
    mapname = mlg.mapname
    for s, o in tour:
        i = scaffolds.index(s)
        # Marker series to the left (L) and right (U) of scaffold s.
        L = [self.get_series(lg, x, xo) for x, xo in tour[:i]]
        U = [self.get_series(lg, x, xo) for x, xo in tour[i + 1:]]
        L, U = list(flatten(L)), list(flatten(U))
        M = self.get_series(lg, s)
        plus = lms(L + M + U)       # chain score as-is
        minus = lms(L + M[::-1] + U)  # chain score with s flipped
        d = plus[0] - minus[0]
        if not d:
            continue
        scaffold_oo[s].append((d, mapname))  # reset orientation
fixed = 0
for s, v in scaffold_oo.items():
    # Weighted consensus over maps decides the final sign.
    d = self.weighted_mean(v)
    old_d = orientations[s]
    new_d = np.sign(d)
    if new_d != old_d:
        orientations[s] = new_d
        fixed += 1
tour = [(x, orientations[x]) for x in scaffolds]
logging.debug("Fixed orientations for {0} scaffolds.".format(fixed))
return tour
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def spin(self):
""" Perform a single spin """ |
# Body of spin(): write one full cycle of spinner characters to self.out,
# using a trailing carriage return to redraw in place.
for x in self.spinchars:
    self.string = self.msg + "...\t" + x + "\r"
    self.out.write(self.string.encode('utf-8'))
    self.out.flush()
    time.sleep(self.waittime)  # controls the spin speed
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def make_sequence(seq, name="S"):
""" Make unique nodes for sequence graph. """ |
return ["{}_{}_{}".format(name, i, x) for i, x in enumerate(seq)] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sequence_to_graph(G, seq, color='black'):
""" Automatically construct graph given a sequence of characters. """ |
# Body of sequence_to_graph(): add one node per sequence element plus
# edges between consecutive elements, all drawn in `color`.
for x in seq:
    if x.endswith("_1"):  # Mutation
        # Mutation nodes are rendered as small unlabeled circles.
        G.node(x, color=color, width="0.1", shape="circle", label="")
    else:
        G.node(x, color=color)
for a, b in pairwise(seq):
    G.edge(a, b, color=color)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def zip_sequences(G, allseqs, color="white"):
""" Fuse certain nodes together, if they contain same data except for the sequence name. """ |
# Body of zip_sequences(): cluster nodes that carry identical data apart
# from their sequence-name prefix into invisible graphviz subgraphs, so
# corresponding positions line up across sequences.
for s in zip(*allseqs):
    groups = defaultdict(list)
    for x in s:
        part = x.split('_', 1)[1]  # drop the "<name>_" prefix
        groups[part].append(x)
    for part, g in groups.items():
        with G.subgraph(name="cluster_" + part) as c:
            for x in g:
                c.node(x)
            c.attr(style="invis")  # cluster groups but draws no border
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def gallery(args):
""" %prog gallery folder link_prefix Convert a folder of figures to a HTML table. For example: $ python -m jcvi.formats.html gallery Paper-figures/ https://dl.dropboxusercontent.com/u/15937715/Data/Paper-figures/ Maps the images from local to remote. """ |
# Body of gallery(): render a folder of images as an HTML table whose
# cells link to `link_prefix`-hosted copies of each image.
from jcvi.apps.base import iglob
from jcvi.utils.iter import grouper
p = OptionParser(gallery.__doc__)
p.add_option("--columns", default=3, type="int",
             help="How many cells per row")
p.add_option("--width", default=200, type="int",
             help="Image width")
opts, args = p.parse_args(args)
if len(args) != 2:
    sys.exit(not p.print_help())
folder, link_prefix = args
width = opts.width
images = iglob(folder, "*.jpg,*.JPG,*.png")
td = '<td>{0}<br><a href="{1}"><img src="{1}" width="{2}"></a></td>'
print("<table>")
for ims in grouper(images, opts.columns):
    print('<tr height="{0}" valign="top">'.format(width + 5))
    for im in ims:
        if not im:
            continue  # grouper pads the final row with None
        im = op.basename(im)
        pf = im.split('.')[0].replace('_', '-')  # caption from filename
        link = link_prefix.rstrip("/") + "/" + im
        print(td.format(pf, link, width))
    print("</tr>")
print("</table>")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def links(args):
""" %prog links url Extract all the links "<a href=''>" from web page. """ |
# Body of links(): download `url` and print every <a href> (or <img src>
# with --img) as an absolute URL.
p = OptionParser(links.__doc__)
p.add_option("--img", default=False, action="store_true",
             help="Extract <img> tags [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
    sys.exit(not p.print_help())
url, = args
img = opts.img
htmlfile = download(url)
page = open(htmlfile).read()
soup = BeautifulSoup(page)
tag = 'img' if img else 'a'
src = 'src' if img else 'href'
aa = soup.findAll(tag)
for a in aa:
    link = a.get(src)
    link = urljoin(url, link)  # resolve relative links against the page URL
    print(link)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def unescape(s, unicode_action="replace"):
""" Unescape HTML strings, and convert & etc. """ |
# Body of unescape(): decode HTML entities (&amp; etc.), coerce to ASCII,
# and strip embedded newlines / surrounding whitespace. Python 2 only
# (HTMLParser module).
import HTMLParser
hp = HTMLParser.HTMLParser()
s = hp.unescape(s)
# Non-ASCII characters are handled per `unicode_action` (default "replace").
s = s.encode('ascii', unicode_action)
s = s.replace("\n", "").strip()
return s
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def table(args):
""" %prog table page.html Convert HTML tables to csv. """ |
# Body of table(): extract each <table> in an HTML page into its own CSV
# file named `<page>.<index>.csv`.
import csv
p = OptionParser(table.__doc__)
p.set_sep(sep=",")
opts, args = p.parse_args(args)
if len(args) != 1:
    sys.exit(not p.print_help())
htmlfile, = args
page = open(htmlfile).read()
soup = BeautifulSoup(page)
for i, tabl in enumerate(soup.findAll('table')):
    nrows = 0
    csvfile = htmlfile.rsplit(".", 1)[0] + ".{0}.csv".format(i)
    writer = csv.writer(open(csvfile, "w"), delimiter=opts.sep)
    rows = tabl.findAll('tr')
    for tr in rows:
        cols = tr.findAll('td')
        if not cols:
            cols = tr.findAll('th')  # header rows use <th> cells
        row = []
        for td in cols:
            try:
                cell = "".join(td.find(text=True))
                cell = unescape(cell)
            except TypeError:
                cell = ""  # empty cell: td.find(...) returned None
            row.append(cell)
        writer.writerow(row)
        nrows += 1
    logging.debug("Table with {0} rows written to `{1}`.".format(nrows, csvfile))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def blast(args):
""" %prog blast fastafile Run BLASTN against database (default is UniVec_Core). Output .bed format on the vector/contaminant ranges. """ |
# Body of blast(): screen a FASTA against UniVec_Core (or --db), merge
# nearby HSPs, and write vector/contaminant ranges as BED; returns the
# BED filename.
p = OptionParser(blast.__doc__)
p.add_option("--dist", default=100, type="int",
             help="Merge adjacent HSPs separated by [default: %default]")
p.add_option("--db",
             help="Use a different database rather than UniVec_Core")
opts, args = p.parse_args(args)
if len(args) != 1:
    sys.exit(not p.print_help())
fastafile, = args
fastaprefix = fastafile.split(".", 1)[0]
univec = opts.db or download("ftp://ftp.ncbi.nih.gov/pub/UniVec/UniVec_Core")
uniprefix = univec.split(".", 1)[0]
fastablast = fastaprefix + ".{0}.blast".format(uniprefix)
# vecscreen parameters are specific to UniVec; custom dbs use megablast.
prog = run_megablast if opts.db else run_vecscreen
prog(infile=fastafile, outfile=fastablast, db=univec, pctid=95, hitlen=50)
fp = open(fastablast)
ranges = []
for row in fp:
    b = BlastLine(row)
    ranges.append((b.query, b.qstart, b.qstop))
merged_ranges = range_merge(ranges, dist=opts.dist)
bedfile = fastaprefix + ".{0}.bed".format(uniprefix)
fw = must_open(bedfile, "w")
for seqid, start, end in merged_ranges:
    # BED is 0-based half-open; BLAST coordinates are 1-based inclusive.
    print("\t".join(str(x) for x in (seqid, start - 1, end, uniprefix)), file=fw)
return bedfile
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def check_exists(filename, oappend=False):
""" Avoid overwriting some files accidentally. """ |
# Body of check_exists(): interactively confirm before clobbering an
# existing file. Returns truthy when it is OK to proceed; append mode
# (`oappend`) short-circuits the prompt.
if op.exists(filename):
    if oappend:
        return oappend
    logging.error("`{0}` found, overwrite (Y/N)?".format(filename))
    overwrite = (raw_input() == 'Y')  # py2; anything but 'Y' declines
else:
    overwrite = True
return overwrite
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def must_open(filename, mode="r", checkexists=False, skipcheck=False, \ oappend=False):
""" Accepts filename and returns filehandle. Checks on multiple files, stdin/stdout/stderr, .gz or .bz2 file. """ |
# Body of must_open(): resolve `filename` to a file handle, transparently
# handling lists of files, stdin/stdout/stderr, S3 URLs, temporary files,
# .gz/.bz2 compression, and optional overwrite confirmation.
if isinstance(filename, list):
    assert "r" in mode
    if filename[0].endswith((".gz", ".bz2")):
        filename = " ".join(filename)  # allow opening multiple gz/bz2 files
    else:
        import fileinput
        # fileinput chains plain files into a single readable stream.
        return fileinput.input(filename)
if filename.startswith("s3://"):
    from jcvi.utils.aws import pull_from_s3
    filename = pull_from_s3(filename)
if filename in ("-", "stdin"):
    assert "r" in mode
    fp = sys.stdin
elif filename == "stdout":
    assert "w" in mode
    fp = sys.stdout
elif filename == "stderr":
    assert "w" in mode
    fp = sys.stderr
elif filename == "tmp" and mode == "w":
    from tempfile import NamedTemporaryFile
    fp = NamedTemporaryFile(delete=False)
elif filename.endswith(".gz"):
    if 'r' in mode:
        # Read via a subprocess pipe rather than the gzip module.
        cmd = "gunzip -c {0}".format(filename)
        fp = popen(cmd, debug=False)
    elif 'w' in mode:
        import gzip
        fp = gzip.open(filename, mode)
elif filename.endswith(".bz2"):
    if 'r' in mode:
        cmd = "bzcat {0}".format(filename)
        fp = popen(cmd, debug=False)
    elif 'w' in mode:
        import bz2
        fp = bz2.BZ2File(filename, mode)
else:
    if checkexists:
        assert mode == "w"
        # skipcheck: overwrite silently unless the file already exists;
        # otherwise prompt through check_exists().
        overwrite = (not op.exists(filename)) if skipcheck \
            else check_exists(filename, oappend)
        if overwrite:
            if oappend:
                fp = open(filename, "a")
            else:
                fp = open(filename, "w")
        else:
            logging.debug("File `{0}` already exists. Skipped."\
                          .format(filename))
            return None
    else:
        fp = open(filename, mode)
return fp
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def read_block(handle, signal):
""" Useful for reading block-like file formats, for example FASTA or OBO file, such file usually startswith some signal, and in-between the signals are a record """ |
# Body of read_block(): generator yielding (header, lines) records from a
# block-structured file (e.g. FASTA) where each record starts with `signal`.
signal_len = len(signal)
# groupby alternates between runs of header lines and runs of content.
it = (x[1] for x in groupby(handle,
                            key=lambda row: row.strip()[:signal_len] == signal))
found_signal = False
for header in it:
    header = list(header)
    # Consecutive header lines: all but the last have no content block.
    for h in header[:-1]:
        h = h.strip()
        if h[:signal_len] != signal:
            continue
        yield h, []  # Header only, no contents
    header = header[-1].strip()
    if header[:signal_len] != signal:
        continue
    found_signal = True
    seq = list(s.strip() for s in next(it))
    yield header, seq
if not found_signal:
    # No signal anywhere: rewind and treat the whole stream as one record.
    handle.seek(0)
    seq = list(s.strip() for s in handle)
    yield None, seq
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_number(s, cast=int):
""" Try to get a number out of a string, and cast it. """ |
# Body of get_number(): keep only the digit characters of `s`, then cast
# the concatenation (raises if `s` contains no digits and cast is int).
import string
d = "".join(x for x in str(s) if x in string.digits)
return cast(d)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def seqids(args):
""" %prog seqids prefix start end Make a list of seqids for graphics.karyotype. For example: $ python -m jcvi.formats.base seqids chromosome_ 1 3 chromosome_1,chromosome_2,chromosome_3 $ python -m jcvi.formats.base seqids A 3 1 --pad0=2 A03,A02,A01 """ |
# Body of seqids(): print a comma-joined run of zero-padded seqids,
# ascending or descending depending on start/end order.
p = OptionParser(seqids.__doc__)
p.add_option("--pad0", default=0, help="How many zeros to pad")
opts, args = p.parse_args(args)
if len(args) != 3:
    sys.exit(not p.print_help())
prefix, start, end = args
pad0 = opts.pad0
start, end = int(start), int(end)
step = 1 if start <= end else -1  # allow reversed ranges (e.g. 3..1)
print(",".join(["{}{:0{}d}".format(prefix, x, pad0) \
                for x in xrange(start, end + step, step)]))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pairwise(args):
""" %prog pairwise ids Convert a list of IDs into all pairs. """ |
# Body of pairwise(): write all unordered ID pairs to `<idsfile>.pairs`,
# tab-separated, one pair per line.
from itertools import combinations
p = OptionParser(pairwise.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
    sys.exit(not p.print_help())
idsfile, = args
ids = SetFile(idsfile)
ids = sorted(ids)  # deterministic pair ordering
fw = open(idsfile + ".pairs", "w")
for a, b in combinations(ids, 2):
    print("\t".join((a, b)), file=fw)
fw.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def truncate(args):
""" %prog truncate linecount filename Remove linecount lines from the end of the file in-place. Borrowed from: <http://superuser.com/questions/127786/how-to-remove-the-last-2-lines-of-a-very-large-file> """ |
# Body of truncate(): remove `linecount` lines from the end of a file in
# place, by scanning backwards for newline characters.
p = OptionParser(truncate.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
    sys.exit(not p.print_help())
number, filename = args
number = int(number)
count = 0
f = open(filename, "r+b")
f.seek(0, os.SEEK_END)
while f.tell() > 0:
    f.seek(-1, os.SEEK_CUR)
    char = f.read(1)
    # NOTE(review): under py2 this is a str compare; on py3 read(1) in
    # "r+b" yields bytes and '\n' would never match -- confirm interpreter.
    if char == '\n':
        count += 1
    if count == number + 1:
        # Positioned just past the newline ending the last kept line.
        f.truncate()
        print("Removed {0} lines from end of file".format(number), file=sys.stderr)
        return number
    f.seek(-1, os.SEEK_CUR)  # step back over the char just read
if count < number + 1:
    print("No change: requested removal would leave empty file", file=sys.stderr)
    return -1
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def flatten(args):
    """
    %prog flatten filename > ids

    Convert a list of IDs (say, multiple IDs per line) and move them into one
    per line. If the file is multi-column with multiple elements per column,
    --zipflatten zips the columns first (padding missing cells with "na"),
    then flattens.
    """
    from six.moves import zip_longest

    p = OptionParser(flatten.__doc__)
    p.set_sep(sep=",")
    p.add_option("--zipflatten", default=None, dest="zipsep",
                 help="Specify if columns of the file should be zipped before" +
                 " flattening. If so, specify delimiter separating column elements" +
                 " [default: %default]")
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    tabfile, = args
    zipsep = opts.zipsep

    fp = must_open(tabfile)
    for line in fp:
        if zipsep:
            line = line.rstrip()
            # Split each column into its elements, zip columns together,
            # padding short columns with "na", then emit one row per zip.
            columns = [cell.split(zipsep) for cell in line.split(opts.sep)]
            zipped = zip_longest(*columns, fillvalue="na")
            print("\n".join(zipsep.join(group) for group in zipped))
        else:
            print(line.strip().replace(opts.sep, "\n"))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def reorder(args):
    """
    %prog reorder tabfile 1,2,4,3 > newtabfile

    Reorder columns in tab-delimited files. The above syntax will print out a
    new file with col-1,2,4,3 from the old file.
    """
    import csv

    p = OptionParser(reorder.__doc__)
    p.set_sep()
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    tabfile, order = args
    sep = opts.sep
    # Convert the 1-based column spec to 0-based indexes.
    indexes = [int(x) - 1 for x in order.split(",")]

    reader = csv.reader(must_open(tabfile), delimiter=sep)
    writer = csv.writer(sys.stdout, delimiter=sep)
    for row in reader:
        writer.writerow([row[i] for i in indexes])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def split(args):
    """
    %prog split file outdir N

    Split file into N records. This allows splitting FASTA/FASTQ/TXT file
    properly at boundary of records. Split is useful for parallelization
    on input chunks.

    Option --mode is useful on how to break into chunks.
    1. chunk - chunk records sequentially, 1-100 in file 1, 101-200 in file 2, etc.
    2. cycle - chunk records in Round Robin fashion
    3. optimal - try to make split file of roughly similar sizes, using LPT
    algorithm. This is the default.
    """
    p = OptionParser(split.__doc__)
    mode_choices = ("batch", "cycle", "optimal")
    p.add_option("--all", default=False, action="store_true",
                 help="split all records [default: %default]")
    p.add_option("--mode", default="optimal", choices=mode_choices,
                 help="Mode when splitting records [default: %default]")
    p.add_option("--format", choices=("fasta", "fastq", "txt", "clust"),
                 help="input file format [default: %default]")
    opts, args = p.parse_args(args)

    if len(args) != 3:
        sys.exit(not p.print_help())

    filename, outdir, N = args
    splitter = FileSplitter(filename, outputdir=outdir,
                            format=opts.format, mode=opts.mode)

    if opts.all:
        logging.debug("option -all override N")
        N = splitter.num_records
    else:
        # Never ask for more chunks than there are records.
        N = min(splitter.num_records, int(N))

    assert N > 0, "N must be > 0"
    logging.debug("split file into %d chunks" % N)
    splitter.split(N)

    return splitter
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def setop(args):
    """
    %prog setop "fileA & fileB" > newfile

    Perform set operations, except on files. The files (fileA and fileB)
    contain list of ids. The operator is one of the four:

    |: union (elements found in either file)
    &: intersection (elements found in both)
    -: difference (elements in fileA but not in fileB)
    ^: symmetric difference (elementes found in either set but not both)

    Please quote the argument to avoid shell interpreting | and &.
    """
    from jcvi.utils.natsort import natsorted

    p = OptionParser(setop.__doc__)
    p.add_option("--column", default=0, type="int",
                 help="The column to extract, 0-based, -1 to disable [default: %default]")
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    statement, = args
    # `oper` avoids shadowing the os.path alias `op` used elsewhere.
    fa, oper, fb = statement.split()
    assert oper in ('|', '&', '-', '^')

    column = opts.column
    fa = SetFile(fa, column=column)
    fb = SetFile(fb, column=column)

    if oper == '|':
        result = fa | fb
    elif oper == '&':
        result = fa & fb
    elif oper == '-':
        result = fa - fb
    else:  # '^', guaranteed by the assert above
        result = fa ^ fb

    for x in natsorted(result):
        print(x)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _batch_iterator(self, N=1):
    """Returns N lists of records.

    This can be used on any iterator, for example to batch up lines from
    a file handle. This is a generator function, and it returns lists of
    the entries from the supplied iterator. Each list will have
    batch_size entries, although the final list may be shorter.
    """
    # Ceiling division so all records fit into N batches.
    batch_size = math.ceil(self.num_records / float(N))
    handle = self._open(self.filename)
    batch = list(islice(handle, batch_size))
    while batch:
        yield batch
        batch = list(islice(handle, batch_size))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def extract(args):
    """
    %prog extract idsfile sizesfile

    Extract the lines containing only the given IDs.
    """
    p = OptionParser(extract.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    idsfile, sizesfile = args
    sizes = Sizes(sizesfile).mapping
    for line in open(idsfile):
        name = line.strip()
        # KeyError here means the id is absent from the sizes file.
        print("\t".join(str(x) for x in (name, sizes[name])))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _reversedict(d):
""" Internal helper for generating reverse mappings; given a dictionary, returns a new dictionary with keys and values swapped. """ |
return dict(list(zip(list(d.values()), list(d.keys())))) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _percent_to_integer(percent):
""" Internal helper for converting a percentage value to an integer between 0 and 255 inclusive. """ |
num = float(percent.split('%')[0]) / 100.0 * 255
e = num - math.floor(num)
return e < 0.5 and int(math.floor(num)) or int(math.ceil(num)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def closest_color(requested_color):
    """
    Find closest color name for the request RGB tuple.
    """
    # Silence DEBUG logging while computing all the color distances.
    logging.disable(logging.DEBUG)
    candidates = [(color_diff(hex_to_rgb(hexcode), requested_color), name)
                  for hexcode, name in css3_hex_to_names.items()]
    logging.disable(logging.NOTSET)
    min_diff, min_color = min(candidates)
    return min_color
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def offdiag(args):
    """
    %prog offdiag diploid.napus.1x1.lifted.anchors

    Find gene pairs that are off diagonal. "Off diagonal" are the pairs
    that are not on the orthologous chromosomes. For example, napus chrA01
    and brapa A01.
    """
    p = OptionParser(offdiag.__doc__)
    p.set_beds()
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    anchorsfile, = args
    qbed, sbed, qorder, sorder, is_self = check_beds(anchorsfile, p, opts)

    fp = open(anchorsfile)
    pf = "-".join(anchorsfile.split(".")[:2])
    header = "Block-id|Napus|Diploid|Napus-chr|Diploid-chr|RBH?".split("|")
    print("\t".join(header))
    block_num = -1
    for line in fp:
        # Each '#' line starts a new synteny block.
        if line[0] == '#':
            block_num += 1
            continue
        q, s, score = line.split()
        rbh = 'no' if score[-1] == 'L' else 'yes'
        qi, qq = qorder[q]
        si, ss = sorder[s]
        oqseqid = qseqid = qq.seqid
        osseqid = sseqid = ss.seqid
        sseqid = sseqid.split("_")[0][-3:]
        # Normalize napus seqids to the diploid naming convention.
        if qseqid[0] == 'A':
            qseqid = qseqid[-3:]           # A09 => A09
        elif qseqid[0] == 'C':
            qseqid = 'C0' + qseqid[-1]     # C9 => C09
        else:
            continue
        # Skip pairs on orthologous chromosomes or unplaced scaffolds.
        if qseqid == sseqid or sseqid[-2:] == 'nn':
            continue
        block_id = pf + "-block-{0}".format(block_num)
        print("\t".join((block_id, q, s, oqseqid, osseqid, rbh)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def diff(args):
    """
    %prog diff simplefile

    Calculate difference of pairwise syntenic regions.
    """
    from jcvi.utils.cbook import SummaryStats

    p = OptionParser(diff.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    simplefile, = args
    rows = [x.split() for x in open(simplefile)]
    spans = []
    # Skip the header row; each block contributes exactly two rows
    # (the A side and the B side), with the span in column 5.
    for block_id, pair in groupby(rows[1:], key=lambda x: x[0]):
        a, b = list(pair)
        spans.append((int(a[4]), int(b[4])))
    aspans, bspans = zip(*spans)
    dspans = [bspan - aspan for aspan, bspan in spans]
    s = SummaryStats(dspans)
    print("For a total of {0} blocks:".format(len(dspans)), file=sys.stderr)
    print("Sum of A: {0}".format(sum(aspans)), file=sys.stderr)
    print("Sum of B: {0}".format(sum(bspans)), file=sys.stderr)
    print("Sum of Delta: {0} ({1})".format(sum(dspans), s), file=sys.stderr)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def estimate_size(accns, bed, order, conservative=True):
    """
    Estimate the bp length for the deletion tracks, indicated by the gene
    accns. True different levels of estimates vary on conservativeness.
    """
    positions = [order[x] for x in accns]
    indexes, _ = zip(*positions)
    mini, maxi = min(indexes), max(indexes)
    if not conservative:
        # Extend the interval by one gene on each side.
        mini -= 1
        maxi += 1

    minb, maxb = bed[mini], bed[maxi]
    assert minb.seqid == maxb.seqid

    # Conservative estimate measures start-to-start; liberal end-to-end.
    distmode = "ss" if conservative else "ee"
    ra = (minb.seqid, minb.start, minb.end, "+")
    rb = (maxb.seqid, maxb.start, maxb.end, "+")

    dist, orientation = range_distance(ra, rb, distmode=distmode)
    assert dist != -1
    return dist
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def merge(args):
    """
    %prog merge protein-quartets registry LOST

    Merge protein quartets table with dna quartets registry. This is
    specific to the napus project.
    """
    from jcvi.formats.base import DictFile

    p = OptionParser(merge.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 3:
        sys.exit(not p.print_help())

    quartets, registry, lost = args
    qq = DictFile(registry, keypos=1, valuepos=3)
    lost = DictFile(lost, keypos=1, valuepos=0, delimiter='|')
    qq.update(lost)
    fp = open(quartets)

    # Case number for each combination of species tags.
    cases = {
        "AN,CN": 4,
        "BO,AN,CN": 8,
        "BO,CN": 2,
        "BR,AN": 1,
        "BR,AN,CN": 6,
        "BR,BO": 3,
        "BR,BO,AN": 5,
        "BR,BO,AN,CN": 9,
        "BR,BO,CN": 7,
    }
    # Registry status -> human-readable comment label.
    ip = {
        "syntenic_model": "Syntenic_model_excluded_by_OMG",
        "complete": "Predictable",
        "partial": "Truncated",
        "pseudogene": "Pseudogene",
        "random": "Match_random",
        "real_ns": "Transposed",
        "gmap_fail": "GMAP_fail",
        "AN LOST": "AN_LOST",
        "CN LOST": "CN_LOST",
        "BR LOST": "BR_LOST",
        "BO LOST": "BO_LOST",
        "outside": "Outside_synteny_blocks",
        "[NF]": "Not_found",
    }

    for line in fp:
        atoms = line.strip().split("\t")
        genes = atoms[:4]
        tag = atoms[4]
        a, b, c, d = [qq.get(x, ".").rsplit("-", 1)[-1] for x in genes]
        # Reorder statuses to match the gene column order.
        qqs = [c, d, a, b]
        for i, q in enumerate(qqs):
            if atoms[i] != '.':
                qqs[i] = "syntenic_model"

        # Build the comment column: case number, plus loss status when
        # exactly one gene of the quartet is missing.
        comment = "Case{0}".format(cases[tag])
        dots = sum(1 for x in genes if x == '.')
        if dots == 1:
            idx = genes.index(".")
            comment += "-" + ip[qqs[idx]]
        print(line.strip() + "\t" + "\t".join(qqs + [comment]))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def gffselect(args):
    """
    %prog gffselect gmaplocation.bed expectedlocation.bed translated.ids tag

    Try to match up the expected location and gmap locations for particular
    genes. translated.ids was generated by fasta.translate --ids. tag must
    be one of "complete|pseudogene|partial".
    """
    from jcvi.formats.bed import intersectBed_wao

    p = OptionParser(gffselect.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 4:
        sys.exit(not p.print_help())

    gmapped, expected, idsfile, tag = args
    data = get_tags(idsfile)
    # Map "path"-style accessions to their completeness tag; the ids file
    # uses "mrna" in the accession, the bed file uses "path".
    completeness = dict((a.replace("mrna", "path"), c)
                        for (a, b, c) in data)

    seen = set()
    idsfile = expected.rsplit(".", 1)[0] + ".ids"
    fw = open(idsfile, "w")
    cnt = 0
    for a, b in intersectBed_wao(expected, gmapped):
        if b is None:
            continue
        aname, bbname = a.accn, b.accn
        bname = bbname.split(".")[0]
        if completeness[bbname] != tag:
            continue
        # Keep only the first match per gene.
        if aname == bname and bname not in seen:
            seen.add(bname)
            print(bbname, file=fw)
            cnt += 1
    fw.close()

    logging.debug("Total {0} records written to `{1}`.".format(cnt, idsfile))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def gaps(args):
    """
    %prog gaps idsfile fractionationfile gapsbed

    Check gene locations against gaps. `idsfile` contains a list of IDs
    to query into `fractionationfile` in order to get expected locations.
    """
    from jcvi.formats.base import DictFile
    from jcvi.apps.base import popen
    from jcvi.utils.cbook import percentage

    p = OptionParser(gaps.__doc__)
    p.add_option("--bdist", default=0, type="int",
                 help="Base pair distance [default: %default]")
    opts, args = p.parse_args(args)

    if len(args) != 3:
        sys.exit(not p.print_help())

    idsfile, frfile, gapsbed = args
    bdist = opts.bdist
    d = DictFile(frfile, keypos=1, valuepos=2)
    bedfile = idsfile + ".bed"
    fw = open(bedfile, "w")
    total = 0
    for line in open(idsfile):
        id = line.strip()
        hit = d[id]
        tag, pos = get_tag(hit, None)
        seqid, start, end = pos
        # Pad the expected interval by bdist on both sides (floor at 1),
        # then write 0-based BED coordinates.
        start, end = max(start - bdist, 1), end + bdist
        print("\t".join(str(x) for x in (seqid, start - 1, end, id)), file=fw)
        total += 1
    fw.close()

    cmd = "intersectBed -a {0} -b {1} -v | wc -l".format(bedfile, gapsbed)
    not_in_gaps = int(popen(cmd).read())
    in_gaps = total - not_in_gaps
    print("Ids in gaps: {1}".\
            format(total, percentage(in_gaps, total)), file=sys.stderr)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def genestatus(args):
    """
    %prog genestatus diploid.gff3.exon.ids

    Tag genes based on translation from GMAP models, using
    fasta.translate() --ids.
    """
    p = OptionParser(genestatus.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    idsfile, = args
    data = get_tags(idsfile)
    # Group records by gene (the accession prefix before the first dot).
    gene_key = lambda x: x[0].split(".")[0]
    for gene, records in groupby(data, key=gene_key):
        tags = [rec[-1] for rec in records]
        # Best status among the gene's isoforms wins.
        if "complete" in tags:
            tag = "complete"
        elif "partial" in tags:
            tag = "partial"
        else:
            tag = "pseudogene"
        print("\t".join((gene, tag)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def validate(args):
    """
    %prog validate diploid.napus.fractionation cds.bed

    Check whether [S] intervals overlap with CDS.
    """
    from jcvi.formats.bed import intersectBed_wao

    p = OptionParser(validate.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    fractionation, cdsbed = args
    fp = open(fractionation)

    # Pass 1: dump all [S] intervals to a BED file.
    sbed = "S.bed"
    fw = open(sbed, "w")
    for line in fp:
        a, b, c = line.split()
        if not c.startswith("[S]"):
            continue
        tag, (seqid, start, end) = get_tag(c, None)
        print("\t".join(str(x) for x in (seqid, start - 1, end, b)), file=fw)
    fw.close()

    # Collect [S] entries that overlap a CDS feature.
    pairs = {}
    for a, b in intersectBed_wao(sbed, cdsbed):
        if b is None:
            continue
        pairs[a.accn] = b.accn

    # Pass 2: rewrite the fractionation file, substituting overlapping
    # [S] entries with the matching CDS accession.
    validated = fractionation + ".validated"
    fw = open(validated, "w")
    fp.seek(0)
    fixed = 0
    for line in fp:
        a, b, c = line.split()
        if b in pairs:
            assert c.startswith("[S]")
            c = pairs[b]
            fixed += 1
        print("\t".join((a, b, c)), file=fw)

    logging.debug("Fixed {0} [S] cases in `{1}`.".format(fixed, validated))
    fw.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def longest(args):
    """
    %prog longest pasa.fasta output.subclusters.out

    Find the longest PASA assembly and label it as full-length. Also removes
    transcripts shorter than half the length of the longest, or shorter than
    200bp. The assemblies for the same locus is found in
    `output.subclusters.out`. In particular the lines that look like:

    sub-cluster: asmbl_25 asmbl_26 asmbl_27
    """
    from jcvi.formats.fasta import Fasta, SeqIO
    from jcvi.formats.sizes import Sizes

    p = OptionParser(longest.__doc__)
    p.add_option("--prefix", default="pasa",
                 help="Replace asmbl_ with prefix [default: %default]")
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    fastafile, subclusters = args
    prefix = fastafile.rsplit(".", 1)[0]

    idsfile = prefix + ".fl.ids"
    fw = open(idsfile, "w")
    sizes = Sizes(fastafile).mapping

    name_convert = lambda x: x.replace("asmbl", opts.prefix)

    keep = set()  # IDs that survive the length filter
    nrecs = 0
    for line in open(subclusters):
        if not line.startswith("sub-cluster:"):
            continue
        asmbls = line.split()[1:]
        longest_asmbl = max(asmbls, key=lambda x: sizes[x])
        longest_size = sizes[longest_asmbl]
        print(name_convert(longest_asmbl), file=fw)
        nrecs += 1
        # Drop anything shorter than half the longest, or under 200bp.
        cutoff = max(longest_size / 2, 200)
        keep.update(set(x for x in asmbls if sizes[x] >= cutoff))
    fw.close()
    logging.debug("{0} fl-cDNA records written to `{1}`.".format(nrecs, idsfile))

    # Second pass: write the surviving records with renamed accessions.
    f = Fasta(fastafile, lazy=True)
    newfastafile = prefix + ".clean.fasta"
    fw = open(newfastafile, "w")
    nrecs = 0
    for name, rec in f.iteritems_ordered():
        if name not in keep:
            continue
        rec.id = name_convert(name)
        rec.description = ""
        SeqIO.write([rec], fw, "fasta")
        nrecs += 1
    fw.close()
    logging.debug("{0} valid records written to `{1}`.".format(nrecs, newfastafile))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ids(args):
    """
    %prog ids cdhit.clstr

    Get the representative ids from clstr file.
    """
    p = OptionParser(ids.__doc__)
    p.add_option("--prefix", type="int",
                 help="Find rep id for prefix of len [default: %default]")
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    clstrfile, = args
    cf = ClstrFile(clstrfile)
    if opts.prefix:
        reads = list(cf.iter_reps_prefix(prefix=opts.prefix))
    else:
        reads = list(cf.iter_reps())

    nreads = len(reads)
    idsfile = clstrfile.replace(".clstr", ".ids")
    fw = open(idsfile, "w")
    for i, name in reads:
        print("\t".join(str(x) for x in (i, name)), file=fw)

    logging.debug("A total of {0} unique reads written to `{1}`.".\
            format(nreads, idsfile))
    fw.close()

    return idsfile
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def summary(args):
    """
    %prog summary cdhit.clstr

    Parse cdhit.clstr file to get distribution of cluster sizes.
    """
    from jcvi.graphics.histogram import loghistogram

    p = OptionParser(summary.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    clstrfile, = args
    sizes = list(ClstrFile(clstrfile).iter_sizes())
    loghistogram(sizes, summary=True)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def deduplicate(args):
    """
    %prog deduplicate fastafile

    Wraps `cd-hit-est` to remove duplicate sequences.
    """
    p = OptionParser(deduplicate.__doc__)
    p.set_align(pctid=96, pctcov=0)
    p.add_option("--fast", default=False, action="store_true",
                 help="Place sequence in the first cluster")
    p.add_option("--consensus", default=False, action="store_true",
                 help="Compute consensus sequences")
    p.add_option("--reads", default=False, action="store_true",
                 help="Use `cd-hit-454` to deduplicate [default: %default]")
    p.add_option("--samestrand", default=False, action="store_true",
                 help="Enforce same strand alignment")
    p.set_home("cdhit")
    p.set_cpus()
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    fastafile, = args
    identity = opts.pctid / 100.
    fastafile, qualfile = fasta([fastafile, "--seqtk"])

    ocmd = "cd-hit-454" if opts.reads else "cd-hit-est"
    cmd = op.join(opts.cdhit_home, ocmd)
    cmd += " -c {0}".format(identity)
    if ocmd == "cd-hit-est":
        cmd += " -d 0"  # include complete defline
        if opts.samestrand:
            cmd += " -r 0"
    if not opts.fast:
        cmd += " -g 1"  # slower but more accurate clustering
    if opts.pctcov != 0:
        cmd += " -aL {0} -aS {0}".format(opts.pctcov / 100.)

    dd = fastafile + ".P{0}.cdhit".format(opts.pctid)
    clstr = dd + ".clstr"

    cmd += " -M 0 -T {0} -i {1} -o {2}".format(opts.cpus, fastafile, dd)
    if need_update(fastafile, (dd, clstr)):
        sh(cmd)

    if opts.consensus:
        cons = dd + ".consensus"
        cmd = op.join(opts.cdhit_home, "cdhit-cluster-consensus")
        cmd += " clustfile={0} fastafile={1} output={2} maxlen=1".\
                    format(clstr, fastafile, cons)
        if need_update((clstr, fastafile), cons):
            sh(cmd)

    return dd
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def extract_blocks(ubi):
    """Get a list of UBI block objects from file

    Arguments:
    Obj:ubi    -- UBI object.

    Returns:
    Dict -- Of block objects keyed by PEB number.
    """
    blocks = {}
    ubi.file.seek(ubi.file.start_offset)
    peb_count = 0
    cur_offset = 0
    bad_blocks = []

    # range instead of xrange, as xrange breaks > 4GB end_offset.
    for block_offset in range(ubi.file.start_offset, ubi.file.end_offset, ubi.file.block_size):
        try:
            buf = ubi.file.read(ubi.file.block_size)
        except Exception as e:
            if settings.warn_only_block_read_errors:
                error(extract_blocks, 'Error', 'PEB: %s: %s' % (ubi.first_peb_num + peb_count, str(e)))
                continue
            else:
                error(extract_blocks, 'Fatal', 'PEB: %s: %s' % (ubi.first_peb_num + peb_count, str(e)))

        if buf.startswith(UBI_EC_HDR_MAGIC):
            blk = description(buf)
            blk.file_offset = block_offset
            blk.peb_num = ubi.first_peb_num + peb_count
            blk.size = ubi.file.block_size
            blocks[blk.peb_num] = blk
            peb_count += 1
            log(extract_blocks, blk)
            verbose_log(extract_blocks, 'file addr: %s' % (ubi.file.last_read_addr()))

            # Record header-validation errors for this PEB, if any.
            ec_hdr_errors = ','.join(blk.ec_hdr.errors) if blk.ec_hdr.errors else ''
            vid_hdr_errors = ''
            if blk.vid_hdr and blk.vid_hdr.errors:
                vid_hdr_errors = ','.join(blk.vid_hdr.errors)

            if ec_hdr_errors or vid_hdr_errors:
                if blk.peb_num not in bad_blocks:
                    bad_blocks.append(blk.peb_num)
                log(extract_blocks, 'PEB: %s has possible issue EC_HDR [%s], VID_HDR [%s]' % (blk.peb_num, ec_hdr_errors, vid_hdr_errors))

            verbose_display(blk)
        else:
            # No EC header magic: shift the effective start of the UBI
            # image past this block.
            # NOTE(review): true division here yields a float on
            # Python 3 -- confirm whether callers expect an int.
            cur_offset += ubi.file.block_size
            ubi.first_peb_num = cur_offset/ubi.file.block_size
            ubi.file.start_offset = cur_offset

    return blocks
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def by_image_seq(blocks, image_seq):
    """Filter blocks to return only those associated with the provided image_seq number.

    Argument:
    List:blocks        -- List of block objects to sort.
    Int:image_seq      -- image_seq number found in ec_hdr.

    Returns:
    List -- List of block indexes matching image_seq number.
    """
    return [idx for idx in blocks if blocks[idx].ec_hdr.image_seq == image_seq]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def by_vol_id(blocks, slist=None):
    """Sort blocks by volume id

    Arguments:
    Obj:blocks   -- List of block objects.
    List:slist   -- (optional) List of block indexes.

    Return:
    Dict -- blocks grouped in lists with dict key as volume id.
    """
    vol_blocks = {}
    # Group valid blocks by their volume id; ordering is not reliable
    # with multiple partitions (fifo).
    for idx in blocks:
        if slist and idx not in slist:
            continue
        if not blocks[idx].is_valid:
            continue
        vol_id = blocks[idx].vid_hdr.vol_id
        vol_blocks.setdefault(vol_id, []).append(blocks[idx].peb_num)

    return vol_blocks
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def by_type(blocks, slist=None):
    """Sort blocks into layout, internal volume, data or unknown

    Arguments:
    Obj:blocks   -- List of block objects.
    List:slist   -- (optional) List of block indexes.

    Returns:
    List:layout  -- Indexes of blocks containing the volume table records.
    List:data    -- Indexes of blocks containing filesystem data.
    List:int_vol -- Indexes of blocks with internal volume ids that are
                    not layout volumes.
    List:unknown -- Indexes of blocks failing ec_hdr/vid_hdr crc checks.
    """
    layout, data, int_vol, unknown = [], [], [], []

    for idx in blocks:
        if slist and idx not in slist:
            continue
        blk = blocks[idx]
        if blk.is_vtbl and blk.is_valid:
            layout.append(idx)
        elif blk.is_internal_vol and blk.is_valid:
            int_vol.append(idx)
        elif blk.is_valid:
            data.append(idx)
        else:
            unknown.append(idx)

    return layout, data, int_vol, unknown
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_newest(blocks, layout_blocks):
    """Filter out old layout blocks from list

    Arguments:
    List:blocks        -- List of block objects
    List:layout_blocks -- List of layout block indexes

    Returns:
    List -- Newest layout blocks in list
    """
    # Iterate a snapshot of the candidates; layout_blocks itself is
    # pruned in place whenever a candidate supersedes an entry.
    candidates = list(layout_blocks)
    for cand in candidates:
        for pos in range(len(layout_blocks)):
            other = layout_blocks[pos]
            if blocks[cand].ec_hdr.image_seq != blocks[other].ec_hdr.image_seq:
                continue
            if blocks[cand].leb_num != blocks[other].leb_num:
                continue
            # Same image and LEB: keep the higher sequence number.
            if blocks[cand].vid_hdr.sqnum > blocks[other].vid_hdr.sqnum:
                del layout_blocks[pos]
                break

    return layout_blocks
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def group_pairs(blocks, layout_blocks_list):
    """Sort a list of layout blocks into pairs

    Arguments:
    List:blocks        -- List of block objects
    List:layout_blocks -- List of layout block indexes

    Returns:
    List -- Layout block pair indexes grouped in a list
    """
    # Bucket layout blocks by their image_seq number.
    image_dict = {}
    for block_id in layout_blocks_list:
        image_seq = blocks[block_id].ec_hdr.image_seq
        image_dict.setdefault(image_seq, []).append(block_id)

    log(group_pairs, 'Layout blocks found at PEBs: %s' % list(image_dict.values()))

    return list(image_dict.values())
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def associate_blocks(blocks, layout_pairs, start_peb_num):
    """Group block indexes with appropriate layout pairs

    Arguments:
    List:blocks        -- List of block objects
    List:layout_pairs  -- List of grouped layout blocks
    Int:start_peb_num  -- Number of the PEB to start from.

    Returns:
    List -- Layout block pairs grouped with associated block ranges.
    """
    # NOTE(review): start_peb_num is never read here -- confirm whether
    # callers rely on it.
    for layout_pair in layout_pairs:
        # Attach all block indexes sharing this pair's image_seq.
        matching = sort.by_image_seq(blocks, blocks[layout_pair[0]].ec_hdr.image_seq)
        layout_pair.append(matching)

    return layout_pairs
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_volumes(blocks, layout_info):
    """Get a list of UBI volume objects from list of blocks

    Arguments:
    List:blocks      -- List of layout block objects
    List:layout_info -- Layout info (indexes of layout blocks and
                        associated data blocks.)

    Returns:
    Dict -- Of Volume objects by volume name, including any relevant blocks.
    """
    volumes = {}
    vol_blocks_lists = sort.by_vol_id(blocks, layout_info[2])

    for vol_rec in blocks[layout_info[0]].vtbl_recs:
        vol_name = vol_rec.name.strip(b'\x00').decode('utf-8')
        # A volume may legitimately own no blocks; default to empty.
        peb_list = vol_blocks_lists.setdefault(vol_rec.rec_index, [])
        volumes[vol_name] = description(vol_rec.rec_index, vol_rec, peb_list)

    return volumes
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_key(key):
    """Parse node key

    Arguments:
    Str:key -- Hex string literal of node key.

    Returns:
    Int:key_type -- Type of key, data, ino, dent, etc.
    Int:ino_num  -- Inode number.
    Int:khash    -- Key hash.
    """
    hkey, lkey = struct.unpack('<II', key[0:UBIFS_SK_LEN])
    ino_num = hkey & UBIFS_S_KEY_HASH_MASK
    key_type = lkey >> UBIFS_S_KEY_BLOCK_BITS
    # The low word doubles as the key hash.
    return {'type': key_type, 'ino_num': ino_num, 'khash': lkey}
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def decompress(ctype, unc_len, data):
    """Decompress data.

    Arguments:
    Int:ctype    -- Compression type LZO, ZLIB (*currently unused*).
    Int:unc_len  -- Uncompressed data lenth.
    Str:data     -- Data to be uncompessed.

    Returns:
    Uncompressed Data.

    NOTE(review): if LZO/zlib decompression raises, the error is logged
    and the function implicitly returns None -- confirm callers handle
    that.
    """
    if ctype == UBIFS_COMPR_LZO:
        try:
            # Prepend the LZO magic byte and big-endian uncompressed
            # length expected by lzo.decompress.
            return lzo.decompress(b'\xf0' + struct.pack('>I', unc_len) + data)
        except Exception as e:
            error(decompress, 'Warn', 'LZO Error: %s' % e)
    elif ctype == UBIFS_COMPR_ZLIB:
        try:
            # Negative wbits: raw deflate stream without zlib header.
            return zlib.decompress(data, -11)
        except Exception as e:
            error(decompress, 'Warn', 'ZLib Error: %s' % e)
    else:
        return data
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def guess_leb_size(path):
    """Get LEB size from superblock

    Arguments:
    Str:path    -- Path to file.

    Returns:
    Int         -- LEB size.

    Searches file for superblock and retrieves leb size.
    """
    # `with` guarantees the handle is closed even if parsing raises;
    # the original leaked the descriptor on any exception.
    with open(path, 'rb') as f:
        f.seek(0, 2)
        file_size = f.tell() + 1
        f.seek(0)

        for _ in range(0, file_size, FILE_CHUNK_SZ):
            buf = f.read(FILE_CHUNK_SZ)

            for m in re.finditer(UBIFS_NODE_MAGIC, buf):
                start = m.start()
                chdr = nodes.common_hdr(buf[start:start + UBIFS_COMMON_HDR_SZ])

                if chdr and chdr.node_type == UBIFS_SB_NODE:
                    sb_start = start + UBIFS_COMMON_HDR_SZ
                    sb_end = sb_start + UBIFS_SB_NODE_SZ

                    if chdr.len != len(buf[sb_start:sb_end]):
                        # Superblock straddles the chunk boundary; reread it.
                        # NOTE(review): sb_start is relative to this chunk but
                        # is used as an absolute file offset here -- only
                        # correct within the first chunk; preserved as-is.
                        f.seek(sb_start)
                        buf = f.read(UBIFS_SB_NODE_SZ)
                    else:
                        buf = buf[sb_start:sb_end]

                    sbn = nodes.sb_node(buf)
                    return sbn.leb_size

    # No superblock found.
    return None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def guess_peb_size(path):
    """Determine the most likely block size

    Arguments:
    Str:path    -- Path to file.

    Returns:
    Int         -- PEB size.

    Searches file for Magic Number, picks most common length between them.
    """
    file_offset = 0
    offsets = []

    with open(path, 'rb') as f:
        f.seek(0, 2)
        file_size = f.tell() + 1
        f.seek(0)

        for _ in range(0, file_size, FILE_CHUNK_SZ):
            buf = f.read(FILE_CHUNK_SZ)

            for m in re.finditer(UBI_EC_HDR_MAGIC, buf):
                start = m.start()

                if not file_offset:
                    file_offset = start
                    idx = start
                else:
                    idx = start + file_offset

                offsets.append(idx)

            file_offset += FILE_CHUNK_SZ

    # Tally the gaps between consecutive magic numbers. The first offset
    # stands on its own: the original computed offsets[0] - offsets[-1]
    # for i == 0 (a negative, bogus gap) because the bare `except` it
    # relied on never fires for negative indexing.
    occurances = {}
    for i in range(len(offsets)):
        diff = offsets[i] - offsets[i - 1] if i > 0 else offsets[i]
        if diff not in occurances:
            occurances[diff] = 0
        occurances[diff] += 1

    # Pick the most frequent gap as the PEB size.
    most_frequent = 0
    block_size = None
    for offset in occurances:
        if occurances[offset] > most_frequent:
            most_frequent = occurances[offset]
            block_size = offset

    return block_size
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.