repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
tanghaibao/jcvi | jcvi/formats/bed.py | chain | def chain(args):
"""
%prog chain bedfile
Chain BED segments together.
"""
p = OptionParser(chain.__doc__)
p.add_option("--dist", default=100000, help="Chaining distance")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
bedfile, = args
cmd = "sort -k4,4 -k1,1 -k2,2n -k3,3n {0} -o {0}".format(bedfile)
sh(cmd)
bed = Bed(bedfile, sorted=False)
newbed = Bed()
for accn, bb in groupby(bed, key=lambda x: x.accn):
bb = list(bb)
g = Grouper()
for a in bb:
g.join(a)
for a, b in pairwise(bb):
if a.seqid == b.seqid and b.start - a.end < opts.dist:
g.join(a, b)
data = []
for p in g:
seqid = p[0].seqid
start = min(x.start for x in p)
end = max(x.end for x in p)
score = sum(x.span for x in p)
data.append((seqid, start - 1, end, accn, score))
d = max(data, key=lambda x: x[-1])
newbed.append(BedLine("\t".join(str(x) for x in d)))
newbed.print_to_file(opts.outfile, sorted=True) | python | def chain(args):
"""
%prog chain bedfile
Chain BED segments together.
"""
p = OptionParser(chain.__doc__)
p.add_option("--dist", default=100000, help="Chaining distance")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
bedfile, = args
cmd = "sort -k4,4 -k1,1 -k2,2n -k3,3n {0} -o {0}".format(bedfile)
sh(cmd)
bed = Bed(bedfile, sorted=False)
newbed = Bed()
for accn, bb in groupby(bed, key=lambda x: x.accn):
bb = list(bb)
g = Grouper()
for a in bb:
g.join(a)
for a, b in pairwise(bb):
if a.seqid == b.seqid and b.start - a.end < opts.dist:
g.join(a, b)
data = []
for p in g:
seqid = p[0].seqid
start = min(x.start for x in p)
end = max(x.end for x in p)
score = sum(x.span for x in p)
data.append((seqid, start - 1, end, accn, score))
d = max(data, key=lambda x: x[-1])
newbed.append(BedLine("\t".join(str(x) for x in d)))
newbed.print_to_file(opts.outfile, sorted=True) | [
"def",
"chain",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"chain",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--dist\"",
",",
"default",
"=",
"100000",
",",
"help",
"=",
"\"Chaining distance\"",
")",
"p",
".",
"set_outfile",
"(",
... | %prog chain bedfile
Chain BED segments together. | [
"%prog",
"chain",
"bedfile"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/bed.py#L539-L577 | train | 201,000 |
tanghaibao/jcvi | jcvi/formats/bed.py | density | def density(args):
"""
%prog density bedfile ref.fasta
Calculates density of features per seqid.
"""
p = OptionParser(density.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bedfile, fastafile = args
bed = Bed(bedfile)
sizes = Sizes(fastafile).mapping
header = "seqid features size density_per_Mb".split()
print("\t".join(header))
for seqid, bb in bed.sub_beds():
nfeats = len(bb)
size = sizes[seqid]
ds = nfeats * 1e6 / size
print("\t".join(str(x) for x in \
(seqid, nfeats, size, "{0:.1f}".format(ds)))) | python | def density(args):
"""
%prog density bedfile ref.fasta
Calculates density of features per seqid.
"""
p = OptionParser(density.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bedfile, fastafile = args
bed = Bed(bedfile)
sizes = Sizes(fastafile).mapping
header = "seqid features size density_per_Mb".split()
print("\t".join(header))
for seqid, bb in bed.sub_beds():
nfeats = len(bb)
size = sizes[seqid]
ds = nfeats * 1e6 / size
print("\t".join(str(x) for x in \
(seqid, nfeats, size, "{0:.1f}".format(ds)))) | [
"def",
"density",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"density",
".",
"__doc__",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"2",
":",
"sys",
".",
"exit",
"(",
... | %prog density bedfile ref.fasta
Calculates density of features per seqid. | [
"%prog",
"density",
"bedfile",
"ref",
".",
"fasta"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/bed.py#L580-L602 | train | 201,001 |
tanghaibao/jcvi | jcvi/formats/bed.py | alignextend | def alignextend(args):
"""
%prog alignextend bedpefile ref.fasta
Similar idea to alignextend, using mates from BEDPE and FASTA ref. See AMOS
script here:
https://github.com/nathanhaigh/amos/blob/master/src/Experimental/alignextend.pl
"""
p = OptionParser(alignextend.__doc__)
p.add_option("--len", default=100, type="int",
help="Extend to this length")
p.add_option("--qv", default=31, type="int",
help="Dummy qv score for extended bases")
p.add_option("--bedonly", default=False, action="store_true",
help="Only generate bed files, no FASTA")
p.set_bedpe()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bedpe, ref = args
qvchar = chr(opts.qv + 33)
pf = bedpe.split(".")[0]
filtered = bedpe + ".filtered"
if need_update(bedpe, filtered):
filter_bedpe(bedpe, filtered, ref, rc=opts.rc,
minlen=opts.minlen, maxlen=opts.maxlen, rlen=opts.rlen)
rmdup = filtered + ".filtered.sorted.rmdup"
if need_update(filtered, rmdup):
rmdup_bedpe(filtered, rmdup, dupwiggle=opts.dup)
if opts.bedonly:
return
bed1, bed2 = pf + ".1e.bed", pf + ".2e.bed"
if need_update(rmdup, (bed1, bed2)):
sh("cut -f1-3,7-9 {0}".format(rmdup), outfile=bed1)
sh("cut -f4-6,7-8,10 {0}".format(rmdup), outfile=bed2)
sfa1, sfa2 = pf + ".1e.sfa", pf + ".2e.sfa"
if need_update((bed1, bed2, ref), (sfa1, sfa2)):
for bed in (bed1, bed2):
fastaFromBed(bed, ref, name=True, tab=True, stranded=True)
fq1, fq2 = pf + ".1e.fq", pf + ".2e.fq"
if need_update((sfa1, sfa2), (fq1, fq2)):
for sfa in (sfa1, sfa2):
sfa_to_fq(sfa, qvchar) | python | def alignextend(args):
"""
%prog alignextend bedpefile ref.fasta
Similar idea to alignextend, using mates from BEDPE and FASTA ref. See AMOS
script here:
https://github.com/nathanhaigh/amos/blob/master/src/Experimental/alignextend.pl
"""
p = OptionParser(alignextend.__doc__)
p.add_option("--len", default=100, type="int",
help="Extend to this length")
p.add_option("--qv", default=31, type="int",
help="Dummy qv score for extended bases")
p.add_option("--bedonly", default=False, action="store_true",
help="Only generate bed files, no FASTA")
p.set_bedpe()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bedpe, ref = args
qvchar = chr(opts.qv + 33)
pf = bedpe.split(".")[0]
filtered = bedpe + ".filtered"
if need_update(bedpe, filtered):
filter_bedpe(bedpe, filtered, ref, rc=opts.rc,
minlen=opts.minlen, maxlen=opts.maxlen, rlen=opts.rlen)
rmdup = filtered + ".filtered.sorted.rmdup"
if need_update(filtered, rmdup):
rmdup_bedpe(filtered, rmdup, dupwiggle=opts.dup)
if opts.bedonly:
return
bed1, bed2 = pf + ".1e.bed", pf + ".2e.bed"
if need_update(rmdup, (bed1, bed2)):
sh("cut -f1-3,7-9 {0}".format(rmdup), outfile=bed1)
sh("cut -f4-6,7-8,10 {0}".format(rmdup), outfile=bed2)
sfa1, sfa2 = pf + ".1e.sfa", pf + ".2e.sfa"
if need_update((bed1, bed2, ref), (sfa1, sfa2)):
for bed in (bed1, bed2):
fastaFromBed(bed, ref, name=True, tab=True, stranded=True)
fq1, fq2 = pf + ".1e.fq", pf + ".2e.fq"
if need_update((sfa1, sfa2), (fq1, fq2)):
for sfa in (sfa1, sfa2):
sfa_to_fq(sfa, qvchar) | [
"def",
"alignextend",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"alignextend",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--len\"",
",",
"default",
"=",
"100",
",",
"type",
"=",
"\"int\"",
",",
"help",
"=",
"\"Extend to this length\""... | %prog alignextend bedpefile ref.fasta
Similar idea to alignextend, using mates from BEDPE and FASTA ref. See AMOS
script here:
https://github.com/nathanhaigh/amos/blob/master/src/Experimental/alignextend.pl | [
"%prog",
"alignextend",
"bedpefile",
"ref",
".",
"fasta"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/bed.py#L726-L777 | train | 201,002 |
tanghaibao/jcvi | jcvi/formats/bed.py | seqids | def seqids(args):
"""
%prog seqids bedfile
Print out all seqids on one line. Useful for graphics.karyotype.
"""
p = OptionParser(seqids.__doc__)
p.add_option("--maxn", default=100, type="int",
help="Maximum number of seqids")
p.add_option("--prefix", help="Seqids must start with")
p.add_option("--exclude", default="random", help="Seqids should not contain")
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
bedfile, = args
pf = opts.prefix
exclude = opts.exclude
bed = Bed(bedfile)
s = bed.seqids
if pf:
s = [x for x in s if x.startswith(pf)]
if exclude:
s = [x for x in s if not exclude in x]
s = s[:opts.maxn]
print(",".join(s)) | python | def seqids(args):
"""
%prog seqids bedfile
Print out all seqids on one line. Useful for graphics.karyotype.
"""
p = OptionParser(seqids.__doc__)
p.add_option("--maxn", default=100, type="int",
help="Maximum number of seqids")
p.add_option("--prefix", help="Seqids must start with")
p.add_option("--exclude", default="random", help="Seqids should not contain")
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
bedfile, = args
pf = opts.prefix
exclude = opts.exclude
bed = Bed(bedfile)
s = bed.seqids
if pf:
s = [x for x in s if x.startswith(pf)]
if exclude:
s = [x for x in s if not exclude in x]
s = s[:opts.maxn]
print(",".join(s)) | [
"def",
"seqids",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"seqids",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--maxn\"",
",",
"default",
"=",
"100",
",",
"type",
"=",
"\"int\"",
",",
"help",
"=",
"\"Maximum number of seqids\"",
")... | %prog seqids bedfile
Print out all seqids on one line. Useful for graphics.karyotype. | [
"%prog",
"seqids",
"bedfile"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/bed.py#L780-L806 | train | 201,003 |
tanghaibao/jcvi | jcvi/formats/bed.py | random | def random(args):
"""
%prog random bedfile number_of_features
Extract a random subset of features. Number of features can be an integer
number, or a fractional number in which case a random fraction (for example
0.1 = 10% of all features) will be extracted.
"""
from random import sample
from jcvi.formats.base import flexible_cast
p = OptionParser(random.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bedfile, N = args
assert is_number(N)
b = Bed(bedfile)
NN = flexible_cast(N)
if NN < 1:
NN = int(round(NN * len(b)))
beds = sample(b, NN)
new_bed = Bed()
new_bed.extend(beds)
outfile = bedfile.rsplit(".", 1)[0] + ".{0}.bed".format(N)
new_bed.print_to_file(outfile)
logging.debug("Write {0} features to `{1}`".format(NN, outfile)) | python | def random(args):
"""
%prog random bedfile number_of_features
Extract a random subset of features. Number of features can be an integer
number, or a fractional number in which case a random fraction (for example
0.1 = 10% of all features) will be extracted.
"""
from random import sample
from jcvi.formats.base import flexible_cast
p = OptionParser(random.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bedfile, N = args
assert is_number(N)
b = Bed(bedfile)
NN = flexible_cast(N)
if NN < 1:
NN = int(round(NN * len(b)))
beds = sample(b, NN)
new_bed = Bed()
new_bed.extend(beds)
outfile = bedfile.rsplit(".", 1)[0] + ".{0}.bed".format(N)
new_bed.print_to_file(outfile)
logging.debug("Write {0} features to `{1}`".format(NN, outfile)) | [
"def",
"random",
"(",
"args",
")",
":",
"from",
"random",
"import",
"sample",
"from",
"jcvi",
".",
"formats",
".",
"base",
"import",
"flexible_cast",
"p",
"=",
"OptionParser",
"(",
"random",
".",
"__doc__",
")",
"opts",
",",
"args",
"=",
"p",
".",
"par... | %prog random bedfile number_of_features
Extract a random subset of features. Number of features can be an integer
number, or a fractional number in which case a random fraction (for example
0.1 = 10% of all features) will be extracted. | [
"%prog",
"random",
"bedfile",
"number_of_features"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/bed.py#L857-L888 | train | 201,004 |
tanghaibao/jcvi | jcvi/formats/bed.py | filter | def filter(args):
"""
%prog filter bedfile
Filter the bedfile to retain records between certain size range.
"""
p = OptionParser(filter.__doc__)
p.add_option("--minsize", default=0, type="int",
help="Minimum feature length")
p.add_option("--maxsize", default=1000000000, type="int",
help="Minimum feature length")
p.add_option("--minaccn", type="int",
help="Minimum value of accn, useful to filter based on coverage")
p.add_option("--minscore", type="int", help="Minimum score")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
bedfile, = args
fp = must_open(bedfile)
fw = must_open(opts.outfile, "w")
minsize, maxsize = opts.minsize, opts.maxsize
minaccn = opts.minaccn
minscore = opts.minscore
total = []
keep = []
for row in fp:
try:
b = BedLine(row)
except IndexError:
print(row.strip(), file=fw)
continue
span = b.span
total.append(span)
if not minsize <= span <= maxsize:
continue
if minaccn and int(b.accn) < minaccn:
continue
if minscore and int(b.score) < minscore:
continue
print(b, file=fw)
keep.append(span)
logging.debug("Stats: {0} features kept.".\
format(percentage(len(keep), len(total))))
logging.debug("Stats: {0} bases kept.".\
format(percentage(sum(keep), sum(total)))) | python | def filter(args):
"""
%prog filter bedfile
Filter the bedfile to retain records between certain size range.
"""
p = OptionParser(filter.__doc__)
p.add_option("--minsize", default=0, type="int",
help="Minimum feature length")
p.add_option("--maxsize", default=1000000000, type="int",
help="Minimum feature length")
p.add_option("--minaccn", type="int",
help="Minimum value of accn, useful to filter based on coverage")
p.add_option("--minscore", type="int", help="Minimum score")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
bedfile, = args
fp = must_open(bedfile)
fw = must_open(opts.outfile, "w")
minsize, maxsize = opts.minsize, opts.maxsize
minaccn = opts.minaccn
minscore = opts.minscore
total = []
keep = []
for row in fp:
try:
b = BedLine(row)
except IndexError:
print(row.strip(), file=fw)
continue
span = b.span
total.append(span)
if not minsize <= span <= maxsize:
continue
if minaccn and int(b.accn) < minaccn:
continue
if minscore and int(b.score) < minscore:
continue
print(b, file=fw)
keep.append(span)
logging.debug("Stats: {0} features kept.".\
format(percentage(len(keep), len(total))))
logging.debug("Stats: {0} bases kept.".\
format(percentage(sum(keep), sum(total)))) | [
"def",
"filter",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"filter",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--minsize\"",
",",
"default",
"=",
"0",
",",
"type",
"=",
"\"int\"",
",",
"help",
"=",
"\"Minimum feature length\"",
")"... | %prog filter bedfile
Filter the bedfile to retain records between certain size range. | [
"%prog",
"filter",
"bedfile"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/bed.py#L891-L940 | train | 201,005 |
tanghaibao/jcvi | jcvi/formats/bed.py | mergebydepth | def mergebydepth(args):
"""
%prog mergebydepth reads.bed genome.fasta
Similar to mergeBed, but only returns regions beyond certain depth.
"""
p = OptionParser(mergebydepth.__doc__)
p.add_option("--mindepth", default=3, type="int",
help="Minimum depth required")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bedfile, fastafile = args
mindepth = opts.mindepth
bedgraph = make_bedgraph(bedfile)
bedgraphfiltered = bedgraph + ".d{0}".format(mindepth)
if need_update(bedgraph, bedgraphfiltered):
filter([bedgraph, "--minaccn={0}".format(mindepth),
"--outfile={0}".format(bedgraphfiltered)])
merged = bedgraphfiltered + ".merge.fasta"
if need_update(bedgraphfiltered, merged):
mergeBed(bedgraphfiltered, sorted=True) | python | def mergebydepth(args):
"""
%prog mergebydepth reads.bed genome.fasta
Similar to mergeBed, but only returns regions beyond certain depth.
"""
p = OptionParser(mergebydepth.__doc__)
p.add_option("--mindepth", default=3, type="int",
help="Minimum depth required")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bedfile, fastafile = args
mindepth = opts.mindepth
bedgraph = make_bedgraph(bedfile)
bedgraphfiltered = bedgraph + ".d{0}".format(mindepth)
if need_update(bedgraph, bedgraphfiltered):
filter([bedgraph, "--minaccn={0}".format(mindepth),
"--outfile={0}".format(bedgraphfiltered)])
merged = bedgraphfiltered + ".merge.fasta"
if need_update(bedgraphfiltered, merged):
mergeBed(bedgraphfiltered, sorted=True) | [
"def",
"mergebydepth",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"mergebydepth",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--mindepth\"",
",",
"default",
"=",
"3",
",",
"type",
"=",
"\"int\"",
",",
"help",
"=",
"\"Minimum depth requi... | %prog mergebydepth reads.bed genome.fasta
Similar to mergeBed, but only returns regions beyond certain depth. | [
"%prog",
"mergebydepth",
"reads",
".",
"bed",
"genome",
".",
"fasta"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/bed.py#L956-L981 | train | 201,006 |
tanghaibao/jcvi | jcvi/formats/bed.py | depth | def depth(args):
"""
%prog depth reads.bed features.bed
Calculate depth depth per feature using coverageBed.
"""
p = OptionParser(depth.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
readsbed, featsbed = args
fp = open(featsbed)
nargs = len(fp.readline().split("\t"))
keepcols = ",".join(str(x) for x in range(1, nargs + 1))
cmd = "coverageBed -a {0} -b {1} -d".format(readsbed, featsbed)
cmd += " | groupBy -g {0} -c {1} -o mean".format(keepcols, nargs + 2)
sh(cmd, outfile=opts.outfile) | python | def depth(args):
"""
%prog depth reads.bed features.bed
Calculate depth depth per feature using coverageBed.
"""
p = OptionParser(depth.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
readsbed, featsbed = args
fp = open(featsbed)
nargs = len(fp.readline().split("\t"))
keepcols = ",".join(str(x) for x in range(1, nargs + 1))
cmd = "coverageBed -a {0} -b {1} -d".format(readsbed, featsbed)
cmd += " | groupBy -g {0} -c {1} -o mean".format(keepcols, nargs + 2)
sh(cmd, outfile=opts.outfile) | [
"def",
"depth",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"depth",
".",
"__doc__",
")",
"p",
".",
"set_outfile",
"(",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"2",
... | %prog depth reads.bed features.bed
Calculate depth depth per feature using coverageBed. | [
"%prog",
"depth",
"reads",
".",
"bed",
"features",
".",
"bed"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/bed.py#L984-L1003 | train | 201,007 |
tanghaibao/jcvi | jcvi/formats/bed.py | remove_isoforms | def remove_isoforms(ids):
"""
This is more or less a hack to remove the GMAP multiple mappings. Multiple
GMAP mappings can be seen given the names .mrna1, .mrna2, etc.
"""
key = lambda x: x.rsplit(".", 1)[0]
iso_number = lambda x: get_number(x.split(".")[-1])
ids = sorted(ids, key=key)
newids = []
for k, ii in groupby(ids, key=key):
min_i = min(list(ii), key=iso_number)
newids.append(min_i)
return newids | python | def remove_isoforms(ids):
"""
This is more or less a hack to remove the GMAP multiple mappings. Multiple
GMAP mappings can be seen given the names .mrna1, .mrna2, etc.
"""
key = lambda x: x.rsplit(".", 1)[0]
iso_number = lambda x: get_number(x.split(".")[-1])
ids = sorted(ids, key=key)
newids = []
for k, ii in groupby(ids, key=key):
min_i = min(list(ii), key=iso_number)
newids.append(min_i)
return newids | [
"def",
"remove_isoforms",
"(",
"ids",
")",
":",
"key",
"=",
"lambda",
"x",
":",
"x",
".",
"rsplit",
"(",
"\".\"",
",",
"1",
")",
"[",
"0",
"]",
"iso_number",
"=",
"lambda",
"x",
":",
"get_number",
"(",
"x",
".",
"split",
"(",
"\".\"",
")",
"[",
... | This is more or less a hack to remove the GMAP multiple mappings. Multiple
GMAP mappings can be seen given the names .mrna1, .mrna2, etc. | [
"This",
"is",
"more",
"or",
"less",
"a",
"hack",
"to",
"remove",
"the",
"GMAP",
"multiple",
"mappings",
".",
"Multiple",
"GMAP",
"mappings",
"can",
"be",
"seen",
"given",
"the",
"names",
".",
"mrna1",
".",
"mrna2",
"etc",
"."
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/bed.py#L1006-L1018 | train | 201,008 |
tanghaibao/jcvi | jcvi/formats/bed.py | longest | def longest(args):
"""
%prog longest bedfile fastafile
Select longest feature within overlapping piles.
"""
from jcvi.formats.sizes import Sizes
p = OptionParser(longest.__doc__)
p.add_option("--maxsize", default=20000, type="int",
help="Limit max size")
p.add_option("--minsize", default=60, type="int",
help="Limit min size")
p.add_option("--precedence", default="Medtr",
help="Accessions with prefix take precedence")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bedfile, fastafile = args
maxsize = opts.maxsize
minsize = opts.minsize
prec = opts.precedence
mergedbed = mergeBed(bedfile, nms=True)
sizes = Sizes(fastafile).mapping
bed = Bed(mergedbed)
pf = bedfile.rsplit(".", 1)[0]
ids = set()
for b in bed:
accns = b.accn.split(";")
prec_accns = [x for x in accns if x.startswith(prec)]
if prec_accns:
accns = prec_accns
accn_sizes = [(sizes.get(x, 0), x) for x in accns]
accn_sizes = [(size, x) for size, x in accn_sizes if size < maxsize]
if not accn_sizes:
continue
max_size, max_accn = max(accn_sizes)
if max_size < minsize:
continue
ids.add(max_accn)
newids = remove_isoforms(ids)
logging.debug("Remove isoforms: before={0} after={1}".\
format(len(ids), len(newids)))
longestidsfile = pf + ".longest.ids"
fw = open(longestidsfile, "w")
print("\n".join(newids), file=fw)
fw.close()
logging.debug("A total of {0} records written to `{1}`.".\
format(len(newids), longestidsfile))
longestbedfile = pf + ".longest.bed"
some([bedfile, longestidsfile, "--outfile={0}".format(longestbedfile),
"--no_strip_names"]) | python | def longest(args):
"""
%prog longest bedfile fastafile
Select longest feature within overlapping piles.
"""
from jcvi.formats.sizes import Sizes
p = OptionParser(longest.__doc__)
p.add_option("--maxsize", default=20000, type="int",
help="Limit max size")
p.add_option("--minsize", default=60, type="int",
help="Limit min size")
p.add_option("--precedence", default="Medtr",
help="Accessions with prefix take precedence")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bedfile, fastafile = args
maxsize = opts.maxsize
minsize = opts.minsize
prec = opts.precedence
mergedbed = mergeBed(bedfile, nms=True)
sizes = Sizes(fastafile).mapping
bed = Bed(mergedbed)
pf = bedfile.rsplit(".", 1)[0]
ids = set()
for b in bed:
accns = b.accn.split(";")
prec_accns = [x for x in accns if x.startswith(prec)]
if prec_accns:
accns = prec_accns
accn_sizes = [(sizes.get(x, 0), x) for x in accns]
accn_sizes = [(size, x) for size, x in accn_sizes if size < maxsize]
if not accn_sizes:
continue
max_size, max_accn = max(accn_sizes)
if max_size < minsize:
continue
ids.add(max_accn)
newids = remove_isoforms(ids)
logging.debug("Remove isoforms: before={0} after={1}".\
format(len(ids), len(newids)))
longestidsfile = pf + ".longest.ids"
fw = open(longestidsfile, "w")
print("\n".join(newids), file=fw)
fw.close()
logging.debug("A total of {0} records written to `{1}`.".\
format(len(newids), longestidsfile))
longestbedfile = pf + ".longest.bed"
some([bedfile, longestidsfile, "--outfile={0}".format(longestbedfile),
"--no_strip_names"]) | [
"def",
"longest",
"(",
"args",
")",
":",
"from",
"jcvi",
".",
"formats",
".",
"sizes",
"import",
"Sizes",
"p",
"=",
"OptionParser",
"(",
"longest",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--maxsize\"",
",",
"default",
"=",
"20000",
",",
"t... | %prog longest bedfile fastafile
Select longest feature within overlapping piles. | [
"%prog",
"longest",
"bedfile",
"fastafile"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/bed.py#L1021-L1078 | train | 201,009 |
tanghaibao/jcvi | jcvi/formats/bed.py | merge | def merge(args):
"""
%prog merge bedfiles > newbedfile
Concatenate bed files together. Performing seqid and name changes to avoid
conflicts in the new bed file.
"""
p = OptionParser(merge.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
bedfiles = args
fw = must_open(opts.outfile, "w")
for bedfile in bedfiles:
bed = Bed(bedfile)
pf = op.basename(bedfile).split(".")[0]
for b in bed:
b.seqid = "_".join((pf, b.seqid))
print(b, file=fw) | python | def merge(args):
"""
%prog merge bedfiles > newbedfile
Concatenate bed files together. Performing seqid and name changes to avoid
conflicts in the new bed file.
"""
p = OptionParser(merge.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
bedfiles = args
fw = must_open(opts.outfile, "w")
for bedfile in bedfiles:
bed = Bed(bedfile)
pf = op.basename(bedfile).split(".")[0]
for b in bed:
b.seqid = "_".join((pf, b.seqid))
print(b, file=fw) | [
"def",
"merge",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"merge",
".",
"__doc__",
")",
"p",
".",
"set_outfile",
"(",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"<",
"1",
... | %prog merge bedfiles > newbedfile
Concatenate bed files together. Performing seqid and name changes to avoid
conflicts in the new bed file. | [
"%prog",
"merge",
"bedfiles",
">",
"newbedfile"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/bed.py#L1081-L1102 | train | 201,010 |
tanghaibao/jcvi | jcvi/formats/bed.py | fix | def fix(args):
"""
%prog fix bedfile > newbedfile
Fix non-standard bed files. One typical problem is start > end.
"""
p = OptionParser(fix.__doc__)
p.add_option("--minspan", default=0, type="int",
help="Enforce minimum span [default: %default]")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
bedfile, = args
minspan = opts.minspan
fp = open(bedfile)
fw = must_open(opts.outfile, "w")
nfixed = nfiltered = ntotal = 0
for row in fp:
atoms = row.strip().split("\t")
assert len(atoms) >= 3, "Must be at least 3 columns"
seqid, start, end = atoms[:3]
start, end = int(start), int(end)
orientation = '+'
if start > end:
start, end = end, start
orientation = '-'
nfixed += 1
atoms[1:3] = [str(start), str(end)]
if len(atoms) > 6:
atoms[6] = orientation
line = "\t".join(atoms)
b = BedLine(line)
if b.span >= minspan:
print(b, file=fw)
nfiltered += 1
ntotal += 1
if nfixed:
logging.debug("Total fixed: {0}".format(percentage(nfixed, ntotal)))
if nfiltered:
logging.debug("Total filtered: {0}".format(percentage(nfiltered, ntotal))) | python | def fix(args):
"""
%prog fix bedfile > newbedfile
Fix non-standard bed files. One typical problem is start > end.
"""
p = OptionParser(fix.__doc__)
p.add_option("--minspan", default=0, type="int",
help="Enforce minimum span [default: %default]")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
bedfile, = args
minspan = opts.minspan
fp = open(bedfile)
fw = must_open(opts.outfile, "w")
nfixed = nfiltered = ntotal = 0
for row in fp:
atoms = row.strip().split("\t")
assert len(atoms) >= 3, "Must be at least 3 columns"
seqid, start, end = atoms[:3]
start, end = int(start), int(end)
orientation = '+'
if start > end:
start, end = end, start
orientation = '-'
nfixed += 1
atoms[1:3] = [str(start), str(end)]
if len(atoms) > 6:
atoms[6] = orientation
line = "\t".join(atoms)
b = BedLine(line)
if b.span >= minspan:
print(b, file=fw)
nfiltered += 1
ntotal += 1
if nfixed:
logging.debug("Total fixed: {0}".format(percentage(nfixed, ntotal)))
if nfiltered:
logging.debug("Total filtered: {0}".format(percentage(nfiltered, ntotal))) | [
"def",
"fix",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"fix",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--minspan\"",
",",
"default",
"=",
"0",
",",
"type",
"=",
"\"int\"",
",",
"help",
"=",
"\"Enforce minimum span [default: %defaul... | %prog fix bedfile > newbedfile
Fix non-standard bed files. One typical problem is start > end. | [
"%prog",
"fix",
"bedfile",
">",
"newbedfile"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/bed.py#L1105-L1151 | train | 201,011 |
tanghaibao/jcvi | jcvi/formats/bed.py | some | def some(args):
"""
%prog some bedfile idsfile > newbedfile
Retrieve a subset of bed features given a list of ids.
"""
from jcvi.formats.base import SetFile
from jcvi.utils.cbook import gene_name
p = OptionParser(some.__doc__)
p.add_option("-v", dest="inverse", default=False, action="store_true",
help="Get the inverse, like grep -v [default: %default]")
p.set_outfile()
p.set_stripnames()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bedfile, idsfile = args
inverse = opts.inverse
ostrip = opts.strip_names
fw = must_open(opts.outfile, "w")
ids = SetFile(idsfile)
if ostrip:
ids = set(gene_name(x) for x in ids)
bed = Bed(bedfile)
ntotal = nkeep = 0
for b in bed:
ntotal += 1
keep = b.accn in ids
if inverse:
keep = not keep
if keep:
nkeep += 1
print(b, file=fw)
fw.close()
logging.debug("Stats: {0} features kept.".\
format(percentage(nkeep, ntotal))) | python | def some(args):
"""
%prog some bedfile idsfile > newbedfile
Retrieve a subset of bed features given a list of ids.
"""
from jcvi.formats.base import SetFile
from jcvi.utils.cbook import gene_name
p = OptionParser(some.__doc__)
p.add_option("-v", dest="inverse", default=False, action="store_true",
help="Get the inverse, like grep -v [default: %default]")
p.set_outfile()
p.set_stripnames()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bedfile, idsfile = args
inverse = opts.inverse
ostrip = opts.strip_names
fw = must_open(opts.outfile, "w")
ids = SetFile(idsfile)
if ostrip:
ids = set(gene_name(x) for x in ids)
bed = Bed(bedfile)
ntotal = nkeep = 0
for b in bed:
ntotal += 1
keep = b.accn in ids
if inverse:
keep = not keep
if keep:
nkeep += 1
print(b, file=fw)
fw.close()
logging.debug("Stats: {0} features kept.".\
format(percentage(nkeep, ntotal))) | [
"def",
"some",
"(",
"args",
")",
":",
"from",
"jcvi",
".",
"formats",
".",
"base",
"import",
"SetFile",
"from",
"jcvi",
".",
"utils",
".",
"cbook",
"import",
"gene_name",
"p",
"=",
"OptionParser",
"(",
"some",
".",
"__doc__",
")",
"p",
".",
"add_option... | %prog some bedfile idsfile > newbedfile
Retrieve a subset of bed features given a list of ids. | [
"%prog",
"some",
"bedfile",
"idsfile",
">",
"newbedfile"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/bed.py#L1154-L1195 | train | 201,012 |
tanghaibao/jcvi | jcvi/formats/bed.py | uniq | def uniq(args):
"""
%prog uniq bedfile
Remove overlapping features with higher scores.
"""
from jcvi.formats.sizes import Sizes
p = OptionParser(uniq.__doc__)
p.add_option("--sizes", help="Use sequence length as score")
p.add_option("--mode", default="span", choices=("span", "score"),
help="Pile mode")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
bedfile, = args
uniqbedfile = bedfile.split(".")[0] + ".uniq.bed"
bed = Bed(bedfile)
if opts.sizes:
sizes = Sizes(opts.sizes).mapping
ranges = [Range(x.seqid, x.start, x.end, sizes[x.accn], i) \
for i, x in enumerate(bed)]
else:
if opts.mode == "span":
ranges = [Range(x.seqid, x.start, x.end, x.end - x.start + 1, i) \
for i, x in enumerate(bed)]
else:
ranges = [Range(x.seqid, x.start, x.end, float(x.score), i) \
for i, x in enumerate(bed)]
selected, score = range_chain(ranges)
selected = [x.id for x in selected]
selected_ids = set(selected)
selected = [bed[x] for x in selected]
notselected = [x for i, x in enumerate(bed) if i not in selected_ids]
newbed = Bed()
newbed.extend(selected)
newbed.print_to_file(uniqbedfile, sorted=True)
if notselected:
leftoverfile = bedfile.split(".")[0] + ".leftover.bed"
leftoverbed = Bed()
leftoverbed.extend(notselected)
leftoverbed.print_to_file(leftoverfile, sorted=True)
logging.debug("Imported: {0}, Exported: {1}".format(len(bed), len(newbed)))
return uniqbedfile | python | def uniq(args):
"""
%prog uniq bedfile
Remove overlapping features with higher scores.
"""
from jcvi.formats.sizes import Sizes
p = OptionParser(uniq.__doc__)
p.add_option("--sizes", help="Use sequence length as score")
p.add_option("--mode", default="span", choices=("span", "score"),
help="Pile mode")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
bedfile, = args
uniqbedfile = bedfile.split(".")[0] + ".uniq.bed"
bed = Bed(bedfile)
if opts.sizes:
sizes = Sizes(opts.sizes).mapping
ranges = [Range(x.seqid, x.start, x.end, sizes[x.accn], i) \
for i, x in enumerate(bed)]
else:
if opts.mode == "span":
ranges = [Range(x.seqid, x.start, x.end, x.end - x.start + 1, i) \
for i, x in enumerate(bed)]
else:
ranges = [Range(x.seqid, x.start, x.end, float(x.score), i) \
for i, x in enumerate(bed)]
selected, score = range_chain(ranges)
selected = [x.id for x in selected]
selected_ids = set(selected)
selected = [bed[x] for x in selected]
notselected = [x for i, x in enumerate(bed) if i not in selected_ids]
newbed = Bed()
newbed.extend(selected)
newbed.print_to_file(uniqbedfile, sorted=True)
if notselected:
leftoverfile = bedfile.split(".")[0] + ".leftover.bed"
leftoverbed = Bed()
leftoverbed.extend(notselected)
leftoverbed.print_to_file(leftoverfile, sorted=True)
logging.debug("Imported: {0}, Exported: {1}".format(len(bed), len(newbed)))
return uniqbedfile | [
"def",
"uniq",
"(",
"args",
")",
":",
"from",
"jcvi",
".",
"formats",
".",
"sizes",
"import",
"Sizes",
"p",
"=",
"OptionParser",
"(",
"uniq",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--sizes\"",
",",
"help",
"=",
"\"Use sequence length as score... | %prog uniq bedfile
Remove overlapping features with higher scores. | [
"%prog",
"uniq",
"bedfile"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/bed.py#L1198-L1249 | train | 201,013 |
tanghaibao/jcvi | jcvi/formats/bed.py | pile | def pile(args):
"""
%prog pile abedfile bbedfile > piles
Call intersectBed on two bedfiles.
"""
from jcvi.utils.grouper import Grouper
p = OptionParser(pile.__doc__)
p.add_option("--minOverlap", default=0, type="int",
help="Minimum overlap required [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
abedfile, bbedfile = args
iw = intersectBed_wao(abedfile, bbedfile, minOverlap=opts.minOverlap)
groups = Grouper()
for a, b in iw:
groups.join(a.accn, b.accn)
ngroups = 0
for group in groups:
if len(group) > 1:
ngroups += 1
print("|".join(group))
logging.debug("A total of {0} piles (>= 2 members)".format(ngroups)) | python | def pile(args):
"""
%prog pile abedfile bbedfile > piles
Call intersectBed on two bedfiles.
"""
from jcvi.utils.grouper import Grouper
p = OptionParser(pile.__doc__)
p.add_option("--minOverlap", default=0, type="int",
help="Minimum overlap required [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
abedfile, bbedfile = args
iw = intersectBed_wao(abedfile, bbedfile, minOverlap=opts.minOverlap)
groups = Grouper()
for a, b in iw:
groups.join(a.accn, b.accn)
ngroups = 0
for group in groups:
if len(group) > 1:
ngroups += 1
print("|".join(group))
logging.debug("A total of {0} piles (>= 2 members)".format(ngroups)) | [
"def",
"pile",
"(",
"args",
")",
":",
"from",
"jcvi",
".",
"utils",
".",
"grouper",
"import",
"Grouper",
"p",
"=",
"OptionParser",
"(",
"pile",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--minOverlap\"",
",",
"default",
"=",
"0",
",",
"type",... | %prog pile abedfile bbedfile > piles
Call intersectBed on two bedfiles. | [
"%prog",
"pile",
"abedfile",
"bbedfile",
">",
"piles"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/bed.py#L1378-L1406 | train | 201,014 |
tanghaibao/jcvi | jcvi/formats/bed.py | index | def index(args):
"""
%prog index bedfile
Compress and index bedfile using `tabix`. Use --fasta to give a FASTA file
so that a bedgraph file can be generated and indexed.
"""
p = OptionParser(index.__doc__)
p.add_option("--fasta", help="Generate bedgraph and index")
p.add_option("--query", help="Chromosome location")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
bedfile, = args
fastafile = opts.fasta
if fastafile:
bedfile = make_bedgraph(bedfile, fastafile)
bedfile = sort([bedfile])
gzfile = bedfile + ".gz"
if need_update(bedfile, gzfile):
cmd = "bgzip {0}".format(bedfile)
sh(cmd)
tbifile = gzfile + ".tbi"
if need_update(gzfile, tbifile):
cmd = "tabix -p bed {0}".format(gzfile)
sh(cmd)
query = opts.query
if not query:
return
cmd = "tabix {0} {1}".format(gzfile, query)
sh(cmd, outfile=opts.outfile) | python | def index(args):
"""
%prog index bedfile
Compress and index bedfile using `tabix`. Use --fasta to give a FASTA file
so that a bedgraph file can be generated and indexed.
"""
p = OptionParser(index.__doc__)
p.add_option("--fasta", help="Generate bedgraph and index")
p.add_option("--query", help="Chromosome location")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
bedfile, = args
fastafile = opts.fasta
if fastafile:
bedfile = make_bedgraph(bedfile, fastafile)
bedfile = sort([bedfile])
gzfile = bedfile + ".gz"
if need_update(bedfile, gzfile):
cmd = "bgzip {0}".format(bedfile)
sh(cmd)
tbifile = gzfile + ".tbi"
if need_update(gzfile, tbifile):
cmd = "tabix -p bed {0}".format(gzfile)
sh(cmd)
query = opts.query
if not query:
return
cmd = "tabix {0} {1}".format(gzfile, query)
sh(cmd, outfile=opts.outfile) | [
"def",
"index",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"index",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--fasta\"",
",",
"help",
"=",
"\"Generate bedgraph and index\"",
")",
"p",
".",
"add_option",
"(",
"\"--query\"",
",",
"help... | %prog index bedfile
Compress and index bedfile using `tabix`. Use --fasta to give a FASTA file
so that a bedgraph file can be generated and indexed. | [
"%prog",
"index",
"bedfile"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/bed.py#L1409-L1448 | train | 201,015 |
tanghaibao/jcvi | jcvi/formats/bed.py | evaluate | def evaluate(args):
"""
%prog evaluate prediction.bed reality.bed fastafile
Make a truth table like:
True False --- Reality
True TP FP
False FN TN
|----Prediction
Sn = TP / (all true in reality) = TP / (TP + FN)
Sp = TP / (all true in prediction) = TP / (TP + FP)
Ac = (TP + TN) / (TP + FP + FN + TN)
"""
from jcvi.formats.sizes import Sizes
p = OptionParser(evaluate.__doc__)
p.add_option("--query",
help="Chromosome location [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
prediction, reality, fastafile = args
query = opts.query
prediction = mergeBed(prediction)
reality = mergeBed(reality)
sizes = Sizes(fastafile)
sizesfile = sizes.filename
prediction_complement = complementBed(prediction, sizesfile)
reality_complement = complementBed(reality, sizesfile)
TPbed = intersectBed(prediction, reality)
FPbed = intersectBed(prediction, reality_complement)
FNbed = intersectBed(prediction_complement, reality)
TNbed = intersectBed(prediction_complement, reality_complement)
beds = (TPbed, FPbed, FNbed, TNbed)
if query:
subbeds = []
rr = query_to_range(query, sizes)
ce = 'echo "{0}"'.format("\t".join(str(x) for x in rr))
for b in beds:
subbed = ".".join((b, query))
cmd = ce + " | intersectBed -a stdin -b {0}".format(b)
sh(cmd, outfile=subbed)
subbeds.append(subbed)
beds = subbeds
be = BedEvaluate(*beds)
print(be, file=sys.stderr)
if query:
for b in subbeds:
os.remove(b)
return be | python | def evaluate(args):
"""
%prog evaluate prediction.bed reality.bed fastafile
Make a truth table like:
True False --- Reality
True TP FP
False FN TN
|----Prediction
Sn = TP / (all true in reality) = TP / (TP + FN)
Sp = TP / (all true in prediction) = TP / (TP + FP)
Ac = (TP + TN) / (TP + FP + FN + TN)
"""
from jcvi.formats.sizes import Sizes
p = OptionParser(evaluate.__doc__)
p.add_option("--query",
help="Chromosome location [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
prediction, reality, fastafile = args
query = opts.query
prediction = mergeBed(prediction)
reality = mergeBed(reality)
sizes = Sizes(fastafile)
sizesfile = sizes.filename
prediction_complement = complementBed(prediction, sizesfile)
reality_complement = complementBed(reality, sizesfile)
TPbed = intersectBed(prediction, reality)
FPbed = intersectBed(prediction, reality_complement)
FNbed = intersectBed(prediction_complement, reality)
TNbed = intersectBed(prediction_complement, reality_complement)
beds = (TPbed, FPbed, FNbed, TNbed)
if query:
subbeds = []
rr = query_to_range(query, sizes)
ce = 'echo "{0}"'.format("\t".join(str(x) for x in rr))
for b in beds:
subbed = ".".join((b, query))
cmd = ce + " | intersectBed -a stdin -b {0}".format(b)
sh(cmd, outfile=subbed)
subbeds.append(subbed)
beds = subbeds
be = BedEvaluate(*beds)
print(be, file=sys.stderr)
if query:
for b in subbeds:
os.remove(b)
return be | [
"def",
"evaluate",
"(",
"args",
")",
":",
"from",
"jcvi",
".",
"formats",
".",
"sizes",
"import",
"Sizes",
"p",
"=",
"OptionParser",
"(",
"evaluate",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--query\"",
",",
"help",
"=",
"\"Chromosome location ... | %prog evaluate prediction.bed reality.bed fastafile
Make a truth table like:
True False --- Reality
True TP FP
False FN TN
|----Prediction
Sn = TP / (all true in reality) = TP / (TP + FN)
Sp = TP / (all true in prediction) = TP / (TP + FP)
Ac = (TP + TN) / (TP + FP + FN + TN) | [
"%prog",
"evaluate",
"prediction",
".",
"bed",
"reality",
".",
"bed",
"fastafile"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/bed.py#L1538-L1596 | train | 201,016 |
tanghaibao/jcvi | jcvi/formats/bed.py | refine | def refine(args):
"""
%prog refine bedfile1 bedfile2 refinedbed
Refine bed file using a second bed file. The final bed is keeping all the
intervals in bedfile1, but refined by bedfile2 whenever they have
intersection.
"""
p = OptionParser(refine.__doc__)
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
abedfile, bbedfile, refinedbed = args
fw = open(refinedbed, "w")
intersected = refined = 0
for a, b in intersectBed_wao(abedfile, bbedfile):
if b is None:
print(a, file=fw)
continue
intersected += 1
aspan_before = a.span
arange = (a.start, a.end)
brange = (b.start, b.end)
irange = range_intersect(arange, brange)
a.start, a.end = irange
aspan_after = a.span
if aspan_before > aspan_after:
refined += 1
print(a, file=fw)
fw.close()
print("Total intersected: {0}".format(intersected), file=sys.stderr)
print("Total refined: {0}".format(refined), file=sys.stderr)
summary([abedfile])
summary([refinedbed]) | python | def refine(args):
"""
%prog refine bedfile1 bedfile2 refinedbed
Refine bed file using a second bed file. The final bed is keeping all the
intervals in bedfile1, but refined by bedfile2 whenever they have
intersection.
"""
p = OptionParser(refine.__doc__)
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
abedfile, bbedfile, refinedbed = args
fw = open(refinedbed, "w")
intersected = refined = 0
for a, b in intersectBed_wao(abedfile, bbedfile):
if b is None:
print(a, file=fw)
continue
intersected += 1
aspan_before = a.span
arange = (a.start, a.end)
brange = (b.start, b.end)
irange = range_intersect(arange, brange)
a.start, a.end = irange
aspan_after = a.span
if aspan_before > aspan_after:
refined += 1
print(a, file=fw)
fw.close()
print("Total intersected: {0}".format(intersected), file=sys.stderr)
print("Total refined: {0}".format(refined), file=sys.stderr)
summary([abedfile])
summary([refinedbed]) | [
"def",
"refine",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"refine",
".",
"__doc__",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"3",
":",
"sys",
".",
"exit",
"(",
"n... | %prog refine bedfile1 bedfile2 refinedbed
Refine bed file using a second bed file. The final bed is keeping all the
intervals in bedfile1, but refined by bedfile2 whenever they have
intersection. | [
"%prog",
"refine",
"bedfile1",
"bedfile2",
"refinedbed"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/bed.py#L1625-L1662 | train | 201,017 |
tanghaibao/jcvi | jcvi/formats/bed.py | distance | def distance(args):
"""
%prog distance bedfile
Calculate distance between bed features. The output file is a list of
distances, which can be used to plot histogram, etc.
"""
from jcvi.utils.iter import pairwise
p = OptionParser(distance.__doc__)
p.add_option("--distmode", default="ss", choices=("ss", "ee"),
help="Distance mode between paired reads. ss is outer distance, " \
"ee is inner distance [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
bedfile, = args
sortedbedfile = sort([bedfile])
valid = total = 0
fp = open(sortedbedfile)
for a, b in pairwise(fp):
a = BedLine(a)
b = BedLine(b)
ar = (a.seqid, a.start, a.end, "+")
br = (b.seqid, b.start, b.end, "+")
dist, oo = range_distance(ar, br, distmode=opts.distmode)
total += 1
if dist > 0:
print(dist)
valid += 1
logging.debug("Total valid (> 0) distances: {0}.".\
format(percentage(valid, total))) | python | def distance(args):
"""
%prog distance bedfile
Calculate distance between bed features. The output file is a list of
distances, which can be used to plot histogram, etc.
"""
from jcvi.utils.iter import pairwise
p = OptionParser(distance.__doc__)
p.add_option("--distmode", default="ss", choices=("ss", "ee"),
help="Distance mode between paired reads. ss is outer distance, " \
"ee is inner distance [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
bedfile, = args
sortedbedfile = sort([bedfile])
valid = total = 0
fp = open(sortedbedfile)
for a, b in pairwise(fp):
a = BedLine(a)
b = BedLine(b)
ar = (a.seqid, a.start, a.end, "+")
br = (b.seqid, b.start, b.end, "+")
dist, oo = range_distance(ar, br, distmode=opts.distmode)
total += 1
if dist > 0:
print(dist)
valid += 1
logging.debug("Total valid (> 0) distances: {0}.".\
format(percentage(valid, total))) | [
"def",
"distance",
"(",
"args",
")",
":",
"from",
"jcvi",
".",
"utils",
".",
"iter",
"import",
"pairwise",
"p",
"=",
"OptionParser",
"(",
"distance",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--distmode\"",
",",
"default",
"=",
"\"ss\"",
",",
... | %prog distance bedfile
Calculate distance between bed features. The output file is a list of
distances, which can be used to plot histogram, etc. | [
"%prog",
"distance",
"bedfile"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/bed.py#L1665-L1699 | train | 201,018 |
tanghaibao/jcvi | jcvi/formats/bed.py | bedpe | def bedpe(args):
"""
%prog bedpe bedfile
Convert to bedpe format. Use --span to write another bed file that contain
the span of the read pairs.
"""
from jcvi.assembly.coverage import bed_to_bedpe
p = OptionParser(bedpe.__doc__)
p.add_option("--span", default=False, action="store_true",
help="Write span bed file [default: %default]")
p.add_option("--strand", default=False, action="store_true",
help="Write the strand columns [default: %default]")
p.add_option("--mates", help="Check the library stats from .mates file")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
bedfile, = args
pf = bedfile.rsplit(".", 1)[0]
bedpefile = pf + ".bedpe"
bedspanfile = pf + ".spans.bed" if opts.span else None
bed_to_bedpe(bedfile, bedpefile, \
pairsbedfile=bedspanfile, matesfile=opts.mates, \
strand=opts.strand)
return bedpefile, bedspanfile | python | def bedpe(args):
"""
%prog bedpe bedfile
Convert to bedpe format. Use --span to write another bed file that contain
the span of the read pairs.
"""
from jcvi.assembly.coverage import bed_to_bedpe
p = OptionParser(bedpe.__doc__)
p.add_option("--span", default=False, action="store_true",
help="Write span bed file [default: %default]")
p.add_option("--strand", default=False, action="store_true",
help="Write the strand columns [default: %default]")
p.add_option("--mates", help="Check the library stats from .mates file")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
bedfile, = args
pf = bedfile.rsplit(".", 1)[0]
bedpefile = pf + ".bedpe"
bedspanfile = pf + ".spans.bed" if opts.span else None
bed_to_bedpe(bedfile, bedpefile, \
pairsbedfile=bedspanfile, matesfile=opts.mates, \
strand=opts.strand)
return bedpefile, bedspanfile | [
"def",
"bedpe",
"(",
"args",
")",
":",
"from",
"jcvi",
".",
"assembly",
".",
"coverage",
"import",
"bed_to_bedpe",
"p",
"=",
"OptionParser",
"(",
"bedpe",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--span\"",
",",
"default",
"=",
"False",
",",
... | %prog bedpe bedfile
Convert to bedpe format. Use --span to write another bed file that contain
the span of the read pairs. | [
"%prog",
"bedpe",
"bedfile"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/bed.py#L1793-L1820 | train | 201,019 |
tanghaibao/jcvi | jcvi/formats/bed.py | sizes | def sizes(args):
"""
%prog sizes bedfile
Infer the sizes for each seqid. Useful before dot plots.
"""
p = OptionParser(sizes.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
bedfile, = args
assert op.exists(bedfile)
sizesfile = bedfile.rsplit(".", 1)[0] + ".sizes"
fw = must_open(sizesfile, "w", checkexists=True, skipcheck=True)
if fw:
b = Bed(bedfile)
for s, sbeds in b.sub_beds():
print("{0}\t{1}".format(\
s, max(x.end for x in sbeds)), file=fw)
logging.debug("Sizes file written to `{0}`.".format(sizesfile))
return sizesfile | python | def sizes(args):
"""
%prog sizes bedfile
Infer the sizes for each seqid. Useful before dot plots.
"""
p = OptionParser(sizes.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
bedfile, = args
assert op.exists(bedfile)
sizesfile = bedfile.rsplit(".", 1)[0] + ".sizes"
fw = must_open(sizesfile, "w", checkexists=True, skipcheck=True)
if fw:
b = Bed(bedfile)
for s, sbeds in b.sub_beds():
print("{0}\t{1}".format(\
s, max(x.end for x in sbeds)), file=fw)
logging.debug("Sizes file written to `{0}`.".format(sizesfile))
return sizesfile | [
"def",
"sizes",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"sizes",
".",
"__doc__",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"1",
":",
"sys",
".",
"exit",
"(",
"not... | %prog sizes bedfile
Infer the sizes for each seqid. Useful before dot plots. | [
"%prog",
"sizes",
"bedfile"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/bed.py#L1823-L1848 | train | 201,020 |
tanghaibao/jcvi | jcvi/formats/bed.py | analyze_dists | def analyze_dists(dists, cutoff=1000, alpha=.1):
"""
The dists can show bimodal distribution if they come from a mate-pair
library. Assume bimodal distribution and then separate the two peaks. Based
on the percentage in each peak, we can decide if it is indeed one peak or
two peaks, and report the median respectively.
"""
peak0 = [d for d in dists if d < cutoff]
peak1 = [d for d in dists if d >= cutoff]
c0, c1 = len(peak0), len(peak1)
logging.debug("Component counts: {0} {1}".format(c0, c1))
if c0 == 0 or c1 == 0 or float(c1) / len(dists) < alpha:
logging.debug("Single peak identified ({0} / {1} < {2})".\
format(c1, len(dists), alpha))
return np.median(dists)
peak0_median = np.median(peak0)
peak1_median = np.median(peak1)
logging.debug("Dual peaks identified: {0}bp ({1}), {2}bp ({3}) (selected)".\
format(int(peak0_median), c0, int(peak1_median), c1))
return peak1_median | python | def analyze_dists(dists, cutoff=1000, alpha=.1):
"""
The dists can show bimodal distribution if they come from a mate-pair
library. Assume bimodal distribution and then separate the two peaks. Based
on the percentage in each peak, we can decide if it is indeed one peak or
two peaks, and report the median respectively.
"""
peak0 = [d for d in dists if d < cutoff]
peak1 = [d for d in dists if d >= cutoff]
c0, c1 = len(peak0), len(peak1)
logging.debug("Component counts: {0} {1}".format(c0, c1))
if c0 == 0 or c1 == 0 or float(c1) / len(dists) < alpha:
logging.debug("Single peak identified ({0} / {1} < {2})".\
format(c1, len(dists), alpha))
return np.median(dists)
peak0_median = np.median(peak0)
peak1_median = np.median(peak1)
logging.debug("Dual peaks identified: {0}bp ({1}), {2}bp ({3}) (selected)".\
format(int(peak0_median), c0, int(peak1_median), c1))
return peak1_median | [
"def",
"analyze_dists",
"(",
"dists",
",",
"cutoff",
"=",
"1000",
",",
"alpha",
"=",
".1",
")",
":",
"peak0",
"=",
"[",
"d",
"for",
"d",
"in",
"dists",
"if",
"d",
"<",
"cutoff",
"]",
"peak1",
"=",
"[",
"d",
"for",
"d",
"in",
"dists",
"if",
"d",... | The dists can show bimodal distribution if they come from a mate-pair
library. Assume bimodal distribution and then separate the two peaks. Based
on the percentage in each peak, we can decide if it is indeed one peak or
two peaks, and report the median respectively. | [
"The",
"dists",
"can",
"show",
"bimodal",
"distribution",
"if",
"they",
"come",
"from",
"a",
"mate",
"-",
"pair",
"library",
".",
"Assume",
"bimodal",
"distribution",
"and",
"then",
"separate",
"the",
"two",
"peaks",
".",
"Based",
"on",
"the",
"percentage",
... | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/bed.py#L1851-L1872 | train | 201,021 |
tanghaibao/jcvi | jcvi/formats/bed.py | summary | def summary(args):
"""
%prog summary bedfile
Sum the total lengths of the intervals.
"""
p = OptionParser(summary.__doc__)
p.add_option("--sizes", default=False, action="store_true",
help="Write .sizes file")
p.add_option("--all", default=False, action="store_true",
help="Write summary stats per seqid")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
bedfile, = args
bed = Bed(bedfile)
bs = BedSummary(bed)
if opts.sizes:
sizesfile = bedfile + ".sizes"
fw = open(sizesfile, "w")
for span, accn in bs.mspans:
print(span, file=fw)
fw.close()
logging.debug("Spans written to `{0}`.".format(sizesfile))
return bs
if not opts.all:
bs.report()
return bs
for seqid, subbeds in bed.sub_beds():
bs = BedSummary(subbeds)
print("\t".join((seqid, str(bs)))) | python | def summary(args):
"""
%prog summary bedfile
Sum the total lengths of the intervals.
"""
p = OptionParser(summary.__doc__)
p.add_option("--sizes", default=False, action="store_true",
help="Write .sizes file")
p.add_option("--all", default=False, action="store_true",
help="Write summary stats per seqid")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
bedfile, = args
bed = Bed(bedfile)
bs = BedSummary(bed)
if opts.sizes:
sizesfile = bedfile + ".sizes"
fw = open(sizesfile, "w")
for span, accn in bs.mspans:
print(span, file=fw)
fw.close()
logging.debug("Spans written to `{0}`.".format(sizesfile))
return bs
if not opts.all:
bs.report()
return bs
for seqid, subbeds in bed.sub_beds():
bs = BedSummary(subbeds)
print("\t".join((seqid, str(bs)))) | [
"def",
"summary",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"summary",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--sizes\"",
",",
"default",
"=",
"False",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Write .sizes file\"... | %prog summary bedfile
Sum the total lengths of the intervals. | [
"%prog",
"summary",
"bedfile"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/bed.py#L2019-L2053 | train | 201,022 |
tanghaibao/jcvi | jcvi/formats/bed.py | sort | def sort(args):
"""
%prog sort bedfile
Sort bed file to have ascending order of seqid, then start. It uses the
`sort` command.
"""
p = OptionParser(sort.__doc__)
p.add_option("-i", "--inplace", dest="inplace",
default=False, action="store_true",
help="Sort bed file in place [default: %default]")
p.add_option("-u", dest="unique",
default=False, action="store_true",
help="Uniqify the bed file")
p.add_option("--accn", default=False, action="store_true",
help="Sort based on the accessions [default: %default]")
p.set_outfile(outfile=None)
p.set_tmpdir()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
bedfile, = args
inplace = opts.inplace
if not inplace and ".sorted." in bedfile:
return bedfile
sortedbed = opts.outfile
if inplace:
sortedbed = bedfile
elif opts.outfile is None:
pf, sf = op.basename(bedfile).rsplit(".", 1)
sortedbed = pf + ".sorted." + sf
sortopt = "-k1,1 -k2,2n -k3,3n -k4,4" if not opts.accn else \
"-k4,4 -k1,1 -k2,2n -k3,3n"
cmd = "sort"
if opts.tmpdir:
cmd += " -T {0}".format(opts.tmpdir)
if opts.unique:
cmd += " -u"
cmd += " {0} {1} -o {2}".format(sortopt, bedfile, sortedbed)
if inplace or need_update(bedfile, sortedbed):
sh(cmd)
return sortedbed | python | def sort(args):
"""
%prog sort bedfile
Sort bed file to have ascending order of seqid, then start. It uses the
`sort` command.
"""
p = OptionParser(sort.__doc__)
p.add_option("-i", "--inplace", dest="inplace",
default=False, action="store_true",
help="Sort bed file in place [default: %default]")
p.add_option("-u", dest="unique",
default=False, action="store_true",
help="Uniqify the bed file")
p.add_option("--accn", default=False, action="store_true",
help="Sort based on the accessions [default: %default]")
p.set_outfile(outfile=None)
p.set_tmpdir()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
bedfile, = args
inplace = opts.inplace
if not inplace and ".sorted." in bedfile:
return bedfile
sortedbed = opts.outfile
if inplace:
sortedbed = bedfile
elif opts.outfile is None:
pf, sf = op.basename(bedfile).rsplit(".", 1)
sortedbed = pf + ".sorted." + sf
sortopt = "-k1,1 -k2,2n -k3,3n -k4,4" if not opts.accn else \
"-k4,4 -k1,1 -k2,2n -k3,3n"
cmd = "sort"
if opts.tmpdir:
cmd += " -T {0}".format(opts.tmpdir)
if opts.unique:
cmd += " -u"
cmd += " {0} {1} -o {2}".format(sortopt, bedfile, sortedbed)
if inplace or need_update(bedfile, sortedbed):
sh(cmd)
return sortedbed | [
"def",
"sort",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"sort",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"-i\"",
",",
"\"--inplace\"",
",",
"dest",
"=",
"\"inplace\"",
",",
"default",
"=",
"False",
",",
"action",
"=",
"\"store_t... | %prog sort bedfile
Sort bed file to have ascending order of seqid, then start. It uses the
`sort` command. | [
"%prog",
"sort",
"bedfile"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/bed.py#L2056-L2104 | train | 201,023 |
tanghaibao/jcvi | jcvi/formats/bed.py | mates | def mates(args):
"""
%prog mates bedfile
Generate the mates file by inferring from the names.
"""
p = OptionParser(mates.__doc__)
p.add_option("--lib", default=False, action="store_true",
help="Output library information along with pairs [default: %default]")
p.add_option("--nointra", default=False, action="store_true",
help="Remove mates that are intra-scaffold [default: %default]")
p.add_option("--prefix", default=False, action="store_true",
help="Only keep links between IDs with same prefix [default: %default]")
p.set_mates()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
bedfile, = args
rclip = opts.rclip
key = (lambda x: x.accn[:-rclip]) if rclip else (lambda x: x.accn)
bed = Bed(bedfile, key=key)
pf = bedfile.rsplit(".", 1)[0]
matesfile = pf + ".mates"
lib = pf if opts.lib else None
fw = open(matesfile, "w")
if lib:
bedfile, stats = pairs([bedfile, \
"--rclip={0}".format(rclip),
"--cutoff={0}".format(opts.cutoff)])
sv = int(2 * stats.sd)
mindist = max(stats.mean - sv, 1)
maxdist = stats.mean + sv
print("\t".join(str(x) for x in \
("library", pf, mindist, maxdist)), file=fw)
num_fragments = num_pairs = 0
matesbedfile = matesfile + ".bed"
fwm = open(matesbedfile, "w")
for pe, lines in groupby(bed, key=key):
lines = list(lines)
if len(lines) != 2:
num_fragments += len(lines)
continue
a, b = lines
if opts.nointra and a.seqid == b.seqid:
continue
# Use --prefix to limit the links between seqids with the same prefix
# For example, contigs of the same BAC, mth2-23j10_001, mth-23j10_002
if opts.prefix:
aprefix = a.seqid.split("_")[0]
bprefix = b.seqid.split("_")[0]
if aprefix != bprefix:
continue
num_pairs += 1
pair = [a.accn, b.accn]
if lib:
pair.append(lib)
print("\t".join(pair), file=fw)
print(a, file=fwm)
print(b, file=fwm)
logging.debug("Discard {0} frags and write {1} pairs to `{2}` and `{3}`.".\
format(num_fragments, num_pairs, matesfile, matesbedfile))
fw.close()
fwm.close()
return matesfile, matesbedfile | python | def mates(args):
"""
%prog mates bedfile
Generate the mates file by inferring from the names.
"""
p = OptionParser(mates.__doc__)
p.add_option("--lib", default=False, action="store_true",
help="Output library information along with pairs [default: %default]")
p.add_option("--nointra", default=False, action="store_true",
help="Remove mates that are intra-scaffold [default: %default]")
p.add_option("--prefix", default=False, action="store_true",
help="Only keep links between IDs with same prefix [default: %default]")
p.set_mates()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
bedfile, = args
rclip = opts.rclip
key = (lambda x: x.accn[:-rclip]) if rclip else (lambda x: x.accn)
bed = Bed(bedfile, key=key)
pf = bedfile.rsplit(".", 1)[0]
matesfile = pf + ".mates"
lib = pf if opts.lib else None
fw = open(matesfile, "w")
if lib:
bedfile, stats = pairs([bedfile, \
"--rclip={0}".format(rclip),
"--cutoff={0}".format(opts.cutoff)])
sv = int(2 * stats.sd)
mindist = max(stats.mean - sv, 1)
maxdist = stats.mean + sv
print("\t".join(str(x) for x in \
("library", pf, mindist, maxdist)), file=fw)
num_fragments = num_pairs = 0
matesbedfile = matesfile + ".bed"
fwm = open(matesbedfile, "w")
for pe, lines in groupby(bed, key=key):
lines = list(lines)
if len(lines) != 2:
num_fragments += len(lines)
continue
a, b = lines
if opts.nointra and a.seqid == b.seqid:
continue
# Use --prefix to limit the links between seqids with the same prefix
# For example, contigs of the same BAC, mth2-23j10_001, mth-23j10_002
if opts.prefix:
aprefix = a.seqid.split("_")[0]
bprefix = b.seqid.split("_")[0]
if aprefix != bprefix:
continue
num_pairs += 1
pair = [a.accn, b.accn]
if lib:
pair.append(lib)
print("\t".join(pair), file=fw)
print(a, file=fwm)
print(b, file=fwm)
logging.debug("Discard {0} frags and write {1} pairs to `{2}` and `{3}`.".\
format(num_fragments, num_pairs, matesfile, matesbedfile))
fw.close()
fwm.close()
return matesfile, matesbedfile | [
"def",
"mates",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"mates",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--lib\"",
",",
"default",
"=",
"False",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Output library informatio... | %prog mates bedfile
Generate the mates file by inferring from the names. | [
"%prog",
"mates",
"bedfile"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/bed.py#L2107-L2184 | train | 201,024 |
tanghaibao/jcvi | jcvi/formats/pyblast.py | BlastLine.swapped | def swapped(self):
"""
Swap query and subject.
"""
args = [getattr(self, attr) for attr in BlastLine.__slots__[:12]]
args[0:2] = [self.subject, self.query]
args[6:10] = [self.sstart, self.sstop, self.qstart, self.qstop]
if self.orientation == '-':
args[8], args[9] = args[9], args[8]
b = "\t".join(str(x) for x in args)
return BlastLine(b) | python | def swapped(self):
"""
Swap query and subject.
"""
args = [getattr(self, attr) for attr in BlastLine.__slots__[:12]]
args[0:2] = [self.subject, self.query]
args[6:10] = [self.sstart, self.sstop, self.qstart, self.qstop]
if self.orientation == '-':
args[8], args[9] = args[9], args[8]
b = "\t".join(str(x) for x in args)
return BlastLine(b) | [
"def",
"swapped",
"(",
"self",
")",
":",
"args",
"=",
"[",
"getattr",
"(",
"self",
",",
"attr",
")",
"for",
"attr",
"in",
"BlastLine",
".",
"__slots__",
"[",
":",
"12",
"]",
"]",
"args",
"[",
"0",
":",
"2",
"]",
"=",
"[",
"self",
".",
"subject"... | Swap query and subject. | [
"Swap",
"query",
"and",
"subject",
"."
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/pyblast.py#L56-L66 | train | 201,025 |
tanghaibao/jcvi | jcvi/formats/genbank.py | gff | def gff(args):
"""
%prog gff seq.gbk
Convert Genbank file to GFF and FASTA file.
The Genbank file can contain multiple records.
"""
p = OptionParser(gff.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
gbkfile, = args
MultiGenBank(gbkfile) | python | def gff(args):
"""
%prog gff seq.gbk
Convert Genbank file to GFF and FASTA file.
The Genbank file can contain multiple records.
"""
p = OptionParser(gff.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
gbkfile, = args
MultiGenBank(gbkfile) | [
"def",
"gff",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"gff",
".",
"__doc__",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"1",
":",
"sys",
".",
"exit",
"(",
"not",
... | %prog gff seq.gbk
Convert Genbank file to GFF and FASTA file.
The Genbank file can contain multiple records. | [
"%prog",
"gff",
"seq",
".",
"gbk"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/genbank.py#L301-L315 | train | 201,026 |
tanghaibao/jcvi | jcvi/utils/iter.py | tee_lookahead | def tee_lookahead(t, i):
"""Inspect the i-th upcomping value from a tee object
while leaving the tee object at its current position.
Raise an IndexError if the underlying iterator doesn't
have enough values.
"""
for value in islice(t.__copy__(), i, None):
return value
raise IndexError(i) | python | def tee_lookahead(t, i):
"""Inspect the i-th upcomping value from a tee object
while leaving the tee object at its current position.
Raise an IndexError if the underlying iterator doesn't
have enough values.
"""
for value in islice(t.__copy__(), i, None):
return value
raise IndexError(i) | [
"def",
"tee_lookahead",
"(",
"t",
",",
"i",
")",
":",
"for",
"value",
"in",
"islice",
"(",
"t",
".",
"__copy__",
"(",
")",
",",
"i",
",",
"None",
")",
":",
"return",
"value",
"raise",
"IndexError",
"(",
"i",
")"
] | Inspect the i-th upcomping value from a tee object
while leaving the tee object at its current position.
Raise an IndexError if the underlying iterator doesn't
have enough values. | [
"Inspect",
"the",
"i",
"-",
"th",
"upcomping",
"value",
"from",
"a",
"tee",
"object",
"while",
"leaving",
"the",
"tee",
"object",
"at",
"its",
"current",
"position",
"."
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/utils/iter.py#L188-L197 | train | 201,027 |
tanghaibao/jcvi | jcvi/formats/fastq.py | uniq | def uniq(args):
"""
%prog uniq fastqfile
Retain only first instance of duplicate reads. Duplicate is defined as
having the same read name.
"""
p = OptionParser(uniq.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastqfile, = args
fw = must_open(opts.outfile, "w")
nduplicates = nreads = 0
seen = set()
for rec in iter_fastq(fastqfile):
nreads += 1
if rec is None:
break
name = rec.name
if name in seen:
nduplicates += 1
continue
seen.add(name)
print(rec, file=fw)
logging.debug("Removed duplicate reads: {}".\
format(percentage(nduplicates, nreads))) | python | def uniq(args):
"""
%prog uniq fastqfile
Retain only first instance of duplicate reads. Duplicate is defined as
having the same read name.
"""
p = OptionParser(uniq.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastqfile, = args
fw = must_open(opts.outfile, "w")
nduplicates = nreads = 0
seen = set()
for rec in iter_fastq(fastqfile):
nreads += 1
if rec is None:
break
name = rec.name
if name in seen:
nduplicates += 1
continue
seen.add(name)
print(rec, file=fw)
logging.debug("Removed duplicate reads: {}".\
format(percentage(nduplicates, nreads))) | [
"def",
"uniq",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"uniq",
".",
"__doc__",
")",
"p",
".",
"set_outfile",
"(",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"1",
"... | %prog uniq fastqfile
Retain only first instance of duplicate reads. Duplicate is defined as
having the same read name. | [
"%prog",
"uniq",
"fastqfile"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/fastq.py#L238-L267 | train | 201,028 |
tanghaibao/jcvi | jcvi/formats/fastq.py | suffix | def suffix(args):
"""
%prog suffix fastqfile CAG
Filter reads based on suffix.
"""
p = OptionParser(suffix.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
fastqfile, sf = args
fw = must_open(opts.outfile, "w")
nreads = nselected = 0
for rec in iter_fastq(fastqfile):
nreads += 1
if rec is None:
break
if rec.seq.endswith(sf):
print(rec, file=fw)
nselected += 1
logging.debug("Selected reads with suffix {0}: {1}".\
format(sf, percentage(nselected, nreads))) | python | def suffix(args):
"""
%prog suffix fastqfile CAG
Filter reads based on suffix.
"""
p = OptionParser(suffix.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
fastqfile, sf = args
fw = must_open(opts.outfile, "w")
nreads = nselected = 0
for rec in iter_fastq(fastqfile):
nreads += 1
if rec is None:
break
if rec.seq.endswith(sf):
print(rec, file=fw)
nselected += 1
logging.debug("Selected reads with suffix {0}: {1}".\
format(sf, percentage(nselected, nreads))) | [
"def",
"suffix",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"suffix",
".",
"__doc__",
")",
"p",
".",
"set_outfile",
"(",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"2",... | %prog suffix fastqfile CAG
Filter reads based on suffix. | [
"%prog",
"suffix",
"fastqfile",
"CAG"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/fastq.py#L270-L294 | train | 201,029 |
tanghaibao/jcvi | jcvi/formats/fastq.py | readlen | def readlen(args):
"""
%prog readlen fastqfile
Calculate read length, will only try the first N reads. Output min, max, and
avg for each file.
"""
p = OptionParser(readlen.__doc__)
p.set_firstN()
p.add_option("--silent", default=False, action="store_true",
help="Do not print read length stats")
p.add_option("--nocheck", default=False, action="store_true",
help="Do not check file type suffix")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
f, = args
if (not opts.nocheck) and (not is_fastq(f)):
logging.debug("File `{}` does not endswith .fastq or .fq".format(f))
return 0
s = calc_readlen(f, opts.firstN)
if not opts.silent:
print("\t".join(str(x) for x in (f, s.min, s.max, s.mean, s.median)))
return int(s.max) | python | def readlen(args):
"""
%prog readlen fastqfile
Calculate read length, will only try the first N reads. Output min, max, and
avg for each file.
"""
p = OptionParser(readlen.__doc__)
p.set_firstN()
p.add_option("--silent", default=False, action="store_true",
help="Do not print read length stats")
p.add_option("--nocheck", default=False, action="store_true",
help="Do not check file type suffix")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
f, = args
if (not opts.nocheck) and (not is_fastq(f)):
logging.debug("File `{}` does not endswith .fastq or .fq".format(f))
return 0
s = calc_readlen(f, opts.firstN)
if not opts.silent:
print("\t".join(str(x) for x in (f, s.min, s.max, s.mean, s.median)))
return int(s.max) | [
"def",
"readlen",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"readlen",
".",
"__doc__",
")",
"p",
".",
"set_firstN",
"(",
")",
"p",
".",
"add_option",
"(",
"\"--silent\"",
",",
"default",
"=",
"False",
",",
"action",
"=",
"\"store_true\"",
"... | %prog readlen fastqfile
Calculate read length, will only try the first N reads. Output min, max, and
avg for each file. | [
"%prog",
"readlen",
"fastqfile"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/fastq.py#L318-L345 | train | 201,030 |
tanghaibao/jcvi | jcvi/formats/fastq.py | fasta | def fasta(args):
"""
%prog fasta fastqfiles
Convert fastq to fasta and qual file.
"""
p = OptionParser(fasta.__doc__)
p.add_option("--seqtk", default=False, action="store_true",
help="Use seqtk to convert")
p.set_outdir()
p.set_outfile(outfile=None)
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
fastqfiles = args
outdir = opts.outdir
if outdir and outdir != ".":
mkdir(outdir)
fastqfile = fastqfiles[0]
pf = op.basename(fastqfile)
gzinput = pf.endswith(".gz")
if gzinput:
pf = pf.rsplit(".", 1)[0]
pf, sf = pf.rsplit(".", 1)
if sf not in ("fq", "fastq"):
logging.debug("Assumed FASTA: suffix not `fq` or `fastq`")
return fastqfile, None
fastafile, qualfile = pf + ".fasta", pf + ".qual"
outfile = opts.outfile or fastafile
outfile = op.join(outdir, outfile)
if opts.seqtk:
if need_update(fastqfiles, outfile):
for i, fastqfile in enumerate(fastqfiles):
cmd = "seqtk seq -A {0} -L 30 -l 70".format(fastqfile)
# First one creates file, following ones append to it
sh(cmd, outfile=outfile, append=i)
else:
logging.debug("Outfile `{0}` already exists.".format(outfile))
return outfile, None
for fastqfile in fastqfiles:
SeqIO.convert(fastqfile, "fastq", fastafile, "fasta")
SeqIO.convert(fastqfile, "fastq", qualfile, "qual")
return fastafile, qualfile | python | def fasta(args):
"""
%prog fasta fastqfiles
Convert fastq to fasta and qual file.
"""
p = OptionParser(fasta.__doc__)
p.add_option("--seqtk", default=False, action="store_true",
help="Use seqtk to convert")
p.set_outdir()
p.set_outfile(outfile=None)
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
fastqfiles = args
outdir = opts.outdir
if outdir and outdir != ".":
mkdir(outdir)
fastqfile = fastqfiles[0]
pf = op.basename(fastqfile)
gzinput = pf.endswith(".gz")
if gzinput:
pf = pf.rsplit(".", 1)[0]
pf, sf = pf.rsplit(".", 1)
if sf not in ("fq", "fastq"):
logging.debug("Assumed FASTA: suffix not `fq` or `fastq`")
return fastqfile, None
fastafile, qualfile = pf + ".fasta", pf + ".qual"
outfile = opts.outfile or fastafile
outfile = op.join(outdir, outfile)
if opts.seqtk:
if need_update(fastqfiles, outfile):
for i, fastqfile in enumerate(fastqfiles):
cmd = "seqtk seq -A {0} -L 30 -l 70".format(fastqfile)
# First one creates file, following ones append to it
sh(cmd, outfile=outfile, append=i)
else:
logging.debug("Outfile `{0}` already exists.".format(outfile))
return outfile, None
for fastqfile in fastqfiles:
SeqIO.convert(fastqfile, "fastq", fastafile, "fasta")
SeqIO.convert(fastqfile, "fastq", qualfile, "qual")
return fastafile, qualfile | [
"def",
"fasta",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"fasta",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--seqtk\"",
",",
"default",
"=",
"False",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Use seqtk to convert\""... | %prog fasta fastqfiles
Convert fastq to fasta and qual file. | [
"%prog",
"fasta",
"fastqfiles"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/fastq.py#L348-L397 | train | 201,031 |
tanghaibao/jcvi | jcvi/formats/fastq.py | filter | def filter(args):
"""
%prog filter paired.fastq
Filter to get high qv reads. Use interleaved format (one file) or paired
format (two files) to filter on paired reads.
"""
p = OptionParser(filter.__doc__)
p.add_option("-q", dest="qv", default=20, type="int",
help="Minimum quality score to keep [default: %default]")
p.add_option("-p", dest="pct", default=95, type="int",
help="Minimum percent of bases that have [-q] quality "\
"[default: %default]")
opts, args = p.parse_args(args)
if len(args) not in (1, 2):
sys.exit(not p.print_help())
if len(args) == 1:
r1 = r2 = args[0]
else:
r1, r2 = args
qv = opts.qv
pct = opts.pct
offset = guessoffset([r1])
qvchar = chr(offset + qv)
logging.debug("Call base qv >= {0} as good.".format(qvchar))
outfile = r1.rsplit(".", 1)[0] + ".q{0}.paired.fastq".format(qv)
fw = open(outfile, "w")
p1fp, p2fp = FastqPairedIterator(r1, r2)
while True:
a = list(islice(p1fp, 4))
if not a:
break
b = list(islice(p2fp, 4))
q1 = a[-1].rstrip()
q2 = b[-1].rstrip()
if isHighQv(q1, qvchar, pct=pct) and isHighQv(q2, qvchar, pct=pct):
fw.writelines(a)
fw.writelines(b) | python | def filter(args):
"""
%prog filter paired.fastq
Filter to get high qv reads. Use interleaved format (one file) or paired
format (two files) to filter on paired reads.
"""
p = OptionParser(filter.__doc__)
p.add_option("-q", dest="qv", default=20, type="int",
help="Minimum quality score to keep [default: %default]")
p.add_option("-p", dest="pct", default=95, type="int",
help="Minimum percent of bases that have [-q] quality "\
"[default: %default]")
opts, args = p.parse_args(args)
if len(args) not in (1, 2):
sys.exit(not p.print_help())
if len(args) == 1:
r1 = r2 = args[0]
else:
r1, r2 = args
qv = opts.qv
pct = opts.pct
offset = guessoffset([r1])
qvchar = chr(offset + qv)
logging.debug("Call base qv >= {0} as good.".format(qvchar))
outfile = r1.rsplit(".", 1)[0] + ".q{0}.paired.fastq".format(qv)
fw = open(outfile, "w")
p1fp, p2fp = FastqPairedIterator(r1, r2)
while True:
a = list(islice(p1fp, 4))
if not a:
break
b = list(islice(p2fp, 4))
q1 = a[-1].rstrip()
q2 = b[-1].rstrip()
if isHighQv(q1, qvchar, pct=pct) and isHighQv(q2, qvchar, pct=pct):
fw.writelines(a)
fw.writelines(b) | [
"def",
"filter",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"filter",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"-q\"",
",",
"dest",
"=",
"\"qv\"",
",",
"default",
"=",
"20",
",",
"type",
"=",
"\"int\"",
",",
"help",
"=",
"\"Mi... | %prog filter paired.fastq
Filter to get high qv reads. Use interleaved format (one file) or paired
format (two files) to filter on paired reads. | [
"%prog",
"filter",
"paired",
".",
"fastq"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/fastq.py#L450-L495 | train | 201,032 |
tanghaibao/jcvi | jcvi/formats/fastq.py | shuffle | def shuffle(args):
"""
%prog shuffle p1.fastq p2.fastq
Shuffle pairs into interleaved format.
"""
p = OptionParser(shuffle.__doc__)
p.set_tag()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
p1, p2 = args
pairsfastq = pairspf((p1, p2)) + ".fastq"
tag = opts.tag
p1fp = must_open(p1)
p2fp = must_open(p2)
pairsfw = must_open(pairsfastq, "w")
nreads = 0
while True:
a = list(islice(p1fp, 4))
if not a:
break
b = list(islice(p2fp, 4))
if tag:
name = a[0].rstrip()
a[0] = name + "/1\n"
b[0] = name + "/2\n"
pairsfw.writelines(a)
pairsfw.writelines(b)
nreads += 2
pairsfw.close()
extra = nreads * 2 if tag else 0
checkShuffleSizes(p1, p2, pairsfastq, extra=extra)
logging.debug("File `{0}` verified after writing {1} reads.".\
format(pairsfastq, nreads))
return pairsfastq | python | def shuffle(args):
"""
%prog shuffle p1.fastq p2.fastq
Shuffle pairs into interleaved format.
"""
p = OptionParser(shuffle.__doc__)
p.set_tag()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
p1, p2 = args
pairsfastq = pairspf((p1, p2)) + ".fastq"
tag = opts.tag
p1fp = must_open(p1)
p2fp = must_open(p2)
pairsfw = must_open(pairsfastq, "w")
nreads = 0
while True:
a = list(islice(p1fp, 4))
if not a:
break
b = list(islice(p2fp, 4))
if tag:
name = a[0].rstrip()
a[0] = name + "/1\n"
b[0] = name + "/2\n"
pairsfw.writelines(a)
pairsfw.writelines(b)
nreads += 2
pairsfw.close()
extra = nreads * 2 if tag else 0
checkShuffleSizes(p1, p2, pairsfastq, extra=extra)
logging.debug("File `{0}` verified after writing {1} reads.".\
format(pairsfastq, nreads))
return pairsfastq | [
"def",
"shuffle",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"shuffle",
".",
"__doc__",
")",
"p",
".",
"set_tag",
"(",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"2",
... | %prog shuffle p1.fastq p2.fastq
Shuffle pairs into interleaved format. | [
"%prog",
"shuffle",
"p1",
".",
"fastq",
"p2",
".",
"fastq"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/fastq.py#L509-L551 | train | 201,033 |
tanghaibao/jcvi | jcvi/formats/fastq.py | split | def split(args):
"""
%prog split pairs.fastq
Split shuffled pairs into `.1.fastq` and `.2.fastq`, using `sed`. Can work
on gzipped file.
<http://seqanswers.com/forums/showthread.php?t=13776>
"""
from jcvi.apps.grid import Jobs
p = OptionParser(split.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
pairsfastq, = args
gz = pairsfastq.endswith(".gz")
pf = pairsfastq.replace(".gz", "").rsplit(".", 1)[0]
p1 = pf + ".1.fastq"
p2 = pf + ".2.fastq"
cmd = "zcat" if gz else "cat"
p1cmd = cmd + " {0} | sed -ne '1~8{{N;N;N;p}}'".format(pairsfastq)
p2cmd = cmd + " {0} | sed -ne '5~8{{N;N;N;p}}'".format(pairsfastq)
if gz:
p1cmd += " | gzip"
p2cmd += " | gzip"
p1 += ".gz"
p2 += ".gz"
p1cmd += " > " + p1
p2cmd += " > " + p2
args = [(p1cmd, ), (p2cmd, )]
m = Jobs(target=sh, args=args)
m.run()
checkShuffleSizes(p1, p2, pairsfastq) | python | def split(args):
"""
%prog split pairs.fastq
Split shuffled pairs into `.1.fastq` and `.2.fastq`, using `sed`. Can work
on gzipped file.
<http://seqanswers.com/forums/showthread.php?t=13776>
"""
from jcvi.apps.grid import Jobs
p = OptionParser(split.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
pairsfastq, = args
gz = pairsfastq.endswith(".gz")
pf = pairsfastq.replace(".gz", "").rsplit(".", 1)[0]
p1 = pf + ".1.fastq"
p2 = pf + ".2.fastq"
cmd = "zcat" if gz else "cat"
p1cmd = cmd + " {0} | sed -ne '1~8{{N;N;N;p}}'".format(pairsfastq)
p2cmd = cmd + " {0} | sed -ne '5~8{{N;N;N;p}}'".format(pairsfastq)
if gz:
p1cmd += " | gzip"
p2cmd += " | gzip"
p1 += ".gz"
p2 += ".gz"
p1cmd += " > " + p1
p2cmd += " > " + p2
args = [(p1cmd, ), (p2cmd, )]
m = Jobs(target=sh, args=args)
m.run()
checkShuffleSizes(p1, p2, pairsfastq) | [
"def",
"split",
"(",
"args",
")",
":",
"from",
"jcvi",
".",
"apps",
".",
"grid",
"import",
"Jobs",
"p",
"=",
"OptionParser",
"(",
"split",
".",
"__doc__",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
... | %prog split pairs.fastq
Split shuffled pairs into `.1.fastq` and `.2.fastq`, using `sed`. Can work
on gzipped file.
<http://seqanswers.com/forums/showthread.php?t=13776> | [
"%prog",
"split",
"pairs",
".",
"fastq"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/fastq.py#L554-L594 | train | 201,034 |
tanghaibao/jcvi | jcvi/formats/fastq.py | guessoffset | def guessoffset(args):
"""
%prog guessoffset fastqfile
Guess the quality offset of the fastqfile, whether 33 or 64.
See encoding schemes: <http://en.wikipedia.org/wiki/FASTQ_format>
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS...............................
..........................XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
...............................IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII
.................................JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ
LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL...............................
!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefgh
| | | | |
33 59 64 73 104
S - Sanger Phred+33, raw reads typically (0, 40)
X - Solexa Solexa+64, raw reads typically (-5, 40)
I - Illumina 1.3+ Phred+64, raw reads typically (0, 40)
J - Illumina 1.5+ Phred+64, raw reads typically (3, 40)
L - Illumina 1.8+ Phred+33, raw reads typically (0, 40)
with 0=unused, 1=unused, 2=Read Segment Quality Control Indicator (bold)
"""
p = OptionParser(guessoffset.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastqfile, = args
ai = iter_fastq(fastqfile)
rec = next(ai)
offset = 64
while rec:
quality = rec.quality
lowcounts = len([x for x in quality if x < 59])
highcounts = len([x for x in quality if x > 74])
diff = highcounts - lowcounts
if diff > 10:
break
elif diff < -10:
offset = 33
break
rec = next(ai)
if offset == 33:
print("Sanger encoding (offset=33)", file=sys.stderr)
elif offset == 64:
print("Illumina encoding (offset=64)", file=sys.stderr)
return offset | python | def guessoffset(args):
"""
%prog guessoffset fastqfile
Guess the quality offset of the fastqfile, whether 33 or 64.
See encoding schemes: <http://en.wikipedia.org/wiki/FASTQ_format>
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS...............................
..........................XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
...............................IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII
.................................JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ
LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL...............................
!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefgh
| | | | |
33 59 64 73 104
S - Sanger Phred+33, raw reads typically (0, 40)
X - Solexa Solexa+64, raw reads typically (-5, 40)
I - Illumina 1.3+ Phred+64, raw reads typically (0, 40)
J - Illumina 1.5+ Phred+64, raw reads typically (3, 40)
L - Illumina 1.8+ Phred+33, raw reads typically (0, 40)
with 0=unused, 1=unused, 2=Read Segment Quality Control Indicator (bold)
"""
p = OptionParser(guessoffset.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastqfile, = args
ai = iter_fastq(fastqfile)
rec = next(ai)
offset = 64
while rec:
quality = rec.quality
lowcounts = len([x for x in quality if x < 59])
highcounts = len([x for x in quality if x > 74])
diff = highcounts - lowcounts
if diff > 10:
break
elif diff < -10:
offset = 33
break
rec = next(ai)
if offset == 33:
print("Sanger encoding (offset=33)", file=sys.stderr)
elif offset == 64:
print("Illumina encoding (offset=64)", file=sys.stderr)
return offset | [
"def",
"guessoffset",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"guessoffset",
".",
"__doc__",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"1",
":",
"sys",
".",
"exit",
... | %prog guessoffset fastqfile
Guess the quality offset of the fastqfile, whether 33 or 64.
See encoding schemes: <http://en.wikipedia.org/wiki/FASTQ_format>
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS...............................
..........................XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
...............................IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII
.................................JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ
LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL...............................
!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefgh
| | | | |
33 59 64 73 104
S - Sanger Phred+33, raw reads typically (0, 40)
X - Solexa Solexa+64, raw reads typically (-5, 40)
I - Illumina 1.3+ Phred+64, raw reads typically (0, 40)
J - Illumina 1.5+ Phred+64, raw reads typically (3, 40)
L - Illumina 1.8+ Phred+33, raw reads typically (0, 40)
with 0=unused, 1=unused, 2=Read Segment Quality Control Indicator (bold) | [
"%prog",
"guessoffset",
"fastqfile"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/fastq.py#L597-L647 | train | 201,035 |
tanghaibao/jcvi | jcvi/formats/fastq.py | format | def format(args):
"""
%prog format fastqfile
Format FASTQ file. Currently provides option to convert FASTQ header from
one dialect to another.
"""
p = OptionParser(format.__doc__)
p.add_option("--convert", default=None, choices=[">=1.8", "<1.8", "sra"],
help="Convert fastq header to a different format" +
" [default: %default]")
p.set_tag(specify_tag=True)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastqfile, = args
ai = iter_fastq(fastqfile)
rec = next(ai)
dialect = None
while rec:
h = FastqHeader(rec.header)
if not dialect:
dialect = h.dialect
logging.debug("Input fastq dialect: `{0}`".format(dialect))
if opts.convert:
logging.debug("Output fastq dialect: `{0}`".format(opts.convert))
rec.name = h.format_header(dialect=opts.convert, tag=opts.tag)
print(rec)
rec = next(ai) | python | def format(args):
"""
%prog format fastqfile
Format FASTQ file. Currently provides option to convert FASTQ header from
one dialect to another.
"""
p = OptionParser(format.__doc__)
p.add_option("--convert", default=None, choices=[">=1.8", "<1.8", "sra"],
help="Convert fastq header to a different format" +
" [default: %default]")
p.set_tag(specify_tag=True)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastqfile, = args
ai = iter_fastq(fastqfile)
rec = next(ai)
dialect = None
while rec:
h = FastqHeader(rec.header)
if not dialect:
dialect = h.dialect
logging.debug("Input fastq dialect: `{0}`".format(dialect))
if opts.convert:
logging.debug("Output fastq dialect: `{0}`".format(opts.convert))
rec.name = h.format_header(dialect=opts.convert, tag=opts.tag)
print(rec)
rec = next(ai) | [
"def",
"format",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"format",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--convert\"",
",",
"default",
"=",
"None",
",",
"choices",
"=",
"[",
"\">=1.8\"",
",",
"\"<1.8\"",
",",
"\"sra\"",
"]"... | %prog format fastqfile
Format FASTQ file. Currently provides option to convert FASTQ header from
one dialect to another. | [
"%prog",
"format",
"fastqfile"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/fastq.py#L650-L683 | train | 201,036 |
tanghaibao/jcvi | jcvi/formats/fastq.py | trim | def trim(args):
"""
%prog trim fastqfile
Wraps `fastx_trimmer` to trim from begin or end of reads.
"""
p = OptionParser(trim.__doc__)
p.add_option("-f", dest="first", default=0, type="int",
help="First base to keep. Default is 1.")
p.add_option("-l", dest="last", default=0, type="int",
help="Last base to keep. Default is entire read.")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastqfile, = args
obfastqfile = op.basename(fastqfile)
fq = obfastqfile.rsplit(".", 1)[0] + ".ntrimmed.fastq"
if fastqfile.endswith(".gz"):
fq = obfastqfile.rsplit(".", 2)[0] + ".ntrimmed.fastq.gz"
cmd = "fastx_trimmer -Q33 "
if opts.first:
cmd += "-f {0.first} ".format(opts)
if opts.last:
cmd += "-l {0.last} ".format(opts)
sh(cmd, infile=fastqfile, outfile=fq) | python | def trim(args):
"""
%prog trim fastqfile
Wraps `fastx_trimmer` to trim from begin or end of reads.
"""
p = OptionParser(trim.__doc__)
p.add_option("-f", dest="first", default=0, type="int",
help="First base to keep. Default is 1.")
p.add_option("-l", dest="last", default=0, type="int",
help="Last base to keep. Default is entire read.")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastqfile, = args
obfastqfile = op.basename(fastqfile)
fq = obfastqfile.rsplit(".", 1)[0] + ".ntrimmed.fastq"
if fastqfile.endswith(".gz"):
fq = obfastqfile.rsplit(".", 2)[0] + ".ntrimmed.fastq.gz"
cmd = "fastx_trimmer -Q33 "
if opts.first:
cmd += "-f {0.first} ".format(opts)
if opts.last:
cmd += "-l {0.last} ".format(opts)
sh(cmd, infile=fastqfile, outfile=fq) | [
"def",
"trim",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"trim",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"-f\"",
",",
"dest",
"=",
"\"first\"",
",",
"default",
"=",
"0",
",",
"type",
"=",
"\"int\"",
",",
"help",
"=",
"\"Firs... | %prog trim fastqfile
Wraps `fastx_trimmer` to trim from begin or end of reads. | [
"%prog",
"trim",
"fastqfile"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/fastq.py#L721-L749 | train | 201,037 |
tanghaibao/jcvi | jcvi/formats/fastq.py | catread | def catread(args):
"""
%prog catread fastqfile1 fastqfile2
Concatenate paired end reads into one. Useful for example to do single-end
mapping and perform filtering on the whole read pair level.
"""
p = OptionParser(catread.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
r1, r2 = args
p1fp, p2fp = FastqPairedIterator(r1, r2)
outfile = pairspf((r1, r2)) + ".cat.fastq"
fw = must_open(outfile, "w")
while True:
a = list(islice(p1fp, 4))
if not a:
break
atitle, aseq, _, aqual = a
btitle, bseq, _, bqual = list(islice(p2fp, 4))
print("\n".join((atitle.strip(), aseq.strip() + bseq.strip(), \
"+", aqual.strip() + bqual.strip())), file=fw) | python | def catread(args):
"""
%prog catread fastqfile1 fastqfile2
Concatenate paired end reads into one. Useful for example to do single-end
mapping and perform filtering on the whole read pair level.
"""
p = OptionParser(catread.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
r1, r2 = args
p1fp, p2fp = FastqPairedIterator(r1, r2)
outfile = pairspf((r1, r2)) + ".cat.fastq"
fw = must_open(outfile, "w")
while True:
a = list(islice(p1fp, 4))
if not a:
break
atitle, aseq, _, aqual = a
btitle, bseq, _, bqual = list(islice(p2fp, 4))
print("\n".join((atitle.strip(), aseq.strip() + bseq.strip(), \
"+", aqual.strip() + bqual.strip())), file=fw) | [
"def",
"catread",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"catread",
".",
"__doc__",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"2",
":",
"sys",
".",
"exit",
"(",
... | %prog catread fastqfile1 fastqfile2
Concatenate paired end reads into one. Useful for example to do single-end
mapping and perform filtering on the whole read pair level. | [
"%prog",
"catread",
"fastqfile1",
"fastqfile2"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/fastq.py#L752-L776 | train | 201,038 |
tanghaibao/jcvi | jcvi/formats/fastq.py | splitread | def splitread(args):
"""
%prog splitread fastqfile
Split fastqfile into two read fastqfiles, cut in the middle.
"""
p = OptionParser(splitread.__doc__)
p.add_option("-n", dest="n", default=76, type="int",
help="Split at N-th base position [default: %default]")
p.add_option("--rc", default=False, action="store_true",
help="Reverse complement second read [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
pairsfastq, = args
base = op.basename(pairsfastq).split(".")[0]
fq1 = base + ".1.fastq"
fq2 = base + ".2.fastq"
fw1 = must_open(fq1, "w")
fw2 = must_open(fq2, "w")
fp = must_open(pairsfastq)
n = opts.n
minsize = n * 8 / 5
for name, seq, qual in FastqGeneralIterator(fp):
if len(seq) < minsize:
logging.error("Skipping read {0}, length={1}".format(name, len(seq)))
continue
name = "@" + name
rec1 = FastqLite(name, seq[:n], qual[:n])
rec2 = FastqLite(name, seq[n:], qual[n:])
if opts.rc:
rec2.rc()
print(rec1, file=fw1)
print(rec2, file=fw2)
logging.debug("Reads split into `{0},{1}`".format(fq1, fq2))
fw1.close()
fw2.close() | python | def splitread(args):
"""
%prog splitread fastqfile
Split fastqfile into two read fastqfiles, cut in the middle.
"""
p = OptionParser(splitread.__doc__)
p.add_option("-n", dest="n", default=76, type="int",
help="Split at N-th base position [default: %default]")
p.add_option("--rc", default=False, action="store_true",
help="Reverse complement second read [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
pairsfastq, = args
base = op.basename(pairsfastq).split(".")[0]
fq1 = base + ".1.fastq"
fq2 = base + ".2.fastq"
fw1 = must_open(fq1, "w")
fw2 = must_open(fq2, "w")
fp = must_open(pairsfastq)
n = opts.n
minsize = n * 8 / 5
for name, seq, qual in FastqGeneralIterator(fp):
if len(seq) < minsize:
logging.error("Skipping read {0}, length={1}".format(name, len(seq)))
continue
name = "@" + name
rec1 = FastqLite(name, seq[:n], qual[:n])
rec2 = FastqLite(name, seq[n:], qual[n:])
if opts.rc:
rec2.rc()
print(rec1, file=fw1)
print(rec2, file=fw2)
logging.debug("Reads split into `{0},{1}`".format(fq1, fq2))
fw1.close()
fw2.close() | [
"def",
"splitread",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"splitread",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"-n\"",
",",
"dest",
"=",
"\"n\"",
",",
"default",
"=",
"76",
",",
"type",
"=",
"\"int\"",
",",
"help",
"=",
... | %prog splitread fastqfile
Split fastqfile into two read fastqfiles, cut in the middle. | [
"%prog",
"splitread",
"fastqfile"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/fastq.py#L779-L823 | train | 201,039 |
tanghaibao/jcvi | jcvi/formats/fastq.py | size | def size(args):
"""
%prog size fastqfile
Find the total base pairs in a list of fastq files
"""
p = OptionParser(size.__doc__)
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
total_size = total_numrecords = 0
for f in args:
cur_size = cur_numrecords = 0
for rec in iter_fastq(f):
if not rec:
break
cur_numrecords += 1
cur_size += len(rec)
print(" ".join(str(x) for x in \
(op.basename(f), cur_numrecords, cur_size)))
total_numrecords += cur_numrecords
total_size += cur_size
if len(args) > 1:
print(" ".join(str(x) for x in \
("Total", total_numrecords, total_size))) | python | def size(args):
"""
%prog size fastqfile
Find the total base pairs in a list of fastq files
"""
p = OptionParser(size.__doc__)
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
total_size = total_numrecords = 0
for f in args:
cur_size = cur_numrecords = 0
for rec in iter_fastq(f):
if not rec:
break
cur_numrecords += 1
cur_size += len(rec)
print(" ".join(str(x) for x in \
(op.basename(f), cur_numrecords, cur_size)))
total_numrecords += cur_numrecords
total_size += cur_size
if len(args) > 1:
print(" ".join(str(x) for x in \
("Total", total_numrecords, total_size))) | [
"def",
"size",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"size",
".",
"__doc__",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"<",
"1",
":",
"sys",
".",
"exit",
"(",
"not",
... | %prog size fastqfile
Find the total base pairs in a list of fastq files | [
"%prog",
"size",
"fastqfile"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/fastq.py#L826-L854 | train | 201,040 |
tanghaibao/jcvi | jcvi/formats/fastq.py | convert | def convert(args):
"""
%prog convert in.fastq
illumina fastq quality encoding uses offset 64, and sanger uses 33. This
script creates a new file with the correct encoding. Output gzipped file if
input is also gzipped.
"""
p = OptionParser(convert.__doc__)
p.set_phred()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
infastq, = args
phred = opts.phred or str(guessoffset([infastq]))
ophred = {"64": "33", "33": "64"}[phred]
gz = infastq.endswith(".gz")
outfastq = infastq.rsplit(".", 1)[0] if gz else infastq
pf, sf = outfastq.rsplit(".", 1)
outfastq = "{0}.q{1}.{2}".format(pf, ophred, sf)
if gz:
outfastq += ".gz"
fin = "illumina" if phred == "64" else "sanger"
fout = "sanger" if phred == "64" else "illumina"
seqret = "seqret"
if infastq.endswith(".gz"):
cmd = "zcat {0} | ".format(infastq)
cmd += seqret + " fastq-{0}::stdin fastq-{1}::stdout".\
format(fin, fout)
else:
cmd = seqret + " fastq-{0}::{1} fastq-{2}::stdout".\
format(fin, infastq, fout)
sh(cmd, outfile=outfastq)
return outfastq | python | def convert(args):
"""
%prog convert in.fastq
illumina fastq quality encoding uses offset 64, and sanger uses 33. This
script creates a new file with the correct encoding. Output gzipped file if
input is also gzipped.
"""
p = OptionParser(convert.__doc__)
p.set_phred()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
infastq, = args
phred = opts.phred or str(guessoffset([infastq]))
ophred = {"64": "33", "33": "64"}[phred]
gz = infastq.endswith(".gz")
outfastq = infastq.rsplit(".", 1)[0] if gz else infastq
pf, sf = outfastq.rsplit(".", 1)
outfastq = "{0}.q{1}.{2}".format(pf, ophred, sf)
if gz:
outfastq += ".gz"
fin = "illumina" if phred == "64" else "sanger"
fout = "sanger" if phred == "64" else "illumina"
seqret = "seqret"
if infastq.endswith(".gz"):
cmd = "zcat {0} | ".format(infastq)
cmd += seqret + " fastq-{0}::stdin fastq-{1}::stdout".\
format(fin, fout)
else:
cmd = seqret + " fastq-{0}::{1} fastq-{2}::stdout".\
format(fin, infastq, fout)
sh(cmd, outfile=outfastq)
return outfastq | [
"def",
"convert",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"convert",
".",
"__doc__",
")",
"p",
".",
"set_phred",
"(",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"1",... | %prog convert in.fastq
illumina fastq quality encoding uses offset 64, and sanger uses 33. This
script creates a new file with the correct encoding. Output gzipped file if
input is also gzipped. | [
"%prog",
"convert",
"in",
".",
"fastq"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/fastq.py#L857-L897 | train | 201,041 |
tanghaibao/jcvi | jcvi/formats/fastq.py | pairinplace | def pairinplace(args):
"""
%prog pairinplace bulk.fastq
Pair up the records in bulk.fastq by comparing the names for adjancent
records. If they match, print to bulk.pairs.fastq, else print to
bulk.frags.fastq.
"""
from jcvi.utils.iter import pairwise
p = OptionParser(pairinplace.__doc__)
p.set_rclip()
p.set_tag()
p.add_option("--base",
help="Base name for the output files [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastqfile, = args
base = opts.base or op.basename(fastqfile).split(".")[0]
frags = base + ".frags.fastq"
pairs = base + ".pairs.fastq"
if fastqfile.endswith(".gz"):
frags += ".gz"
pairs += ".gz"
fragsfw = must_open(frags, "w")
pairsfw = must_open(pairs, "w")
N = opts.rclip
tag = opts.tag
strip_name = (lambda x: x[:-N]) if N else None
fh_iter = iter_fastq(fastqfile, key=strip_name)
skipflag = False # controls the iterator skip
for a, b in pairwise(fh_iter):
if b is None: # hit the eof
break
if skipflag:
skipflag = False
continue
if a.name == b.name:
if tag:
a.name += "/1"
b.name += "/2"
print(a, file=pairsfw)
print(b, file=pairsfw)
skipflag = True
else:
print(a, file=fragsfw)
# don't forget the last one, when b is None
if not skipflag:
print(a, file=fragsfw)
logging.debug("Reads paired into `%s` and `%s`" % (pairs, frags))
return pairs | python | def pairinplace(args):
"""
%prog pairinplace bulk.fastq
Pair up the records in bulk.fastq by comparing the names for adjancent
records. If they match, print to bulk.pairs.fastq, else print to
bulk.frags.fastq.
"""
from jcvi.utils.iter import pairwise
p = OptionParser(pairinplace.__doc__)
p.set_rclip()
p.set_tag()
p.add_option("--base",
help="Base name for the output files [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastqfile, = args
base = opts.base or op.basename(fastqfile).split(".")[0]
frags = base + ".frags.fastq"
pairs = base + ".pairs.fastq"
if fastqfile.endswith(".gz"):
frags += ".gz"
pairs += ".gz"
fragsfw = must_open(frags, "w")
pairsfw = must_open(pairs, "w")
N = opts.rclip
tag = opts.tag
strip_name = (lambda x: x[:-N]) if N else None
fh_iter = iter_fastq(fastqfile, key=strip_name)
skipflag = False # controls the iterator skip
for a, b in pairwise(fh_iter):
if b is None: # hit the eof
break
if skipflag:
skipflag = False
continue
if a.name == b.name:
if tag:
a.name += "/1"
b.name += "/2"
print(a, file=pairsfw)
print(b, file=pairsfw)
skipflag = True
else:
print(a, file=fragsfw)
# don't forget the last one, when b is None
if not skipflag:
print(a, file=fragsfw)
logging.debug("Reads paired into `%s` and `%s`" % (pairs, frags))
return pairs | [
"def",
"pairinplace",
"(",
"args",
")",
":",
"from",
"jcvi",
".",
"utils",
".",
"iter",
"import",
"pairwise",
"p",
"=",
"OptionParser",
"(",
"pairinplace",
".",
"__doc__",
")",
"p",
".",
"set_rclip",
"(",
")",
"p",
".",
"set_tag",
"(",
")",
"p",
".",... | %prog pairinplace bulk.fastq
Pair up the records in bulk.fastq by comparing the names for adjancent
records. If they match, print to bulk.pairs.fastq, else print to
bulk.frags.fastq. | [
"%prog",
"pairinplace",
"bulk",
".",
"fastq"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/fastq.py#L900-L961 | train | 201,042 |
tanghaibao/jcvi | jcvi/formats/fastq.py | fromsra | def fromsra(args):
"""
%prog fromsra srafile
Convert sra file to fastq using the sratoolkit `fastq-dump`
"""
p = OptionParser(fromsra.__doc__)
p.add_option("--paired", default=False, action="store_true",
help="Specify if library layout is paired-end " + \
"[default: %default]")
p.add_option("--compress", default=None, choices=["gzip", "bzip2"],
help="Compress output fastq files [default: %default]")
p.set_outdir()
p.set_grid()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
srafile, = args
paired = opts.paired
compress = opts.compress
outdir = opts.outdir
script_path = which("fastq-dump")
if not script_path:
logging.error("Cannot find `fastq-dump` in the PATH")
sys.exit()
cmd = [script_path]
if compress:
cmd.append("--{0}".format(compress))
if paired:
cmd.append("--split-files")
if outdir:
cmd.append("--outdir {0}".format(outdir))
cmd.append(srafile)
outcmd = " ".join(cmd)
sh(outcmd, grid=opts.grid) | python | def fromsra(args):
"""
%prog fromsra srafile
Convert sra file to fastq using the sratoolkit `fastq-dump`
"""
p = OptionParser(fromsra.__doc__)
p.add_option("--paired", default=False, action="store_true",
help="Specify if library layout is paired-end " + \
"[default: %default]")
p.add_option("--compress", default=None, choices=["gzip", "bzip2"],
help="Compress output fastq files [default: %default]")
p.set_outdir()
p.set_grid()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
srafile, = args
paired = opts.paired
compress = opts.compress
outdir = opts.outdir
script_path = which("fastq-dump")
if not script_path:
logging.error("Cannot find `fastq-dump` in the PATH")
sys.exit()
cmd = [script_path]
if compress:
cmd.append("--{0}".format(compress))
if paired:
cmd.append("--split-files")
if outdir:
cmd.append("--outdir {0}".format(outdir))
cmd.append(srafile)
outcmd = " ".join(cmd)
sh(outcmd, grid=opts.grid) | [
"def",
"fromsra",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"fromsra",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--paired\"",
",",
"default",
"=",
"False",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Specify if library... | %prog fromsra srafile
Convert sra file to fastq using the sratoolkit `fastq-dump` | [
"%prog",
"fromsra",
"srafile"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/fastq.py#L964-L1003 | train | 201,043 |
tanghaibao/jcvi | jcvi/formats/btab.py | blast | def blast(args):
"""
%prog blast btabfile
Convert to BLAST -m8 format.
"""
p = OptionParser(blast.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
btabfile, = args
btab = Btab(btabfile)
for b in btab:
print(b.blastline) | python | def blast(args):
"""
%prog blast btabfile
Convert to BLAST -m8 format.
"""
p = OptionParser(blast.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
btabfile, = args
btab = Btab(btabfile)
for b in btab:
print(b.blastline) | [
"def",
"blast",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"blast",
".",
"__doc__",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"1",
":",
"sys",
".",
"exit",
"(",
"not... | %prog blast btabfile
Convert to BLAST -m8 format. | [
"%prog",
"blast",
"btabfile"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/btab.py#L116-L132 | train | 201,044 |
tanghaibao/jcvi | jcvi/formats/btab.py | bed | def bed(args):
"""
%prog bed btabfile
Convert btab to bed format.
"""
from jcvi.formats.blast import BlastLine
p = OptionParser(bed.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
btabfile, = args
btab = Btab(btabfile)
for b in btab:
Bline = BlastLine(b.blastline)
print(Bline.bedline) | python | def bed(args):
"""
%prog bed btabfile
Convert btab to bed format.
"""
from jcvi.formats.blast import BlastLine
p = OptionParser(bed.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
btabfile, = args
btab = Btab(btabfile)
for b in btab:
Bline = BlastLine(b.blastline)
print(Bline.bedline) | [
"def",
"bed",
"(",
"args",
")",
":",
"from",
"jcvi",
".",
"formats",
".",
"blast",
"import",
"BlastLine",
"p",
"=",
"OptionParser",
"(",
"bed",
".",
"__doc__",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(... | %prog bed btabfile
Convert btab to bed format. | [
"%prog",
"bed",
"btabfile"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/btab.py#L135-L153 | train | 201,045 |
tanghaibao/jcvi | jcvi/formats/btab.py | gff | def gff(args):
"""
%prog gff btabfile
Convert btab file generated by AAT to gff3 format.
"""
from jcvi.utils.range import range_minmax
from jcvi.formats.gff import valid_gff_parent_child, valid_gff_type
p = OptionParser(gff.__doc__)
p.add_option("--source", default=None, help="Specify GFF source." +
" By default, it picks algorithm used to generate btab file." +
" [default: %default]")
p.add_option("--type", default="protein_match", choices=valid_gff_type,
help="GFF feature type [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
btabfile, = args
btabdict = {}
btab = Btab(btabfile, aat_dialect=True)
osource = opts.source or "aat"
otype = opts.type
octype = valid_gff_parent_child[otype]
for b in btab:
nargs = b.nargs
id = b.query + "-" + otype + "{0:05d}".format(b.chainNum)
key = b.key
if key not in btabdict:
btabdict[key] = { 'id': id,
'method': b.method,
'query': b.query,
'subject': b.subject,
'strand': b.qStrand,
'sDesc': b.sDesc,
'coords': [],
'children': []
}
btabdict[key]['coords'].append((b.qStart, b.qStop))
btabdict[key]['children'].append(b.gffline(source=osource, type=octype, id=id))
for v in btabdict.itervalues():
b = BtabLine("\t".join(str(x) for x in [0] * nargs), aat_dialect=True)
id = v['id']
b.query = v['query']
b.method = v['method']
b.subject = v['subject']
b.qStrand = v['strand']
b.sDesc = v['sDesc']
b.qStart, b.qStop = range_minmax(v['coords'])
print(b.gffline(source=osource, type=otype, primary_tag="ID", id=id))
print("\n".join(v['children'])) | python | def gff(args):
"""
%prog gff btabfile
Convert btab file generated by AAT to gff3 format.
"""
from jcvi.utils.range import range_minmax
from jcvi.formats.gff import valid_gff_parent_child, valid_gff_type
p = OptionParser(gff.__doc__)
p.add_option("--source", default=None, help="Specify GFF source." +
" By default, it picks algorithm used to generate btab file." +
" [default: %default]")
p.add_option("--type", default="protein_match", choices=valid_gff_type,
help="GFF feature type [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
btabfile, = args
btabdict = {}
btab = Btab(btabfile, aat_dialect=True)
osource = opts.source or "aat"
otype = opts.type
octype = valid_gff_parent_child[otype]
for b in btab:
nargs = b.nargs
id = b.query + "-" + otype + "{0:05d}".format(b.chainNum)
key = b.key
if key not in btabdict:
btabdict[key] = { 'id': id,
'method': b.method,
'query': b.query,
'subject': b.subject,
'strand': b.qStrand,
'sDesc': b.sDesc,
'coords': [],
'children': []
}
btabdict[key]['coords'].append((b.qStart, b.qStop))
btabdict[key]['children'].append(b.gffline(source=osource, type=octype, id=id))
for v in btabdict.itervalues():
b = BtabLine("\t".join(str(x) for x in [0] * nargs), aat_dialect=True)
id = v['id']
b.query = v['query']
b.method = v['method']
b.subject = v['subject']
b.qStrand = v['strand']
b.sDesc = v['sDesc']
b.qStart, b.qStop = range_minmax(v['coords'])
print(b.gffline(source=osource, type=otype, primary_tag="ID", id=id))
print("\n".join(v['children'])) | [
"def",
"gff",
"(",
"args",
")",
":",
"from",
"jcvi",
".",
"utils",
".",
"range",
"import",
"range_minmax",
"from",
"jcvi",
".",
"formats",
".",
"gff",
"import",
"valid_gff_parent_child",
",",
"valid_gff_type",
"p",
"=",
"OptionParser",
"(",
"gff",
".",
"__... | %prog gff btabfile
Convert btab file generated by AAT to gff3 format. | [
"%prog",
"gff",
"btabfile"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/btab.py#L156-L211 | train | 201,046 |
tanghaibao/jcvi | jcvi/apps/fetch.py | batch_taxonomy | def batch_taxonomy(list_of_taxids):
"""
Convert list of taxids to Latin names
"""
for taxid in list_of_taxids:
handle = Entrez.efetch(db='Taxonomy', id=taxid, retmode="xml")
records = Entrez.read(handle)
yield records[0]["ScientificName"] | python | def batch_taxonomy(list_of_taxids):
"""
Convert list of taxids to Latin names
"""
for taxid in list_of_taxids:
handle = Entrez.efetch(db='Taxonomy', id=taxid, retmode="xml")
records = Entrez.read(handle)
yield records[0]["ScientificName"] | [
"def",
"batch_taxonomy",
"(",
"list_of_taxids",
")",
":",
"for",
"taxid",
"in",
"list_of_taxids",
":",
"handle",
"=",
"Entrez",
".",
"efetch",
"(",
"db",
"=",
"'Taxonomy'",
",",
"id",
"=",
"taxid",
",",
"retmode",
"=",
"\"xml\"",
")",
"records",
"=",
"En... | Convert list of taxids to Latin names | [
"Convert",
"list",
"of",
"taxids",
"to",
"Latin",
"names"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/fetch.py#L31-L38 | train | 201,047 |
tanghaibao/jcvi | jcvi/apps/fetch.py | batch_entrez | def batch_entrez(list_of_terms, db="nuccore", retmax=1, rettype="fasta",
batchsize=1, email=myEmail):
"""
Retrieve multiple rather than a single record
"""
for term in list_of_terms:
logging.debug("Search term %s" % term)
success = False
ids = None
if not term:
continue
while not success:
try:
search_handle = Entrez.esearch(db=db, retmax=retmax, term=term)
rec = Entrez.read(search_handle)
success = True
ids = rec["IdList"]
except (HTTPError, URLError,
RuntimeError, KeyError) as e:
logging.error(e)
logging.debug("wait 5 seconds to reconnect...")
time.sleep(5)
if not ids:
logging.error("term {0} not found".format(term))
continue
assert ids
nids = len(ids)
if nids > 1:
logging.debug("A total of {0} results found.".format(nids))
if batchsize != 1:
logging.debug("Use a batch size of {0}.".format(batchsize))
ids = list(grouper(ids, batchsize))
for id in ids:
id = [x for x in id if x]
size = len(id)
id = ",".join(id)
success = False
while not success:
try:
fetch_handle = Entrez.efetch(db=db, id=id, rettype=rettype,
email=email)
success = True
except (HTTPError, URLError,
RuntimeError) as e:
logging.error(e)
logging.debug("wait 5 seconds to reconnect...")
time.sleep(5)
yield id, size, term, fetch_handle | python | def batch_entrez(list_of_terms, db="nuccore", retmax=1, rettype="fasta",
batchsize=1, email=myEmail):
"""
Retrieve multiple rather than a single record
"""
for term in list_of_terms:
logging.debug("Search term %s" % term)
success = False
ids = None
if not term:
continue
while not success:
try:
search_handle = Entrez.esearch(db=db, retmax=retmax, term=term)
rec = Entrez.read(search_handle)
success = True
ids = rec["IdList"]
except (HTTPError, URLError,
RuntimeError, KeyError) as e:
logging.error(e)
logging.debug("wait 5 seconds to reconnect...")
time.sleep(5)
if not ids:
logging.error("term {0} not found".format(term))
continue
assert ids
nids = len(ids)
if nids > 1:
logging.debug("A total of {0} results found.".format(nids))
if batchsize != 1:
logging.debug("Use a batch size of {0}.".format(batchsize))
ids = list(grouper(ids, batchsize))
for id in ids:
id = [x for x in id if x]
size = len(id)
id = ",".join(id)
success = False
while not success:
try:
fetch_handle = Entrez.efetch(db=db, id=id, rettype=rettype,
email=email)
success = True
except (HTTPError, URLError,
RuntimeError) as e:
logging.error(e)
logging.debug("wait 5 seconds to reconnect...")
time.sleep(5)
yield id, size, term, fetch_handle | [
"def",
"batch_entrez",
"(",
"list_of_terms",
",",
"db",
"=",
"\"nuccore\"",
",",
"retmax",
"=",
"1",
",",
"rettype",
"=",
"\"fasta\"",
",",
"batchsize",
"=",
"1",
",",
"email",
"=",
"myEmail",
")",
":",
"for",
"term",
"in",
"list_of_terms",
":",
"logging... | Retrieve multiple rather than a single record | [
"Retrieve",
"multiple",
"rather",
"than",
"a",
"single",
"record"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/fetch.py#L53-L110 | train | 201,048 |
tanghaibao/jcvi | jcvi/apps/fetch.py | ensembl | def ensembl(args):
"""
%prog ensembl species
Retrieve genomes and annotations from ensembl FTP. Available species
listed below. Use comma to give a list of species to download. For example:
$ %prog ensembl danio_rerio,gasterosteus_aculeatus
"""
p = OptionParser(ensembl.__doc__)
p.add_option("--version", default="75",
help="Ensembl version [default: %default]")
opts, args = p.parse_args(args)
version = opts.version
url = "ftp://ftp.ensembl.org/pub/release-{0}/".format(version)
fasta_url = url + "fasta/"
valid_species = [x for x in ls_ftp(fasta_url) if "." not in x]
doc = "\n".join((ensembl.__doc__, tile(valid_species)))
p.set_usage(doc)
if len(args) != 1:
sys.exit(not p.print_help())
species, = args
species = species.split(",")
for s in species:
download_species_ensembl(s, valid_species, url) | python | def ensembl(args):
"""
%prog ensembl species
Retrieve genomes and annotations from ensembl FTP. Available species
listed below. Use comma to give a list of species to download. For example:
$ %prog ensembl danio_rerio,gasterosteus_aculeatus
"""
p = OptionParser(ensembl.__doc__)
p.add_option("--version", default="75",
help="Ensembl version [default: %default]")
opts, args = p.parse_args(args)
version = opts.version
url = "ftp://ftp.ensembl.org/pub/release-{0}/".format(version)
fasta_url = url + "fasta/"
valid_species = [x for x in ls_ftp(fasta_url) if "." not in x]
doc = "\n".join((ensembl.__doc__, tile(valid_species)))
p.set_usage(doc)
if len(args) != 1:
sys.exit(not p.print_help())
species, = args
species = species.split(",")
for s in species:
download_species_ensembl(s, valid_species, url) | [
"def",
"ensembl",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"ensembl",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--version\"",
",",
"default",
"=",
"\"75\"",
",",
"help",
"=",
"\"Ensembl version [default: %default]\"",
")",
"opts",
","... | %prog ensembl species
Retrieve genomes and annotations from ensembl FTP. Available species
listed below. Use comma to give a list of species to download. For example:
$ %prog ensembl danio_rerio,gasterosteus_aculeatus | [
"%prog",
"ensembl",
"species"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/fetch.py#L127-L155 | train | 201,049 |
tanghaibao/jcvi | jcvi/apps/fetch.py | get_first_rec | def get_first_rec(fastafile):
"""
Returns the first record in the fastafile
"""
f = list(SeqIO.parse(fastafile, "fasta"))
if len(f) > 1:
logging.debug("{0} records found in {1}, using the first one".
format(len(f), fastafile))
return f[0] | python | def get_first_rec(fastafile):
"""
Returns the first record in the fastafile
"""
f = list(SeqIO.parse(fastafile, "fasta"))
if len(f) > 1:
logging.debug("{0} records found in {1}, using the first one".
format(len(f), fastafile))
return f[0] | [
"def",
"get_first_rec",
"(",
"fastafile",
")",
":",
"f",
"=",
"list",
"(",
"SeqIO",
".",
"parse",
"(",
"fastafile",
",",
"\"fasta\"",
")",
")",
"if",
"len",
"(",
"f",
")",
">",
"1",
":",
"logging",
".",
"debug",
"(",
"\"{0} records found in {1}, using th... | Returns the first record in the fastafile | [
"Returns",
"the",
"first",
"record",
"in",
"the",
"fastafile"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/fetch.py#L306-L316 | train | 201,050 |
tanghaibao/jcvi | jcvi/apps/fetch.py | bisect | def bisect(args):
"""
%prog bisect acc accession.fasta
determine the version of the accession by querying entrez, based on a fasta file.
This proceeds by a sequential search from xxxx.1 to the latest record.
"""
p = OptionParser(bisect.__doc__)
p.set_email()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
acc, fastafile = args
arec = get_first_rec(fastafile)
valid = None
for i in range(1, 100):
term = "%s.%d" % (acc, i)
try:
query = list(batch_entrez([term], email=opts.email))
except AssertionError as e:
logging.debug("no records found for %s. terminating." % term)
return
id, term, handle = query[0]
brec = next(SeqIO.parse(handle, "fasta"))
match = print_first_difference(arec, brec, ignore_case=True,
ignore_N=True, rc=True)
if match:
valid = term
break
if valid:
print()
print(green("%s matches the sequence in `%s`" % (valid, fastafile))) | python | def bisect(args):
"""
%prog bisect acc accession.fasta
determine the version of the accession by querying entrez, based on a fasta file.
This proceeds by a sequential search from xxxx.1 to the latest record.
"""
p = OptionParser(bisect.__doc__)
p.set_email()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
acc, fastafile = args
arec = get_first_rec(fastafile)
valid = None
for i in range(1, 100):
term = "%s.%d" % (acc, i)
try:
query = list(batch_entrez([term], email=opts.email))
except AssertionError as e:
logging.debug("no records found for %s. terminating." % term)
return
id, term, handle = query[0]
brec = next(SeqIO.parse(handle, "fasta"))
match = print_first_difference(arec, brec, ignore_case=True,
ignore_N=True, rc=True)
if match:
valid = term
break
if valid:
print()
print(green("%s matches the sequence in `%s`" % (valid, fastafile))) | [
"def",
"bisect",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"bisect",
".",
"__doc__",
")",
"p",
".",
"set_email",
"(",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"2",
... | %prog bisect acc accession.fasta
determine the version of the accession by querying entrez, based on a fasta file.
This proceeds by a sequential search from xxxx.1 to the latest record. | [
"%prog",
"bisect",
"acc",
"accession",
".",
"fasta"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/fetch.py#L319-L357 | train | 201,051 |
tanghaibao/jcvi | jcvi/utils/cbook.py | inspect | def inspect(item, maxchar=80):
"""
Inspect the attributes of an item.
"""
for i in dir(item):
try:
member = str(getattr(item, i))
if maxchar and len(member) > maxchar:
member = member[:maxchar] + "..."
except:
member = "[ERROR]"
print("{}: {}".format(i, member), file=sys.stderr) | python | def inspect(item, maxchar=80):
"""
Inspect the attributes of an item.
"""
for i in dir(item):
try:
member = str(getattr(item, i))
if maxchar and len(member) > maxchar:
member = member[:maxchar] + "..."
except:
member = "[ERROR]"
print("{}: {}".format(i, member), file=sys.stderr) | [
"def",
"inspect",
"(",
"item",
",",
"maxchar",
"=",
"80",
")",
":",
"for",
"i",
"in",
"dir",
"(",
"item",
")",
":",
"try",
":",
"member",
"=",
"str",
"(",
"getattr",
"(",
"item",
",",
"i",
")",
")",
"if",
"maxchar",
"and",
"len",
"(",
"member",... | Inspect the attributes of an item. | [
"Inspect",
"the",
"attributes",
"of",
"an",
"item",
"."
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/utils/cbook.py#L50-L61 | train | 201,052 |
tanghaibao/jcvi | jcvi/utils/cbook.py | depends | def depends(func):
"""
Decorator to perform check on infile and outfile. When infile is not present, issue
warning, and when outfile is present, skip function calls.
"""
from jcvi.apps.base import need_update, listify
infile = "infile"
outfile = "outfile"
def wrapper(*args, **kwargs):
assert outfile in kwargs, \
"You need to specify `outfile=` on function call"
if infile in kwargs:
infilename = listify(kwargs[infile])
for x in infilename:
assert op.exists(x), \
"The specified infile `{0}` does not exist".format(x)
outfilename = kwargs[outfile]
if need_update(infilename, outfilename):
return func(*args, **kwargs)
else:
msg = "File `{0}` exists. Computation skipped." \
.format(outfilename)
logging.debug(msg)
outfilename = listify(outfilename)
for x in outfilename:
assert op.exists(x), \
"Something went wrong, `{0}` not found".format(x)
return outfilename
return wrapper | python | def depends(func):
"""
Decorator to perform check on infile and outfile. When infile is not present, issue
warning, and when outfile is present, skip function calls.
"""
from jcvi.apps.base import need_update, listify
infile = "infile"
outfile = "outfile"
def wrapper(*args, **kwargs):
assert outfile in kwargs, \
"You need to specify `outfile=` on function call"
if infile in kwargs:
infilename = listify(kwargs[infile])
for x in infilename:
assert op.exists(x), \
"The specified infile `{0}` does not exist".format(x)
outfilename = kwargs[outfile]
if need_update(infilename, outfilename):
return func(*args, **kwargs)
else:
msg = "File `{0}` exists. Computation skipped." \
.format(outfilename)
logging.debug(msg)
outfilename = listify(outfilename)
for x in outfilename:
assert op.exists(x), \
"Something went wrong, `{0}` not found".format(x)
return outfilename
return wrapper | [
"def",
"depends",
"(",
"func",
")",
":",
"from",
"jcvi",
".",
"apps",
".",
"base",
"import",
"need_update",
",",
"listify",
"infile",
"=",
"\"infile\"",
"outfile",
"=",
"\"outfile\"",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":... | Decorator to perform check on infile and outfile. When infile is not present, issue
warning, and when outfile is present, skip function calls. | [
"Decorator",
"to",
"perform",
"check",
"on",
"infile",
"and",
"outfile",
".",
"When",
"infile",
"is",
"not",
"present",
"issue",
"warning",
"and",
"when",
"outfile",
"is",
"present",
"skip",
"function",
"calls",
"."
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/utils/cbook.py#L83-L118 | train | 201,053 |
tanghaibao/jcvi | jcvi/utils/cbook.py | human_size | def human_size(size, a_kilobyte_is_1024_bytes=False, precision=1, target=None):
'''Convert a file size to human-readable form.
Keyword arguments:
size -- file size in bytes
a_kilobyte_is_1024_bytes -- if True (default), use multiples of 1024
if False, use multiples of 1000
Returns: string
Credit: <http://diveintopython3.org/your-first-python-program.html>
>>> print(human_size(1000000000000, True))
931.3GiB
>>> print(human_size(1000000000000))
1.0Tb
>>> print(human_size(300))
300.0
'''
if size < 0:
raise ValueError('number must be non-negative')
multiple = 1024 if a_kilobyte_is_1024_bytes else 1000
for suffix in SUFFIXES[multiple]:
if target:
if suffix == target:
break
size /= float(multiple)
else:
if size >= multiple:
size /= float(multiple)
else:
break
return '{0:.{1}f}{2}'.format(size, precision, suffix) | python | def human_size(size, a_kilobyte_is_1024_bytes=False, precision=1, target=None):
'''Convert a file size to human-readable form.
Keyword arguments:
size -- file size in bytes
a_kilobyte_is_1024_bytes -- if True (default), use multiples of 1024
if False, use multiples of 1000
Returns: string
Credit: <http://diveintopython3.org/your-first-python-program.html>
>>> print(human_size(1000000000000, True))
931.3GiB
>>> print(human_size(1000000000000))
1.0Tb
>>> print(human_size(300))
300.0
'''
if size < 0:
raise ValueError('number must be non-negative')
multiple = 1024 if a_kilobyte_is_1024_bytes else 1000
for suffix in SUFFIXES[multiple]:
if target:
if suffix == target:
break
size /= float(multiple)
else:
if size >= multiple:
size /= float(multiple)
else:
break
return '{0:.{1}f}{2}'.format(size, precision, suffix) | [
"def",
"human_size",
"(",
"size",
",",
"a_kilobyte_is_1024_bytes",
"=",
"False",
",",
"precision",
"=",
"1",
",",
"target",
"=",
"None",
")",
":",
"if",
"size",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"'number must be non-negative'",
")",
"multiple",
"=",... | Convert a file size to human-readable form.
Keyword arguments:
size -- file size in bytes
a_kilobyte_is_1024_bytes -- if True (default), use multiples of 1024
if False, use multiples of 1000
Returns: string
Credit: <http://diveintopython3.org/your-first-python-program.html>
>>> print(human_size(1000000000000, True))
931.3GiB
>>> print(human_size(1000000000000))
1.0Tb
>>> print(human_size(300))
300.0 | [
"Convert",
"a",
"file",
"size",
"to",
"human",
"-",
"readable",
"form",
"."
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/utils/cbook.py#L271-L305 | train | 201,054 |
tanghaibao/jcvi | jcvi/utils/cbook.py | gene_name | def gene_name(st, exclude=("ev",), sep="."):
"""
Helper functions in the BLAST filtering to get rid alternative splicings.
This is ugly, but different annotation groups are inconsistent with respect
to how the alternative splicings are named. Mostly it can be done by removing
the suffix, except for ones in the exclude list.
"""
if any(st.startswith(x) for x in exclude):
sep = None
st = st.split('|')[0]
if sep and sep in st:
name, suffix = st.rsplit(sep, 1)
else:
name, suffix = st, ""
# We only want to remove suffix that are isoforms, longer suffix would
# suggest that it is part of the right gene name
if len(suffix) != 1:
name = st
return name | python | def gene_name(st, exclude=("ev",), sep="."):
"""
Helper functions in the BLAST filtering to get rid alternative splicings.
This is ugly, but different annotation groups are inconsistent with respect
to how the alternative splicings are named. Mostly it can be done by removing
the suffix, except for ones in the exclude list.
"""
if any(st.startswith(x) for x in exclude):
sep = None
st = st.split('|')[0]
if sep and sep in st:
name, suffix = st.rsplit(sep, 1)
else:
name, suffix = st, ""
# We only want to remove suffix that are isoforms, longer suffix would
# suggest that it is part of the right gene name
if len(suffix) != 1:
name = st
return name | [
"def",
"gene_name",
"(",
"st",
",",
"exclude",
"=",
"(",
"\"ev\"",
",",
")",
",",
"sep",
"=",
"\".\"",
")",
":",
"if",
"any",
"(",
"st",
".",
"startswith",
"(",
"x",
")",
"for",
"x",
"in",
"exclude",
")",
":",
"sep",
"=",
"None",
"st",
"=",
"... | Helper functions in the BLAST filtering to get rid alternative splicings.
This is ugly, but different annotation groups are inconsistent with respect
to how the alternative splicings are named. Mostly it can be done by removing
the suffix, except for ones in the exclude list. | [
"Helper",
"functions",
"in",
"the",
"BLAST",
"filtering",
"to",
"get",
"rid",
"alternative",
"splicings",
".",
"This",
"is",
"ugly",
"but",
"different",
"annotation",
"groups",
"are",
"inconsistent",
"with",
"respect",
"to",
"how",
"the",
"alternative",
"splicin... | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/utils/cbook.py#L329-L350 | train | 201,055 |
tanghaibao/jcvi | jcvi/utils/cbook.py | fixChromName | def fixChromName(name, orgn="medicago"):
"""
Convert quirky chromosome names encountered in different
release files, which are very project specific, into a more
general format.
For example, in Medicago
Convert a seqid like
`Mt3.5.1_Chr1` to `chr1`
`Mt3.5_Chr3` to `chr3`
`chr01_pseudomolecule_IMGAG` to `chr1`
Some examples from Maize
Convert a seqid like
`chromosome:AGPv2:2:1:237068873:1` to `2`
Special cases
`chromosome:AGPv2:mitochondrion:1:569630:1` to `Mt`
`chromosome:AGPv2:chloroplast:1:140384:1` to `Pt`
"""
import re
mtr_pat1 = re.compile(r"Mt[0-9]+\.[0-9]+[\.[0-9]+]{0,}_([a-z]+[0-9]+)")
mtr_pat2 = re.compile(r"([A-z0-9]+)_[A-z]+_[A-z]+")
zmays_pat = re.compile(
r"[a-z]+:[A-z0-9]+:([A-z0-9]+):[0-9]+:[0-9]+:[0-9]+")
zmays_sub = {'mitochondrion': 'Mt', 'chloroplast': 'Pt'}
if orgn == "medicago":
for mtr_pat in (mtr_pat1, mtr_pat2):
match = re.search(mtr_pat, name)
if match:
n = match.group(1)
n = n.replace("0", "")
name = re.sub(mtr_pat, n, name)
elif orgn == "maize":
match = re.search(zmays_pat, name)
if match:
n = match.group(1)
name = re.sub(zmays_pat, n, name)
if name in zmays_sub:
name = zmays_sub[name]
return name | python | def fixChromName(name, orgn="medicago"):
"""
Convert quirky chromosome names encountered in different
release files, which are very project specific, into a more
general format.
For example, in Medicago
Convert a seqid like
`Mt3.5.1_Chr1` to `chr1`
`Mt3.5_Chr3` to `chr3`
`chr01_pseudomolecule_IMGAG` to `chr1`
Some examples from Maize
Convert a seqid like
`chromosome:AGPv2:2:1:237068873:1` to `2`
Special cases
`chromosome:AGPv2:mitochondrion:1:569630:1` to `Mt`
`chromosome:AGPv2:chloroplast:1:140384:1` to `Pt`
"""
import re
mtr_pat1 = re.compile(r"Mt[0-9]+\.[0-9]+[\.[0-9]+]{0,}_([a-z]+[0-9]+)")
mtr_pat2 = re.compile(r"([A-z0-9]+)_[A-z]+_[A-z]+")
zmays_pat = re.compile(
r"[a-z]+:[A-z0-9]+:([A-z0-9]+):[0-9]+:[0-9]+:[0-9]+")
zmays_sub = {'mitochondrion': 'Mt', 'chloroplast': 'Pt'}
if orgn == "medicago":
for mtr_pat in (mtr_pat1, mtr_pat2):
match = re.search(mtr_pat, name)
if match:
n = match.group(1)
n = n.replace("0", "")
name = re.sub(mtr_pat, n, name)
elif orgn == "maize":
match = re.search(zmays_pat, name)
if match:
n = match.group(1)
name = re.sub(zmays_pat, n, name)
if name in zmays_sub:
name = zmays_sub[name]
return name | [
"def",
"fixChromName",
"(",
"name",
",",
"orgn",
"=",
"\"medicago\"",
")",
":",
"import",
"re",
"mtr_pat1",
"=",
"re",
".",
"compile",
"(",
"r\"Mt[0-9]+\\.[0-9]+[\\.[0-9]+]{0,}_([a-z]+[0-9]+)\"",
")",
"mtr_pat2",
"=",
"re",
".",
"compile",
"(",
"r\"([A-z0-9]+)_[A-... | Convert quirky chromosome names encountered in different
release files, which are very project specific, into a more
general format.
For example, in Medicago
Convert a seqid like
`Mt3.5.1_Chr1` to `chr1`
`Mt3.5_Chr3` to `chr3`
`chr01_pseudomolecule_IMGAG` to `chr1`
Some examples from Maize
Convert a seqid like
`chromosome:AGPv2:2:1:237068873:1` to `2`
Special cases
`chromosome:AGPv2:mitochondrion:1:569630:1` to `Mt`
`chromosome:AGPv2:chloroplast:1:140384:1` to `Pt` | [
"Convert",
"quirky",
"chromosome",
"names",
"encountered",
"in",
"different",
"release",
"files",
"which",
"are",
"very",
"project",
"specific",
"into",
"a",
"more",
"general",
"format",
"."
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/utils/cbook.py#L418-L459 | train | 201,056 |
tanghaibao/jcvi | jcvi/utils/cbook.py | fill | def fill(text, delimiter="", width=70):
"""
Wrap text with width per line
"""
texts = []
for i in xrange(0, len(text), width):
t = delimiter.join(text[i:i + width])
texts.append(t)
return "\n".join(texts) | python | def fill(text, delimiter="", width=70):
"""
Wrap text with width per line
"""
texts = []
for i in xrange(0, len(text), width):
t = delimiter.join(text[i:i + width])
texts.append(t)
return "\n".join(texts) | [
"def",
"fill",
"(",
"text",
",",
"delimiter",
"=",
"\"\"",
",",
"width",
"=",
"70",
")",
":",
"texts",
"=",
"[",
"]",
"for",
"i",
"in",
"xrange",
"(",
"0",
",",
"len",
"(",
"text",
")",
",",
"width",
")",
":",
"t",
"=",
"delimiter",
".",
"joi... | Wrap text with width per line | [
"Wrap",
"text",
"with",
"width",
"per",
"line"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/utils/cbook.py#L462-L470 | train | 201,057 |
tanghaibao/jcvi | jcvi/utils/cbook.py | tile | def tile(lt, width=70, gap=1):
"""
Pretty print list of items.
"""
from jcvi.utils.iter import grouper
max_len = max(len(x) for x in lt) + gap
items_per_line = max(width // max_len, 1)
lt = [x.rjust(max_len) for x in lt]
g = list(grouper(lt, items_per_line, fillvalue=""))
return "\n".join("".join(x) for x in g) | python | def tile(lt, width=70, gap=1):
"""
Pretty print list of items.
"""
from jcvi.utils.iter import grouper
max_len = max(len(x) for x in lt) + gap
items_per_line = max(width // max_len, 1)
lt = [x.rjust(max_len) for x in lt]
g = list(grouper(lt, items_per_line, fillvalue=""))
return "\n".join("".join(x) for x in g) | [
"def",
"tile",
"(",
"lt",
",",
"width",
"=",
"70",
",",
"gap",
"=",
"1",
")",
":",
"from",
"jcvi",
".",
"utils",
".",
"iter",
"import",
"grouper",
"max_len",
"=",
"max",
"(",
"len",
"(",
"x",
")",
"for",
"x",
"in",
"lt",
")",
"+",
"gap",
"ite... | Pretty print list of items. | [
"Pretty",
"print",
"list",
"of",
"items",
"."
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/utils/cbook.py#L473-L484 | train | 201,058 |
tanghaibao/jcvi | jcvi/assembly/allmaps.py | normalize_lms_axis | def normalize_lms_axis(ax, xlim=None, ylim=None, xfactor=1e-6, yfactor=1,
xlabel=None, ylabel="Map (cM)"):
""" Normalize the axis limits and labels to beautify axis.
"""
if xlim:
ax.set_xlim(0, xlim)
if ylim:
ax.set_ylim(0, ylim)
if xlabel:
xticklabels = [int(round(x * xfactor)) for x in ax.get_xticks()]
ax.set_xticklabels(xticklabels, family='Helvetica')
ax.set_xlabel(xlabel)
else:
ax.set_xticks([])
if ylabel:
yticklabels = [int(round(x * yfactor)) for x in ax.get_yticks()]
ax.set_yticklabels(yticklabels, family='Helvetica')
ax.set_ylabel(ylabel)
else:
ax.set_yticks([]) | python | def normalize_lms_axis(ax, xlim=None, ylim=None, xfactor=1e-6, yfactor=1,
xlabel=None, ylabel="Map (cM)"):
""" Normalize the axis limits and labels to beautify axis.
"""
if xlim:
ax.set_xlim(0, xlim)
if ylim:
ax.set_ylim(0, ylim)
if xlabel:
xticklabels = [int(round(x * xfactor)) for x in ax.get_xticks()]
ax.set_xticklabels(xticklabels, family='Helvetica')
ax.set_xlabel(xlabel)
else:
ax.set_xticks([])
if ylabel:
yticklabels = [int(round(x * yfactor)) for x in ax.get_yticks()]
ax.set_yticklabels(yticklabels, family='Helvetica')
ax.set_ylabel(ylabel)
else:
ax.set_yticks([]) | [
"def",
"normalize_lms_axis",
"(",
"ax",
",",
"xlim",
"=",
"None",
",",
"ylim",
"=",
"None",
",",
"xfactor",
"=",
"1e-6",
",",
"yfactor",
"=",
"1",
",",
"xlabel",
"=",
"None",
",",
"ylabel",
"=",
"\"Map (cM)\"",
")",
":",
"if",
"xlim",
":",
"ax",
".... | Normalize the axis limits and labels to beautify axis. | [
"Normalize",
"the",
"axis",
"limits",
"and",
"labels",
"to",
"beautify",
"axis",
"."
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/allmaps.py#L831-L850 | train | 201,059 |
tanghaibao/jcvi | jcvi/assembly/allmaps.py | fake | def fake(args):
"""
%prog fake input.bed
Make fake `scaffolds.fasta`. Use case for this is that sometimes I would
receive just the csv/bed file and I'd like to use path() out of the box.
"""
from math import ceil
from random import choice
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
p = OptionParser(fake.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
inputbed, = args
bed = Bed(inputbed)
recs = []
for seqid, sb in bed.sub_beds():
maxend = max(x.end for x in sb)
size = int(ceil(maxend / 1000.) * 1000)
seq = "".join([choice("ACGT") for x in xrange(size)])
rec = SeqRecord(Seq(seq), id=seqid, description="")
recs.append(rec)
fw = must_open(opts.outfile, "w")
SeqIO.write(recs, fw, "fasta") | python | def fake(args):
"""
%prog fake input.bed
Make fake `scaffolds.fasta`. Use case for this is that sometimes I would
receive just the csv/bed file and I'd like to use path() out of the box.
"""
from math import ceil
from random import choice
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
p = OptionParser(fake.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
inputbed, = args
bed = Bed(inputbed)
recs = []
for seqid, sb in bed.sub_beds():
maxend = max(x.end for x in sb)
size = int(ceil(maxend / 1000.) * 1000)
seq = "".join([choice("ACGT") for x in xrange(size)])
rec = SeqRecord(Seq(seq), id=seqid, description="")
recs.append(rec)
fw = must_open(opts.outfile, "w")
SeqIO.write(recs, fw, "fasta") | [
"def",
"fake",
"(",
"args",
")",
":",
"from",
"math",
"import",
"ceil",
"from",
"random",
"import",
"choice",
"from",
"Bio",
"import",
"SeqIO",
"from",
"Bio",
".",
"Seq",
"import",
"Seq",
"from",
"Bio",
".",
"SeqRecord",
"import",
"SeqRecord",
"p",
"=",
... | %prog fake input.bed
Make fake `scaffolds.fasta`. Use case for this is that sometimes I would
receive just the csv/bed file and I'd like to use path() out of the box. | [
"%prog",
"fake",
"input",
".",
"bed"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/allmaps.py#L922-L954 | train | 201,060 |
tanghaibao/jcvi | jcvi/assembly/allmaps.py | compute_score | def compute_score(markers, bonus, penalty):
"""
Compute chain score using dynamic programming. If a marker is the same
linkage group as a previous one, we add bonus; otherwise, we penalize the
chain switching.
"""
nmarkers = len(markers)
s = [bonus] * nmarkers # score
f = [-1] * nmarkers # from
for i in xrange(1, nmarkers):
for j in xrange(i):
mi, mj = markers[i], markers[j]
t = bonus if mi.mlg == mj.mlg else penalty + bonus
if s[i] < s[j] + t:
s[i] = s[j] + t
f[i] = j
# Recover the highest scoring chain
highest_score = max(s)
si = s.index(highest_score)
onchain = set()
while True:
if si < 0:
break
si = f[si]
onchain.add(si)
return [x for i, x in enumerate(markers) if i in onchain] | python | def compute_score(markers, bonus, penalty):
"""
Compute chain score using dynamic programming. If a marker is the same
linkage group as a previous one, we add bonus; otherwise, we penalize the
chain switching.
"""
nmarkers = len(markers)
s = [bonus] * nmarkers # score
f = [-1] * nmarkers # from
for i in xrange(1, nmarkers):
for j in xrange(i):
mi, mj = markers[i], markers[j]
t = bonus if mi.mlg == mj.mlg else penalty + bonus
if s[i] < s[j] + t:
s[i] = s[j] + t
f[i] = j
# Recover the highest scoring chain
highest_score = max(s)
si = s.index(highest_score)
onchain = set()
while True:
if si < 0:
break
si = f[si]
onchain.add(si)
return [x for i, x in enumerate(markers) if i in onchain] | [
"def",
"compute_score",
"(",
"markers",
",",
"bonus",
",",
"penalty",
")",
":",
"nmarkers",
"=",
"len",
"(",
"markers",
")",
"s",
"=",
"[",
"bonus",
"]",
"*",
"nmarkers",
"# score",
"f",
"=",
"[",
"-",
"1",
"]",
"*",
"nmarkers",
"# from",
"for",
"i... | Compute chain score using dynamic programming. If a marker is the same
linkage group as a previous one, we add bonus; otherwise, we penalize the
chain switching. | [
"Compute",
"chain",
"score",
"using",
"dynamic",
"programming",
".",
"If",
"a",
"marker",
"is",
"the",
"same",
"linkage",
"group",
"as",
"a",
"previous",
"one",
"we",
"add",
"bonus",
";",
"otherwise",
"we",
"penalize",
"the",
"chain",
"switching",
"."
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/allmaps.py#L957-L982 | train | 201,061 |
tanghaibao/jcvi | jcvi/assembly/allmaps.py | split | def split(args):
"""
%prog split input.bed
Split suspicious scaffolds. Suspicious scaffolds are those that contain
chunks that map to more than one linkage group. The chunk size can be
modified through --chunk option.
"""
p = OptionParser(split.__doc__)
p.add_option("--chunk", default=4, type="int",
help="Split chunks of at least N markers")
p.add_option("--splitsingle", default=False, action="store_true",
help="Split breakpoint range right in the middle")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
inputbed, = args
bonus = 2
nchunk = opts.chunk
nbreaks = 0
penalty = -(nchunk * bonus - 1)
bed = Bed(inputbed)
for seqid, bb in bed.sub_beds():
markers = [Marker(x) for x in bb]
markers = compute_score(markers, bonus, penalty)
for mi, mj in pairwise(markers):
if mi.mlg == mj.mlg:
continue
assert mi.seqid == mj.seqid
start, end = mi.pos, mj.pos
if start > end:
start, end = end, start
if opts.splitsingle:
start = end = (start + end) / 2
print("\t".join(str(x) for x in (mi.seqid, start - 1, end)))
nbreaks += 1
logging.debug("A total of {} breakpoints inferred (--chunk={})".\
format(nbreaks, nchunk)) | python | def split(args):
"""
%prog split input.bed
Split suspicious scaffolds. Suspicious scaffolds are those that contain
chunks that map to more than one linkage group. The chunk size can be
modified through --chunk option.
"""
p = OptionParser(split.__doc__)
p.add_option("--chunk", default=4, type="int",
help="Split chunks of at least N markers")
p.add_option("--splitsingle", default=False, action="store_true",
help="Split breakpoint range right in the middle")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
inputbed, = args
bonus = 2
nchunk = opts.chunk
nbreaks = 0
penalty = -(nchunk * bonus - 1)
bed = Bed(inputbed)
for seqid, bb in bed.sub_beds():
markers = [Marker(x) for x in bb]
markers = compute_score(markers, bonus, penalty)
for mi, mj in pairwise(markers):
if mi.mlg == mj.mlg:
continue
assert mi.seqid == mj.seqid
start, end = mi.pos, mj.pos
if start > end:
start, end = end, start
if opts.splitsingle:
start = end = (start + end) / 2
print("\t".join(str(x) for x in (mi.seqid, start - 1, end)))
nbreaks += 1
logging.debug("A total of {} breakpoints inferred (--chunk={})".\
format(nbreaks, nchunk)) | [
"def",
"split",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"split",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--chunk\"",
",",
"default",
"=",
"4",
",",
"type",
"=",
"\"int\"",
",",
"help",
"=",
"\"Split chunks of at least N markers\"... | %prog split input.bed
Split suspicious scaffolds. Suspicious scaffolds are those that contain
chunks that map to more than one linkage group. The chunk size can be
modified through --chunk option. | [
"%prog",
"split",
"input",
".",
"bed"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/allmaps.py#L985-L1024 | train | 201,062 |
tanghaibao/jcvi | jcvi/assembly/allmaps.py | movie | def movie(args):
"""
%prog movie input.bed scaffolds.fasta chr1
Visualize history of scaffold OO. The history is contained within the
tourfile, generated by path(). For each historical scaffold OO, the program
plots a separate PDF file. The plots can be combined to show the progression
as a little animation. The third argument limits the plotting to a
specific pseudomolecule, for example `chr1`.
"""
p = OptionParser(movie.__doc__)
p.add_option("--gapsize", default=100, type="int",
help="Insert gaps of size between scaffolds")
add_allmaps_plot_options(p)
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
inputbed, scaffoldsfasta, seqid = args
gapsize = opts.gapsize
pf = inputbed.rsplit(".", 1)[0]
agpfile = pf + ".chr.agp"
tourfile = pf + ".tour"
fp = open(tourfile)
sizes = Sizes(scaffoldsfasta).mapping
ffmpeg = "ffmpeg"
mkdir(ffmpeg)
score = cur_score = None
i = 1
for header, block in read_block(fp, ">"):
s, tag, label = header[1:].split()
if s != seqid:
continue
tour = block[0].split()
tour = [(x[:-1], x[-1]) for x in tour]
if label.startswith("GA"):
cur_score = label.split("-")[-1]
if cur_score == score:
i += 1
continue
score = cur_score
image_name = ".".join((seqid, "{0:04d}".format(i), label, "pdf"))
if need_update(tourfile, image_name):
fwagp = must_open(agpfile, "w")
order_to_agp(seqid, tour, sizes, fwagp, gapsize=gapsize,
gaptype="map")
fwagp.close()
logging.debug("{0} written to `{1}`".format(header, agpfile))
build([inputbed, scaffoldsfasta, "--cleanup"])
pdf_name = plot([inputbed, seqid, "--title={0}".format(label)])
sh("mv {0} {1}".format(pdf_name, image_name))
if label in ("INIT", "FLIP", "TSP", "FINAL"):
for j in xrange(5): # Delay for 5 frames
image_delay = image_name.rsplit(".", 1)[0] + \
".d{0}.pdf".format(j)
sh("cp {0} {1}/{2}".format(image_name, ffmpeg, image_delay))
else:
sh("cp {0} {1}/".format(image_name, ffmpeg))
i += 1
make_movie(ffmpeg, pf) | python | def movie(args):
"""
%prog movie input.bed scaffolds.fasta chr1
Visualize history of scaffold OO. The history is contained within the
tourfile, generated by path(). For each historical scaffold OO, the program
plots a separate PDF file. The plots can be combined to show the progression
as a little animation. The third argument limits the plotting to a
specific pseudomolecule, for example `chr1`.
"""
p = OptionParser(movie.__doc__)
p.add_option("--gapsize", default=100, type="int",
help="Insert gaps of size between scaffolds")
add_allmaps_plot_options(p)
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
inputbed, scaffoldsfasta, seqid = args
gapsize = opts.gapsize
pf = inputbed.rsplit(".", 1)[0]
agpfile = pf + ".chr.agp"
tourfile = pf + ".tour"
fp = open(tourfile)
sizes = Sizes(scaffoldsfasta).mapping
ffmpeg = "ffmpeg"
mkdir(ffmpeg)
score = cur_score = None
i = 1
for header, block in read_block(fp, ">"):
s, tag, label = header[1:].split()
if s != seqid:
continue
tour = block[0].split()
tour = [(x[:-1], x[-1]) for x in tour]
if label.startswith("GA"):
cur_score = label.split("-")[-1]
if cur_score == score:
i += 1
continue
score = cur_score
image_name = ".".join((seqid, "{0:04d}".format(i), label, "pdf"))
if need_update(tourfile, image_name):
fwagp = must_open(agpfile, "w")
order_to_agp(seqid, tour, sizes, fwagp, gapsize=gapsize,
gaptype="map")
fwagp.close()
logging.debug("{0} written to `{1}`".format(header, agpfile))
build([inputbed, scaffoldsfasta, "--cleanup"])
pdf_name = plot([inputbed, seqid, "--title={0}".format(label)])
sh("mv {0} {1}".format(pdf_name, image_name))
if label in ("INIT", "FLIP", "TSP", "FINAL"):
for j in xrange(5): # Delay for 5 frames
image_delay = image_name.rsplit(".", 1)[0] + \
".d{0}.pdf".format(j)
sh("cp {0} {1}/{2}".format(image_name, ffmpeg, image_delay))
else:
sh("cp {0} {1}/".format(image_name, ffmpeg))
i += 1
make_movie(ffmpeg, pf) | [
"def",
"movie",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"movie",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--gapsize\"",
",",
"default",
"=",
"100",
",",
"type",
"=",
"\"int\"",
",",
"help",
"=",
"\"Insert gaps of size between scaf... | %prog movie input.bed scaffolds.fasta chr1
Visualize history of scaffold OO. The history is contained within the
tourfile, generated by path(). For each historical scaffold OO, the program
plots a separate PDF file. The plots can be combined to show the progression
as a little animation. The third argument limits the plotting to a
specific pseudomolecule, for example `chr1`. | [
"%prog",
"movie",
"input",
".",
"bed",
"scaffolds",
".",
"fasta",
"chr1"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/allmaps.py#L1027-L1090 | train | 201,063 |
tanghaibao/jcvi | jcvi/assembly/allmaps.py | make_movie | def make_movie(workdir, pf, dpi=120, fps=1, format="pdf", engine="ffmpeg"):
""" Make the movie using either ffmpeg or gifsicle.
"""
os.chdir(workdir)
if format != "png":
cmd = "parallel convert -density {}".format(dpi)
cmd += " {} {.}.png ::: " + "*.{}".format(format)
sh(cmd)
assert engine in ("ffmpeg", "gifsicle"), \
"Only ffmpeg or gifsicle is currently supported"
if engine == "ffmpeg":
cmd = "ffmpeg -framerate {} -pattern_type glob -i '*.png' {}.mp4"\
.format(fps, pf)
elif engine == "gifsicle":
cmd = "convert *.png gif:- |"
cmd += " gifsicle --delay {} --loop --optimize=3".format(100 / fps)
cmd += " --colors=256 --multifile - > {}.gif".format(pf)
sh(cmd) | python | def make_movie(workdir, pf, dpi=120, fps=1, format="pdf", engine="ffmpeg"):
""" Make the movie using either ffmpeg or gifsicle.
"""
os.chdir(workdir)
if format != "png":
cmd = "parallel convert -density {}".format(dpi)
cmd += " {} {.}.png ::: " + "*.{}".format(format)
sh(cmd)
assert engine in ("ffmpeg", "gifsicle"), \
"Only ffmpeg or gifsicle is currently supported"
if engine == "ffmpeg":
cmd = "ffmpeg -framerate {} -pattern_type glob -i '*.png' {}.mp4"\
.format(fps, pf)
elif engine == "gifsicle":
cmd = "convert *.png gif:- |"
cmd += " gifsicle --delay {} --loop --optimize=3".format(100 / fps)
cmd += " --colors=256 --multifile - > {}.gif".format(pf)
sh(cmd) | [
"def",
"make_movie",
"(",
"workdir",
",",
"pf",
",",
"dpi",
"=",
"120",
",",
"fps",
"=",
"1",
",",
"format",
"=",
"\"pdf\"",
",",
"engine",
"=",
"\"ffmpeg\"",
")",
":",
"os",
".",
"chdir",
"(",
"workdir",
")",
"if",
"format",
"!=",
"\"png\"",
":",
... | Make the movie using either ffmpeg or gifsicle. | [
"Make",
"the",
"movie",
"using",
"either",
"ffmpeg",
"or",
"gifsicle",
"."
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/allmaps.py#L1093-L1112 | train | 201,064 |
tanghaibao/jcvi | jcvi/assembly/allmaps.py | estimategaps | def estimategaps(args):
"""
%prog estimategaps input.bed
Estimate sizes of inter-scaffold gaps. The AGP file generated by path()
command has unknown gap sizes with a generic number of Ns (often 100 Ns).
The AGP file `input.chr.agp` will be modified in-place.
"""
p = OptionParser(estimategaps.__doc__)
p.add_option("--minsize", default=100, type="int",
help="Minimum gap size")
p.add_option("--maxsize", default=500000, type="int",
help="Maximum gap size")
p.add_option("--links", default=10, type="int",
help="Only use linkage grounds with matchings more than")
p.set_verbose(help="Print details for each gap calculation")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
inputbed, = args
pf = inputbed.rsplit(".", 1)[0]
agpfile = pf + ".chr.agp"
bedfile = pf + ".lifted.bed"
cc = Map(bedfile, scaffold_info=True)
agp = AGP(agpfile)
minsize, maxsize = opts.minsize, opts.maxsize
links = opts.links
verbose = opts.verbose
outagpfile = pf + ".estimategaps.agp"
fw = must_open(outagpfile, "w")
for ob, components in agp.iter_object():
components = list(components)
s = Scaffold(ob, cc)
mlg_counts = s.mlg_counts
gaps = [x for x in components if x.is_gap]
gapsizes = [None] * len(gaps) # master
for mlg, count in mlg_counts.items():
if count < links:
continue
g = GapEstimator(cc, agp, ob, mlg)
g.compute_all_gaps(minsize=minsize, maxsize=maxsize, \
verbose=verbose)
# Merge evidence from this mlg into master
assert len(g.gapsizes) == len(gaps)
for i, gs in enumerate(gapsizes):
gg = g.gapsizes[i]
if gs is None:
gapsizes[i] = gg
elif gg:
gapsizes[i] = min(gs, gg)
print(gapsizes)
# Modify AGP
i = 0
for x in components:
if x.is_gap:
x.gap_length = gapsizes[i] or minsize
x.component_type = 'U' if x.gap_length == 100 else 'N'
i += 1
print(x, file=fw)
fw.close()
def estimategaps(args):
    """
    %prog estimategaps input.bed

    Estimate sizes of inter-scaffold gaps. The AGP file generated by path()
    command has unknown gap sizes with a generic number of Ns (often 100 Ns).
    The AGP file `input.chr.agp` will be modified in-place.
    """
    p = OptionParser(estimategaps.__doc__)
    p.add_option("--minsize", default=100, type="int",
                 help="Minimum gap size")
    p.add_option("--maxsize", default=500000, type="int",
                 help="Maximum gap size")
    # Fixed typo in user-facing help text ("linkage grounds" -> "linkage groups")
    p.add_option("--links", default=10, type="int",
                 help="Only use linkage groups with matchings more than")
    p.set_verbose(help="Print details for each gap calculation")
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    inputbed, = args
    pf = inputbed.rsplit(".", 1)[0]
    agpfile = pf + ".chr.agp"
    bedfile = pf + ".lifted.bed"

    cc = Map(bedfile, scaffold_info=True)
    agp = AGP(agpfile)
    minsize, maxsize = opts.minsize, opts.maxsize
    links = opts.links
    verbose = opts.verbose

    outagpfile = pf + ".estimategaps.agp"
    fw = must_open(outagpfile, "w")

    for ob, components in agp.iter_object():
        components = list(components)
        s = Scaffold(ob, cc)
        mlg_counts = s.mlg_counts
        gaps = [x for x in components if x.is_gap]
        gapsizes = [None] * len(gaps)  # master
        for mlg, count in mlg_counts.items():
            if count < links:  # too few markers to trust this linkage group
                continue
            g = GapEstimator(cc, agp, ob, mlg)
            g.compute_all_gaps(minsize=minsize, maxsize=maxsize,
                               verbose=verbose)
            # Merge evidence from this mlg into master: keep the smallest
            # non-null estimate seen so far for each gap
            assert len(g.gapsizes) == len(gaps)
            for i, gs in enumerate(gapsizes):
                gg = g.gapsizes[i]
                if gs is None:
                    gapsizes[i] = gg
                elif gg:
                    gapsizes[i] = min(gs, gg)

        print(gapsizes)

        # Modify AGP: gaps left at the generic 100bp are typed 'U' (unknown
        # size), all others 'N' (size estimated from linkage evidence)
        i = 0
        for x in components:
            if x.is_gap:
                x.gap_length = gapsizes[i] or minsize
                x.component_type = 'U' if x.gap_length == 100 else 'N'
                i += 1
            print(x, file=fw)

    fw.close()
    reindex([outagpfile, "--inplace"])
"def",
"estimategaps",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"estimategaps",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--minsize\"",
",",
"default",
"=",
"100",
",",
"type",
"=",
"\"int\"",
",",
"help",
"=",
"\"Minimum gap size\"... | %prog estimategaps input.bed
Estimate sizes of inter-scaffold gaps. The AGP file generated by path()
command has unknown gap sizes with a generic number of Ns (often 100 Ns).
The AGP file `input.chr.agp` will be modified in-place. | [
"%prog",
"estimategaps",
"input",
".",
"bed"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/allmaps.py#L1115-L1182 | train | 201,065 |
tanghaibao/jcvi | jcvi/assembly/allmaps.py | merge | def merge(args):
"""
%prog merge map1 map2 map3 ...
Convert csv maps to bed format.
Each input map is csv formatted, for example:
ScaffoldID,ScaffoldPosition,LinkageGroup,GeneticPosition
scaffold_2707,11508,1,0
scaffold_2707,11525,1,1.2
scaffold_759,81336,1,9.7
"""
p = OptionParser(merge.__doc__)
p.add_option("-w", "--weightsfile", default="weights.txt",
help="Write weights to file")
p.set_outfile("out.bed")
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
maps = args
outfile = opts.outfile
fp = must_open(maps)
b = Bed()
mapnames = set()
for row in fp:
mapname = filename_to_mapname(fp.filename())
mapnames.add(mapname)
try:
m = CSVMapLine(row, mapname=mapname)
if m.cm < 0:
logging.error("Ignore marker with negative genetic distance")
print(row.strip(), file=sys.stderr)
else:
b.append(BedLine(m.bedline))
except (IndexError, ValueError): # header or mal-formed line
continue
b.print_to_file(filename=outfile, sorted=True)
logging.debug("A total of {0} markers written to `{1}`.".\
format(len(b), outfile))
assert len(maps) == len(mapnames), "You have a collision in map names"
def merge(args):
    """
    %prog merge map1 map2 map3 ...

    Convert csv maps to bed format.

    Each input map is csv formatted, for example:

    ScaffoldID,ScaffoldPosition,LinkageGroup,GeneticPosition
    scaffold_2707,11508,1,0
    scaffold_2707,11525,1,1.2
    scaffold_759,81336,1,9.7
    """
    parser = OptionParser(merge.__doc__)
    parser.add_option("-w", "--weightsfile", default="weights.txt",
                      help="Write weights to file")
    parser.set_outfile("out.bed")
    opts, args = parser.parse_args(args)

    if len(args) < 1:
        sys.exit(not parser.print_help())

    mapfiles = args
    outfile = opts.outfile
    reader = must_open(mapfiles)
    merged = Bed()
    seen_maps = set()
    for line in reader:
        source = filename_to_mapname(reader.filename())
        seen_maps.add(source)
        try:
            marker = CSVMapLine(line, mapname=source)
            if marker.cm < 0:
                logging.error("Ignore marker with negative genetic distance")
                print(line.strip(), file=sys.stderr)
            else:
                merged.append(BedLine(marker.bedline))
        except (IndexError, ValueError):  # header or mal-formed line
            continue

    merged.print_to_file(filename=outfile, sorted=True)
    logging.debug("A total of {0} markers written to `{1}`.".format(
        len(merged), outfile))

    # Every input file must yield a distinct map name
    assert len(mapfiles) == len(seen_maps), "You have a collision in map names"
    write_weightsfile(seen_maps, weightsfile=opts.weightsfile)
"def",
"merge",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"merge",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"-w\"",
",",
"\"--weightsfile\"",
",",
"default",
"=",
"\"weights.txt\"",
",",
"help",
"=",
"\"Write weights to file\"",
")",
... | %prog merge map1 map2 map3 ...
Convert csv maps to bed format.
Each input map is csv formatted, for example:
ScaffoldID,ScaffoldPosition,LinkageGroup,GeneticPosition
scaffold_2707,11508,1,0
scaffold_2707,11525,1,1.2
scaffold_759,81336,1,9.7 | [
"%prog",
"merge",
"map1",
"map2",
"map3",
"..."
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/allmaps.py#L1191-L1236 | train | 201,066 |
tanghaibao/jcvi | jcvi/assembly/allmaps.py | mergebed | def mergebed(args):
"""
%prog mergebed map1.bed map2.bed map3.bed ...
Combine bed maps to bed format, adding the map name.
"""
p = OptionParser(mergebed.__doc__)
p.add_option("-w", "--weightsfile", default="weights.txt",
help="Write weights to file")
p.set_outfile("out.bed")
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
maps = args
outfile = opts.outfile
fp = must_open(maps)
b = Bed()
mapnames = set()
for row in fp:
mapname = filename_to_mapname(fp.filename())
mapnames.add(mapname)
try:
m = BedLine(row)
m.accn = "{0}-{1}".format(mapname, m.accn)
m.extra = ["{0}:{1}".format(m.seqid, m.start)]
b.append(m)
except (IndexError, ValueError): # header or mal-formed line
continue
b.print_to_file(filename=outfile, sorted=True)
logging.debug("A total of {0} markers written to `{1}`.".\
format(len(b), outfile))
assert len(maps) == len(mapnames), "You have a collision in map names"
def mergebed(args):
    """
    %prog mergebed map1.bed map2.bed map3.bed ...

    Combine bed maps to bed format, adding the map name.
    """
    parser = OptionParser(mergebed.__doc__)
    parser.add_option("-w", "--weightsfile", default="weights.txt",
                      help="Write weights to file")
    parser.set_outfile("out.bed")
    opts, args = parser.parse_args(args)

    if len(args) < 1:
        sys.exit(not parser.print_help())

    mapfiles = args
    outfile = opts.outfile
    reader = must_open(mapfiles)
    combined = Bed()
    seen_maps = set()
    for line in reader:
        source = filename_to_mapname(reader.filename())
        seen_maps.add(source)
        try:
            marker = BedLine(line)
            # Prefix accession with the map name; keep original locus in extra
            marker.accn = "{0}-{1}".format(source, marker.accn)
            marker.extra = ["{0}:{1}".format(marker.seqid, marker.start)]
            combined.append(marker)
        except (IndexError, ValueError):  # header or mal-formed line
            continue

    combined.print_to_file(filename=outfile, sorted=True)
    logging.debug("A total of {0} markers written to `{1}`.".format(
        len(combined), outfile))

    # Every input file must yield a distinct map name
    assert len(mapfiles) == len(seen_maps), "You have a collision in map names"
    write_weightsfile(seen_maps, weightsfile=opts.weightsfile)
"def",
"mergebed",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"mergebed",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"-w\"",
",",
"\"--weightsfile\"",
",",
"default",
"=",
"\"weights.txt\"",
",",
"help",
"=",
"\"Write weights to file\"",
... | %prog mergebed map1.bed map2.bed map3.bed ...
Combine bed maps to bed format, adding the map name. | [
"%prog",
"mergebed",
"map1",
".",
"bed",
"map2",
".",
"bed",
"map3",
".",
"bed",
"..."
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/allmaps.py#L1239-L1275 | train | 201,067 |
tanghaibao/jcvi | jcvi/assembly/allmaps.py | summary | def summary(args):
"""
%prog summary input.bed scaffolds.fasta
Print out summary statistics per map, followed by consensus summary of
scaffold anchoring based on multiple maps.
"""
p = OptionParser(summary.__doc__)
p.set_table(sep="|", align=True)
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
inputbed, scaffolds = args
pf = inputbed.rsplit(".", 1)[0]
mapbed = pf + ".bed"
chr_agp = pf + ".chr.agp"
sep = opts.sep
align = opts.align
cc = Map(mapbed)
mapnames = cc.mapnames
s = Sizes(scaffolds)
total, l50, n50 = s.summary
r = {}
maps = []
fw = must_open(opts.outfile, "w")
print("*** Summary for each individual map ***", file=fw)
for mapname in mapnames:
markers = [x for x in cc if x.mapname == mapname]
ms = MapSummary(markers, l50, s)
r["Linkage Groups", mapname] = ms.num_lgs
ms.export_table(r, mapname, total)
maps.append(ms)
print(tabulate(r, sep=sep, align=align), file=fw)
r = {}
agp = AGP(chr_agp)
print("*** Summary for consensus map ***", file=fw)
consensus_scaffolds = set(x.component_id for x in agp if not x.is_gap)
oriented_scaffolds = set(x.component_id for x in agp \
if (not x.is_gap) and x.orientation != '?')
unplaced_scaffolds = set(s.mapping.keys()) - consensus_scaffolds
for mapname, sc in (("Anchored", consensus_scaffolds),
("Oriented", oriented_scaffolds),
("Unplaced", unplaced_scaffolds)):
markers = [x for x in cc if x.seqid in sc]
ms = MapSummary(markers, l50, s, scaffolds=sc)
ms.export_table(r, mapname, total)
def summary(args):
    """
    %prog summary input.bed scaffolds.fasta

    Print out summary statistics per map, followed by consensus summary of
    scaffold anchoring based on multiple maps.
    """
    parser = OptionParser(summary.__doc__)
    parser.set_table(sep="|", align=True)
    parser.set_outfile()
    opts, args = parser.parse_args(args)

    if len(args) != 2:
        sys.exit(not parser.print_help())

    inputbed, scaffolds = args
    prefix = inputbed.rsplit(".", 1)[0]
    mapbed = prefix + ".bed"
    chr_agp = prefix + ".chr.agp"
    sep, align = opts.sep, opts.align

    allmaps = Map(mapbed)
    sizes = Sizes(scaffolds)
    total, l50, n50 = sizes.summary

    # Per-map statistics table
    rows = {}
    maps = []
    fw = must_open(opts.outfile, "w")
    print("*** Summary for each individual map ***", file=fw)
    for mapname in allmaps.mapnames:
        markers = [x for x in allmaps if x.mapname == mapname]
        ms = MapSummary(markers, l50, sizes)
        rows["Linkage Groups", mapname] = ms.num_lgs
        ms.export_table(rows, mapname, total)
        maps.append(ms)
    print(tabulate(rows, sep=sep, align=align), file=fw)

    # Consensus statistics based on the final AGP
    rows = {}
    agp = AGP(chr_agp)
    print("*** Summary for consensus map ***", file=fw)
    consensus_scaffolds = set(x.component_id for x in agp if not x.is_gap)
    oriented_scaffolds = set(x.component_id for x in agp
                             if (not x.is_gap) and x.orientation != '?')
    unplaced_scaffolds = set(sizes.mapping.keys()) - consensus_scaffolds

    for mapname, sc in (("Anchored", consensus_scaffolds),
                        ("Oriented", oriented_scaffolds),
                        ("Unplaced", unplaced_scaffolds)):
        markers = [x for x in allmaps if x.seqid in sc]
        ms = MapSummary(markers, l50, sizes, scaffolds=sc)
        ms.export_table(rows, mapname, total)
    print(tabulate(rows, sep=sep, align=align), file=fw)
"def",
"summary",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"summary",
".",
"__doc__",
")",
"p",
".",
"set_table",
"(",
"sep",
"=",
"\"|\"",
",",
"align",
"=",
"True",
")",
"p",
".",
"set_outfile",
"(",
")",
"opts",
",",
"args",
"=",
... | %prog summary input.bed scaffolds.fasta
Print out summary statistics per map, followed by consensus summary of
scaffold anchoring based on multiple maps. | [
"%prog",
"summary",
"input",
".",
"bed",
"scaffolds",
".",
"fasta"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/allmaps.py#L1541-L1593 | train | 201,068 |
tanghaibao/jcvi | jcvi/assembly/allmaps.py | build | def build(args):
"""
%prog build input.bed scaffolds.fasta
Build associated genome FASTA file and CHAIN file that can be used to lift
old coordinates to new coordinates. The CHAIN file will be used to lift the
original marker positions to new positions in the reconstructed genome. The
new positions of the markers will be reported in *.lifted.bed.
"""
p = OptionParser(build.__doc__)
p.add_option("--cleanup", default=False, action="store_true",
help="Clean up bulky FASTA files, useful for plotting")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
inputbed, scaffolds = args
pf = inputbed.rsplit(".", 1)[0]
mapbed = pf + ".bed"
chr_agp = pf + ".chr.agp"
chr_fasta = pf + ".chr.fasta"
if need_update((chr_agp, scaffolds), chr_fasta):
agp_build([chr_agp, scaffolds, chr_fasta])
unplaced_agp = pf + ".unplaced.agp"
if need_update((chr_agp, scaffolds), unplaced_agp):
write_unplaced_agp(chr_agp, scaffolds, unplaced_agp)
unplaced_fasta = pf + ".unplaced.fasta"
if need_update((unplaced_agp, scaffolds), unplaced_fasta):
agp_build([unplaced_agp, scaffolds, unplaced_fasta])
combined_agp = pf + ".agp"
if need_update((chr_agp, unplaced_agp), combined_agp):
FileMerger((chr_agp, unplaced_agp), combined_agp).merge()
combined_fasta = pf + ".fasta"
if need_update((chr_fasta, unplaced_fasta), combined_fasta):
FileMerger((chr_fasta, unplaced_fasta), combined_fasta).merge()
chainfile = pf + ".chain"
if need_update((combined_agp, scaffolds, combined_fasta), chainfile):
fromagp([combined_agp, scaffolds, combined_fasta])
liftedbed = mapbed.rsplit(".", 1)[0] + ".lifted.bed"
if need_update((mapbed, chainfile), liftedbed):
cmd = "liftOver -minMatch=1 {0} {1} {2} unmapped".\
format(mapbed, chainfile, liftedbed)
sh(cmd, check=True)
if opts.cleanup:
FileShredder([chr_fasta, unplaced_fasta, combined_fasta,
chainfile, unplaced_agp,
combined_fasta + ".sizes", "unmapped"])
def build(args):
    """
    %prog build input.bed scaffolds.fasta

    Build associated genome FASTA file and CHAIN file that can be used to lift
    old coordinates to new coordinates. The CHAIN file will be used to lift the
    original marker positions to new positions in the reconstructed genome. The
    new positions of the markers will be reported in *.lifted.bed.
    """
    parser = OptionParser(build.__doc__)
    parser.add_option("--cleanup", default=False, action="store_true",
                      help="Clean up bulky FASTA files, useful for plotting")
    opts, args = parser.parse_args(args)

    if len(args) != 2:
        sys.exit(not parser.print_help())

    inputbed, scaffolds = args
    prefix = inputbed.rsplit(".", 1)[0]
    mapbed = prefix + ".bed"

    # Chromosome-anchored sequences
    chr_agp = prefix + ".chr.agp"
    chr_fasta = prefix + ".chr.fasta"
    if need_update((chr_agp, scaffolds), chr_fasta):
        agp_build([chr_agp, scaffolds, chr_fasta])

    # Scaffolds that did not make it into any chromosome
    unplaced_agp = prefix + ".unplaced.agp"
    if need_update((chr_agp, scaffolds), unplaced_agp):
        write_unplaced_agp(chr_agp, scaffolds, unplaced_agp)
    unplaced_fasta = prefix + ".unplaced.fasta"
    if need_update((unplaced_agp, scaffolds), unplaced_fasta):
        agp_build([unplaced_agp, scaffolds, unplaced_fasta])

    # Combined release = chromosomes + unplaced scaffolds
    combined_agp = prefix + ".agp"
    if need_update((chr_agp, unplaced_agp), combined_agp):
        FileMerger((chr_agp, unplaced_agp), combined_agp).merge()
    combined_fasta = prefix + ".fasta"
    if need_update((chr_fasta, unplaced_fasta), combined_fasta):
        FileMerger((chr_fasta, unplaced_fasta), combined_fasta).merge()

    # Lift the original marker coordinates onto the new genome
    chainfile = prefix + ".chain"
    if need_update((combined_agp, scaffolds, combined_fasta), chainfile):
        fromagp([combined_agp, scaffolds, combined_fasta])
    liftedbed = mapbed.rsplit(".", 1)[0] + ".lifted.bed"
    if need_update((mapbed, chainfile), liftedbed):
        cmd = "liftOver -minMatch=1 {0} {1} {2} unmapped".format(
            mapbed, chainfile, liftedbed)
        sh(cmd, check=True)

    if opts.cleanup:
        FileShredder([chr_fasta, unplaced_fasta, combined_fasta,
                      chainfile, unplaced_agp,
                      combined_fasta + ".sizes", "unmapped"])

    sort([liftedbed, "-i"])
"def",
"build",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"build",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--cleanup\"",
",",
"default",
"=",
"False",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Clean up bulky FASTA ... | %prog build input.bed scaffolds.fasta
Build associated genome FASTA file and CHAIN file that can be used to lift
old coordinates to new coordinates. The CHAIN file will be used to lift the
original marker positions to new positions in the reconstructed genome. The
new positions of the markers will be reported in *.lifted.bed. | [
"%prog",
"build",
"input",
".",
"bed",
"scaffolds",
".",
"fasta"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/allmaps.py#L1596-L1652 | train | 201,069 |
tanghaibao/jcvi | jcvi/assembly/allmaps.py | plotall | def plotall(xargs):
"""
%prog plotall input.bed
Plot the matchings between the reconstructed pseudomolecules and the maps.
This command will plot each reconstructed object (non-singleton).
"""
p = OptionParser(plotall.__doc__)
add_allmaps_plot_options(p)
opts, args, iopts = p.set_image_options(xargs, figsize="10x6")
if len(args) != 1:
sys.exit(not p.print_help())
inputbed, = args
pf = inputbed.rsplit(".", 1)[0]
agpfile = pf + ".chr.agp"
agp = AGP(agpfile)
objects = [ob for ob, lines in agp.iter_object()]
for seqid in natsorted(objects):
def plotall(xargs):
    """
    %prog plotall input.bed

    Plot the matchings between the reconstructed pseudomolecules and the maps.
    This command will plot each reconstructed object (non-singleton).
    """
    parser = OptionParser(plotall.__doc__)
    add_allmaps_plot_options(parser)
    opts, args, iopts = parser.set_image_options(xargs, figsize="10x6")

    if len(args) != 1:
        sys.exit(not parser.print_help())

    inputbed, = args
    prefix = inputbed.rsplit(".", 1)[0]
    agp = AGP(prefix + ".chr.agp")
    # One plot per reconstructed object, in natural sort order
    objects = [ob for ob, lines in agp.iter_object()]
    for seqid in natsorted(objects):
        plot(xargs + [seqid])
"def",
"plotall",
"(",
"xargs",
")",
":",
"p",
"=",
"OptionParser",
"(",
"plotall",
".",
"__doc__",
")",
"add_allmaps_plot_options",
"(",
"p",
")",
"opts",
",",
"args",
",",
"iopts",
"=",
"p",
".",
"set_image_options",
"(",
"xargs",
",",
"figsize",
"=",
... | %prog plotall input.bed
Plot the matchings between the reconstructed pseudomolecules and the maps.
This command will plot each reconstructed object (non-singleton). | [
"%prog",
"plotall",
"input",
".",
"bed"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/allmaps.py#L1848-L1868 | train | 201,070 |
tanghaibao/jcvi | jcvi/assembly/allmaps.py | ScaffoldOO.get_orientation | def get_orientation(self, si, sj):
"""
si, sj are two number series. To compute whether these two series have
same orientation or not. We combine them in the two orientation
configurations and compute length of the longest monotonic series.
"""
if not si or not sj:
return 0
# Same orientation configuration
a = lms(si + sj)
b = lms(sj + si)
# Opposite orientation configuration
c = lms(si + sj[::-1])
d = lms(sj[::-1] + si)
def get_orientation(self, si, sj):
    """
    Decide whether two marker-position series run in the same direction.

    Both concatenation orders are scored for each configuration by the
    length of the longest monotonic series (LMS). Returns a positive
    number when the same-direction pairing scores higher, negative when
    the opposite-direction pairing wins, and 0 if either series is empty.
    """
    if not si or not sj:
        return 0
    # Best score with both series kept as-is
    same = max(lms(si + sj), lms(sj + si))[0]
    # Best score with sj reversed
    opposite = max(lms(si + sj[::-1]), lms(sj[::-1] + si))[0]
    return same - opposite
"def",
"get_orientation",
"(",
"self",
",",
"si",
",",
"sj",
")",
":",
"if",
"not",
"si",
"or",
"not",
"sj",
":",
"return",
"0",
"# Same orientation configuration",
"a",
"=",
"lms",
"(",
"si",
"+",
"sj",
")",
"b",
"=",
"lms",
"(",
"sj",
"+",
"si",
... | si, sj are two number series. To compute whether these two series have
same orientation or not. We combine them in the two orientation
configurations and compute length of the longest monotonic series. | [
"si",
"sj",
"are",
"two",
"number",
"series",
".",
"To",
"compute",
"whether",
"these",
"two",
"series",
"have",
"same",
"orientation",
"or",
"not",
".",
"We",
"combine",
"them",
"in",
"the",
"two",
"orientation",
"configurations",
"and",
"compute",
"length"... | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/allmaps.py#L343-L357 | train | 201,071 |
tanghaibao/jcvi | jcvi/assembly/allmaps.py | ScaffoldOO.fix_tour | def fix_tour(self, tour):
"""
Test each scaffold if dropping does not decrease LMS.
"""
scaffolds, oos = zip(*tour)
keep = set()
for mlg in self.linkage_groups:
lg = mlg.lg
for s, o in tour:
i = scaffolds.index(s)
L = [self.get_series(lg, x, xo) for x, xo in tour[:i]]
U = [self.get_series(lg, x, xo) for x, xo in tour[i + 1:]]
L, U = list(flatten(L)), list(flatten(U))
M = self.get_series(lg, s, o)
score_with = lms(L + M + U)[0]
score_without = lms(L + U)[0]
assert score_with >= score_without
if score_with > score_without:
keep.add(s)
dropped = len(tour) - len(keep)
logging.debug("Dropped {0} minor scaffolds".format(dropped))
def fix_tour(self, tour):
    """
    Drop scaffolds that do not contribute to the longest monotonic
    series (LMS): a scaffold is kept only if, in at least one linkage
    group, removing it would shorten the chain.
    """
    scaffolds, _ = zip(*tour)
    contributing = set()
    for mlg in self.linkage_groups:
        lg = mlg.lg
        for s, o in tour:
            idx = scaffolds.index(s)
            left = [self.get_series(lg, x, xo) for x, xo in tour[:idx]]
            right = [self.get_series(lg, x, xo) for x, xo in tour[idx + 1:]]
            left, right = list(flatten(left)), list(flatten(right))
            mid = self.get_series(lg, s, o)
            with_s = lms(left + mid + right)[0]
            without_s = lms(left + right)[0]
            # Adding markers can never shorten the chain
            assert with_s >= without_s
            if with_s > without_s:
                contributing.add(s)

    dropped = len(tour) - len(contributing)
    logging.debug("Dropped {0} minor scaffolds".format(dropped))
    return [(s, o) for (s, o) in tour if s in contributing]
"def",
"fix_tour",
"(",
"self",
",",
"tour",
")",
":",
"scaffolds",
",",
"oos",
"=",
"zip",
"(",
"*",
"tour",
")",
"keep",
"=",
"set",
"(",
")",
"for",
"mlg",
"in",
"self",
".",
"linkage_groups",
":",
"lg",
"=",
"mlg",
".",
"lg",
"for",
"s",
",... | Test each scaffold if dropping does not decrease LMS. | [
"Test",
"each",
"scaffold",
"if",
"dropping",
"does",
"not",
"decrease",
"LMS",
"."
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/allmaps.py#L391-L412 | train | 201,072 |
tanghaibao/jcvi | jcvi/assembly/allmaps.py | ScaffoldOO.fix_orientation | def fix_orientation(self, tour):
"""
Test each scaffold if flipping will increass longest monotonic chain
length.
"""
orientations = dict(tour) # old configuration here
scaffold_oo = defaultdict(list)
scaffolds, oos = zip(*tour)
for mlg in self.linkage_groups:
lg = mlg.lg
mapname = mlg.mapname
for s, o in tour:
i = scaffolds.index(s)
L = [self.get_series(lg, x, xo) for x, xo in tour[:i]]
U = [self.get_series(lg, x, xo) for x, xo in tour[i + 1:]]
L, U = list(flatten(L)), list(flatten(U))
M = self.get_series(lg, s)
plus = lms(L + M + U)
minus = lms(L + M[::-1] + U)
d = plus[0] - minus[0]
if not d:
continue
scaffold_oo[s].append((d, mapname)) # reset orientation
fixed = 0
for s, v in scaffold_oo.items():
d = self.weighted_mean(v)
old_d = orientations[s]
new_d = np.sign(d)
if new_d != old_d:
orientations[s] = new_d
fixed += 1
tour = [(x, orientations[x]) for x in scaffolds]
logging.debug("Fixed orientations for {0} scaffolds.".format(fixed))
def fix_orientation(self, tour):
    """
    Flip scaffolds whose reversal lengthens the longest monotonic chain
    of marker positions, combining evidence across linkage groups with
    the map-weighted mean.
    """
    orientations = dict(tour)  # scaffold -> orientation before fixing
    votes = defaultdict(list)  # scaffold -> [(delta, mapname), ...]
    scaffolds, _ = zip(*tour)
    for mlg in self.linkage_groups:
        lg, mapname = mlg.lg, mlg.mapname
        for s, o in tour:
            idx = scaffolds.index(s)
            left = [self.get_series(lg, x, xo) for x, xo in tour[:idx]]
            right = [self.get_series(lg, x, xo) for x, xo in tour[idx + 1:]]
            left, right = list(flatten(left)), list(flatten(right))
            mid = self.get_series(lg, s)
            forward = lms(left + mid + right)
            backward = lms(left + mid[::-1] + right)
            delta = forward[0] - backward[0]
            if not delta:
                continue
            votes[s].append((delta, mapname))  # reset orientation

    nfixed = 0
    for s, v in votes.items():
        consensus = np.sign(self.weighted_mean(v))
        if consensus != orientations[s]:
            orientations[s] = consensus
            nfixed += 1

    tour = [(x, orientations[x]) for x in scaffolds]
    logging.debug("Fixed orientations for {0} scaffolds.".format(nfixed))
    return tour
"def",
"fix_orientation",
"(",
"self",
",",
"tour",
")",
":",
"orientations",
"=",
"dict",
"(",
"tour",
")",
"# old configuration here",
"scaffold_oo",
"=",
"defaultdict",
"(",
"list",
")",
"scaffolds",
",",
"oos",
"=",
"zip",
"(",
"*",
"tour",
")",
"for",... | Test each scaffold if flipping will increass longest monotonic chain
length. | [
"Test",
"each",
"scaffold",
"if",
"flipping",
"will",
"increass",
"longest",
"monotonic",
"chain",
"length",
"."
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/allmaps.py#L414-L449 | train | 201,073 |
tanghaibao/jcvi | jcvi/apps/console.py | SpinCursor.spin | def spin(self):
""" Perform a single spin """
for x in self.spinchars:
self.string = self.msg + "...\t" + x + "\r"
self.out.write(self.string.encode('utf-8'))
self.out.flush()
def spin(self):
    """Write one full cycle of spinner frames to the output stream."""
    for glyph in self.spinchars:
        frame = self.msg + "...\t" + glyph + "\r"
        self.string = frame
        # Output stream is byte-oriented; encode each frame as UTF-8
        self.out.write(frame.encode('utf-8'))
        self.out.flush()
        time.sleep(self.waittime)
"def",
"spin",
"(",
"self",
")",
":",
"for",
"x",
"in",
"self",
".",
"spinchars",
":",
"self",
".",
"string",
"=",
"self",
".",
"msg",
"+",
"\"...\\t\"",
"+",
"x",
"+",
"\"\\r\"",
"self",
".",
"out",
".",
"write",
"(",
"self",
".",
"string",
".",... | Perform a single spin | [
"Perform",
"a",
"single",
"spin"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/console.py#L40-L47 | train | 201,074 |
tanghaibao/jcvi | jcvi/graphics/graph.py | make_sequence | def make_sequence(seq, name="S"):
"""
Make unique nodes for sequence graph.
"""
return ["{}_{}_{}".format(name, i, x) for i, x in enumerate(seq)] | python | def make_sequence(seq, name="S"):
"""
Make unique nodes for sequence graph.
"""
return ["{}_{}_{}".format(name, i, x) for i, x in enumerate(seq)] | [
"def",
"make_sequence",
"(",
"seq",
",",
"name",
"=",
"\"S\"",
")",
":",
"return",
"[",
"\"{}_{}_{}\"",
".",
"format",
"(",
"name",
",",
"i",
",",
"x",
")",
"for",
"i",
",",
"x",
"in",
"enumerate",
"(",
"seq",
")",
"]"
] | Make unique nodes for sequence graph. | [
"Make",
"unique",
"nodes",
"for",
"sequence",
"graph",
"."
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/graphics/graph.py#L18-L22 | train | 201,075 |
tanghaibao/jcvi | jcvi/graphics/graph.py | sequence_to_graph | def sequence_to_graph(G, seq, color='black'):
"""
Automatically construct graph given a sequence of characters.
"""
for x in seq:
if x.endswith("_1"): # Mutation
G.node(x, color=color, width="0.1", shape="circle", label="")
else:
G.node(x, color=color)
for a, b in pairwise(seq):
def sequence_to_graph(G, seq, color='black'):
    """
    Add one node per element of *seq* to graph G, then chain consecutive
    elements with edges. Labels ending in "_1" (mutations) are rendered
    as small unlabeled circles.
    """
    for node in seq:
        attrs = dict(color=color)
        if node.endswith("_1"):  # Mutation
            attrs.update(width="0.1", shape="circle", label="")
        G.node(node, **attrs)
    for tail, head in pairwise(seq):
        G.edge(tail, head, color=color)
"def",
"sequence_to_graph",
"(",
"G",
",",
"seq",
",",
"color",
"=",
"'black'",
")",
":",
"for",
"x",
"in",
"seq",
":",
"if",
"x",
".",
"endswith",
"(",
"\"_1\"",
")",
":",
"# Mutation",
"G",
".",
"node",
"(",
"x",
",",
"color",
"=",
"color",
","... | Automatically construct graph given a sequence of characters. | [
"Automatically",
"construct",
"graph",
"given",
"a",
"sequence",
"of",
"characters",
"."
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/graphics/graph.py#L25-L35 | train | 201,076 |
tanghaibao/jcvi | jcvi/graphics/graph.py | zip_sequences | def zip_sequences(G, allseqs, color="white"):
"""
Fuse certain nodes together, if they contain same data except for the
sequence name.
"""
for s in zip(*allseqs):
groups = defaultdict(list)
for x in s:
part = x.split('_', 1)[1]
groups[part].append(x)
for part, g in groups.items():
with G.subgraph(name="cluster_" + part) as c:
for x in g:
c.node(x)
c.attr(style="invis") | python | def zip_sequences(G, allseqs, color="white"):
"""
Fuse certain nodes together, if they contain same data except for the
sequence name.
"""
for s in zip(*allseqs):
groups = defaultdict(list)
for x in s:
part = x.split('_', 1)[1]
groups[part].append(x)
for part, g in groups.items():
with G.subgraph(name="cluster_" + part) as c:
for x in g:
c.node(x)
c.attr(style="invis") | [
"def",
"zip_sequences",
"(",
"G",
",",
"allseqs",
",",
"color",
"=",
"\"white\"",
")",
":",
"for",
"s",
"in",
"zip",
"(",
"*",
"allseqs",
")",
":",
"groups",
"=",
"defaultdict",
"(",
"list",
")",
"for",
"x",
"in",
"s",
":",
"part",
"=",
"x",
".",... | Fuse certain nodes together, if they contain same data except for the
sequence name. | [
"Fuse",
"certain",
"nodes",
"together",
"if",
"they",
"contain",
"same",
"data",
"except",
"for",
"the",
"sequence",
"name",
"."
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/graphics/graph.py#L38-L52 | train | 201,077 |
tanghaibao/jcvi | jcvi/formats/html.py | gallery | def gallery(args):
"""
%prog gallery folder link_prefix
Convert a folder of figures to a HTML table. For example:
$ python -m jcvi.formats.html gallery Paper-figures/
https://dl.dropboxusercontent.com/u/15937715/Data/Paper-figures/
Maps the images from local to remote.
"""
from jcvi.apps.base import iglob
from jcvi.utils.iter import grouper
p = OptionParser(gallery.__doc__)
p.add_option("--columns", default=3, type="int",
help="How many cells per row")
p.add_option("--width", default=200, type="int",
help="Image width")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
folder, link_prefix = args
width = opts.width
images = iglob(folder, "*.jpg,*.JPG,*.png")
td = '<td>{0}<br><a href="{1}"><img src="{1}" width="{2}"></a></td>'
print("<table>")
for ims in grouper(images, opts.columns):
print('<tr height="{0}" valign="top">'.format(width + 5))
for im in ims:
if not im:
continue
im = op.basename(im)
pf = im.split('.')[0].replace('_', '-')
link = link_prefix.rstrip("/") + "/" + im
print(td.format(pf, link, width))
print("</tr>")
print("</table>") | python | def gallery(args):
"""
%prog gallery folder link_prefix
Convert a folder of figures to a HTML table. For example:
$ python -m jcvi.formats.html gallery Paper-figures/
https://dl.dropboxusercontent.com/u/15937715/Data/Paper-figures/
Maps the images from local to remote.
"""
from jcvi.apps.base import iglob
from jcvi.utils.iter import grouper
p = OptionParser(gallery.__doc__)
p.add_option("--columns", default=3, type="int",
help="How many cells per row")
p.add_option("--width", default=200, type="int",
help="Image width")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
folder, link_prefix = args
width = opts.width
images = iglob(folder, "*.jpg,*.JPG,*.png")
td = '<td>{0}<br><a href="{1}"><img src="{1}" width="{2}"></a></td>'
print("<table>")
for ims in grouper(images, opts.columns):
print('<tr height="{0}" valign="top">'.format(width + 5))
for im in ims:
if not im:
continue
im = op.basename(im)
pf = im.split('.')[0].replace('_', '-')
link = link_prefix.rstrip("/") + "/" + im
print(td.format(pf, link, width))
print("</tr>")
print("</table>") | [
"def",
"gallery",
"(",
"args",
")",
":",
"from",
"jcvi",
".",
"apps",
".",
"base",
"import",
"iglob",
"from",
"jcvi",
".",
"utils",
".",
"iter",
"import",
"grouper",
"p",
"=",
"OptionParser",
"(",
"gallery",
".",
"__doc__",
")",
"p",
".",
"add_option",... | %prog gallery folder link_prefix
Convert a folder of figures to a HTML table. For example:
$ python -m jcvi.formats.html gallery Paper-figures/
https://dl.dropboxusercontent.com/u/15937715/Data/Paper-figures/
Maps the images from local to remote. | [
"%prog",
"gallery",
"folder",
"link_prefix"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/html.py#L30-L69 | train | 201,078 |
tanghaibao/jcvi | jcvi/formats/html.py | links | def links(args):
"""
%prog links url
Extract all the links "<a href=''>" from web page.
"""
p = OptionParser(links.__doc__)
p.add_option("--img", default=False, action="store_true",
help="Extract <img> tags [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
url, = args
img = opts.img
htmlfile = download(url)
page = open(htmlfile).read()
soup = BeautifulSoup(page)
tag = 'img' if img else 'a'
src = 'src' if img else 'href'
aa = soup.findAll(tag)
for a in aa:
link = a.get(src)
link = urljoin(url, link)
def links(args):
    """
    %prog links url

    Extract all the links "<a href=''>" from web page.
    """
    parser = OptionParser(links.__doc__)
    parser.add_option("--img", default=False, action="store_true",
                      help="Extract <img> tags [default: %default]")
    opts, args = parser.parse_args(args)

    if len(args) != 1:
        sys.exit(not parser.print_help())

    url, = args
    img = opts.img

    htmlfile = download(url)
    page = open(htmlfile).read()
    soup = BeautifulSoup(page)

    # Harvest <img src=...> or <a href=...> depending on the flag
    tag = 'img' if img else 'a'
    src = 'src' if img else 'href'
    for anchor in soup.findAll(tag):
        # Resolve relative links against the page URL
        print(urljoin(url, anchor.get(src)))
"def",
"links",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"links",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--img\"",
",",
"default",
"=",
"False",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Extract <img> tags [defau... | %prog links url
Extract all the links "<a href=''>" from web page. | [
"%prog",
"links",
"url"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/html.py#L72-L99 | train | 201,079 |
tanghaibao/jcvi | jcvi/formats/html.py | unescape | def unescape(s, unicode_action="replace"):
"""
Unescape HTML strings, and convert & etc.
"""
import HTMLParser
hp = HTMLParser.HTMLParser()
s = hp.unescape(s)
s = s.encode('ascii', unicode_action)
s = s.replace("\n", "").strip()
return s | python | def unescape(s, unicode_action="replace"):
"""
Unescape HTML strings, and convert & etc.
"""
import HTMLParser
hp = HTMLParser.HTMLParser()
s = hp.unescape(s)
s = s.encode('ascii', unicode_action)
s = s.replace("\n", "").strip()
return s | [
"def",
"unescape",
"(",
"s",
",",
"unicode_action",
"=",
"\"replace\"",
")",
":",
"import",
"HTMLParser",
"hp",
"=",
"HTMLParser",
".",
"HTMLParser",
"(",
")",
"s",
"=",
"hp",
".",
"unescape",
"(",
"s",
")",
"s",
"=",
"s",
".",
"encode",
"(",
"'ascii... | Unescape HTML strings, and convert & etc. | [
"Unescape",
"HTML",
"strings",
"and",
"convert",
"&",
";",
"etc",
"."
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/html.py#L102-L111 | train | 201,080 |
tanghaibao/jcvi | jcvi/formats/html.py | table | def table(args):
"""
%prog table page.html
Convert HTML tables to csv.
"""
import csv
p = OptionParser(table.__doc__)
p.set_sep(sep=",")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
htmlfile, = args
page = open(htmlfile).read()
soup = BeautifulSoup(page)
for i, tabl in enumerate(soup.findAll('table')):
nrows = 0
csvfile = htmlfile.rsplit(".", 1)[0] + ".{0}.csv".format(i)
writer = csv.writer(open(csvfile, "w"), delimiter=opts.sep)
rows = tabl.findAll('tr')
for tr in rows:
cols = tr.findAll('td')
if not cols:
cols = tr.findAll('th')
row = []
for td in cols:
try:
cell = "".join(td.find(text=True))
cell = unescape(cell)
except TypeError:
cell = ""
row.append(cell)
writer.writerow(row)
nrows += 1
logging.debug("Table with {0} rows written to `{1}`.".format(nrows, csvfile)) | python | def table(args):
"""
%prog table page.html
Convert HTML tables to csv.
"""
import csv
p = OptionParser(table.__doc__)
p.set_sep(sep=",")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
htmlfile, = args
page = open(htmlfile).read()
soup = BeautifulSoup(page)
for i, tabl in enumerate(soup.findAll('table')):
nrows = 0
csvfile = htmlfile.rsplit(".", 1)[0] + ".{0}.csv".format(i)
writer = csv.writer(open(csvfile, "w"), delimiter=opts.sep)
rows = tabl.findAll('tr')
for tr in rows:
cols = tr.findAll('td')
if not cols:
cols = tr.findAll('th')
row = []
for td in cols:
try:
cell = "".join(td.find(text=True))
cell = unescape(cell)
except TypeError:
cell = ""
row.append(cell)
writer.writerow(row)
nrows += 1
logging.debug("Table with {0} rows written to `{1}`.".format(nrows, csvfile)) | [
"def",
"table",
"(",
"args",
")",
":",
"import",
"csv",
"p",
"=",
"OptionParser",
"(",
"table",
".",
"__doc__",
")",
"p",
".",
"set_sep",
"(",
"sep",
"=",
"\",\"",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"le... | %prog table page.html
Convert HTML tables to csv. | [
"%prog",
"table",
"page",
".",
"html"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/html.py#L114-L153 | train | 201,081 |
tanghaibao/jcvi | jcvi/apps/vecscreen.py | blast | def blast(args):
"""
%prog blast fastafile
Run BLASTN against database (default is UniVec_Core). Output .bed format
on the vector/contaminant ranges.
"""
p = OptionParser(blast.__doc__)
p.add_option("--dist", default=100, type="int",
help="Merge adjacent HSPs separated by [default: %default]")
p.add_option("--db",
help="Use a different database rather than UniVec_Core")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastafile, = args
fastaprefix = fastafile.split(".", 1)[0]
univec = opts.db or download("ftp://ftp.ncbi.nih.gov/pub/UniVec/UniVec_Core")
uniprefix = univec.split(".", 1)[0]
fastablast = fastaprefix + ".{0}.blast".format(uniprefix)
prog = run_megablast if opts.db else run_vecscreen
prog(infile=fastafile, outfile=fastablast, db=univec, pctid=95, hitlen=50)
fp = open(fastablast)
ranges = []
for row in fp:
b = BlastLine(row)
ranges.append((b.query, b.qstart, b.qstop))
merged_ranges = range_merge(ranges, dist=opts.dist)
bedfile = fastaprefix + ".{0}.bed".format(uniprefix)
fw = must_open(bedfile, "w")
for seqid, start, end in merged_ranges:
print("\t".join(str(x) for x in (seqid, start - 1, end, uniprefix)), file=fw)
return bedfile | python | def blast(args):
"""
%prog blast fastafile
Run BLASTN against database (default is UniVec_Core). Output .bed format
on the vector/contaminant ranges.
"""
p = OptionParser(blast.__doc__)
p.add_option("--dist", default=100, type="int",
help="Merge adjacent HSPs separated by [default: %default]")
p.add_option("--db",
help="Use a different database rather than UniVec_Core")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastafile, = args
fastaprefix = fastafile.split(".", 1)[0]
univec = opts.db or download("ftp://ftp.ncbi.nih.gov/pub/UniVec/UniVec_Core")
uniprefix = univec.split(".", 1)[0]
fastablast = fastaprefix + ".{0}.blast".format(uniprefix)
prog = run_megablast if opts.db else run_vecscreen
prog(infile=fastafile, outfile=fastablast, db=univec, pctid=95, hitlen=50)
fp = open(fastablast)
ranges = []
for row in fp:
b = BlastLine(row)
ranges.append((b.query, b.qstart, b.qstop))
merged_ranges = range_merge(ranges, dist=opts.dist)
bedfile = fastaprefix + ".{0}.bed".format(uniprefix)
fw = must_open(bedfile, "w")
for seqid, start, end in merged_ranges:
print("\t".join(str(x) for x in (seqid, start - 1, end, uniprefix)), file=fw)
return bedfile | [
"def",
"blast",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"blast",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--dist\"",
",",
"default",
"=",
"100",
",",
"type",
"=",
"\"int\"",
",",
"help",
"=",
"\"Merge adjacent HSPs separated by [d... | %prog blast fastafile
Run BLASTN against database (default is UniVec_Core). Output .bed format
on the vector/contaminant ranges. | [
"%prog",
"blast",
"fastafile"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/vecscreen.py#L65-L105 | train | 201,082 |
tanghaibao/jcvi | jcvi/formats/base.py | check_exists | def check_exists(filename, oappend=False):
"""
Avoid overwriting some files accidentally.
"""
if op.exists(filename):
if oappend:
return oappend
logging.error("`{0}` found, overwrite (Y/N)?".format(filename))
overwrite = (raw_input() == 'Y')
else:
overwrite = True
return overwrite | python | def check_exists(filename, oappend=False):
"""
Avoid overwriting some files accidentally.
"""
if op.exists(filename):
if oappend:
return oappend
logging.error("`{0}` found, overwrite (Y/N)?".format(filename))
overwrite = (raw_input() == 'Y')
else:
overwrite = True
return overwrite | [
"def",
"check_exists",
"(",
"filename",
",",
"oappend",
"=",
"False",
")",
":",
"if",
"op",
".",
"exists",
"(",
"filename",
")",
":",
"if",
"oappend",
":",
"return",
"oappend",
"logging",
".",
"error",
"(",
"\"`{0}` found, overwrite (Y/N)?\"",
".",
"format",... | Avoid overwriting some files accidentally. | [
"Avoid",
"overwriting",
"some",
"files",
"accidentally",
"."
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/base.py#L306-L318 | train | 201,083 |
tanghaibao/jcvi | jcvi/formats/base.py | must_open | def must_open(filename, mode="r", checkexists=False, skipcheck=False, \
oappend=False):
"""
Accepts filename and returns filehandle.
Checks on multiple files, stdin/stdout/stderr, .gz or .bz2 file.
"""
if isinstance(filename, list):
assert "r" in mode
if filename[0].endswith((".gz", ".bz2")):
filename = " ".join(filename) # allow opening multiple gz/bz2 files
else:
import fileinput
return fileinput.input(filename)
if filename.startswith("s3://"):
from jcvi.utils.aws import pull_from_s3
filename = pull_from_s3(filename)
if filename in ("-", "stdin"):
assert "r" in mode
fp = sys.stdin
elif filename == "stdout":
assert "w" in mode
fp = sys.stdout
elif filename == "stderr":
assert "w" in mode
fp = sys.stderr
elif filename == "tmp" and mode == "w":
from tempfile import NamedTemporaryFile
fp = NamedTemporaryFile(delete=False)
elif filename.endswith(".gz"):
if 'r' in mode:
cmd = "gunzip -c {0}".format(filename)
fp = popen(cmd, debug=False)
elif 'w' in mode:
import gzip
fp = gzip.open(filename, mode)
elif filename.endswith(".bz2"):
if 'r' in mode:
cmd = "bzcat {0}".format(filename)
fp = popen(cmd, debug=False)
elif 'w' in mode:
import bz2
fp = bz2.BZ2File(filename, mode)
else:
if checkexists:
assert mode == "w"
overwrite = (not op.exists(filename)) if skipcheck \
else check_exists(filename, oappend)
if overwrite:
if oappend:
fp = open(filename, "a")
else:
fp = open(filename, "w")
else:
logging.debug("File `{0}` already exists. Skipped."\
.format(filename))
return None
else:
fp = open(filename, mode)
return fp | python | def must_open(filename, mode="r", checkexists=False, skipcheck=False, \
oappend=False):
"""
Accepts filename and returns filehandle.
Checks on multiple files, stdin/stdout/stderr, .gz or .bz2 file.
"""
if isinstance(filename, list):
assert "r" in mode
if filename[0].endswith((".gz", ".bz2")):
filename = " ".join(filename) # allow opening multiple gz/bz2 files
else:
import fileinput
return fileinput.input(filename)
if filename.startswith("s3://"):
from jcvi.utils.aws import pull_from_s3
filename = pull_from_s3(filename)
if filename in ("-", "stdin"):
assert "r" in mode
fp = sys.stdin
elif filename == "stdout":
assert "w" in mode
fp = sys.stdout
elif filename == "stderr":
assert "w" in mode
fp = sys.stderr
elif filename == "tmp" and mode == "w":
from tempfile import NamedTemporaryFile
fp = NamedTemporaryFile(delete=False)
elif filename.endswith(".gz"):
if 'r' in mode:
cmd = "gunzip -c {0}".format(filename)
fp = popen(cmd, debug=False)
elif 'w' in mode:
import gzip
fp = gzip.open(filename, mode)
elif filename.endswith(".bz2"):
if 'r' in mode:
cmd = "bzcat {0}".format(filename)
fp = popen(cmd, debug=False)
elif 'w' in mode:
import bz2
fp = bz2.BZ2File(filename, mode)
else:
if checkexists:
assert mode == "w"
overwrite = (not op.exists(filename)) if skipcheck \
else check_exists(filename, oappend)
if overwrite:
if oappend:
fp = open(filename, "a")
else:
fp = open(filename, "w")
else:
logging.debug("File `{0}` already exists. Skipped."\
.format(filename))
return None
else:
fp = open(filename, mode)
return fp | [
"def",
"must_open",
"(",
"filename",
",",
"mode",
"=",
"\"r\"",
",",
"checkexists",
"=",
"False",
",",
"skipcheck",
"=",
"False",
",",
"oappend",
"=",
"False",
")",
":",
"if",
"isinstance",
"(",
"filename",
",",
"list",
")",
":",
"assert",
"\"r\"",
"in... | Accepts filename and returns filehandle.
Checks on multiple files, stdin/stdout/stderr, .gz or .bz2 file. | [
"Accepts",
"filename",
"and",
"returns",
"filehandle",
"."
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/base.py#L326-L395 | train | 201,084 |
tanghaibao/jcvi | jcvi/formats/base.py | read_block | def read_block(handle, signal):
"""
Useful for reading block-like file formats, for example FASTA or OBO file,
such file usually startswith some signal, and in-between the signals are a
record
"""
signal_len = len(signal)
it = (x[1] for x in groupby(handle,
key=lambda row: row.strip()[:signal_len] == signal))
found_signal = False
for header in it:
header = list(header)
for h in header[:-1]:
h = h.strip()
if h[:signal_len] != signal:
continue
yield h, [] # Header only, no contents
header = header[-1].strip()
if header[:signal_len] != signal:
continue
found_signal = True
seq = list(s.strip() for s in next(it))
yield header, seq
if not found_signal:
handle.seek(0)
seq = list(s.strip() for s in handle)
yield None, seq | python | def read_block(handle, signal):
"""
Useful for reading block-like file formats, for example FASTA or OBO file,
such file usually startswith some signal, and in-between the signals are a
record
"""
signal_len = len(signal)
it = (x[1] for x in groupby(handle,
key=lambda row: row.strip()[:signal_len] == signal))
found_signal = False
for header in it:
header = list(header)
for h in header[:-1]:
h = h.strip()
if h[:signal_len] != signal:
continue
yield h, [] # Header only, no contents
header = header[-1].strip()
if header[:signal_len] != signal:
continue
found_signal = True
seq = list(s.strip() for s in next(it))
yield header, seq
if not found_signal:
handle.seek(0)
seq = list(s.strip() for s in handle)
yield None, seq | [
"def",
"read_block",
"(",
"handle",
",",
"signal",
")",
":",
"signal_len",
"=",
"len",
"(",
"signal",
")",
"it",
"=",
"(",
"x",
"[",
"1",
"]",
"for",
"x",
"in",
"groupby",
"(",
"handle",
",",
"key",
"=",
"lambda",
"row",
":",
"row",
".",
"strip",... | Useful for reading block-like file formats, for example FASTA or OBO file,
such file usually startswith some signal, and in-between the signals are a
record | [
"Useful",
"for",
"reading",
"block",
"-",
"like",
"file",
"formats",
"for",
"example",
"FASTA",
"or",
"OBO",
"file",
"such",
"file",
"usually",
"startswith",
"some",
"signal",
"and",
"in",
"-",
"between",
"the",
"signals",
"are",
"a",
"record"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/base.py#L453-L480 | train | 201,085 |
tanghaibao/jcvi | jcvi/formats/base.py | get_number | def get_number(s, cast=int):
"""
Try to get a number out of a string, and cast it.
"""
import string
d = "".join(x for x in str(s) if x in string.digits)
return cast(d) | python | def get_number(s, cast=int):
"""
Try to get a number out of a string, and cast it.
"""
import string
d = "".join(x for x in str(s) if x in string.digits)
return cast(d) | [
"def",
"get_number",
"(",
"s",
",",
"cast",
"=",
"int",
")",
":",
"import",
"string",
"d",
"=",
"\"\"",
".",
"join",
"(",
"x",
"for",
"x",
"in",
"str",
"(",
"s",
")",
"if",
"x",
"in",
"string",
".",
"digits",
")",
"return",
"cast",
"(",
"d",
... | Try to get a number out of a string, and cast it. | [
"Try",
"to",
"get",
"a",
"number",
"out",
"of",
"a",
"string",
"and",
"cast",
"it",
"."
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/base.py#L495-L501 | train | 201,086 |
tanghaibao/jcvi | jcvi/formats/base.py | seqids | def seqids(args):
"""
%prog seqids prefix start end
Make a list of seqids for graphics.karyotype. For example:
$ python -m jcvi.formats.base seqids chromosome_ 1 3
chromosome_1,chromosome_2,chromosome_3
$ python -m jcvi.formats.base seqids A 3 1 --pad0=2
A03,A02,A01
"""
p = OptionParser(seqids.__doc__)
p.add_option("--pad0", default=0, help="How many zeros to pad")
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
prefix, start, end = args
pad0 = opts.pad0
start, end = int(start), int(end)
step = 1 if start <= end else -1
print(",".join(["{}{:0{}d}".format(prefix, x, pad0) \
for x in xrange(start, end + step, step)])) | python | def seqids(args):
"""
%prog seqids prefix start end
Make a list of seqids for graphics.karyotype. For example:
$ python -m jcvi.formats.base seqids chromosome_ 1 3
chromosome_1,chromosome_2,chromosome_3
$ python -m jcvi.formats.base seqids A 3 1 --pad0=2
A03,A02,A01
"""
p = OptionParser(seqids.__doc__)
p.add_option("--pad0", default=0, help="How many zeros to pad")
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
prefix, start, end = args
pad0 = opts.pad0
start, end = int(start), int(end)
step = 1 if start <= end else -1
print(",".join(["{}{:0{}d}".format(prefix, x, pad0) \
for x in xrange(start, end + step, step)])) | [
"def",
"seqids",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"seqids",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--pad0\"",
",",
"default",
"=",
"0",
",",
"help",
"=",
"\"How many zeros to pad\"",
")",
"opts",
",",
"args",
"=",
"p"... | %prog seqids prefix start end
Make a list of seqids for graphics.karyotype. For example:
$ python -m jcvi.formats.base seqids chromosome_ 1 3
chromosome_1,chromosome_2,chromosome_3
$ python -m jcvi.formats.base seqids A 3 1 --pad0=2
A03,A02,A01 | [
"%prog",
"seqids",
"prefix",
"start",
"end"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/base.py#L532-L556 | train | 201,087 |
tanghaibao/jcvi | jcvi/formats/base.py | pairwise | def pairwise(args):
"""
%prog pairwise ids
Convert a list of IDs into all pairs.
"""
from itertools import combinations
p = OptionParser(pairwise.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
idsfile, = args
ids = SetFile(idsfile)
ids = sorted(ids)
fw = open(idsfile + ".pairs", "w")
for a, b in combinations(ids, 2):
print("\t".join((a, b)), file=fw)
fw.close() | python | def pairwise(args):
"""
%prog pairwise ids
Convert a list of IDs into all pairs.
"""
from itertools import combinations
p = OptionParser(pairwise.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
idsfile, = args
ids = SetFile(idsfile)
ids = sorted(ids)
fw = open(idsfile + ".pairs", "w")
for a, b in combinations(ids, 2):
print("\t".join((a, b)), file=fw)
fw.close() | [
"def",
"pairwise",
"(",
"args",
")",
":",
"from",
"itertools",
"import",
"combinations",
"p",
"=",
"OptionParser",
"(",
"pairwise",
".",
"__doc__",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
... | %prog pairwise ids
Convert a list of IDs into all pairs. | [
"%prog",
"pairwise",
"ids"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/base.py#L559-L579 | train | 201,088 |
tanghaibao/jcvi | jcvi/formats/base.py | truncate | def truncate(args):
"""
%prog truncate linecount filename
Remove linecount lines from the end of the file in-place. Borrowed from:
<http://superuser.com/questions/127786/how-to-remove-the-last-2-lines-of-a-very-large-file>
"""
p = OptionParser(truncate.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
number, filename = args
number = int(number)
count = 0
f = open(filename, "r+b")
f.seek(0, os.SEEK_END)
while f.tell() > 0:
f.seek(-1, os.SEEK_CUR)
char = f.read(1)
if char == '\n':
count += 1
if count == number + 1:
f.truncate()
print("Removed {0} lines from end of file".format(number), file=sys.stderr)
return number
f.seek(-1, os.SEEK_CUR)
if count < number + 1:
print("No change: requested removal would leave empty file", file=sys.stderr)
return -1 | python | def truncate(args):
"""
%prog truncate linecount filename
Remove linecount lines from the end of the file in-place. Borrowed from:
<http://superuser.com/questions/127786/how-to-remove-the-last-2-lines-of-a-very-large-file>
"""
p = OptionParser(truncate.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
number, filename = args
number = int(number)
count = 0
f = open(filename, "r+b")
f.seek(0, os.SEEK_END)
while f.tell() > 0:
f.seek(-1, os.SEEK_CUR)
char = f.read(1)
if char == '\n':
count += 1
if count == number + 1:
f.truncate()
print("Removed {0} lines from end of file".format(number), file=sys.stderr)
return number
f.seek(-1, os.SEEK_CUR)
if count < number + 1:
print("No change: requested removal would leave empty file", file=sys.stderr)
return -1 | [
"def",
"truncate",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"truncate",
".",
"__doc__",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"2",
":",
"sys",
".",
"exit",
"(",
... | %prog truncate linecount filename
Remove linecount lines from the end of the file in-place. Borrowed from:
<http://superuser.com/questions/127786/how-to-remove-the-last-2-lines-of-a-very-large-file> | [
"%prog",
"truncate",
"linecount",
"filename"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/base.py#L608-L641 | train | 201,089 |
tanghaibao/jcvi | jcvi/formats/base.py | flatten | def flatten(args):
"""
%prog flatten filename > ids
Convert a list of IDs (say, multiple IDs per line) and move them into one
per line.
For example, convert this, to this:
A,B,C | A
1 | B
a,4 | C
| 1
| a
| 4
If multi-column file with multiple elements per column, zip then flatten like so:
A,B,C 2,10,gg | A,2
1,3 4 | B,10
| C,gg
| 1,4
| 3,na
"""
from six.moves import zip_longest
p = OptionParser(flatten.__doc__)
p.set_sep(sep=",")
p.add_option("--zipflatten", default=None, dest="zipsep",
help="Specify if columns of the file should be zipped before" +
" flattening. If so, specify delimiter separating column elements" +
" [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
tabfile, = args
zipsep = opts.zipsep
fp = must_open(tabfile)
for row in fp:
if zipsep:
row = row.rstrip()
atoms = row.split(opts.sep)
frows = []
for atom in atoms:
frows.append(atom.split(zipsep))
print("\n".join([zipsep.join(x) for x in list(zip_longest(*frows, fillvalue="na"))]))
else:
print(row.strip().replace(opts.sep, "\n")) | python | def flatten(args):
"""
%prog flatten filename > ids
Convert a list of IDs (say, multiple IDs per line) and move them into one
per line.
For example, convert this, to this:
A,B,C | A
1 | B
a,4 | C
| 1
| a
| 4
If multi-column file with multiple elements per column, zip then flatten like so:
A,B,C 2,10,gg | A,2
1,3 4 | B,10
| C,gg
| 1,4
| 3,na
"""
from six.moves import zip_longest
p = OptionParser(flatten.__doc__)
p.set_sep(sep=",")
p.add_option("--zipflatten", default=None, dest="zipsep",
help="Specify if columns of the file should be zipped before" +
" flattening. If so, specify delimiter separating column elements" +
" [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
tabfile, = args
zipsep = opts.zipsep
fp = must_open(tabfile)
for row in fp:
if zipsep:
row = row.rstrip()
atoms = row.split(opts.sep)
frows = []
for atom in atoms:
frows.append(atom.split(zipsep))
print("\n".join([zipsep.join(x) for x in list(zip_longest(*frows, fillvalue="na"))]))
else:
print(row.strip().replace(opts.sep, "\n")) | [
"def",
"flatten",
"(",
"args",
")",
":",
"from",
"six",
".",
"moves",
"import",
"zip_longest",
"p",
"=",
"OptionParser",
"(",
"flatten",
".",
"__doc__",
")",
"p",
".",
"set_sep",
"(",
"sep",
"=",
"\",\"",
")",
"p",
".",
"add_option",
"(",
"\"--zipflatt... | %prog flatten filename > ids
Convert a list of IDs (say, multiple IDs per line) and move them into one
per line.
For example, convert this, to this:
A,B,C | A
1 | B
a,4 | C
| 1
| a
| 4
If multi-column file with multiple elements per column, zip then flatten like so:
A,B,C 2,10,gg | A,2
1,3 4 | B,10
| C,gg
| 1,4
| 3,na | [
"%prog",
"flatten",
"filename",
">",
"ids"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/base.py#L644-L692 | train | 201,090 |
tanghaibao/jcvi | jcvi/formats/base.py | reorder | def reorder(args):
"""
%prog reorder tabfile 1,2,4,3 > newtabfile
Reorder columns in tab-delimited files. The above syntax will print out a
new file with col-1,2,4,3 from the old file.
"""
import csv
p = OptionParser(reorder.__doc__)
p.set_sep()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
tabfile, order = args
sep = opts.sep
order = [int(x) - 1 for x in order.split(",")]
reader = csv.reader(must_open(tabfile), delimiter=sep)
writer = csv.writer(sys.stdout, delimiter=sep)
for row in reader:
newrow = [row[x] for x in order]
writer.writerow(newrow) | python | def reorder(args):
"""
%prog reorder tabfile 1,2,4,3 > newtabfile
Reorder columns in tab-delimited files. The above syntax will print out a
new file with col-1,2,4,3 from the old file.
"""
import csv
p = OptionParser(reorder.__doc__)
p.set_sep()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
tabfile, order = args
sep = opts.sep
order = [int(x) - 1 for x in order.split(",")]
reader = csv.reader(must_open(tabfile), delimiter=sep)
writer = csv.writer(sys.stdout, delimiter=sep)
for row in reader:
newrow = [row[x] for x in order]
writer.writerow(newrow) | [
"def",
"reorder",
"(",
"args",
")",
":",
"import",
"csv",
"p",
"=",
"OptionParser",
"(",
"reorder",
".",
"__doc__",
")",
"p",
".",
"set_sep",
"(",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
... | %prog reorder tabfile 1,2,4,3 > newtabfile
Reorder columns in tab-delimited files. The above syntax will print out a
new file with col-1,2,4,3 from the old file. | [
"%prog",
"reorder",
"tabfile",
"1",
"2",
"4",
"3",
">",
"newtabfile"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/base.py#L791-L814 | train | 201,091 |
tanghaibao/jcvi | jcvi/formats/base.py | split | def split(args):
"""
%prog split file outdir N
Split file into N records. This allows splitting FASTA/FASTQ/TXT file
properly at boundary of records. Split is useful for parallelization
on input chunks.
Option --mode is useful on how to break into chunks.
1. chunk - chunk records sequentially, 1-100 in file 1, 101-200 in file 2, etc.
2. cycle - chunk records in Round Robin fashion
3. optimal - try to make split file of roughly similar sizes, using LPT
algorithm. This is the default.
"""
p = OptionParser(split.__doc__)
mode_choices = ("batch", "cycle", "optimal")
p.add_option("--all", default=False, action="store_true",
help="split all records [default: %default]")
p.add_option("--mode", default="optimal", choices=mode_choices,
help="Mode when splitting records [default: %default]")
p.add_option("--format", choices=("fasta", "fastq", "txt", "clust"),
help="input file format [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
filename, outdir, N = args
fs = FileSplitter(filename, outputdir=outdir,
format=opts.format, mode=opts.mode)
if opts.all:
logging.debug("option -all override N")
N = fs.num_records
else:
N = min(fs.num_records, int(N))
assert N > 0, "N must be > 0"
logging.debug("split file into %d chunks" % N)
fs.split(N)
return fs | python | def split(args):
"""
%prog split file outdir N
Split file into N records. This allows splitting FASTA/FASTQ/TXT file
properly at boundary of records. Split is useful for parallelization
on input chunks.
Option --mode is useful on how to break into chunks.
1. chunk - chunk records sequentially, 1-100 in file 1, 101-200 in file 2, etc.
2. cycle - chunk records in Round Robin fashion
3. optimal - try to make split file of roughly similar sizes, using LPT
algorithm. This is the default.
"""
p = OptionParser(split.__doc__)
mode_choices = ("batch", "cycle", "optimal")
p.add_option("--all", default=False, action="store_true",
help="split all records [default: %default]")
p.add_option("--mode", default="optimal", choices=mode_choices,
help="Mode when splitting records [default: %default]")
p.add_option("--format", choices=("fasta", "fastq", "txt", "clust"),
help="input file format [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
filename, outdir, N = args
fs = FileSplitter(filename, outputdir=outdir,
format=opts.format, mode=opts.mode)
if opts.all:
logging.debug("option -all override N")
N = fs.num_records
else:
N = min(fs.num_records, int(N))
assert N > 0, "N must be > 0"
logging.debug("split file into %d chunks" % N)
fs.split(N)
return fs | [
"def",
"split",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"split",
".",
"__doc__",
")",
"mode_choices",
"=",
"(",
"\"batch\"",
",",
"\"cycle\"",
",",
"\"optimal\"",
")",
"p",
".",
"add_option",
"(",
"\"--all\"",
",",
"default",
"=",
"False",
... | %prog split file outdir N
Split file into N records. This allows splitting FASTA/FASTQ/TXT file
properly at boundary of records. Split is useful for parallelization
on input chunks.
Option --mode is useful on how to break into chunks.
1. chunk - chunk records sequentially, 1-100 in file 1, 101-200 in file 2, etc.
2. cycle - chunk records in Round Robin fashion
3. optimal - try to make split file of roughly similar sizes, using LPT
algorithm. This is the default. | [
"%prog",
"split",
"file",
"outdir",
"N"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/base.py#L817-L859 | train | 201,092 |
tanghaibao/jcvi | jcvi/formats/base.py | setop | def setop(args):
"""
%prog setop "fileA & fileB" > newfile
Perform set operations, except on files. The files (fileA and fileB) contain
list of ids. The operator is one of the four:
|: union (elements found in either file)
&: intersection (elements found in both)
-: difference (elements in fileA but not in fileB)
^: symmetric difference (elementes found in either set but not both)
Please quote the argument to avoid shell interpreting | and &.
"""
from jcvi.utils.natsort import natsorted
p = OptionParser(setop.__doc__)
p.add_option("--column", default=0, type="int",
help="The column to extract, 0-based, -1 to disable [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
statement, = args
fa, op, fb = statement.split()
assert op in ('|', '&', '-', '^')
column = opts.column
fa = SetFile(fa, column=column)
fb = SetFile(fb, column=column)
if op == '|':
t = fa | fb
elif op == '&':
t = fa & fb
elif op == '-':
t = fa - fb
elif op == '^':
t = fa ^ fb
for x in natsorted(t):
print(x) | python | def setop(args):
"""
%prog setop "fileA & fileB" > newfile
Perform set operations, except on files. The files (fileA and fileB) contain
list of ids. The operator is one of the four:
|: union (elements found in either file)
&: intersection (elements found in both)
-: difference (elements in fileA but not in fileB)
^: symmetric difference (elementes found in either set but not both)
Please quote the argument to avoid shell interpreting | and &.
"""
from jcvi.utils.natsort import natsorted
p = OptionParser(setop.__doc__)
p.add_option("--column", default=0, type="int",
help="The column to extract, 0-based, -1 to disable [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
statement, = args
fa, op, fb = statement.split()
assert op in ('|', '&', '-', '^')
column = opts.column
fa = SetFile(fa, column=column)
fb = SetFile(fb, column=column)
if op == '|':
t = fa | fb
elif op == '&':
t = fa & fb
elif op == '-':
t = fa - fb
elif op == '^':
t = fa ^ fb
for x in natsorted(t):
print(x) | [
"def",
"setop",
"(",
"args",
")",
":",
"from",
"jcvi",
".",
"utils",
".",
"natsort",
"import",
"natsorted",
"p",
"=",
"OptionParser",
"(",
"setop",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--column\"",
",",
"default",
"=",
"0",
",",
"type",... | %prog setop "fileA & fileB" > newfile
Perform set operations, except on files. The files (fileA and fileB) contain
list of ids. The operator is one of the four:
|: union (elements found in either file)
&: intersection (elements found in both)
-: difference (elements in fileA but not in fileB)
^: symmetric difference (elementes found in either set but not both)
Please quote the argument to avoid shell interpreting | and &. | [
"%prog",
"setop",
"fileA",
"&",
"fileB",
">",
"newfile"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/base.py#L1027-L1069 | train | 201,093 |
tanghaibao/jcvi | jcvi/formats/base.py | FileSplitter._batch_iterator | def _batch_iterator(self, N=1):
"""Returns N lists of records.
This can be used on any iterator, for example to batch up
SeqRecord objects from Bio.SeqIO.parse(...), or to batch
Alignment objects from Bio.AlignIO.parse(...), or simply
lines from a file handle.
This is a generator function, and it returns lists of the
entries from the supplied iterator. Each list will have
batch_size entries, although the final list may be shorter.
"""
batch_size = math.ceil(self.num_records / float(N))
handle = self._open(self.filename)
while True:
batch = list(islice(handle, batch_size))
if not batch:
break
yield batch | python | def _batch_iterator(self, N=1):
"""Returns N lists of records.
This can be used on any iterator, for example to batch up
SeqRecord objects from Bio.SeqIO.parse(...), or to batch
Alignment objects from Bio.AlignIO.parse(...), or simply
lines from a file handle.
This is a generator function, and it returns lists of the
entries from the supplied iterator. Each list will have
batch_size entries, although the final list may be shorter.
"""
batch_size = math.ceil(self.num_records / float(N))
handle = self._open(self.filename)
while True:
batch = list(islice(handle, batch_size))
if not batch:
break
yield batch | [
"def",
"_batch_iterator",
"(",
"self",
",",
"N",
"=",
"1",
")",
":",
"batch_size",
"=",
"math",
".",
"ceil",
"(",
"self",
".",
"num_records",
"/",
"float",
"(",
"N",
")",
")",
"handle",
"=",
"self",
".",
"_open",
"(",
"self",
".",
"filename",
")",
... | Returns N lists of records.
This can be used on any iterator, for example to batch up
SeqRecord objects from Bio.SeqIO.parse(...), or to batch
Alignment objects from Bio.AlignIO.parse(...), or simply
lines from a file handle.
This is a generator function, and it returns lists of the
entries from the supplied iterator. Each list will have
batch_size entries, although the final list may be shorter. | [
"Returns",
"N",
"lists",
"of",
"records",
"."
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/base.py#L192-L210 | train | 201,094 |
tanghaibao/jcvi | jcvi/formats/sizes.py | extract | def extract(args):
"""
%prog extract idsfile sizesfile
Extract the lines containing only the given IDs.
"""
p = OptionParser(extract.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
idsfile, sizesfile = args
sizes = Sizes(sizesfile).mapping
fp = open(idsfile)
for row in fp:
name = row.strip()
size = sizes[name]
print("\t".join(str(x) for x in (name, size))) | python | def extract(args):
"""
%prog extract idsfile sizesfile
Extract the lines containing only the given IDs.
"""
p = OptionParser(extract.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
idsfile, sizesfile = args
sizes = Sizes(sizesfile).mapping
fp = open(idsfile)
for row in fp:
name = row.strip()
size = sizes[name]
print("\t".join(str(x) for x in (name, size))) | [
"def",
"extract",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"extract",
".",
"__doc__",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"2",
":",
"sys",
".",
"exit",
"(",
... | %prog extract idsfile sizesfile
Extract the lines containing only the given IDs. | [
"%prog",
"extract",
"idsfile",
"sizesfile"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/sizes.py#L222-L240 | train | 201,095 |
tanghaibao/jcvi | jcvi/utils/webcolors.py | _reversedict | def _reversedict(d):
"""
Internal helper for generating reverse mappings; given a
dictionary, returns a new dictionary with keys and values swapped.
"""
return dict(list(zip(list(d.values()), list(d.keys())))) | python | def _reversedict(d):
"""
Internal helper for generating reverse mappings; given a
dictionary, returns a new dictionary with keys and values swapped.
"""
return dict(list(zip(list(d.values()), list(d.keys())))) | [
"def",
"_reversedict",
"(",
"d",
")",
":",
"return",
"dict",
"(",
"list",
"(",
"zip",
"(",
"list",
"(",
"d",
".",
"values",
"(",
")",
")",
",",
"list",
"(",
"d",
".",
"keys",
"(",
")",
")",
")",
")",
")"
] | Internal helper for generating reverse mappings; given a
dictionary, returns a new dictionary with keys and values swapped. | [
"Internal",
"helper",
"for",
"generating",
"reverse",
"mappings",
";",
"given",
"a",
"dictionary",
"returns",
"a",
"new",
"dictionary",
"with",
"keys",
"and",
"values",
"swapped",
"."
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/utils/webcolors.py#L154-L160 | train | 201,096 |
tanghaibao/jcvi | jcvi/utils/webcolors.py | _percent_to_integer | def _percent_to_integer(percent):
"""
Internal helper for converting a percentage value to an integer
between 0 and 255 inclusive.
"""
num = float(percent.split('%')[0]) / 100.0 * 255
e = num - math.floor(num)
return e < 0.5 and int(math.floor(num)) or int(math.ceil(num)) | python | def _percent_to_integer(percent):
"""
Internal helper for converting a percentage value to an integer
between 0 and 255 inclusive.
"""
num = float(percent.split('%')[0]) / 100.0 * 255
e = num - math.floor(num)
return e < 0.5 and int(math.floor(num)) or int(math.ceil(num)) | [
"def",
"_percent_to_integer",
"(",
"percent",
")",
":",
"num",
"=",
"float",
"(",
"percent",
".",
"split",
"(",
"'%'",
")",
"[",
"0",
"]",
")",
"/",
"100.0",
"*",
"255",
"e",
"=",
"num",
"-",
"math",
".",
"floor",
"(",
"num",
")",
"return",
"e",
... | Internal helper for converting a percentage value to an integer
between 0 and 255 inclusive. | [
"Internal",
"helper",
"for",
"converting",
"a",
"percentage",
"value",
"to",
"an",
"integer",
"between",
"0",
"and",
"255",
"inclusive",
"."
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/utils/webcolors.py#L811-L819 | train | 201,097 |
tanghaibao/jcvi | jcvi/utils/webcolors.py | closest_color | def closest_color(requested_color):
"""
Find closest color name for the request RGB tuple.
"""
logging.disable(logging.DEBUG)
colors = []
for key, name in css3_hex_to_names.items():
diff = color_diff(hex_to_rgb(key), requested_color)
colors.append((diff, name))
logging.disable(logging.NOTSET)
min_diff, min_color = min(colors)
return min_color | python | def closest_color(requested_color):
"""
Find closest color name for the request RGB tuple.
"""
logging.disable(logging.DEBUG)
colors = []
for key, name in css3_hex_to_names.items():
diff = color_diff(hex_to_rgb(key), requested_color)
colors.append((diff, name))
logging.disable(logging.NOTSET)
min_diff, min_color = min(colors)
return min_color | [
"def",
"closest_color",
"(",
"requested_color",
")",
":",
"logging",
".",
"disable",
"(",
"logging",
".",
"DEBUG",
")",
"colors",
"=",
"[",
"]",
"for",
"key",
",",
"name",
"in",
"css3_hex_to_names",
".",
"items",
"(",
")",
":",
"diff",
"=",
"color_diff",... | Find closest color name for the request RGB tuple. | [
"Find",
"closest",
"color",
"name",
"for",
"the",
"request",
"RGB",
"tuple",
"."
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/utils/webcolors.py#L868-L880 | train | 201,098 |
tanghaibao/jcvi | jcvi/compara/fractionation.py | offdiag | def offdiag(args):
"""
%prog offdiag diploid.napus.1x1.lifted.anchors
Find gene pairs that are off diagnoal. "Off diagonal" are the pairs that are
not on the orthologous chromosomes. For example, napus chrA01 and brapa A01.
"""
p = OptionParser(offdiag.__doc__)
p.set_beds()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
anchorsfile, = args
qbed, sbed, qorder, sorder, is_self = check_beds(anchorsfile, p, opts)
fp = open(anchorsfile)
pf = "-".join(anchorsfile.split(".")[:2])
header = "Block-id|Napus|Diploid|Napus-chr|Diploid-chr|RBH?".split("|")
print("\t".join(header))
i = -1
for row in fp:
if row[0] == '#':
i += 1
continue
q, s, score = row.split()
rbh = 'no' if score[-1] == 'L' else 'yes'
qi, qq = qorder[q]
si, ss = sorder[s]
oqseqid = qseqid = qq.seqid
osseqid = sseqid = ss.seqid
sseqid = sseqid.split("_")[0][-3:]
if qseqid[0] == 'A':
qseqid = qseqid[-3:] # A09 => A09
elif qseqid[0] == 'C':
qseqid = 'C0' + qseqid[-1] # C9 => C09
else:
continue
if qseqid == sseqid or sseqid[-2:] == 'nn':
continue
block_id = pf + "-block-{0}".format(i)
print("\t".join((block_id, q, s, oqseqid, osseqid, rbh))) | python | def offdiag(args):
"""
%prog offdiag diploid.napus.1x1.lifted.anchors
Find gene pairs that are off diagnoal. "Off diagonal" are the pairs that are
not on the orthologous chromosomes. For example, napus chrA01 and brapa A01.
"""
p = OptionParser(offdiag.__doc__)
p.set_beds()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
anchorsfile, = args
qbed, sbed, qorder, sorder, is_self = check_beds(anchorsfile, p, opts)
fp = open(anchorsfile)
pf = "-".join(anchorsfile.split(".")[:2])
header = "Block-id|Napus|Diploid|Napus-chr|Diploid-chr|RBH?".split("|")
print("\t".join(header))
i = -1
for row in fp:
if row[0] == '#':
i += 1
continue
q, s, score = row.split()
rbh = 'no' if score[-1] == 'L' else 'yes'
qi, qq = qorder[q]
si, ss = sorder[s]
oqseqid = qseqid = qq.seqid
osseqid = sseqid = ss.seqid
sseqid = sseqid.split("_")[0][-3:]
if qseqid[0] == 'A':
qseqid = qseqid[-3:] # A09 => A09
elif qseqid[0] == 'C':
qseqid = 'C0' + qseqid[-1] # C9 => C09
else:
continue
if qseqid == sseqid or sseqid[-2:] == 'nn':
continue
block_id = pf + "-block-{0}".format(i)
print("\t".join((block_id, q, s, oqseqid, osseqid, rbh))) | [
"def",
"offdiag",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"offdiag",
".",
"__doc__",
")",
"p",
".",
"set_beds",
"(",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"1",
... | %prog offdiag diploid.napus.1x1.lifted.anchors
Find gene pairs that are off diagnoal. "Off diagonal" are the pairs that are
not on the orthologous chromosomes. For example, napus chrA01 and brapa A01. | [
"%prog",
"offdiag",
"diploid",
".",
"napus",
".",
"1x1",
".",
"lifted",
".",
"anchors"
] | d2e31a77b6ade7f41f3b321febc2b4744d1cdeca | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/compara/fractionation.py#L45-L87 | train | 201,099 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.