repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
listlengths
20
707
docstring
stringlengths
3
17.3k
docstring_tokens
listlengths
3
222
sha
stringlengths
40
40
url
stringlengths
87
242
partition
stringclasses
1 value
idx
int64
0
252k
tanghaibao/jcvi
jcvi/formats/fasta.py
pairinplace
def pairinplace(args): """ %prog pairinplace bulk.fasta Pair up the records in bulk.fasta by comparing the names for adjacent records. If they match, print to bulk.pairs.fasta, else print to bulk.frags.fasta. """ from jcvi.utils.iter import pairwise p = OptionParser(pairinplace.__doc__) p.add_option("-r", dest="rclip", default=1, type="int", help="pair ID is derived from rstrip N chars [default: %default]") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) fastafile, = args base = op.basename(fastafile).split(".")[0] frags = base + ".frags.fasta" pairs = base + ".pairs.fasta" if fastafile.endswith(".gz"): frags += ".gz" pairs += ".gz" fragsfw = must_open(frags, "w") pairsfw = must_open(pairs, "w") N = opts.rclip strip_name = lambda x: x[:-N] if N else str skipflag = False # controls the iterator skip fastaiter = SeqIO.parse(fastafile, "fasta") for a, b in pairwise(fastaiter): aid, bid = [strip_name(x) for x in (a.id, b.id)] if skipflag: skipflag = False continue if aid == bid: SeqIO.write([a, b], pairsfw, "fasta") skipflag = True else: SeqIO.write([a], fragsfw, "fasta") # don't forget the last one, when b is None if not skipflag: SeqIO.write([a], fragsfw, "fasta") logging.debug("Reads paired into `%s` and `%s`" % (pairs, frags))
python
def pairinplace(args):
    """
    %prog pairinplace bulk.fasta

    Pair up the records in bulk.fasta by comparing the names for adjacent
    records. If they match, print to bulk.pairs.fasta, else print to
    bulk.frags.fasta.
    """
    from jcvi.utils.iter import pairwise

    p = OptionParser(pairinplace.__doc__)
    p.add_option("-r", dest="rclip", default=1, type="int",
            help="pair ID is derived from rstrip N chars [default: %default]")
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    fastafile, = args
    base = op.basename(fastafile).split(".")[0]
    frags = base + ".frags.fasta"
    pairs = base + ".pairs.fasta"
    if fastafile.endswith(".gz"):
        frags += ".gz"
        pairs += ".gz"

    fragsfw = must_open(frags, "w")
    pairsfw = must_open(pairs, "w")

    N = opts.rclip
    # BUGFIX: select between two callables. The old form
    # `lambda x: x[:-N] if N else str` evaluated the conditional INSIDE the
    # lambda body, so with N == 0 every call returned the builtin `str` type
    # (a constant), making all pair IDs compare equal.
    strip_name = (lambda x: x[:-N]) if N else str

    b = None  # guards the trailing write when there are fewer than 2 records
    skipflag = False  # controls the iterator skip
    fastaiter = SeqIO.parse(fastafile, "fasta")
    for a, b in pairwise(fastaiter):
        aid, bid = [strip_name(x) for x in (a.id, b.id)]
        if skipflag:
            skipflag = False
            continue

        if aid == bid:
            SeqIO.write([a, b], pairsfw, "fasta")
            skipflag = True
        else:
            SeqIO.write([a], fragsfw, "fasta")

    # Don't forget the last record: `pairwise` yields overlapping pairs, so an
    # unpaired final record only ever appears as `b`.
    # BUGFIX: the old code wrote `a` (the second-to-last record) here, which
    # duplicated it in the frags output and silently dropped the true last one.
    if not skipflag and b is not None:
        SeqIO.write([b], fragsfw, "fasta")

    logging.debug("Reads paired into `%s` and `%s`" % (pairs, frags))
[ "def", "pairinplace", "(", "args", ")", ":", "from", "jcvi", ".", "utils", ".", "iter", "import", "pairwise", "p", "=", "OptionParser", "(", "pairinplace", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"-r\"", ",", "dest", "=", "\"rclip\"", ",", ...
%prog pairinplace bulk.fasta Pair up the records in bulk.fasta by comparing the names for adjacent records. If they match, print to bulk.pairs.fasta, else print to bulk.frags.fasta.
[ "%prog", "pairinplace", "bulk", ".", "fasta" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/fasta.py#L1752-L1805
train
200,600
tanghaibao/jcvi
jcvi/formats/fasta.py
_uniq_rec
def _uniq_rec(fastafile, seq=False): """ Returns unique records """ seen = set() for rec in SeqIO.parse(fastafile, "fasta"): name = str(rec.seq) if seq else rec.id if name in seen: logging.debug("ignore {0}".format(rec.id)) continue seen.add(name) yield rec
python
def _uniq_rec(fastafile, seq=False):
    """
    Returns unique records

    Dedup key is the full sequence string when `seq` is True, otherwise the
    record id; later duplicates are logged and skipped.
    """
    observed = set()
    for record in SeqIO.parse(fastafile, "fasta"):
        key = str(record.seq) if seq else record.id
        if key not in observed:
            observed.add(key)
            yield record
        else:
            logging.debug("ignore {0}".format(record.id))
[ "def", "_uniq_rec", "(", "fastafile", ",", "seq", "=", "False", ")", ":", "seen", "=", "set", "(", ")", "for", "rec", "in", "SeqIO", ".", "parse", "(", "fastafile", ",", "\"fasta\"", ")", ":", "name", "=", "str", "(", "rec", ".", "seq", ")", "if"...
Returns unique records
[ "Returns", "unique", "records" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/fasta.py#L1920-L1931
train
200,601
tanghaibao/jcvi
jcvi/formats/fasta.py
uniq
def uniq(args): """ %prog uniq fasta uniq.fasta remove fasta records that are the same """ p = OptionParser(uniq.__doc__) p.add_option("--seq", default=False, action="store_true", help="Uniqify the sequences [default: %default]") p.add_option("-t", "--trimname", dest="trimname", action="store_true", default=False, help="turn on the defline trim to first space [default: %default]") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(p.print_help()) fastafile, uniqfastafile = args fw = must_open(uniqfastafile, "w") seq = opts.seq for rec in _uniq_rec(fastafile, seq=seq): if opts.trimname: rec.description = "" SeqIO.write([rec], fw, "fasta")
python
def uniq(args):
    """
    %prog uniq fasta uniq.fasta

    remove fasta records that are the same
    """
    p = OptionParser(uniq.__doc__)
    p.add_option("--seq", default=False, action="store_true",
            help="Uniqify the sequences [default: %default]")
    p.add_option("-t", "--trimname", dest="trimname",
            action="store_true", default=False,
            help="turn on the defline trim to first space [default: %default]")

    opts, args = p.parse_args(args)

    if len(args) != 2:
        # BUGFIX/consistency: print_help() returns None, so the old
        # `sys.exit(p.print_help())` exited with status 0 on a usage error.
        # Sibling commands in this file use `not p.print_help()` to exit 1.
        sys.exit(not p.print_help())

    fastafile, uniqfastafile = args
    fw = must_open(uniqfastafile, "w")
    seq = opts.seq

    for rec in _uniq_rec(fastafile, seq=seq):
        if opts.trimname:
            # Clearing the description leaves only the id on the defline.
            rec.description = ""
        SeqIO.write([rec], fw, "fasta")
[ "def", "uniq", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "uniq", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--seq\"", ",", "default", "=", "False", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"Uniqify the sequences [defa...
%prog uniq fasta uniq.fasta remove fasta records that are the same
[ "%prog", "uniq", "fasta", "uniq", ".", "fasta" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/fasta.py#L1934-L1958
train
200,602
tanghaibao/jcvi
jcvi/formats/fasta.py
random
def random(args): """ %prog random fasta 100 > random100.fasta Take number of records randomly from fasta """ from random import sample p = OptionParser(random.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) fastafile, N = args N = int(N) assert N > 0 f = Fasta(fastafile) fw = must_open("stdout", "w") for key in sample(f.keys(), N): rec = f[key] SeqIO.write([rec], fw, "fasta") fw.close()
python
def random(args):
    """
    %prog random fasta 100 > random100.fasta

    Take number of records randomly from fasta
    """
    from random import sample

    p = OptionParser(random.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    fastafile, N = args
    N = int(N)
    assert N > 0

    f = Fasta(fastafile)
    fw = must_open("stdout", "w")

    # BUGFIX: materialize the keys before sampling. On Python 3,
    # random.sample() requires a sequence; passing a dict view (and, since
    # 3.11, any set-like) raises TypeError.
    for key in sample(list(f.keys()), N):
        rec = f[key]
        SeqIO.write([rec], fw, "fasta")

    fw.close()
[ "def", "random", "(", "args", ")", ":", "from", "random", "import", "sample", "p", "=", "OptionParser", "(", "random", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "2"...
%prog random fasta 100 > random100.fasta Take number of records randomly from fasta
[ "%prog", "random", "fasta", "100", ">", "random100", ".", "fasta" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/fasta.py#L1961-L1986
train
200,603
tanghaibao/jcvi
jcvi/formats/fasta.py
iter_fasta_qual
def iter_fasta_qual(fastafile, qualfile, defaultqual=OKQUAL, modify=False): """ used by trim, emits one SeqRecord with quality values in it """ from Bio.SeqIO.QualityIO import PairedFastaQualIterator if not qualfile: qualfile = make_qual(fastafile, score=defaultqual) rec_iter = PairedFastaQualIterator(open(fastafile), open(qualfile)) for rec in rec_iter: yield rec if not modify else modify_qual(rec)
python
def iter_fasta_qual(fastafile, qualfile, defaultqual=OKQUAL, modify=False):
    """
    used by trim, emits one SeqRecord with quality values in it

    If `qualfile` is falsy, a synthetic qual file with a flat score of
    `defaultqual` is generated first. When `modify` is set, each record is
    passed through modify_qual() before being yielded.
    """
    from Bio.SeqIO.QualityIO import PairedFastaQualIterator

    if not qualfile:
        qualfile = make_qual(fastafile, score=defaultqual)

    # BUGFIX: the old code opened both files and never closed them. The
    # with-block releases the handles once the generator is exhausted (or
    # closed early).
    with open(fastafile) as fastahandle, open(qualfile) as qualhandle:
        for rec in PairedFastaQualIterator(fastahandle, qualhandle):
            yield rec if not modify else modify_qual(rec)
[ "def", "iter_fasta_qual", "(", "fastafile", ",", "qualfile", ",", "defaultqual", "=", "OKQUAL", ",", "modify", "=", "False", ")", ":", "from", "Bio", ".", "SeqIO", ".", "QualityIO", "import", "PairedFastaQualIterator", "if", "not", "qualfile", ":", "qualfile",...
used by trim, emits one SeqRecord with quality values in it
[ "used", "by", "trim", "emits", "one", "SeqRecord", "with", "quality", "values", "in", "it" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/fasta.py#L2018-L2028
train
200,604
tanghaibao/jcvi
jcvi/formats/fasta.py
trim
def trim(args): """ %prog trim fasta.screen newfasta take the screen output from `cross_match` (against a vector db, for example), then trim the sequences to remove X's. Will also perform quality trim if fasta.screen.qual is found. The trimming algorithm is based on finding the subarray that maximize the sum """ from jcvi.algorithms.maxsum import max_sum p = OptionParser(trim.__doc__) p.add_option("-c", dest="min_length", type="int", default=64, help="minimum sequence length after trimming") p.add_option("-s", dest="score", default=QUAL, help="quality trimming cutoff [default: %default]") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(p.print_help()) fastafile, newfastafile = args qualfile = get_qual(fastafile) newqualfile = get_qual(newfastafile, check=False) logging.debug("Trim bad sequence from fasta file `%s` to `%s`" % \ (fastafile, newfastafile)) fw = must_open(newfastafile, "w") fw_qual = open(newqualfile, "w") dropped = trimmed = 0 for rec in iter_fasta_qual(fastafile, qualfile, modify=True): qv = [x - opts.score for x in \ rec.letter_annotations["phred_quality"]] msum, trim_start, trim_end = max_sum(qv) score = trim_end - trim_start + 1 if score < opts.min_length: dropped += 1 continue if score < len(rec): trimmed += 1 rec = rec[trim_start:trim_end + 1] write_fasta_qual(rec, fw, fw_qual) print("A total of %d sequences modified." % trimmed, file=sys.stderr) print("A total of %d sequences dropped (length < %d)." % \ (dropped, opts.min_length), file=sys.stderr) fw.close() fw_qual.close()
python
def trim(args):
    """
    %prog trim fasta.screen newfasta

    take the screen output from `cross_match` (against a vector db, for
    example), then trim the sequences to remove X's. Will also perform quality
    trim if fasta.screen.qual is found. The trimming algorithm is based on
    finding the subarray that maximize the sum
    """
    from jcvi.algorithms.maxsum import max_sum

    p = OptionParser(trim.__doc__)
    p.add_option("-c", dest="min_length", type="int", default=64,
            help="minimum sequence length after trimming")
    # BUGFIX: declare type="int" -- without it a user-supplied `-s` value
    # arrives as a string and `x - opts.score` below raises TypeError.
    p.add_option("-s", dest="score", default=QUAL, type="int",
            help="quality trimming cutoff [default: %default]")
    opts, args = p.parse_args(args)

    if len(args) != 2:
        # Consistency: exit with status 1 on usage error (print_help()
        # returns None, which sys.exit() treats as success).
        sys.exit(not p.print_help())

    fastafile, newfastafile = args
    qualfile = get_qual(fastafile)
    newqualfile = get_qual(newfastafile, check=False)

    logging.debug("Trim bad sequence from fasta file `%s` to `%s`" % \
            (fastafile, newfastafile))

    fw = must_open(newfastafile, "w")
    fw_qual = open(newqualfile, "w")

    dropped = trimmed = 0

    for rec in iter_fasta_qual(fastafile, qualfile, modify=True):
        # Shift the qualities by the cutoff so that max_sum() finds the
        # window with the best net quality.
        qv = [x - opts.score for x in \
                rec.letter_annotations["phred_quality"]]
        msum, trim_start, trim_end = max_sum(qv)
        score = trim_end - trim_start + 1

        if score < opts.min_length:
            dropped += 1
            continue

        if score < len(rec):
            trimmed += 1
            rec = rec[trim_start:trim_end + 1]

        write_fasta_qual(rec, fw, fw_qual)

    print("A total of %d sequences modified." % trimmed, file=sys.stderr)
    print("A total of %d sequences dropped (length < %d)." % \
            (dropped, opts.min_length), file=sys.stderr)

    fw.close()
    fw_qual.close()
[ "def", "trim", "(", "args", ")", ":", "from", "jcvi", ".", "algorithms", ".", "maxsum", "import", "max_sum", "p", "=", "OptionParser", "(", "trim", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"-c\"", ",", "dest", "=", "\"min_length\"", ",", "ty...
%prog trim fasta.screen newfasta take the screen output from `cross_match` (against a vector db, for example), then trim the sequences to remove X's. Will also perform quality trim if fasta.screen.qual is found. The trimming algorithm is based on finding the subarray that maximize the sum
[ "%prog", "trim", "fasta", ".", "screen", "newfasta" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/fasta.py#L2038-L2093
train
200,605
tanghaibao/jcvi
jcvi/formats/fasta.py
sequin
def sequin(args): """ %prog sequin inputfasta Generate a gapped fasta format with known gap sizes embedded. suitable for Sequin submission. A gapped sequence represents a newer method for describing non-contiguous sequences, but only requires a single sequence identifier. A gap is represented by a line that starts with >? and is immediately followed by either a length (for gaps of known length) or "unk100" for gaps of unknown length. For example, ">?200". The next sequence segment continues on the next line, with no separate definition line or identifier. The difference between a gapped sequence and a segmented sequence is that the gapped sequence uses a single identifier and can specify known length gaps. Gapped sequences are preferred over segmented sequences. A sample gapped sequence file is shown here: >m_gagei [organism=Mansonia gagei] Mansonia gagei NADH dehydrogenase ... ATGGAGCATACATATCAATATTCATGGATCATACCGTTTGTGCCACTTCCAATTCCTATTTTAATAGGAA TTGGACTCCTACTTTTTCCGACGGCAACAAAAAATCTTCGTCGTATGTGGGCTCTTCCCAATATTTTATT >?200 GGTATAATAACAGTATTATTAGGGGCTACTTTAGCTCTTGC TCAAAAAGATATTAAGAGGGGTTTAGCCTATTCTACAATGTCCCAACTGGGTTATATGATGTTAGCTCTA >?unk100 TCAATAAAACTATGGGGTAAAGAAGAACAAAAAATAATTAACAGAAATTTTCGTTTATCTCCTTTATTAA TATTAACGATGAATAATAATGAGAAGCCATATAGAATTGGTGATAATGTAAAAAAAGGGGCTCTTATTAC """ p = OptionParser(sequin.__doc__) p.add_option("--unk", default=100, type="int", help="The size for unknown gaps [default: %default]") p.add_option("--newid", default=None, help="Use this identifier instead [default: %default]") p.add_option("--chromosome", default=None, help="Add [chromosome= ] to FASTA header [default: %default]") p.add_option("--clone", default=None, help="Add [clone= ] to FASTA header [default: %default]") p.set_mingap(default=100) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) inputfasta, = args unk = opts.unk outputfasta = inputfasta.rsplit(".", 1)[0] + ".split" rec = next(SeqIO.parse(must_open(inputfasta), "fasta")) seq = "" 
unknowns, knowns = 0, 0 for gap, gap_group in groupby(rec.seq, lambda x: x.upper() == 'N'): subseq = "".join(gap_group) if gap: gap_length = len(subseq) if gap_length == unk: subseq = "\n>?unk{0}\n".format(unk) unknowns += 1 elif gap_length >= opts.mingap: subseq = "\n>?{0}\n".format(gap_length) knowns += 1 seq += subseq fw = must_open(outputfasta, "w") id = opts.newid or rec.id fastaheader = ">{0}".format(id) if opts.chromosome: fastaheader += " [chromosome={0}]".format(opts.chromosome) if opts.clone: fastaheader += " [clone={0}]".format(opts.clone) print(fastaheader, file=fw) print(seq, file=fw) fw.close() logging.debug("Sequin FASTA written to `{0}` (gaps: {1} unknowns, {2} knowns).".\ format(outputfasta, unknowns, knowns)) return outputfasta, unknowns + knowns
python
def sequin(args):
    """
    %prog sequin inputfasta

    Generate a gapped fasta format with known gap sizes embedded. suitable for
    Sequin submission.

    A gapped sequence represents a newer method for describing non-contiguous
    sequences, but only requires a single sequence identifier. A gap is
    represented by a line that starts with >? and is immediately followed by
    either a length (for gaps of known length) or "unk100" for gaps of unknown
    length. For example, ">?200". The next sequence segment continues on the
    next line, with no separate definition line or identifier. The difference
    between a gapped sequence and a segmented sequence is that the gapped
    sequence uses a single identifier and can specify known length gaps.
    Gapped sequences are preferred over segmented sequences. A sample gapped
    sequence file is shown here:

    >m_gagei [organism=Mansonia gagei] Mansonia gagei NADH dehydrogenase ...
    ATGGAGCATACATATCAATATTCATGGATCATACCGTTTGTGCCACTTCCAATTCCTATTTTAATAGGAA
    TTGGACTCCTACTTTTTCCGACGGCAACAAAAAATCTTCGTCGTATGTGGGCTCTTCCCAATATTTTATT
    >?200
    GGTATAATAACAGTATTATTAGGGGCTACTTTAGCTCTTGC
    TCAAAAAGATATTAAGAGGGGTTTAGCCTATTCTACAATGTCCCAACTGGGTTATATGATGTTAGCTCTA
    >?unk100
    TCAATAAAACTATGGGGTAAAGAAGAACAAAAAATAATTAACAGAAATTTTCGTTTATCTCCTTTATTAA
    TATTAACGATGAATAATAATGAGAAGCCATATAGAATTGGTGATAATGTAAAAAAAGGGGCTCTTATTAC
    """
    p = OptionParser(sequin.__doc__)
    p.add_option("--unk", default=100, type="int",
            help="The size for unknown gaps [default: %default]")
    p.add_option("--newid", default=None,
            help="Use this identifier instead [default: %default]")
    p.add_option("--chromosome", default=None,
            help="Add [chromosome= ] to FASTA header [default: %default]")
    p.add_option("--clone", default=None,
            help="Add [clone= ] to FASTA header [default: %default]")
    p.set_mingap(default=100)
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    inputfasta, = args
    unk = opts.unk
    outputfasta = inputfasta.rsplit(".", 1)[0] + ".split"
    rec = next(SeqIO.parse(must_open(inputfasta), "fasta"))

    # Walk the sequence once; each run of N's that qualifies as a gap is
    # swapped for a Sequin gap directive, everything else passes through.
    pieces = []
    unknowns, knowns = 0, 0
    for is_gap, run in groupby(rec.seq, lambda x: x.upper() == 'N'):
        chunk = "".join(run)
        if is_gap:
            run_length = len(chunk)
            if run_length == unk:
                chunk = "\n>?unk{0}\n".format(unk)
                unknowns += 1
            elif run_length >= opts.mingap:
                chunk = "\n>?{0}\n".format(run_length)
                knowns += 1
        pieces.append(chunk)
    seq = "".join(pieces)

    fw = must_open(outputfasta, "w")
    newid = opts.newid or rec.id
    fastaheader = ">{0}".format(newid)
    if opts.chromosome:
        fastaheader += " [chromosome={0}]".format(opts.chromosome)
    if opts.clone:
        fastaheader += " [clone={0}]".format(opts.clone)

    print(fastaheader, file=fw)
    print(seq, file=fw)
    fw.close()
    logging.debug("Sequin FASTA written to `{0}` (gaps: {1} unknowns, {2} knowns).".\
            format(outputfasta, unknowns, knowns))

    return outputfasta, unknowns + knowns
[ "def", "sequin", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "sequin", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--unk\"", ",", "default", "=", "100", ",", "type", "=", "\"int\"", ",", "help", "=", "\"The size for unknown gaps [default...
%prog sequin inputfasta Generate a gapped fasta format with known gap sizes embedded. suitable for Sequin submission. A gapped sequence represents a newer method for describing non-contiguous sequences, but only requires a single sequence identifier. A gap is represented by a line that starts with >? and is immediately followed by either a length (for gaps of known length) or "unk100" for gaps of unknown length. For example, ">?200". The next sequence segment continues on the next line, with no separate definition line or identifier. The difference between a gapped sequence and a segmented sequence is that the gapped sequence uses a single identifier and can specify known length gaps. Gapped sequences are preferred over segmented sequences. A sample gapped sequence file is shown here: >m_gagei [organism=Mansonia gagei] Mansonia gagei NADH dehydrogenase ... ATGGAGCATACATATCAATATTCATGGATCATACCGTTTGTGCCACTTCCAATTCCTATTTTAATAGGAA TTGGACTCCTACTTTTTCCGACGGCAACAAAAAATCTTCGTCGTATGTGGGCTCTTCCCAATATTTTATT >?200 GGTATAATAACAGTATTATTAGGGGCTACTTTAGCTCTTGC TCAAAAAGATATTAAGAGGGGTTTAGCCTATTCTACAATGTCCCAACTGGGTTATATGATGTTAGCTCTA >?unk100 TCAATAAAACTATGGGGTAAAGAAGAACAAAAAATAATTAACAGAAATTTTCGTTTATCTCCTTTATTAA TATTAACGATGAATAATAATGAGAAGCCATATAGAATTGGTGATAATGTAAAAAAAGGGGCTCTTATTAC
[ "%prog", "sequin", "inputfasta" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/fasta.py#L2096-L2172
train
200,606
tanghaibao/jcvi
jcvi/formats/fasta.py
tidy
def tidy(args): """ %prog tidy fastafile Trim terminal Ns, normalize gap sizes and remove small components. """ p = OptionParser(tidy.__doc__) p.add_option("--gapsize", dest="gapsize", default=0, type="int", help="Set all gaps to the same size [default: %default]") p.add_option("--minlen", dest="minlen", default=100, type="int", help="Minimum component size [default: %default]") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) fastafile, = args gapsize = opts.gapsize minlen = opts.minlen tidyfastafile = fastafile.rsplit(".", 1)[0] + ".tidy.fasta" fw = must_open(tidyfastafile, "w") removed = normalized = 0 fasta = Fasta(fastafile, lazy=True) for name, rec in fasta.iteritems_ordered(): rec.seq = rec.seq.upper() if minlen: removed += remove_small_components(rec, minlen) trim_terminal_Ns(rec) if gapsize: normalized += normalize_gaps(rec, gapsize) if len(rec) == 0: logging.debug("Drop seq {0}".format(rec.id)) continue SeqIO.write([rec], fw, "fasta") # Print statistics if removed: logging.debug("Total discarded bases: {0}".format(removed)) if normalized: logging.debug("Gaps normalized: {0}".format(normalized)) logging.debug("Tidy FASTA written to `{0}`.".format(tidyfastafile)) fw.close() return tidyfastafile
python
def tidy(args):
    """
    %prog tidy fastafile

    Trim terminal Ns, normalize gap sizes and remove small components.
    """
    p = OptionParser(tidy.__doc__)
    p.add_option("--gapsize", dest="gapsize", default=0, type="int",
            help="Set all gaps to the same size [default: %default]")
    p.add_option("--minlen", dest="minlen", default=100, type="int",
            help="Minimum component size [default: %default]")
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    fastafile, = args
    gapsize = opts.gapsize
    minlen = opts.minlen

    tidyfastafile = fastafile.rsplit(".", 1)[0] + ".tidy.fasta"
    fw = must_open(tidyfastafile, "w")

    dropped_bases = 0
    gaps_fixed = 0
    fasta = Fasta(fastafile, lazy=True)
    for name, rec in fasta.iteritems_ordered():
        rec.seq = rec.seq.upper()
        if minlen:
            dropped_bases += remove_small_components(rec, minlen)
        trim_terminal_Ns(rec)
        if gapsize:
            gaps_fixed += normalize_gaps(rec, gapsize)

        # A record may shrink to nothing after component removal/trimming.
        if len(rec):
            SeqIO.write([rec], fw, "fasta")
        else:
            logging.debug("Drop seq {0}".format(rec.id))

    # Print statistics
    if dropped_bases:
        logging.debug("Total discarded bases: {0}".format(dropped_bases))
    if gaps_fixed:
        logging.debug("Gaps normalized: {0}".format(gaps_fixed))
    logging.debug("Tidy FASTA written to `{0}`.".format(tidyfastafile))
    fw.close()

    return tidyfastafile
[ "def", "tidy", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "tidy", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--gapsize\"", ",", "dest", "=", "\"gapsize\"", ",", "default", "=", "0", ",", "type", "=", "\"int\"", ",", "help", "=", ...
%prog tidy fastafile Trim terminal Ns, normalize gap sizes and remove small components.
[ "%prog", "tidy", "fastafile" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/fasta.py#L2209-L2257
train
200,607
tanghaibao/jcvi
jcvi/formats/fasta.py
gaps
def gaps(args): """ %prog gaps fastafile Print out a list of gaps in BED format (.gaps.bed). """ from jcvi.formats.sizes import agp from jcvi.formats.agp import mask, build p = OptionParser(gaps.__doc__) p.add_option("--split", default=False, action="store_true", help="Generate .split.fasta [default: %default]") p.set_mingap(default=100) p.set_cpus() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) inputfasta, = args mingap = opts.mingap split = opts.split prefix = inputfasta.rsplit(".", 1)[0] bedfile = prefix + ".gaps.bed" if need_update(inputfasta, bedfile): write_gaps_bed(inputfasta, prefix, mingap, opts.cpus) if split: splitfile = prefix + ".split.fasta" oagpfile = prefix + ".splitobject.agp" cagpfile = prefix + ".splitcomponent.agp" if need_update((inputfasta, bedfile), splitfile): sizesagpfile = agp([inputfasta]) maskedagpfile = mask([sizesagpfile, bedfile, "--splitobject"]) shutil.move(maskedagpfile, oagpfile) logging.debug("AGP file written to `{0}`.".format(oagpfile)) maskedagpfile = mask([sizesagpfile, bedfile, "--splitcomponent"]) shutil.move(maskedagpfile, cagpfile) logging.debug("AGP file written to `{0}`.".format(cagpfile)) build([oagpfile, inputfasta, splitfile]) os.remove(sizesagpfile) return splitfile, oagpfile, cagpfile
python
def gaps(args):
    """
    %prog gaps fastafile

    Print out a list of gaps in BED format (.gaps.bed).
    """
    from jcvi.formats.sizes import agp
    from jcvi.formats.agp import mask, build

    p = OptionParser(gaps.__doc__)
    p.add_option("--split", default=False, action="store_true",
            help="Generate .split.fasta [default: %default]")
    p.set_mingap(default=100)
    p.set_cpus()
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    inputfasta, = args
    mingap = opts.mingap
    split = opts.split
    prefix = inputfasta.rsplit(".", 1)[0]
    bedfile = prefix + ".gaps.bed"

    if need_update(inputfasta, bedfile):
        write_gaps_bed(inputfasta, prefix, mingap, opts.cpus)

    if split:
        splitfile = prefix + ".split.fasta"
        oagpfile = prefix + ".splitobject.agp"
        cagpfile = prefix + ".splitcomponent.agp"

        if need_update((inputfasta, bedfile), splitfile):
            sizesagpfile = agp([inputfasta])

            # Mask the gaps twice: once splitting at objects, once at
            # components; each pass yields its own AGP file.
            for flag, target in (("--splitobject", oagpfile),
                                 ("--splitcomponent", cagpfile)):
                masked = mask([sizesagpfile, bedfile, flag])
                shutil.move(masked, target)
                logging.debug("AGP file written to `{0}`.".format(target))

            build([oagpfile, inputfasta, splitfile])
            os.remove(sizesagpfile)

        return splitfile, oagpfile, cagpfile
[ "def", "gaps", "(", "args", ")", ":", "from", "jcvi", ".", "formats", ".", "sizes", "import", "agp", "from", "jcvi", ".", "formats", ".", "agp", "import", "mask", ",", "build", "p", "=", "OptionParser", "(", "gaps", ".", "__doc__", ")", "p", ".", "...
%prog gaps fastafile Print out a list of gaps in BED format (.gaps.bed).
[ "%prog", "gaps", "fastafile" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/fasta.py#L2307-L2355
train
200,608
tanghaibao/jcvi
jcvi/formats/fasta.py
ORFFinder.scan_sequence
def scan_sequence(self, frame, direction): """ Search in one reading frame """ orf_start = None for c, index in self.codons(frame): if (c not in self.stop and (c in self.start or not self.start) and orf_start is None): orf_start = index elif c in self.stop and orf_start is not None: self._update_longest(orf_start, index + 3, direction, frame) orf_start = None if orf_start is not None: self._update_longest(orf_start, index + 3, direction, frame)
python
def scan_sequence(self, frame, direction):
    """
    Search in one reading frame

    Walks codons in the given frame; an ORF opens at a start codon (or at
    any non-stop codon when no start set is configured) and closes at the
    next stop codon, at which point _update_longest() is notified. An ORF
    still open at the end of the sequence is flushed as well.
    """
    orf_start = None
    for codon, index in self.codons(frame):
        is_stop = codon in self.stop
        if orf_start is None:
            if not is_stop and (codon in self.start or not self.start):
                orf_start = index
        elif is_stop:
            self._update_longest(orf_start, index + 3, direction, frame)
            orf_start = None
    if orf_start is not None:
        self._update_longest(orf_start, index + 3, direction, frame)
[ "def", "scan_sequence", "(", "self", ",", "frame", ",", "direction", ")", ":", "orf_start", "=", "None", "for", "c", ",", "index", "in", "self", ".", "codons", "(", "frame", ")", ":", "if", "(", "c", "not", "in", "self", ".", "stop", "and", "(", ...
Search in one reading frame
[ "Search", "in", "one", "reading", "frame" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/fasta.py#L218-L230
train
200,609
tanghaibao/jcvi
jcvi/assembly/coverage.py
bed_to_bedpe
def bed_to_bedpe(bedfile, bedpefile, pairsbedfile=None, matesfile=None, ca=False, strand=False): """ This converts the bedfile to bedpefile, assuming the reads are from CA. """ fp = must_open(bedfile) fw = must_open(bedpefile, "w") if pairsbedfile: fwpairs = must_open(pairsbedfile, "w") clones = defaultdict(list) for row in fp: b = BedLine(row) name = b.accn clonename = clone_name(name, ca=ca) clones[clonename].append(b) if matesfile: fp = open(matesfile) libraryline = next(fp) # 'library bes 37896 126916' lib, name, smin, smax = libraryline.split() assert lib == "library" smin, smax = int(smin), int(smax) logging.debug("Happy mates for lib {0} fall between {1} - {2}".\ format(name, smin, smax)) nbedpe = 0 nspan = 0 for clonename, blines in clones.items(): nlines = len(blines) if nlines == 2: a, b = blines aseqid, astart, aend = a.seqid, a.start, a.end bseqid, bstart, bend = b.seqid, b.start, b.end outcols = [aseqid, astart - 1, aend, bseqid, bstart - 1, bend, clonename] if strand: outcols.extend([0, a.strand, b.strand]) print("\t".join(str(x) for x in outcols), file=fw) nbedpe += 1 elif nlines == 1: a, = blines aseqid, astart, aend = a.seqid, a.start, a.end bseqid, bstart, bend = 0, 0, 0 else: # More than two lines per pair pass if pairsbedfile: start = min(astart, bstart) if bstart > 0 else astart end = max(aend, bend) if bend > 0 else aend if aseqid != bseqid: continue span = end - start + 1 if (not matesfile) or (smin <= span <= smax): print("\t".join(str(x) for x in \ (aseqid, start - 1, end, clonename)), file=fwpairs) nspan += 1 fw.close() logging.debug("A total of {0} bedpe written to `{1}`.".\ format(nbedpe, bedpefile)) if pairsbedfile: fwpairs.close() logging.debug("A total of {0} spans written to `{1}`.".\ format(nspan, pairsbedfile))
python
def bed_to_bedpe(bedfile, bedpefile, pairsbedfile=None, matesfile=None, ca=False, strand=False): """ This converts the bedfile to bedpefile, assuming the reads are from CA. """ fp = must_open(bedfile) fw = must_open(bedpefile, "w") if pairsbedfile: fwpairs = must_open(pairsbedfile, "w") clones = defaultdict(list) for row in fp: b = BedLine(row) name = b.accn clonename = clone_name(name, ca=ca) clones[clonename].append(b) if matesfile: fp = open(matesfile) libraryline = next(fp) # 'library bes 37896 126916' lib, name, smin, smax = libraryline.split() assert lib == "library" smin, smax = int(smin), int(smax) logging.debug("Happy mates for lib {0} fall between {1} - {2}".\ format(name, smin, smax)) nbedpe = 0 nspan = 0 for clonename, blines in clones.items(): nlines = len(blines) if nlines == 2: a, b = blines aseqid, astart, aend = a.seqid, a.start, a.end bseqid, bstart, bend = b.seqid, b.start, b.end outcols = [aseqid, astart - 1, aend, bseqid, bstart - 1, bend, clonename] if strand: outcols.extend([0, a.strand, b.strand]) print("\t".join(str(x) for x in outcols), file=fw) nbedpe += 1 elif nlines == 1: a, = blines aseqid, astart, aend = a.seqid, a.start, a.end bseqid, bstart, bend = 0, 0, 0 else: # More than two lines per pair pass if pairsbedfile: start = min(astart, bstart) if bstart > 0 else astart end = max(aend, bend) if bend > 0 else aend if aseqid != bseqid: continue span = end - start + 1 if (not matesfile) or (smin <= span <= smax): print("\t".join(str(x) for x in \ (aseqid, start - 1, end, clonename)), file=fwpairs) nspan += 1 fw.close() logging.debug("A total of {0} bedpe written to `{1}`.".\ format(nbedpe, bedpefile)) if pairsbedfile: fwpairs.close() logging.debug("A total of {0} spans written to `{1}`.".\ format(nspan, pairsbedfile))
[ "def", "bed_to_bedpe", "(", "bedfile", ",", "bedpefile", ",", "pairsbedfile", "=", "None", ",", "matesfile", "=", "None", ",", "ca", "=", "False", ",", "strand", "=", "False", ")", ":", "fp", "=", "must_open", "(", "bedfile", ")", "fw", "=", "must_open...
This converts the bedfile to bedpefile, assuming the reads are from CA.
[ "This", "converts", "the", "bedfile", "to", "bedpefile", "assuming", "the", "reads", "are", "from", "CA", "." ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/coverage.py#L96-L160
train
200,610
tanghaibao/jcvi
jcvi/assembly/coverage.py
posmap
def posmap(args): """ %prog posmap frgscf.sorted scf.fasta scfID Perform QC on the selected scfID, generate multiple BED files for plotting. """ p = OptionParser(posmap.__doc__) opts, args = p.parse_args(args) if len(args) != 3: sys.exit(p.print_help()) frgscffile, fastafile, scf = args # fasta cmd = "faOneRecord {0} {1}".format(fastafile, scf) scffastafile = scf + ".fasta" if not op.exists(scffastafile): sh(cmd, outfile=scffastafile) # sizes sizesfile = scffastafile + ".sizes" sizes = Sizes(scffastafile).mapping scfsize = sizes[scf] logging.debug("`{0}` has length of {1}.".format(scf, scfsize)) # gaps.bed gapsbedfile = scf + ".gaps.bed" if not op.exists(gapsbedfile): args = [scffastafile, "--bed", "--mingap=100"] gaps(args) # reads frgscf posmap posmapfile = scf + ".posmap" if not op.exists(posmapfile): args = [frgscffile, scf] query(args) # reads bed bedfile = scf + ".bed" if not op.exists(bedfile): args = [posmapfile] bed(args) # reads bedpe bedpefile = scf + ".bedpe" pairsbedfile = scf + ".pairs.bed" if not (op.exists(bedpefile) and op.exists(pairsbedfile)): bed_to_bedpe(bedfile, bedpefile, pairsbedfile=pairsbedfile, ca=True) # base coverage Coverage(bedfile, sizesfile) Coverage(pairsbedfile, sizesfile)
python
def posmap(args): """ %prog posmap frgscf.sorted scf.fasta scfID Perform QC on the selected scfID, generate multiple BED files for plotting. """ p = OptionParser(posmap.__doc__) opts, args = p.parse_args(args) if len(args) != 3: sys.exit(p.print_help()) frgscffile, fastafile, scf = args # fasta cmd = "faOneRecord {0} {1}".format(fastafile, scf) scffastafile = scf + ".fasta" if not op.exists(scffastafile): sh(cmd, outfile=scffastafile) # sizes sizesfile = scffastafile + ".sizes" sizes = Sizes(scffastafile).mapping scfsize = sizes[scf] logging.debug("`{0}` has length of {1}.".format(scf, scfsize)) # gaps.bed gapsbedfile = scf + ".gaps.bed" if not op.exists(gapsbedfile): args = [scffastafile, "--bed", "--mingap=100"] gaps(args) # reads frgscf posmap posmapfile = scf + ".posmap" if not op.exists(posmapfile): args = [frgscffile, scf] query(args) # reads bed bedfile = scf + ".bed" if not op.exists(bedfile): args = [posmapfile] bed(args) # reads bedpe bedpefile = scf + ".bedpe" pairsbedfile = scf + ".pairs.bed" if not (op.exists(bedpefile) and op.exists(pairsbedfile)): bed_to_bedpe(bedfile, bedpefile, pairsbedfile=pairsbedfile, ca=True) # base coverage Coverage(bedfile, sizesfile) Coverage(pairsbedfile, sizesfile)
[ "def", "posmap", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "posmap", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "3", ":", "sys", ".", "exit", "(", "p...
%prog posmap frgscf.sorted scf.fasta scfID Perform QC on the selected scfID, generate multiple BED files for plotting.
[ "%prog", "posmap", "frgscf", ".", "sorted", "scf", ".", "fasta", "scfID" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/coverage.py#L163-L216
train
200,611
tanghaibao/jcvi
jcvi/utils/progressbar.py
AnimatedMarker.update
def update(self, pbar): '''Updates the widget to show the next marker or the first marker when finished''' if pbar.finished: return self.markers[0] self.curmark = (self.curmark + 1) % len(self.markers) return self.markers[self.curmark]
python
def update(self, pbar): '''Updates the widget to show the next marker or the first marker when finished''' if pbar.finished: return self.markers[0] self.curmark = (self.curmark + 1) % len(self.markers) return self.markers[self.curmark]
[ "def", "update", "(", "self", ",", "pbar", ")", ":", "if", "pbar", ".", "finished", ":", "return", "self", ".", "markers", "[", "0", "]", "self", ".", "curmark", "=", "(", "self", ".", "curmark", "+", "1", ")", "%", "len", "(", "self", ".", "ma...
Updates the widget to show the next marker or the first marker when finished
[ "Updates", "the", "widget", "to", "show", "the", "next", "marker", "or", "the", "first", "marker", "when", "finished" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/utils/progressbar.py#L195-L202
train
200,612
tanghaibao/jcvi
jcvi/utils/progressbar.py
ProgressBar._format_line
def _format_line(self): 'Joins the widgets and justifies the line' widgets = ''.join(self._format_widgets()) if self.left_justify: return widgets.ljust(self.term_width) else: return widgets.rjust(self.term_width)
python
def _format_line(self): 'Joins the widgets and justifies the line' widgets = ''.join(self._format_widgets()) if self.left_justify: return widgets.ljust(self.term_width) else: return widgets.rjust(self.term_width)
[ "def", "_format_line", "(", "self", ")", ":", "widgets", "=", "''", ".", "join", "(", "self", ".", "_format_widgets", "(", ")", ")", "if", "self", ".", "left_justify", ":", "return", "widgets", ".", "ljust", "(", "self", ".", "term_width", ")", "else",...
Joins the widgets and justifies the line
[ "Joins", "the", "widgets", "and", "justifies", "the", "line" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/utils/progressbar.py#L545-L551
train
200,613
tanghaibao/jcvi
jcvi/utils/progressbar.py
ProgressBar._update_widgets
def _update_widgets(self): 'Checks all widgets for the time sensitive bit' self._time_sensitive = any(getattr(w, 'TIME_SENSITIVE', False) for w in self.widgets)
python
def _update_widgets(self): 'Checks all widgets for the time sensitive bit' self._time_sensitive = any(getattr(w, 'TIME_SENSITIVE', False) for w in self.widgets)
[ "def", "_update_widgets", "(", "self", ")", ":", "self", ".", "_time_sensitive", "=", "any", "(", "getattr", "(", "w", ",", "'TIME_SENSITIVE'", ",", "False", ")", "for", "w", "in", "self", ".", "widgets", ")" ]
Checks all widgets for the time sensitive bit
[ "Checks", "all", "widgets", "for", "the", "time", "sensitive", "bit" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/utils/progressbar.py#L562-L566
train
200,614
tanghaibao/jcvi
jcvi/annotation/reformat.py
prepare
def prepare(bedfile): """ Remove prepended tags in gene names. """ pf = bedfile.rsplit(".", 1)[0] abedfile = pf + ".a.bed" bbedfile = pf + ".b.bed" fwa = open(abedfile, "w") fwb = open(bbedfile, "w") bed = Bed(bedfile) seen = set() for b in bed: accns = b.accn.split(";") new_accns = [] for accn in accns: if ":" in accn: method, a = accn.split(":", 1) if method in ("liftOver", "GMAP", ""): accn = a if accn in seen: logging.error("Duplicate id {0} found. Ignored.".format(accn)) continue new_accns.append(accn) b.accn = accn print(b, file=fwa) seen.add(accn) b.accn = ";".join(new_accns) print(b, file=fwb) fwa.close() fwb.close()
python
def prepare(bedfile): """ Remove prepended tags in gene names. """ pf = bedfile.rsplit(".", 1)[0] abedfile = pf + ".a.bed" bbedfile = pf + ".b.bed" fwa = open(abedfile, "w") fwb = open(bbedfile, "w") bed = Bed(bedfile) seen = set() for b in bed: accns = b.accn.split(";") new_accns = [] for accn in accns: if ":" in accn: method, a = accn.split(":", 1) if method in ("liftOver", "GMAP", ""): accn = a if accn in seen: logging.error("Duplicate id {0} found. Ignored.".format(accn)) continue new_accns.append(accn) b.accn = accn print(b, file=fwa) seen.add(accn) b.accn = ";".join(new_accns) print(b, file=fwb) fwa.close() fwb.close()
[ "def", "prepare", "(", "bedfile", ")", ":", "pf", "=", "bedfile", ".", "rsplit", "(", "\".\"", ",", "1", ")", "[", "0", "]", "abedfile", "=", "pf", "+", "\".a.bed\"", "bbedfile", "=", "pf", "+", "\".b.bed\"", "fwa", "=", "open", "(", "abedfile", ",...
Remove prepended tags in gene names.
[ "Remove", "prepended", "tags", "in", "gene", "names", "." ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/annotation/reformat.py#L408-L440
train
200,615
tanghaibao/jcvi
jcvi/annotation/reformat.py
renumber
def renumber(args): """ %prog renumber Mt35.consolidated.bed > tagged.bed Renumber genes for annotation updates. """ from jcvi.algorithms.lis import longest_increasing_subsequence from jcvi.utils.grouper import Grouper p = OptionParser(renumber.__doc__) p.set_annot_reformat_opts() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) bedfile, = args pf = bedfile.rsplit(".", 1)[0] abedfile = pf + ".a.bed" bbedfile = pf + ".b.bed" if need_update(bedfile, (abedfile, bbedfile)): prepare(bedfile) mbed = Bed(bbedfile) g = Grouper() for s in mbed: accn = s.accn g.join(*accn.split(";")) bed = Bed(abedfile) for chr, sbed in bed.sub_beds(): current_chr = chr_number(chr) if not current_chr: continue ranks = [] gg = set() for s in sbed: accn = s.accn achr, arank = atg_name(accn) if achr != current_chr: continue ranks.append(arank) gg.add(accn) lranks = longest_increasing_subsequence(ranks) print(current_chr, len(sbed), "==>", len(ranks), \ "==>", len(lranks), file=sys.stderr) granks = set(gene_name(current_chr, x, prefix=opts.prefix, \ pad0=opts.pad0, uc=opts.uc) for x in lranks) | \ set(gene_name(current_chr, x, prefix=opts.prefix, \ pad0=opts.pad0, sep="te", uc=opts.uc) for x in lranks) tagstore = {} for s in sbed: achr, arank = atg_name(s.accn) accn = s.accn if accn in granks: tag = (accn, FRAME) elif accn in gg: tag = (accn, RETAIN) else: tag = (".", NEW) tagstore[accn] = tag # Find cases where genes overlap for s in sbed: accn = s.accn gaccn = g[accn] tags = [((tagstore[x][-1] if x in tagstore else NEW), x) for x in gaccn] group = [(PRIORITY.index(tag), x) for tag, x in tags] best = min(group)[-1] if accn != best: tag = (best, OVERLAP) else: tag = tagstore[accn] print("\t".join((str(s), "|".join(tag))))
python
def renumber(args): """ %prog renumber Mt35.consolidated.bed > tagged.bed Renumber genes for annotation updates. """ from jcvi.algorithms.lis import longest_increasing_subsequence from jcvi.utils.grouper import Grouper p = OptionParser(renumber.__doc__) p.set_annot_reformat_opts() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) bedfile, = args pf = bedfile.rsplit(".", 1)[0] abedfile = pf + ".a.bed" bbedfile = pf + ".b.bed" if need_update(bedfile, (abedfile, bbedfile)): prepare(bedfile) mbed = Bed(bbedfile) g = Grouper() for s in mbed: accn = s.accn g.join(*accn.split(";")) bed = Bed(abedfile) for chr, sbed in bed.sub_beds(): current_chr = chr_number(chr) if not current_chr: continue ranks = [] gg = set() for s in sbed: accn = s.accn achr, arank = atg_name(accn) if achr != current_chr: continue ranks.append(arank) gg.add(accn) lranks = longest_increasing_subsequence(ranks) print(current_chr, len(sbed), "==>", len(ranks), \ "==>", len(lranks), file=sys.stderr) granks = set(gene_name(current_chr, x, prefix=opts.prefix, \ pad0=opts.pad0, uc=opts.uc) for x in lranks) | \ set(gene_name(current_chr, x, prefix=opts.prefix, \ pad0=opts.pad0, sep="te", uc=opts.uc) for x in lranks) tagstore = {} for s in sbed: achr, arank = atg_name(s.accn) accn = s.accn if accn in granks: tag = (accn, FRAME) elif accn in gg: tag = (accn, RETAIN) else: tag = (".", NEW) tagstore[accn] = tag # Find cases where genes overlap for s in sbed: accn = s.accn gaccn = g[accn] tags = [((tagstore[x][-1] if x in tagstore else NEW), x) for x in gaccn] group = [(PRIORITY.index(tag), x) for tag, x in tags] best = min(group)[-1] if accn != best: tag = (best, OVERLAP) else: tag = tagstore[accn] print("\t".join((str(s), "|".join(tag))))
[ "def", "renumber", "(", "args", ")", ":", "from", "jcvi", ".", "algorithms", ".", "lis", "import", "longest_increasing_subsequence", "from", "jcvi", ".", "utils", ".", "grouper", "import", "Grouper", "p", "=", "OptionParser", "(", "renumber", ".", "__doc__", ...
%prog renumber Mt35.consolidated.bed > tagged.bed Renumber genes for annotation updates.
[ "%prog", "renumber", "Mt35", ".", "consolidated", ".", "bed", ">", "tagged", ".", "bed" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/annotation/reformat.py#L443-L525
train
200,616
tanghaibao/jcvi
jcvi/annotation/reformat.py
publocus
def publocus(args): """ %prog publocus idsfile > idsfiles.publocus Given a list of model identifiers, convert each into a GenBank approved pub_locus. Example output: Medtr1g007020.1 MTR_1g007020 Medtr1g007030.1 MTR_1g007030 Medtr1g007060.1 MTR_1g007060A Medtr1g007060.2 MTR_1g007060B """ p = OptionParser(publocus.__doc__) p.add_option("--locus_tag", default="MTR_", help="GenBank locus tag [default: %default]") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) locus_tag = opts.locus_tag index = AutoVivification() idsfile, = args fp = must_open(idsfile) for row in fp: locus, chrom, sep, rank, iso = atg_name(row, retval="locus,chr,sep,rank,iso") if None in (locus, chrom, sep, rank, iso): logging.warning("{0} is not a valid gene model identifier".format(row)) continue if locus not in index.keys(): pub_locus = gene_name(chrom, rank, prefix=locus_tag, sep=sep) index[locus]['pub_locus'] = pub_locus index[locus]['isos'] = set() index[locus]['isos'].add(int(iso)) for locus in index: pub_locus = index[locus]['pub_locus'] index[locus]['isos'] = sorted(index[locus]['isos']) if len(index[locus]['isos']) > 1: new = [chr(n+64) for n in index[locus]['isos'] if n < 27] for i, ni in zip(index[locus]['isos'], new): print("\t".join(x for x in ("{0}.{1}".format(locus, i), \ "{0}{1}".format(pub_locus, ni)))) else: print("\t".join(x for x in ("{0}.{1}".format(locus, index[locus]['isos'][0]), \ pub_locus)))
python
def publocus(args): """ %prog publocus idsfile > idsfiles.publocus Given a list of model identifiers, convert each into a GenBank approved pub_locus. Example output: Medtr1g007020.1 MTR_1g007020 Medtr1g007030.1 MTR_1g007030 Medtr1g007060.1 MTR_1g007060A Medtr1g007060.2 MTR_1g007060B """ p = OptionParser(publocus.__doc__) p.add_option("--locus_tag", default="MTR_", help="GenBank locus tag [default: %default]") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) locus_tag = opts.locus_tag index = AutoVivification() idsfile, = args fp = must_open(idsfile) for row in fp: locus, chrom, sep, rank, iso = atg_name(row, retval="locus,chr,sep,rank,iso") if None in (locus, chrom, sep, rank, iso): logging.warning("{0} is not a valid gene model identifier".format(row)) continue if locus not in index.keys(): pub_locus = gene_name(chrom, rank, prefix=locus_tag, sep=sep) index[locus]['pub_locus'] = pub_locus index[locus]['isos'] = set() index[locus]['isos'].add(int(iso)) for locus in index: pub_locus = index[locus]['pub_locus'] index[locus]['isos'] = sorted(index[locus]['isos']) if len(index[locus]['isos']) > 1: new = [chr(n+64) for n in index[locus]['isos'] if n < 27] for i, ni in zip(index[locus]['isos'], new): print("\t".join(x for x in ("{0}.{1}".format(locus, i), \ "{0}{1}".format(pub_locus, ni)))) else: print("\t".join(x for x in ("{0}.{1}".format(locus, index[locus]['isos'][0]), \ pub_locus)))
[ "def", "publocus", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "publocus", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--locus_tag\"", ",", "default", "=", "\"MTR_\"", ",", "help", "=", "\"GenBank locus tag [default: %default]\"", ")", "opts...
%prog publocus idsfile > idsfiles.publocus Given a list of model identifiers, convert each into a GenBank approved pub_locus. Example output: Medtr1g007020.1 MTR_1g007020 Medtr1g007030.1 MTR_1g007030 Medtr1g007060.1 MTR_1g007060A Medtr1g007060.2 MTR_1g007060B
[ "%prog", "publocus", "idsfile", ">", "idsfiles", ".", "publocus" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/annotation/reformat.py#L1104-L1152
train
200,617
tanghaibao/jcvi
jcvi/annotation/reformat.py
augustus
def augustus(args): """ %prog augustus augustus.gff3 > reformatted.gff3 AUGUSTUS does generate a gff3 (--gff3=on) but need some refinement. """ from jcvi.formats.gff import Gff p = OptionParser(augustus.__doc__) p.set_outfile() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) ingff3, = args gff = Gff(ingff3) fw = must_open(opts.outfile, "w") seen = defaultdict(int) for g in gff: if g.type not in ("gene", "transcript", "CDS"): continue if g.type == "transcript": g.type = "mRNA" prefix = g.seqid + "_" pid = prefix + g.id newid = "{0}-{1}".format(pid, seen[pid]) if pid in seen else pid seen[pid] += 1 g.attributes["ID"] = [newid] g.attributes["Parent"] = [(prefix + x) for x in g.attributes["Parent"]] g.update_attributes() print(g, file=fw) fw.close()
python
def augustus(args): """ %prog augustus augustus.gff3 > reformatted.gff3 AUGUSTUS does generate a gff3 (--gff3=on) but need some refinement. """ from jcvi.formats.gff import Gff p = OptionParser(augustus.__doc__) p.set_outfile() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) ingff3, = args gff = Gff(ingff3) fw = must_open(opts.outfile, "w") seen = defaultdict(int) for g in gff: if g.type not in ("gene", "transcript", "CDS"): continue if g.type == "transcript": g.type = "mRNA" prefix = g.seqid + "_" pid = prefix + g.id newid = "{0}-{1}".format(pid, seen[pid]) if pid in seen else pid seen[pid] += 1 g.attributes["ID"] = [newid] g.attributes["Parent"] = [(prefix + x) for x in g.attributes["Parent"]] g.update_attributes() print(g, file=fw) fw.close()
[ "def", "augustus", "(", "args", ")", ":", "from", "jcvi", ".", "formats", ".", "gff", "import", "Gff", "p", "=", "OptionParser", "(", "augustus", ".", "__doc__", ")", "p", ".", "set_outfile", "(", ")", "opts", ",", "args", "=", "p", ".", "parse_args"...
%prog augustus augustus.gff3 > reformatted.gff3 AUGUSTUS does generate a gff3 (--gff3=on) but need some refinement.
[ "%prog", "augustus", "augustus", ".", "gff3", ">", "reformatted", ".", "gff3" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/annotation/reformat.py#L1155-L1189
train
200,618
tanghaibao/jcvi
jcvi/annotation/reformat.py
tRNAscan
def tRNAscan(args): """ %prog tRNAscan all.trna > all.trna.gff3 Convert tRNAscan-SE output into gff3 format. Sequence tRNA Bounds tRNA Anti Intron Bounds Cove Name tRNA # Begin End Type Codon Begin End Score -------- ------ ---- ------ ---- ----- ----- ---- ------ 23231 1 335355 335440 Tyr GTA 335392 335404 69.21 23231 2 1076190 1076270 Leu AAG 0 0 66.33 Conversion based on PERL one-liner in: <https://github.com/sujaikumar/assemblage/blob/master/README-annotation.md> """ from jcvi.formats.gff import sort p = OptionParser(tRNAscan.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) trnaout, = args gffout = trnaout + ".gff3" fp = open(trnaout) fw = open(gffout, "w") next(fp) next(fp) row = next(fp) assert row.startswith("--------") for row in fp: atoms = [x.strip() for x in row.split("\t")] contig, trnanum, start, end, aa, codon, \ intron_start, intron_end, score = atoms start, end = int(start), int(end) orientation = '+' if start > end: start, end = end, start orientation = '-' source = "tRNAscan" type = "tRNA" if codon == "???": codon = "XXX" comment = "ID={0}.tRNA.{1};Name=tRNA-{2} (anticodon: {3})".\ format(contig, trnanum, aa, codon) print("\t".join(str(x) for x in (contig, source, type, start,\ end, score, orientation, ".", comment)), file=fw) fw.close() sort([gffout, "-i"])
python
def tRNAscan(args): """ %prog tRNAscan all.trna > all.trna.gff3 Convert tRNAscan-SE output into gff3 format. Sequence tRNA Bounds tRNA Anti Intron Bounds Cove Name tRNA # Begin End Type Codon Begin End Score -------- ------ ---- ------ ---- ----- ----- ---- ------ 23231 1 335355 335440 Tyr GTA 335392 335404 69.21 23231 2 1076190 1076270 Leu AAG 0 0 66.33 Conversion based on PERL one-liner in: <https://github.com/sujaikumar/assemblage/blob/master/README-annotation.md> """ from jcvi.formats.gff import sort p = OptionParser(tRNAscan.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) trnaout, = args gffout = trnaout + ".gff3" fp = open(trnaout) fw = open(gffout, "w") next(fp) next(fp) row = next(fp) assert row.startswith("--------") for row in fp: atoms = [x.strip() for x in row.split("\t")] contig, trnanum, start, end, aa, codon, \ intron_start, intron_end, score = atoms start, end = int(start), int(end) orientation = '+' if start > end: start, end = end, start orientation = '-' source = "tRNAscan" type = "tRNA" if codon == "???": codon = "XXX" comment = "ID={0}.tRNA.{1};Name=tRNA-{2} (anticodon: {3})".\ format(contig, trnanum, aa, codon) print("\t".join(str(x) for x in (contig, source, type, start,\ end, score, orientation, ".", comment)), file=fw) fw.close() sort([gffout, "-i"])
[ "def", "tRNAscan", "(", "args", ")", ":", "from", "jcvi", ".", "formats", ".", "gff", "import", "sort", "p", "=", "OptionParser", "(", "tRNAscan", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", ...
%prog tRNAscan all.trna > all.trna.gff3 Convert tRNAscan-SE output into gff3 format. Sequence tRNA Bounds tRNA Anti Intron Bounds Cove Name tRNA # Begin End Type Codon Begin End Score -------- ------ ---- ------ ---- ----- ----- ---- ------ 23231 1 335355 335440 Tyr GTA 335392 335404 69.21 23231 2 1076190 1076270 Leu AAG 0 0 66.33 Conversion based on PERL one-liner in: <https://github.com/sujaikumar/assemblage/blob/master/README-annotation.md>
[ "%prog", "tRNAscan", "all", ".", "trna", ">", "all", ".", "trna", ".", "gff3" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/annotation/reformat.py#L1192-L1248
train
200,619
tanghaibao/jcvi
jcvi/apps/mask.py
summary
def summary(args): """ %prog summary fastafile Report the number of bases and sequences masked. """ p = OptionParser(summary.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) fastafile, = args f = Fasta(fastafile, index=False) halfmaskedseqs = set() allmasked = 0 allbases = 0 cutoff = 50 for key, seq in f.iteritems(): masked = 0 for base in seq: if base not in "AGCT": masked += 1 seqlen = len(seq) if masked * 100. / seqlen > cutoff: halfmaskedseqs.add(key) allmasked += masked allbases += seqlen seqnum = len(f) maskedseqnum = len(halfmaskedseqs) print("Total masked bases: {0}".\ format(percentage(allmasked, allbases)), file=sys.stderr) print("Total masked sequences (contain > {0}% masked): {1}".\ format(cutoff, percentage(maskedseqnum, seqnum)), file=sys.stderr)
python
def summary(args): """ %prog summary fastafile Report the number of bases and sequences masked. """ p = OptionParser(summary.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) fastafile, = args f = Fasta(fastafile, index=False) halfmaskedseqs = set() allmasked = 0 allbases = 0 cutoff = 50 for key, seq in f.iteritems(): masked = 0 for base in seq: if base not in "AGCT": masked += 1 seqlen = len(seq) if masked * 100. / seqlen > cutoff: halfmaskedseqs.add(key) allmasked += masked allbases += seqlen seqnum = len(f) maskedseqnum = len(halfmaskedseqs) print("Total masked bases: {0}".\ format(percentage(allmasked, allbases)), file=sys.stderr) print("Total masked sequences (contain > {0}% masked): {1}".\ format(cutoff, percentage(maskedseqnum, seqnum)), file=sys.stderr)
[ "def", "summary", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "summary", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "1", ":", "sys", ".", "exit", "(", ...
%prog summary fastafile Report the number of bases and sequences masked.
[ "%prog", "summary", "fastafile" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/mask.py#L46-L83
train
200,620
tanghaibao/jcvi
jcvi/compara/synfind.py
find_synteny_region
def find_synteny_region(query, sbed, data, window, cutoff, colinear=False): """ Get all synteny blocks for a query, algorithm is single linkage anchors are a window centered on query Two categories of syntenic regions depending on what query is: (Syntelog): syntenic region is denoted by the syntelog (Gray gene): syntenic region is marked by the closest flanker """ regions = [] ysorted = sorted(data, key=lambda x: x[1]) g = Grouper() a, b = tee(ysorted) next(b, None) for ia, ib in izip(a, b): pos1, pos2 = ia[1], ib[1] if pos2 - pos1 < window and sbed[pos1].seqid == sbed[pos2].seqid: g.join(ia, ib) for group in sorted(g): (qflanker, syntelog), (far_flanker, far_syntelog), flanked = \ get_flanker(group, query) # run a mini-dagchainer here, take the direction that gives us most anchors if colinear: y_indexed_group = [(y, i) for i, (x, y) in enumerate(group)] lis = longest_increasing_subsequence(y_indexed_group) lds = longest_decreasing_subsequence(y_indexed_group) if len(lis) >= len(lds): track = lis orientation = "+" else: track = lds orientation = "-" group = [group[i] for (y, i) in track] xpos, ypos = zip(*group) score = min(len(set(xpos)), len(set(ypos))) if qflanker == query: gray = "S" else: gray = "G" if not flanked else "F" score -= 1 # slight penalty for not finding syntelog if score < cutoff: continue # y-boundary of the block left, right = group[0][1], group[-1][1] # this characterizes a syntenic region (left, right). # syntelog is -1 if it's a gray gene syn_region = (syntelog, far_syntelog, left, right, gray, orientation, score) regions.append(syn_region) return sorted(regions, key=lambda x: -x[-1])
python
def find_synteny_region(query, sbed, data, window, cutoff, colinear=False): """ Get all synteny blocks for a query, algorithm is single linkage anchors are a window centered on query Two categories of syntenic regions depending on what query is: (Syntelog): syntenic region is denoted by the syntelog (Gray gene): syntenic region is marked by the closest flanker """ regions = [] ysorted = sorted(data, key=lambda x: x[1]) g = Grouper() a, b = tee(ysorted) next(b, None) for ia, ib in izip(a, b): pos1, pos2 = ia[1], ib[1] if pos2 - pos1 < window and sbed[pos1].seqid == sbed[pos2].seqid: g.join(ia, ib) for group in sorted(g): (qflanker, syntelog), (far_flanker, far_syntelog), flanked = \ get_flanker(group, query) # run a mini-dagchainer here, take the direction that gives us most anchors if colinear: y_indexed_group = [(y, i) for i, (x, y) in enumerate(group)] lis = longest_increasing_subsequence(y_indexed_group) lds = longest_decreasing_subsequence(y_indexed_group) if len(lis) >= len(lds): track = lis orientation = "+" else: track = lds orientation = "-" group = [group[i] for (y, i) in track] xpos, ypos = zip(*group) score = min(len(set(xpos)), len(set(ypos))) if qflanker == query: gray = "S" else: gray = "G" if not flanked else "F" score -= 1 # slight penalty for not finding syntelog if score < cutoff: continue # y-boundary of the block left, right = group[0][1], group[-1][1] # this characterizes a syntenic region (left, right). # syntelog is -1 if it's a gray gene syn_region = (syntelog, far_syntelog, left, right, gray, orientation, score) regions.append(syn_region) return sorted(regions, key=lambda x: -x[-1])
[ "def", "find_synteny_region", "(", "query", ",", "sbed", ",", "data", ",", "window", ",", "cutoff", ",", "colinear", "=", "False", ")", ":", "regions", "=", "[", "]", "ysorted", "=", "sorted", "(", "data", ",", "key", "=", "lambda", "x", ":", "x", ...
Get all synteny blocks for a query, algorithm is single linkage anchors are a window centered on query Two categories of syntenic regions depending on what query is: (Syntelog): syntenic region is denoted by the syntelog (Gray gene): syntenic region is marked by the closest flanker
[ "Get", "all", "synteny", "blocks", "for", "a", "query", "algorithm", "is", "single", "linkage", "anchors", "are", "a", "window", "centered", "on", "query" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/compara/synfind.py#L69-L128
train
200,621
tanghaibao/jcvi
jcvi/compara/pad.py
get_segments
def get_segments(ranges, extra, minsegment=40): """ Given a list of Range, perform chaining on the ranges and select a highest scoring subset and cut based on their boundaries. Let's say the projection of the synteny blocks onto one axis look like the following. 1=====10......20====30....35====~~ Then the segmentation will yield a block [1, 20), [20, 35), using an arbitrary right extension rule. Extra are additional end breaks for chromosomes. """ from jcvi.utils.range import range_chain, LEFT, RIGHT NUL = 2 selected, score = range_chain(ranges) endpoints = [(x.start, NUL) for x in selected] endpoints += [(x[0], LEFT) for x in extra] endpoints += [(x[1], RIGHT) for x in extra] endpoints.sort() current_left = 0 for a, ai in endpoints: if ai == LEFT: current_left = a if ai == RIGHT: yield current_left, a elif ai == NUL: if a - current_left < minsegment: continue yield current_left, a - 1 current_left = a
python
def get_segments(ranges, extra, minsegment=40): """ Given a list of Range, perform chaining on the ranges and select a highest scoring subset and cut based on their boundaries. Let's say the projection of the synteny blocks onto one axis look like the following. 1=====10......20====30....35====~~ Then the segmentation will yield a block [1, 20), [20, 35), using an arbitrary right extension rule. Extra are additional end breaks for chromosomes. """ from jcvi.utils.range import range_chain, LEFT, RIGHT NUL = 2 selected, score = range_chain(ranges) endpoints = [(x.start, NUL) for x in selected] endpoints += [(x[0], LEFT) for x in extra] endpoints += [(x[1], RIGHT) for x in extra] endpoints.sort() current_left = 0 for a, ai in endpoints: if ai == LEFT: current_left = a if ai == RIGHT: yield current_left, a elif ai == NUL: if a - current_left < minsegment: continue yield current_left, a - 1 current_left = a
[ "def", "get_segments", "(", "ranges", ",", "extra", ",", "minsegment", "=", "40", ")", ":", "from", "jcvi", ".", "utils", ".", "range", "import", "range_chain", ",", "LEFT", ",", "RIGHT", "NUL", "=", "2", "selected", ",", "score", "=", "range_chain", "...
Given a list of Range, perform chaining on the ranges and select a highest scoring subset and cut based on their boundaries. Let's say the projection of the synteny blocks onto one axis look like the following. 1=====10......20====30....35====~~ Then the segmentation will yield a block [1, 20), [20, 35), using an arbitrary right extension rule. Extra are additional end breaks for chromosomes.
[ "Given", "a", "list", "of", "Range", "perform", "chaining", "on", "the", "ranges", "and", "select", "a", "highest", "scoring", "subset", "and", "cut", "based", "on", "their", "boundaries", ".", "Let", "s", "say", "the", "projection", "of", "the", "synteny"...
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/compara/pad.py#L163-L196
train
200,622
tanghaibao/jcvi
jcvi/assembly/kmer.py
entropy
def entropy(args): """ %prog entropy kmc_dump.out kmc_dump.out contains two columns: AAAAAAAAAAAGAAGAAAGAAA 34 """ p = OptionParser(entropy.__doc__) p.add_option("--threshold", default=0, type="int", help="Complexity needs to be above") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) kmc_out, = args fp = open(kmc_out) for row in fp: kmer, count = row.split() score = entropy_score(kmer) if score >= opts.threshold: print(" ".join((kmer, count, "{:.2f}".format(score))))
python
def entropy(args): """ %prog entropy kmc_dump.out kmc_dump.out contains two columns: AAAAAAAAAAAGAAGAAAGAAA 34 """ p = OptionParser(entropy.__doc__) p.add_option("--threshold", default=0, type="int", help="Complexity needs to be above") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) kmc_out, = args fp = open(kmc_out) for row in fp: kmer, count = row.split() score = entropy_score(kmer) if score >= opts.threshold: print(" ".join((kmer, count, "{:.2f}".format(score))))
[ "def", "entropy", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "entropy", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--threshold\"", ",", "default", "=", "0", ",", "type", "=", "\"int\"", ",", "help", "=", "\"Complexity needs to be above...
%prog entropy kmc_dump.out kmc_dump.out contains two columns: AAAAAAAAAAAGAAGAAAGAAA 34
[ "%prog", "entropy", "kmc_dump", ".", "out" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/kmer.py#L279-L300
train
200,623
tanghaibao/jcvi
jcvi/assembly/kmer.py
bed
def bed(args): """ %prog bed fastafile kmer.dump.txt Map kmers on FASTA. """ from jcvi.formats.fasta import rc, parse_fasta p = OptionParser(bed.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) fastafile, dumpfile = args fp = open(dumpfile) KMERS = set() for row in fp: kmer = row.split()[0] kmer_rc = rc(kmer) KMERS.add(kmer) KMERS.add(kmer_rc) K = len(kmer) logging.debug("Imported {} {}-mers".format(len(KMERS), K)) for name, seq in parse_fasta(fastafile): name = name.split()[0] for i in range(len(seq) - K): if i % 5000000 == 0: print("{}:{}".format(name, i), file=sys.stderr) kmer = seq[i: i + K] if kmer in KMERS: print("\t".join(str(x) for x in (name, i, i + K, kmer)))
python
def bed(args): """ %prog bed fastafile kmer.dump.txt Map kmers on FASTA. """ from jcvi.formats.fasta import rc, parse_fasta p = OptionParser(bed.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) fastafile, dumpfile = args fp = open(dumpfile) KMERS = set() for row in fp: kmer = row.split()[0] kmer_rc = rc(kmer) KMERS.add(kmer) KMERS.add(kmer_rc) K = len(kmer) logging.debug("Imported {} {}-mers".format(len(KMERS), K)) for name, seq in parse_fasta(fastafile): name = name.split()[0] for i in range(len(seq) - K): if i % 5000000 == 0: print("{}:{}".format(name, i), file=sys.stderr) kmer = seq[i: i + K] if kmer in KMERS: print("\t".join(str(x) for x in (name, i, i + K, kmer)))
[ "def", "bed", "(", "args", ")", ":", "from", "jcvi", ".", "formats", ".", "fasta", "import", "rc", ",", "parse_fasta", "p", "=", "OptionParser", "(", "bed", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "i...
%prog bed fastafile kmer.dump.txt Map kmers on FASTA.
[ "%prog", "bed", "fastafile", "kmer", ".", "dump", ".", "txt" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/kmer.py#L303-L336
train
200,624
tanghaibao/jcvi
jcvi/assembly/kmer.py
kmc
def kmc(args): """ %prog kmc folder Run kmc3 on Illumina reads. """ p = OptionParser(kmc.__doc__) p.add_option("-k", default=21, type="int", help="Kmer size") p.add_option("--ci", default=2, type="int", help="Exclude kmers with less than ci counts") p.add_option("--cs", default=2, type="int", help="Maximal value of a counter") p.add_option("--cx", default=None, type="int", help="Exclude kmers with more than cx counts") p.add_option("--single", default=False, action="store_true", help="Input is single-end data, only one FASTQ/FASTA") p.add_option("--fasta", default=False, action="store_true", help="Input is FASTA instead of FASTQ") p.set_cpus() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) folder, = args K = opts.k n = 1 if opts.single else 2 pattern = "*.fa,*.fa.gz,*.fasta,*.fasta.gz" if opts.fasta else \ "*.fq,*.fq.gz,*.fastq,*.fastq.gz" mm = MakeManager() for p, pf in iter_project(folder, pattern=pattern, n=n, commonprefix=False): pf = pf.split("_")[0] + ".ms{}".format(K) infiles = pf + ".infiles" fw = open(infiles, "w") print("\n".join(p), file=fw) fw.close() cmd = "kmc -k{} -m64 -t{}".format(K, opts.cpus) cmd += " -ci{} -cs{}".format(opts.ci, opts.cs) if opts.cx: cmd += " -cx{}".format(opts.cx) if opts.fasta: cmd += " -fm" cmd += " @{} {} .".format(infiles, pf) outfile = pf + ".kmc_suf" mm.add(p, outfile, cmd) mm.write()
python
def kmc(args): """ %prog kmc folder Run kmc3 on Illumina reads. """ p = OptionParser(kmc.__doc__) p.add_option("-k", default=21, type="int", help="Kmer size") p.add_option("--ci", default=2, type="int", help="Exclude kmers with less than ci counts") p.add_option("--cs", default=2, type="int", help="Maximal value of a counter") p.add_option("--cx", default=None, type="int", help="Exclude kmers with more than cx counts") p.add_option("--single", default=False, action="store_true", help="Input is single-end data, only one FASTQ/FASTA") p.add_option("--fasta", default=False, action="store_true", help="Input is FASTA instead of FASTQ") p.set_cpus() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) folder, = args K = opts.k n = 1 if opts.single else 2 pattern = "*.fa,*.fa.gz,*.fasta,*.fasta.gz" if opts.fasta else \ "*.fq,*.fq.gz,*.fastq,*.fastq.gz" mm = MakeManager() for p, pf in iter_project(folder, pattern=pattern, n=n, commonprefix=False): pf = pf.split("_")[0] + ".ms{}".format(K) infiles = pf + ".infiles" fw = open(infiles, "w") print("\n".join(p), file=fw) fw.close() cmd = "kmc -k{} -m64 -t{}".format(K, opts.cpus) cmd += " -ci{} -cs{}".format(opts.ci, opts.cs) if opts.cx: cmd += " -cx{}".format(opts.cx) if opts.fasta: cmd += " -fm" cmd += " @{} {} .".format(infiles, pf) outfile = pf + ".kmc_suf" mm.add(p, outfile, cmd) mm.write()
[ "def", "kmc", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "kmc", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"-k\"", ",", "default", "=", "21", ",", "type", "=", "\"int\"", ",", "help", "=", "\"Kmer size\"", ")", "p", ".", "add_op...
%prog kmc folder Run kmc3 on Illumina reads.
[ "%prog", "kmc", "folder" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/kmer.py#L359-L408
train
200,625
tanghaibao/jcvi
jcvi/assembly/kmer.py
meryl
def meryl(args): """ %prog meryl folder Run meryl on Illumina reads. """ p = OptionParser(meryl.__doc__) p.add_option("-k", default=19, type="int", help="Kmer size") p.set_cpus() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) folder, = args K = opts.k cpus = opts.cpus mm = MakeManager() for p, pf in iter_project(folder): cmds = [] mss = [] for i, ip in enumerate(p): ms = "{}{}.ms{}".format(pf, i + 1, K) mss.append(ms) cmd = "meryl -B -C -m {} -threads {}".format(K, cpus) cmd += " -s {} -o {}".format(ip, ms) cmds.append(cmd) ams, bms = mss pms = "{}.ms{}".format(pf, K) cmd = "meryl -M add -s {} -s {} -o {}".format(ams, bms, pms) cmds.append(cmd) cmd = "rm -f {}.mcdat {}.mcidx {}.mcdat {}.mcidx".\ format(ams, ams, bms, bms) cmds.append(cmd) mm.add(p, pms + ".mcdat", cmds) mm.write()
python
def meryl(args): """ %prog meryl folder Run meryl on Illumina reads. """ p = OptionParser(meryl.__doc__) p.add_option("-k", default=19, type="int", help="Kmer size") p.set_cpus() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) folder, = args K = opts.k cpus = opts.cpus mm = MakeManager() for p, pf in iter_project(folder): cmds = [] mss = [] for i, ip in enumerate(p): ms = "{}{}.ms{}".format(pf, i + 1, K) mss.append(ms) cmd = "meryl -B -C -m {} -threads {}".format(K, cpus) cmd += " -s {} -o {}".format(ip, ms) cmds.append(cmd) ams, bms = mss pms = "{}.ms{}".format(pf, K) cmd = "meryl -M add -s {} -s {} -o {}".format(ams, bms, pms) cmds.append(cmd) cmd = "rm -f {}.mcdat {}.mcidx {}.mcdat {}.mcidx".\ format(ams, ams, bms, bms) cmds.append(cmd) mm.add(p, pms + ".mcdat", cmds) mm.write()
[ "def", "meryl", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "meryl", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"-k\"", ",", "default", "=", "19", ",", "type", "=", "\"int\"", ",", "help", "=", "\"Kmer size\"", ")", "p", ".", "se...
%prog meryl folder Run meryl on Illumina reads.
[ "%prog", "meryl", "folder" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/kmer.py#L411-L447
train
200,626
tanghaibao/jcvi
jcvi/assembly/kmer.py
model
def model(args): """ %prog model erate Model kmer distribution given error rate. See derivation in FIONA paper: <http://bioinformatics.oxfordjournals.org/content/30/17/i356.full> """ from scipy.stats import binom, poisson p = OptionParser(model.__doc__) p.add_option("-k", default=23, type="int", help="Kmer size") p.add_option("--cov", default=50, type="int", help="Expected coverage") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) erate, = args erate = float(erate) cov = opts.cov k = opts.k xy = [] # Range include c although it is unclear what it means to have c=0 for c in xrange(0, cov * 2 + 1): Prob_Yk = 0 for i in xrange(k + 1): # Probability of having exactly i errors pi_i = binom.pmf(i, k, erate) # Expected coverage of kmer with exactly i errors mu_i = cov * (erate / 3) ** i * (1 - erate) ** (k - i) # Probability of seeing coverage of c Prob_Yk_i = poisson.pmf(c, mu_i) # Sum i over 0, 1, ... up to k errors Prob_Yk += pi_i * Prob_Yk_i xy.append((c, Prob_Yk)) x, y = zip(*xy) asciiplot(x, y, title="Model")
python
def model(args): """ %prog model erate Model kmer distribution given error rate. See derivation in FIONA paper: <http://bioinformatics.oxfordjournals.org/content/30/17/i356.full> """ from scipy.stats import binom, poisson p = OptionParser(model.__doc__) p.add_option("-k", default=23, type="int", help="Kmer size") p.add_option("--cov", default=50, type="int", help="Expected coverage") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) erate, = args erate = float(erate) cov = opts.cov k = opts.k xy = [] # Range include c although it is unclear what it means to have c=0 for c in xrange(0, cov * 2 + 1): Prob_Yk = 0 for i in xrange(k + 1): # Probability of having exactly i errors pi_i = binom.pmf(i, k, erate) # Expected coverage of kmer with exactly i errors mu_i = cov * (erate / 3) ** i * (1 - erate) ** (k - i) # Probability of seeing coverage of c Prob_Yk_i = poisson.pmf(c, mu_i) # Sum i over 0, 1, ... up to k errors Prob_Yk += pi_i * Prob_Yk_i xy.append((c, Prob_Yk)) x, y = zip(*xy) asciiplot(x, y, title="Model")
[ "def", "model", "(", "args", ")", ":", "from", "scipy", ".", "stats", "import", "binom", ",", "poisson", "p", "=", "OptionParser", "(", "model", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"-k\"", ",", "default", "=", "23", ",", "type", "=", ...
%prog model erate Model kmer distribution given error rate. See derivation in FIONA paper: <http://bioinformatics.oxfordjournals.org/content/30/17/i356.full>
[ "%prog", "model", "erate" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/kmer.py#L450-L488
train
200,627
tanghaibao/jcvi
jcvi/assembly/kmer.py
logodds
def logodds(args): """ %prog logodds cnt1 cnt2 Compute log likelihood between two db. """ from math import log from jcvi.formats.base import DictFile p = OptionParser(logodds.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) cnt1, cnt2 = args d = DictFile(cnt2) fp = open(cnt1) for row in fp: scf, c1 = row.split() c2 = d[scf] c1, c2 = float(c1), float(c2) c1 += 1 c2 += 1 score = int(100 * (log(c1) - log(c2))) print("{0}\t{1}".format(scf, score))
python
def logodds(args): """ %prog logodds cnt1 cnt2 Compute log likelihood between two db. """ from math import log from jcvi.formats.base import DictFile p = OptionParser(logodds.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) cnt1, cnt2 = args d = DictFile(cnt2) fp = open(cnt1) for row in fp: scf, c1 = row.split() c2 = d[scf] c1, c2 = float(c1), float(c2) c1 += 1 c2 += 1 score = int(100 * (log(c1) - log(c2))) print("{0}\t{1}".format(scf, score))
[ "def", "logodds", "(", "args", ")", ":", "from", "math", "import", "log", "from", "jcvi", ".", "formats", ".", "base", "import", "DictFile", "p", "=", "OptionParser", "(", "logodds", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args"...
%prog logodds cnt1 cnt2 Compute log likelihood between two db.
[ "%prog", "logodds", "cnt1", "cnt2" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/kmer.py#L491-L516
train
200,628
tanghaibao/jcvi
jcvi/assembly/kmer.py
count
def count(args): """ %prog count fastafile jf.db Run dump - jellyfish - bin - bincount in serial. """ from bitarray import bitarray p = OptionParser(count.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) fastafile, jfdb = args K = get_K(jfdb) cmd = "jellyfish query {0} -C | cut -d' ' -f 2".format(jfdb) t = must_open("tmp", "w") proc = Popen(cmd, stdin=PIPE, stdout=t) t.flush() f = Fasta(fastafile, lazy=True) for name, rec in f.iteritems_ordered(): kmers = list(make_kmers(rec.seq, K)) print("\n".join(kmers), file=proc.stdin) proc.stdin.close() logging.debug(cmd) proc.wait() a = bitarray() binfile = ".".join((fastafile, jfdb, "bin")) fw = open(binfile, "w") t.seek(0) for row in t: c = row.strip() a.append(int(c)) a.tofile(fw) logging.debug("Serialize {0} bits to `{1}`.".format(len(a), binfile)) fw.close() sh("rm {0}".format(t.name)) logging.debug("Shared K-mers (K={0}) between `{1}` and `{2}` written to `{3}`.".\ format(K, fastafile, jfdb, binfile)) cntfile = ".".join((fastafile, jfdb, "cnt")) bincount([fastafile, binfile, "-o", cntfile, "-K {0}".format(K)]) logging.debug("Shared K-mer counts written to `{0}`.".format(cntfile))
python
def count(args): """ %prog count fastafile jf.db Run dump - jellyfish - bin - bincount in serial. """ from bitarray import bitarray p = OptionParser(count.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) fastafile, jfdb = args K = get_K(jfdb) cmd = "jellyfish query {0} -C | cut -d' ' -f 2".format(jfdb) t = must_open("tmp", "w") proc = Popen(cmd, stdin=PIPE, stdout=t) t.flush() f = Fasta(fastafile, lazy=True) for name, rec in f.iteritems_ordered(): kmers = list(make_kmers(rec.seq, K)) print("\n".join(kmers), file=proc.stdin) proc.stdin.close() logging.debug(cmd) proc.wait() a = bitarray() binfile = ".".join((fastafile, jfdb, "bin")) fw = open(binfile, "w") t.seek(0) for row in t: c = row.strip() a.append(int(c)) a.tofile(fw) logging.debug("Serialize {0} bits to `{1}`.".format(len(a), binfile)) fw.close() sh("rm {0}".format(t.name)) logging.debug("Shared K-mers (K={0}) between `{1}` and `{2}` written to `{3}`.".\ format(K, fastafile, jfdb, binfile)) cntfile = ".".join((fastafile, jfdb, "cnt")) bincount([fastafile, binfile, "-o", cntfile, "-K {0}".format(K)]) logging.debug("Shared K-mer counts written to `{0}`.".format(cntfile))
[ "def", "count", "(", "args", ")", ":", "from", "bitarray", "import", "bitarray", "p", "=", "OptionParser", "(", "count", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "...
%prog count fastafile jf.db Run dump - jellyfish - bin - bincount in serial.
[ "%prog", "count", "fastafile", "jf", ".", "db" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/kmer.py#L528-L573
train
200,629
tanghaibao/jcvi
jcvi/assembly/kmer.py
bincount
def bincount(args): """ %prog bincount fastafile binfile Count K-mers in the bin. """ from bitarray import bitarray from jcvi.formats.sizes import Sizes p = OptionParser(bincount.__doc__) p.add_option("-K", default=23, type="int", help="K-mer size [default: %default]") p.set_outfile() opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) fastafile, binfile = args K = opts.K fp = open(binfile) a = bitarray() a.fromfile(fp) f = Sizes(fastafile) tsize = 0 fw = must_open(opts.outfile, "w") for name, seqlen in f.iter_sizes(): ksize = seqlen - K + 1 b = a[tsize: tsize + ksize] bcount = b.count() print("\t".join(str(x) for x in (name, bcount)), file=fw) tsize += ksize
python
def bincount(args): """ %prog bincount fastafile binfile Count K-mers in the bin. """ from bitarray import bitarray from jcvi.formats.sizes import Sizes p = OptionParser(bincount.__doc__) p.add_option("-K", default=23, type="int", help="K-mer size [default: %default]") p.set_outfile() opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) fastafile, binfile = args K = opts.K fp = open(binfile) a = bitarray() a.fromfile(fp) f = Sizes(fastafile) tsize = 0 fw = must_open(opts.outfile, "w") for name, seqlen in f.iter_sizes(): ksize = seqlen - K + 1 b = a[tsize: tsize + ksize] bcount = b.count() print("\t".join(str(x) for x in (name, bcount)), file=fw) tsize += ksize
[ "def", "bincount", "(", "args", ")", ":", "from", "bitarray", "import", "bitarray", "from", "jcvi", ".", "formats", ".", "sizes", "import", "Sizes", "p", "=", "OptionParser", "(", "bincount", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"-K\"", ",...
%prog bincount fastafile binfile Count K-mers in the bin.
[ "%prog", "bincount", "fastafile", "binfile" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/kmer.py#L576-L608
train
200,630
tanghaibao/jcvi
jcvi/assembly/kmer.py
bin
def bin(args): """ %prog bin filename filename.bin Serialize counts to bitarrays. """ from bitarray import bitarray p = OptionParser(bin.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) inp, outp = args fp = must_open(inp) fw = must_open(outp, "w") a = bitarray() for row in fp: c = row.split()[-1] a.append(int(c)) a.tofile(fw) fw.close()
python
def bin(args): """ %prog bin filename filename.bin Serialize counts to bitarrays. """ from bitarray import bitarray p = OptionParser(bin.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) inp, outp = args fp = must_open(inp) fw = must_open(outp, "w") a = bitarray() for row in fp: c = row.split()[-1] a.append(int(c)) a.tofile(fw) fw.close()
[ "def", "bin", "(", "args", ")", ":", "from", "bitarray", "import", "bitarray", "p", "=", "OptionParser", "(", "bin", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "2", ...
%prog bin filename filename.bin Serialize counts to bitarrays.
[ "%prog", "bin", "filename", "filename", ".", "bin" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/kmer.py#L611-L632
train
200,631
tanghaibao/jcvi
jcvi/assembly/kmer.py
dump
def dump(args): """ %prog dump fastafile Convert FASTA sequences to list of K-mers. """ p = OptionParser(dump.__doc__) p.add_option("-K", default=23, type="int", help="K-mer size [default: %default]") p.set_outfile() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) fastafile, = args K = opts.K fw = must_open(opts.outfile, "w") f = Fasta(fastafile, lazy=True) for name, rec in f.iteritems_ordered(): kmers = list(make_kmers(rec.seq, K)) print("\n".join(kmers), file=fw) fw.close()
python
def dump(args): """ %prog dump fastafile Convert FASTA sequences to list of K-mers. """ p = OptionParser(dump.__doc__) p.add_option("-K", default=23, type="int", help="K-mer size [default: %default]") p.set_outfile() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) fastafile, = args K = opts.K fw = must_open(opts.outfile, "w") f = Fasta(fastafile, lazy=True) for name, rec in f.iteritems_ordered(): kmers = list(make_kmers(rec.seq, K)) print("\n".join(kmers), file=fw) fw.close()
[ "def", "dump", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "dump", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"-K\"", ",", "default", "=", "23", ",", "type", "=", "\"int\"", ",", "help", "=", "\"K-mer size [default: %default]\"", ")", ...
%prog dump fastafile Convert FASTA sequences to list of K-mers.
[ "%prog", "dump", "fastafile" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/kmer.py#L642-L664
train
200,632
tanghaibao/jcvi
jcvi/apps/align.py
nucmer
def nucmer(args): """ %prog nucmer ref.fasta query.fasta Run NUCMER using query against reference. Parallel implementation derived from: <https://github.com/fritzsedlazeck/sge_mummer> """ from itertools import product from jcvi.apps.grid import MakeManager from jcvi.formats.base import split p = OptionParser(nucmer.__doc__) p.add_option("--chunks", type="int", help="Split both query and subject into chunks") p.set_params(prog="nucmer", params="-l 100 -c 500") p.set_cpus() opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) ref, query = args cpus = opts.cpus nrefs = nqueries = opts.chunks or int(cpus ** .5) refdir = ref.split(".")[0] + "-outdir" querydir = query.split(".")[0] + "-outdir" reflist = split([ref, refdir, str(nrefs)]).names querylist = split([query, querydir, str(nqueries)]).names mm = MakeManager() for i, (r, q) in enumerate(product(reflist, querylist)): pf = "{0:04d}".format(i) cmd = "nucmer -maxmatch" cmd += " {0}".format(opts.extra) cmd += " {0} {1} -p {2}".format(r, q, pf) deltafile = pf + ".delta" mm.add((r, q), deltafile, cmd) print(cmd) mm.write()
python
def nucmer(args): """ %prog nucmer ref.fasta query.fasta Run NUCMER using query against reference. Parallel implementation derived from: <https://github.com/fritzsedlazeck/sge_mummer> """ from itertools import product from jcvi.apps.grid import MakeManager from jcvi.formats.base import split p = OptionParser(nucmer.__doc__) p.add_option("--chunks", type="int", help="Split both query and subject into chunks") p.set_params(prog="nucmer", params="-l 100 -c 500") p.set_cpus() opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) ref, query = args cpus = opts.cpus nrefs = nqueries = opts.chunks or int(cpus ** .5) refdir = ref.split(".")[0] + "-outdir" querydir = query.split(".")[0] + "-outdir" reflist = split([ref, refdir, str(nrefs)]).names querylist = split([query, querydir, str(nqueries)]).names mm = MakeManager() for i, (r, q) in enumerate(product(reflist, querylist)): pf = "{0:04d}".format(i) cmd = "nucmer -maxmatch" cmd += " {0}".format(opts.extra) cmd += " {0} {1} -p {2}".format(r, q, pf) deltafile = pf + ".delta" mm.add((r, q), deltafile, cmd) print(cmd) mm.write()
[ "def", "nucmer", "(", "args", ")", ":", "from", "itertools", "import", "product", "from", "jcvi", ".", "apps", ".", "grid", "import", "MakeManager", "from", "jcvi", ".", "formats", ".", "base", "import", "split", "p", "=", "OptionParser", "(", "nucmer", ...
%prog nucmer ref.fasta query.fasta Run NUCMER using query against reference. Parallel implementation derived from: <https://github.com/fritzsedlazeck/sge_mummer>
[ "%prog", "nucmer", "ref", ".", "fasta", "query", ".", "fasta" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/align.py#L118-L158
train
200,633
tanghaibao/jcvi
jcvi/apps/align.py
blasr
def blasr(args): """ %prog blasr ref.fasta fofn Run blasr on a set of PacBio reads. This is based on a divide-and-conquer strategy described below. """ from jcvi.apps.grid import MakeManager from jcvi.utils.iter import grouper p = OptionParser(blasr.__doc__) p.set_cpus(cpus=8) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) reffasta, fofn = args flist = sorted([x.strip() for x in open(fofn)]) h5list = [] mm = MakeManager() for i, fl in enumerate(grouper(flist, 3)): chunkname = "chunk{0:03d}".format(i) fn = chunkname + ".fofn" h5 = chunkname + ".cmp.h5" fw = open(fn, "w") print("\n".join(fl), file=fw) fw.close() cmd = "pbalign {0} {1} {2}".format(fn, reffasta, h5) cmd += " --nproc {0} --forQuiver --tmpDir .".format(opts.cpus) mm.add((fn, reffasta), h5, cmd) h5list.append(h5) # Merge h5, sort and repack allh5 = "all.cmp.h5" tmph5 = "tmp.cmp.h5" cmd_merge = "cmph5tools.py merge --outFile {0}".format(allh5) cmd_merge += " " + " ".join(h5list) cmd_sort = "cmph5tools.py sort --deep {0} --tmpDir .".format(allh5) cmd_repack = "h5repack -f GZIP=1 {0} {1}".format(allh5, tmph5) cmd_repack += " && mv {0} {1}".format(tmph5, allh5) mm.add(h5list, allh5, [cmd_merge, cmd_sort, cmd_repack]) # Quiver pf = reffasta.rsplit(".", 1)[0] variantsgff = pf + ".variants.gff" consensusfasta = pf + ".consensus.fasta" cmd_faidx = "samtools faidx {0}".format(reffasta) cmd = "quiver -j 32 {0}".format(allh5) cmd += " -r {0} -o {1} -o {2}".format(reffasta, variantsgff, consensusfasta) mm.add(allh5, consensusfasta, [cmd_faidx, cmd]) mm.write()
python
def blasr(args): """ %prog blasr ref.fasta fofn Run blasr on a set of PacBio reads. This is based on a divide-and-conquer strategy described below. """ from jcvi.apps.grid import MakeManager from jcvi.utils.iter import grouper p = OptionParser(blasr.__doc__) p.set_cpus(cpus=8) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) reffasta, fofn = args flist = sorted([x.strip() for x in open(fofn)]) h5list = [] mm = MakeManager() for i, fl in enumerate(grouper(flist, 3)): chunkname = "chunk{0:03d}".format(i) fn = chunkname + ".fofn" h5 = chunkname + ".cmp.h5" fw = open(fn, "w") print("\n".join(fl), file=fw) fw.close() cmd = "pbalign {0} {1} {2}".format(fn, reffasta, h5) cmd += " --nproc {0} --forQuiver --tmpDir .".format(opts.cpus) mm.add((fn, reffasta), h5, cmd) h5list.append(h5) # Merge h5, sort and repack allh5 = "all.cmp.h5" tmph5 = "tmp.cmp.h5" cmd_merge = "cmph5tools.py merge --outFile {0}".format(allh5) cmd_merge += " " + " ".join(h5list) cmd_sort = "cmph5tools.py sort --deep {0} --tmpDir .".format(allh5) cmd_repack = "h5repack -f GZIP=1 {0} {1}".format(allh5, tmph5) cmd_repack += " && mv {0} {1}".format(tmph5, allh5) mm.add(h5list, allh5, [cmd_merge, cmd_sort, cmd_repack]) # Quiver pf = reffasta.rsplit(".", 1)[0] variantsgff = pf + ".variants.gff" consensusfasta = pf + ".consensus.fasta" cmd_faidx = "samtools faidx {0}".format(reffasta) cmd = "quiver -j 32 {0}".format(allh5) cmd += " -r {0} -o {1} -o {2}".format(reffasta, variantsgff, consensusfasta) mm.add(allh5, consensusfasta, [cmd_faidx, cmd]) mm.write()
[ "def", "blasr", "(", "args", ")", ":", "from", "jcvi", ".", "apps", ".", "grid", "import", "MakeManager", "from", "jcvi", ".", "utils", ".", "iter", "import", "grouper", "p", "=", "OptionParser", "(", "blasr", ".", "__doc__", ")", "p", ".", "set_cpus",...
%prog blasr ref.fasta fofn Run blasr on a set of PacBio reads. This is based on a divide-and-conquer strategy described below.
[ "%prog", "blasr", "ref", ".", "fasta", "fofn" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/align.py#L161-L214
train
200,634
tanghaibao/jcvi
jcvi/apps/align.py
blat
def blat(args): """ %prog blat ref.fasta query.fasta Calls blat and filters BLAST hits. """ p = OptionParser(blat.__doc__) p.set_align(pctid=95, hitlen=30) p.set_cpus() opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) reffasta, queryfasta = args blastfile = get_outfile(reffasta, queryfasta, suffix="blat") run_blat(infile=queryfasta, outfile=blastfile, db=reffasta, pctid=opts.pctid, hitlen=opts.hitlen, cpus=opts.cpus, overwrite=False) return blastfile
python
def blat(args): """ %prog blat ref.fasta query.fasta Calls blat and filters BLAST hits. """ p = OptionParser(blat.__doc__) p.set_align(pctid=95, hitlen=30) p.set_cpus() opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) reffasta, queryfasta = args blastfile = get_outfile(reffasta, queryfasta, suffix="blat") run_blat(infile=queryfasta, outfile=blastfile, db=reffasta, pctid=opts.pctid, hitlen=opts.hitlen, cpus=opts.cpus, overwrite=False) return blastfile
[ "def", "blat", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "blat", ".", "__doc__", ")", "p", ".", "set_align", "(", "pctid", "=", "95", ",", "hitlen", "=", "30", ")", "p", ".", "set_cpus", "(", ")", "opts", ",", "args", "=", "p", ".",...
%prog blat ref.fasta query.fasta Calls blat and filters BLAST hits.
[ "%prog", "blat", "ref", ".", "fasta", "query", ".", "fasta" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/align.py#L223-L244
train
200,635
tanghaibao/jcvi
jcvi/apps/align.py
blast
def blast(args): """ %prog blast ref.fasta query.fasta Calls blast and then filter the BLAST hits. Default is megablast. """ task_choices = ("blastn", "blastn-short", "dc-megablast", \ "megablast", "vecscreen") p = OptionParser(blast.__doc__) p.set_align(pctid=0, evalue=.01) p.add_option("--wordsize", type="int", help="Word size [default: %default]") p.add_option("--best", default=1, type="int", help="Only look for best N hits [default: %default]") p.add_option("--task", default="megablast", choices=task_choices, help="Task of the blastn [default: %default]") p.set_cpus() opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) reffasta, queryfasta = args blastfile = get_outfile(reffasta, queryfasta) run_megablast(infile=queryfasta, outfile=blastfile, db=reffasta, wordsize=opts.wordsize, pctid=opts.pctid, evalue=opts.evalue, hitlen=None, best=opts.best, task=opts.task, cpus=opts.cpus) return blastfile
python
def blast(args): """ %prog blast ref.fasta query.fasta Calls blast and then filter the BLAST hits. Default is megablast. """ task_choices = ("blastn", "blastn-short", "dc-megablast", \ "megablast", "vecscreen") p = OptionParser(blast.__doc__) p.set_align(pctid=0, evalue=.01) p.add_option("--wordsize", type="int", help="Word size [default: %default]") p.add_option("--best", default=1, type="int", help="Only look for best N hits [default: %default]") p.add_option("--task", default="megablast", choices=task_choices, help="Task of the blastn [default: %default]") p.set_cpus() opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) reffasta, queryfasta = args blastfile = get_outfile(reffasta, queryfasta) run_megablast(infile=queryfasta, outfile=blastfile, db=reffasta, wordsize=opts.wordsize, pctid=opts.pctid, evalue=opts.evalue, hitlen=None, best=opts.best, task=opts.task, cpus=opts.cpus) return blastfile
[ "def", "blast", "(", "args", ")", ":", "task_choices", "=", "(", "\"blastn\"", ",", "\"blastn-short\"", ",", "\"dc-megablast\"", ",", "\"megablast\"", ",", "\"vecscreen\"", ")", "p", "=", "OptionParser", "(", "blast", ".", "__doc__", ")", "p", ".", "set_alig...
%prog blast ref.fasta query.fasta Calls blast and then filter the BLAST hits. Default is megablast.
[ "%prog", "blast", "ref", ".", "fasta", "query", ".", "fasta" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/align.py#L247-L275
train
200,636
tanghaibao/jcvi
jcvi/apps/align.py
lastgenome
def lastgenome(args): """ %prog genome_A.fasta genome_B.fasta Run LAST by calling LASTDB, LASTAL and LAST-SPLIT. The recipe is based on tutorial here: <https://github.com/mcfrith/last-genome-alignments> The script runs the following steps: $ lastdb -P0 -uNEAR -R01 Chr10A-NEAR Chr10A.fa $ lastal -E0.05 -C2 Chr10A-NEAR Chr10B.fa | last-split -m1 | maf-swap | last-split -m1 -fMAF > Chr10A.Chr10B.1-1.maf $ maf-convert -n blasttab Chr10A.Chr10B.1-1.maf > Chr10A.Chr10B.1-1.blast Works with LAST v959. """ from jcvi.apps.grid import MakeManager p = OptionParser(lastgenome.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) gA, gB = args mm = MakeManager() bb = lambda x : op.basename(x).rsplit(".", 1)[0] gA_pf, gB_pf = bb(gA), bb(gB) # Build LASTDB dbname = "-".join((gA_pf, "NEAR")) dbfile = dbname + ".suf" build_db_cmd = "lastdb -P0 -uNEAR -R01 {} {}".format(dbfile, gA) mm.add(gA, dbfile, build_db_cmd) # Run LASTAL maffile = "{}.{}.1-1.maf".format(gA_pf, gB_pf) lastal_cmd = "lastal -E0.05 -C2 {} {}".format(dbname, gB) lastal_cmd += " | last-split -m1" lastal_cmd += " | maf-swap" lastal_cmd += " | last-split -m1 -fMAF > {}".format(maffile) mm.add([dbfile, gB], maffile, lastal_cmd) # Convert to BLAST format blastfile = maffile.replace(".maf", ".blast") convert_cmd = "maf-convert -n blasttab {} > {}".format(maffile, blastfile) mm.add(maffile, blastfile, convert_cmd) mm.write()
python
def lastgenome(args): """ %prog genome_A.fasta genome_B.fasta Run LAST by calling LASTDB, LASTAL and LAST-SPLIT. The recipe is based on tutorial here: <https://github.com/mcfrith/last-genome-alignments> The script runs the following steps: $ lastdb -P0 -uNEAR -R01 Chr10A-NEAR Chr10A.fa $ lastal -E0.05 -C2 Chr10A-NEAR Chr10B.fa | last-split -m1 | maf-swap | last-split -m1 -fMAF > Chr10A.Chr10B.1-1.maf $ maf-convert -n blasttab Chr10A.Chr10B.1-1.maf > Chr10A.Chr10B.1-1.blast Works with LAST v959. """ from jcvi.apps.grid import MakeManager p = OptionParser(lastgenome.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) gA, gB = args mm = MakeManager() bb = lambda x : op.basename(x).rsplit(".", 1)[0] gA_pf, gB_pf = bb(gA), bb(gB) # Build LASTDB dbname = "-".join((gA_pf, "NEAR")) dbfile = dbname + ".suf" build_db_cmd = "lastdb -P0 -uNEAR -R01 {} {}".format(dbfile, gA) mm.add(gA, dbfile, build_db_cmd) # Run LASTAL maffile = "{}.{}.1-1.maf".format(gA_pf, gB_pf) lastal_cmd = "lastal -E0.05 -C2 {} {}".format(dbname, gB) lastal_cmd += " | last-split -m1" lastal_cmd += " | maf-swap" lastal_cmd += " | last-split -m1 -fMAF > {}".format(maffile) mm.add([dbfile, gB], maffile, lastal_cmd) # Convert to BLAST format blastfile = maffile.replace(".maf", ".blast") convert_cmd = "maf-convert -n blasttab {} > {}".format(maffile, blastfile) mm.add(maffile, blastfile, convert_cmd) mm.write()
[ "def", "lastgenome", "(", "args", ")", ":", "from", "jcvi", ".", "apps", ".", "grid", "import", "MakeManager", "p", "=", "OptionParser", "(", "lastgenome", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", ...
%prog genome_A.fasta genome_B.fasta Run LAST by calling LASTDB, LASTAL and LAST-SPLIT. The recipe is based on tutorial here: <https://github.com/mcfrith/last-genome-alignments> The script runs the following steps: $ lastdb -P0 -uNEAR -R01 Chr10A-NEAR Chr10A.fa $ lastal -E0.05 -C2 Chr10A-NEAR Chr10B.fa | last-split -m1 | maf-swap | last-split -m1 -fMAF > Chr10A.Chr10B.1-1.maf $ maf-convert -n blasttab Chr10A.Chr10B.1-1.maf > Chr10A.Chr10B.1-1.blast Works with LAST v959.
[ "%prog", "genome_A", ".", "fasta", "genome_B", ".", "fasta" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/align.py#L278-L326
train
200,637
tanghaibao/jcvi
jcvi/apps/align.py
last
def last(args, dbtype=None): """ %prog database.fasta query.fasta Run LAST by calling LASTDB and LASTAL. LAST program available: <http://last.cbrc.jp> Works with LAST-719. """ p = OptionParser(last.__doc__) p.add_option("--dbtype", default="nucl", choices=("nucl", "prot"), help="Molecule type of subject database") p.add_option("--path", help="Specify LAST path") p.add_option("--mask", default=False, action="store_true", help="Invoke -c in lastdb") p.add_option("--format", default="BlastTab", choices=("TAB", "MAF", "BlastTab", "BlastTab+"), help="Output format") p.add_option("--minlen", default=0, type="int", help="Filter alignments by how many bases match") p.add_option("--minid", default=0, type="int", help="Minimum sequence identity") p.set_cpus() p.set_params() opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) subject, query = args path = opts.path cpus = opts.cpus if not dbtype: dbtype = opts.dbtype getpath = lambda x: op.join(path, x) if path else x lastdb_bin = getpath("lastdb") lastal_bin = getpath("lastal") subjectdb = subject.rsplit(".", 1)[0] run_lastdb(infile=subject, outfile=subjectdb + ".prj", mask=opts.mask, \ lastdb_bin=lastdb_bin, dbtype=dbtype) u = 2 if opts.mask else 0 cmd = "{0} -u {1}".format(lastal_bin, u) cmd += " -P {0} -i3G".format(cpus) cmd += " -f {0}".format(opts.format) cmd += " {0} {1}".format(subjectdb, query) minlen = opts.minlen minid = opts.minid extra = opts.extra assert minid != 100, "Perfect match not yet supported" mm = minid / (100 - minid) if minlen: extra += " -e{0}".format(minlen) if minid: extra += " -r1 -q{0} -a{0} -b{0}".format(mm) if extra: cmd += " " + extra.strip() lastfile = get_outfile(subject, query, suffix="last") sh(cmd, outfile=lastfile)
python
def last(args, dbtype=None): """ %prog database.fasta query.fasta Run LAST by calling LASTDB and LASTAL. LAST program available: <http://last.cbrc.jp> Works with LAST-719. """ p = OptionParser(last.__doc__) p.add_option("--dbtype", default="nucl", choices=("nucl", "prot"), help="Molecule type of subject database") p.add_option("--path", help="Specify LAST path") p.add_option("--mask", default=False, action="store_true", help="Invoke -c in lastdb") p.add_option("--format", default="BlastTab", choices=("TAB", "MAF", "BlastTab", "BlastTab+"), help="Output format") p.add_option("--minlen", default=0, type="int", help="Filter alignments by how many bases match") p.add_option("--minid", default=0, type="int", help="Minimum sequence identity") p.set_cpus() p.set_params() opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) subject, query = args path = opts.path cpus = opts.cpus if not dbtype: dbtype = opts.dbtype getpath = lambda x: op.join(path, x) if path else x lastdb_bin = getpath("lastdb") lastal_bin = getpath("lastal") subjectdb = subject.rsplit(".", 1)[0] run_lastdb(infile=subject, outfile=subjectdb + ".prj", mask=opts.mask, \ lastdb_bin=lastdb_bin, dbtype=dbtype) u = 2 if opts.mask else 0 cmd = "{0} -u {1}".format(lastal_bin, u) cmd += " -P {0} -i3G".format(cpus) cmd += " -f {0}".format(opts.format) cmd += " {0} {1}".format(subjectdb, query) minlen = opts.minlen minid = opts.minid extra = opts.extra assert minid != 100, "Perfect match not yet supported" mm = minid / (100 - minid) if minlen: extra += " -e{0}".format(minlen) if minid: extra += " -r1 -q{0} -a{0} -b{0}".format(mm) if extra: cmd += " " + extra.strip() lastfile = get_outfile(subject, query, suffix="last") sh(cmd, outfile=lastfile)
[ "def", "last", "(", "args", ",", "dbtype", "=", "None", ")", ":", "p", "=", "OptionParser", "(", "last", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--dbtype\"", ",", "default", "=", "\"nucl\"", ",", "choices", "=", "(", "\"nucl\"", ",", "\"...
%prog database.fasta query.fasta Run LAST by calling LASTDB and LASTAL. LAST program available: <http://last.cbrc.jp> Works with LAST-719.
[ "%prog", "database", ".", "fasta", "query", ".", "fasta" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/align.py#L338-L400
train
200,638
tanghaibao/jcvi
jcvi/assembly/soap.py
fillstats
def fillstats(args): """ %prog fillstats genome.fill Build stats on .fill file from GapCloser. """ from jcvi.utils.cbook import SummaryStats, percentage, thousands p = OptionParser(fillstats.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) fillfile, = args fp = open(fillfile) scaffolds = 0 gaps = [] for row in fp: if row[0] == ">": scaffolds += 1 continue fl = FillLine(row) gaps.append(fl) print("{0} scaffolds in total".format(scaffolds), file=sys.stderr) closed = [x for x in gaps if x.closed] closedbp = sum(x.before for x in closed) notClosed = [x for x in gaps if not x.closed] notClosedbp = sum(x.before for x in notClosed) totalgaps = len(closed) + len(notClosed) print("Closed gaps: {0} size: {1} bp".\ format(percentage(len(closed), totalgaps), thousands(closedbp)), file=sys.stderr) ss = SummaryStats([x.after for x in closed]) print(ss, file=sys.stderr) ss = SummaryStats([x.delta for x in closed]) print("Delta:", ss, file=sys.stderr) print("Remaining gaps: {0} size: {1} bp".\ format(percentage(len(notClosed), totalgaps), thousands(notClosedbp)), file=sys.stderr) ss = SummaryStats([x.after for x in notClosed]) print(ss, file=sys.stderr)
python
def fillstats(args): """ %prog fillstats genome.fill Build stats on .fill file from GapCloser. """ from jcvi.utils.cbook import SummaryStats, percentage, thousands p = OptionParser(fillstats.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) fillfile, = args fp = open(fillfile) scaffolds = 0 gaps = [] for row in fp: if row[0] == ">": scaffolds += 1 continue fl = FillLine(row) gaps.append(fl) print("{0} scaffolds in total".format(scaffolds), file=sys.stderr) closed = [x for x in gaps if x.closed] closedbp = sum(x.before for x in closed) notClosed = [x for x in gaps if not x.closed] notClosedbp = sum(x.before for x in notClosed) totalgaps = len(closed) + len(notClosed) print("Closed gaps: {0} size: {1} bp".\ format(percentage(len(closed), totalgaps), thousands(closedbp)), file=sys.stderr) ss = SummaryStats([x.after for x in closed]) print(ss, file=sys.stderr) ss = SummaryStats([x.delta for x in closed]) print("Delta:", ss, file=sys.stderr) print("Remaining gaps: {0} size: {1} bp".\ format(percentage(len(notClosed), totalgaps), thousands(notClosedbp)), file=sys.stderr) ss = SummaryStats([x.after for x in notClosed]) print(ss, file=sys.stderr)
[ "def", "fillstats", "(", "args", ")", ":", "from", "jcvi", ".", "utils", ".", "cbook", "import", "SummaryStats", ",", "percentage", ",", "thousands", "p", "=", "OptionParser", "(", "fillstats", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "...
%prog fillstats genome.fill Build stats on .fill file from GapCloser.
[ "%prog", "fillstats", "genome", ".", "fill" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/soap.py#L169-L214
train
200,639
tanghaibao/jcvi
jcvi/formats/psl.py
bed
def bed(args): """ %prog bed pslfile Convert to bed format. """ p = OptionParser(bed.__doc__) p.set_outfile() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) pslfile, = args fw = must_open(opts.outfile, "w") psl = Psl(pslfile) for p in psl: print(p.bed12line, file=fw)
python
def bed(args): """ %prog bed pslfile Convert to bed format. """ p = OptionParser(bed.__doc__) p.set_outfile() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) pslfile, = args fw = must_open(opts.outfile, "w") psl = Psl(pslfile) for p in psl: print(p.bed12line, file=fw)
[ "def", "bed", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "bed", ".", "__doc__", ")", "p", ".", "set_outfile", "(", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "1", ":"...
%prog bed pslfile Convert to bed format.
[ "%prog", "bed", "pslfile" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/psl.py#L206-L225
train
200,640
tanghaibao/jcvi
jcvi/formats/psl.py
gff
def gff(args): """ %prog gff pslfile Convert to gff format. """ p = OptionParser(gff.__doc__) p.add_option("--source", default="GMAP", help="specify GFF source [default: %default]") p.add_option("--type", default="EST_match", help="specify GFF feature type [default: %default]") p.add_option("--suffix", default=".match", help="match ID suffix [default: \"%default\"]") p.add_option("--swap", default=False, action="store_true", help="swap query and target features [default: %default]") p.add_option("--simple_score", default=False, action="store_true", help="calculate a simple percent score [default: %default]") p.set_outfile() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) pslfile, = args fw = must_open(opts.outfile, "w") print("##gff-version 3", file=fw) psl = Psl(pslfile) for p in psl: if opts.swap: p.swap psl.trackMatches(p.qName) # switch from 0-origin to 1-origin p.qStart += 1 p.tStart += 1 print(p.gffline(source=opts.source, type=opts.type, suffix=opts.suffix, \ primary_tag="ID", alt_score=opts.simple_score, \ count=psl.getMatchCount(p.qName)), file=fw) # create an empty PslLine() object and load only # the targetName, queryName and strand info part = PslLine("\t".join(str(x) for x in [0] * p.nargs)) part.tName, part.qName, part.strand = p.tName, p.qName, p.strand nparts = len(p.qStarts) for n in xrange(nparts): part.qStart, part.tStart, aLen = p.qStarts[n] + 1, p.tStarts[n] + 1, p.blockSizes[n] part.qEnd = part.qStart + aLen - 1 part.tEnd = part.tStart + aLen - 1 if part.strand == "-": part.qStart = p.qSize - (p.qStarts[n] + p.blockSizes[n]) + 1 part.qEnd = p.qSize - p.qStarts[n] print(part.gffline(source=opts.source, suffix=opts.suffix, \ count=psl.getMatchCount(part.qName)), file=fw)
python
def gff(args): """ %prog gff pslfile Convert to gff format. """ p = OptionParser(gff.__doc__) p.add_option("--source", default="GMAP", help="specify GFF source [default: %default]") p.add_option("--type", default="EST_match", help="specify GFF feature type [default: %default]") p.add_option("--suffix", default=".match", help="match ID suffix [default: \"%default\"]") p.add_option("--swap", default=False, action="store_true", help="swap query and target features [default: %default]") p.add_option("--simple_score", default=False, action="store_true", help="calculate a simple percent score [default: %default]") p.set_outfile() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) pslfile, = args fw = must_open(opts.outfile, "w") print("##gff-version 3", file=fw) psl = Psl(pslfile) for p in psl: if opts.swap: p.swap psl.trackMatches(p.qName) # switch from 0-origin to 1-origin p.qStart += 1 p.tStart += 1 print(p.gffline(source=opts.source, type=opts.type, suffix=opts.suffix, \ primary_tag="ID", alt_score=opts.simple_score, \ count=psl.getMatchCount(p.qName)), file=fw) # create an empty PslLine() object and load only # the targetName, queryName and strand info part = PslLine("\t".join(str(x) for x in [0] * p.nargs)) part.tName, part.qName, part.strand = p.tName, p.qName, p.strand nparts = len(p.qStarts) for n in xrange(nparts): part.qStart, part.tStart, aLen = p.qStarts[n] + 1, p.tStarts[n] + 1, p.blockSizes[n] part.qEnd = part.qStart + aLen - 1 part.tEnd = part.tStart + aLen - 1 if part.strand == "-": part.qStart = p.qSize - (p.qStarts[n] + p.blockSizes[n]) + 1 part.qEnd = p.qSize - p.qStarts[n] print(part.gffline(source=opts.source, suffix=opts.suffix, \ count=psl.getMatchCount(part.qName)), file=fw)
[ "def", "gff", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "gff", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--source\"", ",", "default", "=", "\"GMAP\"", ",", "help", "=", "\"specify GFF source [default: %default]\"", ")", "p", ".", "ad...
%prog gff pslfile Convert to gff format.
[ "%prog", "gff", "pslfile" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/psl.py#L228-L286
train
200,641
tanghaibao/jcvi
jcvi/formats/psl.py
PslLine._isProtein
def _isProtein(self): """ check if blockSizes and scores are in the protein space or not """ last = self.blockCount - 1 return ((self.tEnd == self.tStarts[last] + 3 * self.blockSizes[last]) \ and self.strand == "+") or \ ((self.tStart == self.tSize - (self.tStarts[last] + 3 * self.blockSizes[last])\ and self.strand == "-"))
python
def _isProtein(self): """ check if blockSizes and scores are in the protein space or not """ last = self.blockCount - 1 return ((self.tEnd == self.tStarts[last] + 3 * self.blockSizes[last]) \ and self.strand == "+") or \ ((self.tStart == self.tSize - (self.tStarts[last] + 3 * self.blockSizes[last])\ and self.strand == "-"))
[ "def", "_isProtein", "(", "self", ")", ":", "last", "=", "self", ".", "blockCount", "-", "1", "return", "(", "(", "self", ".", "tEnd", "==", "self", ".", "tStarts", "[", "last", "]", "+", "3", "*", "self", ".", "blockSizes", "[", "last", "]", ")"...
check if blockSizes and scores are in the protein space or not
[ "check", "if", "blockSizes", "and", "scores", "are", "in", "the", "protein", "space", "or", "not" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/psl.py#L105-L113
train
200,642
tanghaibao/jcvi
jcvi/formats/psl.py
PslLine._milliBad
def _milliBad(self, ismRNA=False): """ calculate badness in parts per thousand i.e. number of non-identical matches """ sizeMult = self._sizeMult qAlnSize, tAlnSize = self.qspan * sizeMult, self.tspan alnSize = min(qAlnSize, tAlnSize) if alnSize <= 0: return 0 sizeDiff = qAlnSize - tAlnSize if sizeDiff < 0: sizeDiff = 0 if ismRNA else -sizeDiff insertFactor = self.qNumInsert if not ismRNA: insertFactor += self.tNumInsert total = (self.matches + self.repMatches + self.misMatches) * sizeMult return (1000 * (self.misMatches * sizeMult + insertFactor + \ round(3 * math.log(1 + sizeDiff)))) / total if total != 0 else 0
python
def _milliBad(self, ismRNA=False): """ calculate badness in parts per thousand i.e. number of non-identical matches """ sizeMult = self._sizeMult qAlnSize, tAlnSize = self.qspan * sizeMult, self.tspan alnSize = min(qAlnSize, tAlnSize) if alnSize <= 0: return 0 sizeDiff = qAlnSize - tAlnSize if sizeDiff < 0: sizeDiff = 0 if ismRNA else -sizeDiff insertFactor = self.qNumInsert if not ismRNA: insertFactor += self.tNumInsert total = (self.matches + self.repMatches + self.misMatches) * sizeMult return (1000 * (self.misMatches * sizeMult + insertFactor + \ round(3 * math.log(1 + sizeDiff)))) / total if total != 0 else 0
[ "def", "_milliBad", "(", "self", ",", "ismRNA", "=", "False", ")", ":", "sizeMult", "=", "self", ".", "_sizeMult", "qAlnSize", ",", "tAlnSize", "=", "self", ".", "qspan", "*", "sizeMult", ",", "self", ".", "tspan", "alnSize", "=", "min", "(", "qAlnSize...
calculate badness in parts per thousand i.e. number of non-identical matches
[ "calculate", "badness", "in", "parts", "per", "thousand", "i", ".", "e", ".", "number", "of", "non", "-", "identical", "matches" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/psl.py#L115-L138
train
200,643
tanghaibao/jcvi
jcvi/assembly/unitig.py
get_prefix
def get_prefix(dir="../"): """ Look for prefix.gkpStore in the upper directory. """ prefix = glob(dir + "*.gkpStore")[0] prefix = op.basename(prefix).rsplit(".", 1)[0] return prefix
python
def get_prefix(dir="../"): """ Look for prefix.gkpStore in the upper directory. """ prefix = glob(dir + "*.gkpStore")[0] prefix = op.basename(prefix).rsplit(".", 1)[0] return prefix
[ "def", "get_prefix", "(", "dir", "=", "\"../\"", ")", ":", "prefix", "=", "glob", "(", "dir", "+", "\"*.gkpStore\"", ")", "[", "0", "]", "prefix", "=", "op", ".", "basename", "(", "prefix", ")", ".", "rsplit", "(", "\".\"", ",", "1", ")", "[", "0...
Look for prefix.gkpStore in the upper directory.
[ "Look", "for", "prefix", ".", "gkpStore", "in", "the", "upper", "directory", "." ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/unitig.py#L144-L151
train
200,644
tanghaibao/jcvi
jcvi/assembly/unitig.py
cnsfix
def cnsfix(args): """ %prog cnsfix consensus-fix.out.FAILED > blacklist.ids Parse consensus-fix.out to extract layouts for fixed unitigs. This will mark all the failed fragments detected by utgcnsfix and pop them out of the existing unitigs. """ from jcvi.formats.base import read_block p = OptionParser(cnsfix.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) cnsfixout, = args fp = open(cnsfixout) utgs = [] saves = [] for header, contents in read_block(fp, "Evaluating"): contents = list(contents) utg = header.split()[2] utgs.append(utg) # Look for this line: # save fragment idx=388 ident=206054426 for next pass for c in contents: if not c.startswith("save"): continue ident = c.split()[3].split("=")[-1] saves.append(ident) print("\n".join(saves))
python
def cnsfix(args): """ %prog cnsfix consensus-fix.out.FAILED > blacklist.ids Parse consensus-fix.out to extract layouts for fixed unitigs. This will mark all the failed fragments detected by utgcnsfix and pop them out of the existing unitigs. """ from jcvi.formats.base import read_block p = OptionParser(cnsfix.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) cnsfixout, = args fp = open(cnsfixout) utgs = [] saves = [] for header, contents in read_block(fp, "Evaluating"): contents = list(contents) utg = header.split()[2] utgs.append(utg) # Look for this line: # save fragment idx=388 ident=206054426 for next pass for c in contents: if not c.startswith("save"): continue ident = c.split()[3].split("=")[-1] saves.append(ident) print("\n".join(saves))
[ "def", "cnsfix", "(", "args", ")", ":", "from", "jcvi", ".", "formats", ".", "base", "import", "read_block", "p", "=", "OptionParser", "(", "cnsfix", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len"...
%prog cnsfix consensus-fix.out.FAILED > blacklist.ids Parse consensus-fix.out to extract layouts for fixed unitigs. This will mark all the failed fragments detected by utgcnsfix and pop them out of the existing unitigs.
[ "%prog", "cnsfix", "consensus", "-", "fix", ".", "out", ".", "FAILED", ">", "blacklist", ".", "ids" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/unitig.py#L161-L192
train
200,645
tanghaibao/jcvi
jcvi/assembly/unitig.py
error
def error(args): """ %prog error version backup_folder Find all errors in ../5-consensus/*.err and pull the error unitigs into backup/ folder. """ p = OptionParser(error.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) version, backup_folder = args mkdir(backup_folder) fw = open("errors.log", "w") seen = set() for g in glob("../5-consensus/*.err"): if "partitioned" in g: continue fp = open(g) partID = op.basename(g).rsplit(".err", 1)[0] partID = int(partID.split("_")[-1]) for row in fp: if row.startswith(working): unitigID = row.split("(")[0].split()[-1] continue if not failed.upper() in row.upper(): continue uu = (version, partID, unitigID) if uu in seen: continue seen.add(uu) print("\t".join(str(x) for x in (partID, unitigID)), file=fw) s = [str(x) for x in uu] unitigfile = pull(s) cmd = "mv {0} {1}".format(unitigfile, backup_folder) sh(cmd) fp.close() logging.debug("A total of {0} unitigs saved to {1}.".\ format(len(seen), backup_folder))
python
def error(args): """ %prog error version backup_folder Find all errors in ../5-consensus/*.err and pull the error unitigs into backup/ folder. """ p = OptionParser(error.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) version, backup_folder = args mkdir(backup_folder) fw = open("errors.log", "w") seen = set() for g in glob("../5-consensus/*.err"): if "partitioned" in g: continue fp = open(g) partID = op.basename(g).rsplit(".err", 1)[0] partID = int(partID.split("_")[-1]) for row in fp: if row.startswith(working): unitigID = row.split("(")[0].split()[-1] continue if not failed.upper() in row.upper(): continue uu = (version, partID, unitigID) if uu in seen: continue seen.add(uu) print("\t".join(str(x) for x in (partID, unitigID)), file=fw) s = [str(x) for x in uu] unitigfile = pull(s) cmd = "mv {0} {1}".format(unitigfile, backup_folder) sh(cmd) fp.close() logging.debug("A total of {0} unitigs saved to {1}.".\ format(len(seen), backup_folder))
[ "def", "error", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "error", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "2", ":", "sys", ".", "exit", "(", "not...
%prog error version backup_folder Find all errors in ../5-consensus/*.err and pull the error unitigs into backup/ folder.
[ "%prog", "error", "version", "backup_folder" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/unitig.py#L199-L249
train
200,646
tanghaibao/jcvi
jcvi/assembly/unitig.py
cut
def cut(args): """ %prog cut unitigfile fragID Cut the unitig at a given fragment. Run `%prog trace unitigfile` first to see which fragment breaks the unitig. """ from jcvi.formats.base import SetFile p = OptionParser(cut.__doc__) p.add_option("-s", dest="shredafter", default=False, action="store_true", help="Shred fragments after the given fragID [default: %default]") p.add_option("--notest", default=False, action="store_true", help="Do not test the unitigfile after edits [default: %default]") p.add_option("--blacklist", help="File that contains blacklisted fragments to be popped " "[default: %default]") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) s, fragID = args u = UnitigLayout(s) blacklist = opts.blacklist black = SetFile(blacklist) if blacklist else None if opts.shredafter: u.shredafter(fragID) elif black: assert fragID == "0", "Must set fragID to 0 when --blacklist is on" u.pop(black) else: u.cut(fragID) u.print_to_file(inplace=True) if not opts.notest: test([s])
python
def cut(args): """ %prog cut unitigfile fragID Cut the unitig at a given fragment. Run `%prog trace unitigfile` first to see which fragment breaks the unitig. """ from jcvi.formats.base import SetFile p = OptionParser(cut.__doc__) p.add_option("-s", dest="shredafter", default=False, action="store_true", help="Shred fragments after the given fragID [default: %default]") p.add_option("--notest", default=False, action="store_true", help="Do not test the unitigfile after edits [default: %default]") p.add_option("--blacklist", help="File that contains blacklisted fragments to be popped " "[default: %default]") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) s, fragID = args u = UnitigLayout(s) blacklist = opts.blacklist black = SetFile(blacklist) if blacklist else None if opts.shredafter: u.shredafter(fragID) elif black: assert fragID == "0", "Must set fragID to 0 when --blacklist is on" u.pop(black) else: u.cut(fragID) u.print_to_file(inplace=True) if not opts.notest: test([s])
[ "def", "cut", "(", "args", ")", ":", "from", "jcvi", ".", "formats", ".", "base", "import", "SetFile", "p", "=", "OptionParser", "(", "cut", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"-s\"", ",", "dest", "=", "\"shredafter\"", ",", "default",...
%prog cut unitigfile fragID Cut the unitig at a given fragment. Run `%prog trace unitigfile` first to see which fragment breaks the unitig.
[ "%prog", "cut", "unitigfile", "fragID" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/unitig.py#L287-L324
train
200,647
tanghaibao/jcvi
jcvi/assembly/unitig.py
shred
def shred(args): """ %prog shred unitigfile Shred the unitig into one fragment per unitig to fix. This is the last resort as a desperate fix. """ p = OptionParser(shred.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) s, = args u = UnitigLayout(s) u.shred() u.print_to_file(inplace=True)
python
def shred(args): """ %prog shred unitigfile Shred the unitig into one fragment per unitig to fix. This is the last resort as a desperate fix. """ p = OptionParser(shred.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) s, = args u = UnitigLayout(s) u.shred() u.print_to_file(inplace=True)
[ "def", "shred", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "shred", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "1", ":", "sys", ".", "exit", "(", "not...
%prog shred unitigfile Shred the unitig into one fragment per unitig to fix. This is the last resort as a desperate fix.
[ "%prog", "shred", "unitigfile" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/unitig.py#L327-L343
train
200,648
tanghaibao/jcvi
jcvi/assembly/unitig.py
pull
def pull(args): """ %prog pull version partID unitigID For example, `%prog pull 5 530` will pull the utg530 from partition 5 The layout is written to `unitig530` """ p = OptionParser(pull.__doc__) opts, args = p.parse_args(args) if len(args) != 3: sys.exit(not p.print_help()) prefix = get_prefix() version, partID, unitigID = args s = ".".join(args) cmd = "tigStore" cmd += " -g ../{0}.gkpStore -t ../{0}.tigStore".format(prefix) cmd += " {0} -up {1} -d layout -u {2}".format(version, partID, unitigID) unitigfile = "unitig" + s sh(cmd, outfile=unitigfile) return unitigfile
python
def pull(args): """ %prog pull version partID unitigID For example, `%prog pull 5 530` will pull the utg530 from partition 5 The layout is written to `unitig530` """ p = OptionParser(pull.__doc__) opts, args = p.parse_args(args) if len(args) != 3: sys.exit(not p.print_help()) prefix = get_prefix() version, partID, unitigID = args s = ".".join(args) cmd = "tigStore" cmd += " -g ../{0}.gkpStore -t ../{0}.tigStore".format(prefix) cmd += " {0} -up {1} -d layout -u {2}".format(version, partID, unitigID) unitigfile = "unitig" + s sh(cmd, outfile=unitigfile) return unitigfile
[ "def", "pull", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "pull", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "3", ":", "sys", ".", "exit", "(", "not",...
%prog pull version partID unitigID For example, `%prog pull 5 530` will pull the utg530 from partition 5 The layout is written to `unitig530`
[ "%prog", "pull", "version", "partID", "unitigID" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/unitig.py#L346-L370
train
200,649
tanghaibao/jcvi
jcvi/projects/misc.py
mtdotplots
def mtdotplots(args): """ %prog mtdotplots Mt3.5 Mt4.0 medicago.medicago.lifted.1x1.anchors Plot Mt3.5 and Mt4.0 side-by-side. This is essentially combined from two graphics.dotplot() function calls as panel A and B. """ from jcvi.graphics.dotplot import check_beds, dotplot p = OptionParser(mtdotplots.__doc__) p.set_beds() opts, args, iopts = p.set_image_options(args, figsize="16x8", dpi=90) if len(args) != 3: sys.exit(not p.print_help()) a, b, ac = args fig = plt.figure(1, (iopts.w, iopts.h)) root = fig.add_axes([0, 0, 1, 1]) r1 = fig.add_axes([0, 0, .5, 1]) r2 = fig.add_axes([.5, 0, .5, 1]) a1 = fig.add_axes([.05, .1, .4, .8]) a2 = fig.add_axes([.55, .1, .4, .8]) anchorfile = op.join(a, ac) qbed, sbed, qorder, sorder, is_self = check_beds(anchorfile, p, opts) dotplot(anchorfile, qbed, sbed, fig, r1, a1, is_self=is_self, genomenames="Mt3.5_Mt3.5") opts.qbed = opts.sbed = None anchorfile = op.join(b, ac) qbed, sbed, qorder, sorder, is_self = check_beds(anchorfile, p, opts) dotplot(anchorfile, qbed, sbed, fig, r2, a2, is_self=is_self, genomenames="Mt4.0_Mt4.0") root.text(.03, .95, "A", ha="center", va="center", size=36) root.text(.53, .95, "B", ha="center", va="center", size=36) root.set_xlim(0, 1) root.set_ylim(0, 1) root.set_axis_off() pf = "mtdotplots" image_name = pf + "." + iopts.format savefig(image_name, dpi=iopts.dpi, iopts=iopts)
python
def mtdotplots(args): """ %prog mtdotplots Mt3.5 Mt4.0 medicago.medicago.lifted.1x1.anchors Plot Mt3.5 and Mt4.0 side-by-side. This is essentially combined from two graphics.dotplot() function calls as panel A and B. """ from jcvi.graphics.dotplot import check_beds, dotplot p = OptionParser(mtdotplots.__doc__) p.set_beds() opts, args, iopts = p.set_image_options(args, figsize="16x8", dpi=90) if len(args) != 3: sys.exit(not p.print_help()) a, b, ac = args fig = plt.figure(1, (iopts.w, iopts.h)) root = fig.add_axes([0, 0, 1, 1]) r1 = fig.add_axes([0, 0, .5, 1]) r2 = fig.add_axes([.5, 0, .5, 1]) a1 = fig.add_axes([.05, .1, .4, .8]) a2 = fig.add_axes([.55, .1, .4, .8]) anchorfile = op.join(a, ac) qbed, sbed, qorder, sorder, is_self = check_beds(anchorfile, p, opts) dotplot(anchorfile, qbed, sbed, fig, r1, a1, is_self=is_self, genomenames="Mt3.5_Mt3.5") opts.qbed = opts.sbed = None anchorfile = op.join(b, ac) qbed, sbed, qorder, sorder, is_self = check_beds(anchorfile, p, opts) dotplot(anchorfile, qbed, sbed, fig, r2, a2, is_self=is_self, genomenames="Mt4.0_Mt4.0") root.text(.03, .95, "A", ha="center", va="center", size=36) root.text(.53, .95, "B", ha="center", va="center", size=36) root.set_xlim(0, 1) root.set_ylim(0, 1) root.set_axis_off() pf = "mtdotplots" image_name = pf + "." + iopts.format savefig(image_name, dpi=iopts.dpi, iopts=iopts)
[ "def", "mtdotplots", "(", "args", ")", ":", "from", "jcvi", ".", "graphics", ".", "dotplot", "import", "check_beds", ",", "dotplot", "p", "=", "OptionParser", "(", "mtdotplots", ".", "__doc__", ")", "p", ".", "set_beds", "(", ")", "opts", ",", "args", ...
%prog mtdotplots Mt3.5 Mt4.0 medicago.medicago.lifted.1x1.anchors Plot Mt3.5 and Mt4.0 side-by-side. This is essentially combined from two graphics.dotplot() function calls as panel A and B.
[ "%prog", "mtdotplots", "Mt3", ".", "5", "Mt4", ".", "0", "medicago", ".", "medicago", ".", "lifted", ".", "1x1", ".", "anchors" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/projects/misc.py#L214-L258
train
200,650
tanghaibao/jcvi
jcvi/projects/misc.py
oropetium
def oropetium(args): """ %prog oropetium mcscan.out all.bed layout switch.ids Build a composite figure that calls graphis.synteny. """ p = OptionParser(oropetium.__doc__) p.add_option("--extra", help="Extra features in BED format") opts, args, iopts = p.set_image_options(args, figsize="9x6") if len(args) != 4: sys.exit(not p.print_help()) datafile, bedfile, slayout, switch = args fig = plt.figure(1, (iopts.w, iopts.h)) root = fig.add_axes([0, 0, 1, 1]) Synteny(fig, root, datafile, bedfile, slayout, switch=switch, extra_features=opts.extra) # legend showing the orientation of the genes draw_gene_legend(root, .4, .57, .74, text=True, repeat=True) # On the left panel, make a species tree fc = 'lightslategrey' coords = {} xs, xp = .16, .03 coords["oropetium"] = (xs, .7) coords["setaria"] = (xs, .6) coords["sorghum"] = (xs, .5) coords["rice"] = (xs, .4) coords["brachypodium"] = (xs, .3) xs -= xp coords["Panicoideae"] = join_nodes(root, coords, "setaria", "sorghum", xs) xs -= xp coords["BEP"] = join_nodes(root, coords, "rice", "brachypodium", xs) coords["PACMAD"] = join_nodes(root, coords, "oropetium", "Panicoideae", xs) xs -= xp coords["Poaceae"] = join_nodes(root, coords, "BEP", "PACMAD", xs) # Names of the internal nodes for tag in ("BEP", "Poaceae"): nx, ny = coords[tag] nx, ny = nx - .005, ny - .02 root.text(nx, ny, tag, rotation=90, ha="right", va="top", color=fc) for tag in ("PACMAD",): nx, ny = coords[tag] nx, ny = nx - .005, ny + .02 root.text(nx, ny, tag, rotation=90, ha="right", va="bottom", color=fc) root.set_xlim(0, 1) root.set_ylim(0, 1) root.set_axis_off() pf = "oropetium" image_name = pf + "." + iopts.format savefig(image_name, dpi=iopts.dpi, iopts=iopts)
python
def oropetium(args): """ %prog oropetium mcscan.out all.bed layout switch.ids Build a composite figure that calls graphis.synteny. """ p = OptionParser(oropetium.__doc__) p.add_option("--extra", help="Extra features in BED format") opts, args, iopts = p.set_image_options(args, figsize="9x6") if len(args) != 4: sys.exit(not p.print_help()) datafile, bedfile, slayout, switch = args fig = plt.figure(1, (iopts.w, iopts.h)) root = fig.add_axes([0, 0, 1, 1]) Synteny(fig, root, datafile, bedfile, slayout, switch=switch, extra_features=opts.extra) # legend showing the orientation of the genes draw_gene_legend(root, .4, .57, .74, text=True, repeat=True) # On the left panel, make a species tree fc = 'lightslategrey' coords = {} xs, xp = .16, .03 coords["oropetium"] = (xs, .7) coords["setaria"] = (xs, .6) coords["sorghum"] = (xs, .5) coords["rice"] = (xs, .4) coords["brachypodium"] = (xs, .3) xs -= xp coords["Panicoideae"] = join_nodes(root, coords, "setaria", "sorghum", xs) xs -= xp coords["BEP"] = join_nodes(root, coords, "rice", "brachypodium", xs) coords["PACMAD"] = join_nodes(root, coords, "oropetium", "Panicoideae", xs) xs -= xp coords["Poaceae"] = join_nodes(root, coords, "BEP", "PACMAD", xs) # Names of the internal nodes for tag in ("BEP", "Poaceae"): nx, ny = coords[tag] nx, ny = nx - .005, ny - .02 root.text(nx, ny, tag, rotation=90, ha="right", va="top", color=fc) for tag in ("PACMAD",): nx, ny = coords[tag] nx, ny = nx - .005, ny + .02 root.text(nx, ny, tag, rotation=90, ha="right", va="bottom", color=fc) root.set_xlim(0, 1) root.set_ylim(0, 1) root.set_axis_off() pf = "oropetium" image_name = pf + "." + iopts.format savefig(image_name, dpi=iopts.dpi, iopts=iopts)
[ "def", "oropetium", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "oropetium", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--extra\"", ",", "help", "=", "\"Extra features in BED format\"", ")", "opts", ",", "args", ",", "iopts", "=", "p", ...
%prog oropetium mcscan.out all.bed layout switch.ids Build a composite figure that calls graphis.synteny.
[ "%prog", "oropetium", "mcscan", ".", "out", "all", ".", "bed", "layout", "switch", ".", "ids" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/projects/misc.py#L261-L318
train
200,651
tanghaibao/jcvi
jcvi/projects/misc.py
amborella
def amborella(args): """ %prog amborella seqids karyotype.layout mcscan.out all.bed synteny.layout Build a composite figure that calls graphics.karyotype and graphics.synteny. """ p = OptionParser(amborella.__doc__) p.add_option("--tree", help="Display trees on the bottom of the figure [default: %default]") p.add_option("--switch", help="Rename the seqid with two-column file [default: %default]") opts, args, iopts = p.set_image_options(args, figsize="8x7") if len(args) != 5: sys.exit(not p.print_help()) seqidsfile, klayout, datafile, bedfile, slayout = args switch = opts.switch tree = opts.tree fig = plt.figure(1, (iopts.w, iopts.h)) root = fig.add_axes([0, 0, 1, 1]) Karyotype(fig, root, seqidsfile, klayout) Synteny(fig, root, datafile, bedfile, slayout, switch=switch, tree=tree) # legend showing the orientation of the genes draw_gene_legend(root, .5, .68, .5) # annotate the WGD events fc = 'lightslategrey' x = .05 radius = .012 TextCircle(root, x, .86, '$\gamma$', radius=radius) TextCircle(root, x, .95, '$\epsilon$', radius=radius) root.plot([x, x], [.83, .9], ":", color=fc, lw=2) pts = plot_cap((x, .95), np.radians(range(-70, 250)), .02) x, y = zip(*pts) root.plot(x, y, ":", color=fc, lw=2) root.set_xlim(0, 1) root.set_ylim(0, 1) root.set_axis_off() pf = "amborella" image_name = pf + "." + iopts.format savefig(image_name, dpi=iopts.dpi, iopts=iopts)
python
def amborella(args): """ %prog amborella seqids karyotype.layout mcscan.out all.bed synteny.layout Build a composite figure that calls graphics.karyotype and graphics.synteny. """ p = OptionParser(amborella.__doc__) p.add_option("--tree", help="Display trees on the bottom of the figure [default: %default]") p.add_option("--switch", help="Rename the seqid with two-column file [default: %default]") opts, args, iopts = p.set_image_options(args, figsize="8x7") if len(args) != 5: sys.exit(not p.print_help()) seqidsfile, klayout, datafile, bedfile, slayout = args switch = opts.switch tree = opts.tree fig = plt.figure(1, (iopts.w, iopts.h)) root = fig.add_axes([0, 0, 1, 1]) Karyotype(fig, root, seqidsfile, klayout) Synteny(fig, root, datafile, bedfile, slayout, switch=switch, tree=tree) # legend showing the orientation of the genes draw_gene_legend(root, .5, .68, .5) # annotate the WGD events fc = 'lightslategrey' x = .05 radius = .012 TextCircle(root, x, .86, '$\gamma$', radius=radius) TextCircle(root, x, .95, '$\epsilon$', radius=radius) root.plot([x, x], [.83, .9], ":", color=fc, lw=2) pts = plot_cap((x, .95), np.radians(range(-70, 250)), .02) x, y = zip(*pts) root.plot(x, y, ":", color=fc, lw=2) root.set_xlim(0, 1) root.set_ylim(0, 1) root.set_axis_off() pf = "amborella" image_name = pf + "." + iopts.format savefig(image_name, dpi=iopts.dpi, iopts=iopts)
[ "def", "amborella", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "amborella", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--tree\"", ",", "help", "=", "\"Display trees on the bottom of the figure [default: %default]\"", ")", "p", ".", "add_option...
%prog amborella seqids karyotype.layout mcscan.out all.bed synteny.layout Build a composite figure that calls graphics.karyotype and graphics.synteny.
[ "%prog", "amborella", "seqids", "karyotype", ".", "layout", "mcscan", ".", "out", "all", ".", "bed", "synteny", ".", "layout" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/projects/misc.py#L377-L423
train
200,652
tanghaibao/jcvi
jcvi/assembly/gaps.py
annotate
def annotate(args): """ %prog annotate agpfile gaps.linkage.bed assembly.fasta Annotate AGP file with linkage info of `paired-end` or `map`. File `gaps.linkage.bed` is generated by assembly.gaps.estimate(). """ from jcvi.formats.agp import AGP, bed, tidy p = OptionParser(annotate.__doc__) p.add_option("--minsize", default=200, help="Smallest component size [default: %default]") opts, args = p.parse_args(args) if len(args) != 3: sys.exit(not p.print_help()) agpfile, linkagebed, assemblyfasta = args linkagebed = Bed(linkagebed) spannedgaps = set() for b in linkagebed: score = int(b.score) if score == 0: spannedgaps.add((b.accn, b.start, b.end)) agp = AGP(agpfile) newagpfile = agpfile.rsplit(".", 1)[0] + ".linkage.agp" newagp = open(newagpfile, "w") contig_id = 0 minsize = opts.minsize for a in agp: if not a.is_gap: cs = a.component_span if cs < minsize: a.is_gap = True a.component_type = "N" a.gap_length = cs a.gap_type = "scaffold" a.linkage = "yes" a.linkage_evidence = [] else: contig_id += 1 a.component_id = "contig{0:04d}".format(contig_id) a.component_beg = 1 a.component_end = cs a.component_type = "W" print(a, file=newagp) continue gapinfo = (a.object, a.object_beg, a.object_end) gaplen = a.gap_length if gaplen == 100 and gapinfo not in spannedgaps: a.component_type = "U" tag = "map" else: tag = "paired-ends" a.linkage_evidence.append(tag) print(a, file=newagp) newagp.close() logging.debug("Annotated AGP written to `{0}`.".format(newagpfile)) contigbed = assemblyfasta.rsplit(".", 1)[0] + ".contigs.bed" bedfile = bed([newagpfile, "--nogaps", "--outfile=" + contigbed]) contigfasta = fastaFromBed(bedfile, assemblyfasta, name=True, stranded=True) tidy([newagpfile, contigfasta])
python
def annotate(args): """ %prog annotate agpfile gaps.linkage.bed assembly.fasta Annotate AGP file with linkage info of `paired-end` or `map`. File `gaps.linkage.bed` is generated by assembly.gaps.estimate(). """ from jcvi.formats.agp import AGP, bed, tidy p = OptionParser(annotate.__doc__) p.add_option("--minsize", default=200, help="Smallest component size [default: %default]") opts, args = p.parse_args(args) if len(args) != 3: sys.exit(not p.print_help()) agpfile, linkagebed, assemblyfasta = args linkagebed = Bed(linkagebed) spannedgaps = set() for b in linkagebed: score = int(b.score) if score == 0: spannedgaps.add((b.accn, b.start, b.end)) agp = AGP(agpfile) newagpfile = agpfile.rsplit(".", 1)[0] + ".linkage.agp" newagp = open(newagpfile, "w") contig_id = 0 minsize = opts.minsize for a in agp: if not a.is_gap: cs = a.component_span if cs < minsize: a.is_gap = True a.component_type = "N" a.gap_length = cs a.gap_type = "scaffold" a.linkage = "yes" a.linkage_evidence = [] else: contig_id += 1 a.component_id = "contig{0:04d}".format(contig_id) a.component_beg = 1 a.component_end = cs a.component_type = "W" print(a, file=newagp) continue gapinfo = (a.object, a.object_beg, a.object_end) gaplen = a.gap_length if gaplen == 100 and gapinfo not in spannedgaps: a.component_type = "U" tag = "map" else: tag = "paired-ends" a.linkage_evidence.append(tag) print(a, file=newagp) newagp.close() logging.debug("Annotated AGP written to `{0}`.".format(newagpfile)) contigbed = assemblyfasta.rsplit(".", 1)[0] + ".contigs.bed" bedfile = bed([newagpfile, "--nogaps", "--outfile=" + contigbed]) contigfasta = fastaFromBed(bedfile, assemblyfasta, name=True, stranded=True) tidy([newagpfile, contigfasta])
[ "def", "annotate", "(", "args", ")", ":", "from", "jcvi", ".", "formats", ".", "agp", "import", "AGP", ",", "bed", ",", "tidy", "p", "=", "OptionParser", "(", "annotate", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--minsize\"", ",", "default"...
%prog annotate agpfile gaps.linkage.bed assembly.fasta Annotate AGP file with linkage info of `paired-end` or `map`. File `gaps.linkage.bed` is generated by assembly.gaps.estimate().
[ "%prog", "annotate", "agpfile", "gaps", ".", "linkage", ".", "bed", "assembly", ".", "fasta" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/gaps.py#L33-L103
train
200,653
tanghaibao/jcvi
jcvi/assembly/gaps.py
estimate
def estimate(args): """ %prog estimate gaps.bed all.spans.bed all.mates Estimate gap sizes based on mate positions and library insert sizes. """ from collections import defaultdict from jcvi.formats.bed import intersectBed_wao from jcvi.formats.posmap import MatesFile p = OptionParser(estimate.__doc__) p.add_option("--minlinks", default=3, type="int", help="Minimum number of links to place [default: %default]") opts, args = p.parse_args(args) if len(args) != 3: sys.exit(not p.print_help()) gapsbed, spansbed, matesfile = args mf = MatesFile(matesfile) bed = Bed(gapsbed) order = bed.order gap2mate = defaultdict(set) mate2gap = defaultdict(set) for a, b in intersectBed_wao(gapsbed, spansbed): gapsize = a.span if gapsize != 100: continue gapname = a.accn if b is None: gap2mate[gapname] = set() continue matename = b.accn gap2mate[gapname].add(matename) mate2gap[matename].add(gapname) omgapsbed = "gaps.linkage.bed" fw = open(omgapsbed, "w") for gapname, mates in sorted(gap2mate.items()): i, b = order[gapname] nmates = len(mates) if nmates < opts.minlinks: print("{0}\t{1}".format(b, nmates), file=fw) continue print(gapname, mates) fw.close()
python
def estimate(args): """ %prog estimate gaps.bed all.spans.bed all.mates Estimate gap sizes based on mate positions and library insert sizes. """ from collections import defaultdict from jcvi.formats.bed import intersectBed_wao from jcvi.formats.posmap import MatesFile p = OptionParser(estimate.__doc__) p.add_option("--minlinks", default=3, type="int", help="Minimum number of links to place [default: %default]") opts, args = p.parse_args(args) if len(args) != 3: sys.exit(not p.print_help()) gapsbed, spansbed, matesfile = args mf = MatesFile(matesfile) bed = Bed(gapsbed) order = bed.order gap2mate = defaultdict(set) mate2gap = defaultdict(set) for a, b in intersectBed_wao(gapsbed, spansbed): gapsize = a.span if gapsize != 100: continue gapname = a.accn if b is None: gap2mate[gapname] = set() continue matename = b.accn gap2mate[gapname].add(matename) mate2gap[matename].add(gapname) omgapsbed = "gaps.linkage.bed" fw = open(omgapsbed, "w") for gapname, mates in sorted(gap2mate.items()): i, b = order[gapname] nmates = len(mates) if nmates < opts.minlinks: print("{0}\t{1}".format(b, nmates), file=fw) continue print(gapname, mates) fw.close()
[ "def", "estimate", "(", "args", ")", ":", "from", "collections", "import", "defaultdict", "from", "jcvi", ".", "formats", ".", "bed", "import", "intersectBed_wao", "from", "jcvi", ".", "formats", ".", "posmap", "import", "MatesFile", "p", "=", "OptionParser", ...
%prog estimate gaps.bed all.spans.bed all.mates Estimate gap sizes based on mate positions and library insert sizes.
[ "%prog", "estimate", "gaps", ".", "bed", "all", ".", "spans", ".", "bed", "all", ".", "mates" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/gaps.py#L106-L158
train
200,654
tanghaibao/jcvi
jcvi/assembly/gaps.py
sizes
def sizes(args): """ %prog sizes gaps.bed a.fasta b.fasta Take the flanks of gaps within a.fasta, map them onto b.fasta. Compile the results to the gap size estimates in b. The output is detailed below: Columns are: 1. A scaffold 2. Start position 3. End position 4. Gap identifier 5. Gap size in A (= End - Start) 6. Gap size in B (based on BLAST, see below) For each gap, I extracted the left and right sequence (mostly 2Kb, but can be shorter if it runs into another gap) flanking the gap. The flanker names look like gap.00003L and gap.00003R means the left and right flanker of this particular gap, respectively. The BLAST output is used to calculate the gap size. For each flanker sequence, I took the best hit, and calculate the inner distance between the L match range and R range. The two flankers must map with at least 98% identity, and in the same orientation. NOTE the sixth column in the list file is not always a valid number. Other values are: - na: both flankers are missing in B - Singleton: one flanker is missing - Different chr: flankers map to different scaffolds - Strand +|-: flankers map in different orientations - Negative value: the R flanker map before L flanker """ from jcvi.formats.base import DictFile from jcvi.apps.align import blast p = OptionParser(sizes.__doc__) opts, args = p.parse_args(args) if len(args) != 3: sys.exit(not p.print_help()) gapsbed, afasta, bfasta = args pf = gapsbed.rsplit(".", 1)[0] extbed = pf + ".ext.bed" extfasta = pf + ".ext.fasta" if need_update(gapsbed, extfasta): extbed, extfasta = flanks([gapsbed, afasta]) q = op.basename(extfasta).split(".")[0] r = op.basename(bfasta).split(".")[0] blastfile = "{0}.{1}.blast".format(q, r) if need_update([extfasta, bfasta], blastfile): blastfile = blast([bfasta, extfasta, "--wordsize=50", "--pctid=98"]) labelsfile = blast_to_twobeds(blastfile) labels = DictFile(labelsfile, delimiter='\t') bed = Bed(gapsbed) for b in bed: b.score = b.span accn = b.accn print("\t".join((str(x) for x in 
(b.seqid, b.start - 1, b.end, accn, b.score, labels.get(accn, "na")))))
python
def sizes(args): """ %prog sizes gaps.bed a.fasta b.fasta Take the flanks of gaps within a.fasta, map them onto b.fasta. Compile the results to the gap size estimates in b. The output is detailed below: Columns are: 1. A scaffold 2. Start position 3. End position 4. Gap identifier 5. Gap size in A (= End - Start) 6. Gap size in B (based on BLAST, see below) For each gap, I extracted the left and right sequence (mostly 2Kb, but can be shorter if it runs into another gap) flanking the gap. The flanker names look like gap.00003L and gap.00003R means the left and right flanker of this particular gap, respectively. The BLAST output is used to calculate the gap size. For each flanker sequence, I took the best hit, and calculate the inner distance between the L match range and R range. The two flankers must map with at least 98% identity, and in the same orientation. NOTE the sixth column in the list file is not always a valid number. Other values are: - na: both flankers are missing in B - Singleton: one flanker is missing - Different chr: flankers map to different scaffolds - Strand +|-: flankers map in different orientations - Negative value: the R flanker map before L flanker """ from jcvi.formats.base import DictFile from jcvi.apps.align import blast p = OptionParser(sizes.__doc__) opts, args = p.parse_args(args) if len(args) != 3: sys.exit(not p.print_help()) gapsbed, afasta, bfasta = args pf = gapsbed.rsplit(".", 1)[0] extbed = pf + ".ext.bed" extfasta = pf + ".ext.fasta" if need_update(gapsbed, extfasta): extbed, extfasta = flanks([gapsbed, afasta]) q = op.basename(extfasta).split(".")[0] r = op.basename(bfasta).split(".")[0] blastfile = "{0}.{1}.blast".format(q, r) if need_update([extfasta, bfasta], blastfile): blastfile = blast([bfasta, extfasta, "--wordsize=50", "--pctid=98"]) labelsfile = blast_to_twobeds(blastfile) labels = DictFile(labelsfile, delimiter='\t') bed = Bed(gapsbed) for b in bed: b.score = b.span accn = b.accn print("\t".join((str(x) for x in 
(b.seqid, b.start - 1, b.end, accn, b.score, labels.get(accn, "na")))))
[ "def", "sizes", "(", "args", ")", ":", "from", "jcvi", ".", "formats", ".", "base", "import", "DictFile", "from", "jcvi", ".", "apps", ".", "align", "import", "blast", "p", "=", "OptionParser", "(", "sizes", ".", "__doc__", ")", "opts", ",", "args", ...
%prog sizes gaps.bed a.fasta b.fasta Take the flanks of gaps within a.fasta, map them onto b.fasta. Compile the results to the gap size estimates in b. The output is detailed below: Columns are: 1. A scaffold 2. Start position 3. End position 4. Gap identifier 5. Gap size in A (= End - Start) 6. Gap size in B (based on BLAST, see below) For each gap, I extracted the left and right sequence (mostly 2Kb, but can be shorter if it runs into another gap) flanking the gap. The flanker names look like gap.00003L and gap.00003R means the left and right flanker of this particular gap, respectively. The BLAST output is used to calculate the gap size. For each flanker sequence, I took the best hit, and calculate the inner distance between the L match range and R range. The two flankers must map with at least 98% identity, and in the same orientation. NOTE the sixth column in the list file is not always a valid number. Other values are: - na: both flankers are missing in B - Singleton: one flanker is missing - Different chr: flankers map to different scaffolds - Strand +|-: flankers map in different orientations - Negative value: the R flanker map before L flanker
[ "%prog", "sizes", "gaps", ".", "bed", "a", ".", "fasta", "b", ".", "fasta" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/gaps.py#L220-L281
train
200,655
tanghaibao/jcvi
jcvi/assembly/gaps.py
flanks
def flanks(args): """ %prog flanks gaps.bed fastafile Create sequences flanking the gaps. """ p = OptionParser(flanks.__doc__) p.add_option("--extend", default=2000, type="int", help="Extend seq flanking the gaps [default: %default]") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) gapsbed, fastafile = args Ext = opts.extend sizes = Sizes(fastafile).mapping bed = Bed(gapsbed) pf = gapsbed.rsplit(".", 1)[0] extbed = pf + ".ext.bed" fw = open(extbed, "w") for i, b in enumerate(bed): seqid = b.seqid gapname = b.accn size = sizes[seqid] prev_b = bed[i - 1] if i > 0 else None next_b = bed[i + 1] if i + 1 < len(bed) else None if prev_b and prev_b.seqid != seqid: prev_b = None if next_b and next_b.seqid != seqid: next_b = None start = prev_b.end + 1 if prev_b else 1 start, end = max(start, b.start - Ext), b.start - 1 print("\t".join(str(x) for x in \ (b.seqid, start - 1, end, gapname + "L")), file=fw) end = next_b.start - 1 if next_b else size start, end = b.end + 1, min(end, b.end + Ext) print("\t".join(str(x) for x in \ (b.seqid, start - 1, end, gapname + "R")), file=fw) fw.close() extfasta = fastaFromBed(extbed, fastafile, name=True) return extbed, extfasta
python
def flanks(args): """ %prog flanks gaps.bed fastafile Create sequences flanking the gaps. """ p = OptionParser(flanks.__doc__) p.add_option("--extend", default=2000, type="int", help="Extend seq flanking the gaps [default: %default]") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) gapsbed, fastafile = args Ext = opts.extend sizes = Sizes(fastafile).mapping bed = Bed(gapsbed) pf = gapsbed.rsplit(".", 1)[0] extbed = pf + ".ext.bed" fw = open(extbed, "w") for i, b in enumerate(bed): seqid = b.seqid gapname = b.accn size = sizes[seqid] prev_b = bed[i - 1] if i > 0 else None next_b = bed[i + 1] if i + 1 < len(bed) else None if prev_b and prev_b.seqid != seqid: prev_b = None if next_b and next_b.seqid != seqid: next_b = None start = prev_b.end + 1 if prev_b else 1 start, end = max(start, b.start - Ext), b.start - 1 print("\t".join(str(x) for x in \ (b.seqid, start - 1, end, gapname + "L")), file=fw) end = next_b.start - 1 if next_b else size start, end = b.end + 1, min(end, b.end + Ext) print("\t".join(str(x) for x in \ (b.seqid, start - 1, end, gapname + "R")), file=fw) fw.close() extfasta = fastaFromBed(extbed, fastafile, name=True) return extbed, extfasta
[ "def", "flanks", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "flanks", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--extend\"", ",", "default", "=", "2000", ",", "type", "=", "\"int\"", ",", "help", "=", "\"Extend seq flanking the gaps [...
%prog flanks gaps.bed fastafile Create sequences flanking the gaps.
[ "%prog", "flanks", "gaps", ".", "bed", "fastafile" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/gaps.py#L284-L330
train
200,656
tanghaibao/jcvi
jcvi/formats/posmap.py
index
def index(args): """ %prog index frgscf.sorted Compress frgscffile.sorted and index it using `tabix`. """ p = OptionParser(index.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(p.print_help()) frgscffile, = args gzfile = frgscffile + ".gz" cmd = "bgzip -c {0}".format(frgscffile) if not op.exists(gzfile): sh(cmd, outfile=gzfile) tbifile = gzfile + ".tbi" # Sequence, begin, end in 2, 3, 4-th column, respectively cmd = "tabix -s 2 -b 3 -e 4 {0}".format(gzfile) if not op.exists(tbifile): sh(cmd)
python
def index(args): """ %prog index frgscf.sorted Compress frgscffile.sorted and index it using `tabix`. """ p = OptionParser(index.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(p.print_help()) frgscffile, = args gzfile = frgscffile + ".gz" cmd = "bgzip -c {0}".format(frgscffile) if not op.exists(gzfile): sh(cmd, outfile=gzfile) tbifile = gzfile + ".tbi" # Sequence, begin, end in 2, 3, 4-th column, respectively cmd = "tabix -s 2 -b 3 -e 4 {0}".format(gzfile) if not op.exists(tbifile): sh(cmd)
[ "def", "index", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "index", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "1", ":", "sys", ".", "exit", "(", "p",...
%prog index frgscf.sorted Compress frgscffile.sorted and index it using `tabix`.
[ "%prog", "index", "frgscf", ".", "sorted" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/posmap.py#L155-L180
train
200,657
tanghaibao/jcvi
jcvi/formats/posmap.py
reads
def reads(args): """ %prog reads frgscffile Report read counts per scaffold (based on frgscf). """ p = OptionParser(reads.__doc__) p.add_option("-p", dest="prefix_length", default=4, type="int", help="group the reads based on the first N chars [default: %default]") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(p.print_help()) frgscffile, = args prefix_length = opts.prefix_length fp = open(frgscffile) keyfn = lambda: defaultdict(int) counts = defaultdict(keyfn) for row in fp: f = FrgScfLine(row) fi = f.fragmentID[:prefix_length] counts[f.scaffoldID][fi] += 1 for scf, count in sorted(counts.items()): print("{0}\t{1}".format(scf, ", ".join("{0}:{1}".format(*x) for x in sorted(count.items()))))
python
def reads(args): """ %prog reads frgscffile Report read counts per scaffold (based on frgscf). """ p = OptionParser(reads.__doc__) p.add_option("-p", dest="prefix_length", default=4, type="int", help="group the reads based on the first N chars [default: %default]") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(p.print_help()) frgscffile, = args prefix_length = opts.prefix_length fp = open(frgscffile) keyfn = lambda: defaultdict(int) counts = defaultdict(keyfn) for row in fp: f = FrgScfLine(row) fi = f.fragmentID[:prefix_length] counts[f.scaffoldID][fi] += 1 for scf, count in sorted(counts.items()): print("{0}\t{1}".format(scf, ", ".join("{0}:{1}".format(*x) for x in sorted(count.items()))))
[ "def", "reads", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "reads", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"-p\"", ",", "dest", "=", "\"prefix_length\"", ",", "default", "=", "4", ",", "type", "=", "\"int\"", ",", "help", "=",...
%prog reads frgscffile Report read counts per scaffold (based on frgscf).
[ "%prog", "reads", "frgscffile" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/posmap.py#L211-L238
train
200,658
tanghaibao/jcvi
jcvi/formats/posmap.py
bed
def bed(args): """ %prog bed frgscffile Convert the frgscf posmap file to bed format. """ p = OptionParser(bed.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) frgscffile, = args bedfile = frgscffile.rsplit(".", 1)[0] + ".bed" fw = open(bedfile, "w") fp = open(frgscffile) for row in fp: f = FrgScfLine(row) print(f.bedline, file=fw) logging.debug("File written to `{0}`.".format(bedfile)) return bedfile
python
def bed(args): """ %prog bed frgscffile Convert the frgscf posmap file to bed format. """ p = OptionParser(bed.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) frgscffile, = args bedfile = frgscffile.rsplit(".", 1)[0] + ".bed" fw = open(bedfile, "w") fp = open(frgscffile) for row in fp: f = FrgScfLine(row) print(f.bedline, file=fw) logging.debug("File written to `{0}`.".format(bedfile)) return bedfile
[ "def", "bed", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "bed", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "1", ":", "sys", ".", "exit", "(", "not", ...
%prog bed frgscffile Convert the frgscf posmap file to bed format.
[ "%prog", "bed", "frgscffile" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/posmap.py#L241-L264
train
200,659
tanghaibao/jcvi
jcvi/formats/posmap.py
dup
def dup(args): """ %prog dup frgscffile Use the frgscf posmap file as an indication of the coverage of the library. Large insert libraries are frequently victims of high levels of redundancy. """ p = OptionParser(dup.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(p.print_help()) frgscffile, = args fp = open(frgscffile) data = [FrgScfLine(row) for row in fp] # we need to separate forward and reverse reads, because the position # collisions are handled differently forward_data = [x for x in data if x.orientation == '+'] reverse_data = [x for x in data if x.orientation == '-'] counts = defaultdict(int) key = lambda x: (x.scaffoldID, x.begin) forward_data.sort(key=key) for k, data in groupby(forward_data, key=key): data = list(data) count = len(data) counts[count] += 1 key = lambda x: (x.scaffoldID, x.end) reverse_data.sort(key=key) for k, data in groupby(forward_data, key=key): data = list(data) count = len(data) counts[count] += 1 prefix = frgscffile.split(".")[0] print("Duplication level in `{0}`".format(prefix), file=sys.stderr) print("=" * 40, file=sys.stderr) for c, v in sorted(counts.items()): if c > 10: break label = "unique" if c == 1 else "{0} copies".format(c) print("{0}: {1}".format(label, v), file=sys.stderr)
python
def dup(args): """ %prog dup frgscffile Use the frgscf posmap file as an indication of the coverage of the library. Large insert libraries are frequently victims of high levels of redundancy. """ p = OptionParser(dup.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(p.print_help()) frgscffile, = args fp = open(frgscffile) data = [FrgScfLine(row) for row in fp] # we need to separate forward and reverse reads, because the position # collisions are handled differently forward_data = [x for x in data if x.orientation == '+'] reverse_data = [x for x in data if x.orientation == '-'] counts = defaultdict(int) key = lambda x: (x.scaffoldID, x.begin) forward_data.sort(key=key) for k, data in groupby(forward_data, key=key): data = list(data) count = len(data) counts[count] += 1 key = lambda x: (x.scaffoldID, x.end) reverse_data.sort(key=key) for k, data in groupby(forward_data, key=key): data = list(data) count = len(data) counts[count] += 1 prefix = frgscffile.split(".")[0] print("Duplication level in `{0}`".format(prefix), file=sys.stderr) print("=" * 40, file=sys.stderr) for c, v in sorted(counts.items()): if c > 10: break label = "unique" if c == 1 else "{0} copies".format(c) print("{0}: {1}".format(label, v), file=sys.stderr)
[ "def", "dup", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "dup", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "1", ":", "sys", ".", "exit", "(", "p", "...
%prog dup frgscffile Use the frgscf posmap file as an indication of the coverage of the library. Large insert libraries are frequently victims of high levels of redundancy.
[ "%prog", "dup", "frgscffile" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/posmap.py#L267-L311
train
200,660
tanghaibao/jcvi
jcvi/compara/synteny.py
_score
def _score(cluster): """ score of the cluster, in this case, is the number of non-repetitive matches """ x, y = zip(*cluster)[:2] return min(len(set(x)), len(set(y)))
python
def _score(cluster): """ score of the cluster, in this case, is the number of non-repetitive matches """ x, y = zip(*cluster)[:2] return min(len(set(x)), len(set(y)))
[ "def", "_score", "(", "cluster", ")", ":", "x", ",", "y", "=", "zip", "(", "*", "cluster", ")", "[", ":", "2", "]", "return", "min", "(", "len", "(", "set", "(", "x", ")", ")", ",", "len", "(", "set", "(", "y", ")", ")", ")" ]
score of the cluster, in this case, is the number of non-repetitive matches
[ "score", "of", "the", "cluster", "in", "this", "case", "is", "the", "number", "of", "non", "-", "repetitive", "matches" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/compara/synteny.py#L256-L261
train
200,661
tanghaibao/jcvi
jcvi/compara/synteny.py
read_blast
def read_blast(blast_file, qorder, sorder, is_self=False, ostrip=True): """ Read the blast and convert name into coordinates """ filtered_blast = [] seen = set() bl = Blast(blast_file) for b in bl: query, subject = b.query, b.subject if query == subject: continue if ostrip: query, subject = gene_name(query), gene_name(subject) if query not in qorder or subject not in sorder: continue qi, q = qorder[query] si, s = sorder[subject] if is_self: # remove redundant a<->b to one side when doing self-self BLAST if qi > si: query, subject = subject, query qi, si = si, qi q, s = s, q # Too close to diagonal! possible tandem repeats if q.seqid == s.seqid and si - qi < 40: continue key = query, subject if key in seen: continue seen.add(key) b.qseqid, b.sseqid = q.seqid, s.seqid b.qi, b.si = qi, si b.query, b.subject = query, subject filtered_blast.append(b) logging.debug("A total of {0} BLAST imported from `{1}`.".\ format(len(filtered_blast), blast_file)) return filtered_blast
python
def read_blast(blast_file, qorder, sorder, is_self=False, ostrip=True): """ Read the blast and convert name into coordinates """ filtered_blast = [] seen = set() bl = Blast(blast_file) for b in bl: query, subject = b.query, b.subject if query == subject: continue if ostrip: query, subject = gene_name(query), gene_name(subject) if query not in qorder or subject not in sorder: continue qi, q = qorder[query] si, s = sorder[subject] if is_self: # remove redundant a<->b to one side when doing self-self BLAST if qi > si: query, subject = subject, query qi, si = si, qi q, s = s, q # Too close to diagonal! possible tandem repeats if q.seqid == s.seqid and si - qi < 40: continue key = query, subject if key in seen: continue seen.add(key) b.qseqid, b.sseqid = q.seqid, s.seqid b.qi, b.si = qi, si b.query, b.subject = query, subject filtered_blast.append(b) logging.debug("A total of {0} BLAST imported from `{1}`.".\ format(len(filtered_blast), blast_file)) return filtered_blast
[ "def", "read_blast", "(", "blast_file", ",", "qorder", ",", "sorder", ",", "is_self", "=", "False", ",", "ostrip", "=", "True", ")", ":", "filtered_blast", "=", "[", "]", "seen", "=", "set", "(", ")", "bl", "=", "Blast", "(", "blast_file", ")", "for"...
Read the blast and convert name into coordinates
[ "Read", "the", "blast", "and", "convert", "name", "into", "coordinates" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/compara/synteny.py#L280-L322
train
200,662
tanghaibao/jcvi
jcvi/compara/synteny.py
add_options
def add_options(p, args, dist=10): """ scan and liftover has similar interfaces, so share common options returns opts, files """ p.set_beds() p.add_option("--dist", default=dist, type="int", help="Extent of flanking regions to search [default: %default]") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) blast_file, anchor_file = args return blast_file, anchor_file, opts.dist, opts
python
def add_options(p, args, dist=10): """ scan and liftover has similar interfaces, so share common options returns opts, files """ p.set_beds() p.add_option("--dist", default=dist, type="int", help="Extent of flanking regions to search [default: %default]") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) blast_file, anchor_file = args return blast_file, anchor_file, opts.dist, opts
[ "def", "add_options", "(", "p", ",", "args", ",", "dist", "=", "10", ")", ":", "p", ".", "set_beds", "(", ")", "p", ".", "add_option", "(", "\"--dist\"", ",", "default", "=", "dist", ",", "type", "=", "\"int\"", ",", "help", "=", "\"Extent of flankin...
scan and liftover has similar interfaces, so share common options returns opts, files
[ "scan", "and", "liftover", "has", "similar", "interfaces", "so", "share", "common", "options", "returns", "opts", "files" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/compara/synteny.py#L446-L462
train
200,663
tanghaibao/jcvi
jcvi/compara/synteny.py
layout
def layout(args): """ %prog layout query.subject.simple query.seqids subject.seqids Compute optimal seqids order in a second genome, based on seqids on one genome, given the pairwise blocks in .simple format. """ from jcvi.algorithms.ec import GA_setup, GA_run p = OptionParser(layout.__doc__) p.set_beds() p.set_cpus(cpus=32) opts, args = p.parse_args(args) if len(args) != 3: sys.exit(not p.print_help()) simplefile, qseqids, sseqids = args qbed, sbed, qorder, sorder, is_self = check_beds(simplefile, p, opts) qseqids = qseqids.strip().split(",") sseqids = sseqids.strip().split(",") qseqids_ii = dict((s, i) for i, s in enumerate(qseqids)) sseqids_ii = dict((s, i) for i, s in enumerate(sseqids)) blocks = SimpleFile(simplefile).blocks scores = defaultdict(int) for a, b, c, d, score, orientation, hl in blocks: qi, q = qorder[a] si, s = sorder[c] qseqid, sseqid = q.seqid, s.seqid if sseqid not in sseqids: continue scores[sseqids_ii[sseqid], qseqid] += score data = [] for (a, b), score in sorted(scores.items()): if b not in qseqids_ii: continue data.append((qseqids_ii[b], score)) tour = range(len(qseqids)) toolbox = GA_setup(tour) toolbox.register("evaluate", colinear_evaluate_weights, data=data) tour, fitness = GA_run(toolbox, ngen=100, npop=100, cpus=opts.cpus) tour = [qseqids[x] for x in tour] print(",".join(tour))
python
def layout(args): """ %prog layout query.subject.simple query.seqids subject.seqids Compute optimal seqids order in a second genome, based on seqids on one genome, given the pairwise blocks in .simple format. """ from jcvi.algorithms.ec import GA_setup, GA_run p = OptionParser(layout.__doc__) p.set_beds() p.set_cpus(cpus=32) opts, args = p.parse_args(args) if len(args) != 3: sys.exit(not p.print_help()) simplefile, qseqids, sseqids = args qbed, sbed, qorder, sorder, is_self = check_beds(simplefile, p, opts) qseqids = qseqids.strip().split(",") sseqids = sseqids.strip().split(",") qseqids_ii = dict((s, i) for i, s in enumerate(qseqids)) sseqids_ii = dict((s, i) for i, s in enumerate(sseqids)) blocks = SimpleFile(simplefile).blocks scores = defaultdict(int) for a, b, c, d, score, orientation, hl in blocks: qi, q = qorder[a] si, s = sorder[c] qseqid, sseqid = q.seqid, s.seqid if sseqid not in sseqids: continue scores[sseqids_ii[sseqid], qseqid] += score data = [] for (a, b), score in sorted(scores.items()): if b not in qseqids_ii: continue data.append((qseqids_ii[b], score)) tour = range(len(qseqids)) toolbox = GA_setup(tour) toolbox.register("evaluate", colinear_evaluate_weights, data=data) tour, fitness = GA_run(toolbox, ngen=100, npop=100, cpus=opts.cpus) tour = [qseqids[x] for x in tour] print(",".join(tour))
[ "def", "layout", "(", "args", ")", ":", "from", "jcvi", ".", "algorithms", ".", "ec", "import", "GA_setup", ",", "GA_run", "p", "=", "OptionParser", "(", "layout", ".", "__doc__", ")", "p", ".", "set_beds", "(", ")", "p", ".", "set_cpus", "(", "cpus"...
%prog layout query.subject.simple query.seqids subject.seqids Compute optimal seqids order in a second genome, based on seqids on one genome, given the pairwise blocks in .simple format.
[ "%prog", "layout", "query", ".", "subject", ".", "simple", "query", ".", "seqids", "subject", ".", "seqids" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/compara/synteny.py#L496-L543
train
200,664
tanghaibao/jcvi
jcvi/compara/synteny.py
fromaligns
def fromaligns(args): """ %prog fromaligns out.aligns Convert aligns file (old MCscan output) to anchors file. """ p = OptionParser(fromaligns.__doc__) p.set_outfile() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) alignsfile, = args fp = must_open(alignsfile) fw = must_open(opts.outfile, "w") for row in fp: if row.startswith("## Alignment"): print("###", file=fw) continue if row[0] == '#' or not row.strip(): continue atoms = row.split(':')[-1].split() print("\t".join(atoms[:2]), file=fw) fw.close()
python
def fromaligns(args): """ %prog fromaligns out.aligns Convert aligns file (old MCscan output) to anchors file. """ p = OptionParser(fromaligns.__doc__) p.set_outfile() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) alignsfile, = args fp = must_open(alignsfile) fw = must_open(opts.outfile, "w") for row in fp: if row.startswith("## Alignment"): print("###", file=fw) continue if row[0] == '#' or not row.strip(): continue atoms = row.split(':')[-1].split() print("\t".join(atoms[:2]), file=fw) fw.close()
[ "def", "fromaligns", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "fromaligns", ".", "__doc__", ")", "p", ".", "set_outfile", "(", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!="...
%prog fromaligns out.aligns Convert aligns file (old MCscan output) to anchors file.
[ "%prog", "fromaligns", "out", ".", "aligns" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/compara/synteny.py#L546-L570
train
200,665
tanghaibao/jcvi
jcvi/compara/synteny.py
mcscanq
def mcscanq(args): """ %prog mcscanq query.ids blocksfile Query multiple synteny blocks to get the closest alignment feature. Mostly used for 'highlighting' the lines in the synteny plot, drawn by graphics.karyotype and graphics.synteny. """ p = OptionParser(mcscanq.__doc__) p.add_option("--color", help="Add color highlight, used in plotting") p.add_option("--invert", default=False, action="store_true", help="Invert query and subject [default: %default]") opts, args = p.parse_args(args) if len(args) < 2: sys.exit(not p.print_help()) qids, blocksfile = args b = BlockFile(blocksfile) fp = open(qids) for gene in fp: gene = gene.strip() for line in b.query_gene(gene, color=opts.color, invert=opts.invert): print(line)
python
def mcscanq(args): """ %prog mcscanq query.ids blocksfile Query multiple synteny blocks to get the closest alignment feature. Mostly used for 'highlighting' the lines in the synteny plot, drawn by graphics.karyotype and graphics.synteny. """ p = OptionParser(mcscanq.__doc__) p.add_option("--color", help="Add color highlight, used in plotting") p.add_option("--invert", default=False, action="store_true", help="Invert query and subject [default: %default]") opts, args = p.parse_args(args) if len(args) < 2: sys.exit(not p.print_help()) qids, blocksfile = args b = BlockFile(blocksfile) fp = open(qids) for gene in fp: gene = gene.strip() for line in b.query_gene(gene, color=opts.color, invert=opts.invert): print(line)
[ "def", "mcscanq", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "mcscanq", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--color\"", ",", "help", "=", "\"Add color highlight, used in plotting\"", ")", "p", ".", "add_option", "(", "\"--invert\"",...
%prog mcscanq query.ids blocksfile Query multiple synteny blocks to get the closest alignment feature. Mostly used for 'highlighting' the lines in the synteny plot, drawn by graphics.karyotype and graphics.synteny.
[ "%prog", "mcscanq", "query", ".", "ids", "blocksfile" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/compara/synteny.py#L573-L596
train
200,666
tanghaibao/jcvi
jcvi/compara/synteny.py
spa
def spa(args): """ %prog spa spafiles Convert chromosome ordering from SPA to simple lists. First column is the reference order. """ from jcvi.algorithms.graph import merge_paths from jcvi.utils.cbook import uniqify p = OptionParser(spa.__doc__) p.add_option("--unmapped", default=False, action="store_true", help="Include unmapped scaffolds in the list [default: %default]") opts, args = p.parse_args(args) if len(args) < 1: sys.exit(not p.print_help()) spafiles = args paths = [] mappings = [] missings = [] for spafile in spafiles: fp = open(spafile) path = [] mapping = [] missing = [] for row in fp: if row[0] == '#' or not row.strip(): continue atoms = row.rstrip().split('\t') if len(atoms) == 2: a, c2 = atoms assert a == "unmapped" missing.append(c2) continue c1, c2, orientation = atoms path.append(c1) mapping.append(c2) paths.append(uniqify(path)) mappings.append(mapping) missings.append(missing) ref = merge_paths(paths) print("ref", len(ref), ",".join(ref)) for spafile, mapping, missing in zip(spafiles, mappings, missings): mapping = [x for x in mapping if "random" not in x] mapping = uniqify(mapping) if len(mapping) < 50 and opts.unmapped: mapping = uniqify(mapping + missing) print(spafile, len(mapping), ",".join(mapping))
python
def spa(args): """ %prog spa spafiles Convert chromosome ordering from SPA to simple lists. First column is the reference order. """ from jcvi.algorithms.graph import merge_paths from jcvi.utils.cbook import uniqify p = OptionParser(spa.__doc__) p.add_option("--unmapped", default=False, action="store_true", help="Include unmapped scaffolds in the list [default: %default]") opts, args = p.parse_args(args) if len(args) < 1: sys.exit(not p.print_help()) spafiles = args paths = [] mappings = [] missings = [] for spafile in spafiles: fp = open(spafile) path = [] mapping = [] missing = [] for row in fp: if row[0] == '#' or not row.strip(): continue atoms = row.rstrip().split('\t') if len(atoms) == 2: a, c2 = atoms assert a == "unmapped" missing.append(c2) continue c1, c2, orientation = atoms path.append(c1) mapping.append(c2) paths.append(uniqify(path)) mappings.append(mapping) missings.append(missing) ref = merge_paths(paths) print("ref", len(ref), ",".join(ref)) for spafile, mapping, missing in zip(spafiles, mappings, missings): mapping = [x for x in mapping if "random" not in x] mapping = uniqify(mapping) if len(mapping) < 50 and opts.unmapped: mapping = uniqify(mapping + missing) print(spafile, len(mapping), ",".join(mapping))
[ "def", "spa", "(", "args", ")", ":", "from", "jcvi", ".", "algorithms", ".", "graph", "import", "merge_paths", "from", "jcvi", ".", "utils", ".", "cbook", "import", "uniqify", "p", "=", "OptionParser", "(", "spa", ".", "__doc__", ")", "p", ".", "add_op...
%prog spa spafiles Convert chromosome ordering from SPA to simple lists. First column is the reference order.
[ "%prog", "spa", "spafiles" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/compara/synteny.py#L599-L653
train
200,667
tanghaibao/jcvi
jcvi/compara/synteny.py
rebuild
def rebuild(args): """ %prog rebuild blocksfile blastfile Rebuild anchors file from pre-built blocks file. """ p = OptionParser(rebuild.__doc__) p.add_option("--header", default=False, action="store_true", help="First line is header [default: %default]") p.add_option("--write_blast", default=False, action="store_true", help="Get blast records of rebuilt anchors [default: %default]") p.set_beds() opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) blocksfile, blastfile = args bk = BlockFile(blocksfile, header=opts.header) fw = open("pairs", "w") for a, b, h in bk.iter_all_pairs(): print("\t".join((a, b)), file=fw) fw.close() if opts.write_blast: AnchorFile("pairs").blast(blastfile, "pairs.blast") fw = open("tracks", "w") for g, col in bk.iter_gene_col(): print("\t".join(str(x) for x in (g, col)), file=fw) fw.close()
python
def rebuild(args): """ %prog rebuild blocksfile blastfile Rebuild anchors file from pre-built blocks file. """ p = OptionParser(rebuild.__doc__) p.add_option("--header", default=False, action="store_true", help="First line is header [default: %default]") p.add_option("--write_blast", default=False, action="store_true", help="Get blast records of rebuilt anchors [default: %default]") p.set_beds() opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) blocksfile, blastfile = args bk = BlockFile(blocksfile, header=opts.header) fw = open("pairs", "w") for a, b, h in bk.iter_all_pairs(): print("\t".join((a, b)), file=fw) fw.close() if opts.write_blast: AnchorFile("pairs").blast(blastfile, "pairs.blast") fw = open("tracks", "w") for g, col in bk.iter_gene_col(): print("\t".join(str(x) for x in (g, col)), file=fw) fw.close()
[ "def", "rebuild", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "rebuild", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--header\"", ",", "default", "=", "False", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"First line is head...
%prog rebuild blocksfile blastfile Rebuild anchors file from pre-built blocks file.
[ "%prog", "rebuild", "blocksfile", "blastfile" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/compara/synteny.py#L656-L687
train
200,668
tanghaibao/jcvi
jcvi/compara/synteny.py
coge
def coge(args): """ %prog coge cogefile Convert CoGe file to anchors file. """ p = OptionParser(coge.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) cogefile, = args fp = must_open(cogefile) cogefile = cogefile.replace(".gz", "") ksfile = cogefile + ".ks" anchorsfile = cogefile + ".anchors" fw_ks = must_open(ksfile, "w") fw_ac = must_open(anchorsfile, "w") tag = "###" print(tag, file=fw_ks) for header, lines in read_block(fp, tag): print(tag, file=fw_ac) lines = list(lines) for line in lines: if line[0] == '#': continue ks, ka, achr, a, astart, astop, bchr, \ b, bstart, bstop, ev, ss = line.split() a = a.split("||")[3] b = b.split("||")[3] print("\t".join((a, b, ev)), file=fw_ac) print(",".join((";".join((a, b)), ks, ka, ks, ka)), file=fw_ks) fw_ks.close() fw_ac.close()
python
def coge(args): """ %prog coge cogefile Convert CoGe file to anchors file. """ p = OptionParser(coge.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) cogefile, = args fp = must_open(cogefile) cogefile = cogefile.replace(".gz", "") ksfile = cogefile + ".ks" anchorsfile = cogefile + ".anchors" fw_ks = must_open(ksfile, "w") fw_ac = must_open(anchorsfile, "w") tag = "###" print(tag, file=fw_ks) for header, lines in read_block(fp, tag): print(tag, file=fw_ac) lines = list(lines) for line in lines: if line[0] == '#': continue ks, ka, achr, a, astart, astop, bchr, \ b, bstart, bstop, ev, ss = line.split() a = a.split("||")[3] b = b.split("||")[3] print("\t".join((a, b, ev)), file=fw_ac) print(",".join((";".join((a, b)), ks, ka, ks, ka)), file=fw_ks) fw_ks.close() fw_ac.close()
[ "def", "coge", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "coge", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "1", ":", "sys", ".", "exit", "(", "not",...
%prog coge cogefile Convert CoGe file to anchors file.
[ "%prog", "coge", "cogefile" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/compara/synteny.py#L690-L726
train
200,669
tanghaibao/jcvi
jcvi/compara/synteny.py
matrix
def matrix(args): """ %prog matrix all.bed anchorfile matrixfile Make oxford grid based on anchors file. """ p = OptionParser(matrix.__doc__) p.add_option("--seqids", help="File with seqids [default: %default]") opts, args = p.parse_args(args) if len(args) != 3: sys.exit(not p.print_help()) bedfile, anchorfile, matrixfile = args ac = AnchorFile(anchorfile) seqidsfile = opts.seqids if seqidsfile: seqids = SetFile(seqidsfile, delimiter=',') order = Bed(bedfile).order blocks = ac.blocks m = defaultdict(int) fw = open(matrixfile, "w") aseqids = set() bseqids = set() for block in blocks: a, b, scores = zip(*block) ai, af = order[a[0]] bi, bf = order[b[0]] aseqid = af.seqid bseqid = bf.seqid if seqidsfile: if (aseqid not in seqids) or (bseqid not in seqids): continue m[(aseqid, bseqid)] += len(block) aseqids.add(aseqid) bseqids.add(bseqid) aseqids = list(aseqids) bseqids = list(bseqids) print("\t".join(["o"] + bseqids), file=fw) for aseqid in aseqids: print("\t".join([aseqid] + \ [str(m[(aseqid, x)]) for x in bseqids]), file=fw)
python
def matrix(args): """ %prog matrix all.bed anchorfile matrixfile Make oxford grid based on anchors file. """ p = OptionParser(matrix.__doc__) p.add_option("--seqids", help="File with seqids [default: %default]") opts, args = p.parse_args(args) if len(args) != 3: sys.exit(not p.print_help()) bedfile, anchorfile, matrixfile = args ac = AnchorFile(anchorfile) seqidsfile = opts.seqids if seqidsfile: seqids = SetFile(seqidsfile, delimiter=',') order = Bed(bedfile).order blocks = ac.blocks m = defaultdict(int) fw = open(matrixfile, "w") aseqids = set() bseqids = set() for block in blocks: a, b, scores = zip(*block) ai, af = order[a[0]] bi, bf = order[b[0]] aseqid = af.seqid bseqid = bf.seqid if seqidsfile: if (aseqid not in seqids) or (bseqid not in seqids): continue m[(aseqid, bseqid)] += len(block) aseqids.add(aseqid) bseqids.add(bseqid) aseqids = list(aseqids) bseqids = list(bseqids) print("\t".join(["o"] + bseqids), file=fw) for aseqid in aseqids: print("\t".join([aseqid] + \ [str(m[(aseqid, x)]) for x in bseqids]), file=fw)
[ "def", "matrix", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "matrix", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--seqids\"", ",", "help", "=", "\"File with seqids [default: %default]\"", ")", "opts", ",", "args", "=", "p", ".", "parse...
%prog matrix all.bed anchorfile matrixfile Make oxford grid based on anchors file.
[ "%prog", "matrix", "all", ".", "bed", "anchorfile", "matrixfile" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/compara/synteny.py#L729-L773
train
200,670
tanghaibao/jcvi
jcvi/compara/synteny.py
summary
def summary(args): """ %prog summary anchorfile Provide statistics for pairwise blocks. """ from jcvi.utils.cbook import SummaryStats p = OptionParser(summary.__doc__) p.add_option("--prefix", help="Generate per block stats [default: %default]") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) anchorfile, = args ac = AnchorFile(anchorfile) clusters = ac.blocks if clusters == [[]]: logging.debug("A total of 0 anchor was found. Aborted.") raise ValueError("A total of 0 anchor was found. Aborted.") nclusters = len(clusters) nanchors = [len(c) for c in clusters] nranchors = [_score(c) for c in clusters] # non-redundant anchors print("A total of {0} (NR:{1}) anchors found in {2} clusters.".\ format(sum(nanchors), sum(nranchors), nclusters), file=sys.stderr) print("Stats:", SummaryStats(nanchors), file=sys.stderr) print("NR stats:", SummaryStats(nranchors), file=sys.stderr) prefix = opts.prefix if prefix: pad = len(str(nclusters)) for i, c in enumerate(clusters): block_id = "{0}{1:0{2}d}".format(prefix, i + 1, pad) print("\t".join((block_id, str(len(c)))))
python
def summary(args): """ %prog summary anchorfile Provide statistics for pairwise blocks. """ from jcvi.utils.cbook import SummaryStats p = OptionParser(summary.__doc__) p.add_option("--prefix", help="Generate per block stats [default: %default]") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) anchorfile, = args ac = AnchorFile(anchorfile) clusters = ac.blocks if clusters == [[]]: logging.debug("A total of 0 anchor was found. Aborted.") raise ValueError("A total of 0 anchor was found. Aborted.") nclusters = len(clusters) nanchors = [len(c) for c in clusters] nranchors = [_score(c) for c in clusters] # non-redundant anchors print("A total of {0} (NR:{1}) anchors found in {2} clusters.".\ format(sum(nanchors), sum(nranchors), nclusters), file=sys.stderr) print("Stats:", SummaryStats(nanchors), file=sys.stderr) print("NR stats:", SummaryStats(nranchors), file=sys.stderr) prefix = opts.prefix if prefix: pad = len(str(nclusters)) for i, c in enumerate(clusters): block_id = "{0}{1:0{2}d}".format(prefix, i + 1, pad) print("\t".join((block_id, str(len(c)))))
[ "def", "summary", "(", "args", ")", ":", "from", "jcvi", ".", "utils", ".", "cbook", "import", "SummaryStats", "p", "=", "OptionParser", "(", "summary", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--prefix\"", ",", "help", "=", "\"Generate per blo...
%prog summary anchorfile Provide statistics for pairwise blocks.
[ "%prog", "summary", "anchorfile" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/compara/synteny.py#L1030-L1065
train
200,671
tanghaibao/jcvi
jcvi/compara/synteny.py
stats
def stats(args): """ %prog stats blocksfile Provide statistics for MCscan-style blocks. The count of homologs in each pivot gene is recorded. """ from jcvi.utils.cbook import percentage p = OptionParser(stats.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) blocksfile, = args fp = open(blocksfile) counts = defaultdict(int) total = orthologous = 0 for row in fp: atoms = row.rstrip().split("\t") hits = [x for x in atoms[1:] if x != '.'] counts[len(hits)] += 1 total += 1 if atoms[1] != '.': orthologous += 1 print("Total lines: {0}".format(total), file=sys.stderr) for i, n in sorted(counts.items()): print("Count {0}: {1}".format(i, percentage(n, total)), file=sys.stderr) print(file=sys.stderr) matches = sum(n for i, n in counts.items() if i != 0) print("Total lines with matches: {0}".\ format(percentage(matches, total)), file=sys.stderr) for i, n in sorted(counts.items()): if i == 0: continue print("Count {0}: {1}".format(i, percentage(n, matches)), file=sys.stderr) print(file=sys.stderr) print("Orthologous matches: {0}".\ format(percentage(orthologous, matches)), file=sys.stderr)
python
def stats(args): """ %prog stats blocksfile Provide statistics for MCscan-style blocks. The count of homologs in each pivot gene is recorded. """ from jcvi.utils.cbook import percentage p = OptionParser(stats.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) blocksfile, = args fp = open(blocksfile) counts = defaultdict(int) total = orthologous = 0 for row in fp: atoms = row.rstrip().split("\t") hits = [x for x in atoms[1:] if x != '.'] counts[len(hits)] += 1 total += 1 if atoms[1] != '.': orthologous += 1 print("Total lines: {0}".format(total), file=sys.stderr) for i, n in sorted(counts.items()): print("Count {0}: {1}".format(i, percentage(n, total)), file=sys.stderr) print(file=sys.stderr) matches = sum(n for i, n in counts.items() if i != 0) print("Total lines with matches: {0}".\ format(percentage(matches, total)), file=sys.stderr) for i, n in sorted(counts.items()): if i == 0: continue print("Count {0}: {1}".format(i, percentage(n, matches)), file=sys.stderr) print(file=sys.stderr) print("Orthologous matches: {0}".\ format(percentage(orthologous, matches)), file=sys.stderr)
[ "def", "stats", "(", "args", ")", ":", "from", "jcvi", ".", "utils", ".", "cbook", "import", "percentage", "p", "=", "OptionParser", "(", "stats", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", ...
%prog stats blocksfile Provide statistics for MCscan-style blocks. The count of homologs in each pivot gene is recorded.
[ "%prog", "stats", "blocksfile" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/compara/synteny.py#L1068-L1112
train
200,672
tanghaibao/jcvi
jcvi/compara/synteny.py
write_details
def write_details(fw, details, bed): """ Write per gene depth to file """ for a, b, depth in details: for i in xrange(a, b): gi = bed[i].accn print("\t".join((gi, str(depth))), file=fw)
python
def write_details(fw, details, bed): """ Write per gene depth to file """ for a, b, depth in details: for i in xrange(a, b): gi = bed[i].accn print("\t".join((gi, str(depth))), file=fw)
[ "def", "write_details", "(", "fw", ",", "details", ",", "bed", ")", ":", "for", "a", ",", "b", ",", "depth", "in", "details", ":", "for", "i", "in", "xrange", "(", "a", ",", "b", ")", ":", "gi", "=", "bed", "[", "i", "]", ".", "accn", "print"...
Write per gene depth to file
[ "Write", "per", "gene", "depth", "to", "file" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/compara/synteny.py#L1268-L1275
train
200,673
tanghaibao/jcvi
jcvi/compara/synteny.py
AnchorFile.blast
def blast(self, blastfile=None, outfile=None): """ convert anchor file to 12 col blast file """ from jcvi.formats.blast import BlastSlow, BlastLineByConversion if not outfile: outfile = self.filename + ".blast" if blastfile is not None: blasts = BlastSlow(blastfile).to_dict() else: blasts = None fw = must_open(outfile, "w", checkexists=True) nlines = 0 for a, b, id in self.iter_pairs(): if (a, b) in blasts: bline = blasts[(a, b)] elif (b, a) in blasts: bline = blasts[(b, a)] else: line = "\t".join((a, b)) bline = BlastLineByConversion(line, mode="110000000000") print(bline, file=fw) nlines += 1 fw.close() logging.debug("A total of {0} BLAST lines written to `{1}`."\ .format(nlines, outfile)) return outfile
python
def blast(self, blastfile=None, outfile=None): """ convert anchor file to 12 col blast file """ from jcvi.formats.blast import BlastSlow, BlastLineByConversion if not outfile: outfile = self.filename + ".blast" if blastfile is not None: blasts = BlastSlow(blastfile).to_dict() else: blasts = None fw = must_open(outfile, "w", checkexists=True) nlines = 0 for a, b, id in self.iter_pairs(): if (a, b) in blasts: bline = blasts[(a, b)] elif (b, a) in blasts: bline = blasts[(b, a)] else: line = "\t".join((a, b)) bline = BlastLineByConversion(line, mode="110000000000") print(bline, file=fw) nlines += 1 fw.close() logging.debug("A total of {0} BLAST lines written to `{1}`."\ .format(nlines, outfile)) return outfile
[ "def", "blast", "(", "self", ",", "blastfile", "=", "None", ",", "outfile", "=", "None", ")", ":", "from", "jcvi", ".", "formats", ".", "blast", "import", "BlastSlow", ",", "BlastLineByConversion", "if", "not", "outfile", ":", "outfile", "=", "self", "."...
convert anchor file to 12 col blast file
[ "convert", "anchor", "file", "to", "12", "col", "blast", "file" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/compara/synteny.py#L68-L100
train
200,674
tanghaibao/jcvi
jcvi/formats/blast.py
filter
def filter(args): """ %prog filter test.blast Produce a new blast file and filter based on: - score: >= cutoff - pctid: >= cutoff - hitlen: >= cutoff - evalue: <= cutoff - ids: valid ids Use --inverse to obtain the complementary records for the criteria above. - noself: remove self-self hits """ p = OptionParser(filter.__doc__) p.add_option("--score", dest="score", default=0, type="int", help="Score cutoff") p.set_align(pctid=95, hitlen=100, evalue=.01) p.add_option("--noself", default=False, action="store_true", help="Remove self-self hits") p.add_option("--ids", help="Path to file with ids to retain") p.add_option("--inverse", default=False, action="store_true", help="Similar to grep -v, inverse") p.set_outfile(outfile=None) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) if opts.ids: ids = set() for row in must_open(opts.ids): if row[0] == "#": continue row = row.replace(",", "\t") ids.update(row.split()) else: ids = None blastfile, = args inverse = opts.inverse outfile = opts.outfile fp = must_open(blastfile) score, pctid, hitlen, evalue, noself = \ opts.score, opts.pctid, opts.hitlen, opts.evalue, opts.noself newblastfile = blastfile + ".P{0}L{1}".format(int(pctid), hitlen) if \ outfile is None else outfile if inverse: newblastfile += ".inverse" fw = must_open(newblastfile, "w") for row in fp: if row[0] == '#': continue c = BlastLine(row) if ids: if c.query in ids and c.subject in ids: noids = False else: noids = True else: noids = None remove = c.score < score or \ c.pctid < pctid or \ c.hitlen < hitlen or \ c.evalue > evalue or \ noids if inverse: remove = not remove remove = remove or (noself and c.query == c.subject) if not remove: print(row.rstrip(), file=fw) fw.close() return newblastfile
python
def filter(args): """ %prog filter test.blast Produce a new blast file and filter based on: - score: >= cutoff - pctid: >= cutoff - hitlen: >= cutoff - evalue: <= cutoff - ids: valid ids Use --inverse to obtain the complementary records for the criteria above. - noself: remove self-self hits """ p = OptionParser(filter.__doc__) p.add_option("--score", dest="score", default=0, type="int", help="Score cutoff") p.set_align(pctid=95, hitlen=100, evalue=.01) p.add_option("--noself", default=False, action="store_true", help="Remove self-self hits") p.add_option("--ids", help="Path to file with ids to retain") p.add_option("--inverse", default=False, action="store_true", help="Similar to grep -v, inverse") p.set_outfile(outfile=None) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) if opts.ids: ids = set() for row in must_open(opts.ids): if row[0] == "#": continue row = row.replace(",", "\t") ids.update(row.split()) else: ids = None blastfile, = args inverse = opts.inverse outfile = opts.outfile fp = must_open(blastfile) score, pctid, hitlen, evalue, noself = \ opts.score, opts.pctid, opts.hitlen, opts.evalue, opts.noself newblastfile = blastfile + ".P{0}L{1}".format(int(pctid), hitlen) if \ outfile is None else outfile if inverse: newblastfile += ".inverse" fw = must_open(newblastfile, "w") for row in fp: if row[0] == '#': continue c = BlastLine(row) if ids: if c.query in ids and c.subject in ids: noids = False else: noids = True else: noids = None remove = c.score < score or \ c.pctid < pctid or \ c.hitlen < hitlen or \ c.evalue > evalue or \ noids if inverse: remove = not remove remove = remove or (noself and c.query == c.subject) if not remove: print(row.rstrip(), file=fw) fw.close() return newblastfile
[ "def", "filter", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "filter", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--score\"", ",", "dest", "=", "\"score\"", ",", "default", "=", "0", ",", "type", "=", "\"int\"", ",", "help", "=", ...
%prog filter test.blast Produce a new blast file and filter based on: - score: >= cutoff - pctid: >= cutoff - hitlen: >= cutoff - evalue: <= cutoff - ids: valid ids Use --inverse to obtain the complementary records for the criteria above. - noself: remove self-self hits
[ "%prog", "filter", "test", ".", "blast" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/blast.py#L255-L336
train
200,675
tanghaibao/jcvi
jcvi/formats/blast.py
collect_gaps
def collect_gaps(blast, use_subject=False): """ Collect the gaps between adjacent HSPs in the BLAST file. """ key = lambda x: x.sstart if use_subject else x.qstart blast.sort(key=key) for a, b in zip(blast, blast[1:]): if use_subject: if a.sstop < b.sstart: yield b.sstart - a.sstop else: if a.qstop < b.qstart: yield b.qstart - a.qstop
python
def collect_gaps(blast, use_subject=False): """ Collect the gaps between adjacent HSPs in the BLAST file. """ key = lambda x: x.sstart if use_subject else x.qstart blast.sort(key=key) for a, b in zip(blast, blast[1:]): if use_subject: if a.sstop < b.sstart: yield b.sstart - a.sstop else: if a.qstop < b.qstart: yield b.qstart - a.qstop
[ "def", "collect_gaps", "(", "blast", ",", "use_subject", "=", "False", ")", ":", "key", "=", "lambda", "x", ":", "x", ".", "sstart", "if", "use_subject", "else", "x", ".", "qstart", "blast", ".", "sort", "(", "key", "=", "key", ")", "for", "a", ","...
Collect the gaps between adjacent HSPs in the BLAST file.
[ "Collect", "the", "gaps", "between", "adjacent", "HSPs", "in", "the", "BLAST", "file", "." ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/blast.py#L367-L380
train
200,676
tanghaibao/jcvi
jcvi/formats/blast.py
gaps
def gaps(args): """ %prog gaps A_vs_B.blast Find distribution of gap sizes betwen adjacent HSPs. """ p = OptionParser(gaps.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) blastfile, = args blast = BlastSlow(blastfile) logging.debug("A total of {} records imported".format(len(blast))) query_gaps = list(collect_gaps(blast)) subject_gaps = list(collect_gaps(blast, use_subject=True)) logging.debug("Query gaps: {} Subject gaps: {}"\ .format(len(query_gaps), len(subject_gaps))) from jcvi.graphics.base import savefig import seaborn as sns sns.distplot(query_gaps) savefig("query_gaps.pdf")
python
def gaps(args): """ %prog gaps A_vs_B.blast Find distribution of gap sizes betwen adjacent HSPs. """ p = OptionParser(gaps.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) blastfile, = args blast = BlastSlow(blastfile) logging.debug("A total of {} records imported".format(len(blast))) query_gaps = list(collect_gaps(blast)) subject_gaps = list(collect_gaps(blast, use_subject=True)) logging.debug("Query gaps: {} Subject gaps: {}"\ .format(len(query_gaps), len(subject_gaps))) from jcvi.graphics.base import savefig import seaborn as sns sns.distplot(query_gaps) savefig("query_gaps.pdf")
[ "def", "gaps", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "gaps", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "1", ":", "sys", ".", "exit", "(", "not",...
%prog gaps A_vs_B.blast Find distribution of gap sizes betwen adjacent HSPs.
[ "%prog", "gaps", "A_vs_B", ".", "blast" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/blast.py#L383-L408
train
200,677
tanghaibao/jcvi
jcvi/formats/blast.py
rbbh
def rbbh(args): """ %prog rbbh A_vs_B.blast B_vs_A.blast Identify the reciprocal best blast hit for each query sequence in set A when compared to set B. This program assumes that the BLAST results have already been filtered based on a combination of %id, %cov, e-value cutoffs. BLAST output should be in tabular `-m 8` format. """ p = OptionParser(rbbh.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) abfile, bafile, = args ab = Blast(abfile) ba = Blast(bafile) ab_hits = ab.best_hits ba_hits = ba.best_hits for aquery in ab_hits: ahit = ab_hits[aquery].subject ba_bline = ba_hits.get(ahit) if ba_bline: bhit = ba_bline.subject if bhit == aquery: print("\t".join(str(x) for x in (aquery, ahit)))
python
def rbbh(args): """ %prog rbbh A_vs_B.blast B_vs_A.blast Identify the reciprocal best blast hit for each query sequence in set A when compared to set B. This program assumes that the BLAST results have already been filtered based on a combination of %id, %cov, e-value cutoffs. BLAST output should be in tabular `-m 8` format. """ p = OptionParser(rbbh.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) abfile, bafile, = args ab = Blast(abfile) ba = Blast(bafile) ab_hits = ab.best_hits ba_hits = ba.best_hits for aquery in ab_hits: ahit = ab_hits[aquery].subject ba_bline = ba_hits.get(ahit) if ba_bline: bhit = ba_bline.subject if bhit == aquery: print("\t".join(str(x) for x in (aquery, ahit)))
[ "def", "rbbh", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "rbbh", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "2", ":", "sys", ".", "exit", "(", "not",...
%prog rbbh A_vs_B.blast B_vs_A.blast Identify the reciprocal best blast hit for each query sequence in set A when compared to set B. This program assumes that the BLAST results have already been filtered based on a combination of %id, %cov, e-value cutoffs. BLAST output should be in tabular `-m 8` format.
[ "%prog", "rbbh", "A_vs_B", ".", "blast", "B_vs_A", ".", "blast" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/blast.py#L411-L441
train
200,678
tanghaibao/jcvi
jcvi/formats/blast.py
score
def score(args): """ %prog score blastfile query.fasta A.ids Add up the scores for each query seq. Go through the lines and for each query sequence, add up the scores when subject is in each pile by A.ids. """ from jcvi.formats.base import SetFile from jcvi.formats.fasta import Fasta p = OptionParser(score.__doc__) opts, args = p.parse_args(args) if len(args) != 3: sys.exit(not p.print_help()) blastfile, fastafile, idsfile = args ids = SetFile(idsfile) blast = Blast(blastfile) scores = defaultdict(int) for b in blast: query = b.query subject = b.subject if subject not in ids: continue scores[query] += b.score logging.debug("A total of {0} ids loaded.".format(len(ids))) f = Fasta(fastafile) for s in f.iterkeys_ordered(): sc = scores.get(s, 0) print("\t".join((s, str(sc))))
python
def score(args): """ %prog score blastfile query.fasta A.ids Add up the scores for each query seq. Go through the lines and for each query sequence, add up the scores when subject is in each pile by A.ids. """ from jcvi.formats.base import SetFile from jcvi.formats.fasta import Fasta p = OptionParser(score.__doc__) opts, args = p.parse_args(args) if len(args) != 3: sys.exit(not p.print_help()) blastfile, fastafile, idsfile = args ids = SetFile(idsfile) blast = Blast(blastfile) scores = defaultdict(int) for b in blast: query = b.query subject = b.subject if subject not in ids: continue scores[query] += b.score logging.debug("A total of {0} ids loaded.".format(len(ids))) f = Fasta(fastafile) for s in f.iterkeys_ordered(): sc = scores.get(s, 0) print("\t".join((s, str(sc))))
[ "def", "score", "(", "args", ")", ":", "from", "jcvi", ".", "formats", ".", "base", "import", "SetFile", "from", "jcvi", ".", "formats", ".", "fasta", "import", "Fasta", "p", "=", "OptionParser", "(", "score", ".", "__doc__", ")", "opts", ",", "args", ...
%prog score blastfile query.fasta A.ids Add up the scores for each query seq. Go through the lines and for each query sequence, add up the scores when subject is in each pile by A.ids.
[ "%prog", "score", "blastfile", "query", ".", "fasta", "A", ".", "ids" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/blast.py#L444-L477
train
200,679
tanghaibao/jcvi
jcvi/formats/blast.py
annotation
def annotation(args): """ %prog annotation blastfile > annotations Create simple two column files from the first two coluns in blastfile. Use --queryids and --subjectids to switch IDs or descriptions. """ from jcvi.formats.base import DictFile p = OptionParser(annotation.__doc__) p.add_option("--queryids", help="Query IDS file to switch [default: %default]") p.add_option("--subjectids", help="Subject IDS file to switch [default: %default]") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) blastfile, = args d = "\t" qids = DictFile(opts.queryids, delimiter=d) if opts.queryids else None sids = DictFile(opts.subjectids, delimiter=d) if opts.subjectids else None blast = Blast(blastfile) for b in blast: query, subject = b.query, b.subject if qids: query = qids[query] if sids: subject = sids[subject] print("\t".join((query, subject)))
python
def annotation(args): """ %prog annotation blastfile > annotations Create simple two column files from the first two coluns in blastfile. Use --queryids and --subjectids to switch IDs or descriptions. """ from jcvi.formats.base import DictFile p = OptionParser(annotation.__doc__) p.add_option("--queryids", help="Query IDS file to switch [default: %default]") p.add_option("--subjectids", help="Subject IDS file to switch [default: %default]") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) blastfile, = args d = "\t" qids = DictFile(opts.queryids, delimiter=d) if opts.queryids else None sids = DictFile(opts.subjectids, delimiter=d) if opts.subjectids else None blast = Blast(blastfile) for b in blast: query, subject = b.query, b.subject if qids: query = qids[query] if sids: subject = sids[subject] print("\t".join((query, subject)))
[ "def", "annotation", "(", "args", ")", ":", "from", "jcvi", ".", "formats", ".", "base", "import", "DictFile", "p", "=", "OptionParser", "(", "annotation", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--queryids\"", ",", "help", "=", "\"Query IDS f...
%prog annotation blastfile > annotations Create simple two column files from the first two coluns in blastfile. Use --queryids and --subjectids to switch IDs or descriptions.
[ "%prog", "annotation", "blastfile", ">", "annotations" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/blast.py#L480-L509
train
200,680
tanghaibao/jcvi
jcvi/formats/blast.py
completeness
def completeness(args): """ %prog completeness blastfile ref.fasta > outfile Print statistics for each gene, the coverage of the alignment onto the best hit, as an indicator for completeness of the gene model. For example, one might BLAST sugarcane ESTs against sorghum annotations as reference, to find full-length transcripts. """ from jcvi.utils.range import range_minmax from jcvi.utils.cbook import SummaryStats p = OptionParser(completeness.__doc__) p.add_option("--ids", help="Save ids that are over 50% complete [default: %default]") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) blastfile, fastafile = args idsfile = opts.ids f = Sizes(fastafile).mapping b = BlastSlow(blastfile) valid = [] data = [] cutoff = 50 for query, blines in groupby(b, key=lambda x: x.query): blines = list(blines) ranges = [(x.sstart, x.sstop) for x in blines] b = blines[0] query, subject = b.query, b.subject rmin, rmax = range_minmax(ranges) subject_len = f[subject] nterminal_dist = rmin - 1 cterminal_dist = subject_len - rmax covered = (rmax - rmin + 1) * 100 / subject_len if covered > cutoff: valid.append(query) data.append((nterminal_dist, cterminal_dist, covered)) print("\t".join(str(x) for x in (query, subject, nterminal_dist, cterminal_dist, covered))) nd, cd, cv = zip(*data) m = "Total: {0}, Coverage > {1}%: {2}\n".\ format(len(data), cutoff, len(valid)) m += "N-terminal: {0}\n".format(SummaryStats(nd)) m += "C-terminal: {0}\n".format(SummaryStats(cd)) m += "Coverage: {0}".format(SummaryStats(cv)) print(m, file=sys.stderr) if idsfile: fw = open(idsfile, "w") print("\n".join(valid), file=fw) logging.debug("A total of {0} ids (cov > {1} %) written to `{2}`.".\ format(len(valid), cutoff, idsfile)) fw.close()
python
def completeness(args): """ %prog completeness blastfile ref.fasta > outfile Print statistics for each gene, the coverage of the alignment onto the best hit, as an indicator for completeness of the gene model. For example, one might BLAST sugarcane ESTs against sorghum annotations as reference, to find full-length transcripts. """ from jcvi.utils.range import range_minmax from jcvi.utils.cbook import SummaryStats p = OptionParser(completeness.__doc__) p.add_option("--ids", help="Save ids that are over 50% complete [default: %default]") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) blastfile, fastafile = args idsfile = opts.ids f = Sizes(fastafile).mapping b = BlastSlow(blastfile) valid = [] data = [] cutoff = 50 for query, blines in groupby(b, key=lambda x: x.query): blines = list(blines) ranges = [(x.sstart, x.sstop) for x in blines] b = blines[0] query, subject = b.query, b.subject rmin, rmax = range_minmax(ranges) subject_len = f[subject] nterminal_dist = rmin - 1 cterminal_dist = subject_len - rmax covered = (rmax - rmin + 1) * 100 / subject_len if covered > cutoff: valid.append(query) data.append((nterminal_dist, cterminal_dist, covered)) print("\t".join(str(x) for x in (query, subject, nterminal_dist, cterminal_dist, covered))) nd, cd, cv = zip(*data) m = "Total: {0}, Coverage > {1}%: {2}\n".\ format(len(data), cutoff, len(valid)) m += "N-terminal: {0}\n".format(SummaryStats(nd)) m += "C-terminal: {0}\n".format(SummaryStats(cd)) m += "Coverage: {0}".format(SummaryStats(cv)) print(m, file=sys.stderr) if idsfile: fw = open(idsfile, "w") print("\n".join(valid), file=fw) logging.debug("A total of {0} ids (cov > {1} %) written to `{2}`.".\ format(len(valid), cutoff, idsfile)) fw.close()
[ "def", "completeness", "(", "args", ")", ":", "from", "jcvi", ".", "utils", ".", "range", "import", "range_minmax", "from", "jcvi", ".", "utils", ".", "cbook", "import", "SummaryStats", "p", "=", "OptionParser", "(", "completeness", ".", "__doc__", ")", "p...
%prog completeness blastfile ref.fasta > outfile Print statistics for each gene, the coverage of the alignment onto the best hit, as an indicator for completeness of the gene model. For example, one might BLAST sugarcane ESTs against sorghum annotations as reference, to find full-length transcripts.
[ "%prog", "completeness", "blastfile", "ref", ".", "fasta", ">", "outfile" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/blast.py#L512-L572
train
200,681
tanghaibao/jcvi
jcvi/formats/blast.py
annotate
def annotate(args): """ %prog annotate blastfile query.fasta subject.fasta Annotate overlap types (dovetail, contained, etc) in BLAST tabular file. """ from jcvi.assembly.goldenpath import Cutoff, Overlap, Overlap_types p = OptionParser(annotate.__doc__) p.set_align(pctid=94, hitlen=500) p.add_option("--hang", default=500, type="int", help="Maximum overhang length") opts, args = p.parse_args(args) if len(args) != 3: sys.exit(not p.print_help()) blastfile, afasta, bfasta = args fp = must_open(blastfile) asizes = Sizes(afasta).mapping bsizes = Sizes(bfasta).mapping cutoff = Cutoff(opts.pctid, opts.hitlen, opts.hang) logging.debug(str(cutoff)) for row in fp: b = BlastLine(row) asize = asizes[b.query] bsize = bsizes[b.subject] if b.query == b.subject: continue ov = Overlap(b, asize, bsize, cutoff) if ov.otype: ov.print_graphic() print("{0}\t{1}".format(b, Overlap_types[ov.otype]))
python
def annotate(args): """ %prog annotate blastfile query.fasta subject.fasta Annotate overlap types (dovetail, contained, etc) in BLAST tabular file. """ from jcvi.assembly.goldenpath import Cutoff, Overlap, Overlap_types p = OptionParser(annotate.__doc__) p.set_align(pctid=94, hitlen=500) p.add_option("--hang", default=500, type="int", help="Maximum overhang length") opts, args = p.parse_args(args) if len(args) != 3: sys.exit(not p.print_help()) blastfile, afasta, bfasta = args fp = must_open(blastfile) asizes = Sizes(afasta).mapping bsizes = Sizes(bfasta).mapping cutoff = Cutoff(opts.pctid, opts.hitlen, opts.hang) logging.debug(str(cutoff)) for row in fp: b = BlastLine(row) asize = asizes[b.query] bsize = bsizes[b.subject] if b.query == b.subject: continue ov = Overlap(b, asize, bsize, cutoff) if ov.otype: ov.print_graphic() print("{0}\t{1}".format(b, Overlap_types[ov.otype]))
[ "def", "annotate", "(", "args", ")", ":", "from", "jcvi", ".", "assembly", ".", "goldenpath", "import", "Cutoff", ",", "Overlap", ",", "Overlap_types", "p", "=", "OptionParser", "(", "annotate", ".", "__doc__", ")", "p", ".", "set_align", "(", "pctid", "...
%prog annotate blastfile query.fasta subject.fasta Annotate overlap types (dovetail, contained, etc) in BLAST tabular file.
[ "%prog", "annotate", "blastfile", "query", ".", "fasta", "subject", ".", "fasta" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/blast.py#L575-L607
train
200,682
tanghaibao/jcvi
jcvi/formats/blast.py
top10
def top10(args): """ %prog top10 blastfile.best Count the most frequent 10 hits. Usually the BLASTFILE needs to be screened the get the best match. You can also provide an .ids file to query the ids. For example the ids file can contain the seqid to species mapping. The ids file is two-column, and can sometimes be generated by `jcvi.formats.fasta ids --description`. """ from jcvi.formats.base import DictFile p = OptionParser(top10.__doc__) p.add_option("--top", default=10, type="int", help="Top N taxa to extract [default: %default]") p.add_option("--ids", default=None, help="Two column ids file to query seqid [default: %default]") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) blastfile, = args mapping = DictFile(opts.ids, delimiter="\t") if opts.ids else {} cmd = "cut -f2 {0}".format(blastfile) cmd += " | sort | uniq -c | sort -k1,1nr | head -n {0}".format(opts.top) fp = popen(cmd) for row in fp: count, seqid = row.split() nseqid = mapping.get(seqid, seqid) print("\t".join((count, nseqid)))
python
def top10(args): """ %prog top10 blastfile.best Count the most frequent 10 hits. Usually the BLASTFILE needs to be screened the get the best match. You can also provide an .ids file to query the ids. For example the ids file can contain the seqid to species mapping. The ids file is two-column, and can sometimes be generated by `jcvi.formats.fasta ids --description`. """ from jcvi.formats.base import DictFile p = OptionParser(top10.__doc__) p.add_option("--top", default=10, type="int", help="Top N taxa to extract [default: %default]") p.add_option("--ids", default=None, help="Two column ids file to query seqid [default: %default]") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) blastfile, = args mapping = DictFile(opts.ids, delimiter="\t") if opts.ids else {} cmd = "cut -f2 {0}".format(blastfile) cmd += " | sort | uniq -c | sort -k1,1nr | head -n {0}".format(opts.top) fp = popen(cmd) for row in fp: count, seqid = row.split() nseqid = mapping.get(seqid, seqid) print("\t".join((count, nseqid)))
[ "def", "top10", "(", "args", ")", ":", "from", "jcvi", ".", "formats", ".", "base", "import", "DictFile", "p", "=", "OptionParser", "(", "top10", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--top\"", ",", "default", "=", "10", ",", "type", "...
%prog top10 blastfile.best Count the most frequent 10 hits. Usually the BLASTFILE needs to be screened the get the best match. You can also provide an .ids file to query the ids. For example the ids file can contain the seqid to species mapping. The ids file is two-column, and can sometimes be generated by `jcvi.formats.fasta ids --description`.
[ "%prog", "top10", "blastfile", ".", "best" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/blast.py#L610-L642
train
200,683
tanghaibao/jcvi
jcvi/formats/blast.py
cscore
def cscore(args): """ %prog cscore blastfile > cscoreOut See supplementary info for sea anemone genome paper, C-score formula: cscore(A,B) = score(A,B) / max(best score for A, best score for B) A C-score of one is the same as reciprocal best hit (RBH). Output file will be 3-column (query, subject, cscore). Use --cutoff to select a different cutoff. """ from jcvi.utils.cbook import gene_name p = OptionParser(cscore.__doc__) p.add_option("--cutoff", default=.9999, type="float", help="Minimum C-score to report [default: %default]") p.add_option("--pct", default=False, action="store_true", help="Also include pct as last column [default: %default]") p.add_option("--writeblast", default=False, action="store_true", help="Also write filtered blast file [default: %default]") p.set_stripnames() p.set_outfile() opts, args = p.parse_args(args) ostrip = opts.strip_names writeblast = opts.writeblast outfile = opts.outfile if len(args) != 1: sys.exit(not p.print_help()) blastfile, = args blast = Blast(blastfile) logging.debug("Register best scores ..") best_score = defaultdict(float) for b in blast: query, subject = b.query, b.subject if ostrip: query, subject = gene_name(query), gene_name(subject) score = b.score if score > best_score[query]: best_score[query] = score if score > best_score[subject]: best_score[subject] = score blast = Blast(blastfile) pairs = {} cutoff = opts.cutoff for b in blast: query, subject = b.query, b.subject if ostrip: query, subject = gene_name(query), gene_name(subject) score = b.score pctid = b.pctid s = score / max(best_score[query], best_score[subject]) if s > cutoff: pair = (query, subject) if pair not in pairs or s > pairs[pair][0]: pairs[pair] = (s, pctid, b) fw = must_open(outfile, "w") if writeblast: fwb = must_open(outfile + ".filtered.blast", "w") pct = opts.pct for (query, subject), (s, pctid, b) in sorted(pairs.items()): args = [query, subject, "{0:.2f}".format(s)] if pct: args.append("{0:.1f}".format(pctid)) print("\t".join(args), 
file=fw) if writeblast: print(b, file=fwb) fw.close() if writeblast: fwb.close()
python
def cscore(args): """ %prog cscore blastfile > cscoreOut See supplementary info for sea anemone genome paper, C-score formula: cscore(A,B) = score(A,B) / max(best score for A, best score for B) A C-score of one is the same as reciprocal best hit (RBH). Output file will be 3-column (query, subject, cscore). Use --cutoff to select a different cutoff. """ from jcvi.utils.cbook import gene_name p = OptionParser(cscore.__doc__) p.add_option("--cutoff", default=.9999, type="float", help="Minimum C-score to report [default: %default]") p.add_option("--pct", default=False, action="store_true", help="Also include pct as last column [default: %default]") p.add_option("--writeblast", default=False, action="store_true", help="Also write filtered blast file [default: %default]") p.set_stripnames() p.set_outfile() opts, args = p.parse_args(args) ostrip = opts.strip_names writeblast = opts.writeblast outfile = opts.outfile if len(args) != 1: sys.exit(not p.print_help()) blastfile, = args blast = Blast(blastfile) logging.debug("Register best scores ..") best_score = defaultdict(float) for b in blast: query, subject = b.query, b.subject if ostrip: query, subject = gene_name(query), gene_name(subject) score = b.score if score > best_score[query]: best_score[query] = score if score > best_score[subject]: best_score[subject] = score blast = Blast(blastfile) pairs = {} cutoff = opts.cutoff for b in blast: query, subject = b.query, b.subject if ostrip: query, subject = gene_name(query), gene_name(subject) score = b.score pctid = b.pctid s = score / max(best_score[query], best_score[subject]) if s > cutoff: pair = (query, subject) if pair not in pairs or s > pairs[pair][0]: pairs[pair] = (s, pctid, b) fw = must_open(outfile, "w") if writeblast: fwb = must_open(outfile + ".filtered.blast", "w") pct = opts.pct for (query, subject), (s, pctid, b) in sorted(pairs.items()): args = [query, subject, "{0:.2f}".format(s)] if pct: args.append("{0:.1f}".format(pctid)) print("\t".join(args), 
file=fw) if writeblast: print(b, file=fwb) fw.close() if writeblast: fwb.close()
[ "def", "cscore", "(", "args", ")", ":", "from", "jcvi", ".", "utils", ".", "cbook", "import", "gene_name", "p", "=", "OptionParser", "(", "cscore", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--cutoff\"", ",", "default", "=", ".9999", ",", "ty...
%prog cscore blastfile > cscoreOut See supplementary info for sea anemone genome paper, C-score formula: cscore(A,B) = score(A,B) / max(best score for A, best score for B) A C-score of one is the same as reciprocal best hit (RBH). Output file will be 3-column (query, subject, cscore). Use --cutoff to select a different cutoff.
[ "%prog", "cscore", "blastfile", ">", "cscoreOut" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/blast.py#L693-L772
train
200,684
tanghaibao/jcvi
jcvi/formats/blast.py
get_distance
def get_distance(a, b, xaxis=True): """ Returns the distance between two blast HSPs. """ if xaxis: arange = ("0", a.qstart, a.qstop, a.orientation) # 0 is the dummy chromosome brange = ("0", b.qstart, b.qstop, b.orientation) else: arange = ("0", a.sstart, a.sstop, a.orientation) brange = ("0", b.sstart, b.sstop, b.orientation) dist, oo = range_distance(arange, brange, distmode="ee") dist = abs(dist) return dist
python
def get_distance(a, b, xaxis=True): """ Returns the distance between two blast HSPs. """ if xaxis: arange = ("0", a.qstart, a.qstop, a.orientation) # 0 is the dummy chromosome brange = ("0", b.qstart, b.qstop, b.orientation) else: arange = ("0", a.sstart, a.sstop, a.orientation) brange = ("0", b.sstart, b.sstop, b.orientation) dist, oo = range_distance(arange, brange, distmode="ee") dist = abs(dist) return dist
[ "def", "get_distance", "(", "a", ",", "b", ",", "xaxis", "=", "True", ")", ":", "if", "xaxis", ":", "arange", "=", "(", "\"0\"", ",", "a", ".", "qstart", ",", "a", ".", "qstop", ",", "a", ".", "orientation", ")", "# 0 is the dummy chromosome", "brang...
Returns the distance between two blast HSPs.
[ "Returns", "the", "distance", "between", "two", "blast", "HSPs", "." ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/blast.py#L775-L789
train
200,685
tanghaibao/jcvi
jcvi/formats/blast.py
combine_HSPs
def combine_HSPs(a): """ Combine HSPs into a single BlastLine. """ m = a[0] if len(a) == 1: return m for b in a[1:]: assert m.query == b.query assert m.subject == b.subject m.hitlen += b.hitlen m.nmismatch += b.nmismatch m.ngaps += b.ngaps m.qstart = min(m.qstart, b.qstart) m.qstop = max(m.qstop, b.qstop) m.sstart = min(m.sstart, b.sstart) m.sstop = max(m.sstop, b.sstop) if m.has_score: m.score += b.score m.pctid = 100 - (m.nmismatch + m.ngaps) * 100. / m.hitlen return m
python
def combine_HSPs(a): """ Combine HSPs into a single BlastLine. """ m = a[0] if len(a) == 1: return m for b in a[1:]: assert m.query == b.query assert m.subject == b.subject m.hitlen += b.hitlen m.nmismatch += b.nmismatch m.ngaps += b.ngaps m.qstart = min(m.qstart, b.qstart) m.qstop = max(m.qstop, b.qstop) m.sstart = min(m.sstart, b.sstart) m.sstop = max(m.sstop, b.sstop) if m.has_score: m.score += b.score m.pctid = 100 - (m.nmismatch + m.ngaps) * 100. / m.hitlen return m
[ "def", "combine_HSPs", "(", "a", ")", ":", "m", "=", "a", "[", "0", "]", "if", "len", "(", "a", ")", "==", "1", ":", "return", "m", "for", "b", "in", "a", "[", "1", ":", "]", ":", "assert", "m", ".", "query", "==", "b", ".", "query", "ass...
Combine HSPs into a single BlastLine.
[ "Combine", "HSPs", "into", "a", "single", "BlastLine", "." ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/blast.py#L792-L814
train
200,686
tanghaibao/jcvi
jcvi/formats/blast.py
chain
def chain(args): """ %prog chain blastfile Chain adjacent HSPs together to form larger HSP. """ p = OptionParser(chain.__doc__) p.add_option("--dist", dest="dist", default=100, type="int", help="extent of flanking regions to search [default: %default]") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) blastfile, = args dist = opts.dist assert dist > 0 blast = BlastSlow(blastfile) logging.debug("A total of {} records imported".format(len(blast))) chained_hsps = chain_HSPs(blast, xdist=dist, ydist=dist) logging.debug("A total of {} records after chaining".format(len(chained_hsps))) for b in chained_hsps: print(b)
python
def chain(args): """ %prog chain blastfile Chain adjacent HSPs together to form larger HSP. """ p = OptionParser(chain.__doc__) p.add_option("--dist", dest="dist", default=100, type="int", help="extent of flanking regions to search [default: %default]") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) blastfile, = args dist = opts.dist assert dist > 0 blast = BlastSlow(blastfile) logging.debug("A total of {} records imported".format(len(blast))) chained_hsps = chain_HSPs(blast, xdist=dist, ydist=dist) logging.debug("A total of {} records after chaining".format(len(chained_hsps))) for b in chained_hsps: print(b)
[ "def", "chain", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "chain", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--dist\"", ",", "dest", "=", "\"dist\"", ",", "default", "=", "100", ",", "type", "=", "\"int\"", ",", "help", "=", ...
%prog chain blastfile Chain adjacent HSPs together to form larger HSP.
[ "%prog", "chain", "blastfile" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/blast.py#L855-L881
train
200,687
tanghaibao/jcvi
jcvi/formats/blast.py
condense
def condense(args): """ %prog condense blastfile > blastfile.condensed Condense HSPs that belong to the same query-subject pair into one. """ p = OptionParser(condense.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) blastfile, = args blast = BlastSlow(blastfile) key = lambda x: x.query blast.sort(key=key) clusters = [] for q, lines in groupby(blast, key=key): lines = list(lines) condenser = defaultdict(list) for b in lines: condenser[(b.subject, b.orientation)].append(b) for bs in condenser.values(): clusters.append(bs) chained_hsps = [combine_HSPs(x) for x in clusters] chained_hsps = sorted(chained_hsps, key=lambda x: (x.query, -x.score)) for b in chained_hsps: print(b)
python
def condense(args): """ %prog condense blastfile > blastfile.condensed Condense HSPs that belong to the same query-subject pair into one. """ p = OptionParser(condense.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) blastfile, = args blast = BlastSlow(blastfile) key = lambda x: x.query blast.sort(key=key) clusters = [] for q, lines in groupby(blast, key=key): lines = list(lines) condenser = defaultdict(list) for b in lines: condenser[(b.subject, b.orientation)].append(b) for bs in condenser.values(): clusters.append(bs) chained_hsps = [combine_HSPs(x) for x in clusters] chained_hsps = sorted(chained_hsps, key=lambda x: (x.query, -x.score)) for b in chained_hsps: print(b)
[ "def", "condense", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "condense", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "1", ":", "sys", ".", "exit", "(", ...
%prog condense blastfile > blastfile.condensed Condense HSPs that belong to the same query-subject pair into one.
[ "%prog", "condense", "blastfile", ">", "blastfile", ".", "condensed" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/blast.py#L884-L915
train
200,688
tanghaibao/jcvi
jcvi/formats/blast.py
mismatches
def mismatches(args): """ %prog mismatches blastfile Print out histogram of mismatches of HSPs, usually for evaluating SNP level. """ from jcvi.utils.cbook import percentage from jcvi.graphics.histogram import stem_leaf_plot p = OptionParser(mismatches.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) blastfile, = args data = [] matches = 0 b = Blast(blastfile) for query, bline in b.iter_best_hit(): mm = bline.nmismatch + bline.ngaps data.append(mm) nonzeros = [x for x in data if x != 0] title = "Polymorphic sites: {0}".\ format(percentage(len(nonzeros), len(data))) stem_leaf_plot(data, 0, 20, 20, title=title)
python
def mismatches(args): """ %prog mismatches blastfile Print out histogram of mismatches of HSPs, usually for evaluating SNP level. """ from jcvi.utils.cbook import percentage from jcvi.graphics.histogram import stem_leaf_plot p = OptionParser(mismatches.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) blastfile, = args data = [] matches = 0 b = Blast(blastfile) for query, bline in b.iter_best_hit(): mm = bline.nmismatch + bline.ngaps data.append(mm) nonzeros = [x for x in data if x != 0] title = "Polymorphic sites: {0}".\ format(percentage(len(nonzeros), len(data))) stem_leaf_plot(data, 0, 20, 20, title=title)
[ "def", "mismatches", "(", "args", ")", ":", "from", "jcvi", ".", "utils", ".", "cbook", "import", "percentage", "from", "jcvi", ".", "graphics", ".", "histogram", "import", "stem_leaf_plot", "p", "=", "OptionParser", "(", "mismatches", ".", "__doc__", ")", ...
%prog mismatches blastfile Print out histogram of mismatches of HSPs, usually for evaluating SNP level.
[ "%prog", "mismatches", "blastfile" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/blast.py#L918-L946
train
200,689
tanghaibao/jcvi
jcvi/formats/blast.py
swap
def swap(args): """ %prog swap blastfile Print out a new blast file with query and subject swapped. """ p = OptionParser(swap.__doc__) opts, args = p.parse_args(args) if len(args) < 1: sys.exit(not p.print_help()) blastfile, = args swappedblastfile = blastfile + ".swapped" fp = must_open(blastfile) fw = must_open(swappedblastfile, "w") for row in fp: b = BlastLine(row) print(b.swapped, file=fw) fw.close() sort([swappedblastfile])
python
def swap(args): """ %prog swap blastfile Print out a new blast file with query and subject swapped. """ p = OptionParser(swap.__doc__) opts, args = p.parse_args(args) if len(args) < 1: sys.exit(not p.print_help()) blastfile, = args swappedblastfile = blastfile + ".swapped" fp = must_open(blastfile) fw = must_open(swappedblastfile, "w") for row in fp: b = BlastLine(row) print(b.swapped, file=fw) fw.close() sort([swappedblastfile])
[ "def", "swap", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "swap", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "<", "1", ":", "sys", ".", "exit", "(", "not", ...
%prog swap blastfile Print out a new blast file with query and subject swapped.
[ "%prog", "swap", "blastfile" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/blast.py#L1120-L1142
train
200,690
tanghaibao/jcvi
jcvi/formats/blast.py
bed
def bed(args): """ %prog bed blastfile Print out bed file based on coordinates in BLAST report. By default, write out subject positions. Use --swap to write query positions. """ from jcvi.formats.bed import sort as bed_sort p = OptionParser(bed.__doc__) p.add_option("--swap", default=False, action="store_true", help="Write query positions [default: %default]") p.add_option("--both", default=False, action="store_true", help="Generate one line for each of query and subject") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(p.print_help()) blastfile, = args positive = (not opts.swap) or opts.both negative = opts.swap or opts.both fp = must_open(blastfile) bedfile = "{0}.bed".format(blastfile.rsplit(".", 1)[0]) \ if blastfile.endswith(".blast") \ else "{0}.bed".format(blastfile) fw = open(bedfile, "w") for row in fp: b = BlastLine(row) if positive: print(b.bedline, file=fw) if negative: print(b.swapped.bedline, file=fw) logging.debug("File written to `{0}`.".format(bedfile)) fw.close() bed_sort([bedfile, "-i"]) return bedfile
python
def bed(args): """ %prog bed blastfile Print out bed file based on coordinates in BLAST report. By default, write out subject positions. Use --swap to write query positions. """ from jcvi.formats.bed import sort as bed_sort p = OptionParser(bed.__doc__) p.add_option("--swap", default=False, action="store_true", help="Write query positions [default: %default]") p.add_option("--both", default=False, action="store_true", help="Generate one line for each of query and subject") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(p.print_help()) blastfile, = args positive = (not opts.swap) or opts.both negative = opts.swap or opts.both fp = must_open(blastfile) bedfile = "{0}.bed".format(blastfile.rsplit(".", 1)[0]) \ if blastfile.endswith(".blast") \ else "{0}.bed".format(blastfile) fw = open(bedfile, "w") for row in fp: b = BlastLine(row) if positive: print(b.bedline, file=fw) if negative: print(b.swapped.bedline, file=fw) logging.debug("File written to `{0}`.".format(bedfile)) fw.close() bed_sort([bedfile, "-i"]) return bedfile
[ "def", "bed", "(", "args", ")", ":", "from", "jcvi", ".", "formats", ".", "bed", "import", "sort", "as", "bed_sort", "p", "=", "OptionParser", "(", "bed", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--swap\"", ",", "default", "=", "False", "...
%prog bed blastfile Print out bed file based on coordinates in BLAST report. By default, write out subject positions. Use --swap to write query positions.
[ "%prog", "bed", "blastfile" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/blast.py#L1145-L1185
train
200,691
tanghaibao/jcvi
jcvi/formats/blast.py
best
def best(args): """ %prog best blastfile print the best hit for each query in the blastfile """ p = OptionParser(best.__doc__) p.add_option("-n", default=1, type="int", help="get best N hits [default: %default]") p.add_option("--nosort", default=False, action="store_true", help="assume BLAST is already sorted [default: %default]") p.add_option("--hsps", default=False, action="store_true", help="get all HSPs for the best pair [default: %default]") p.add_option("--subject", default=False, action="store_true", help="get best hit(s) for subject genome instead [default: %default]") p.set_tmpdir() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) blastfile, = args n = opts.n hsps = opts.hsps tmpdir = opts.tmpdir ref = "query" if not opts.subject else "subject" if not opts.nosort: sargs = [blastfile] if tmpdir: sargs += ["-T {0}".format(tmpdir)] if ref != "query": sargs += ["--refscore"] sort(sargs) else: logging.debug("Assuming sorted BLAST") if not opts.subject: bestblastfile = blastfile + ".best" else: bestblastfile = blastfile + ".subject.best" fw = open(bestblastfile, "w") b = Blast(blastfile) for q, bline in b.iter_best_hit(N=n, hsps=hsps, ref=ref): print(bline, file=fw) return bestblastfile
python
def best(args): """ %prog best blastfile print the best hit for each query in the blastfile """ p = OptionParser(best.__doc__) p.add_option("-n", default=1, type="int", help="get best N hits [default: %default]") p.add_option("--nosort", default=False, action="store_true", help="assume BLAST is already sorted [default: %default]") p.add_option("--hsps", default=False, action="store_true", help="get all HSPs for the best pair [default: %default]") p.add_option("--subject", default=False, action="store_true", help="get best hit(s) for subject genome instead [default: %default]") p.set_tmpdir() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) blastfile, = args n = opts.n hsps = opts.hsps tmpdir = opts.tmpdir ref = "query" if not opts.subject else "subject" if not opts.nosort: sargs = [blastfile] if tmpdir: sargs += ["-T {0}".format(tmpdir)] if ref != "query": sargs += ["--refscore"] sort(sargs) else: logging.debug("Assuming sorted BLAST") if not opts.subject: bestblastfile = blastfile + ".best" else: bestblastfile = blastfile + ".subject.best" fw = open(bestblastfile, "w") b = Blast(blastfile) for q, bline in b.iter_best_hit(N=n, hsps=hsps, ref=ref): print(bline, file=fw) return bestblastfile
[ "def", "best", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "best", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"-n\"", ",", "default", "=", "1", ",", "type", "=", "\"int\"", ",", "help", "=", "\"get best N hits [default: %default]\"", "...
%prog best blastfile print the best hit for each query in the blastfile
[ "%prog", "best", "blastfile" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/blast.py#L1208-L1256
train
200,692
tanghaibao/jcvi
jcvi/formats/blast.py
summary
def summary(args): """ %prog summary blastfile Provide summary on id% and cov%, for both query and reference. Often used in comparing genomes (based on NUCMER results). """ p = OptionParser(summary.__doc__) p.add_option("--strict", default=False, action="store_true", help="Strict 'gapless' mode. Exclude gaps from covered base.") p.add_option("--tabular", default=False, action="store_true", help="Print succint tabular output") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) blastfile, = args alignstats = get_stats(blastfile, strict=opts.strict) if opts.tabular: print(str(alignstats)) else: alignstats.print_stats()
python
def summary(args): """ %prog summary blastfile Provide summary on id% and cov%, for both query and reference. Often used in comparing genomes (based on NUCMER results). """ p = OptionParser(summary.__doc__) p.add_option("--strict", default=False, action="store_true", help="Strict 'gapless' mode. Exclude gaps from covered base.") p.add_option("--tabular", default=False, action="store_true", help="Print succint tabular output") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) blastfile, = args alignstats = get_stats(blastfile, strict=opts.strict) if opts.tabular: print(str(alignstats)) else: alignstats.print_stats()
[ "def", "summary", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "summary", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--strict\"", ",", "default", "=", "False", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"Strict 'gapless' m...
%prog summary blastfile Provide summary on id% and cov%, for both query and reference. Often used in comparing genomes (based on NUCMER results).
[ "%prog", "summary", "blastfile" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/blast.py#L1259-L1283
train
200,693
tanghaibao/jcvi
jcvi/formats/blast.py
subset
def subset(args): """ %prog subset blastfile qbedfile sbedfile Extract blast hits between given query and subject chrs. If --qchrs or --schrs is not given, then all chrs from q/s genome will be included. However one of --qchrs and --schrs must be specified. Otherwise the script will do nothing. """ p = OptionParser(subset.__doc__) p.add_option("--qchrs", default=None, help="query chrs to extract, comma sep [default: %default]") p.add_option("--schrs", default=None, help="subject chrs to extract, comma sep [default: %default]") p.add_option("--convert", default=False, action="store_true", help="convert accns to chr_rank [default: %default]") opts, args = p.parse_args(args) if len(args) != 3: sys.exit(not p.print_help()) blastfile, qbedfile, sbedfile = args qchrs = opts.qchrs schrs = opts.schrs assert qchrs or schrs, p.print_help() convert = opts.convert outfile = blastfile + "." if qchrs: outfile += qchrs + "." qchrs = set(qchrs.split(",")) else: qchrs = set(Bed(qbedfile).seqids) if schrs: schrs = set(schrs.split(",")) if qbedfile != sbedfile or qchrs != schrs: outfile += ",".join(schrs) + "." else: schrs = set(Bed(sbedfile).seqids) outfile += "blast" qo = Bed(qbedfile).order so = Bed(sbedfile).order fw = must_open(outfile, "w") for b in Blast(blastfile): q, s = b.query, b.subject if qo[q][1].seqid in qchrs and so[s][1].seqid in schrs: if convert: b.query = qo[q][1].seqid + "_" + "{0:05d}".format(qo[q][0]) b.subject = so[s][1].seqid + "_" + "{0:05d}".format(so[s][0]) print(b, file=fw) fw.close() logging.debug("Subset blastfile written to `{0}`".format(outfile))
python
def subset(args): """ %prog subset blastfile qbedfile sbedfile Extract blast hits between given query and subject chrs. If --qchrs or --schrs is not given, then all chrs from q/s genome will be included. However one of --qchrs and --schrs must be specified. Otherwise the script will do nothing. """ p = OptionParser(subset.__doc__) p.add_option("--qchrs", default=None, help="query chrs to extract, comma sep [default: %default]") p.add_option("--schrs", default=None, help="subject chrs to extract, comma sep [default: %default]") p.add_option("--convert", default=False, action="store_true", help="convert accns to chr_rank [default: %default]") opts, args = p.parse_args(args) if len(args) != 3: sys.exit(not p.print_help()) blastfile, qbedfile, sbedfile = args qchrs = opts.qchrs schrs = opts.schrs assert qchrs or schrs, p.print_help() convert = opts.convert outfile = blastfile + "." if qchrs: outfile += qchrs + "." qchrs = set(qchrs.split(",")) else: qchrs = set(Bed(qbedfile).seqids) if schrs: schrs = set(schrs.split(",")) if qbedfile != sbedfile or qchrs != schrs: outfile += ",".join(schrs) + "." else: schrs = set(Bed(sbedfile).seqids) outfile += "blast" qo = Bed(qbedfile).order so = Bed(sbedfile).order fw = must_open(outfile, "w") for b in Blast(blastfile): q, s = b.query, b.subject if qo[q][1].seqid in qchrs and so[s][1].seqid in schrs: if convert: b.query = qo[q][1].seqid + "_" + "{0:05d}".format(qo[q][0]) b.subject = so[s][1].seqid + "_" + "{0:05d}".format(so[s][0]) print(b, file=fw) fw.close() logging.debug("Subset blastfile written to `{0}`".format(outfile))
[ "def", "subset", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "subset", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--qchrs\"", ",", "default", "=", "None", ",", "help", "=", "\"query chrs to extract, comma sep [default: %default]\"", ")", "p...
%prog subset blastfile qbedfile sbedfile Extract blast hits between given query and subject chrs. If --qchrs or --schrs is not given, then all chrs from q/s genome will be included. However one of --qchrs and --schrs must be specified. Otherwise the script will do nothing.
[ "%prog", "subset", "blastfile", "qbedfile", "sbedfile" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/blast.py#L1286-L1340
train
200,694
tanghaibao/jcvi
jcvi/projects/pineapple.py
flanking
def flanking(args): """ %prog flanking SI.ids liftover.bed master.txt master-removed.txt Extract flanking genes for given SI loci. """ p = OptionParser(flanking.__doc__) p.add_option("-N", default=50, type="int", help="How many genes on both directions") opts, args = p.parse_args(args) if len(args) != 4: sys.exit(not p.print_help()) SI, liftover, master, te = args N = opts.N SI = SetFile(SI, column=0, delimiter='.') liftover = Bed(liftover) order = liftover.order neighbors = set() for s in SI: si, s = order[s] LB = max(si - N, 0) RB = min(si + N, len(liftover)) for j in xrange(LB, RB + 1): a = liftover[j] if a.seqid != s.seqid: continue neighbors.add(a.accn) dmain = DictFile(master, keypos=0, valuepos=None, delimiter='\t') dte = DictFile(te, keypos=0, valuepos=None, delimiter='\t') header = next(open(master)) print("\t".join(("SI/Neighbor", "Gene/TE", header.strip()))) for a in liftover: s = a.accn if s not in neighbors: continue tag = "SI" if s in SI else "neighbor" if s in dmain: d = dmain[s] print("\t".join([tag, "gene"] + d)) elif s in dte: d = dte[s] print("\t".join([tag, "TE"] + d))
python
def flanking(args): """ %prog flanking SI.ids liftover.bed master.txt master-removed.txt Extract flanking genes for given SI loci. """ p = OptionParser(flanking.__doc__) p.add_option("-N", default=50, type="int", help="How many genes on both directions") opts, args = p.parse_args(args) if len(args) != 4: sys.exit(not p.print_help()) SI, liftover, master, te = args N = opts.N SI = SetFile(SI, column=0, delimiter='.') liftover = Bed(liftover) order = liftover.order neighbors = set() for s in SI: si, s = order[s] LB = max(si - N, 0) RB = min(si + N, len(liftover)) for j in xrange(LB, RB + 1): a = liftover[j] if a.seqid != s.seqid: continue neighbors.add(a.accn) dmain = DictFile(master, keypos=0, valuepos=None, delimiter='\t') dte = DictFile(te, keypos=0, valuepos=None, delimiter='\t') header = next(open(master)) print("\t".join(("SI/Neighbor", "Gene/TE", header.strip()))) for a in liftover: s = a.accn if s not in neighbors: continue tag = "SI" if s in SI else "neighbor" if s in dmain: d = dmain[s] print("\t".join([tag, "gene"] + d)) elif s in dte: d = dte[s] print("\t".join([tag, "TE"] + d))
[ "def", "flanking", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "flanking", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"-N\"", ",", "default", "=", "50", ",", "type", "=", "\"int\"", ",", "help", "=", "\"How many genes on both directions\...
%prog flanking SI.ids liftover.bed master.txt master-removed.txt Extract flanking genes for given SI loci.
[ "%prog", "flanking", "SI", ".", "ids", "liftover", ".", "bed", "master", ".", "txt", "master", "-", "removed", ".", "txt" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/projects/pineapple.py#L71-L116
train
200,695
tanghaibao/jcvi
jcvi/projects/pineapple.py
geneinfo
def geneinfo(args): """ %prog geneinfo pineapple.20141004.bed liftover.bed pineapple.20150413.bed \ note.txt interproscan.txt Build gene info table from various sources. The three beds contain information on the original scaffolds, linkage groups, and final selected loci (after removal of TEs and split loci). The final two text files contain AHRD and domain data. """ p = OptionParser(geneinfo.__doc__) opts, args = p.parse_args(args) if len(args) != 5: sys.exit(not p.print_help()) scfbed, liftoverbed, lgbed, note, ipr = args note = DictFile(note, delimiter="\t") scfbed = Bed(scfbed) lgorder = Bed(lgbed).order liftover = Bed(liftoverbed).order header = "Accession Scaffold-position LG-position "\ "Description Interpro-domain Interpro-description "\ "GO-term KEGG".split() ipr = read_interpro(ipr) fw_clean = must_open("master.txt", "w") fw_removed = must_open("master-removed.txt", "w") for fw in (fw_clean, fw_removed): print("\t".join(header), file=fw) for b in scfbed: accession = b.accn scaffold_position = b.tag if accession in liftover: lg_position = liftover[accession][-1].tag else: lg_position = "split" fw = fw_clean if accession in lgorder else fw_removed description = note[accession] interpro = interpro_description = go = kegg = "" if accession in ipr: interpro, interpro_description, go, kegg = ipr[accession] print("\t".join((accession, scaffold_position, lg_position, description, interpro, interpro_description, go, kegg)), file=fw) fw.close()
python
def geneinfo(args): """ %prog geneinfo pineapple.20141004.bed liftover.bed pineapple.20150413.bed \ note.txt interproscan.txt Build gene info table from various sources. The three beds contain information on the original scaffolds, linkage groups, and final selected loci (after removal of TEs and split loci). The final two text files contain AHRD and domain data. """ p = OptionParser(geneinfo.__doc__) opts, args = p.parse_args(args) if len(args) != 5: sys.exit(not p.print_help()) scfbed, liftoverbed, lgbed, note, ipr = args note = DictFile(note, delimiter="\t") scfbed = Bed(scfbed) lgorder = Bed(lgbed).order liftover = Bed(liftoverbed).order header = "Accession Scaffold-position LG-position "\ "Description Interpro-domain Interpro-description "\ "GO-term KEGG".split() ipr = read_interpro(ipr) fw_clean = must_open("master.txt", "w") fw_removed = must_open("master-removed.txt", "w") for fw in (fw_clean, fw_removed): print("\t".join(header), file=fw) for b in scfbed: accession = b.accn scaffold_position = b.tag if accession in liftover: lg_position = liftover[accession][-1].tag else: lg_position = "split" fw = fw_clean if accession in lgorder else fw_removed description = note[accession] interpro = interpro_description = go = kegg = "" if accession in ipr: interpro, interpro_description, go, kegg = ipr[accession] print("\t".join((accession, scaffold_position, lg_position, description, interpro, interpro_description, go, kegg)), file=fw) fw.close()
[ "def", "geneinfo", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "geneinfo", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "5", ":", "sys", ".", "exit", "(", ...
%prog geneinfo pineapple.20141004.bed liftover.bed pineapple.20150413.bed \ note.txt interproscan.txt Build gene info table from various sources. The three beds contain information on the original scaffolds, linkage groups, and final selected loci (after removal of TEs and split loci). The final two text files contain AHRD and domain data.
[ "%prog", "geneinfo", "pineapple", ".", "20141004", ".", "bed", "liftover", ".", "bed", "pineapple", ".", "20150413", ".", "bed", "\\", "note", ".", "txt", "interproscan", ".", "txt" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/projects/pineapple.py#L229-L275
train
200,696
tanghaibao/jcvi
jcvi/projects/pineapple.py
ploidy
def ploidy(args): """ %prog ploidy seqids karyotype.layout mcscan.out all.bed synteny.layout Build a figure that calls graphics.karyotype to illustrate the high ploidy of WGD history of pineapple genome. The script calls both graphics.karyotype and graphic.synteny. """ p = OptionParser(ploidy.__doc__) p.add_option("--switch", help="Rename the seqid with two-column file") opts, args, iopts = p.set_image_options(args, figsize="9x7") if len(args) != 5: sys.exit(not p.print_help()) seqidsfile, klayout, datafile, bedfile, slayout = args fig = plt.figure(1, (iopts.w, iopts.h)) root = fig.add_axes([0, 0, 1, 1]) Karyotype(fig, root, seqidsfile, klayout) Synteny(fig, root, datafile, bedfile, slayout, switch=opts.switch) # legend showing the orientation of the genes draw_gene_legend(root, .27, .37, .52) # annotate the WGD events fc = 'lightslategrey' x = .09 radius = .012 TextCircle(root, x, .825, r'$\tau$', radius=radius, fc=fc) TextCircle(root, x, .8, r'$\sigma$', radius=radius, fc=fc) TextCircle(root, x, .72, r'$\rho$', radius=radius, fc=fc) for ypos in (.825, .8, .72): root.text(.12, ypos, r"$\times2$", color=fc, ha="center", va="center") root.plot([x, x], [.85, .775], ":", color=fc, lw=2) root.plot([x, x], [.75, .675], ":", color=fc, lw=2) labels = ((.04, .96, 'A'), (.04, .54, 'B')) panel_labels(root, labels) root.set_xlim(0, 1) root.set_ylim(0, 1) root.set_axis_off() pf = "pineapple-karyotype" image_name = pf + "." + iopts.format savefig(image_name, dpi=iopts.dpi, iopts=iopts)
python
def ploidy(args): """ %prog ploidy seqids karyotype.layout mcscan.out all.bed synteny.layout Build a figure that calls graphics.karyotype to illustrate the high ploidy of WGD history of pineapple genome. The script calls both graphics.karyotype and graphic.synteny. """ p = OptionParser(ploidy.__doc__) p.add_option("--switch", help="Rename the seqid with two-column file") opts, args, iopts = p.set_image_options(args, figsize="9x7") if len(args) != 5: sys.exit(not p.print_help()) seqidsfile, klayout, datafile, bedfile, slayout = args fig = plt.figure(1, (iopts.w, iopts.h)) root = fig.add_axes([0, 0, 1, 1]) Karyotype(fig, root, seqidsfile, klayout) Synteny(fig, root, datafile, bedfile, slayout, switch=opts.switch) # legend showing the orientation of the genes draw_gene_legend(root, .27, .37, .52) # annotate the WGD events fc = 'lightslategrey' x = .09 radius = .012 TextCircle(root, x, .825, r'$\tau$', radius=radius, fc=fc) TextCircle(root, x, .8, r'$\sigma$', radius=radius, fc=fc) TextCircle(root, x, .72, r'$\rho$', radius=radius, fc=fc) for ypos in (.825, .8, .72): root.text(.12, ypos, r"$\times2$", color=fc, ha="center", va="center") root.plot([x, x], [.85, .775], ":", color=fc, lw=2) root.plot([x, x], [.75, .675], ":", color=fc, lw=2) labels = ((.04, .96, 'A'), (.04, .54, 'B')) panel_labels(root, labels) root.set_xlim(0, 1) root.set_ylim(0, 1) root.set_axis_off() pf = "pineapple-karyotype" image_name = pf + "." + iopts.format savefig(image_name, dpi=iopts.dpi, iopts=iopts)
[ "def", "ploidy", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "ploidy", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--switch\"", ",", "help", "=", "\"Rename the seqid with two-column file\"", ")", "opts", ",", "args", ",", "iopts", "=", "...
%prog ploidy seqids karyotype.layout mcscan.out all.bed synteny.layout Build a figure that calls graphics.karyotype to illustrate the high ploidy of WGD history of pineapple genome. The script calls both graphics.karyotype and graphic.synteny.
[ "%prog", "ploidy", "seqids", "karyotype", ".", "layout", "mcscan", ".", "out", "all", ".", "bed", "synteny", ".", "layout" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/projects/pineapple.py#L278-L325
train
200,697
tanghaibao/jcvi
jcvi/algorithms/tsp.py
node_to_edge
def node_to_edge(edges, directed=True): """ From list of edges, record per node, incoming and outgoing edges """ outgoing = defaultdict(set) incoming = defaultdict(set) if directed else outgoing nodes = set() for i, edge in enumerate(edges): a, b, = edge[:2] outgoing[a].add(i) incoming[b].add(i) nodes.add(a) nodes.add(b) nodes = sorted(nodes) if directed: return outgoing, incoming, nodes return outgoing, nodes
python
def node_to_edge(edges, directed=True): """ From list of edges, record per node, incoming and outgoing edges """ outgoing = defaultdict(set) incoming = defaultdict(set) if directed else outgoing nodes = set() for i, edge in enumerate(edges): a, b, = edge[:2] outgoing[a].add(i) incoming[b].add(i) nodes.add(a) nodes.add(b) nodes = sorted(nodes) if directed: return outgoing, incoming, nodes return outgoing, nodes
[ "def", "node_to_edge", "(", "edges", ",", "directed", "=", "True", ")", ":", "outgoing", "=", "defaultdict", "(", "set", ")", "incoming", "=", "defaultdict", "(", "set", ")", "if", "directed", "else", "outgoing", "nodes", "=", "set", "(", ")", "for", "...
From list of edges, record per node, incoming and outgoing edges
[ "From", "list", "of", "edges", "record", "per", "node", "incoming", "and", "outgoing", "edges" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/algorithms/tsp.py#L125-L141
train
200,698
tanghaibao/jcvi
jcvi/projects/allmaps.py
resample
def resample(args): """ %prog resample yellow-catfish-resample.txt medicago-resample.txt Plot ALLMAPS performance across resampled real data. """ p = OptionParser(resample.__doc__) opts, args, iopts = p.set_image_options(args, figsize="8x4", dpi=300) if len(args) != 2: sys.exit(not p.print_help()) dataA, dataB = args fig = plt.figure(1, (iopts.w, iopts.h)) root = fig.add_axes([0, 0, 1, 1]) A = fig.add_axes([.1, .18, .32, .64]) B = fig.add_axes([.6, .18, .32, .64]) dataA = import_data(dataA) dataB = import_data(dataB) xlabel = "Fraction of markers" ylabels = ("Anchor rate", "Runtime (m)") legend = ("anchor rate", "runtime") subplot_twinx(A, dataA, xlabel, ylabels, title="Yellow catfish", legend=legend) subplot_twinx(B, dataB, xlabel, ylabels, title="Medicago", legend=legend) labels = ((.04, .92, "A"), (.54, .92, "B")) panel_labels(root, labels) normalize_axes(root) image_name = "resample." + iopts.format savefig(image_name, dpi=iopts.dpi, iopts=iopts)
python
def resample(args): """ %prog resample yellow-catfish-resample.txt medicago-resample.txt Plot ALLMAPS performance across resampled real data. """ p = OptionParser(resample.__doc__) opts, args, iopts = p.set_image_options(args, figsize="8x4", dpi=300) if len(args) != 2: sys.exit(not p.print_help()) dataA, dataB = args fig = plt.figure(1, (iopts.w, iopts.h)) root = fig.add_axes([0, 0, 1, 1]) A = fig.add_axes([.1, .18, .32, .64]) B = fig.add_axes([.6, .18, .32, .64]) dataA = import_data(dataA) dataB = import_data(dataB) xlabel = "Fraction of markers" ylabels = ("Anchor rate", "Runtime (m)") legend = ("anchor rate", "runtime") subplot_twinx(A, dataA, xlabel, ylabels, title="Yellow catfish", legend=legend) subplot_twinx(B, dataB, xlabel, ylabels, title="Medicago", legend=legend) labels = ((.04, .92, "A"), (.54, .92, "B")) panel_labels(root, labels) normalize_axes(root) image_name = "resample." + iopts.format savefig(image_name, dpi=iopts.dpi, iopts=iopts)
[ "def", "resample", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "resample", ".", "__doc__", ")", "opts", ",", "args", ",", "iopts", "=", "p", ".", "set_image_options", "(", "args", ",", "figsize", "=", "\"8x4\"", ",", "dpi", "=", "300", ")",...
%prog resample yellow-catfish-resample.txt medicago-resample.txt Plot ALLMAPS performance across resampled real data.
[ "%prog", "resample", "yellow", "-", "catfish", "-", "resample", ".", "txt", "medicago", "-", "resample", ".", "txt" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/projects/allmaps.py#L35-L67
train
200,699