repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
listlengths
20
707
docstring
stringlengths
3
17.3k
docstring_tokens
listlengths
3
222
sha
stringlengths
40
40
url
stringlengths
87
242
partition
stringclasses
1 value
idx
int64
0
252k
tanghaibao/jcvi
jcvi/projects/allmaps.py
resamplestats
def resamplestats(args): """ %prog resamplestats prefix run.log Prepare resample results table. Ten subsets of original data were generated and ALLMAPS were iterated through them, creating `run.log` which contains the timing results. The anchor rate can be found in `prefix.0.{1-10}.summary.txt`. """ p = OptionParser(resamplestats.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) pf, runlog = args fp = open(runlog) Real = "real" times = [] for row in fp: # real 10m31.513s if not row.startswith(Real): continue tag, time = row.split() assert tag == Real m, s = time.split('m') s = s.rstrip('s') m, s = float(m), float(s) time = m + s / 60 times.append(time) N = len(times) rates = [] for i in xrange(-N + 1, 1, 1): summaryfile = "{0}.{1}.summary.txt".format(pf, 2 ** i) fp = open(summaryfile) lines = fp.readlines() # Total bases 580,791,244 (80.8%) 138,298,666 (19.2%) pct = float(lines[-2].split()[3].strip("()%")) rates.append(pct / 100.) assert len(rates) == N print("ratio\tanchor-rate\ttime(m)") for j, i in enumerate(xrange(-N + 1, 1, 1)): print("{0}\t{1:.3f}\t{2:.3f}".format(i, rates[j], times[j]))
python
def resamplestats(args):
    """
    %prog resamplestats prefix run.log

    Prepare resample results table. Ten subsets of original data were generated
    and ALLMAPS were iterated through them, creating `run.log` which contains
    the timing results. The anchor rate can be found in
    `prefix.0.{1-10}.summary.txt`.
    """
    p = OptionParser(resamplestats.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    pf, runlog = args
    Real = "real"
    times = []
    fp = open(runlog)
    for row in fp:
        # Timing lines emitted by the shell `time` builtin, e.g.:
        # real 10m31.513s
        if not row.startswith(Real):
            continue
        tag, time = row.split()
        assert tag == Real
        m, s = time.split('m')
        s = s.rstrip('s')
        m, s = float(m), float(s)
        time = m + s / 60  # convert to fractional minutes
        times.append(time)
    fp.close()

    N = len(times)
    rates = []
    # Subsets were sampled at ratios 2**-(N-1) .. 2**0 of the original data;
    # `xrange` (Py2-only) replaced with `range` to match the Py3 print() calls.
    for i in range(-N + 1, 1, 1):
        summaryfile = "{0}.{1}.summary.txt".format(pf, 2 ** i)
        fp = open(summaryfile)
        lines = fp.readlines()
        fp.close()
        # Example summary line:
        # Total bases  580,791,244 (80.8%)  138,298,666 (19.2%)
        pct = float(lines[-2].split()[3].strip("()%"))
        rates.append(pct / 100.)
    assert len(rates) == N

    print("ratio\tanchor-rate\ttime(m)")
    for j, i in enumerate(range(-N + 1, 1, 1)):
        print("{0}\t{1:.3f}\t{2:.3f}".format(i, rates[j], times[j]))
[ "def", "resamplestats", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "resamplestats", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "2", ":", "sys", ".", "exit...
%prog resamplestats prefix run.log Prepare resample results table. Ten subsets of original data were generated and ALLMAPS were iterated through them, creating `run.log` which contains the timing results. The anchor rate can be found in `prefix.0.{1-10}.summary.txt`.
[ "%prog", "resamplestats", "prefix", "run", ".", "log" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/projects/allmaps.py#L70-L115
train
200,700
tanghaibao/jcvi
jcvi/projects/allmaps.py
comparebed
def comparebed(args): """ %prog comparebed AP.chr.bed infer.bed Compare the scaffold links indicated in two bed files. """ p = OptionParser(comparebed.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) abed, bbed = args abed = Bed(abed) bbed = Bed(bbed) query_links(abed, bbed) query_links(bbed, abed)
python
def comparebed(args):
    """
    %prog comparebed AP.chr.bed infer.bed

    Compare the scaffold links indicated in two bed files.
    """
    p = OptionParser(comparebed.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    path_a, path_b = args
    bed_a = Bed(path_a)
    bed_b = Bed(path_b)
    # Compare in both directions so discrepancies show up on each side
    query_links(bed_a, bed_b)
    query_links(bed_b, bed_a)
[ "def", "comparebed", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "comparebed", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "2", ":", "sys", ".", "exit", "...
%prog comparebed AP.chr.bed infer.bed Compare the scaffold links indicated in two bed files.
[ "%prog", "comparebed", "AP", ".", "chr", ".", "bed", "infer", ".", "bed" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/projects/allmaps.py#L137-L153
train
200,701
tanghaibao/jcvi
jcvi/projects/allmaps.py
simulation
def simulation(args): """ %prog simulation inversion.txt translocation.txt maps.txt multimaps.txt Plot ALLMAPS accuracy across a range of simulated datasets. """ p = OptionParser(simulation.__doc__) opts, args, iopts = p.set_image_options(args, dpi=300) if len(args) != 4: sys.exit(not p.print_help()) dataA, dataB, dataC, dataD = args fig = plt.figure(1, (iopts.w, iopts.h)) root = fig.add_axes([0, 0, 1, 1]) A = fig.add_axes([.12, .62, .35, .35]) B = fig.add_axes([.62, .62, .35, .35]) C = fig.add_axes([.12, .12, .35, .35]) D = fig.add_axes([.62, .12, .35, .35]) dataA = import_data(dataA) dataB = import_data(dataB) dataC = import_data(dataC) dataD = import_data(dataD) subplot(A, dataA, "Inversion error rate", "Accuracy", xlim=.5) subplot(B, dataB, "Translocation error rate", "Accuracy", xlim=.5, legend=("intra-chromosomal", "inter-chromosomal", "75\% intra + 25\% inter")) subplot(C, dataC, "Number of input maps", "Accuracy", xcast=int) subplot(D, dataD, "Number of input maps", "Accuracy", xcast=int) labels = ((.03, .97, "A"), (.53, .97, "B"), (.03, .47, "C"), (.53, .47, "D")) panel_labels(root, labels) normalize_axes(root) image_name = "simulation." + iopts.format savefig(image_name, dpi=iopts.dpi, iopts=iopts)
python
def simulation(args):
    """
    %prog simulation inversion.txt translocation.txt maps.txt multimaps.txt

    Plot ALLMAPS accuracy across a range of simulated datasets.
    """
    p = OptionParser(simulation.__doc__)
    opts, args, iopts = p.set_image_options(args, dpi=300)
    if len(args) != 4:
        sys.exit(not p.print_help())

    fileA, fileB, fileC, fileD = args
    fig = plt.figure(1, (iopts.w, iopts.h))
    root = fig.add_axes([0, 0, 1, 1])
    # 2x2 grid of panels; creation order A, B, C, D is preserved
    A = fig.add_axes([.12, .62, .35, .35])
    B = fig.add_axes([.62, .62, .35, .35])
    C = fig.add_axes([.12, .12, .35, .35])
    D = fig.add_axes([.62, .12, .35, .35])
    dataA, dataB, dataC, dataD = [import_data(f)
                                  for f in (fileA, fileB, fileC, fileD)]
    subplot(A, dataA, "Inversion error rate", "Accuracy", xlim=.5)
    subplot(B, dataB, "Translocation error rate", "Accuracy", xlim=.5,
            legend=("intra-chromosomal", "inter-chromosomal",
                    "75\% intra + 25\% inter"))
    subplot(C, dataC, "Number of input maps", "Accuracy", xcast=int)
    subplot(D, dataD, "Number of input maps", "Accuracy", xcast=int)

    labels = ((.03, .97, "A"), (.53, .97, "B"),
              (.03, .47, "C"), (.53, .47, "D"))
    panel_labels(root, labels)

    normalize_axes(root)
    image_name = "simulation." + iopts.format
    savefig(image_name, dpi=iopts.dpi, iopts=iopts)
[ "def", "simulation", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "simulation", ".", "__doc__", ")", "opts", ",", "args", ",", "iopts", "=", "p", ".", "set_image_options", "(", "args", ",", "dpi", "=", "300", ")", "if", "len", "(", "args", ...
%prog simulation inversion.txt translocation.txt maps.txt multimaps.txt Plot ALLMAPS accuracy across a range of simulated datasets.
[ "%prog", "simulation", "inversion", ".", "txt", "translocation", ".", "txt", "maps", ".", "txt", "multimaps", ".", "txt" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/projects/allmaps.py#L432-L468
train
200,702
tanghaibao/jcvi
jcvi/apps/restriction.py
digest
def digest(args): """ %prog digest fastafile NspI,BfuCI Digest fasta sequences to map restriction site positions. """ p = OptionParser(digest.__doc__) p.set_outfile() opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) fastafile, enzymes = args enzymes = enzymes.split(",") enzymes = [x for x in AllEnzymes if str(x) in enzymes] f = Fasta(fastafile, lazy=True) fw = must_open(opts.outfile, "w") header = ["Contig", "Length"] + [str(x) for x in enzymes] print("\t".join(header), file=fw) for name, rec in f.iteritems_ordered(): row = [name, len(rec)] for e in enzymes: pos = e.search(rec.seq) pos = "na" if not pos else "|".join(str(x) for x in pos) row.append(pos) print("\t".join(str(x) for x in row), file=fw)
python
def digest(args):
    """
    %prog digest fastafile NspI,BfuCI

    Digest fasta sequences to map restriction site positions.
    """
    p = OptionParser(digest.__doc__)
    p.set_outfile()
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    fastafile, enzymes = args
    wanted = enzymes.split(",")
    # Keep only the recognized enzymes, preserving AllEnzymes ordering
    chosen = [e for e in AllEnzymes if str(e) in wanted]

    fasta = Fasta(fastafile, lazy=True)
    fw = must_open(opts.outfile, "w")

    header = ["Contig", "Length"]
    header.extend(str(e) for e in chosen)
    print("\t".join(header), file=fw)

    for name, rec in fasta.iteritems_ordered():
        row = [name, len(rec)]
        for e in chosen:
            hits = e.search(rec.seq)
            # "na" when the enzyme has no site in this contig
            row.append("|".join(str(x) for x in hits) if hits else "na")
        print("\t".join(str(x) for x in row), file=fw)
[ "def", "digest", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "digest", ".", "__doc__", ")", "p", ".", "set_outfile", "(", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "2",...
%prog digest fastafile NspI,BfuCI Digest fasta sequences to map restriction site positions.
[ "%prog", "digest", "fastafile", "NspI", "BfuCI" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/restriction.py#L29-L56
train
200,703
tanghaibao/jcvi
jcvi/apps/restriction.py
extract_full
def extract_full(rec, sites, flank, fw): """ Full extraction of seq flanking the sites. """ for s in sites: newid = "{0}:{1}".format(rec.name, s) left = max(s - flank, 0) right = min(s + flank, len(rec)) frag = rec.seq[left:right].strip("Nn") newrec = SeqRecord(frag, id=newid, description="") SeqIO.write([newrec], fw, "fasta")
python
def extract_full(rec, sites, flank, fw):
    """
    Full extraction of seq flanking the sites.

    For every cut site, write one FASTA record covering `flank` bases on each
    side of the site (clipped to the contig), with leading/trailing Ns removed.
    """
    seqlen = len(rec)
    for site in sites:
        start = max(site - flank, 0)
        end = min(site + flank, seqlen)
        fragment = rec.seq[start:end].strip("Nn")
        record = SeqRecord(fragment,
                           id="{0}:{1}".format(rec.name, site),
                           description="")
        SeqIO.write([record], fw, "fasta")
[ "def", "extract_full", "(", "rec", ",", "sites", ",", "flank", ",", "fw", ")", ":", "for", "s", "in", "sites", ":", "newid", "=", "\"{0}:{1}\"", ".", "format", "(", "rec", ".", "name", ",", "s", ")", "left", "=", "max", "(", "s", "-", "flank", ...
Full extraction of seq flanking the sites.
[ "Full", "extraction", "of", "seq", "flanking", "the", "sites", "." ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/restriction.py#L59-L69
train
200,704
tanghaibao/jcvi
jcvi/apps/restriction.py
extract_ends
def extract_ends(rec, sites, flank, fw, maxfragsize=800): """ Extraction of ends of fragments above certain size. """ nsites = len(sites) size = len(rec) for i, s in enumerate(sites): newid = "{0}:{1}".format(rec.name, s) recs = [] if i == 0 or s - sites[i - 1] <= maxfragsize: newidL = newid + "L" left = max(s - flank, 0) right = s frag = rec.seq[left:right].strip("Nn") recL = SeqRecord(frag, id=newidL, description="") if i == 0 and s > maxfragsize: # Contig L-end pass else: recs.append(recL) if i == nsites - 1 or sites[i + 1] - s <= maxfragsize: newidR = newid + "R" left = s right = min(s + flank, size) frag = rec.seq[left:right].strip("Nn") recR = SeqRecord(frag, id=newidR, description="") if i == nsites - 1 and size - s > maxfragsize: # Contig R-end pass else: recs.append(recR) SeqIO.write(recs, fw, "fasta")
python
def extract_ends(rec, sites, flank, fw, maxfragsize=800):
    """
    Extraction of ends of fragments above certain size.

    rec: contig record; sites: ordered cut-site positions on the contig;
    flank: bases to extract on each side of a site; fw: FASTA output handle;
    maxfragsize: fragments longer than this have their ends skipped (e.g. the
    GBS protocol disallows fragments larger than 800bp).
    """
    nsites = len(sites)
    size = len(rec)
    for i, s in enumerate(sites):
        newid = "{0}:{1}".format(rec.name, s)
        recs = []
        # Left flank: only kept when the fragment upstream of this site
        # (bounded by the previous cut site) is within maxfragsize
        if i == 0 or s - sites[i - 1] <= maxfragsize:
            newidL = newid + "L"
            left = max(s - flank, 0)
            right = s
            frag = rec.seq[left:right].strip("Nn")
            recL = SeqRecord(frag, id=newidL, description="")
            # First site whose distance to the contig start exceeds
            # maxfragsize: the contig L-end fragment is too long, drop it
            if i == 0 and s > maxfragsize:  # Contig L-end
                pass
            else:
                recs.append(recL)
        # Right flank: only kept when the fragment downstream of this site
        # (bounded by the next cut site) is within maxfragsize
        if i == nsites - 1 or sites[i + 1] - s <= maxfragsize:
            newidR = newid + "R"
            left = s
            right = min(s + flank, size)
            frag = rec.seq[left:right].strip("Nn")
            recR = SeqRecord(frag, id=newidR, description="")
            # Last site whose distance to the contig end exceeds
            # maxfragsize: the contig R-end fragment is too long, drop it
            if i == nsites - 1 and size - s > maxfragsize:  # Contig R-end
                pass
            else:
                recs.append(recR)
        SeqIO.write(recs, fw, "fasta")
[ "def", "extract_ends", "(", "rec", ",", "sites", ",", "flank", ",", "fw", ",", "maxfragsize", "=", "800", ")", ":", "nsites", "=", "len", "(", "sites", ")", "size", "=", "len", "(", "rec", ")", "for", "i", ",", "s", "in", "enumerate", "(", "sites...
Extraction of ends of fragments above certain size.
[ "Extraction", "of", "ends", "of", "fragments", "above", "certain", "size", "." ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/restriction.py#L72-L104
train
200,705
tanghaibao/jcvi
jcvi/apps/restriction.py
fragment
def fragment(args): """ %prog fragment fastafile enzyme Cut the fastafile using the specified enzyme, and grab upstream and downstream nucleotide sequence along with the cut site. In this case, the sequences extracted are: |- PstI ============|=========== (-------) Sometimes we need to limit the size of the restriction fragments, for example the GBS protocol does not allow fragments larger than 800bp. |-PstI |- PstI |- PstI ~~~====|=============|==========~~~~~~~===|============ (---) (---) In this case, the second fragment is longer than 800bp, therefore the two ends are NOT extracted, as in the first fragment. """ p = OptionParser(fragment.__doc__) p.add_option("--flank", default=150, type="int", help="Extract flanking bases of the cut sites [default: %default]") p.add_option("--full", default=False, action="store_true", help="The full extraction mode [default: %default]") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) fastafile, enzyme = args flank = opts.flank assert flank > 0 extract = extract_full if opts.full else extract_ends tag = "full" if opts.full else "ends" assert enzyme in set(str(x) for x in AllEnzymes) fragfastafile = fastafile.split(".")[0] + \ ".{0}.flank{1}.{2}.fasta".format(enzyme, flank, tag) enzyme = [x for x in AllEnzymes if str(x) == enzyme][0] f = Fasta(fastafile, lazy=True) fw = open(fragfastafile, "w") for name, rec in f.iteritems_ordered(): a = Analysis([enzyme], rec.seq) sites = a.full()[enzyme] extract(rec, sites, flank, fw) logging.debug("Fragments written to `{0}`.".format(fragfastafile))
python
def fragment(args):
    """
    %prog fragment fastafile enzyme

    Cut the fastafile using the specified enzyme, and grab upstream and
    downstream nucleotide sequence along with the cut site. In this case, the
    sequences extracted are:

                |- PstI
    ============|===========
           (-------)

    Sometimes we need to limit the size of the restriction fragments, for
    example the GBS protocol does not allow fragments larger than 800bp.

        |-PstI        |- PstI              |- PstI
    ~~~====|=============|==========~~~~~~~===|============
           (---)     (---)

    In this case, the second fragment is longer than 800bp, therefore the two
    ends are NOT extracted, as in the first fragment.
    """
    p = OptionParser(fragment.__doc__)
    p.add_option("--flank", default=150, type="int",
                 help="Extract flanking bases of the cut sites [default: %default]")
    p.add_option("--full", default=False, action="store_true",
                 help="The full extraction mode [default: %default]")
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    fastafile, enzyme = args
    flank = opts.flank
    assert flank > 0
    # Strategy selection: whole flanked window vs. fragment ends only
    extract = extract_full if opts.full else extract_ends
    tag = "full" if opts.full else "ends"

    assert enzyme in set(str(x) for x in AllEnzymes)
    fragfastafile = fastafile.split(".")[0] + \
        ".{0}.flank{1}.{2}.fasta".format(enzyme, flank, tag)
    # Map the enzyme name string back to its enzyme object
    # (AllEnzymes is presumably Bio.Restriction.AllEnzymes — confirm upstream)
    enzyme = [x for x in AllEnzymes if str(x) == enzyme][0]

    f = Fasta(fastafile, lazy=True)
    fw = open(fragfastafile, "w")

    for name, rec in f.iteritems_ordered():
        # Locate all cut sites for this enzyme on the contig, then extract
        a = Analysis([enzyme], rec.seq)
        sites = a.full()[enzyme]
        extract(rec, sites, flank, fw)

    logging.debug("Fragments written to `{0}`.".format(fragfastafile))
[ "def", "fragment", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "fragment", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--flank\"", ",", "default", "=", "150", ",", "type", "=", "\"int\"", ",", "help", "=", "\"Extract flanking bases of th...
%prog fragment fastafile enzyme Cut the fastafile using the specified enzyme, and grab upstream and downstream nucleotide sequence along with the cut site. In this case, the sequences extracted are: |- PstI ============|=========== (-------) Sometimes we need to limit the size of the restriction fragments, for example the GBS protocol does not allow fragments larger than 800bp. |-PstI |- PstI |- PstI ~~~====|=============|==========~~~~~~~===|============ (---) (---) In this case, the second fragment is longer than 800bp, therefore the two ends are NOT extracted, as in the first fragment.
[ "%prog", "fragment", "fastafile", "enzyme" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/restriction.py#L107-L157
train
200,706
tanghaibao/jcvi
jcvi/assembly/base.py
calculate_A50
def calculate_A50(ctgsizes, cutoff=0, percent=50): """ Given an array of contig sizes, produce A50, N50, and L50 values """ ctgsizes = np.array(ctgsizes, dtype="int") ctgsizes = np.sort(ctgsizes)[::-1] ctgsizes = ctgsizes[ctgsizes >= cutoff] a50 = np.cumsum(ctgsizes) total = np.sum(ctgsizes) idx = bisect(a50, total * percent / 100.) l50 = ctgsizes[idx] n50 = idx + 1 return a50, l50, n50
python
def calculate_A50(ctgsizes, cutoff=0, percent=50):
    """
    Given an array of contig sizes, produce A50, N50, and L50 values.

    Returns a tuple (a50, l50, n50):
    - a50: cumulative-sum array of the contig sizes sorted in descending
      order (contigs shorter than `cutoff` excluded);
    - l50: size of the contig at which the cumulative sum first exceeds
      `percent`% of the total assembly size;
    - n50: 1-based rank of that contig.
    """
    ctgsizes = np.array(ctgsizes, dtype="int")
    ctgsizes = np.sort(ctgsizes)[::-1]
    ctgsizes = ctgsizes[ctgsizes >= cutoff]

    a50 = np.cumsum(ctgsizes)

    total = np.sum(ctgsizes)
    idx = bisect(a50, total * percent / 100.)
    # Clamp: bisect may return len(ctgsizes) (e.g. percent=100, where the
    # threshold equals the final cumulative sum), which previously raised
    # IndexError on the ctgsizes[idx] lookup below.
    idx = min(idx, len(ctgsizes) - 1)
    l50 = ctgsizes[idx]
    n50 = idx + 1

    return a50, l50, n50
[ "def", "calculate_A50", "(", "ctgsizes", ",", "cutoff", "=", "0", ",", "percent", "=", "50", ")", ":", "ctgsizes", "=", "np", ".", "array", "(", "ctgsizes", ",", "dtype", "=", "\"int\"", ")", "ctgsizes", "=", "np", ".", "sort", "(", "ctgsizes", ")", ...
Given an array of contig sizes, produce A50, N50, and L50 values
[ "Given", "an", "array", "of", "contig", "sizes", "produce", "A50", "N50", "and", "L50", "values" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/base.py#L94-L110
train
200,707
tanghaibao/jcvi
jcvi/assembly/base.py
n50
def n50(args): """ %prog n50 filename Given a file with a list of numbers denoting contig lengths, calculate N50. Input file can be both FASTA or a list of sizes. """ from jcvi.graphics.histogram import loghistogram p = OptionParser(n50.__doc__) p.add_option("--print0", default=False, action="store_true", help="Print size and L50 to stdout [default: %default]") opts, args = p.parse_args(args) if len(args) < 1: sys.exit(not p.print_help()) ctgsizes = [] # Guess file format probe = open(args[0]).readline()[0] isFasta = (probe == '>') if isFasta: for filename in args: f = Fasta(filename) ctgsizes += list(b for a, b in f.itersizes()) else: for row in must_open(args): try: ctgsize = int(float(row.split()[-1])) except ValueError: continue ctgsizes.append(ctgsize) a50, l50, nn50 = calculate_A50(ctgsizes) sumsize = sum(ctgsizes) minsize = min(ctgsizes) maxsize = max(ctgsizes) n = len(ctgsizes) print(", ".join(args), file=sys.stderr) summary = (sumsize, l50, nn50, minsize, maxsize, n) print(" ".join("{0}={1}".format(a, b) for a, b in \ zip(header, summary)), file=sys.stderr) loghistogram(ctgsizes) if opts.print0: print("\t".join(str(x) for x in (",".join(args), sumsize, l50))) return zip(header, summary)
python
def n50(args):
    """
    %prog n50 filename

    Given a file with a list of numbers denoting contig lengths, calculate N50.
    Input file can be both FASTA or a list of sizes.
    """
    from jcvi.graphics.histogram import loghistogram

    p = OptionParser(n50.__doc__)
    p.add_option("--print0", default=False, action="store_true",
                 help="Print size and L50 to stdout [default: %default]")
    opts, args = p.parse_args(args)

    if len(args) < 1:
        sys.exit(not p.print_help())

    ctgsizes = []

    # Guess file format from the first byte: FASTA headers start with '>'.
    # `with` closes the probe handle (previously leaked); `[:1]` avoids an
    # IndexError on an empty first line.
    with open(args[0]) as probefp:
        probe = probefp.readline()[:1]
    isFasta = (probe == '>')
    if isFasta:
        for filename in args:
            f = Fasta(filename)
            ctgsizes += list(b for a, b in f.itersizes())
    else:
        for row in must_open(args):
            try:
                ctgsize = int(float(row.split()[-1]))
            except ValueError:
                # Skip header/comment rows that carry no trailing number
                continue
            ctgsizes.append(ctgsize)

    a50, l50, nn50 = calculate_A50(ctgsizes)
    sumsize = sum(ctgsizes)
    minsize = min(ctgsizes)
    maxsize = max(ctgsizes)
    n = len(ctgsizes)
    print(", ".join(args), file=sys.stderr)

    # `header` is a module-level label list paired with these stats
    summary = (sumsize, l50, nn50, minsize, maxsize, n)
    print(" ".join("{0}={1}".format(a, b) for a, b in
                   zip(header, summary)), file=sys.stderr)
    loghistogram(ctgsizes)

    if opts.print0:
        print("\t".join(str(x) for x in (",".join(args), sumsize, l50)))

    return zip(header, summary)
[ "def", "n50", "(", "args", ")", ":", "from", "jcvi", ".", "graphics", ".", "histogram", "import", "loghistogram", "p", "=", "OptionParser", "(", "n50", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--print0\"", ",", "default", "=", "False", ",", ...
%prog n50 filename Given a file with a list of numbers denoting contig lengths, calculate N50. Input file can be both FASTA or a list of sizes.
[ "%prog", "n50", "filename" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/base.py#L143-L194
train
200,708
tanghaibao/jcvi
jcvi/assembly/syntenypath.py
fromovl
def fromovl(args): """ %prog graph nucmer2ovl.ovl fastafile Build overlap graph from ovl file which is converted using NUCMER2OVL. """ p = OptionParser(fromovl.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) ovlfile, fastafile = args ovl = OVL(ovlfile) g = ovl.graph fw = open("contained.ids", "w") print("\n".join(sorted(ovl.contained)), file=fw) graph_to_agp(g, ovlfile, fastafile, exclude=ovl.contained, verbose=False)
python
def fromovl(args):
    """
    %prog graph nucmer2ovl.ovl fastafile

    Build overlap graph from ovl file which is converted using NUCMER2OVL.
    """
    p = OptionParser(fromovl.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    ovlfile, fastafile = args
    ovl = OVL(ovlfile)
    g = ovl.graph

    # Persist the contained scaffold ids; use `with` so the handle is closed
    # and flushed (previously left open) before downstream steps run.
    with open("contained.ids", "w") as fw:
        print("\n".join(sorted(ovl.contained)), file=fw)

    graph_to_agp(g, ovlfile, fastafile, exclude=ovl.contained, verbose=False)
[ "def", "fromovl", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "fromovl", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "2", ":", "sys", ".", "exit", "(", ...
%prog graph nucmer2ovl.ovl fastafile Build overlap graph from ovl file which is converted using NUCMER2OVL.
[ "%prog", "graph", "nucmer2ovl", ".", "ovl", "fastafile" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/syntenypath.py#L106-L125
train
200,709
tanghaibao/jcvi
jcvi/assembly/syntenypath.py
bed
def bed(args): """ %prog bed anchorsfile Convert ANCHORS file to BED format. """ from collections import defaultdict from jcvi.compara.synteny import AnchorFile, check_beds from jcvi.formats.bed import Bed from jcvi.formats.base import get_number p = OptionParser(bed.__doc__) p.add_option("--switch", default=False, action="store_true", help="Switch reference and aligned map elements") p.add_option("--scale", type="float", help="Scale the aligned map distance by factor") p.set_beds() p.set_outfile() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) anchorsfile, = args switch = opts.switch scale = opts.scale ac = AnchorFile(anchorsfile) pairs = defaultdict(list) for a, b, block_id in ac.iter_pairs(): pairs[a].append(b) qbed, sbed, qorder, sorder, is_self = check_beds(anchorsfile, p, opts) bd = Bed() for q in qbed: qseqid, qstart, qend, qaccn = q.seqid, q.start, q.end, q.accn if qaccn not in pairs: continue for s in pairs[qaccn]: si, s = sorder[s] sseqid, sstart, send, saccn = s.seqid, s.start, s.end, s.accn if switch: qseqid, sseqid = sseqid, qseqid qstart, sstart = sstart, qstart qend, send = send, qend qaccn, saccn = saccn, qaccn if scale: sstart /= scale try: newsseqid = get_number(sseqid) except ValueError: raise ValueError("`{0}` is on `{1}` with no number to extract".\ format(saccn, sseqid)) bedline = "\t".join(str(x) for x in (qseqid, qstart - 1, qend, "{0}:{1}".format(newsseqid, sstart))) bd.add(bedline) bd.print_to_file(filename=opts.outfile, sorted=True)
python
def bed(args):
    """
    %prog bed anchorsfile

    Convert ANCHORS file to BED format.
    """
    from collections import defaultdict
    from jcvi.compara.synteny import AnchorFile, check_beds
    from jcvi.formats.bed import Bed
    from jcvi.formats.base import get_number

    p = OptionParser(bed.__doc__)
    p.add_option("--switch", default=False, action="store_true",
                 help="Switch reference and aligned map elements")
    p.add_option("--scale", type="float",
                 help="Scale the aligned map distance by factor")
    p.set_beds()
    p.set_outfile()
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    anchorsfile, = args
    switch = opts.switch
    scale = opts.scale
    ac = AnchorFile(anchorsfile)
    pairs = defaultdict(list)
    # Group aligned-map partners by their reference (query) accession
    for a, b, block_id in ac.iter_pairs():
        pairs[a].append(b)

    qbed, sbed, qorder, sorder, is_self = check_beds(anchorsfile, p, opts)

    bd = Bed()
    for q in qbed:
        qseqid, qstart, qend, qaccn = q.seqid, q.start, q.end, q.accn
        if qaccn not in pairs:
            continue
        # NOTE(review): inner-loop extent reconstructed from a collapsed
        # one-line dump — emits one bed line per anchor pair; confirm against
        # the original file's indentation.
        for s in pairs[qaccn]:
            si, s = sorder[s]
            sseqid, sstart, send, saccn = s.seqid, s.start, s.end, s.accn
            if switch:
                # Swap reference and aligned-map roles in the output
                qseqid, sseqid = sseqid, qseqid
                qstart, sstart = sstart, qstart
                qend, send = send, qend
                qaccn, saccn = saccn, qaccn
            if scale:
                sstart /= scale
            try:
                # The aligned seqid must end in a number (e.g. chr3 -> 3)
                newsseqid = get_number(sseqid)
            except ValueError:
                raise ValueError("`{0}` is on `{1}` with no number to extract".\
                        format(saccn, sseqid))
            bedline = "\t".join(str(x) for x in (qseqid, qstart - 1, qend,
                                "{0}:{1}".format(newsseqid, sstart)))
            bd.add(bedline)

    bd.print_to_file(filename=opts.outfile, sorted=True)
[ "def", "bed", "(", "args", ")", ":", "from", "collections", "import", "defaultdict", "from", "jcvi", ".", "compara", ".", "synteny", "import", "AnchorFile", ",", "check_beds", "from", "jcvi", ".", "formats", ".", "bed", "import", "Bed", "from", "jcvi", "."...
%prog bed anchorsfile Convert ANCHORS file to BED format.
[ "%prog", "bed", "anchorsfile" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/syntenypath.py#L128-L184
train
200,710
tanghaibao/jcvi
jcvi/assembly/syntenypath.py
happy_edges
def happy_edges(row, prefix=None): """ Convert a row in HAPPY file and yield edges. """ trans = maketrans("[](){}", " ") row = row.strip().strip("+") row = row.translate(trans) scfs = [x.strip("+") for x in row.split(":")] for a, b in pairwise(scfs): oa = '<' if a.strip()[0] == '-' else '>' ob = '<' if b.strip()[0] == '-' else '>' is_uncertain = a[-1] == ' ' or b[0] == ' ' a = a.strip().strip('-') b = b.strip().strip('-') if prefix: a = prefix + a b = prefix + b yield (a, b, oa, ob), is_uncertain
python
def happy_edges(row, prefix=None):
    """
    Convert a row in HAPPY file and yield edges.

    Yields ((a, b, oa, ob), is_uncertain) tuples where a/b are scaffold
    names, oa/ob their orientations ('<' for reversed, '>' otherwise), and
    is_uncertain marks joins that were bracketed in the HAPPY row.
    """
    # Replace each bracket character with a space. The replacement string
    # must be exactly as long as "[](){}" — a single-character replacement
    # makes maketrans() raise ValueError.
    trans = maketrans("[](){}", " " * 6)
    row = row.strip().strip("+")
    row = row.translate(trans)
    scfs = [x.strip("+") for x in row.split(":")]
    for a, b in pairwise(scfs):
        oa = '<' if a.strip()[0] == '-' else '>'
        ob = '<' if b.strip()[0] == '-' else '>'

        # A residual space (left by a stripped bracket) adjacent to the join
        # marks the edge as uncertain
        is_uncertain = a[-1] == ' ' or b[0] == ' '

        a = a.strip().strip('-')
        b = b.strip().strip('-')

        if prefix:
            a = prefix + a
            b = prefix + b

        yield (a, b, oa, ob), is_uncertain
[ "def", "happy_edges", "(", "row", ",", "prefix", "=", "None", ")", ":", "trans", "=", "maketrans", "(", "\"[](){}\"", ",", "\" \"", ")", "row", "=", "row", ".", "strip", "(", ")", ".", "strip", "(", "\"+\"", ")", "row", "=", "row", ".", "trans...
Convert a row in HAPPY file and yield edges.
[ "Convert", "a", "row", "in", "HAPPY", "file", "and", "yield", "edges", "." ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/syntenypath.py#L195-L216
train
200,711
tanghaibao/jcvi
jcvi/assembly/syntenypath.py
partition
def partition(args): """ %prog partition happy.txt synteny.graph Select edges from another graph and merge it with the certain edges built from the HAPPY mapping data. """ allowed_format = ("png", "ps") p = OptionParser(partition.__doc__) p.add_option("--prefix", help="Add prefix to the name [default: %default]") p.add_option("--namestart", default=0, type="int", help="Use a shorter name, starting index [default: %default]") p.add_option("--format", default="png", choices=allowed_format, help="Generate image of format [default: %default]") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) happyfile, graphfile = args bg = BiGraph() bg.read(graphfile, color="red") prefix = opts.prefix fp = open(happyfile) for i, row in enumerate(fp): nns = happy_nodes(row, prefix=prefix) nodes = set(nns) edges = happy_edges(row, prefix=prefix) small_graph = BiGraph() for (a, b, oa, ob), is_uncertain in edges: color = "gray" if is_uncertain else "black" small_graph.add_edge(a, b, oa, ob, color=color) for (u, v), e in bg.edges.items(): # Grab edge if both vertices are on the same line if u in nodes and v in nodes: uv = (str(u), str(v)) if uv in small_graph.edges: e = small_graph.edges[uv] e.color = "blue" # supported by both evidences else: small_graph.add_edge(e) print(small_graph, file=sys.stderr) pngfile = "A{0:02d}.{1}".format(i + 1, opts.format) telomeres = (nns[0], nns[-1]) small_graph.draw(pngfile, namestart=opts.namestart, nodehighlight=telomeres, dpi=72) legend = ["Edge colors:"] legend.append("[BLUE] Experimental + Synteny") legend.append("[BLACK] Experimental certain") legend.append("[GRAY] Experimental uncertain") legend.append("[RED] Synteny only") legend.append("Rectangle nodes are telomeres.") print("\n".join(legend), file=sys.stderr)
python
def partition(args): """ %prog partition happy.txt synteny.graph Select edges from another graph and merge it with the certain edges built from the HAPPY mapping data. """ allowed_format = ("png", "ps") p = OptionParser(partition.__doc__) p.add_option("--prefix", help="Add prefix to the name [default: %default]") p.add_option("--namestart", default=0, type="int", help="Use a shorter name, starting index [default: %default]") p.add_option("--format", default="png", choices=allowed_format, help="Generate image of format [default: %default]") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) happyfile, graphfile = args bg = BiGraph() bg.read(graphfile, color="red") prefix = opts.prefix fp = open(happyfile) for i, row in enumerate(fp): nns = happy_nodes(row, prefix=prefix) nodes = set(nns) edges = happy_edges(row, prefix=prefix) small_graph = BiGraph() for (a, b, oa, ob), is_uncertain in edges: color = "gray" if is_uncertain else "black" small_graph.add_edge(a, b, oa, ob, color=color) for (u, v), e in bg.edges.items(): # Grab edge if both vertices are on the same line if u in nodes and v in nodes: uv = (str(u), str(v)) if uv in small_graph.edges: e = small_graph.edges[uv] e.color = "blue" # supported by both evidences else: small_graph.add_edge(e) print(small_graph, file=sys.stderr) pngfile = "A{0:02d}.{1}".format(i + 1, opts.format) telomeres = (nns[0], nns[-1]) small_graph.draw(pngfile, namestart=opts.namestart, nodehighlight=telomeres, dpi=72) legend = ["Edge colors:"] legend.append("[BLUE] Experimental + Synteny") legend.append("[BLACK] Experimental certain") legend.append("[GRAY] Experimental uncertain") legend.append("[RED] Synteny only") legend.append("Rectangle nodes are telomeres.") print("\n".join(legend), file=sys.stderr)
[ "def", "partition", "(", "args", ")", ":", "allowed_format", "=", "(", "\"png\"", ",", "\"ps\"", ")", "p", "=", "OptionParser", "(", "partition", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--prefix\"", ",", "help", "=", "\"Add prefix to the name [d...
%prog partition happy.txt synteny.graph Select edges from another graph and merge it with the certain edges built from the HAPPY mapping data.
[ "%prog", "partition", "happy", ".", "txt", "synteny", ".", "graph" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/syntenypath.py#L219-L276
train
200,712
tanghaibao/jcvi
jcvi/assembly/syntenypath.py
merge
def merge(args): """ %prog merge graphs Merge multiple graphs together and visualize. """ p = OptionParser(merge.__doc__) p.add_option("--colorlist", default="black,red,pink,blue,green", help="The color palette [default: %default]") opts, args = p.parse_args(args) if len(args) < 1: sys.exit(not p.print_help()) colorlist = opts.colorlist.split(",") assert len(colorlist) >= len(args), "Need more colors in --colorlist" g = BiGraph() for a, c in zip(args, colorlist): g.read(a, color=c) g.draw("merged.png")
python
def merge(args): """ %prog merge graphs Merge multiple graphs together and visualize. """ p = OptionParser(merge.__doc__) p.add_option("--colorlist", default="black,red,pink,blue,green", help="The color palette [default: %default]") opts, args = p.parse_args(args) if len(args) < 1: sys.exit(not p.print_help()) colorlist = opts.colorlist.split(",") assert len(colorlist) >= len(args), "Need more colors in --colorlist" g = BiGraph() for a, c in zip(args, colorlist): g.read(a, color=c) g.draw("merged.png")
[ "def", "merge", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "merge", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--colorlist\"", ",", "default", "=", "\"black,red,pink,blue,green\"", ",", "help", "=", "\"The color palette [default: %default]\"",...
%prog merge graphs Merge multiple graphs together and visualize.
[ "%prog", "merge", "graphs" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/syntenypath.py#L279-L300
train
200,713
tanghaibao/jcvi
jcvi/assembly/syntenypath.py
happy
def happy(args): """ %prog happy happy.txt Make bi-directed graph from HAPPY mapping data. JCVI encodes uncertainties in the order of the contigs / scaffolds. : separates scaffolds + means telomere (though the telomere repeats may not show because the telomere-adjacent sequence is missing) - means that the scaffold is in reverse orientation to that shown in the 2003 TIGR scaffolds. Ambiguities are represented as follows, using Paul Dear.s description: [ ] means undetermined orientation. error quite possible (70% confidence?) ( ) means uncertain orientation. small chance of error (90% confidence?) { } means uncertain order. Example: +-8254707:8254647:-8254690:{[8254694]:[8254713]:[8254531]:[8254797]}:8254802:8254788+ """ p = OptionParser(happy.__doc__) p.add_option("--prefix", help="Add prefix to the name [default: %default]") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) happyfile, = args certain = "certain.graph" uncertain = "uncertain.graph" fw1 = open(certain, "w") fw2 = open(uncertain, "w") fp = open(happyfile) for row in fp: for e, is_uncertain in happy_edges(row, prefix=opts.prefix): fw = fw2 if is_uncertain else fw1 print(e, file=fw) logging.debug("Edges written to `{0}`".format(",".join((certain, uncertain))))
python
def happy(args): """ %prog happy happy.txt Make bi-directed graph from HAPPY mapping data. JCVI encodes uncertainties in the order of the contigs / scaffolds. : separates scaffolds + means telomere (though the telomere repeats may not show because the telomere-adjacent sequence is missing) - means that the scaffold is in reverse orientation to that shown in the 2003 TIGR scaffolds. Ambiguities are represented as follows, using Paul Dear.s description: [ ] means undetermined orientation. error quite possible (70% confidence?) ( ) means uncertain orientation. small chance of error (90% confidence?) { } means uncertain order. Example: +-8254707:8254647:-8254690:{[8254694]:[8254713]:[8254531]:[8254797]}:8254802:8254788+ """ p = OptionParser(happy.__doc__) p.add_option("--prefix", help="Add prefix to the name [default: %default]") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) happyfile, = args certain = "certain.graph" uncertain = "uncertain.graph" fw1 = open(certain, "w") fw2 = open(uncertain, "w") fp = open(happyfile) for row in fp: for e, is_uncertain in happy_edges(row, prefix=opts.prefix): fw = fw2 if is_uncertain else fw1 print(e, file=fw) logging.debug("Edges written to `{0}`".format(",".join((certain, uncertain))))
[ "def", "happy", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "happy", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--prefix\"", ",", "help", "=", "\"Add prefix to the name [default: %default]\"", ")", "opts", ",", "args", "=", "p", ".", "p...
%prog happy happy.txt Make bi-directed graph from HAPPY mapping data. JCVI encodes uncertainties in the order of the contigs / scaffolds. : separates scaffolds + means telomere (though the telomere repeats may not show because the telomere-adjacent sequence is missing) - means that the scaffold is in reverse orientation to that shown in the 2003 TIGR scaffolds. Ambiguities are represented as follows, using Paul Dear.s description: [ ] means undetermined orientation. error quite possible (70% confidence?) ( ) means uncertain orientation. small chance of error (90% confidence?) { } means uncertain order. Example: +-8254707:8254647:-8254690:{[8254694]:[8254713]:[8254531]:[8254797]}:8254802:8254788+
[ "%prog", "happy", "happy", ".", "txt" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/syntenypath.py#L303-L344
train
200,714
tanghaibao/jcvi
jcvi/assembly/syntenypath.py
fromblast
def fromblast(args): """ %prog fromblast blastfile subject.fasta Generate path from BLAST file. If multiple subjects map to the same query, an edge is constructed between them (with the link provided by the query). The BLAST file MUST be filtered, chained, supermapped. """ from jcvi.formats.blast import sort from jcvi.utils.range import range_distance p = OptionParser(fromblast.__doc__) p.add_option("--clique", default=False, action="store_true", help="Populate clique instead of linear path [default: %default]") p.add_option("--maxdist", default=100000, type="int", help="Create edge within certain distance [default: %default]") p.set_verbose(help="Print verbose reports to stdout") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) blastfile, subjectfasta = args clique = opts.clique maxdist = opts.maxdist sort([blastfile, "--query"]) blast = BlastSlow(blastfile, sorted=True) g = BiGraph() for query, blines in groupby(blast, key=lambda x: x.query): blines = list(blines) iterator = combinations(blines, 2) if clique else pairwise(blines) for a, b in iterator: asub, bsub = a.subject, b.subject if asub == bsub: continue arange = (a.query, a.qstart, a.qstop, "+") brange = (b.query, b.qstart, b.qstop, "+") dist, oo = range_distance(arange, brange, distmode="ee") if dist > maxdist: continue atag = ">" if a.orientation == "+" else "<" btag = ">" if b.orientation == "+" else "<" g.add_edge(asub, bsub, atag, btag) graph_to_agp(g, blastfile, subjectfasta, verbose=opts.verbose)
python
def fromblast(args): """ %prog fromblast blastfile subject.fasta Generate path from BLAST file. If multiple subjects map to the same query, an edge is constructed between them (with the link provided by the query). The BLAST file MUST be filtered, chained, supermapped. """ from jcvi.formats.blast import sort from jcvi.utils.range import range_distance p = OptionParser(fromblast.__doc__) p.add_option("--clique", default=False, action="store_true", help="Populate clique instead of linear path [default: %default]") p.add_option("--maxdist", default=100000, type="int", help="Create edge within certain distance [default: %default]") p.set_verbose(help="Print verbose reports to stdout") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) blastfile, subjectfasta = args clique = opts.clique maxdist = opts.maxdist sort([blastfile, "--query"]) blast = BlastSlow(blastfile, sorted=True) g = BiGraph() for query, blines in groupby(blast, key=lambda x: x.query): blines = list(blines) iterator = combinations(blines, 2) if clique else pairwise(blines) for a, b in iterator: asub, bsub = a.subject, b.subject if asub == bsub: continue arange = (a.query, a.qstart, a.qstop, "+") brange = (b.query, b.qstart, b.qstop, "+") dist, oo = range_distance(arange, brange, distmode="ee") if dist > maxdist: continue atag = ">" if a.orientation == "+" else "<" btag = ">" if b.orientation == "+" else "<" g.add_edge(asub, bsub, atag, btag) graph_to_agp(g, blastfile, subjectfasta, verbose=opts.verbose)
[ "def", "fromblast", "(", "args", ")", ":", "from", "jcvi", ".", "formats", ".", "blast", "import", "sort", "from", "jcvi", ".", "utils", ".", "range", "import", "range_distance", "p", "=", "OptionParser", "(", "fromblast", ".", "__doc__", ")", "p", ".", ...
%prog fromblast blastfile subject.fasta Generate path from BLAST file. If multiple subjects map to the same query, an edge is constructed between them (with the link provided by the query). The BLAST file MUST be filtered, chained, supermapped.
[ "%prog", "fromblast", "blastfile", "subject", ".", "fasta" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/syntenypath.py#L347-L394
train
200,715
tanghaibao/jcvi
jcvi/assembly/syntenypath.py
connect
def connect(args): """ %prog connect assembly.fasta read_mapping.blast Connect contigs using long reads. """ p = OptionParser(connect.__doc__) p.add_option("--clip", default=2000, type="int", help="Only consider end of contigs [default: %default]") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) fastafile, blastfile = args clip = opts.clip sizes = Sizes(fastafile).mapping blast = Blast(blastfile) blasts = [] for b in blast: seqid = b.subject size = sizes[seqid] start, end = b.sstart, b.sstop cstart, cend = min(size, clip), max(0, size - clip) if start > cstart and end < cend: continue blasts.append(b) key = lambda x: x.query blasts.sort(key=key) g = BiGraph() for query, bb in groupby(blasts, key=key): bb = sorted(bb, key=lambda x: x.qstart) nsubjects = len(set(x.subject for x in bb)) if nsubjects == 1: continue print("\n".join(str(x) for x in bb)) for a, b in pairwise(bb): astart, astop = a.qstart, a.qstop bstart, bstop = b.qstart, b.qstop if a.subject == b.subject: continue arange = astart, astop brange = bstart, bstop ov = range_intersect(arange, brange) alen = astop - astart + 1 blen = bstop - bstart + 1 if ov: ostart, ostop = ov ov = ostop - ostart + 1 print(ov, alen, blen) if ov and (ov > alen / 2 or ov > blen / 2): print("Too much overlap ({0})".format(ov)) continue asub = a.subject bsub = b.subject atag = ">" if a.orientation == "+" else "<" btag = ">" if b.orientation == "+" else "<" g.add_edge(asub, bsub, atag, btag) graph_to_agp(g, blastfile, fastafile, verbose=False)
python
def connect(args): """ %prog connect assembly.fasta read_mapping.blast Connect contigs using long reads. """ p = OptionParser(connect.__doc__) p.add_option("--clip", default=2000, type="int", help="Only consider end of contigs [default: %default]") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) fastafile, blastfile = args clip = opts.clip sizes = Sizes(fastafile).mapping blast = Blast(blastfile) blasts = [] for b in blast: seqid = b.subject size = sizes[seqid] start, end = b.sstart, b.sstop cstart, cend = min(size, clip), max(0, size - clip) if start > cstart and end < cend: continue blasts.append(b) key = lambda x: x.query blasts.sort(key=key) g = BiGraph() for query, bb in groupby(blasts, key=key): bb = sorted(bb, key=lambda x: x.qstart) nsubjects = len(set(x.subject for x in bb)) if nsubjects == 1: continue print("\n".join(str(x) for x in bb)) for a, b in pairwise(bb): astart, astop = a.qstart, a.qstop bstart, bstop = b.qstart, b.qstop if a.subject == b.subject: continue arange = astart, astop brange = bstart, bstop ov = range_intersect(arange, brange) alen = astop - astart + 1 blen = bstop - bstart + 1 if ov: ostart, ostop = ov ov = ostop - ostart + 1 print(ov, alen, blen) if ov and (ov > alen / 2 or ov > blen / 2): print("Too much overlap ({0})".format(ov)) continue asub = a.subject bsub = b.subject atag = ">" if a.orientation == "+" else "<" btag = ">" if b.orientation == "+" else "<" g.add_edge(asub, bsub, atag, btag) graph_to_agp(g, blastfile, fastafile, verbose=False)
[ "def", "connect", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "connect", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--clip\"", ",", "default", "=", "2000", ",", "type", "=", "\"int\"", ",", "help", "=", "\"Only consider end of contigs [...
%prog connect assembly.fasta read_mapping.blast Connect contigs using long reads.
[ "%prog", "connect", "assembly", ".", "fasta", "read_mapping", ".", "blast" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/syntenypath.py#L452-L516
train
200,716
tanghaibao/jcvi
jcvi/projects/synfind.py
grasstruth
def grasstruth(args): """ %prog grasstruth james-pan-grass.txt Prepare truth pairs for 4 grasses. """ p = OptionParser(grasstruth.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) james, = args fp = open(james) pairs = set() for row in fp: atoms = row.split() genes = [] idx = {} for i, a in enumerate(atoms): aa = a.split("||") for ma in aa: idx[ma] = i genes.extend(aa) genes = [x for x in genes if ":" not in x] Os = [x for x in genes if x.startswith("Os")] for o in Os: for g in genes: if idx[o] == idx[g]: continue pairs.add(tuple(sorted((o, g)))) for a, b in sorted(pairs): print("\t".join((a, b)))
python
def grasstruth(args): """ %prog grasstruth james-pan-grass.txt Prepare truth pairs for 4 grasses. """ p = OptionParser(grasstruth.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) james, = args fp = open(james) pairs = set() for row in fp: atoms = row.split() genes = [] idx = {} for i, a in enumerate(atoms): aa = a.split("||") for ma in aa: idx[ma] = i genes.extend(aa) genes = [x for x in genes if ":" not in x] Os = [x for x in genes if x.startswith("Os")] for o in Os: for g in genes: if idx[o] == idx[g]: continue pairs.add(tuple(sorted((o, g)))) for a, b in sorted(pairs): print("\t".join((a, b)))
[ "def", "grasstruth", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "grasstruth", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "1", ":", "sys", ".", "exit", "...
%prog grasstruth james-pan-grass.txt Prepare truth pairs for 4 grasses.
[ "%prog", "grasstruth", "james", "-", "pan", "-", "grass", ".", "txt" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/projects/synfind.py#L52-L85
train
200,717
tanghaibao/jcvi
jcvi/projects/synfind.py
cyntenator
def cyntenator(args): """ %prog cyntenator athaliana.athaliana.last athaliana.bed Prepare input for Cyntenator. """ p = OptionParser(cyntenator.__doc__) opts, args = p.parse_args(args) if len(args) < 2: sys.exit(not p.print_help()) lastfile = args[0] fp = open(lastfile) filteredlastfile = lastfile + ".blast" fw = open(filteredlastfile, "w") for row in fp: b = BlastLine(row) if b.query == b.subject: continue print("\t".join((b.query, b.subject, str(b.score))), file=fw) fw.close() bedfiles = args[1:] fp = open(lastfile) b = BlastLine(next(fp)) subject = b.subject txtfiles = [] for bedfile in bedfiles: order = Bed(bedfile).order if subject in order: db = op.basename(bedfile).split(".")[0][:20] logging.debug("Found db: {0}".format(db)) txtfile = write_txt(bedfile) txtfiles.append(txtfile) db += ".txt" mm = MakeManager() for txtfile in txtfiles: outfile = txtfile + ".alignment" cmd = 'cyntenator -t "({0} {1})" -h blast {2} > {3}'\ .format(txtfile, db, filteredlastfile, outfile) mm.add((txtfile, db, filteredlastfile), outfile, cmd) mm.write()
python
def cyntenator(args): """ %prog cyntenator athaliana.athaliana.last athaliana.bed Prepare input for Cyntenator. """ p = OptionParser(cyntenator.__doc__) opts, args = p.parse_args(args) if len(args) < 2: sys.exit(not p.print_help()) lastfile = args[0] fp = open(lastfile) filteredlastfile = lastfile + ".blast" fw = open(filteredlastfile, "w") for row in fp: b = BlastLine(row) if b.query == b.subject: continue print("\t".join((b.query, b.subject, str(b.score))), file=fw) fw.close() bedfiles = args[1:] fp = open(lastfile) b = BlastLine(next(fp)) subject = b.subject txtfiles = [] for bedfile in bedfiles: order = Bed(bedfile).order if subject in order: db = op.basename(bedfile).split(".")[0][:20] logging.debug("Found db: {0}".format(db)) txtfile = write_txt(bedfile) txtfiles.append(txtfile) db += ".txt" mm = MakeManager() for txtfile in txtfiles: outfile = txtfile + ".alignment" cmd = 'cyntenator -t "({0} {1})" -h blast {2} > {3}'\ .format(txtfile, db, filteredlastfile, outfile) mm.add((txtfile, db, filteredlastfile), outfile, cmd) mm.write()
[ "def", "cyntenator", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "cyntenator", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "<", "2", ":", "sys", ".", "exit", "(...
%prog cyntenator athaliana.athaliana.last athaliana.bed Prepare input for Cyntenator.
[ "%prog", "cyntenator", "athaliana", ".", "athaliana", ".", "last", "athaliana", ".", "bed" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/projects/synfind.py#L382-L425
train
200,718
tanghaibao/jcvi
jcvi/projects/synfind.py
iadhore
def iadhore(args): """ %prog iadhore athaliana.athaliana.last athaliana.bed Wrap around iADHoRe. """ p = OptionParser(iadhore.__doc__) opts, args = p.parse_args(args) if len(args) < 2: sys.exit(not p.print_help()) lastfile = args[0] bedfiles = args[1:] blast_table = "blast_table.txt" fp = open(lastfile) seen = set() for row in fp: c = BlastLine(row) a, b = c.query, c.subject a, b = gene_name(a), gene_name(b) if a > b: a, b = b, a seen.add((a, b)) fw = open(blast_table, "w") for a, b in seen: print("\t".join((a, b)), file=fw) fw.close() logging.debug("A total of {0} pairs written to `{1}`"\ .format(len(seen), blast_table)) fw = open("config.txt", "w") for bedfile in bedfiles: pf, stanza = write_lst(bedfile) print("genome={0}".format(pf), file=fw) for seqid, fname in stanza: print(" ".join((seqid, fname)), file=fw) print(file=fw) print("blast_table={0}".format(blast_table), file=fw) print("cluster_type=colinear", file=fw) print("tandem_gap=10", file=fw) print("prob_cutoff=0.001", file=fw) print("gap_size=20", file=fw) print("cluster_gap=20", file=fw) print("q_value=0.9", file=fw) print("anchor_points=4", file=fw) print("alignment_method=gg2", file=fw) print("max_gaps_in_alignment=20", file=fw) print("output_path=i-adhore_out", file=fw) print("number_of_threads=4", file=fw) fw.close()
python
def iadhore(args): """ %prog iadhore athaliana.athaliana.last athaliana.bed Wrap around iADHoRe. """ p = OptionParser(iadhore.__doc__) opts, args = p.parse_args(args) if len(args) < 2: sys.exit(not p.print_help()) lastfile = args[0] bedfiles = args[1:] blast_table = "blast_table.txt" fp = open(lastfile) seen = set() for row in fp: c = BlastLine(row) a, b = c.query, c.subject a, b = gene_name(a), gene_name(b) if a > b: a, b = b, a seen.add((a, b)) fw = open(blast_table, "w") for a, b in seen: print("\t".join((a, b)), file=fw) fw.close() logging.debug("A total of {0} pairs written to `{1}`"\ .format(len(seen), blast_table)) fw = open("config.txt", "w") for bedfile in bedfiles: pf, stanza = write_lst(bedfile) print("genome={0}".format(pf), file=fw) for seqid, fname in stanza: print(" ".join((seqid, fname)), file=fw) print(file=fw) print("blast_table={0}".format(blast_table), file=fw) print("cluster_type=colinear", file=fw) print("tandem_gap=10", file=fw) print("prob_cutoff=0.001", file=fw) print("gap_size=20", file=fw) print("cluster_gap=20", file=fw) print("q_value=0.9", file=fw) print("anchor_points=4", file=fw) print("alignment_method=gg2", file=fw) print("max_gaps_in_alignment=20", file=fw) print("output_path=i-adhore_out", file=fw) print("number_of_threads=4", file=fw) fw.close()
[ "def", "iadhore", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "iadhore", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "<", "2", ":", "sys", ".", "exit", "(", "...
%prog iadhore athaliana.athaliana.last athaliana.bed Wrap around iADHoRe.
[ "%prog", "iadhore", "athaliana", ".", "athaliana", ".", "last", "athaliana", ".", "bed" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/projects/synfind.py#L428-L480
train
200,719
tanghaibao/jcvi
jcvi/projects/synfind.py
athalianatruth
def athalianatruth(args): """ %prog athalianatruth J_a.txt J_bc.txt Prepare pairs data for At alpha/beta/gamma. """ p = OptionParser(athalianatruth.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) atxt, bctxt = args g = Grouper() pairs = set() for txt in (atxt, bctxt): extract_groups(g, pairs, txt) fw = open("pairs", "w") for pair in sorted(pairs): print("\t".join(pair), file=fw) fw.close() fw = open("groups", "w") for group in list(g): print(",".join(group), file=fw) fw.close()
python
def athalianatruth(args): """ %prog athalianatruth J_a.txt J_bc.txt Prepare pairs data for At alpha/beta/gamma. """ p = OptionParser(athalianatruth.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) atxt, bctxt = args g = Grouper() pairs = set() for txt in (atxt, bctxt): extract_groups(g, pairs, txt) fw = open("pairs", "w") for pair in sorted(pairs): print("\t".join(pair), file=fw) fw.close() fw = open("groups", "w") for group in list(g): print(",".join(group), file=fw) fw.close()
[ "def", "athalianatruth", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "athalianatruth", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "2", ":", "sys", ".", "ex...
%prog athalianatruth J_a.txt J_bc.txt Prepare pairs data for At alpha/beta/gamma.
[ "%prog", "athalianatruth", "J_a", ".", "txt", "J_bc", ".", "txt" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/projects/synfind.py#L501-L527
train
200,720
tanghaibao/jcvi
jcvi/projects/synfind.py
mcscanx
def mcscanx(args): """ %prog mcscanx athaliana.athaliana.last athaliana.bed Wrap around MCScanX. """ p = OptionParser(mcscanx.__doc__) opts, args = p.parse_args(args) if len(args) < 2: sys.exit(not p.print_help()) blastfile = args[0] bedfiles = args[1:] prefix = "_".join(op.basename(x)[:2] for x in bedfiles) symlink(blastfile, prefix + ".blast") allbedfile = prefix + ".gff" fw = open(allbedfile, "w") for i, bedfile in enumerate(bedfiles): prefix = chr(ord('A') + i) make_gff(bedfile, prefix, fw) fw.close()
python
def mcscanx(args): """ %prog mcscanx athaliana.athaliana.last athaliana.bed Wrap around MCScanX. """ p = OptionParser(mcscanx.__doc__) opts, args = p.parse_args(args) if len(args) < 2: sys.exit(not p.print_help()) blastfile = args[0] bedfiles = args[1:] prefix = "_".join(op.basename(x)[:2] for x in bedfiles) symlink(blastfile, prefix + ".blast") allbedfile = prefix + ".gff" fw = open(allbedfile, "w") for i, bedfile in enumerate(bedfiles): prefix = chr(ord('A') + i) make_gff(bedfile, prefix, fw) fw.close()
[ "def", "mcscanx", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "mcscanx", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "<", "2", ":", "sys", ".", "exit", "(", "...
%prog mcscanx athaliana.athaliana.last athaliana.bed Wrap around MCScanX.
[ "%prog", "mcscanx", "athaliana", ".", "athaliana", ".", "last", "athaliana", ".", "bed" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/projects/synfind.py#L542-L563
train
200,721
tanghaibao/jcvi
jcvi/projects/synfind.py
grass
def grass(args): """ %prog grass coge_master_table.txt james.txt Validate SynFind pan-grass set against James. This set can be generated: https://genomevolution.org/r/fhak """ p = OptionParser(grass._doc__) p.set_verbose() opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) master, james = args fp = open(master) next(fp) master_store = defaultdict(set) for row in fp: atoms = row.split() s = set() for x in atoms[1:6]: m = x.split(",") s |= set(m) if '-' in s: s.remove('-') a = atoms[1] master_store[a] |= set(s) fp = open(james) next(fp) james_store = {} tandems = set() for row in fp: atoms = row.split() s = set() Os = set() for x in atoms[:-1]: m = x.split("||") if m[0].startswith("Os"): Os |= set(m) if m[0].startswith("http"): continue if m[0].startswith("chr"): m = ["proxy"] if "||" in x: tandems |= set(m) s |= set(m) for x in Os: james_store[x] = s jaccards = [] corr_jaccards = [] perfect_matches = 0 corr_perfect_matches = 0 for k, v in james_store.items(): if k not in master_store: continue m = master_store[k] jaccard = len(v & m) * 100 / len(v | m) jaccards.append(jaccard) diff = (v ^ m ) - tandems corr_jaccard = 100 - len(diff) * 100 / len(v | m) corr_jaccards.append(corr_jaccard) if opts.verbose: print(k) print(v) print(m) print(diff) print(jaccard) if jaccard > 99: perfect_matches += 1 if corr_jaccard > 99: corr_perfect_matches += 1 logging.debug("Perfect matches: {0}".format(perfect_matches)) logging.debug("Perfect matches (corrected): {0}".format(corr_perfect_matches)) print("Jaccards:", SummaryStats(jaccards)) print("Corrected Jaccards:", SummaryStats(corr_jaccards))
python
def grass(args): """ %prog grass coge_master_table.txt james.txt Validate SynFind pan-grass set against James. This set can be generated: https://genomevolution.org/r/fhak """ p = OptionParser(grass._doc__) p.set_verbose() opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) master, james = args fp = open(master) next(fp) master_store = defaultdict(set) for row in fp: atoms = row.split() s = set() for x in atoms[1:6]: m = x.split(",") s |= set(m) if '-' in s: s.remove('-') a = atoms[1] master_store[a] |= set(s) fp = open(james) next(fp) james_store = {} tandems = set() for row in fp: atoms = row.split() s = set() Os = set() for x in atoms[:-1]: m = x.split("||") if m[0].startswith("Os"): Os |= set(m) if m[0].startswith("http"): continue if m[0].startswith("chr"): m = ["proxy"] if "||" in x: tandems |= set(m) s |= set(m) for x in Os: james_store[x] = s jaccards = [] corr_jaccards = [] perfect_matches = 0 corr_perfect_matches = 0 for k, v in james_store.items(): if k not in master_store: continue m = master_store[k] jaccard = len(v & m) * 100 / len(v | m) jaccards.append(jaccard) diff = (v ^ m ) - tandems corr_jaccard = 100 - len(diff) * 100 / len(v | m) corr_jaccards.append(corr_jaccard) if opts.verbose: print(k) print(v) print(m) print(diff) print(jaccard) if jaccard > 99: perfect_matches += 1 if corr_jaccard > 99: corr_perfect_matches += 1 logging.debug("Perfect matches: {0}".format(perfect_matches)) logging.debug("Perfect matches (corrected): {0}".format(corr_perfect_matches)) print("Jaccards:", SummaryStats(jaccards)) print("Corrected Jaccards:", SummaryStats(corr_jaccards))
[ "def", "grass", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "grass", ".", "_doc__", ")", "p", ".", "set_verbose", "(", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "2", ...
%prog grass coge_master_table.txt james.txt Validate SynFind pan-grass set against James. This set can be generated: https://genomevolution.org/r/fhak
[ "%prog", "grass", "coge_master_table", ".", "txt", "james", ".", "txt" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/projects/synfind.py#L566-L648
train
200,722
tanghaibao/jcvi
jcvi/projects/synfind.py
ecoli
def ecoli(args): """ %prog ecoli coge_master_table.txt query.bed Perform gene presence / absence analysis in Ecoli master spreadsheet. Ecoli spresheets can be downloaded below: Ecoli K12 MG1655 (K) as query Regenerate this analysis: https://genomevolution.org/r/fggo Ecoli O157:H7 EDL933 (O) as query Regenerate this analysis: https://genomevolution.org/r/fgt7 Shigella flexneri 2a 301 (S) as query Regenerate this analysis: https://genomevolution.org/r/fgte Perform a similar analysis as in: Jin et al. (2002) Genome sequence of Shigella flexneri 2a: insights into pathogenicity through comparison with genomes of Escherichia coli K12 and O157. Nucleic Acid Research. """ p = OptionParser(ecoli.__doc__) p.set_outfile() opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) master, querybed = args fp = open(master) header = next(fp) assert header[0] == '#' qorg = header.strip().split("\t")[1] qorg = qorg.split(":")[-1].strip() store = {} MISSING = ("proxy", "-") for row in fp: a, b, c = row.strip().split("\t")[1:4] store[a] = b in MISSING and c in MISSING bed = Bed(querybed) tags = [] for i, b in enumerate(bed): accn = b.accn if accn not in store: logging.warn("missing {0}".format(accn)) continue tags.append((store[accn], accn)) large = 4 # large segments II = [] II_large = [] for missing, aa in groupby(tags, key=lambda x: x[0]): aa = list(aa) if not missing: continue glist = list(a for missing, a in aa) II.append(glist) size = len(glist) if size >= large: II_large.append(glist) fw = must_open(opts.outfile, "w") for a, t in zip((II, II_large), ("", ">=4 ")): nmissing = sum(len(x) for x in a) logging.debug("A total of {0} {1}-specific {2}islands found with {3} genes.".\ format(len(a), qorg, t, nmissing)) for x in II: print(len(x), ",".join(x), file=fw)
python
def ecoli(args): """ %prog ecoli coge_master_table.txt query.bed Perform gene presence / absence analysis in Ecoli master spreadsheet. Ecoli spresheets can be downloaded below: Ecoli K12 MG1655 (K) as query Regenerate this analysis: https://genomevolution.org/r/fggo Ecoli O157:H7 EDL933 (O) as query Regenerate this analysis: https://genomevolution.org/r/fgt7 Shigella flexneri 2a 301 (S) as query Regenerate this analysis: https://genomevolution.org/r/fgte Perform a similar analysis as in: Jin et al. (2002) Genome sequence of Shigella flexneri 2a: insights into pathogenicity through comparison with genomes of Escherichia coli K12 and O157. Nucleic Acid Research. """ p = OptionParser(ecoli.__doc__) p.set_outfile() opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) master, querybed = args fp = open(master) header = next(fp) assert header[0] == '#' qorg = header.strip().split("\t")[1] qorg = qorg.split(":")[-1].strip() store = {} MISSING = ("proxy", "-") for row in fp: a, b, c = row.strip().split("\t")[1:4] store[a] = b in MISSING and c in MISSING bed = Bed(querybed) tags = [] for i, b in enumerate(bed): accn = b.accn if accn not in store: logging.warn("missing {0}".format(accn)) continue tags.append((store[accn], accn)) large = 4 # large segments II = [] II_large = [] for missing, aa in groupby(tags, key=lambda x: x[0]): aa = list(aa) if not missing: continue glist = list(a for missing, a in aa) II.append(glist) size = len(glist) if size >= large: II_large.append(glist) fw = must_open(opts.outfile, "w") for a, t in zip((II, II_large), ("", ">=4 ")): nmissing = sum(len(x) for x in a) logging.debug("A total of {0} {1}-specific {2}islands found with {3} genes.".\ format(len(a), qorg, t, nmissing)) for x in II: print(len(x), ",".join(x), file=fw)
[ "def", "ecoli", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "ecoli", ".", "__doc__", ")", "p", ".", "set_outfile", "(", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "2", ...
%prog ecoli coge_master_table.txt query.bed Perform gene presence / absence analysis in Ecoli master spreadsheet. Ecoli spresheets can be downloaded below: Ecoli K12 MG1655 (K) as query Regenerate this analysis: https://genomevolution.org/r/fggo Ecoli O157:H7 EDL933 (O) as query Regenerate this analysis: https://genomevolution.org/r/fgt7 Shigella flexneri 2a 301 (S) as query Regenerate this analysis: https://genomevolution.org/r/fgte Perform a similar analysis as in: Jin et al. (2002) Genome sequence of Shigella flexneri 2a: insights into pathogenicity through comparison with genomes of Escherichia coli K12 and O157. Nucleic Acid Research.
[ "%prog", "ecoli", "coge_master_table", ".", "txt", "query", ".", "bed" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/projects/synfind.py#L651-L721
train
200,723
tanghaibao/jcvi
jcvi/annotation/maker.py
parallel
def parallel(args): """ %prog parallel genome.fasta N Partition the genome into parts and run separately. This is useful if MAKER is to be run on the grid. """ from jcvi.formats.base import split p = OptionParser(parallel.__doc__) p.set_home("maker") p.set_tmpdir(tmpdir="tmp") p.set_grid_opts(array=True) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) genome, NN = args threaded = opts.threaded or 1 tmpdir = opts.tmpdir mkdir(tmpdir) tmpdir = get_abs_path(tmpdir) N = int(NN) assert 1 <= N < 1000, "Required: 1 < N < 1000!" outdir = "outdir" fs = split([genome, outdir, NN]) c = CTLFile("maker_opts.ctl") c.update_abs_path() if threaded > 1: c.update_tag("cpus", threaded) cwd = os.getcwd() dirs = [] for name in fs.names: fn = get_abs_path(name) bn = op.basename(name) dirs.append(bn) c.update_tag("genome", fn) mkdir(bn) sh("cp *.ctl {0}".format(bn)) os.chdir(bn) c.write_file("maker_opts.ctl") os.chdir(cwd) jobs = "jobs" fw = open(jobs, "w") print("\n".join(dirs), file=fw) fw.close() # Submit to grid ncmds = len(dirs) runfile = "array.sh" cmd = op.join(opts.maker_home, "bin/maker") if tmpdir: cmd += " -TMP {0}".format(tmpdir) engine = get_grid_engine() contents = arraysh.format(jobs, cmd) if engine == "SGE" \ else arraysh_ua.format(N, threaded, jobs, cmd) write_file(runfile, contents) if engine == "PBS": return # qsub script outfile = "maker.\$TASK_ID.out" p = GridProcess(runfile, outfile=outfile, errfile=outfile, arr=ncmds, grid_opts=opts) qsubfile = "qsub.sh" qsub = p.build() write_file(qsubfile, qsub)
python
def parallel(args): """ %prog parallel genome.fasta N Partition the genome into parts and run separately. This is useful if MAKER is to be run on the grid. """ from jcvi.formats.base import split p = OptionParser(parallel.__doc__) p.set_home("maker") p.set_tmpdir(tmpdir="tmp") p.set_grid_opts(array=True) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) genome, NN = args threaded = opts.threaded or 1 tmpdir = opts.tmpdir mkdir(tmpdir) tmpdir = get_abs_path(tmpdir) N = int(NN) assert 1 <= N < 1000, "Required: 1 < N < 1000!" outdir = "outdir" fs = split([genome, outdir, NN]) c = CTLFile("maker_opts.ctl") c.update_abs_path() if threaded > 1: c.update_tag("cpus", threaded) cwd = os.getcwd() dirs = [] for name in fs.names: fn = get_abs_path(name) bn = op.basename(name) dirs.append(bn) c.update_tag("genome", fn) mkdir(bn) sh("cp *.ctl {0}".format(bn)) os.chdir(bn) c.write_file("maker_opts.ctl") os.chdir(cwd) jobs = "jobs" fw = open(jobs, "w") print("\n".join(dirs), file=fw) fw.close() # Submit to grid ncmds = len(dirs) runfile = "array.sh" cmd = op.join(opts.maker_home, "bin/maker") if tmpdir: cmd += " -TMP {0}".format(tmpdir) engine = get_grid_engine() contents = arraysh.format(jobs, cmd) if engine == "SGE" \ else arraysh_ua.format(N, threaded, jobs, cmd) write_file(runfile, contents) if engine == "PBS": return # qsub script outfile = "maker.\$TASK_ID.out" p = GridProcess(runfile, outfile=outfile, errfile=outfile, arr=ncmds, grid_opts=opts) qsubfile = "qsub.sh" qsub = p.build() write_file(qsubfile, qsub)
[ "def", "parallel", "(", "args", ")", ":", "from", "jcvi", ".", "formats", ".", "base", "import", "split", "p", "=", "OptionParser", "(", "parallel", ".", "__doc__", ")", "p", ".", "set_home", "(", "\"maker\"", ")", "p", ".", "set_tmpdir", "(", "tmpdir"...
%prog parallel genome.fasta N Partition the genome into parts and run separately. This is useful if MAKER is to be run on the grid.
[ "%prog", "parallel", "genome", ".", "fasta", "N" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/annotation/maker.py#L133-L209
train
200,724
tanghaibao/jcvi
jcvi/annotation/maker.py
merge
def merge(args): """ %prog merge outdir output.gff Follow-up command after grid jobs are completed after parallel(). """ from jcvi.formats.gff import merge as gmerge p = OptionParser(merge.__doc__) p.set_home("maker") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) outdir, outputgff = args fsnames, suffix = get_fsnames(outdir) nfs = len(fsnames) cmd = op.join(opts.maker_home, "bin/gff3_merge") outfile = "merge.sh" write_file(outfile, mergesh.format(suffix, cmd)) # Generate per split directory # Note that gff3_merge write to /tmp, so I limit processes here to avoid # filling up disk space sh("parallel -j 8 merge.sh {} ::: " + " ".join(fsnames)) # One final output gffnames = glob("*.all.gff") assert len(gffnames) == nfs # Again, DO NOT USE gff3_merge to merge with a smallish /tmp/ area gfflist = "gfflist" fw = open(gfflist, "w") print("\n".join(gffnames), file=fw) fw.close() nlines = sum(1 for x in open(gfflist)) assert nlines == nfs # Be extra, extra careful to include all results gmerge([gfflist, "-o", outputgff]) logging.debug("Merged GFF file written to `{0}`".format(outputgff))
python
def merge(args): """ %prog merge outdir output.gff Follow-up command after grid jobs are completed after parallel(). """ from jcvi.formats.gff import merge as gmerge p = OptionParser(merge.__doc__) p.set_home("maker") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) outdir, outputgff = args fsnames, suffix = get_fsnames(outdir) nfs = len(fsnames) cmd = op.join(opts.maker_home, "bin/gff3_merge") outfile = "merge.sh" write_file(outfile, mergesh.format(suffix, cmd)) # Generate per split directory # Note that gff3_merge write to /tmp, so I limit processes here to avoid # filling up disk space sh("parallel -j 8 merge.sh {} ::: " + " ".join(fsnames)) # One final output gffnames = glob("*.all.gff") assert len(gffnames) == nfs # Again, DO NOT USE gff3_merge to merge with a smallish /tmp/ area gfflist = "gfflist" fw = open(gfflist, "w") print("\n".join(gffnames), file=fw) fw.close() nlines = sum(1 for x in open(gfflist)) assert nlines == nfs # Be extra, extra careful to include all results gmerge([gfflist, "-o", outputgff]) logging.debug("Merged GFF file written to `{0}`".format(outputgff))
[ "def", "merge", "(", "args", ")", ":", "from", "jcvi", ".", "formats", ".", "gff", "import", "merge", "as", "gmerge", "p", "=", "OptionParser", "(", "merge", ".", "__doc__", ")", "p", ".", "set_home", "(", "\"maker\"", ")", "opts", ",", "args", "=", ...
%prog merge outdir output.gff Follow-up command after grid jobs are completed after parallel().
[ "%prog", "merge", "outdir", "output", ".", "gff" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/annotation/maker.py#L227-L268
train
200,725
tanghaibao/jcvi
jcvi/annotation/maker.py
validate
def validate(args): """ %prog validate outdir genome.fasta Validate current folder after MAKER run and check for failures. Failed batch will be written to a directory for additional work. """ from jcvi.utils.counter import Counter p = OptionParser(validate.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) outdir, genome = args counter = Counter() fsnames, suffix = get_fsnames(outdir) dsfile = "{0}{1}/{0}.maker.output/{0}_master_datastore_index.log" dslogs = [dsfile.format(x, suffix) for x in fsnames] all_failed = [] for f, d in zip(fsnames, dslogs): dslog = DatastoreIndexFile(d) counter.update(dslog.scaffold_status.values()) all_failed.extend([(f, x) for x in dslog.failed]) cmd = 'tail maker.*.out | grep -c "now finished"' n = int(popen(cmd).read()) assert len(fsnames) == n print("ALL jobs have been finished", file=sys.stderr) nfailed = len(all_failed) if nfailed == 0: print("ALL scaffolds are completed with no errors", file=sys.stderr) return print("Scaffold status:", file=sys.stderr) print(counter, file=sys.stderr) failed = "FAILED" fw = open(failed, "w") print("\n".join(["\t".join((f, x)) for f, x in all_failed]), file=fw) fw.close() nlines = sum(1 for x in open("FAILED")) assert nlines == nfailed print("FAILED !! {0} instances.".format(nfailed), file=sys.stderr) # Rebuild the failed batch failed_ids = failed + ".ids" failed_fasta = failed + ".fasta" cmd = "cut -f2 {0}".format(failed) sh(cmd, outfile=failed_ids) if need_update((genome, failed_ids), failed_fasta): cmd = "faSomeRecords {0} {1} {2}".\ format(genome, failed_ids, failed_fasta) sh(cmd)
python
def validate(args): """ %prog validate outdir genome.fasta Validate current folder after MAKER run and check for failures. Failed batch will be written to a directory for additional work. """ from jcvi.utils.counter import Counter p = OptionParser(validate.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) outdir, genome = args counter = Counter() fsnames, suffix = get_fsnames(outdir) dsfile = "{0}{1}/{0}.maker.output/{0}_master_datastore_index.log" dslogs = [dsfile.format(x, suffix) for x in fsnames] all_failed = [] for f, d in zip(fsnames, dslogs): dslog = DatastoreIndexFile(d) counter.update(dslog.scaffold_status.values()) all_failed.extend([(f, x) for x in dslog.failed]) cmd = 'tail maker.*.out | grep -c "now finished"' n = int(popen(cmd).read()) assert len(fsnames) == n print("ALL jobs have been finished", file=sys.stderr) nfailed = len(all_failed) if nfailed == 0: print("ALL scaffolds are completed with no errors", file=sys.stderr) return print("Scaffold status:", file=sys.stderr) print(counter, file=sys.stderr) failed = "FAILED" fw = open(failed, "w") print("\n".join(["\t".join((f, x)) for f, x in all_failed]), file=fw) fw.close() nlines = sum(1 for x in open("FAILED")) assert nlines == nfailed print("FAILED !! {0} instances.".format(nfailed), file=sys.stderr) # Rebuild the failed batch failed_ids = failed + ".ids" failed_fasta = failed + ".fasta" cmd = "cut -f2 {0}".format(failed) sh(cmd, outfile=failed_ids) if need_update((genome, failed_ids), failed_fasta): cmd = "faSomeRecords {0} {1} {2}".\ format(genome, failed_ids, failed_fasta) sh(cmd)
[ "def", "validate", "(", "args", ")", ":", "from", "jcvi", ".", "utils", ".", "counter", "import", "Counter", "p", "=", "OptionParser", "(", "validate", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "le...
%prog validate outdir genome.fasta Validate current folder after MAKER run and check for failures. Failed batch will be written to a directory for additional work.
[ "%prog", "validate", "outdir", "genome", ".", "fasta" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/annotation/maker.py#L271-L327
train
200,726
tanghaibao/jcvi
jcvi/annotation/maker.py
batcheval
def batcheval(args): """ %prog batcheval model.ids gff_file evidences.bed fastafile Get the accuracy for a list of models against evidences in the range of the genes. For example: $ %prog batcheval all.gff3 isoforms.ids proteins.bed scaffolds.fasta Outfile contains the scores for the models can be found in models.scores """ from jcvi.formats.bed import evaluate from jcvi.formats.gff import make_index p = OptionParser(evaluate.__doc__) p.add_option("--type", default="CDS", help="list of features to extract, use comma to separate (e.g." "'five_prime_UTR,CDS,three_prime_UTR') [default: %default]") opts, args = p.parse_args(args) if len(args) != 4: sys.exit(not p.print_help()) model_ids, gff_file, evidences_bed, fastafile = args type = set(opts.type.split(",")) g = make_index(gff_file) fp = open(model_ids) prefix = model_ids.rsplit(".", 1)[0] fwscores = open(prefix + ".scores", "w") for row in fp: cid = row.strip() b = next(g.parents(cid, 1)) query = "{0}:{1}-{2}".format(b.chrom, b.start, b.stop) children = [c for c in g.children(cid, 1)] cidbed = prefix + ".bed" fw = open(cidbed, "w") for c in children: if c.featuretype not in type: continue fw.write(c.to_bed()) fw.close() b = evaluate([cidbed, evidences_bed, fastafile, "--query={0}".format(query)]) print("\t".join((cid, b.score)), file=fwscores) fwscores.flush()
python
def batcheval(args): """ %prog batcheval model.ids gff_file evidences.bed fastafile Get the accuracy for a list of models against evidences in the range of the genes. For example: $ %prog batcheval all.gff3 isoforms.ids proteins.bed scaffolds.fasta Outfile contains the scores for the models can be found in models.scores """ from jcvi.formats.bed import evaluate from jcvi.formats.gff import make_index p = OptionParser(evaluate.__doc__) p.add_option("--type", default="CDS", help="list of features to extract, use comma to separate (e.g." "'five_prime_UTR,CDS,three_prime_UTR') [default: %default]") opts, args = p.parse_args(args) if len(args) != 4: sys.exit(not p.print_help()) model_ids, gff_file, evidences_bed, fastafile = args type = set(opts.type.split(",")) g = make_index(gff_file) fp = open(model_ids) prefix = model_ids.rsplit(".", 1)[0] fwscores = open(prefix + ".scores", "w") for row in fp: cid = row.strip() b = next(g.parents(cid, 1)) query = "{0}:{1}-{2}".format(b.chrom, b.start, b.stop) children = [c for c in g.children(cid, 1)] cidbed = prefix + ".bed" fw = open(cidbed, "w") for c in children: if c.featuretype not in type: continue fw.write(c.to_bed()) fw.close() b = evaluate([cidbed, evidences_bed, fastafile, "--query={0}".format(query)]) print("\t".join((cid, b.score)), file=fwscores) fwscores.flush()
[ "def", "batcheval", "(", "args", ")", ":", "from", "jcvi", ".", "formats", ".", "bed", "import", "evaluate", "from", "jcvi", ".", "formats", ".", "gff", "import", "make_index", "p", "=", "OptionParser", "(", "evaluate", ".", "__doc__", ")", "p", ".", "...
%prog batcheval model.ids gff_file evidences.bed fastafile Get the accuracy for a list of models against evidences in the range of the genes. For example: $ %prog batcheval all.gff3 isoforms.ids proteins.bed scaffolds.fasta Outfile contains the scores for the models can be found in models.scores
[ "%prog", "batcheval", "model", ".", "ids", "gff_file", "evidences", ".", "bed", "fastafile" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/annotation/maker.py#L330-L379
train
200,727
tanghaibao/jcvi
jcvi/annotation/maker.py
get_splits
def get_splits(split_bed, gff_file, stype, key): """ Use intersectBed to find the fused gene => split genes mappings. """ bed_file = get_bed_file(gff_file, stype, key) cmd = "intersectBed -a {0} -b {1} -wao".format(split_bed, bed_file) cmd += " | cut -f4,10" p = popen(cmd) splits = defaultdict(set) for row in p: a, b = row.split() splits[a].add(b) return splits
python
def get_splits(split_bed, gff_file, stype, key): """ Use intersectBed to find the fused gene => split genes mappings. """ bed_file = get_bed_file(gff_file, stype, key) cmd = "intersectBed -a {0} -b {1} -wao".format(split_bed, bed_file) cmd += " | cut -f4,10" p = popen(cmd) splits = defaultdict(set) for row in p: a, b = row.split() splits[a].add(b) return splits
[ "def", "get_splits", "(", "split_bed", ",", "gff_file", ",", "stype", ",", "key", ")", ":", "bed_file", "=", "get_bed_file", "(", "gff_file", ",", "stype", ",", "key", ")", "cmd", "=", "\"intersectBed -a {0} -b {1} -wao\"", ".", "format", "(", "split_bed", "...
Use intersectBed to find the fused gene => split genes mappings.
[ "Use", "intersectBed", "to", "find", "the", "fused", "gene", "=", ">", "split", "genes", "mappings", "." ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/annotation/maker.py#L396-L409
train
200,728
tanghaibao/jcvi
jcvi/annotation/maker.py
split
def split(args): """ %prog split split.bed evidences.bed predictor1.gff predictor2.gff fastafile Split MAKER models by checking against predictors (such as AUGUSTUS and FGENESH). For each region covered by a working model. Find out the combination of predictors that gives the best accuracy against evidences (such as PASA). `split.bed` can be generated by pulling out subset from a list of ids $ python -m jcvi.formats.base join split.ids working.bed --column=0,3 --noheader | cut -f2-7 > split.bed """ from jcvi.formats.bed import Bed p = OptionParser(split.__doc__) p.add_option("--key", default="Name", help="Key in the attributes to extract predictor.gff [default: %default]") p.add_option("--parents", default="match", help="list of features to extract, use comma to separate (e.g." "'gene,mRNA') [default: %default]") p.add_option("--children", default="match_part", help="list of features to extract, use comma to separate (e.g." "'five_prime_UTR,CDS,three_prime_UTR') [default: %default]") opts, args = p.parse_args(args) if len(args) != 5: sys.exit(not p.print_help()) split_bed, evidences_bed, p1_gff, p2_gff, fastafile = args parents = opts.parents children = opts.children key = opts.key bed = Bed(split_bed) s1 = get_splits(split_bed, p1_gff, parents, key) s2 = get_splits(split_bed, p2_gff, parents, key) for b in bed: query = "{0}:{1}-{2}".format(b.seqid, b.start, b.end) b1 = get_accuracy(query, p1_gff, evidences_bed, fastafile, children, key) b2 = get_accuracy(query, p2_gff, evidences_bed, fastafile, children, key) accn = b.accn c1 = "|".join(s1[accn]) c2 = "|".join(s2[accn]) ac1 = b1.accuracy ac2 = b2.accuracy tag = p1_gff if ac1 >= ac2 else p2_gff tag = tag.split(".")[0] ac1 = "{0:.3f}".format(ac1) ac2 = "{0:.3f}".format(ac2) print("\t".join((accn, tag, ac1, ac2, c1, c2)))
python
def split(args): """ %prog split split.bed evidences.bed predictor1.gff predictor2.gff fastafile Split MAKER models by checking against predictors (such as AUGUSTUS and FGENESH). For each region covered by a working model. Find out the combination of predictors that gives the best accuracy against evidences (such as PASA). `split.bed` can be generated by pulling out subset from a list of ids $ python -m jcvi.formats.base join split.ids working.bed --column=0,3 --noheader | cut -f2-7 > split.bed """ from jcvi.formats.bed import Bed p = OptionParser(split.__doc__) p.add_option("--key", default="Name", help="Key in the attributes to extract predictor.gff [default: %default]") p.add_option("--parents", default="match", help="list of features to extract, use comma to separate (e.g." "'gene,mRNA') [default: %default]") p.add_option("--children", default="match_part", help="list of features to extract, use comma to separate (e.g." "'five_prime_UTR,CDS,three_prime_UTR') [default: %default]") opts, args = p.parse_args(args) if len(args) != 5: sys.exit(not p.print_help()) split_bed, evidences_bed, p1_gff, p2_gff, fastafile = args parents = opts.parents children = opts.children key = opts.key bed = Bed(split_bed) s1 = get_splits(split_bed, p1_gff, parents, key) s2 = get_splits(split_bed, p2_gff, parents, key) for b in bed: query = "{0}:{1}-{2}".format(b.seqid, b.start, b.end) b1 = get_accuracy(query, p1_gff, evidences_bed, fastafile, children, key) b2 = get_accuracy(query, p2_gff, evidences_bed, fastafile, children, key) accn = b.accn c1 = "|".join(s1[accn]) c2 = "|".join(s2[accn]) ac1 = b1.accuracy ac2 = b2.accuracy tag = p1_gff if ac1 >= ac2 else p2_gff tag = tag.split(".")[0] ac1 = "{0:.3f}".format(ac1) ac2 = "{0:.3f}".format(ac2) print("\t".join((accn, tag, ac1, ac2, c1, c2)))
[ "def", "split", "(", "args", ")", ":", "from", "jcvi", ".", "formats", ".", "bed", "import", "Bed", "p", "=", "OptionParser", "(", "split", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--key\"", ",", "default", "=", "\"Name\"", ",", "help", "...
%prog split split.bed evidences.bed predictor1.gff predictor2.gff fastafile Split MAKER models by checking against predictors (such as AUGUSTUS and FGENESH). For each region covered by a working model. Find out the combination of predictors that gives the best accuracy against evidences (such as PASA). `split.bed` can be generated by pulling out subset from a list of ids $ python -m jcvi.formats.base join split.ids working.bed --column=0,3 --noheader | cut -f2-7 > split.bed
[ "%prog", "split", "split", ".", "bed", "evidences", ".", "bed", "predictor1", ".", "gff", "predictor2", ".", "gff", "fastafile" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/annotation/maker.py#L425-L479
train
200,729
tanghaibao/jcvi
jcvi/annotation/maker.py
datastore
def datastore(args): """ %prog datastore datastore.log > gfflist.log Generate a list of gff filenames to merge. The `datastore.log` file can be generated by something like: $ find /usr/local/scratch/htang/EVM_test/gannotation/maker/1132350111853_default/i1/ -maxdepth 4 -name "*datastore*.log" > datastore.log """ p = OptionParser(datastore.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) ds, = args fp = open(ds) for row in fp: fn = row.strip() assert op.exists(fn) pp, logfile = op.split(fn) flog = open(fn) for row in flog: ctg, folder, status = row.split() if status != "FINISHED": continue gff_file = op.join(pp, folder, ctg + ".gff") assert op.exists(gff_file) print(gff_file)
python
def datastore(args): """ %prog datastore datastore.log > gfflist.log Generate a list of gff filenames to merge. The `datastore.log` file can be generated by something like: $ find /usr/local/scratch/htang/EVM_test/gannotation/maker/1132350111853_default/i1/ -maxdepth 4 -name "*datastore*.log" > datastore.log """ p = OptionParser(datastore.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) ds, = args fp = open(ds) for row in fp: fn = row.strip() assert op.exists(fn) pp, logfile = op.split(fn) flog = open(fn) for row in flog: ctg, folder, status = row.split() if status != "FINISHED": continue gff_file = op.join(pp, folder, ctg + ".gff") assert op.exists(gff_file) print(gff_file)
[ "def", "datastore", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "datastore", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "1", ":", "sys", ".", "exit", "("...
%prog datastore datastore.log > gfflist.log Generate a list of gff filenames to merge. The `datastore.log` file can be generated by something like: $ find /usr/local/scratch/htang/EVM_test/gannotation/maker/1132350111853_default/i1/ -maxdepth 4 -name "*datastore*.log" > datastore.log
[ "%prog", "datastore", "datastore", ".", "log", ">", "gfflist", ".", "log" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/annotation/maker.py#L482-L513
train
200,730
tanghaibao/jcvi
jcvi/algorithms/ml.py
libsvm
def libsvm(args): """ %prog libsvm csvfile prefix.ids Convert csv file to LIBSVM format. `prefix.ids` contains the prefix mapping. Ga -1 Gr 1 So the feature in the first column of csvfile get scanned with the prefix and mapped to different classes. Formatting spec: http://svmlight.joachims.org/ """ from jcvi.formats.base import DictFile p = OptionParser(libsvm.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) csvfile, prefixids = args d = DictFile(prefixids) fp = open(csvfile) next(fp) for row in fp: atoms = row.split() klass = atoms[0] kp = klass.split("_")[0] klass = d.get(kp, "0") feats = ["{0}:{1}".format(i + 1, x) for i, x in enumerate(atoms[1:])] print(" ".join([klass] + feats))
python
def libsvm(args): """ %prog libsvm csvfile prefix.ids Convert csv file to LIBSVM format. `prefix.ids` contains the prefix mapping. Ga -1 Gr 1 So the feature in the first column of csvfile get scanned with the prefix and mapped to different classes. Formatting spec: http://svmlight.joachims.org/ """ from jcvi.formats.base import DictFile p = OptionParser(libsvm.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) csvfile, prefixids = args d = DictFile(prefixids) fp = open(csvfile) next(fp) for row in fp: atoms = row.split() klass = atoms[0] kp = klass.split("_")[0] klass = d.get(kp, "0") feats = ["{0}:{1}".format(i + 1, x) for i, x in enumerate(atoms[1:])] print(" ".join([klass] + feats))
[ "def", "libsvm", "(", "args", ")", ":", "from", "jcvi", ".", "formats", ".", "base", "import", "DictFile", "p", "=", "OptionParser", "(", "libsvm", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", ...
%prog libsvm csvfile prefix.ids Convert csv file to LIBSVM format. `prefix.ids` contains the prefix mapping. Ga -1 Gr 1 So the feature in the first column of csvfile get scanned with the prefix and mapped to different classes. Formatting spec: http://svmlight.joachims.org/
[ "%prog", "libsvm", "csvfile", "prefix", ".", "ids" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/algorithms/ml.py#L23-L54
train
200,731
tanghaibao/jcvi
jcvi/algorithms/matrix.py
determine_positions
def determine_positions(nodes, edges): """ Construct the problem instance to solve the positions of contigs. The input for spring_system() is A, K, L, which looks like the following. A = np.array([[1, -1, 0], [0, 1, -1], [1, 0, -1]]) K = np.eye(3, dtype=int) L = np.array([1, 2, 3]) For example, A-B distance 1, B-C distance 2, A-C distance 3, solve positions >>> determine_positions([0, 1, 2], [(0, 1, 1), (1, 2, 2), (0, 2, 3)]) array([0, 1, 3]) """ N = len(nodes) E = len(edges) A = np.zeros((E, N), dtype=int) for i, (a, b, distance) in enumerate(edges): A[i, a] = 1 A[i, b] = -1 K = np.eye(E, dtype=int) L = np.array([x[-1] for x in edges]) s = spring_system(A, K, L) return np.array([0] + [int(round(x, 0)) for x in s])
python
def determine_positions(nodes, edges): """ Construct the problem instance to solve the positions of contigs. The input for spring_system() is A, K, L, which looks like the following. A = np.array([[1, -1, 0], [0, 1, -1], [1, 0, -1]]) K = np.eye(3, dtype=int) L = np.array([1, 2, 3]) For example, A-B distance 1, B-C distance 2, A-C distance 3, solve positions >>> determine_positions([0, 1, 2], [(0, 1, 1), (1, 2, 2), (0, 2, 3)]) array([0, 1, 3]) """ N = len(nodes) E = len(edges) A = np.zeros((E, N), dtype=int) for i, (a, b, distance) in enumerate(edges): A[i, a] = 1 A[i, b] = -1 K = np.eye(E, dtype=int) L = np.array([x[-1] for x in edges]) s = spring_system(A, K, L) return np.array([0] + [int(round(x, 0)) for x in s])
[ "def", "determine_positions", "(", "nodes", ",", "edges", ")", ":", "N", "=", "len", "(", "nodes", ")", "E", "=", "len", "(", "edges", ")", "A", "=", "np", ".", "zeros", "(", "(", "E", ",", "N", ")", ",", "dtype", "=", "int", ")", "for", "i",...
Construct the problem instance to solve the positions of contigs. The input for spring_system() is A, K, L, which looks like the following. A = np.array([[1, -1, 0], [0, 1, -1], [1, 0, -1]]) K = np.eye(3, dtype=int) L = np.array([1, 2, 3]) For example, A-B distance 1, B-C distance 2, A-C distance 3, solve positions >>> determine_positions([0, 1, 2], [(0, 1, 1), (1, 2, 2), (0, 2, 3)]) array([0, 1, 3])
[ "Construct", "the", "problem", "instance", "to", "solve", "the", "positions", "of", "contigs", "." ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/algorithms/matrix.py#L38-L64
train
200,732
tanghaibao/jcvi
jcvi/algorithms/matrix.py
determine_signs
def determine_signs(nodes, edges, cutoff=1e-10): """ Construct the orientation matrix for the pairs on N molecules. >>> determine_signs([0, 1, 2], [(0, 1, 1), (0, 2, -1), (1, 2, -1)]) array([ 1, 1, -1]) """ N = len(nodes) M = np.zeros((N, N), dtype=float) for a, b, w in edges: M[a, b] += w M = symmetrize(M) return get_signs(M, cutoff=cutoff, validate=False)
python
def determine_signs(nodes, edges, cutoff=1e-10): """ Construct the orientation matrix for the pairs on N molecules. >>> determine_signs([0, 1, 2], [(0, 1, 1), (0, 2, -1), (1, 2, -1)]) array([ 1, 1, -1]) """ N = len(nodes) M = np.zeros((N, N), dtype=float) for a, b, w in edges: M[a, b] += w M = symmetrize(M) return get_signs(M, cutoff=cutoff, validate=False)
[ "def", "determine_signs", "(", "nodes", ",", "edges", ",", "cutoff", "=", "1e-10", ")", ":", "N", "=", "len", "(", "nodes", ")", "M", "=", "np", ".", "zeros", "(", "(", "N", ",", "N", ")", ",", "dtype", "=", "float", ")", "for", "a", ",", "b"...
Construct the orientation matrix for the pairs on N molecules. >>> determine_signs([0, 1, 2], [(0, 1, 1), (0, 2, -1), (1, 2, -1)]) array([ 1, 1, -1])
[ "Construct", "the", "orientation", "matrix", "for", "the", "pairs", "on", "N", "molecules", "." ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/algorithms/matrix.py#L67-L80
train
200,733
tanghaibao/jcvi
jcvi/annotation/ahrd.py
fix
def fix(args): """ %prog fix ahrd.csv > ahrd.fixed.csv Fix ugly names from Uniprot. """ p = OptionParser(fix.__doc__) p.add_option("--ignore_sym_pat", default=False, action="store_true", help="Do not fix names matching symbol patterns i.e." + \ " names beginning or ending with gene symbols or a series of numbers." + \ " e.g. `ARM repeat superfamily protein`, `beta-hexosaminidase 3`," + \ " `CYCLIN A3;4`, `WALL ASSOCIATED KINASE (WAK)-LIKE 10`") p.set_outfile() opts, args = p.parse_args(args) if len(args) < 1: sys.exit(not p.print_help()) csvfile, = args fp = open(csvfile) fw = must_open(opts.outfile, "w") for row in fp: if row[0] == '#': continue if row.strip() == "": continue atoms = row.rstrip("\r\n").split("\t") name, hit, ahrd_code, desc = atoms[:4] \ if len(atoms) > 2 else \ (atoms[0], None, None, atoms[-1]) newdesc = fix_text(desc, ignore_sym_pat=opts.ignore_sym_pat) if hit and hit.strip() != "" and newdesc == Hypothetical: newdesc = "conserved " + newdesc print("\t".join(atoms[:4] + [newdesc] + atoms[4:]), file=fw)
python
def fix(args): """ %prog fix ahrd.csv > ahrd.fixed.csv Fix ugly names from Uniprot. """ p = OptionParser(fix.__doc__) p.add_option("--ignore_sym_pat", default=False, action="store_true", help="Do not fix names matching symbol patterns i.e." + \ " names beginning or ending with gene symbols or a series of numbers." + \ " e.g. `ARM repeat superfamily protein`, `beta-hexosaminidase 3`," + \ " `CYCLIN A3;4`, `WALL ASSOCIATED KINASE (WAK)-LIKE 10`") p.set_outfile() opts, args = p.parse_args(args) if len(args) < 1: sys.exit(not p.print_help()) csvfile, = args fp = open(csvfile) fw = must_open(opts.outfile, "w") for row in fp: if row[0] == '#': continue if row.strip() == "": continue atoms = row.rstrip("\r\n").split("\t") name, hit, ahrd_code, desc = atoms[:4] \ if len(atoms) > 2 else \ (atoms[0], None, None, atoms[-1]) newdesc = fix_text(desc, ignore_sym_pat=opts.ignore_sym_pat) if hit and hit.strip() != "" and newdesc == Hypothetical: newdesc = "conserved " + newdesc print("\t".join(atoms[:4] + [newdesc] + atoms[4:]), file=fw)
[ "def", "fix", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "fix", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--ignore_sym_pat\"", ",", "default", "=", "False", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"Do not fix names m...
%prog fix ahrd.csv > ahrd.fixed.csv Fix ugly names from Uniprot.
[ "%prog", "fix", "ahrd", ".", "csv", ">", "ahrd", ".", "fixed", ".", "csv" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/annotation/ahrd.py#L531-L565
train
200,734
tanghaibao/jcvi
jcvi/annotation/ahrd.py
batch
def batch(args): """ %prog batch splits output The arguments are two folders. Input FASTA sequences are in splits/. Output csv files are in output/. Must have folders swissprot/, tair/, trembl/ that contains the respective BLAST output. Once finished, you can run, for example: $ parallel java -Xmx2g -jar ~/code/AHRD/dist/ahrd.jar {} ::: output/*.yml """ p = OptionParser(batch.__doc__) ahrd_weights = { "blastp": [0.5, 0.3, 0.2], "blastx": [0.6, 0.4, 0.0] } blast_progs = tuple(ahrd_weights.keys()) p.add_option("--path", default="~/code/AHRD/", help="Path where AHRD is installed [default: %default]") p.add_option("--blastprog", default="blastp", choices=blast_progs, help="Specify the blast program being run. Based on this option," \ + " the AHRD parameters (score_weights) will be modified." \ + " [default: %default]") p.add_option("--iprscan", default=None, help="Specify path to InterProScan results file if available." \ + " If specified, the yml conf file will be modified" \ + " appropriately. [default: %default]") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) splits, output = args mkdir(output) bit_score, db_score, ovl_score = ahrd_weights[opts.blastprog] for f in glob("{0}/*.fa*".format(splits)): fb = op.basename(f).rsplit(".", 1)[0] fw = open(op.join(output, fb + ".yml"), "w") path = op.expanduser(opts.path) dir = op.join(path, "test/resources") outfile = op.join(output, fb + ".csv") interpro = iprscanTemplate.format(opts.iprscan) if opts.iprscan else "" print(Template.format(dir, fb, f, outfile, bit_score, db_score, ovl_score, interpro), file=fw) if opts.iprscan: if not op.lexists("interpro.xml"): symlink(op.join(iprscan_datadir, "interpro.xml"), "interpro.xml") if not op.lexists("interpro.dtd"): symlink(op.join(iprscan_datadir, "interpro.dtd"), "interpro.dtd")
python
def batch(args): """ %prog batch splits output The arguments are two folders. Input FASTA sequences are in splits/. Output csv files are in output/. Must have folders swissprot/, tair/, trembl/ that contains the respective BLAST output. Once finished, you can run, for example: $ parallel java -Xmx2g -jar ~/code/AHRD/dist/ahrd.jar {} ::: output/*.yml """ p = OptionParser(batch.__doc__) ahrd_weights = { "blastp": [0.5, 0.3, 0.2], "blastx": [0.6, 0.4, 0.0] } blast_progs = tuple(ahrd_weights.keys()) p.add_option("--path", default="~/code/AHRD/", help="Path where AHRD is installed [default: %default]") p.add_option("--blastprog", default="blastp", choices=blast_progs, help="Specify the blast program being run. Based on this option," \ + " the AHRD parameters (score_weights) will be modified." \ + " [default: %default]") p.add_option("--iprscan", default=None, help="Specify path to InterProScan results file if available." \ + " If specified, the yml conf file will be modified" \ + " appropriately. [default: %default]") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) splits, output = args mkdir(output) bit_score, db_score, ovl_score = ahrd_weights[opts.blastprog] for f in glob("{0}/*.fa*".format(splits)): fb = op.basename(f).rsplit(".", 1)[0] fw = open(op.join(output, fb + ".yml"), "w") path = op.expanduser(opts.path) dir = op.join(path, "test/resources") outfile = op.join(output, fb + ".csv") interpro = iprscanTemplate.format(opts.iprscan) if opts.iprscan else "" print(Template.format(dir, fb, f, outfile, bit_score, db_score, ovl_score, interpro), file=fw) if opts.iprscan: if not op.lexists("interpro.xml"): symlink(op.join(iprscan_datadir, "interpro.xml"), "interpro.xml") if not op.lexists("interpro.dtd"): symlink(op.join(iprscan_datadir, "interpro.dtd"), "interpro.dtd")
[ "def", "batch", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "batch", ".", "__doc__", ")", "ahrd_weights", "=", "{", "\"blastp\"", ":", "[", "0.5", ",", "0.3", ",", "0.2", "]", ",", "\"blastx\"", ":", "[", "0.6", ",", "0.4", ",", "0.0", ...
%prog batch splits output The arguments are two folders. Input FASTA sequences are in splits/. Output csv files are in output/. Must have folders swissprot/, tair/, trembl/ that contains the respective BLAST output. Once finished, you can run, for example: $ parallel java -Xmx2g -jar ~/code/AHRD/dist/ahrd.jar {} ::: output/*.yml
[ "%prog", "batch", "splits", "output" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/annotation/ahrd.py#L612-L669
train
200,735
tanghaibao/jcvi
jcvi/formats/gff.py
to_range
def to_range(obj, score=None, id=None, strand=None): """ Given a gffutils object, convert it to a range object """ from jcvi.utils.range import Range if score or id: _score = score if score else obj.score _id = id if id else obj.id return Range(seqid=obj.seqid, start=obj.start, end=obj.end, \ score=_score, id=_id) elif strand: return (obj.seqid, obj.start, obj.end, obj.strand) return (obj.seqid, obj.start, obj.end)
python
def to_range(obj, score=None, id=None, strand=None): """ Given a gffutils object, convert it to a range object """ from jcvi.utils.range import Range if score or id: _score = score if score else obj.score _id = id if id else obj.id return Range(seqid=obj.seqid, start=obj.start, end=obj.end, \ score=_score, id=_id) elif strand: return (obj.seqid, obj.start, obj.end, obj.strand) return (obj.seqid, obj.start, obj.end)
[ "def", "to_range", "(", "obj", ",", "score", "=", "None", ",", "id", "=", "None", ",", "strand", "=", "None", ")", ":", "from", "jcvi", ".", "utils", ".", "range", "import", "Range", "if", "score", "or", "id", ":", "_score", "=", "score", "if", "...
Given a gffutils object, convert it to a range object
[ "Given", "a", "gffutils", "object", "convert", "it", "to", "a", "range", "object" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/gff.py#L376-L390
train
200,736
tanghaibao/jcvi
jcvi/formats/gff.py
sizes
def sizes(args): """ %prog sizes gffile Given a gff file of features, calculate the sizes of chosen parent feature based on summation of sizes of child features. For example, for parent 'mRNA' and child 'CDS' feature types, calcuate sizes of mRNA by summing the sizes of the disjoint CDS parts. """ p = OptionParser(sizes.__doc__) p.set_outfile() p.add_option("--parents", dest="parents", default="mRNA", help="parent feature(s) for which size is to be calculated") p.add_option("--child", dest="child", default="CDS", help="child feature to use for size calculations") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) gffile, = args parents, cftype = set(opts.parents.split(",")), opts.child gff = make_index(gffile) fw = must_open(opts.outfile, "w") for parent in parents: for feat in gff.features_of_type(parent, order_by=('seqid', 'start')): fsize = 0 fsize = feat.end - feat.start + 1 \ if cftype == parent else \ gff.children_bp(feat, child_featuretype=cftype) print("\t".join(str(x) for x in (feat.id, fsize)), file=fw) fw.close()
python
def sizes(args): """ %prog sizes gffile Given a gff file of features, calculate the sizes of chosen parent feature based on summation of sizes of child features. For example, for parent 'mRNA' and child 'CDS' feature types, calcuate sizes of mRNA by summing the sizes of the disjoint CDS parts. """ p = OptionParser(sizes.__doc__) p.set_outfile() p.add_option("--parents", dest="parents", default="mRNA", help="parent feature(s) for which size is to be calculated") p.add_option("--child", dest="child", default="CDS", help="child feature to use for size calculations") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) gffile, = args parents, cftype = set(opts.parents.split(",")), opts.child gff = make_index(gffile) fw = must_open(opts.outfile, "w") for parent in parents: for feat in gff.features_of_type(parent, order_by=('seqid', 'start')): fsize = 0 fsize = feat.end - feat.start + 1 \ if cftype == parent else \ gff.children_bp(feat, child_featuretype=cftype) print("\t".join(str(x) for x in (feat.id, fsize)), file=fw) fw.close()
[ "def", "sizes", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "sizes", ".", "__doc__", ")", "p", ".", "set_outfile", "(", ")", "p", ".", "add_option", "(", "\"--parents\"", ",", "dest", "=", "\"parents\"", ",", "default", "=", "\"mRNA\"", ",", ...
%prog sizes gffile Given a gff file of features, calculate the sizes of chosen parent feature based on summation of sizes of child features. For example, for parent 'mRNA' and child 'CDS' feature types, calcuate sizes of mRNA by summing the sizes of the disjoint CDS parts.
[ "%prog", "sizes", "gffile" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/gff.py#L663-L697
train
200,737
tanghaibao/jcvi
jcvi/formats/gff.py
summary
def summary(args): """ %prog summary gffile Print summary stats for features of different types. """ from jcvi.formats.base import SetFile from jcvi.formats.bed import BedSummary from jcvi.utils.table import tabulate p = OptionParser(summary.__doc__) p.add_option("--isoform", default=False, action="store_true", help="Find longest isoform of each id") p.add_option("--ids", help="Only include features from certain IDs") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) gff_file, = args ids = opts.ids if ids: ids = SetFile(ids) logging.debug("Total ids loaded: {0}".format(len(ids))) if opts.isoform: pids = set() gff = Gff(gff_file) for g in gff: if g.type != "mRNA": continue if g.parent not in ids: continue if "longest" not in g.attributes: pids = set(x + ".1" for x in ids) break if g.attributes["longest"][0] == "0": continue pids.add(g.id) ids = pids logging.debug("After checking longest: {0}".format(len(ids))) # Collects aliases gff = Gff(gff_file) for g in gff: if g.name in ids: ids.add(g.id) logging.debug("Total ids including aliases: {0}".format(len(ids))) gff = Gff(gff_file) beds = defaultdict(list) for g in gff: if ids and not (g.id in ids or g.name in ids or g.parent in ids): continue beds[g.type].append(g.bedline) table = {} for type, bb in sorted(beds.items()): bs = BedSummary(bb) table[(type, "Features")] = bs.nfeats table[(type, "Unique bases")] = bs.unique_bases table[(type, "Total bases")] = bs.total_bases print(tabulate(table), file=sys.stdout)
python
def summary(args): """ %prog summary gffile Print summary stats for features of different types. """ from jcvi.formats.base import SetFile from jcvi.formats.bed import BedSummary from jcvi.utils.table import tabulate p = OptionParser(summary.__doc__) p.add_option("--isoform", default=False, action="store_true", help="Find longest isoform of each id") p.add_option("--ids", help="Only include features from certain IDs") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) gff_file, = args ids = opts.ids if ids: ids = SetFile(ids) logging.debug("Total ids loaded: {0}".format(len(ids))) if opts.isoform: pids = set() gff = Gff(gff_file) for g in gff: if g.type != "mRNA": continue if g.parent not in ids: continue if "longest" not in g.attributes: pids = set(x + ".1" for x in ids) break if g.attributes["longest"][0] == "0": continue pids.add(g.id) ids = pids logging.debug("After checking longest: {0}".format(len(ids))) # Collects aliases gff = Gff(gff_file) for g in gff: if g.name in ids: ids.add(g.id) logging.debug("Total ids including aliases: {0}".format(len(ids))) gff = Gff(gff_file) beds = defaultdict(list) for g in gff: if ids and not (g.id in ids or g.name in ids or g.parent in ids): continue beds[g.type].append(g.bedline) table = {} for type, bb in sorted(beds.items()): bs = BedSummary(bb) table[(type, "Features")] = bs.nfeats table[(type, "Unique bases")] = bs.unique_bases table[(type, "Total bases")] = bs.total_bases print(tabulate(table), file=sys.stdout)
[ "def", "summary", "(", "args", ")", ":", "from", "jcvi", ".", "formats", ".", "base", "import", "SetFile", "from", "jcvi", ".", "formats", ".", "bed", "import", "BedSummary", "from", "jcvi", ".", "utils", ".", "table", "import", "tabulate", "p", "=", "...
%prog summary gffile Print summary stats for features of different types.
[ "%prog", "summary", "gffile" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/gff.py#L786-L851
train
200,738
tanghaibao/jcvi
jcvi/formats/gff.py
gb
def gb(args): """ %prog gb gffile fastafile Convert GFF3 to Genbank format. Recipe taken from: <http://www.biostars.org/p/2492/> """ from Bio.Alphabet import generic_dna try: from BCBio import GFF except ImportError: print("You need to install dep first: $ easy_install bcbio-gff", file=sys.stderr) p = OptionParser(gb.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) gff_file, fasta_file = args pf = op.splitext(gff_file)[0] out_file = pf + ".gb" fasta_input = SeqIO.to_dict(SeqIO.parse(fasta_file, "fasta", generic_dna)) gff_iter = GFF.parse(gff_file, fasta_input) SeqIO.write(gff_iter, out_file, "genbank")
python
def gb(args): """ %prog gb gffile fastafile Convert GFF3 to Genbank format. Recipe taken from: <http://www.biostars.org/p/2492/> """ from Bio.Alphabet import generic_dna try: from BCBio import GFF except ImportError: print("You need to install dep first: $ easy_install bcbio-gff", file=sys.stderr) p = OptionParser(gb.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) gff_file, fasta_file = args pf = op.splitext(gff_file)[0] out_file = pf + ".gb" fasta_input = SeqIO.to_dict(SeqIO.parse(fasta_file, "fasta", generic_dna)) gff_iter = GFF.parse(gff_file, fasta_input) SeqIO.write(gff_iter, out_file, "genbank")
[ "def", "gb", "(", "args", ")", ":", "from", "Bio", ".", "Alphabet", "import", "generic_dna", "try", ":", "from", "BCBio", "import", "GFF", "except", "ImportError", ":", "print", "(", "\"You need to install dep first: $ easy_install bcbio-gff\"", ",", "file", "=", ...
%prog gb gffile fastafile Convert GFF3 to Genbank format. Recipe taken from: <http://www.biostars.org/p/2492/>
[ "%prog", "gb", "gffile", "fastafile" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/gff.py#L854-L878
train
200,739
tanghaibao/jcvi
jcvi/formats/gff.py
orient
def orient(args): """ %prog orient in.gff3 features.fasta > out.gff3 Change the feature orientations based on translation. This script is often needed in fixing the strand information after mapping RNA-seq transcripts. You can generate the features.fasta similar to this command: $ %prog load --parents=EST_match --children=match_part clc.JCVIv4a.gff JCVI.Medtr.v4.fasta -o features.fasta """ from jcvi.formats.fasta import longestorf p = OptionParser(orient.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) ingff3, fastafile = args idsfile = fastafile.rsplit(".", 1)[0] + ".orf.ids" if need_update(fastafile, idsfile): longestorf([fastafile, "--ids"]) orientations = DictFile(idsfile) gff = Gff(ingff3) flipped = 0 for g in gff: id = None for tag in ("ID", "Parent"): if tag in g.attributes: id, = g.attributes[tag] break assert id orientation = orientations.get(id, "+") if orientation == '-': g.strand = {"+": "-", "-": "+"}[g.strand] flipped += 1 print(g) logging.debug("A total of {0} features flipped.".format(flipped))
python
def orient(args): """ %prog orient in.gff3 features.fasta > out.gff3 Change the feature orientations based on translation. This script is often needed in fixing the strand information after mapping RNA-seq transcripts. You can generate the features.fasta similar to this command: $ %prog load --parents=EST_match --children=match_part clc.JCVIv4a.gff JCVI.Medtr.v4.fasta -o features.fasta """ from jcvi.formats.fasta import longestorf p = OptionParser(orient.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) ingff3, fastafile = args idsfile = fastafile.rsplit(".", 1)[0] + ".orf.ids" if need_update(fastafile, idsfile): longestorf([fastafile, "--ids"]) orientations = DictFile(idsfile) gff = Gff(ingff3) flipped = 0 for g in gff: id = None for tag in ("ID", "Parent"): if tag in g.attributes: id, = g.attributes[tag] break assert id orientation = orientations.get(id, "+") if orientation == '-': g.strand = {"+": "-", "-": "+"}[g.strand] flipped += 1 print(g) logging.debug("A total of {0} features flipped.".format(flipped))
[ "def", "orient", "(", "args", ")", ":", "from", "jcvi", ".", "formats", ".", "fasta", "import", "longestorf", "p", "=", "OptionParser", "(", "orient", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len...
%prog orient in.gff3 features.fasta > out.gff3 Change the feature orientations based on translation. This script is often needed in fixing the strand information after mapping RNA-seq transcripts. You can generate the features.fasta similar to this command: $ %prog load --parents=EST_match --children=match_part clc.JCVIv4a.gff JCVI.Medtr.v4.fasta -o features.fasta
[ "%prog", "orient", "in", ".", "gff3", "features", ".", "fasta", ">", "out", ".", "gff3" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/gff.py#L881-L924
train
200,740
tanghaibao/jcvi
jcvi/formats/gff.py
rename
def rename(args): """ %prog rename in.gff3 switch.ids > reindexed.gff3 Change the IDs within the gff3. """ p = OptionParser(rename.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) ingff3, switch = args switch = DictFile(switch) gff = Gff(ingff3) for g in gff: id, = g.attributes["ID"] newname = switch.get(id, id) g.attributes["ID"] = [newname] if "Parent" in g.attributes: parents = g.attributes["Parent"] g.attributes["Parent"] = [switch.get(x, x) for x in parents] g.update_attributes() print(g)
python
def rename(args): """ %prog rename in.gff3 switch.ids > reindexed.gff3 Change the IDs within the gff3. """ p = OptionParser(rename.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) ingff3, switch = args switch = DictFile(switch) gff = Gff(ingff3) for g in gff: id, = g.attributes["ID"] newname = switch.get(id, id) g.attributes["ID"] = [newname] if "Parent" in g.attributes: parents = g.attributes["Parent"] g.attributes["Parent"] = [switch.get(x, x) for x in parents] g.update_attributes() print(g)
[ "def", "rename", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "rename", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "2", ":", "sys", ".", "exit", "(", "n...
%prog rename in.gff3 switch.ids > reindexed.gff3 Change the IDs within the gff3.
[ "%prog", "rename", "in", ".", "gff3", "switch", ".", "ids", ">", "reindexed", ".", "gff3" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/gff.py#L927-L953
train
200,741
tanghaibao/jcvi
jcvi/formats/gff.py
parents
def parents(args): """ %prog parents gffile models.ids Find the parents given a list of IDs in "models.ids". """ p = OptionParser(parents.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) gff_file, idsfile = args g = make_index(gff_file) fp = open(idsfile) for row in fp: cid = row.strip() b = next(g.parents(cid, 1)) print("\t".join((cid, b.id)))
python
def parents(args): """ %prog parents gffile models.ids Find the parents given a list of IDs in "models.ids". """ p = OptionParser(parents.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) gff_file, idsfile = args g = make_index(gff_file) fp = open(idsfile) for row in fp: cid = row.strip() b = next(g.parents(cid, 1)) print("\t".join((cid, b.id)))
[ "def", "parents", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "parents", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "2", ":", "sys", ".", "exit", "(", ...
%prog parents gffile models.ids Find the parents given a list of IDs in "models.ids".
[ "%prog", "parents", "gffile", "models", ".", "ids" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/gff.py#L956-L975
train
200,742
tanghaibao/jcvi
jcvi/formats/gff.py
liftover
def liftover(args): """ %prog liftover gffile > liftover.gff Adjust gff coordinates based on tile number. For example, "gannotation.asmbl.000095.7" is the 8-th tile on asmbl.000095. """ p = OptionParser(liftover.__doc__) p.add_option("--tilesize", default=50000, type="int", help="The size for each tile [default: %default]") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) gffile, = args gff = Gff(gffile) for g in gff: seqid = g.seqid seqid, tilenum = seqid.rsplit(".", 1) tilenum = int(tilenum) g.seqid = seqid offset = tilenum * opts.tilesize g.start += offset g.end += offset print(g)
python
def liftover(args): """ %prog liftover gffile > liftover.gff Adjust gff coordinates based on tile number. For example, "gannotation.asmbl.000095.7" is the 8-th tile on asmbl.000095. """ p = OptionParser(liftover.__doc__) p.add_option("--tilesize", default=50000, type="int", help="The size for each tile [default: %default]") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) gffile, = args gff = Gff(gffile) for g in gff: seqid = g.seqid seqid, tilenum = seqid.rsplit(".", 1) tilenum = int(tilenum) g.seqid = seqid offset = tilenum * opts.tilesize g.start += offset g.end += offset print(g)
[ "def", "liftover", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "liftover", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--tilesize\"", ",", "default", "=", "50000", ",", "type", "=", "\"int\"", ",", "help", "=", "\"The size for each tile ...
%prog liftover gffile > liftover.gff Adjust gff coordinates based on tile number. For example, "gannotation.asmbl.000095.7" is the 8-th tile on asmbl.000095.
[ "%prog", "liftover", "gffile", ">", "liftover", ".", "gff" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/gff.py#L1754-L1779
train
200,743
tanghaibao/jcvi
jcvi/formats/gff.py
get_piles
def get_piles(allgenes): """ Before running uniq, we need to compute all the piles. The piles are a set of redundant features we want to get rid of. Input are a list of GffLines features. Output are list of list of features distinct "piles". """ from jcvi.utils.range import Range, range_piles ranges = [Range(a.seqid, a.start, a.end, 0, i) \ for i, a in enumerate(allgenes)] for pile in range_piles(ranges): yield [allgenes[x] for x in pile]
python
def get_piles(allgenes): """ Before running uniq, we need to compute all the piles. The piles are a set of redundant features we want to get rid of. Input are a list of GffLines features. Output are list of list of features distinct "piles". """ from jcvi.utils.range import Range, range_piles ranges = [Range(a.seqid, a.start, a.end, 0, i) \ for i, a in enumerate(allgenes)] for pile in range_piles(ranges): yield [allgenes[x] for x in pile]
[ "def", "get_piles", "(", "allgenes", ")", ":", "from", "jcvi", ".", "utils", ".", "range", "import", "Range", ",", "range_piles", "ranges", "=", "[", "Range", "(", "a", ".", "seqid", ",", "a", ".", "start", ",", "a", ".", "end", ",", "0", ",", "i...
Before running uniq, we need to compute all the piles. The piles are a set of redundant features we want to get rid of. Input are a list of GffLines features. Output are list of list of features distinct "piles".
[ "Before", "running", "uniq", "we", "need", "to", "compute", "all", "the", "piles", ".", "The", "piles", "are", "a", "set", "of", "redundant", "features", "we", "want", "to", "get", "rid", "of", ".", "Input", "are", "a", "list", "of", "GffLines", "featu...
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/gff.py#L1782-L1794
train
200,744
tanghaibao/jcvi
jcvi/formats/gff.py
match_subfeats
def match_subfeats(f1, f2, dbx1, dbx2, featuretype=None, slop=False): """ Given 2 gffutils features located in 2 separate gffutils databases, iterate through all subfeatures of a certain type and check whether they are identical or not The `slop` parameter allows for variation in the terminal UTR region """ f1c, f2c = list(dbx1.children(f1, featuretype=featuretype, order_by='start')), \ list(dbx2.children(f2, featuretype=featuretype, order_by='start')) lf1c, lf2c = len(f1c), len(f2c) if match_nchildren(f1c, f2c): if lf1c > 0 and lf2c > 0: exclN = set() if featuretype.endswith('UTR') or featuretype == 'exon': N = [] if featuretype.startswith('five_prime'): N = [1] if f1.strand == "+" else [lf1c] elif featuretype.startswith('three_prime'): N = [lf1c] if f1.strand == "+" else [1] else: # infer UTR from exon collection N = [1] if 1 == lf1c else [1, lf1c] for n in N: if match_Nth_child(f1c, f2c, N=n, slop=slop): exclN.add(n-1) else: return False for i, (cf1, cf2) in enumerate(zip(f1c, f2c)): if i in exclN: continue if not match_span(cf1, cf2): return False else: if (lf1c, lf2c) in [(0, 1), (1, 0)] and slop \ and featuretype.endswith('UTR'): return True return False return True
python
def match_subfeats(f1, f2, dbx1, dbx2, featuretype=None, slop=False): """ Given 2 gffutils features located in 2 separate gffutils databases, iterate through all subfeatures of a certain type and check whether they are identical or not The `slop` parameter allows for variation in the terminal UTR region """ f1c, f2c = list(dbx1.children(f1, featuretype=featuretype, order_by='start')), \ list(dbx2.children(f2, featuretype=featuretype, order_by='start')) lf1c, lf2c = len(f1c), len(f2c) if match_nchildren(f1c, f2c): if lf1c > 0 and lf2c > 0: exclN = set() if featuretype.endswith('UTR') or featuretype == 'exon': N = [] if featuretype.startswith('five_prime'): N = [1] if f1.strand == "+" else [lf1c] elif featuretype.startswith('three_prime'): N = [lf1c] if f1.strand == "+" else [1] else: # infer UTR from exon collection N = [1] if 1 == lf1c else [1, lf1c] for n in N: if match_Nth_child(f1c, f2c, N=n, slop=slop): exclN.add(n-1) else: return False for i, (cf1, cf2) in enumerate(zip(f1c, f2c)): if i in exclN: continue if not match_span(cf1, cf2): return False else: if (lf1c, lf2c) in [(0, 1), (1, 0)] and slop \ and featuretype.endswith('UTR'): return True return False return True
[ "def", "match_subfeats", "(", "f1", ",", "f2", ",", "dbx1", ",", "dbx2", ",", "featuretype", "=", "None", ",", "slop", "=", "False", ")", ":", "f1c", ",", "f2c", "=", "list", "(", "dbx1", ".", "children", "(", "f1", ",", "featuretype", "=", "featur...
Given 2 gffutils features located in 2 separate gffutils databases, iterate through all subfeatures of a certain type and check whether they are identical or not The `slop` parameter allows for variation in the terminal UTR region
[ "Given", "2", "gffutils", "features", "located", "in", "2", "separate", "gffutils", "databases", "iterate", "through", "all", "subfeatures", "of", "a", "certain", "type", "and", "check", "whether", "they", "are", "identical", "or", "not" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/gff.py#L1840-L1881
train
200,745
tanghaibao/jcvi
jcvi/formats/gff.py
uniq
def uniq(args): """ %prog uniq gffile > uniq.gff Remove redundant gene models. For overlapping gene models, take the longest gene. A second scan takes only the genes selected. --mode controls whether you want larger feature, or higher scoring feature. --best controls how many redundant features to keep, e.g. 10 for est2genome. """ supported_modes = ("span", "score") p = OptionParser(uniq.__doc__) p.add_option("--type", default="gene", help="Types of features to non-redundify [default: %default]") p.add_option("--mode", default="span", choices=supported_modes, help="Pile mode [default: %default]") p.add_option("--best", default=1, type="int", help="Use best N features [default: %default]") p.add_option("--name", default=False, action="store_true", help="Non-redundify Name attribute [default: %default]") p.add_option("--iter", default="2", choices=("1", "2"), help="Number of iterations to grab children [default: %default]") p.set_outfile() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) gffile, = args mode = opts.mode bestn = opts.best allgenes = import_feats(gffile, opts.type) g = get_piles(allgenes) bestids = set() for group in g: if mode == "span": scores_group = [(- x.span, x) for x in group] else: scores_group = [(- float(x.score), x) for x in group] scores_group.sort() seen = set() for score, x in scores_group: if len(seen) >= bestn: break name = x.attributes["Name"][0] if opts.name else x.accn if name in seen: continue seen.add(name) bestids.add(x.accn) populate_children(opts.outfile, bestids, gffile, iter=opts.iter)
python
def uniq(args): """ %prog uniq gffile > uniq.gff Remove redundant gene models. For overlapping gene models, take the longest gene. A second scan takes only the genes selected. --mode controls whether you want larger feature, or higher scoring feature. --best controls how many redundant features to keep, e.g. 10 for est2genome. """ supported_modes = ("span", "score") p = OptionParser(uniq.__doc__) p.add_option("--type", default="gene", help="Types of features to non-redundify [default: %default]") p.add_option("--mode", default="span", choices=supported_modes, help="Pile mode [default: %default]") p.add_option("--best", default=1, type="int", help="Use best N features [default: %default]") p.add_option("--name", default=False, action="store_true", help="Non-redundify Name attribute [default: %default]") p.add_option("--iter", default="2", choices=("1", "2"), help="Number of iterations to grab children [default: %default]") p.set_outfile() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) gffile, = args mode = opts.mode bestn = opts.best allgenes = import_feats(gffile, opts.type) g = get_piles(allgenes) bestids = set() for group in g: if mode == "span": scores_group = [(- x.span, x) for x in group] else: scores_group = [(- float(x.score), x) for x in group] scores_group.sort() seen = set() for score, x in scores_group: if len(seen) >= bestn: break name = x.attributes["Name"][0] if opts.name else x.accn if name in seen: continue seen.add(name) bestids.add(x.accn) populate_children(opts.outfile, bestids, gffile, iter=opts.iter)
[ "def", "uniq", "(", "args", ")", ":", "supported_modes", "=", "(", "\"span\"", ",", "\"score\"", ")", "p", "=", "OptionParser", "(", "uniq", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--type\"", ",", "default", "=", "\"gene\"", ",", "help", "...
%prog uniq gffile > uniq.gff Remove redundant gene models. For overlapping gene models, take the longest gene. A second scan takes only the genes selected. --mode controls whether you want larger feature, or higher scoring feature. --best controls how many redundant features to keep, e.g. 10 for est2genome.
[ "%prog", "uniq", "gffile", ">", "uniq", ".", "gff" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/gff.py#L1897-L1953
train
200,746
tanghaibao/jcvi
jcvi/formats/gff.py
sort
def sort(args): """ %prog sort gffile Sort gff file using plain old unix sort based on [chromosome, start coordinate]. or topologically based on hierarchy of features using the gt (genometools) toolkit """ valid_sort_methods = ("unix", "topo") p = OptionParser(sort.__doc__) p.add_option("--method", default="unix", choices=valid_sort_methods, help="Specify sort method [default: %default]") p.add_option("-i", dest="inplace", default=False, action="store_true", help="If doing a unix sort, perform sort inplace [default: %default]") p.set_tmpdir() p.set_outfile() p.set_home("gt") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) gffile, = args sortedgff = opts.outfile if opts.inplace: if opts.method == "topo" or (opts.method == "unix" and gffile in ("-", "stdin")): logging.error("Cannot perform inplace sort when method is `topo`" + \ " or method is `unix` and input is `stdin` stream") sys.exit() if opts.method == "unix": cmd = "sort" cmd += " -k1,1 -k4,4n {0}".format(gffile) if opts.tmpdir: cmd += " -T {0}".format(opts.tmpdir) if opts.inplace: cmd += " -o {0}".gffile sortedgff = None sh(cmd, outfile=sortedgff) elif opts.method == "topo": GT_HOME = opts.gt_home if not op.isdir(GT_HOME): logging.error("GT_HOME={0} directory does not exist".format(GT_HOME)) sys.exit() cmd = "{0}".format(op.join(GT_HOME, "bin", "gt")) cmd += " gff3 -sort -tidy -retainids -addids no {0}".format(gffile) sh(cmd, outfile=sortedgff)
python
def sort(args): """ %prog sort gffile Sort gff file using plain old unix sort based on [chromosome, start coordinate]. or topologically based on hierarchy of features using the gt (genometools) toolkit """ valid_sort_methods = ("unix", "topo") p = OptionParser(sort.__doc__) p.add_option("--method", default="unix", choices=valid_sort_methods, help="Specify sort method [default: %default]") p.add_option("-i", dest="inplace", default=False, action="store_true", help="If doing a unix sort, perform sort inplace [default: %default]") p.set_tmpdir() p.set_outfile() p.set_home("gt") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) gffile, = args sortedgff = opts.outfile if opts.inplace: if opts.method == "topo" or (opts.method == "unix" and gffile in ("-", "stdin")): logging.error("Cannot perform inplace sort when method is `topo`" + \ " or method is `unix` and input is `stdin` stream") sys.exit() if opts.method == "unix": cmd = "sort" cmd += " -k1,1 -k4,4n {0}".format(gffile) if opts.tmpdir: cmd += " -T {0}".format(opts.tmpdir) if opts.inplace: cmd += " -o {0}".gffile sortedgff = None sh(cmd, outfile=sortedgff) elif opts.method == "topo": GT_HOME = opts.gt_home if not op.isdir(GT_HOME): logging.error("GT_HOME={0} directory does not exist".format(GT_HOME)) sys.exit() cmd = "{0}".format(op.join(GT_HOME, "bin", "gt")) cmd += " gff3 -sort -tidy -retainids -addids no {0}".format(gffile) sh(cmd, outfile=sortedgff)
[ "def", "sort", "(", "args", ")", ":", "valid_sort_methods", "=", "(", "\"unix\"", ",", "\"topo\"", ")", "p", "=", "OptionParser", "(", "sort", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--method\"", ",", "default", "=", "\"unix\"", ",", "choice...
%prog sort gffile Sort gff file using plain old unix sort based on [chromosome, start coordinate]. or topologically based on hierarchy of features using the gt (genometools) toolkit
[ "%prog", "sort", "gffile" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/gff.py#L2014-L2060
train
200,747
tanghaibao/jcvi
jcvi/formats/gff.py
fromgtf
def fromgtf(args): """ %prog fromgtf gtffile Convert gtf to gff file. In gtf, the "transcript_id" will convert to "ID=", the "transcript_id" in exon/CDS feature will be converted to "Parent=". """ p = OptionParser(fromgtf.__doc__) p.add_option("--transcript_id", default="transcript_id", help="Field name for transcript [default: %default]") p.add_option("--gene_id", default="gene_id", help="Field name for gene [default: %default]") p.add_option("--augustus", default=False, action="store_true", help="Input is AUGUSTUS gtf [default: %default]") p.set_home("augustus") p.set_outfile() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) gtffile, = args outfile = opts.outfile if opts.augustus: ahome = opts.augustus_home s = op.join(ahome, "scripts/gtf2gff.pl") cmd = "{0} --gff3 < {1} --out={2}".format(s, gtffile, outfile) sh(cmd) return gff = Gff(gtffile) fw = must_open(outfile, "w") transcript_id = opts.transcript_id gene_id = opts.gene_id nfeats = 0 for g in gff: if g.type in ("transcript", "mRNA"): g.type = "mRNA" g.update_tag(transcript_id, "ID") g.update_tag("mRNA", "ID") g.update_tag(gene_id, "Parent") g.update_tag("Gene", "Parent") elif g.type in ("exon", "CDS") or "UTR" in g.type: g.update_tag("transcript_id", "Parent") g.update_tag(g.type, "Parent") elif g.type == "gene": g.update_tag(gene_id, "ID") g.update_tag("Gene", "ID") else: assert 0, "Don't know how to deal with {0}".format(g.type) g.update_attributes() print(g, file=fw) nfeats += 1 logging.debug("A total of {0} features written.".format(nfeats))
python
def fromgtf(args): """ %prog fromgtf gtffile Convert gtf to gff file. In gtf, the "transcript_id" will convert to "ID=", the "transcript_id" in exon/CDS feature will be converted to "Parent=". """ p = OptionParser(fromgtf.__doc__) p.add_option("--transcript_id", default="transcript_id", help="Field name for transcript [default: %default]") p.add_option("--gene_id", default="gene_id", help="Field name for gene [default: %default]") p.add_option("--augustus", default=False, action="store_true", help="Input is AUGUSTUS gtf [default: %default]") p.set_home("augustus") p.set_outfile() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) gtffile, = args outfile = opts.outfile if opts.augustus: ahome = opts.augustus_home s = op.join(ahome, "scripts/gtf2gff.pl") cmd = "{0} --gff3 < {1} --out={2}".format(s, gtffile, outfile) sh(cmd) return gff = Gff(gtffile) fw = must_open(outfile, "w") transcript_id = opts.transcript_id gene_id = opts.gene_id nfeats = 0 for g in gff: if g.type in ("transcript", "mRNA"): g.type = "mRNA" g.update_tag(transcript_id, "ID") g.update_tag("mRNA", "ID") g.update_tag(gene_id, "Parent") g.update_tag("Gene", "Parent") elif g.type in ("exon", "CDS") or "UTR" in g.type: g.update_tag("transcript_id", "Parent") g.update_tag(g.type, "Parent") elif g.type == "gene": g.update_tag(gene_id, "ID") g.update_tag("Gene", "ID") else: assert 0, "Don't know how to deal with {0}".format(g.type) g.update_attributes() print(g, file=fw) nfeats += 1 logging.debug("A total of {0} features written.".format(nfeats))
[ "def", "fromgtf", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "fromgtf", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--transcript_id\"", ",", "default", "=", "\"transcript_id\"", ",", "help", "=", "\"Field name for transcript [default: %default]...
%prog fromgtf gtffile Convert gtf to gff file. In gtf, the "transcript_id" will convert to "ID=", the "transcript_id" in exon/CDS feature will be converted to "Parent=".
[ "%prog", "fromgtf", "gtffile" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/gff.py#L2063-L2118
train
200,748
tanghaibao/jcvi
jcvi/formats/gff.py
fromsoap
def fromsoap(args): """ %prog fromsoap soapfile > gff_file """ p = OptionParser(fromsoap.__doc__) p.add_option("--type", default="nucleotide_match", help="GFF feature type [default: %default]") p.add_option("--source", default="soap", help="GFF source qualifier [default: %default]") p.set_fixchrnames(orgn="maize") p.set_outfile() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) soapfile, = args pad0 = len(str(sum(1 for line in open(soapfile)))) fw = must_open(opts.outfile, "w") fp = must_open(soapfile) for idx, line in enumerate(fp): if opts.fix_chr_name: from jcvi.utils.cbook import fixChromName line = fixChromName(line, orgn=opts.fix_chr_name) atoms = line.strip().split("\t") attributes = "ID=match{0};Name={1}".format(str(idx).zfill(pad0), atoms[0]) start, end = int(atoms[8]), int(atoms[5]) + int(atoms[8]) - 1 seqid = atoms[7] print("\t".join(str(x) for x in (seqid, opts.source, opts.type, \ start, end, ".", atoms[6], ".", attributes)), file=fw)
python
def fromsoap(args): """ %prog fromsoap soapfile > gff_file """ p = OptionParser(fromsoap.__doc__) p.add_option("--type", default="nucleotide_match", help="GFF feature type [default: %default]") p.add_option("--source", default="soap", help="GFF source qualifier [default: %default]") p.set_fixchrnames(orgn="maize") p.set_outfile() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) soapfile, = args pad0 = len(str(sum(1 for line in open(soapfile)))) fw = must_open(opts.outfile, "w") fp = must_open(soapfile) for idx, line in enumerate(fp): if opts.fix_chr_name: from jcvi.utils.cbook import fixChromName line = fixChromName(line, orgn=opts.fix_chr_name) atoms = line.strip().split("\t") attributes = "ID=match{0};Name={1}".format(str(idx).zfill(pad0), atoms[0]) start, end = int(atoms[8]), int(atoms[5]) + int(atoms[8]) - 1 seqid = atoms[7] print("\t".join(str(x) for x in (seqid, opts.source, opts.type, \ start, end, ".", atoms[6], ".", attributes)), file=fw)
[ "def", "fromsoap", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "fromsoap", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--type\"", ",", "default", "=", "\"nucleotide_match\"", ",", "help", "=", "\"GFF feature type [default: %default]\"", ")", ...
%prog fromsoap soapfile > gff_file
[ "%prog", "fromsoap", "soapfile", ">", "gff_file" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/gff.py#L2145-L2178
train
200,749
tanghaibao/jcvi
jcvi/formats/gff.py
gtf
def gtf(args): """ %prog gtf gffile Convert gff to gtf file. In gtf, only exon/CDS features are important. The first 8 columns are the same as gff, but in the attributes field, we need to specify "gene_id" and "transcript_id". """ p = OptionParser(gtf.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) gffile, = args gff = Gff(gffile) transcript_info = AutoVivification() for g in gff: if g.type.endswith(("RNA", "transcript")): if "ID" in g.attributes and "Parent" in g.attributes: transcript_id = g.get_attr("ID") gene_id = g.get_attr("Parent") elif "mRNA" in g.attributes and "Gene" in g.attributes: transcript_id = g.get_attr("mRNA") gene_id = g.get_attr("Gene") else: transcript_id = g.get_attr("ID") gene_id = transcript_id transcript_info[transcript_id]["gene_id"] = gene_id transcript_info[transcript_id]["gene_type"] = g.type continue if g.type not in valid_gff_to_gtf_type.keys(): continue try: transcript_id = g.get_attr("Parent", first=False) except IndexError: transcript_id = g.get_attr("mRNA", first=False) g.type = valid_gff_to_gtf_type[g.type] for tid in transcript_id: if tid not in transcript_info: continue gene_type = transcript_info[tid]["gene_type"] if not gene_type.endswith("RNA") and not gene_type.endswith("transcript"): continue gene_id = transcript_info[tid]["gene_id"] g.attributes = dict(gene_id=[gene_id], transcript_id=[tid]) g.update_attributes(gtf=True, urlquote=False) print(g)
python
def gtf(args): """ %prog gtf gffile Convert gff to gtf file. In gtf, only exon/CDS features are important. The first 8 columns are the same as gff, but in the attributes field, we need to specify "gene_id" and "transcript_id". """ p = OptionParser(gtf.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) gffile, = args gff = Gff(gffile) transcript_info = AutoVivification() for g in gff: if g.type.endswith(("RNA", "transcript")): if "ID" in g.attributes and "Parent" in g.attributes: transcript_id = g.get_attr("ID") gene_id = g.get_attr("Parent") elif "mRNA" in g.attributes and "Gene" in g.attributes: transcript_id = g.get_attr("mRNA") gene_id = g.get_attr("Gene") else: transcript_id = g.get_attr("ID") gene_id = transcript_id transcript_info[transcript_id]["gene_id"] = gene_id transcript_info[transcript_id]["gene_type"] = g.type continue if g.type not in valid_gff_to_gtf_type.keys(): continue try: transcript_id = g.get_attr("Parent", first=False) except IndexError: transcript_id = g.get_attr("mRNA", first=False) g.type = valid_gff_to_gtf_type[g.type] for tid in transcript_id: if tid not in transcript_info: continue gene_type = transcript_info[tid]["gene_type"] if not gene_type.endswith("RNA") and not gene_type.endswith("transcript"): continue gene_id = transcript_info[tid]["gene_id"] g.attributes = dict(gene_id=[gene_id], transcript_id=[tid]) g.update_attributes(gtf=True, urlquote=False) print(g)
[ "def", "gtf", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "gtf", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "1", ":", "sys", ".", "exit", "(", "not", ...
%prog gtf gffile Convert gff to gtf file. In gtf, only exon/CDS features are important. The first 8 columns are the same as gff, but in the attributes field, we need to specify "gene_id" and "transcript_id".
[ "%prog", "gtf", "gffile" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/gff.py#L2181-L2231
train
200,750
tanghaibao/jcvi
jcvi/formats/gff.py
merge
def merge(args): """ %prog merge gffiles Merge several gff files into one. When only one file is given, it is assumed to be a file with a list of gff files. """ p = OptionParser(merge.__doc__) p.add_option("--seq", default=False, action="store_true", help="Print FASTA sequences at the end") p.set_outfile() opts, args = p.parse_args(args) nargs = len(args) if nargs < 1: sys.exit(not p.print_help()) if nargs == 1: listfile, = args fp = open(listfile) gffiles = [x.strip() for x in fp] else: gffiles = args outfile = opts.outfile deflines = set() fw = must_open(outfile, "w") fastarecs = {} for gffile in natsorted(gffiles, key=lambda x: op.basename(x)): logging.debug(gffile) fp = open(gffile) for row in fp: row = row.rstrip() if not row or row[0] == '#': if row == FastaTag: break if row in deflines: continue else: deflines.add(row) print(row, file=fw) if not opts.seq: continue f = Fasta(gffile, lazy=True) for key, rec in f.iteritems_ordered(): if key in fastarecs: continue fastarecs[key] = rec if opts.seq: print(FastaTag, file=fw) SeqIO.write(fastarecs.values(), fw, "fasta") fw.close()
python
def merge(args): """ %prog merge gffiles Merge several gff files into one. When only one file is given, it is assumed to be a file with a list of gff files. """ p = OptionParser(merge.__doc__) p.add_option("--seq", default=False, action="store_true", help="Print FASTA sequences at the end") p.set_outfile() opts, args = p.parse_args(args) nargs = len(args) if nargs < 1: sys.exit(not p.print_help()) if nargs == 1: listfile, = args fp = open(listfile) gffiles = [x.strip() for x in fp] else: gffiles = args outfile = opts.outfile deflines = set() fw = must_open(outfile, "w") fastarecs = {} for gffile in natsorted(gffiles, key=lambda x: op.basename(x)): logging.debug(gffile) fp = open(gffile) for row in fp: row = row.rstrip() if not row or row[0] == '#': if row == FastaTag: break if row in deflines: continue else: deflines.add(row) print(row, file=fw) if not opts.seq: continue f = Fasta(gffile, lazy=True) for key, rec in f.iteritems_ordered(): if key in fastarecs: continue fastarecs[key] = rec if opts.seq: print(FastaTag, file=fw) SeqIO.write(fastarecs.values(), fw, "fasta") fw.close()
[ "def", "merge", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "merge", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--seq\"", ",", "default", "=", "False", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"Print FASTA sequences at ...
%prog merge gffiles Merge several gff files into one. When only one file is given, it is assumed to be a file with a list of gff files.
[ "%prog", "merge", "gffiles" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/gff.py#L2234-L2292
train
200,751
tanghaibao/jcvi
jcvi/formats/gff.py
extract
def extract(args): """ %prog extract gffile --contigs: Extract particular contig(s) from the gff file. If multiple contigs are involved, use "," to separate, e.g. "contig_12,contig_150"; or provide a file with multiple contig IDs, one per line --names: Process particular ID(s) from the gff file. If multiple IDs are involved, use "," to separate; or provide a file with multiple IDs, one per line """ p = OptionParser(extract.__doc__) p.add_option("--contigs", help="Extract features from certain contigs [default: %default]") p.add_option("--names", help="Extract features with certain names [default: %default]") p.add_option("--types", type="str", default=None, help="Extract features of certain feature types [default: %default]") p.add_option("--children", default=0, choices=["1", "2"], help="Specify number of iterations: `1` grabs children, " + \ "`2` grabs grand-children [default: %default]") p.add_option("--tag", default="ID", help="Scan the tags for the names [default: %default]") p.add_option("--fasta", default=False, action="store_true", help="Write FASTA if available [default: %default]") p.set_outfile() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) gffile, = args contigfile = opts.contigs namesfile = opts.names typesfile = opts.types nametag = opts.tag contigID = parse_multi_values(contigfile) names = parse_multi_values(namesfile) types = parse_multi_values(typesfile) outfile = opts.outfile if opts.children: assert types is not None or names is not None, "Must set --names or --types" if names == None: names = list() populate_children(outfile, names, gffile, iter=opts.children, types=types) return fp = must_open(gffile) fw = must_open(opts.outfile, "w") for row in fp: atoms = row.split() if len(atoms) == 0: continue tag = atoms[0] if row[0] == "#": if row.strip() == "###": continue if not (tag == RegionTag and contigID and atoms[1] not in contigID): print(row.rstrip(), file=fw) if tag == FastaTag: break continue b = GffLine(row) 
attrib = b.attributes if contigID and tag not in contigID: continue if types and b.type in types: _id = b.accn if _id not in names: names.append(_id) if names is not None: if nametag not in attrib: continue if attrib[nametag][0] not in names: continue print(row.rstrip(), file=fw) if not opts.fasta: return f = Fasta(gffile) for s in contigID: if s in f: SeqIO.write([f[s]], fw, "fasta")
python
def extract(args): """ %prog extract gffile --contigs: Extract particular contig(s) from the gff file. If multiple contigs are involved, use "," to separate, e.g. "contig_12,contig_150"; or provide a file with multiple contig IDs, one per line --names: Process particular ID(s) from the gff file. If multiple IDs are involved, use "," to separate; or provide a file with multiple IDs, one per line """ p = OptionParser(extract.__doc__) p.add_option("--contigs", help="Extract features from certain contigs [default: %default]") p.add_option("--names", help="Extract features with certain names [default: %default]") p.add_option("--types", type="str", default=None, help="Extract features of certain feature types [default: %default]") p.add_option("--children", default=0, choices=["1", "2"], help="Specify number of iterations: `1` grabs children, " + \ "`2` grabs grand-children [default: %default]") p.add_option("--tag", default="ID", help="Scan the tags for the names [default: %default]") p.add_option("--fasta", default=False, action="store_true", help="Write FASTA if available [default: %default]") p.set_outfile() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) gffile, = args contigfile = opts.contigs namesfile = opts.names typesfile = opts.types nametag = opts.tag contigID = parse_multi_values(contigfile) names = parse_multi_values(namesfile) types = parse_multi_values(typesfile) outfile = opts.outfile if opts.children: assert types is not None or names is not None, "Must set --names or --types" if names == None: names = list() populate_children(outfile, names, gffile, iter=opts.children, types=types) return fp = must_open(gffile) fw = must_open(opts.outfile, "w") for row in fp: atoms = row.split() if len(atoms) == 0: continue tag = atoms[0] if row[0] == "#": if row.strip() == "###": continue if not (tag == RegionTag and contigID and atoms[1] not in contigID): print(row.rstrip(), file=fw) if tag == FastaTag: break continue b = GffLine(row) 
attrib = b.attributes if contigID and tag not in contigID: continue if types and b.type in types: _id = b.accn if _id not in names: names.append(_id) if names is not None: if nametag not in attrib: continue if attrib[nametag][0] not in names: continue print(row.rstrip(), file=fw) if not opts.fasta: return f = Fasta(gffile) for s in contigID: if s in f: SeqIO.write([f[s]], fw, "fasta")
[ "def", "extract", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "extract", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--contigs\"", ",", "help", "=", "\"Extract features from certain contigs [default: %default]\"", ")", "p", ".", "add_option", ...
%prog extract gffile --contigs: Extract particular contig(s) from the gff file. If multiple contigs are involved, use "," to separate, e.g. "contig_12,contig_150"; or provide a file with multiple contig IDs, one per line --names: Process particular ID(s) from the gff file. If multiple IDs are involved, use "," to separate; or provide a file with multiple IDs, one per line
[ "%prog", "extract", "gffile" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/gff.py#L2295-L2381
train
200,752
tanghaibao/jcvi
jcvi/formats/gff.py
split
def split(args): """ %prog split gffile outdir Split the gff into one contig per file. Will also take sequences if the file contains FASTA sequences. """ p = OptionParser(split.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) gffile, outdir = args mkdir(outdir) g = Gff(gffile) seqids = g.seqids for s in seqids: outfile = op.join(outdir, s + ".gff") extract([gffile, "--contigs=" + s, "--outfile=" + outfile])
python
def split(args): """ %prog split gffile outdir Split the gff into one contig per file. Will also take sequences if the file contains FASTA sequences. """ p = OptionParser(split.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) gffile, outdir = args mkdir(outdir) g = Gff(gffile) seqids = g.seqids for s in seqids: outfile = op.join(outdir, s + ".gff") extract([gffile, "--contigs=" + s, "--outfile=" + outfile])
[ "def", "split", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "split", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "2", ":", "sys", ".", "exit", "(", "not...
%prog split gffile outdir Split the gff into one contig per file. Will also take sequences if the file contains FASTA sequences.
[ "%prog", "split", "gffile", "outdir" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/gff.py#L2384-L2406
train
200,753
tanghaibao/jcvi
jcvi/formats/gff.py
note
def note(args): """ %prog note gffile > tabfile Extract certain attribute field for each feature. """ p = OptionParser(note.__doc__) p.add_option("--type", default=None, help="Only process certain types, multiple types allowed with comma") p.add_option("--attribute", default="Parent,Note", help="Attribute field to extract, multiple fields allowd with comma") p.add_option("--AED", type="float", help="Only extract lines with AED score <=") p.add_option("--exoncount", default=False, action="store_true", help="Get the exon count for each mRNA feat") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) gffile, = args type = opts.type if type: type = type.split(",") g = make_index(gffile) exoncounts = {} if opts.exoncount: for feat in g.features_of_type("mRNA"): nexons = 0 for c in g.children(feat.id, 1): if c.featuretype != "exon": continue nexons += 1 exoncounts[feat.id] = nexons attrib = opts.attribute.split(",") gff = Gff(gffile) seen = set() AED = opts.AED for g in gff: if type and g.type not in type: continue if AED is not None and float(g.attributes["_AED"][0]) > AED: continue keyval = [g.accn] + [",".join(g.attributes[x]) \ for x in attrib if x in g.attributes] if exoncounts: nexons = exoncounts.get(g.accn, 0) keyval.append(str(nexons)) keyval = tuple(keyval) if keyval not in seen: print("\t".join(keyval)) seen.add(keyval)
python
def note(args): """ %prog note gffile > tabfile Extract certain attribute field for each feature. """ p = OptionParser(note.__doc__) p.add_option("--type", default=None, help="Only process certain types, multiple types allowed with comma") p.add_option("--attribute", default="Parent,Note", help="Attribute field to extract, multiple fields allowd with comma") p.add_option("--AED", type="float", help="Only extract lines with AED score <=") p.add_option("--exoncount", default=False, action="store_true", help="Get the exon count for each mRNA feat") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) gffile, = args type = opts.type if type: type = type.split(",") g = make_index(gffile) exoncounts = {} if opts.exoncount: for feat in g.features_of_type("mRNA"): nexons = 0 for c in g.children(feat.id, 1): if c.featuretype != "exon": continue nexons += 1 exoncounts[feat.id] = nexons attrib = opts.attribute.split(",") gff = Gff(gffile) seen = set() AED = opts.AED for g in gff: if type and g.type not in type: continue if AED is not None and float(g.attributes["_AED"][0]) > AED: continue keyval = [g.accn] + [",".join(g.attributes[x]) \ for x in attrib if x in g.attributes] if exoncounts: nexons = exoncounts.get(g.accn, 0) keyval.append(str(nexons)) keyval = tuple(keyval) if keyval not in seen: print("\t".join(keyval)) seen.add(keyval)
[ "def", "note", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "note", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--type\"", ",", "default", "=", "None", ",", "help", "=", "\"Only process certain types, multiple types allowed with comma\"", ")", ...
%prog note gffile > tabfile Extract certain attribute field for each feature.
[ "%prog", "note", "gffile", ">", "tabfile" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/gff.py#L2409-L2462
train
200,754
tanghaibao/jcvi
jcvi/formats/gff.py
make_index
def make_index(gff_file): """ Make a sqlite database for fast retrieval of features. """ import gffutils db_file = gff_file + ".db" if need_update(gff_file, db_file): if op.exists(db_file): os.remove(db_file) logging.debug("Indexing `{0}`".format(gff_file)) gffutils.create_db(gff_file, db_file, merge_strategy="create_unique") else: logging.debug("Load index `{0}`".format(gff_file)) return gffutils.FeatureDB(db_file)
python
def make_index(gff_file): """ Make a sqlite database for fast retrieval of features. """ import gffutils db_file = gff_file + ".db" if need_update(gff_file, db_file): if op.exists(db_file): os.remove(db_file) logging.debug("Indexing `{0}`".format(gff_file)) gffutils.create_db(gff_file, db_file, merge_strategy="create_unique") else: logging.debug("Load index `{0}`".format(gff_file)) return gffutils.FeatureDB(db_file)
[ "def", "make_index", "(", "gff_file", ")", ":", "import", "gffutils", "db_file", "=", "gff_file", "+", "\".db\"", "if", "need_update", "(", "gff_file", ",", "db_file", ")", ":", "if", "op", ".", "exists", "(", "db_file", ")", ":", "os", ".", "remove", ...
Make a sqlite database for fast retrieval of features.
[ "Make", "a", "sqlite", "database", "for", "fast", "retrieval", "of", "features", "." ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/gff.py#L2613-L2628
train
200,755
tanghaibao/jcvi
jcvi/formats/gff.py
children
def children(args): """ %prog children gff_file Get the children that have the same parent. """ p = OptionParser(children.__doc__) p.add_option("--parents", default="gene", help="list of features to extract, use comma to separate (e.g." "'gene,mRNA') [default: %default]") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) gff_file, = args g = make_index(gff_file) parents = set(opts.parents.split(',')) for feat in get_parents(gff_file, parents): cc = [c.id for c in g.children(feat.id, 1)] if len(cc) <= 1: continue print("\t".join(str(x) for x in \ (feat.id, feat.start, feat.stop, "|".join(cc))))
python
def children(args): """ %prog children gff_file Get the children that have the same parent. """ p = OptionParser(children.__doc__) p.add_option("--parents", default="gene", help="list of features to extract, use comma to separate (e.g." "'gene,mRNA') [default: %default]") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) gff_file, = args g = make_index(gff_file) parents = set(opts.parents.split(',')) for feat in get_parents(gff_file, parents): cc = [c.id for c in g.children(feat.id, 1)] if len(cc) <= 1: continue print("\t".join(str(x) for x in \ (feat.id, feat.start, feat.stop, "|".join(cc))))
[ "def", "children", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "children", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--parents\"", ",", "default", "=", "\"gene\"", ",", "help", "=", "\"list of features to extract, use comma to separate (e.g.\"...
%prog children gff_file Get the children that have the same parent.
[ "%prog", "children", "gff_file" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/gff.py#L2639-L2666
train
200,756
tanghaibao/jcvi
jcvi/formats/gff.py
bed12
def bed12(args): """ %prog bed12 gffile > bedfile Produce bed12 file for coding features. The exons will be converted to blocks. The CDS range will be shown between thickStart to thickEnd. For reference, bed format consists of the following fields: 1. chrom 2. chromStart 3. chromEnd 4. name 5. score 6. strand 7. thickStart 8. thickEnd 9. itemRgb 10. blockCount 11. blockSizes 12. blockStarts """ p = OptionParser(bed12.__doc__) p.add_option("--parent", default="mRNA", help="Top feature type [default: %default]") p.add_option("--block", default="exon", help="Feature type for regular blocks [default: %default]") p.add_option("--thick", default="CDS", help="Feature type for thick blocks [default: %default]") p.set_outfile() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) gffile, = args parent, block, thick = opts.parent, opts.block, opts.thick outfile = opts.outfile g = make_index(gffile) fw = must_open(outfile, "w") for f in g.features_of_type(parent): chrom = f.chrom chromStart = f.start - 1 chromEnd = f.stop name = f.id score = 0 strand = f.strand thickStart = 1e15 thickEnd = 0 blocks = [] for c in g.children(name, 1): cstart, cend = c.start - 1, c.stop if c.featuretype == block: blockStart = cstart - chromStart blockSize = cend - cstart blocks.append((blockStart, blockSize)) elif c.featuretype == thick: thickStart = min(thickStart, cstart) thickEnd = max(thickEnd, cend) blocks.sort() blockStarts, blockSizes = zip(*blocks) blockCount = len(blocks) blockSizes = ",".join(str(x) for x in blockSizes) + "," blockStarts = ",".join(str(x) for x in blockStarts) + "," itemRgb = 0 print("\t".join(str(x) for x in (chrom, chromStart, chromEnd, \ name, score, strand, thickStart, thickEnd, itemRgb, blockCount, blockSizes, blockStarts)), file=fw)
python
def bed12(args): """ %prog bed12 gffile > bedfile Produce bed12 file for coding features. The exons will be converted to blocks. The CDS range will be shown between thickStart to thickEnd. For reference, bed format consists of the following fields: 1. chrom 2. chromStart 3. chromEnd 4. name 5. score 6. strand 7. thickStart 8. thickEnd 9. itemRgb 10. blockCount 11. blockSizes 12. blockStarts """ p = OptionParser(bed12.__doc__) p.add_option("--parent", default="mRNA", help="Top feature type [default: %default]") p.add_option("--block", default="exon", help="Feature type for regular blocks [default: %default]") p.add_option("--thick", default="CDS", help="Feature type for thick blocks [default: %default]") p.set_outfile() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) gffile, = args parent, block, thick = opts.parent, opts.block, opts.thick outfile = opts.outfile g = make_index(gffile) fw = must_open(outfile, "w") for f in g.features_of_type(parent): chrom = f.chrom chromStart = f.start - 1 chromEnd = f.stop name = f.id score = 0 strand = f.strand thickStart = 1e15 thickEnd = 0 blocks = [] for c in g.children(name, 1): cstart, cend = c.start - 1, c.stop if c.featuretype == block: blockStart = cstart - chromStart blockSize = cend - cstart blocks.append((blockStart, blockSize)) elif c.featuretype == thick: thickStart = min(thickStart, cstart) thickEnd = max(thickEnd, cend) blocks.sort() blockStarts, blockSizes = zip(*blocks) blockCount = len(blocks) blockSizes = ",".join(str(x) for x in blockSizes) + "," blockStarts = ",".join(str(x) for x in blockStarts) + "," itemRgb = 0 print("\t".join(str(x) for x in (chrom, chromStart, chromEnd, \ name, score, strand, thickStart, thickEnd, itemRgb, blockCount, blockSizes, blockStarts)), file=fw)
[ "def", "bed12", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "bed12", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--parent\"", ",", "default", "=", "\"mRNA\"", ",", "help", "=", "\"Top feature type [default: %default]\"", ")", "p", ".", "...
%prog bed12 gffile > bedfile Produce bed12 file for coding features. The exons will be converted to blocks. The CDS range will be shown between thickStart to thickEnd. For reference, bed format consists of the following fields: 1. chrom 2. chromStart 3. chromEnd 4. name 5. score 6. strand 7. thickStart 8. thickEnd 9. itemRgb 10. blockCount 11. blockSizes 12. blockStarts
[ "%prog", "bed12", "gffile", ">", "bedfile" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/gff.py#L2945-L3020
train
200,757
tanghaibao/jcvi
jcvi/formats/maf.py
bed
def bed(args): """ %prog bed maffiles > out.bed Convert a folder of maf alignments to the bed features then useful to check coverage, etc. """ p = OptionParser(bed.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(p.print_help()) flist = args prefix = flist[0].split(".")[0] j = 0 for f in flist: reader = Maf(f).reader for rec in reader: a, b = rec.components for a, tag in zip((a, b), "ab"): name = "{0}_{1:07d}{2}".format(prefix, j, tag) print("\t".join(str(x) for x in (a.src, a.forward_strand_start, \ a.forward_strand_end, name))) j += 1
python
def bed(args): """ %prog bed maffiles > out.bed Convert a folder of maf alignments to the bed features then useful to check coverage, etc. """ p = OptionParser(bed.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(p.print_help()) flist = args prefix = flist[0].split(".")[0] j = 0 for f in flist: reader = Maf(f).reader for rec in reader: a, b = rec.components for a, tag in zip((a, b), "ab"): name = "{0}_{1:07d}{2}".format(prefix, j, tag) print("\t".join(str(x) for x in (a.src, a.forward_strand_start, \ a.forward_strand_end, name))) j += 1
[ "def", "bed", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "bed", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "1", ":", "sys", ".", "exit", "(", "p", "...
%prog bed maffiles > out.bed Convert a folder of maf alignments to the bed features then useful to check coverage, etc.
[ "%prog", "bed", "maffiles", ">", "out", ".", "bed" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/maf.py#L69-L97
train
200,758
tanghaibao/jcvi
jcvi/formats/maf.py
blast
def blast(args): ''' %prog blast maffiles > out.blast From a folder of .maf files, generate .blast file with tabular format. ''' p = OptionParser(blast.__doc__) opts, args = p.parse_args(args) if len(args) == 0: sys.exit(p.print_help()) flist = args for f in flist: maf_to_blast8(f)
python
def blast(args): ''' %prog blast maffiles > out.blast From a folder of .maf files, generate .blast file with tabular format. ''' p = OptionParser(blast.__doc__) opts, args = p.parse_args(args) if len(args) == 0: sys.exit(p.print_help()) flist = args for f in flist: maf_to_blast8(f)
[ "def", "blast", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "blast", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "==", "0", ":", "sys", ".", "exit", "(", "p",...
%prog blast maffiles > out.blast From a folder of .maf files, generate .blast file with tabular format.
[ "%prog", "blast", "maffiles", ">", "out", ".", "blast" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/maf.py#L143-L158
train
200,759
tanghaibao/jcvi
jcvi/algorithms/ec.py
genome_mutation
def genome_mutation(candidate): """Return the mutants created by inversion mutation on the candidates. This function performs inversion or insertion. It randomly chooses two locations along the candidate and reverses the values within that slice. Insertion is done by popping one item and insert it back at random position. """ size = len(candidate) prob = random.random() if prob > .5: # Inversion p = random.randint(0, size-1) q = random.randint(0, size-1) if p > q: p, q = q, p q += 1 s = candidate[p:q] x = candidate[:p] + s[::-1] + candidate[q:] return creator.Individual(x), else: # Insertion p = random.randint(0, size-1) q = random.randint(0, size-1) cq = candidate.pop(q) candidate.insert(p, cq) return candidate,
python
def genome_mutation(candidate): """Return the mutants created by inversion mutation on the candidates. This function performs inversion or insertion. It randomly chooses two locations along the candidate and reverses the values within that slice. Insertion is done by popping one item and insert it back at random position. """ size = len(candidate) prob = random.random() if prob > .5: # Inversion p = random.randint(0, size-1) q = random.randint(0, size-1) if p > q: p, q = q, p q += 1 s = candidate[p:q] x = candidate[:p] + s[::-1] + candidate[q:] return creator.Individual(x), else: # Insertion p = random.randint(0, size-1) q = random.randint(0, size-1) cq = candidate.pop(q) candidate.insert(p, cq) return candidate,
[ "def", "genome_mutation", "(", "candidate", ")", ":", "size", "=", "len", "(", "candidate", ")", "prob", "=", "random", ".", "random", "(", ")", "if", "prob", ">", ".5", ":", "# Inversion", "p", "=", "random", ".", "randint", "(", "0", ",", "size", ...
Return the mutants created by inversion mutation on the candidates. This function performs inversion or insertion. It randomly chooses two locations along the candidate and reverses the values within that slice. Insertion is done by popping one item and insert it back at random position.
[ "Return", "the", "mutants", "created", "by", "inversion", "mutation", "on", "the", "candidates", "." ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/algorithms/ec.py#L40-L64
train
200,760
tanghaibao/jcvi
jcvi/formats/contig.py
frombed
def frombed(args): """ %prog frombed bedfile contigfasta readfasta Convert read placement to contig format. This is useful before running BAMBUS. """ from jcvi.formats.fasta import Fasta from jcvi.formats.bed import Bed from jcvi.utils.cbook import fill p = OptionParser(frombed.__doc__) opts, args = p.parse_args(args) if len(args) != 3: sys.exit(not p.print_help()) bedfile, contigfasta, readfasta = args prefix = bedfile.rsplit(".", 1)[0] contigfile = prefix + ".contig" idsfile = prefix + ".ids" contigfasta = Fasta(contigfasta) readfasta = Fasta(readfasta) bed = Bed(bedfile) checksum = "00000000 checksum." fw_ids = open(idsfile, "w") fw = open(contigfile, "w") for ctg, reads in bed.sub_beds(): ctgseq = contigfasta[ctg] ctgline = "##{0} {1} {2} bases, {3}".format(\ ctg, len(reads), len(ctgseq), checksum) print(ctg, file=fw_ids) print(ctgline, file=fw) print(fill(ctgseq.seq), file=fw) for b in reads: read = b.accn strand = b.strand readseq = readfasta[read] rc = " [RC]" if strand == "-" else "" readlen = len(readseq) rstart, rend = 1, readlen if strand == "-": rstart, rend = rend, rstart readrange = "{{{0} {1}}}".format(rstart, rend) conrange = "<{0} {1}>".format(b.start, b.end) readline = "#{0}(0){1} {2} bases, {3} {4} {5}".format(\ read, rc, readlen, checksum, readrange, conrange) print(readline, file=fw) print(fill(readseq.seq), file=fw) logging.debug("Mapped contigs written to `{0}`.".format(contigfile)) logging.debug("Contig IDs written to `{0}`.".format(idsfile))
python
def frombed(args): """ %prog frombed bedfile contigfasta readfasta Convert read placement to contig format. This is useful before running BAMBUS. """ from jcvi.formats.fasta import Fasta from jcvi.formats.bed import Bed from jcvi.utils.cbook import fill p = OptionParser(frombed.__doc__) opts, args = p.parse_args(args) if len(args) != 3: sys.exit(not p.print_help()) bedfile, contigfasta, readfasta = args prefix = bedfile.rsplit(".", 1)[0] contigfile = prefix + ".contig" idsfile = prefix + ".ids" contigfasta = Fasta(contigfasta) readfasta = Fasta(readfasta) bed = Bed(bedfile) checksum = "00000000 checksum." fw_ids = open(idsfile, "w") fw = open(contigfile, "w") for ctg, reads in bed.sub_beds(): ctgseq = contigfasta[ctg] ctgline = "##{0} {1} {2} bases, {3}".format(\ ctg, len(reads), len(ctgseq), checksum) print(ctg, file=fw_ids) print(ctgline, file=fw) print(fill(ctgseq.seq), file=fw) for b in reads: read = b.accn strand = b.strand readseq = readfasta[read] rc = " [RC]" if strand == "-" else "" readlen = len(readseq) rstart, rend = 1, readlen if strand == "-": rstart, rend = rend, rstart readrange = "{{{0} {1}}}".format(rstart, rend) conrange = "<{0} {1}>".format(b.start, b.end) readline = "#{0}(0){1} {2} bases, {3} {4} {5}".format(\ read, rc, readlen, checksum, readrange, conrange) print(readline, file=fw) print(fill(readseq.seq), file=fw) logging.debug("Mapped contigs written to `{0}`.".format(contigfile)) logging.debug("Contig IDs written to `{0}`.".format(idsfile))
[ "def", "frombed", "(", "args", ")", ":", "from", "jcvi", ".", "formats", ".", "fasta", "import", "Fasta", "from", "jcvi", ".", "formats", ".", "bed", "import", "Bed", "from", "jcvi", ".", "utils", ".", "cbook", "import", "fill", "p", "=", "OptionParser...
%prog frombed bedfile contigfasta readfasta Convert read placement to contig format. This is useful before running BAMBUS.
[ "%prog", "frombed", "bedfile", "contigfasta", "readfasta" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/contig.py#L90-L146
train
200,761
tanghaibao/jcvi
jcvi/formats/contig.py
bed
def bed(args): """ %prog bed contigfile Prints out the contigs and their associated reads. """ p = OptionParser(main.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) contigfile, = args bedfile = contigfile.rsplit(".", 1)[0] + ".bed" fw = open(bedfile, "w") c = ContigFile(contigfile) for rec in c.iter_records(): for r in rec.reads: print(r.bedline, file=fw) logging.debug("File written to `{0}`.".format(bedfile)) return bedfile
python
def bed(args): """ %prog bed contigfile Prints out the contigs and their associated reads. """ p = OptionParser(main.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) contigfile, = args bedfile = contigfile.rsplit(".", 1)[0] + ".bed" fw = open(bedfile, "w") c = ContigFile(contigfile) for rec in c.iter_records(): for r in rec.reads: print(r.bedline, file=fw) logging.debug("File written to `{0}`.".format(bedfile)) return bedfile
[ "def", "bed", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "main", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "1", ":", "sys", ".", "exit", "(", "not", ...
%prog bed contigfile Prints out the contigs and their associated reads.
[ "%prog", "bed", "contigfile" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/contig.py#L149-L172
train
200,762
tanghaibao/jcvi
jcvi/apps/tesseract.py
get_errors
def get_errors(error_string): ''' returns all lines in the error_string that start with the string "error" ''' lines = error_string.splitlines() error_lines = tuple(line for line in lines if line.find('Error') >= 0) if len(error_lines) > 0: return '\n'.join(error_lines) else: return error_string.strip()
python
def get_errors(error_string): ''' returns all lines in the error_string that start with the string "error" ''' lines = error_string.splitlines() error_lines = tuple(line for line in lines if line.find('Error') >= 0) if len(error_lines) > 0: return '\n'.join(error_lines) else: return error_string.strip()
[ "def", "get_errors", "(", "error_string", ")", ":", "lines", "=", "error_string", ".", "splitlines", "(", ")", "error_lines", "=", "tuple", "(", "line", "for", "line", "in", "lines", "if", "line", ".", "find", "(", "'Error'", ")", ">=", "0", ")", "if",...
returns all lines in the error_string that start with the string "error"
[ "returns", "all", "lines", "in", "the", "error_string", "that", "start", "with", "the", "string", "error" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/tesseract.py#L98-L109
train
200,763
tanghaibao/jcvi
jcvi/apps/tesseract.py
tempnam
def tempnam(): ''' returns a temporary file-name ''' # prevent os.tmpname from printing an error... stderr = sys.stderr try: sys.stderr = cStringIO.StringIO() return os.tempnam(None, 'tess_') finally: sys.stderr = stderr
python
def tempnam(): ''' returns a temporary file-name ''' # prevent os.tmpname from printing an error... stderr = sys.stderr try: sys.stderr = cStringIO.StringIO() return os.tempnam(None, 'tess_') finally: sys.stderr = stderr
[ "def", "tempnam", "(", ")", ":", "# prevent os.tmpname from printing an error...", "stderr", "=", "sys", ".", "stderr", "try", ":", "sys", ".", "stderr", "=", "cStringIO", ".", "StringIO", "(", ")", "return", "os", ".", "tempnam", "(", "None", ",", "'tess_'"...
returns a temporary file-name
[ "returns", "a", "temporary", "file", "-", "name" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/tesseract.py#L112-L121
train
200,764
tanghaibao/jcvi
jcvi/apps/tesseract.py
image_to_string
def image_to_string(image, lang=None, boxes=False): ''' Runs tesseract on the specified image. First, the image is written to disk, and then the tesseract command is run on the image. Resseract's result is read, and the temporary files are erased. ''' input_file_name = '%s.bmp' % tempnam() output_file_name_base = tempnam() if not boxes: output_file_name = '%s.txt' % output_file_name_base else: output_file_name = '%s.box' % output_file_name_base try: image.save(input_file_name) status, error_string = run_tesseract(input_file_name, output_file_name_base, lang=lang, boxes=boxes) if status: errors = get_errors(error_string) raise TesseractError(status, errors) f = file(output_file_name) try: return f.read().strip() finally: f.close() finally: cleanup(input_file_name) cleanup(output_file_name)
python
def image_to_string(image, lang=None, boxes=False): ''' Runs tesseract on the specified image. First, the image is written to disk, and then the tesseract command is run on the image. Resseract's result is read, and the temporary files are erased. ''' input_file_name = '%s.bmp' % tempnam() output_file_name_base = tempnam() if not boxes: output_file_name = '%s.txt' % output_file_name_base else: output_file_name = '%s.box' % output_file_name_base try: image.save(input_file_name) status, error_string = run_tesseract(input_file_name, output_file_name_base, lang=lang, boxes=boxes) if status: errors = get_errors(error_string) raise TesseractError(status, errors) f = file(output_file_name) try: return f.read().strip() finally: f.close() finally: cleanup(input_file_name) cleanup(output_file_name)
[ "def", "image_to_string", "(", "image", ",", "lang", "=", "None", ",", "boxes", "=", "False", ")", ":", "input_file_name", "=", "'%s.bmp'", "%", "tempnam", "(", ")", "output_file_name_base", "=", "tempnam", "(", ")", "if", "not", "boxes", ":", "output_file...
Runs tesseract on the specified image. First, the image is written to disk, and then the tesseract command is run on the image. Resseract's result is read, and the temporary files are erased.
[ "Runs", "tesseract", "on", "the", "specified", "image", ".", "First", "the", "image", "is", "written", "to", "disk", "and", "then", "the", "tesseract", "command", "is", "run", "on", "the", "image", ".", "Resseract", "s", "result", "is", "read", "and", "t...
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/tesseract.py#L124-L154
train
200,765
tanghaibao/jcvi
jcvi/variation/delly.py
mitosomatic
def mitosomatic(args): """ %prog mitosomatic t.piledriver Find mito mosaic somatic mutations in piledriver results. """ import pandas as pd p = OptionParser(mitosomatic.__doc__) p.add_option("--minaf", default=.005, type="float", help="Minimum allele fraction") p.add_option("--maxaf", default=.1, type="float", help="Maximum allele fraction") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) df, = args af_file = df.rsplit(".", 1)[0] + ".af" fw = open(af_file, "w") df = pd.read_csv(df, sep="\t") for i, row in df.iterrows(): na = row["num_A"] nt = row["num_T"] nc = row["num_C"] ng = row["num_G"] nd = row["num_D"] ni = row["num_I"] depth = row["depth"] #major, minor = sorted([na, nt, nc, ng], reverse=True)[:2] #af = minor * 1. / (major + minor) af = (nd + ni) * 1. / depth if not (opts.minaf <= af <= opts.maxaf): continue print("{}\t{}\t{:.6f}".format(row["chrom"], row["start"], af), file=fw) fw.close() logging.debug("Allele freq written to `{}`".format(af_file))
python
def mitosomatic(args): """ %prog mitosomatic t.piledriver Find mito mosaic somatic mutations in piledriver results. """ import pandas as pd p = OptionParser(mitosomatic.__doc__) p.add_option("--minaf", default=.005, type="float", help="Minimum allele fraction") p.add_option("--maxaf", default=.1, type="float", help="Maximum allele fraction") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) df, = args af_file = df.rsplit(".", 1)[0] + ".af" fw = open(af_file, "w") df = pd.read_csv(df, sep="\t") for i, row in df.iterrows(): na = row["num_A"] nt = row["num_T"] nc = row["num_C"] ng = row["num_G"] nd = row["num_D"] ni = row["num_I"] depth = row["depth"] #major, minor = sorted([na, nt, nc, ng], reverse=True)[:2] #af = minor * 1. / (major + minor) af = (nd + ni) * 1. / depth if not (opts.minaf <= af <= opts.maxaf): continue print("{}\t{}\t{:.6f}".format(row["chrom"], row["start"], af), file=fw) fw.close() logging.debug("Allele freq written to `{}`".format(af_file))
[ "def", "mitosomatic", "(", "args", ")", ":", "import", "pandas", "as", "pd", "p", "=", "OptionParser", "(", "mitosomatic", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--minaf\"", ",", "default", "=", ".005", ",", "type", "=", "\"float\"", ",", ...
%prog mitosomatic t.piledriver Find mito mosaic somatic mutations in piledriver results.
[ "%prog", "mitosomatic", "t", ".", "piledriver" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/variation/delly.py#L75-L113
train
200,766
tanghaibao/jcvi
jcvi/variation/delly.py
bed
def bed(args): """ %prog bed del.txt Convert `del.txt` to BED format. DELLY manual here: <http://www.embl.de/~rausch/delly.html> Deletion: chr, start, end, size, #supporting_pairs, avg._mapping_quality, deletion_id chr1, 10180, 10509, 329, 75, 15.8667, Deletion_Sample_00000000 """ p = OptionParser(bed.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) delt, = args dt = Delly(delt) dt.write_bed("del.bed")
python
def bed(args): """ %prog bed del.txt Convert `del.txt` to BED format. DELLY manual here: <http://www.embl.de/~rausch/delly.html> Deletion: chr, start, end, size, #supporting_pairs, avg._mapping_quality, deletion_id chr1, 10180, 10509, 329, 75, 15.8667, Deletion_Sample_00000000 """ p = OptionParser(bed.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) delt, = args dt = Delly(delt) dt.write_bed("del.bed")
[ "def", "bed", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "bed", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "1", ":", "sys", ".", "exit", "(", "not", ...
%prog bed del.txt Convert `del.txt` to BED format. DELLY manual here: <http://www.embl.de/~rausch/delly.html> Deletion: chr, start, end, size, #supporting_pairs, avg._mapping_quality, deletion_id chr1, 10180, 10509, 329, 75, 15.8667, Deletion_Sample_00000000
[ "%prog", "bed", "del", ".", "txt" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/variation/delly.py#L116-L135
train
200,767
tanghaibao/jcvi
jcvi/variation/delly.py
mito
def mito(args): """ %prog mito chrM.fa input.bam Identify mitochondrial deletions. """ p = OptionParser(mito.__doc__) p.set_aws_opts(store="hli-mv-data-science/htang/mito-deletions") p.add_option("--realignonly", default=False, action="store_true", help="Realign only") p.add_option("--svonly", default=False, action="store_true", help="Run Realign => SV calls only") p.add_option("--support", default=1, type="int", help="Minimum number of supporting reads") p.set_home("speedseq", default="/mnt/software/speedseq/bin") p.set_cpus() opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) chrMfa, bamfile = args store = opts.output_path cleanup = not opts.nocleanup if not op.exists(chrMfa): logging.debug("File `{}` missing. Exiting.".format(chrMfa)) return chrMfai = chrMfa + ".fai" if not op.exists(chrMfai): cmd = "samtools index {}".format(chrMfa) sh(cmd) if not bamfile.endswith(".bam"): bamfiles = [x.strip() for x in open(bamfile)] else: bamfiles = [bamfile] if store: computed = ls_s3(store) computed = [op.basename(x).split('.')[0] for x in computed if x.endswith(".depth")] remaining_samples = [x for x in bamfiles if op.basename(x).split(".")[0] not in computed] logging.debug("Already computed on `{}`: {}". format(store, len(bamfiles) - len(remaining_samples))) bamfiles = remaining_samples logging.debug("Total samples: {}".format(len(bamfiles))) for bamfile in bamfiles: run_mito(chrMfa, bamfile, opts, realignonly=opts.realignonly, svonly=opts.svonly, store=store, cleanup=cleanup)
python
def mito(args): """ %prog mito chrM.fa input.bam Identify mitochondrial deletions. """ p = OptionParser(mito.__doc__) p.set_aws_opts(store="hli-mv-data-science/htang/mito-deletions") p.add_option("--realignonly", default=False, action="store_true", help="Realign only") p.add_option("--svonly", default=False, action="store_true", help="Run Realign => SV calls only") p.add_option("--support", default=1, type="int", help="Minimum number of supporting reads") p.set_home("speedseq", default="/mnt/software/speedseq/bin") p.set_cpus() opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) chrMfa, bamfile = args store = opts.output_path cleanup = not opts.nocleanup if not op.exists(chrMfa): logging.debug("File `{}` missing. Exiting.".format(chrMfa)) return chrMfai = chrMfa + ".fai" if not op.exists(chrMfai): cmd = "samtools index {}".format(chrMfa) sh(cmd) if not bamfile.endswith(".bam"): bamfiles = [x.strip() for x in open(bamfile)] else: bamfiles = [bamfile] if store: computed = ls_s3(store) computed = [op.basename(x).split('.')[0] for x in computed if x.endswith(".depth")] remaining_samples = [x for x in bamfiles if op.basename(x).split(".")[0] not in computed] logging.debug("Already computed on `{}`: {}". format(store, len(bamfiles) - len(remaining_samples))) bamfiles = remaining_samples logging.debug("Total samples: {}".format(len(bamfiles))) for bamfile in bamfiles: run_mito(chrMfa, bamfile, opts, realignonly=opts.realignonly, svonly=opts.svonly, store=store, cleanup=cleanup)
[ "def", "mito", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "mito", ".", "__doc__", ")", "p", ".", "set_aws_opts", "(", "store", "=", "\"hli-mv-data-science/htang/mito-deletions\"", ")", "p", ".", "add_option", "(", "\"--realignonly\"", ",", "default"...
%prog mito chrM.fa input.bam Identify mitochondrial deletions.
[ "%prog", "mito", "chrM", ".", "fa", "input", ".", "bam" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/variation/delly.py#L177-L233
train
200,768
tanghaibao/jcvi
jcvi/apps/gbsubmit.py
fcs
def fcs(args): """ %prog fcs fcsfile Process the results from Genbank contaminant screen. An example of the file looks like: contig name, length, span(s), apparent source contig0746 11760 1..141 vector contig0751 14226 13476..14226 vector contig0800 124133 30512..30559 primer/adapter """ p = OptionParser(fcs.__doc__) p.add_option("--cutoff", default=200, help="Skip small components less than [default: %default]") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) fcsfile, = args cutoff = opts.cutoff fp = open(fcsfile) for row in fp: if row[0] == "#": continue sep = "\t" if "\t" in row else None atoms = row.rstrip().split(sep, 3) contig, length = atoms[:2] length = int(length) label = atoms[-1] label = label.replace(" ", "_") if len(atoms) == 3: ranges = "{0}..{1}".format(1, length) else: assert len(atoms) == 4 ranges = atoms[2] for ab in ranges.split(","): a, b = ab.split("..") a, b = int(a), int(b) assert a <= b ahang = a - 1 bhang = length - b if ahang < cutoff: a = 1 if bhang < cutoff: b = length print("\t".join(str(x) for x in (contig, a - 1, b, label)))
python
def fcs(args): """ %prog fcs fcsfile Process the results from Genbank contaminant screen. An example of the file looks like: contig name, length, span(s), apparent source contig0746 11760 1..141 vector contig0751 14226 13476..14226 vector contig0800 124133 30512..30559 primer/adapter """ p = OptionParser(fcs.__doc__) p.add_option("--cutoff", default=200, help="Skip small components less than [default: %default]") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) fcsfile, = args cutoff = opts.cutoff fp = open(fcsfile) for row in fp: if row[0] == "#": continue sep = "\t" if "\t" in row else None atoms = row.rstrip().split(sep, 3) contig, length = atoms[:2] length = int(length) label = atoms[-1] label = label.replace(" ", "_") if len(atoms) == 3: ranges = "{0}..{1}".format(1, length) else: assert len(atoms) == 4 ranges = atoms[2] for ab in ranges.split(","): a, b = ab.split("..") a, b = int(a), int(b) assert a <= b ahang = a - 1 bhang = length - b if ahang < cutoff: a = 1 if bhang < cutoff: b = length print("\t".join(str(x) for x in (contig, a - 1, b, label)))
[ "def", "fcs", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "fcs", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--cutoff\"", ",", "default", "=", "200", ",", "help", "=", "\"Skip small components less than [default: %default]\"", ")", "opts", ...
%prog fcs fcsfile Process the results from Genbank contaminant screen. An example of the file looks like: contig name, length, span(s), apparent source contig0746 11760 1..141 vector contig0751 14226 13476..14226 vector contig0800 124133 30512..30559 primer/adapter
[ "%prog", "fcs", "fcsfile" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/gbsubmit.py#L125-L175
train
200,769
tanghaibao/jcvi
jcvi/apps/gbsubmit.py
asn
def asn(args): """ %prog asn asnfiles Mainly to get this block, and extract `str` field: general { db "TIGR" , tag str "mtg2_12952" } , genbank { accession "AC148996" , """ from jcvi.formats.base import must_open p = OptionParser(asn.__doc__) p.set_outfile() opts, args = p.parse_args(args) if len(args) < 1: sys.exit(not p.print_help()) fw = must_open(opts.outfile, "w") for asnfile in args: fp = open(asnfile) ingeneralblock = False ingenbankblock = False gb, name = None, None for row in fp: if row.strip() == "": continue tag = row.split()[0] if tag == "general": ingeneralblock = True if ingeneralblock and tag == "str": if name is None: # Only allow first assignment name = row.split("\"")[1] ingeneralblock = False if tag == "genbank": ingenbankblock = True if ingenbankblock and tag == "accession": if gb is None: gb = row.split("\"")[1] ingenbankblock = False assert gb and name print("{0}\t{1}".format(gb, name), file=fw)
python
def asn(args): """ %prog asn asnfiles Mainly to get this block, and extract `str` field: general { db "TIGR" , tag str "mtg2_12952" } , genbank { accession "AC148996" , """ from jcvi.formats.base import must_open p = OptionParser(asn.__doc__) p.set_outfile() opts, args = p.parse_args(args) if len(args) < 1: sys.exit(not p.print_help()) fw = must_open(opts.outfile, "w") for asnfile in args: fp = open(asnfile) ingeneralblock = False ingenbankblock = False gb, name = None, None for row in fp: if row.strip() == "": continue tag = row.split()[0] if tag == "general": ingeneralblock = True if ingeneralblock and tag == "str": if name is None: # Only allow first assignment name = row.split("\"")[1] ingeneralblock = False if tag == "genbank": ingenbankblock = True if ingenbankblock and tag == "accession": if gb is None: gb = row.split("\"")[1] ingenbankblock = False assert gb and name print("{0}\t{1}".format(gb, name), file=fw)
[ "def", "asn", "(", "args", ")", ":", "from", "jcvi", ".", "formats", ".", "base", "import", "must_open", "p", "=", "OptionParser", "(", "asn", ".", "__doc__", ")", "p", ".", "set_outfile", "(", ")", "opts", ",", "args", "=", "p", ".", "parse_args", ...
%prog asn asnfiles Mainly to get this block, and extract `str` field: general { db "TIGR" , tag str "mtg2_12952" } , genbank { accession "AC148996" ,
[ "%prog", "asn", "asnfiles" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/gbsubmit.py#L178-L227
train
200,770
tanghaibao/jcvi
jcvi/apps/gbsubmit.py
htgnew
def htgnew(args): """ %prog htgnew fastafile phasefile template.sbt Prepare sqnfiles for submitting new Genbank HTG records. `fastafile` contains the sequences. `phasefile` contains the phase information, it is a two column file: mth2-45h12 3 `template.sbt` is the Genbank submission template. This function is simpler than htg, since the record names have not be assigned yet (so less bookkeeping). """ from jcvi.formats.fasta import sequin p = OptionParser(htgnew.__doc__) p.add_option("--comment", default="", help="Comments for this submission [default: %default]") opts, args = p.parse_args(args) if len(args) != 3: sys.exit(not p.print_help()) fastafile, phasefile, sbtfile = args comment = opts.comment fastadir = "fasta" sqndir = "sqn" mkdir(fastadir) mkdir(sqndir) cmd = "faSplit byname {0} {1}/".format(fastafile, fastadir) sh(cmd, outfile="/dev/null", errfile="/dev/null") acmd = 'tbl2asn -a z -p fasta -r {sqndir}' acmd += ' -i {splitfile} -t {sbtfile} -C tigr' acmd += ' -j "[tech=htgs {phase}] [organism=Medicago truncatula] [strain=A17]"' acmd += ' -o {sqndir}/{accession_nv}.sqn -V Vbr' acmd += ' -y "{comment}" -W T -T T' nupdated = 0 for row in open(phasefile): name, phase = row.split()[:2] fafile = op.join(fastadir, name + ".fa") cloneopt = "--clone={0}".format(name) splitfile, gaps = sequin([fafile, cloneopt]) splitfile = op.basename(splitfile) accession = accession_nv = name phase = int(phase) assert phase in (1, 2, 3) cmd = acmd.format(accession_nv=accession_nv, sqndir=sqndir, sbtfile=sbtfile, splitfile=splitfile, phase=phase, comment=comment) sh(cmd) verify_sqn(sqndir, accession) nupdated += 1 print("A total of {0} records updated.".format(nupdated), file=sys.stderr)
python
def htgnew(args): """ %prog htgnew fastafile phasefile template.sbt Prepare sqnfiles for submitting new Genbank HTG records. `fastafile` contains the sequences. `phasefile` contains the phase information, it is a two column file: mth2-45h12 3 `template.sbt` is the Genbank submission template. This function is simpler than htg, since the record names have not be assigned yet (so less bookkeeping). """ from jcvi.formats.fasta import sequin p = OptionParser(htgnew.__doc__) p.add_option("--comment", default="", help="Comments for this submission [default: %default]") opts, args = p.parse_args(args) if len(args) != 3: sys.exit(not p.print_help()) fastafile, phasefile, sbtfile = args comment = opts.comment fastadir = "fasta" sqndir = "sqn" mkdir(fastadir) mkdir(sqndir) cmd = "faSplit byname {0} {1}/".format(fastafile, fastadir) sh(cmd, outfile="/dev/null", errfile="/dev/null") acmd = 'tbl2asn -a z -p fasta -r {sqndir}' acmd += ' -i {splitfile} -t {sbtfile} -C tigr' acmd += ' -j "[tech=htgs {phase}] [organism=Medicago truncatula] [strain=A17]"' acmd += ' -o {sqndir}/{accession_nv}.sqn -V Vbr' acmd += ' -y "{comment}" -W T -T T' nupdated = 0 for row in open(phasefile): name, phase = row.split()[:2] fafile = op.join(fastadir, name + ".fa") cloneopt = "--clone={0}".format(name) splitfile, gaps = sequin([fafile, cloneopt]) splitfile = op.basename(splitfile) accession = accession_nv = name phase = int(phase) assert phase in (1, 2, 3) cmd = acmd.format(accession_nv=accession_nv, sqndir=sqndir, sbtfile=sbtfile, splitfile=splitfile, phase=phase, comment=comment) sh(cmd) verify_sqn(sqndir, accession) nupdated += 1 print("A total of {0} records updated.".format(nupdated), file=sys.stderr)
[ "def", "htgnew", "(", "args", ")", ":", "from", "jcvi", ".", "formats", ".", "fasta", "import", "sequin", "p", "=", "OptionParser", "(", "htgnew", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--comment\"", ",", "default", "=", "\"\"", ",", "hel...
%prog htgnew fastafile phasefile template.sbt Prepare sqnfiles for submitting new Genbank HTG records. `fastafile` contains the sequences. `phasefile` contains the phase information, it is a two column file: mth2-45h12 3 `template.sbt` is the Genbank submission template. This function is simpler than htg, since the record names have not be assigned yet (so less bookkeeping).
[ "%prog", "htgnew", "fastafile", "phasefile", "template", ".", "sbt" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/gbsubmit.py#L240-L303
train
200,771
tanghaibao/jcvi
jcvi/apps/gbsubmit.py
convert_96_to_384
def convert_96_to_384(c96, quad, nrows=Nrows, ncols=Ncols): """ Convert the 96-well number and quad number to 384-well number >>> convert_96_to_384("B02", 1) 'C3' >>> convert_96_to_384("H09", 4) 'P18' """ rows, cols = get_rows_cols() plate, splate = get_plate() n96 = rows.index(c96[0]) * ncols / 2 + int(c96[1:]) q = "{0:02d}{1}".format(n96, "ABCD"[quad - 1]) return splate[q]
python
def convert_96_to_384(c96, quad, nrows=Nrows, ncols=Ncols): """ Convert the 96-well number and quad number to 384-well number >>> convert_96_to_384("B02", 1) 'C3' >>> convert_96_to_384("H09", 4) 'P18' """ rows, cols = get_rows_cols() plate, splate = get_plate() n96 = rows.index(c96[0]) * ncols / 2 + int(c96[1:]) q = "{0:02d}{1}".format(n96, "ABCD"[quad - 1]) return splate[q]
[ "def", "convert_96_to_384", "(", "c96", ",", "quad", ",", "nrows", "=", "Nrows", ",", "ncols", "=", "Ncols", ")", ":", "rows", ",", "cols", "=", "get_rows_cols", "(", ")", "plate", ",", "splate", "=", "get_plate", "(", ")", "n96", "=", "rows", ".", ...
Convert the 96-well number and quad number to 384-well number >>> convert_96_to_384("B02", 1) 'C3' >>> convert_96_to_384("H09", 4) 'P18'
[ "Convert", "the", "96", "-", "well", "number", "and", "quad", "number", "to", "384", "-", "well", "number" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/gbsubmit.py#L498-L512
train
200,772
tanghaibao/jcvi
jcvi/apps/gbsubmit.py
parse_description
def parse_description(s): """ Returns a dictionary based on the FASTA header, assuming JCVI data """ s = "".join(s.split()[1:]).replace("/", ";") a = parse_qs(s) return a
python
def parse_description(s): """ Returns a dictionary based on the FASTA header, assuming JCVI data """ s = "".join(s.split()[1:]).replace("/", ";") a = parse_qs(s) return a
[ "def", "parse_description", "(", "s", ")", ":", "s", "=", "\"\"", ".", "join", "(", "s", ".", "split", "(", ")", "[", "1", ":", "]", ")", ".", "replace", "(", "\"/\"", ",", "\";\"", ")", "a", "=", "parse_qs", "(", "s", ")", "return", "a" ]
Returns a dictionary based on the FASTA header, assuming JCVI data
[ "Returns", "a", "dictionary", "based", "on", "the", "FASTA", "header", "assuming", "JCVI", "data" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/gbsubmit.py#L535-L541
train
200,773
tanghaibao/jcvi
jcvi/assembly/bambus.py
scaffold
def scaffold(args): """ %prog scaffold ctgfasta reads1.fasta mapping1.bed reads2.fasta mapping2.bed ... Run BAMBUS on set of contigs, reads and read mappings. """ from jcvi.formats.base import FileMerger from jcvi.formats.bed import mates from jcvi.formats.contig import frombed from jcvi.formats.fasta import join from jcvi.utils.iter import grouper p = OptionParser(scaffold.__doc__) p.set_rclip(rclip=1) p.add_option("--conf", help="BAMBUS configuration file [default: %default]") p.add_option("--prefix", default=False, action="store_true", help="Only keep links between IDs with same prefix [default: %default]") opts, args = p.parse_args(args) nargs = len(args) if nargs < 3 or nargs % 2 != 1: sys.exit(not p.print_help()) rclip = opts.rclip ctgfasta = args[0] duos = list(grouper(args[1:], 2)) trios = [] for fastafile, bedfile in duos: prefix = bedfile.rsplit(".", 1)[0] matefile = prefix + ".mates" matebedfile = matefile + ".bed" if need_update(bedfile, [matefile, matebedfile]): matesopt = [bedfile, "--lib", "--nointra", "--rclip={0}".format(rclip), "--cutoff={0}".format(opts.cutoff)] if opts.prefix: matesopt += ["--prefix"] matefile, matebedfile = mates(matesopt) trios.append((fastafile, matebedfile, matefile)) # Merge the readfasta, bedfile and matefile bbfasta, bbbed, bbmate = "bambus.reads.fasta", "bambus.bed", "bambus.mates" for files, outfile in zip(zip(*trios), (bbfasta, bbbed, bbmate)): FileMerger(files, outfile=outfile).merge(checkexists=True) ctgfile = "bambus.contig" idsfile = "bambus.ids" frombedInputs = [bbbed, ctgfasta, bbfasta] if need_update(frombedInputs, ctgfile): frombed(frombedInputs) inputfasta = "bambus.contigs.fasta" singletonfasta = "bambus.singletons.fasta" cmd = "faSomeRecords {0} {1} ".format(ctgfasta, idsfile) sh(cmd + inputfasta) sh(cmd + singletonfasta + " -exclude") # Run bambus prefix = "bambus" cmd = "goBambus -c {0} -m {1} -o {2}".format(ctgfile, bbmate, prefix) if opts.conf: cmd += " -C {0}".format(opts.conf) sh(cmd) cmd = "untangle 
-e {0}.evidence.xml -s {0}.out.xml -o {0}.untangle.xml".\ format(prefix) sh(cmd) final = "final" cmd = "printScaff -e {0}.evidence.xml -s {0}.untangle.xml -l {0}.lib " \ "-merge -detail -oo -sum -o {1}".format(prefix, final) sh(cmd) oofile = final + ".oo" join([inputfasta, "--oo={0}".format(oofile)])
python
def scaffold(args): """ %prog scaffold ctgfasta reads1.fasta mapping1.bed reads2.fasta mapping2.bed ... Run BAMBUS on set of contigs, reads and read mappings. """ from jcvi.formats.base import FileMerger from jcvi.formats.bed import mates from jcvi.formats.contig import frombed from jcvi.formats.fasta import join from jcvi.utils.iter import grouper p = OptionParser(scaffold.__doc__) p.set_rclip(rclip=1) p.add_option("--conf", help="BAMBUS configuration file [default: %default]") p.add_option("--prefix", default=False, action="store_true", help="Only keep links between IDs with same prefix [default: %default]") opts, args = p.parse_args(args) nargs = len(args) if nargs < 3 or nargs % 2 != 1: sys.exit(not p.print_help()) rclip = opts.rclip ctgfasta = args[0] duos = list(grouper(args[1:], 2)) trios = [] for fastafile, bedfile in duos: prefix = bedfile.rsplit(".", 1)[0] matefile = prefix + ".mates" matebedfile = matefile + ".bed" if need_update(bedfile, [matefile, matebedfile]): matesopt = [bedfile, "--lib", "--nointra", "--rclip={0}".format(rclip), "--cutoff={0}".format(opts.cutoff)] if opts.prefix: matesopt += ["--prefix"] matefile, matebedfile = mates(matesopt) trios.append((fastafile, matebedfile, matefile)) # Merge the readfasta, bedfile and matefile bbfasta, bbbed, bbmate = "bambus.reads.fasta", "bambus.bed", "bambus.mates" for files, outfile in zip(zip(*trios), (bbfasta, bbbed, bbmate)): FileMerger(files, outfile=outfile).merge(checkexists=True) ctgfile = "bambus.contig" idsfile = "bambus.ids" frombedInputs = [bbbed, ctgfasta, bbfasta] if need_update(frombedInputs, ctgfile): frombed(frombedInputs) inputfasta = "bambus.contigs.fasta" singletonfasta = "bambus.singletons.fasta" cmd = "faSomeRecords {0} {1} ".format(ctgfasta, idsfile) sh(cmd + inputfasta) sh(cmd + singletonfasta + " -exclude") # Run bambus prefix = "bambus" cmd = "goBambus -c {0} -m {1} -o {2}".format(ctgfile, bbmate, prefix) if opts.conf: cmd += " -C {0}".format(opts.conf) sh(cmd) cmd = "untangle 
-e {0}.evidence.xml -s {0}.out.xml -o {0}.untangle.xml".\ format(prefix) sh(cmd) final = "final" cmd = "printScaff -e {0}.evidence.xml -s {0}.untangle.xml -l {0}.lib " \ "-merge -detail -oo -sum -o {1}".format(prefix, final) sh(cmd) oofile = final + ".oo" join([inputfasta, "--oo={0}".format(oofile)])
[ "def", "scaffold", "(", "args", ")", ":", "from", "jcvi", ".", "formats", ".", "base", "import", "FileMerger", "from", "jcvi", ".", "formats", ".", "bed", "import", "mates", "from", "jcvi", ".", "formats", ".", "contig", "import", "frombed", "from", "jcv...
%prog scaffold ctgfasta reads1.fasta mapping1.bed reads2.fasta mapping2.bed ... Run BAMBUS on set of contigs, reads and read mappings.
[ "%prog", "scaffold", "ctgfasta", "reads1", ".", "fasta", "mapping1", ".", "bed", "reads2", ".", "fasta", "mapping2", ".", "bed", "..." ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/bambus.py#L22-L99
train
200,774
tanghaibao/jcvi
jcvi/assembly/preprocess.py
diginorm
def diginorm(args): """ %prog diginorm fastqfile Run K-mer based normalization. Based on tutorial: <http://ged.msu.edu/angus/diginorm-2012/tutorial.html> Assume input is either an interleaved pairs file, or two separate files. To set up khmer: $ git clone git://github.com/ged-lab/screed.git $ git clone git://github.com/ged-lab/khmer.git $ cd screed $ python setup.py install $ cd ../khmer $ make test $ export PYTHONPATH=~/export/khmer """ from jcvi.formats.fastq import shuffle, pairinplace, split from jcvi.apps.base import getfilesize p = OptionParser(diginorm.__doc__) p.add_option("--single", default=False, action="store_true", help="Single end reads") p.add_option("--tablesize", help="Memory size") p.add_option("--npass", default="1", choices=("1", "2"), help="How many passes of normalization") p.set_depth(depth=50) p.set_home("khmer", default="/usr/local/bin/") opts, args = p.parse_args(args) if len(args) not in (1, 2): sys.exit(not p.print_help()) if len(args) == 2: fastq = shuffle(args + ["--tag"]) else: fastq, = args kh = opts.khmer_home depth = opts.depth PE = not opts.single sys.path.insert(0, op.join(kh, "python")) pf = fastq.rsplit(".", 1)[0] keepfile = fastq + ".keep" hashfile = pf + ".kh" mints = 10000000 ts = opts.tablesize or ((getfilesize(fastq) / 16 / mints + 1) * mints) norm_cmd = op.join(kh, "normalize-by-median.py") filt_cmd = op.join(kh, "filter-abund.py") if need_update(fastq, (hashfile, keepfile)): cmd = norm_cmd cmd += " -C {0} -k 20 -N 4 -x {1}".format(depth, ts) if PE: cmd += " -p" cmd += " -s {0} {1}".format(hashfile, fastq) sh(cmd) abundfiltfile = keepfile + ".abundfilt" if need_update((hashfile, keepfile), abundfiltfile): cmd = filt_cmd cmd += " {0} {1}".format(hashfile, keepfile) sh(cmd) if opts.npass == "1": seckeepfile = abundfiltfile else: seckeepfile = abundfiltfile + ".keep" if need_update(abundfiltfile, seckeepfile): cmd = norm_cmd cmd += " -C {0} -k 20 -N 4 -x {1}".format(depth - 10, ts / 2) cmd += " {0}".format(abundfiltfile) 
sh(cmd) if PE: pairsfile = pairinplace([seckeepfile, "--base={0}".format(pf + "_norm"), "--rclip=2"]) split([pairsfile])
python
def diginorm(args): """ %prog diginorm fastqfile Run K-mer based normalization. Based on tutorial: <http://ged.msu.edu/angus/diginorm-2012/tutorial.html> Assume input is either an interleaved pairs file, or two separate files. To set up khmer: $ git clone git://github.com/ged-lab/screed.git $ git clone git://github.com/ged-lab/khmer.git $ cd screed $ python setup.py install $ cd ../khmer $ make test $ export PYTHONPATH=~/export/khmer """ from jcvi.formats.fastq import shuffle, pairinplace, split from jcvi.apps.base import getfilesize p = OptionParser(diginorm.__doc__) p.add_option("--single", default=False, action="store_true", help="Single end reads") p.add_option("--tablesize", help="Memory size") p.add_option("--npass", default="1", choices=("1", "2"), help="How many passes of normalization") p.set_depth(depth=50) p.set_home("khmer", default="/usr/local/bin/") opts, args = p.parse_args(args) if len(args) not in (1, 2): sys.exit(not p.print_help()) if len(args) == 2: fastq = shuffle(args + ["--tag"]) else: fastq, = args kh = opts.khmer_home depth = opts.depth PE = not opts.single sys.path.insert(0, op.join(kh, "python")) pf = fastq.rsplit(".", 1)[0] keepfile = fastq + ".keep" hashfile = pf + ".kh" mints = 10000000 ts = opts.tablesize or ((getfilesize(fastq) / 16 / mints + 1) * mints) norm_cmd = op.join(kh, "normalize-by-median.py") filt_cmd = op.join(kh, "filter-abund.py") if need_update(fastq, (hashfile, keepfile)): cmd = norm_cmd cmd += " -C {0} -k 20 -N 4 -x {1}".format(depth, ts) if PE: cmd += " -p" cmd += " -s {0} {1}".format(hashfile, fastq) sh(cmd) abundfiltfile = keepfile + ".abundfilt" if need_update((hashfile, keepfile), abundfiltfile): cmd = filt_cmd cmd += " {0} {1}".format(hashfile, keepfile) sh(cmd) if opts.npass == "1": seckeepfile = abundfiltfile else: seckeepfile = abundfiltfile + ".keep" if need_update(abundfiltfile, seckeepfile): cmd = norm_cmd cmd += " -C {0} -k 20 -N 4 -x {1}".format(depth - 10, ts / 2) cmd += " {0}".format(abundfiltfile) 
sh(cmd) if PE: pairsfile = pairinplace([seckeepfile, "--base={0}".format(pf + "_norm"), "--rclip=2"]) split([pairsfile])
[ "def", "diginorm", "(", "args", ")", ":", "from", "jcvi", ".", "formats", ".", "fastq", "import", "shuffle", ",", "pairinplace", ",", "split", "from", "jcvi", ".", "apps", ".", "base", "import", "getfilesize", "p", "=", "OptionParser", "(", "diginorm", "...
%prog diginorm fastqfile Run K-mer based normalization. Based on tutorial: <http://ged.msu.edu/angus/diginorm-2012/tutorial.html> Assume input is either an interleaved pairs file, or two separate files. To set up khmer: $ git clone git://github.com/ged-lab/screed.git $ git clone git://github.com/ged-lab/khmer.git $ cd screed $ python setup.py install $ cd ../khmer $ make test $ export PYTHONPATH=~/export/khmer
[ "%prog", "diginorm", "fastqfile" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/preprocess.py#L77-L156
train
200,775
tanghaibao/jcvi
jcvi/assembly/preprocess.py
contamination
def contamination(args): """ %prog contamination Ecoli.fasta genome.fasta read.fastq Check read contamination on a folder of paired reads. Use bowtie2 to compare the reads against: 1. Ecoli.fsata - this will tell us the lower bound of contamination 2. genome.fasta - this will tell us the upper bound of contamination """ from jcvi.apps.bowtie import BowtieLogFile, align p = OptionParser(contamination.__doc__) p.set_firstN() opts, args = p.parse_args(args) if len(args) != 3: sys.exit(not p.print_help()) ecoli, genome, fq = args firstN_opt = "--firstN={0}".format(opts.firstN) samfile, logfile = align([ecoli, fq, firstN_opt]) bl = BowtieLogFile(logfile) lowerbound = bl.rate samfile, logfile = align([genome, fq, firstN_opt]) bl = BowtieLogFile(logfile) upperbound = 100 - bl.rate median = (lowerbound + upperbound) / 2 clogfile = fq + ".Ecoli" fw = open(clogfile, "w") lowerbound = "{0:.1f}".format(lowerbound) upperbound = "{0:.1f}".format(upperbound) median = "{0:.1f}".format(median) print("\t".join((fq, lowerbound, median, upperbound)), file=fw) print("{0}: Ecoli contamination rate {1}-{2}".\ format(fq, lowerbound, upperbound), file=sys.stderr) fw.close()
python
def contamination(args): """ %prog contamination Ecoli.fasta genome.fasta read.fastq Check read contamination on a folder of paired reads. Use bowtie2 to compare the reads against: 1. Ecoli.fsata - this will tell us the lower bound of contamination 2. genome.fasta - this will tell us the upper bound of contamination """ from jcvi.apps.bowtie import BowtieLogFile, align p = OptionParser(contamination.__doc__) p.set_firstN() opts, args = p.parse_args(args) if len(args) != 3: sys.exit(not p.print_help()) ecoli, genome, fq = args firstN_opt = "--firstN={0}".format(opts.firstN) samfile, logfile = align([ecoli, fq, firstN_opt]) bl = BowtieLogFile(logfile) lowerbound = bl.rate samfile, logfile = align([genome, fq, firstN_opt]) bl = BowtieLogFile(logfile) upperbound = 100 - bl.rate median = (lowerbound + upperbound) / 2 clogfile = fq + ".Ecoli" fw = open(clogfile, "w") lowerbound = "{0:.1f}".format(lowerbound) upperbound = "{0:.1f}".format(upperbound) median = "{0:.1f}".format(median) print("\t".join((fq, lowerbound, median, upperbound)), file=fw) print("{0}: Ecoli contamination rate {1}-{2}".\ format(fq, lowerbound, upperbound), file=sys.stderr) fw.close()
[ "def", "contamination", "(", "args", ")", ":", "from", "jcvi", ".", "apps", ".", "bowtie", "import", "BowtieLogFile", ",", "align", "p", "=", "OptionParser", "(", "contamination", ".", "__doc__", ")", "p", ".", "set_firstN", "(", ")", "opts", ",", "args"...
%prog contamination Ecoli.fasta genome.fasta read.fastq Check read contamination on a folder of paired reads. Use bowtie2 to compare the reads against: 1. Ecoli.fsata - this will tell us the lower bound of contamination 2. genome.fasta - this will tell us the upper bound of contamination
[ "%prog", "contamination", "Ecoli", ".", "fasta", "genome", ".", "fasta", "read", ".", "fastq" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/preprocess.py#L249-L287
train
200,776
tanghaibao/jcvi
jcvi/assembly/preprocess.py
alignextend
def alignextend(args): """ %prog alignextend ref.fasta read.1.fastq read.2.fastq Wrapper around AMOS alignextend. """ choices = "prepare,align,filter,rmdup,genreads".split(",") p = OptionParser(alignextend.__doc__) p.add_option("--nosuffix", default=False, action="store_true", help="Do not add /1/2 suffix to the read [default: %default]") p.add_option("--rc", default=False, action="store_true", help="Reverse complement the reads before alignment") p.add_option("--len", default=100, type="int", help="Extend to this length") p.add_option("--stage", default="prepare", choices=choices, help="Start from certain stage") p.add_option("--dup", default=10, type="int", help="Filter duplicates with coordinates within this distance") p.add_option("--maxdiff", default=1, type="int", help="Maximum number of differences") p.set_home("amos") p.set_cpus() opts, args = p.parse_args(args) if len(args) != 3: sys.exit(not p.print_help()) ref, r1, r2 = args pf = op.basename(r1).split(".")[0] cmd = op.join(opts.amos_home, "src/Experimental/alignextend.pl") if not opts.nosuffix: cmd += " -suffix" bwa_idx = "{0}.ref.fa.sa".format(pf) if not need_update(ref, bwa_idx): cmd += " -noindex" cmd += " -threads {0}".format(opts.cpus) offset = guessoffset([r1]) if offset == 64: cmd += " -I" if opts.rc: cmd += " -rc" cmd += " -allow -len {0} -dup {1}".format(opts.len, opts.dup) cmd += " -min {0} -max {1}".format(2 * opts.len, 20 * opts.len) cmd += " -maxdiff {0}".format(opts.maxdiff) cmd += " -stage {0}".format(opts.stage) cmd += " ".join(("", pf, ref, r1, r2)) sh(cmd)
python
def alignextend(args): """ %prog alignextend ref.fasta read.1.fastq read.2.fastq Wrapper around AMOS alignextend. """ choices = "prepare,align,filter,rmdup,genreads".split(",") p = OptionParser(alignextend.__doc__) p.add_option("--nosuffix", default=False, action="store_true", help="Do not add /1/2 suffix to the read [default: %default]") p.add_option("--rc", default=False, action="store_true", help="Reverse complement the reads before alignment") p.add_option("--len", default=100, type="int", help="Extend to this length") p.add_option("--stage", default="prepare", choices=choices, help="Start from certain stage") p.add_option("--dup", default=10, type="int", help="Filter duplicates with coordinates within this distance") p.add_option("--maxdiff", default=1, type="int", help="Maximum number of differences") p.set_home("amos") p.set_cpus() opts, args = p.parse_args(args) if len(args) != 3: sys.exit(not p.print_help()) ref, r1, r2 = args pf = op.basename(r1).split(".")[0] cmd = op.join(opts.amos_home, "src/Experimental/alignextend.pl") if not opts.nosuffix: cmd += " -suffix" bwa_idx = "{0}.ref.fa.sa".format(pf) if not need_update(ref, bwa_idx): cmd += " -noindex" cmd += " -threads {0}".format(opts.cpus) offset = guessoffset([r1]) if offset == 64: cmd += " -I" if opts.rc: cmd += " -rc" cmd += " -allow -len {0} -dup {1}".format(opts.len, opts.dup) cmd += " -min {0} -max {1}".format(2 * opts.len, 20 * opts.len) cmd += " -maxdiff {0}".format(opts.maxdiff) cmd += " -stage {0}".format(opts.stage) cmd += " ".join(("", pf, ref, r1, r2)) sh(cmd)
[ "def", "alignextend", "(", "args", ")", ":", "choices", "=", "\"prepare,align,filter,rmdup,genreads\"", ".", "split", "(", "\",\"", ")", "p", "=", "OptionParser", "(", "alignextend", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--nosuffix\"", ",", "def...
%prog alignextend ref.fasta read.1.fastq read.2.fastq Wrapper around AMOS alignextend.
[ "%prog", "alignextend", "ref", ".", "fasta", "read", ".", "1", ".", "fastq", "read", ".", "2", ".", "fastq" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/preprocess.py#L290-L336
train
200,777
tanghaibao/jcvi
jcvi/assembly/preprocess.py
hetsmooth
def hetsmooth(args): """ %prog hetsmooth reads_1.fq reads_2.fq jf-23_0 Wrapper against het-smooth. Below is the command used in het-smooth manual. $ het-smooth --kmer-len=23 --bottom-threshold=38 --top-threshold=220 --no-multibase-replacements --jellyfish-hash-file=23-mers.jf reads_1.fq reads_2.fq """ p = OptionParser(hetsmooth.__doc__) p.add_option("-K", default=23, type="int", help="K-mer size [default: %default]") p.add_option("-L", type="int", help="Bottom threshold, first min [default: %default]") p.add_option("-U", type="int", help="Top threshold, second min [default: %default]") opts, args = p.parse_args(args) if len(args) != 3: sys.exit(not p.print_help()) reads1fq, reads2fq, jfdb = args K = opts.K L = opts.L U = opts.U assert L is not None and U is not None, "Please specify -L and -U" cmd = "het-smooth --kmer-len={0}".format(K) cmd += " --bottom-threshold={0} --top-threshold={1}".format(L, U) cmd += " --no-multibase-replacements --jellyfish-hash-file={0}".format(jfdb) cmd += " --no-reads-log" cmd += " " + " ".join((reads1fq, reads2fq)) sh(cmd)
python
def hetsmooth(args): """ %prog hetsmooth reads_1.fq reads_2.fq jf-23_0 Wrapper against het-smooth. Below is the command used in het-smooth manual. $ het-smooth --kmer-len=23 --bottom-threshold=38 --top-threshold=220 --no-multibase-replacements --jellyfish-hash-file=23-mers.jf reads_1.fq reads_2.fq """ p = OptionParser(hetsmooth.__doc__) p.add_option("-K", default=23, type="int", help="K-mer size [default: %default]") p.add_option("-L", type="int", help="Bottom threshold, first min [default: %default]") p.add_option("-U", type="int", help="Top threshold, second min [default: %default]") opts, args = p.parse_args(args) if len(args) != 3: sys.exit(not p.print_help()) reads1fq, reads2fq, jfdb = args K = opts.K L = opts.L U = opts.U assert L is not None and U is not None, "Please specify -L and -U" cmd = "het-smooth --kmer-len={0}".format(K) cmd += " --bottom-threshold={0} --top-threshold={1}".format(L, U) cmd += " --no-multibase-replacements --jellyfish-hash-file={0}".format(jfdb) cmd += " --no-reads-log" cmd += " " + " ".join((reads1fq, reads2fq)) sh(cmd)
[ "def", "hetsmooth", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "hetsmooth", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"-K\"", ",", "default", "=", "23", ",", "type", "=", "\"int\"", ",", "help", "=", "\"K-mer size [default: %default]\"...
%prog hetsmooth reads_1.fq reads_2.fq jf-23_0 Wrapper against het-smooth. Below is the command used in het-smooth manual. $ het-smooth --kmer-len=23 --bottom-threshold=38 --top-threshold=220 --no-multibase-replacements --jellyfish-hash-file=23-mers.jf reads_1.fq reads_2.fq
[ "%prog", "hetsmooth", "reads_1", ".", "fq", "reads_2", ".", "fq", "jf", "-", "23_0" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/preprocess.py#L380-L415
train
200,778
tanghaibao/jcvi
jcvi/utils/range.py
range_intersect
def range_intersect(a, b, extend=0): """ Returns the intersection between two reanges. >>> range_intersect((30, 45), (55, 65)) >>> range_intersect((48, 65), (45, 55)) [48, 55] """ a_min, a_max = a if a_min > a_max: a_min, a_max = a_max, a_min b_min, b_max = b if b_min > b_max: b_min, b_max = b_max, b_min if a_max + extend < b_min or b_max + extend < a_min: return None i_min = max(a_min, b_min) i_max = min(a_max, b_max) if i_min > i_max + extend: return None return [i_min, i_max]
python
def range_intersect(a, b, extend=0): """ Returns the intersection between two reanges. >>> range_intersect((30, 45), (55, 65)) >>> range_intersect((48, 65), (45, 55)) [48, 55] """ a_min, a_max = a if a_min > a_max: a_min, a_max = a_max, a_min b_min, b_max = b if b_min > b_max: b_min, b_max = b_max, b_min if a_max + extend < b_min or b_max + extend < a_min: return None i_min = max(a_min, b_min) i_max = min(a_max, b_max) if i_min > i_max + extend: return None return [i_min, i_max]
[ "def", "range_intersect", "(", "a", ",", "b", ",", "extend", "=", "0", ")", ":", "a_min", ",", "a_max", "=", "a", "if", "a_min", ">", "a_max", ":", "a_min", ",", "a_max", "=", "a_max", ",", "a_min", "b_min", ",", "b_max", "=", "b", "if", "b_min",...
Returns the intersection between two reanges. >>> range_intersect((30, 45), (55, 65)) >>> range_intersect((48, 65), (45, 55)) [48, 55]
[ "Returns", "the", "intersection", "between", "two", "reanges", "." ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/utils/range.py#L36-L58
train
200,779
tanghaibao/jcvi
jcvi/utils/range.py
range_overlap
def range_overlap(a, b, ratio=False): """ Returns whether two ranges overlap. Set percentage=True returns overlap ratio over the shorter range of the two. >>> range_overlap(("1", 30, 45), ("1", 41, 55)) 5 >>> range_overlap(("1", 21, 45), ("1", 41, 75), ratio=True) 0.2 >>> range_overlap(("1", 30, 45), ("1", 15, 55)) 16 >>> range_overlap(("1", 30, 45), ("1", 15, 55), ratio=True) 1.0 >>> range_overlap(("1", 30, 45), ("1", 57, 68)) 0 >>> range_overlap(("1", 30, 45), ("2", 42, 55)) 0 >>> range_overlap(("1", 30, 45), ("2", 42, 55), ratio=True) 0.0 """ a_chr, a_min, a_max = a b_chr, b_min, b_max = b a_min, a_max = sorted((a_min, a_max)) b_min, b_max = sorted((b_min, b_max)) shorter = min((a_max - a_min), (b_max - b_min)) + 1 # must be on the same chromosome if a_chr != b_chr: ov = 0 else: ov = min(shorter, (a_max - b_min + 1), (b_max - a_min + 1)) ov = max(ov, 0) if ratio: ov /= float(shorter) return ov
python
def range_overlap(a, b, ratio=False): """ Returns whether two ranges overlap. Set percentage=True returns overlap ratio over the shorter range of the two. >>> range_overlap(("1", 30, 45), ("1", 41, 55)) 5 >>> range_overlap(("1", 21, 45), ("1", 41, 75), ratio=True) 0.2 >>> range_overlap(("1", 30, 45), ("1", 15, 55)) 16 >>> range_overlap(("1", 30, 45), ("1", 15, 55), ratio=True) 1.0 >>> range_overlap(("1", 30, 45), ("1", 57, 68)) 0 >>> range_overlap(("1", 30, 45), ("2", 42, 55)) 0 >>> range_overlap(("1", 30, 45), ("2", 42, 55), ratio=True) 0.0 """ a_chr, a_min, a_max = a b_chr, b_min, b_max = b a_min, a_max = sorted((a_min, a_max)) b_min, b_max = sorted((b_min, b_max)) shorter = min((a_max - a_min), (b_max - b_min)) + 1 # must be on the same chromosome if a_chr != b_chr: ov = 0 else: ov = min(shorter, (a_max - b_min + 1), (b_max - a_min + 1)) ov = max(ov, 0) if ratio: ov /= float(shorter) return ov
[ "def", "range_overlap", "(", "a", ",", "b", ",", "ratio", "=", "False", ")", ":", "a_chr", ",", "a_min", ",", "a_max", "=", "a", "b_chr", ",", "b_min", ",", "b_max", "=", "b", "a_min", ",", "a_max", "=", "sorted", "(", "(", "a_min", ",", "a_max",...
Returns whether two ranges overlap. Set percentage=True returns overlap ratio over the shorter range of the two. >>> range_overlap(("1", 30, 45), ("1", 41, 55)) 5 >>> range_overlap(("1", 21, 45), ("1", 41, 75), ratio=True) 0.2 >>> range_overlap(("1", 30, 45), ("1", 15, 55)) 16 >>> range_overlap(("1", 30, 45), ("1", 15, 55), ratio=True) 1.0 >>> range_overlap(("1", 30, 45), ("1", 57, 68)) 0 >>> range_overlap(("1", 30, 45), ("2", 42, 55)) 0 >>> range_overlap(("1", 30, 45), ("2", 42, 55), ratio=True) 0.0
[ "Returns", "whether", "two", "ranges", "overlap", ".", "Set", "percentage", "=", "True", "returns", "overlap", "ratio", "over", "the", "shorter", "range", "of", "the", "two", "." ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/utils/range.py#L80-L113
train
200,780
tanghaibao/jcvi
jcvi/utils/range.py
range_distance
def range_distance(a, b, distmode='ss'): """ Returns the distance between two ranges. distmode is ss, se, es, ee and sets the place on read one and two to measure the distance (s = start, e = end) >>> range_distance(("1", 30, 45, '+'), ("1", 45, 55, '+')) (26, '++') >>> range_distance(("1", 30, 45, '-'), ("1", 57, 68, '-')) (39, '--') >>> range_distance(("1", 30, 42, '-'), ("1", 45, 55, '+')) (26, '-+') >>> range_distance(("1", 30, 42, '+'), ("1", 45, 55, '-'), distmode='ee') (2, '+-') """ assert distmode in ('ss', 'ee') a_chr, a_min, a_max, a_strand = a b_chr, b_min, b_max, b_strand = b # must be on the same chromosome if a_chr != b_chr: dist = -1 #elif range_overlap(a[:3], b[:3]): # dist = 0 else: # If the two ranges do not overlap, check stranded-ness and distance if a_min > b_min: a_min, b_min = b_min, a_min a_max, b_max = b_max, a_max a_strand, b_strand = b_strand, a_strand if distmode == "ss": dist = b_max - a_min + 1 elif distmode == "ee": dist = b_min - a_max - 1 orientation = a_strand + b_strand return dist, orientation
python
def range_distance(a, b, distmode='ss'): """ Returns the distance between two ranges. distmode is ss, se, es, ee and sets the place on read one and two to measure the distance (s = start, e = end) >>> range_distance(("1", 30, 45, '+'), ("1", 45, 55, '+')) (26, '++') >>> range_distance(("1", 30, 45, '-'), ("1", 57, 68, '-')) (39, '--') >>> range_distance(("1", 30, 42, '-'), ("1", 45, 55, '+')) (26, '-+') >>> range_distance(("1", 30, 42, '+'), ("1", 45, 55, '-'), distmode='ee') (2, '+-') """ assert distmode in ('ss', 'ee') a_chr, a_min, a_max, a_strand = a b_chr, b_min, b_max, b_strand = b # must be on the same chromosome if a_chr != b_chr: dist = -1 #elif range_overlap(a[:3], b[:3]): # dist = 0 else: # If the two ranges do not overlap, check stranded-ness and distance if a_min > b_min: a_min, b_min = b_min, a_min a_max, b_max = b_max, a_max a_strand, b_strand = b_strand, a_strand if distmode == "ss": dist = b_max - a_min + 1 elif distmode == "ee": dist = b_min - a_max - 1 orientation = a_strand + b_strand return dist, orientation
[ "def", "range_distance", "(", "a", ",", "b", ",", "distmode", "=", "'ss'", ")", ":", "assert", "distmode", "in", "(", "'ss'", ",", "'ee'", ")", "a_chr", ",", "a_min", ",", "a_max", ",", "a_strand", "=", "a", "b_chr", ",", "b_min", ",", "b_max", ","...
Returns the distance between two ranges. distmode is ss, se, es, ee and sets the place on read one and two to measure the distance (s = start, e = end) >>> range_distance(("1", 30, 45, '+'), ("1", 45, 55, '+')) (26, '++') >>> range_distance(("1", 30, 45, '-'), ("1", 57, 68, '-')) (39, '--') >>> range_distance(("1", 30, 42, '-'), ("1", 45, 55, '+')) (26, '-+') >>> range_distance(("1", 30, 42, '+'), ("1", 45, 55, '-'), distmode='ee') (2, '+-')
[ "Returns", "the", "distance", "between", "two", "ranges", "." ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/utils/range.py#L116-L155
train
200,781
tanghaibao/jcvi
jcvi/utils/range.py
range_minmax
def range_minmax(ranges): """ Returns the span of a collection of ranges where start is the smallest of all starts, and end is the largest of all ends. >>> ranges = [(30, 45), (40, 50), (10, 100)] >>> range_minmax(ranges) (10, 100) """ rmin = min(ranges)[0] rmax = max(ranges, key=lambda x: x[1])[1] return rmin, rmax
python
def range_minmax(ranges): """ Returns the span of a collection of ranges where start is the smallest of all starts, and end is the largest of all ends. >>> ranges = [(30, 45), (40, 50), (10, 100)] >>> range_minmax(ranges) (10, 100) """ rmin = min(ranges)[0] rmax = max(ranges, key=lambda x: x[1])[1] return rmin, rmax
[ "def", "range_minmax", "(", "ranges", ")", ":", "rmin", "=", "min", "(", "ranges", ")", "[", "0", "]", "rmax", "=", "max", "(", "ranges", ",", "key", "=", "lambda", "x", ":", "x", "[", "1", "]", ")", "[", "1", "]", "return", "rmin", ",", "rma...
Returns the span of a collection of ranges where start is the smallest of all starts, and end is the largest of all ends. >>> ranges = [(30, 45), (40, 50), (10, 100)] >>> range_minmax(ranges) (10, 100)
[ "Returns", "the", "span", "of", "a", "collection", "of", "ranges", "where", "start", "is", "the", "smallest", "of", "all", "starts", "and", "end", "is", "the", "largest", "of", "all", "ends", "." ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/utils/range.py#L158-L169
train
200,782
tanghaibao/jcvi
jcvi/utils/range.py
range_interleave
def range_interleave(ranges, sizes={}, empty=False): """ Returns the ranges in between the given ranges. >>> ranges = [("1", 30, 40), ("1", 45, 50), ("1", 10, 30)] >>> range_interleave(ranges) [('1', 41, 44)] >>> ranges = [("1", 30, 40), ("1", 42, 50)] >>> range_interleave(ranges) [('1', 41, 41)] >>> range_interleave(ranges, sizes={"1": 70}) [('1', 1, 29), ('1', 41, 41), ('1', 51, 70)] """ from jcvi.utils.iter import pairwise ranges = range_merge(ranges) interleaved_ranges = [] for ch, cranges in groupby(ranges, key=lambda x: x[0]): cranges = list(cranges) size = sizes.get(ch, None) if size: ch, astart, aend = cranges[0] if astart > 1: interleaved_ranges.append((ch, 1, astart - 1)) elif empty: interleaved_ranges.append(None) for a, b in pairwise(cranges): ch, astart, aend = a ch, bstart, bend = b istart, iend = aend + 1, bstart - 1 if istart <= iend: interleaved_ranges.append((ch, istart, iend)) elif empty: interleaved_ranges.append(None) if size: ch, astart, aend = cranges[-1] if aend < size: interleaved_ranges.append((ch, aend + 1, size)) elif empty: interleaved_ranges.append(None) return interleaved_ranges
python
def range_interleave(ranges, sizes={}, empty=False): """ Returns the ranges in between the given ranges. >>> ranges = [("1", 30, 40), ("1", 45, 50), ("1", 10, 30)] >>> range_interleave(ranges) [('1', 41, 44)] >>> ranges = [("1", 30, 40), ("1", 42, 50)] >>> range_interleave(ranges) [('1', 41, 41)] >>> range_interleave(ranges, sizes={"1": 70}) [('1', 1, 29), ('1', 41, 41), ('1', 51, 70)] """ from jcvi.utils.iter import pairwise ranges = range_merge(ranges) interleaved_ranges = [] for ch, cranges in groupby(ranges, key=lambda x: x[0]): cranges = list(cranges) size = sizes.get(ch, None) if size: ch, astart, aend = cranges[0] if astart > 1: interleaved_ranges.append((ch, 1, astart - 1)) elif empty: interleaved_ranges.append(None) for a, b in pairwise(cranges): ch, astart, aend = a ch, bstart, bend = b istart, iend = aend + 1, bstart - 1 if istart <= iend: interleaved_ranges.append((ch, istart, iend)) elif empty: interleaved_ranges.append(None) if size: ch, astart, aend = cranges[-1] if aend < size: interleaved_ranges.append((ch, aend + 1, size)) elif empty: interleaved_ranges.append(None) return interleaved_ranges
[ "def", "range_interleave", "(", "ranges", ",", "sizes", "=", "{", "}", ",", "empty", "=", "False", ")", ":", "from", "jcvi", ".", "utils", ".", "iter", "import", "pairwise", "ranges", "=", "range_merge", "(", "ranges", ")", "interleaved_ranges", "=", "["...
Returns the ranges in between the given ranges. >>> ranges = [("1", 30, 40), ("1", 45, 50), ("1", 10, 30)] >>> range_interleave(ranges) [('1', 41, 44)] >>> ranges = [("1", 30, 40), ("1", 42, 50)] >>> range_interleave(ranges) [('1', 41, 41)] >>> range_interleave(ranges, sizes={"1": 70}) [('1', 1, 29), ('1', 41, 41), ('1', 51, 70)]
[ "Returns", "the", "ranges", "in", "between", "the", "given", "ranges", "." ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/utils/range.py#L203-L246
train
200,783
tanghaibao/jcvi
jcvi/utils/range.py
range_merge
def range_merge(ranges, dist=0): """ Returns merged range. Similar to range_union, except this returns new ranges. >>> ranges = [("1", 30, 45), ("1", 40, 50), ("1", 10, 50)] >>> range_merge(ranges) [('1', 10, 50)] >>> ranges = [("1", 30, 40), ("1", 45, 50)] >>> range_merge(ranges) [('1', 30, 40), ('1', 45, 50)] >>> ranges = [("1", 30, 40), ("1", 45, 50)] >>> range_merge(ranges, dist=5) [('1', 30, 50)] """ if not ranges: return [] ranges.sort() cur_range = list(ranges[0]) merged_ranges = [] for r in ranges[1:]: # open new range if start > cur_end or seqid != cur_seqid if r[1] - cur_range[2] > dist or r[0] != cur_range[0]: merged_ranges.append(tuple(cur_range)) cur_range = list(r) else: cur_range[2] = max(cur_range[2], r[2]) merged_ranges.append(tuple(cur_range)) return merged_ranges
python
def range_merge(ranges, dist=0): """ Returns merged range. Similar to range_union, except this returns new ranges. >>> ranges = [("1", 30, 45), ("1", 40, 50), ("1", 10, 50)] >>> range_merge(ranges) [('1', 10, 50)] >>> ranges = [("1", 30, 40), ("1", 45, 50)] >>> range_merge(ranges) [('1', 30, 40), ('1', 45, 50)] >>> ranges = [("1", 30, 40), ("1", 45, 50)] >>> range_merge(ranges, dist=5) [('1', 30, 50)] """ if not ranges: return [] ranges.sort() cur_range = list(ranges[0]) merged_ranges = [] for r in ranges[1:]: # open new range if start > cur_end or seqid != cur_seqid if r[1] - cur_range[2] > dist or r[0] != cur_range[0]: merged_ranges.append(tuple(cur_range)) cur_range = list(r) else: cur_range[2] = max(cur_range[2], r[2]) merged_ranges.append(tuple(cur_range)) return merged_ranges
[ "def", "range_merge", "(", "ranges", ",", "dist", "=", "0", ")", ":", "if", "not", "ranges", ":", "return", "[", "]", "ranges", ".", "sort", "(", ")", "cur_range", "=", "list", "(", "ranges", "[", "0", "]", ")", "merged_ranges", "=", "[", "]", "f...
Returns merged range. Similar to range_union, except this returns new ranges. >>> ranges = [("1", 30, 45), ("1", 40, 50), ("1", 10, 50)] >>> range_merge(ranges) [('1', 10, 50)] >>> ranges = [("1", 30, 40), ("1", 45, 50)] >>> range_merge(ranges) [('1', 30, 40), ('1', 45, 50)] >>> ranges = [("1", 30, 40), ("1", 45, 50)] >>> range_merge(ranges, dist=5) [('1', 30, 50)]
[ "Returns", "merged", "range", ".", "Similar", "to", "range_union", "except", "this", "returns", "new", "ranges", "." ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/utils/range.py#L249-L280
train
200,784
tanghaibao/jcvi
jcvi/utils/range.py
range_span
def range_span(ranges): """ Returns the total span between the left most range to the right most range. >>> ranges = [("1", 30, 45), ("1", 40, 50), ("1", 10, 50)] >>> range_span(ranges) 41 >>> ranges = [("1", 30, 45), ("2", 40, 50)] >>> range_span(ranges) 27 >>> ranges = [("1", 30, 45), ("1", 45, 50)] >>> range_span(ranges) 21 >>> range_span([]) 0 """ if not ranges: return 0 ranges.sort() ans = 0 for seq, lt in groupby(ranges, key=lambda x: x[0]): lt = list(lt) ans += max(max(lt)[1:]) - min(min(lt)[1:]) + 1 return ans
python
def range_span(ranges): """ Returns the total span between the left most range to the right most range. >>> ranges = [("1", 30, 45), ("1", 40, 50), ("1", 10, 50)] >>> range_span(ranges) 41 >>> ranges = [("1", 30, 45), ("2", 40, 50)] >>> range_span(ranges) 27 >>> ranges = [("1", 30, 45), ("1", 45, 50)] >>> range_span(ranges) 21 >>> range_span([]) 0 """ if not ranges: return 0 ranges.sort() ans = 0 for seq, lt in groupby(ranges, key=lambda x: x[0]): lt = list(lt) ans += max(max(lt)[1:]) - min(min(lt)[1:]) + 1 return ans
[ "def", "range_span", "(", "ranges", ")", ":", "if", "not", "ranges", ":", "return", "0", "ranges", ".", "sort", "(", ")", "ans", "=", "0", "for", "seq", ",", "lt", "in", "groupby", "(", "ranges", ",", "key", "=", "lambda", "x", ":", "x", "[", "...
Returns the total span between the left most range to the right most range. >>> ranges = [("1", 30, 45), ("1", 40, 50), ("1", 10, 50)] >>> range_span(ranges) 41 >>> ranges = [("1", 30, 45), ("2", 40, 50)] >>> range_span(ranges) 27 >>> ranges = [("1", 30, 45), ("1", 45, 50)] >>> range_span(ranges) 21 >>> range_span([]) 0
[ "Returns", "the", "total", "span", "between", "the", "left", "most", "range", "to", "the", "right", "most", "range", "." ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/utils/range.py#L321-L345
train
200,785
tanghaibao/jcvi
jcvi/utils/range.py
range_piles
def range_piles(ranges): """ Return piles of intervals that overlap. The piles are only interrupted by regions of zero coverage. >>> ranges = [Range("2", 0, 1, 3, 0), Range("2", 1, 4, 3, 1), Range("3", 5, 7, 3, 2)] >>> list(range_piles(ranges)) [[0, 1], [2]] """ endpoints = _make_endpoints(ranges) for seqid, ends in groupby(endpoints, lambda x: x[0]): active = [] depth = 0 for seqid, pos, leftright, i, score in ends: if leftright == LEFT: active.append(i) depth += 1 else: depth -= 1 if depth == 0 and active: yield active active = []
python
def range_piles(ranges): """ Return piles of intervals that overlap. The piles are only interrupted by regions of zero coverage. >>> ranges = [Range("2", 0, 1, 3, 0), Range("2", 1, 4, 3, 1), Range("3", 5, 7, 3, 2)] >>> list(range_piles(ranges)) [[0, 1], [2]] """ endpoints = _make_endpoints(ranges) for seqid, ends in groupby(endpoints, lambda x: x[0]): active = [] depth = 0 for seqid, pos, leftright, i, score in ends: if leftright == LEFT: active.append(i) depth += 1 else: depth -= 1 if depth == 0 and active: yield active active = []
[ "def", "range_piles", "(", "ranges", ")", ":", "endpoints", "=", "_make_endpoints", "(", "ranges", ")", "for", "seqid", ",", "ends", "in", "groupby", "(", "endpoints", ",", "lambda", "x", ":", "x", "[", "0", "]", ")", ":", "active", "=", "[", "]", ...
Return piles of intervals that overlap. The piles are only interrupted by regions of zero coverage. >>> ranges = [Range("2", 0, 1, 3, 0), Range("2", 1, 4, 3, 1), Range("3", 5, 7, 3, 2)] >>> list(range_piles(ranges)) [[0, 1], [2]]
[ "Return", "piles", "of", "intervals", "that", "overlap", ".", "The", "piles", "are", "only", "interrupted", "by", "regions", "of", "zero", "coverage", "." ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/utils/range.py#L359-L382
train
200,786
tanghaibao/jcvi
jcvi/utils/range.py
range_conflict
def range_conflict(ranges, depth=1): """ Find intervals that are overlapping in 1-dimension. Return groups of block IDs that are in conflict. >>> ranges = [Range("2", 0, 1, 3, 0), Range("2", 1, 4, 3, 1), Range("3", 5, 7, 3, 2)] >>> list(range_conflict(ranges)) [(0, 1)] """ overlap = set() active = set() endpoints = _make_endpoints(ranges) for seqid, ends in groupby(endpoints, lambda x: x[0]): active.clear() for seqid, pos, leftright, i, score in ends: if leftright == LEFT: active.add(i) else: active.remove(i) if len(active) > depth: overlap.add(tuple(sorted(active))) for ov in overlap: yield ov
python
def range_conflict(ranges, depth=1): """ Find intervals that are overlapping in 1-dimension. Return groups of block IDs that are in conflict. >>> ranges = [Range("2", 0, 1, 3, 0), Range("2", 1, 4, 3, 1), Range("3", 5, 7, 3, 2)] >>> list(range_conflict(ranges)) [(0, 1)] """ overlap = set() active = set() endpoints = _make_endpoints(ranges) for seqid, ends in groupby(endpoints, lambda x: x[0]): active.clear() for seqid, pos, leftright, i, score in ends: if leftright == LEFT: active.add(i) else: active.remove(i) if len(active) > depth: overlap.add(tuple(sorted(active))) for ov in overlap: yield ov
[ "def", "range_conflict", "(", "ranges", ",", "depth", "=", "1", ")", ":", "overlap", "=", "set", "(", ")", "active", "=", "set", "(", ")", "endpoints", "=", "_make_endpoints", "(", "ranges", ")", "for", "seqid", ",", "ends", "in", "groupby", "(", "en...
Find intervals that are overlapping in 1-dimension. Return groups of block IDs that are in conflict. >>> ranges = [Range("2", 0, 1, 3, 0), Range("2", 1, 4, 3, 1), Range("3", 5, 7, 3, 2)] >>> list(range_conflict(ranges)) [(0, 1)]
[ "Find", "intervals", "that", "are", "overlapping", "in", "1", "-", "dimension", ".", "Return", "groups", "of", "block", "IDs", "that", "are", "in", "conflict", "." ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/utils/range.py#L385-L410
train
200,787
tanghaibao/jcvi
jcvi/utils/table.py
loadtable
def loadtable(header, rows, major='=', minor='-', thousands=True): """ Print a tabular output, with horizontal separators """ formatted = load_csv(header, rows, sep=" ", thousands=thousands) header, rows = formatted[0], formatted[1:] return banner(header, rows)
python
def loadtable(header, rows, major='=', minor='-', thousands=True): """ Print a tabular output, with horizontal separators """ formatted = load_csv(header, rows, sep=" ", thousands=thousands) header, rows = formatted[0], formatted[1:] return banner(header, rows)
[ "def", "loadtable", "(", "header", ",", "rows", ",", "major", "=", "'='", ",", "minor", "=", "'-'", ",", "thousands", "=", "True", ")", ":", "formatted", "=", "load_csv", "(", "header", ",", "rows", ",", "sep", "=", "\" \"", ",", "thousands", "=", ...
Print a tabular output, with horizontal separators
[ "Print", "a", "tabular", "output", "with", "horizontal", "separators" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/utils/table.py#L22-L29
train
200,788
tanghaibao/jcvi
jcvi/utils/table.py
write_csv
def write_csv(header, contents, sep=",", filename="stdout", thousands=False, tee=False, align=True, comment=False): """ Write csv that are aligned with the column headers. >>> header = ["x_value", "y_value"] >>> contents = [(1, 100), (2, 200)] >>> write_csv(header, contents) x_value, y_value 1, 100 2, 200 """ from jcvi.formats.base import must_open formatted = load_csv(header, contents, sep=sep, thousands=thousands, align=align) if comment: formatted[0] = '#' + formatted[0][1:] formatted = "\n".join(formatted) fw = must_open(filename, "w") print(formatted, file=fw) if tee and filename != "stdout": print(formatted)
python
def write_csv(header, contents, sep=",", filename="stdout", thousands=False, tee=False, align=True, comment=False): """ Write csv that are aligned with the column headers. >>> header = ["x_value", "y_value"] >>> contents = [(1, 100), (2, 200)] >>> write_csv(header, contents) x_value, y_value 1, 100 2, 200 """ from jcvi.formats.base import must_open formatted = load_csv(header, contents, sep=sep, thousands=thousands, align=align) if comment: formatted[0] = '#' + formatted[0][1:] formatted = "\n".join(formatted) fw = must_open(filename, "w") print(formatted, file=fw) if tee and filename != "stdout": print(formatted)
[ "def", "write_csv", "(", "header", ",", "contents", ",", "sep", "=", "\",\"", ",", "filename", "=", "\"stdout\"", ",", "thousands", "=", "False", ",", "tee", "=", "False", ",", "align", "=", "True", ",", "comment", "=", "False", ")", ":", "from", "jc...
Write csv that are aligned with the column headers. >>> header = ["x_value", "y_value"] >>> contents = [(1, 100), (2, 200)] >>> write_csv(header, contents) x_value, y_value 1, 100 2, 200
[ "Write", "csv", "that", "are", "aligned", "with", "the", "column", "headers", "." ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/utils/table.py#L111-L133
train
200,789
tanghaibao/jcvi
jcvi/assembly/geneticmap.py
blat
def blat(args): """ %prog blat map1.txt ref.fasta Make ALLMAPS input csv based on sequences. The tab-delimited txt file include: name, LG, position, sequence. """ from jcvi.formats.base import is_number from jcvi.formats.blast import best as blast_best, bed as blast_bed from jcvi.apps.align import blat as blat_align p = OptionParser(blat.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) maptxt, ref = args pf = maptxt.rsplit(".", 1)[0] register = {} fastafile = pf + ".fasta" fp = open(maptxt) fw = open(fastafile, "w") for row in fp: name, lg, pos, seq = row.split() if not is_number(pos): continue register[name] = (pf + '-' + lg, pos) print(">{0}\n{1}\n".format(name, seq), file=fw) fw.close() blatfile = blat_align([ref, fastafile]) bestfile = blast_best([blatfile]) bedfile = blast_bed([bestfile]) b = Bed(bedfile).order pf = ".".join((op.basename(maptxt).split(".")[0], op.basename(ref).split(".")[0])) csvfile = pf + ".csv" fp = open(maptxt) fw = open(csvfile, "w") for row in fp: name, lg, pos, seq = row.split() if name not in b: continue bbi, bb = b[name] scaffold, scaffold_pos = bb.seqid, bb.start print(",".join(str(x) for x in \ (scaffold, scaffold_pos, lg, pos)), file=fw) fw.close()
python
def blat(args): """ %prog blat map1.txt ref.fasta Make ALLMAPS input csv based on sequences. The tab-delimited txt file include: name, LG, position, sequence. """ from jcvi.formats.base import is_number from jcvi.formats.blast import best as blast_best, bed as blast_bed from jcvi.apps.align import blat as blat_align p = OptionParser(blat.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) maptxt, ref = args pf = maptxt.rsplit(".", 1)[0] register = {} fastafile = pf + ".fasta" fp = open(maptxt) fw = open(fastafile, "w") for row in fp: name, lg, pos, seq = row.split() if not is_number(pos): continue register[name] = (pf + '-' + lg, pos) print(">{0}\n{1}\n".format(name, seq), file=fw) fw.close() blatfile = blat_align([ref, fastafile]) bestfile = blast_best([blatfile]) bedfile = blast_bed([bestfile]) b = Bed(bedfile).order pf = ".".join((op.basename(maptxt).split(".")[0], op.basename(ref).split(".")[0])) csvfile = pf + ".csv" fp = open(maptxt) fw = open(csvfile, "w") for row in fp: name, lg, pos, seq = row.split() if name not in b: continue bbi, bb = b[name] scaffold, scaffold_pos = bb.seqid, bb.start print(",".join(str(x) for x in \ (scaffold, scaffold_pos, lg, pos)), file=fw) fw.close()
[ "def", "blat", "(", "args", ")", ":", "from", "jcvi", ".", "formats", ".", "base", "import", "is_number", "from", "jcvi", ".", "formats", ".", "blast", "import", "best", "as", "blast_best", ",", "bed", "as", "blast_bed", "from", "jcvi", ".", "apps", "....
%prog blat map1.txt ref.fasta Make ALLMAPS input csv based on sequences. The tab-delimited txt file include: name, LG, position, sequence.
[ "%prog", "blat", "map1", ".", "txt", "ref", ".", "fasta" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/geneticmap.py#L154-L203
train
200,790
tanghaibao/jcvi
jcvi/assembly/geneticmap.py
header
def header(args): """ %prog header map conversion_table Rename lines in the map header. The mapping of old names to new names are stored in two-column `conversion_table`. """ from jcvi.formats.base import DictFile p = OptionParser(header.__doc__) p.add_option("--prefix", default="", help="Prepend text to line number [default: %default]") p.add_option("--ids", help="Write ids to file [default: %default]") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) mstmap, conversion_table = args data = MSTMap(mstmap) hd = data.header conversion = DictFile(conversion_table) newhd = [opts.prefix + conversion.get(x, x) for x in hd] print("\t".join(hd)) print("--->") print("\t".join(newhd)) ids = opts.ids if ids: fw = open(ids, "w") print("\n".join(newhd), file=fw) fw.close()
python
def header(args): """ %prog header map conversion_table Rename lines in the map header. The mapping of old names to new names are stored in two-column `conversion_table`. """ from jcvi.formats.base import DictFile p = OptionParser(header.__doc__) p.add_option("--prefix", default="", help="Prepend text to line number [default: %default]") p.add_option("--ids", help="Write ids to file [default: %default]") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) mstmap, conversion_table = args data = MSTMap(mstmap) hd = data.header conversion = DictFile(conversion_table) newhd = [opts.prefix + conversion.get(x, x) for x in hd] print("\t".join(hd)) print("--->") print("\t".join(newhd)) ids = opts.ids if ids: fw = open(ids, "w") print("\n".join(newhd), file=fw) fw.close()
[ "def", "header", "(", "args", ")", ":", "from", "jcvi", ".", "formats", ".", "base", "import", "DictFile", "p", "=", "OptionParser", "(", "header", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--prefix\"", ",", "default", "=", "\"\"", ",", "hel...
%prog header map conversion_table Rename lines in the map header. The mapping of old names to new names are stored in two-column `conversion_table`.
[ "%prog", "header", "map", "conversion_table" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/geneticmap.py#L435-L467
train
200,791
tanghaibao/jcvi
jcvi/assembly/geneticmap.py
rename
def rename(args): """ %prog rename map markers.bed > renamed.map Rename markers according to the new mapping locations. """ p = OptionParser(rename.__doc__) p.set_outfile() opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) mstmap, bedfile = args markersbed = Bed(bedfile) markers = markersbed.order data = MSTMap(mstmap) header = data.header header = [header[0]] + ["seqid", "start"] + header[1:] renamed = [] for b in data: m, geno = b.id, b.genotype om = m if m not in markers: m = m.rsplit(".", 1)[0] if m not in markers: continue i, mb = markers[m] renamed.append([om, mb.seqid, mb.start, "\t".join(list(geno))]) renamed.sort(key=lambda x: (x[1], x[2])) fw = must_open(opts.outfile, "w") print("\t".join(header), file=fw) for d in renamed: print("\t".join(str(x) for x in d), file=fw)
python
def rename(args): """ %prog rename map markers.bed > renamed.map Rename markers according to the new mapping locations. """ p = OptionParser(rename.__doc__) p.set_outfile() opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) mstmap, bedfile = args markersbed = Bed(bedfile) markers = markersbed.order data = MSTMap(mstmap) header = data.header header = [header[0]] + ["seqid", "start"] + header[1:] renamed = [] for b in data: m, geno = b.id, b.genotype om = m if m not in markers: m = m.rsplit(".", 1)[0] if m not in markers: continue i, mb = markers[m] renamed.append([om, mb.seqid, mb.start, "\t".join(list(geno))]) renamed.sort(key=lambda x: (x[1], x[2])) fw = must_open(opts.outfile, "w") print("\t".join(header), file=fw) for d in renamed: print("\t".join(str(x) for x in d), file=fw)
[ "def", "rename", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "rename", ".", "__doc__", ")", "p", ".", "set_outfile", "(", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "2",...
%prog rename map markers.bed > renamed.map Rename markers according to the new mapping locations.
[ "%prog", "rename", "map", "markers", ".", "bed", ">", "renamed", ".", "map" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/geneticmap.py#L470-L506
train
200,792
tanghaibao/jcvi
jcvi/assembly/geneticmap.py
anchor
def anchor(args): """ %prog anchor map.bed markers.blast > anchored.bed Anchor scaffolds based on map. """ from jcvi.formats.blast import bed p = OptionParser(anchor.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) mapbed, blastfile = args bedfile = bed([blastfile]) markersbed = Bed(bedfile) markers = markersbed.order mapbed = Bed(mapbed, sorted=False) for b in mapbed: m = b.accn if m not in markers: continue i, mb = markers[m] new_accn = "{0}:{1}-{2}".format(mb.seqid, mb.start, mb.end) b.accn = new_accn print(b)
python
def anchor(args): """ %prog anchor map.bed markers.blast > anchored.bed Anchor scaffolds based on map. """ from jcvi.formats.blast import bed p = OptionParser(anchor.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) mapbed, blastfile = args bedfile = bed([blastfile]) markersbed = Bed(bedfile) markers = markersbed.order mapbed = Bed(mapbed, sorted=False) for b in mapbed: m = b.accn if m not in markers: continue i, mb = markers[m] new_accn = "{0}:{1}-{2}".format(mb.seqid, mb.start, mb.end) b.accn = new_accn print(b)
[ "def", "anchor", "(", "args", ")", ":", "from", "jcvi", ".", "formats", ".", "blast", "import", "bed", "p", "=", "OptionParser", "(", "anchor", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(...
%prog anchor map.bed markers.blast > anchored.bed Anchor scaffolds based on map.
[ "%prog", "anchor", "map", ".", "bed", "markers", ".", "blast", ">", "anchored", ".", "bed" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/geneticmap.py#L509-L537
train
200,793
tanghaibao/jcvi
jcvi/assembly/geneticmap.py
bed
def bed(args): """ %prog fasta map.out Convert MSTMAP output into bed format. """ p = OptionParser(bed.__doc__) p.add_option("--switch", default=False, action="store_true", help="Switch reference and aligned map elements [default: %default]") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) mapout, = args pf = mapout.split(".")[0] mapbed = pf + ".bed" bm = BinMap(mapout) bm.print_to_bed(mapbed, switch=opts.switch) return mapbed
python
def bed(args): """ %prog fasta map.out Convert MSTMAP output into bed format. """ p = OptionParser(bed.__doc__) p.add_option("--switch", default=False, action="store_true", help="Switch reference and aligned map elements [default: %default]") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) mapout, = args pf = mapout.split(".")[0] mapbed = pf + ".bed" bm = BinMap(mapout) bm.print_to_bed(mapbed, switch=opts.switch) return mapbed
[ "def", "bed", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "bed", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--switch\"", ",", "default", "=", "False", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"Switch reference and align...
%prog fasta map.out Convert MSTMAP output into bed format.
[ "%prog", "fasta", "map", ".", "out" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/geneticmap.py#L540-L560
train
200,794
tanghaibao/jcvi
jcvi/assembly/geneticmap.py
fasta
def fasta(args): """ %prog fasta map.out scaffolds.fasta Extract marker sequences based on map. """ from jcvi.formats.sizes import Sizes p = OptionParser(fasta.__doc__) p.add_option("--extend", default=1000, type="int", help="Extend seq flanking the gaps [default: %default]") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) mapout, sfasta = args Flank = opts.extend pf = mapout.split(".")[0] mapbed = pf + ".bed" bm = BinMap(mapout) bm.print_to_bed(mapbed) bed = Bed(mapbed, sorted=False) markersbed = pf + ".markers.bed" fw = open(markersbed, "w") sizes = Sizes(sfasta).mapping for b in bed: accn = b.accn scf, pos = accn.split(".") pos = int(pos) start = max(0, pos - Flank) end = min(pos + Flank, sizes[scf]) print("\t".join(str(x) for x in \ (scf, start, end, accn)), file=fw) fw.close() fastaFromBed(markersbed, sfasta, name=True)
python
def fasta(args): """ %prog fasta map.out scaffolds.fasta Extract marker sequences based on map. """ from jcvi.formats.sizes import Sizes p = OptionParser(fasta.__doc__) p.add_option("--extend", default=1000, type="int", help="Extend seq flanking the gaps [default: %default]") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) mapout, sfasta = args Flank = opts.extend pf = mapout.split(".")[0] mapbed = pf + ".bed" bm = BinMap(mapout) bm.print_to_bed(mapbed) bed = Bed(mapbed, sorted=False) markersbed = pf + ".markers.bed" fw = open(markersbed, "w") sizes = Sizes(sfasta).mapping for b in bed: accn = b.accn scf, pos = accn.split(".") pos = int(pos) start = max(0, pos - Flank) end = min(pos + Flank, sizes[scf]) print("\t".join(str(x) for x in \ (scf, start, end, accn)), file=fw) fw.close() fastaFromBed(markersbed, sfasta, name=True)
[ "def", "fasta", "(", "args", ")", ":", "from", "jcvi", ".", "formats", ".", "sizes", "import", "Sizes", "p", "=", "OptionParser", "(", "fasta", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--extend\"", ",", "default", "=", "1000", ",", "type", ...
%prog fasta map.out scaffolds.fasta Extract marker sequences based on map.
[ "%prog", "fasta", "map", ".", "out", "scaffolds", ".", "fasta" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/geneticmap.py#L563-L601
train
200,795
tanghaibao/jcvi
jcvi/assembly/geneticmap.py
breakpoint
def breakpoint(args): """ %prog breakpoint mstmap.input > breakpoints.bed Find scaffold breakpoints using genetic map. Use variation.vcf.mstmap() to generate the input for this routine. """ from jcvi.utils.iter import pairwise p = OptionParser(breakpoint.__doc__) p.add_option("--diff", default=.1, type="float", help="Maximum ratio of differences allowed [default: %default]") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) mstmap, = args diff = opts.diff data = MSTMap(mstmap) # Remove singleton markers (avoid double cross-over) good = [] nsingletons = 0 for i in xrange(1, len(data) - 1): a = data[i] left_label, left_rr = check_markers(data[i - 1], a, diff) right_label, right_rr = check_markers(a, data[i + 1], diff) if left_label == BREAK and right_label == BREAK: nsingletons += 1 continue good.append(a) logging.debug("A total of {0} singleton markers removed.".format(nsingletons)) for a, b in pairwise(good): label, rr = check_markers(a, b, diff) if label == BREAK: print("\t".join(str(x) for x in rr))
python
def breakpoint(args): """ %prog breakpoint mstmap.input > breakpoints.bed Find scaffold breakpoints using genetic map. Use variation.vcf.mstmap() to generate the input for this routine. """ from jcvi.utils.iter import pairwise p = OptionParser(breakpoint.__doc__) p.add_option("--diff", default=.1, type="float", help="Maximum ratio of differences allowed [default: %default]") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) mstmap, = args diff = opts.diff data = MSTMap(mstmap) # Remove singleton markers (avoid double cross-over) good = [] nsingletons = 0 for i in xrange(1, len(data) - 1): a = data[i] left_label, left_rr = check_markers(data[i - 1], a, diff) right_label, right_rr = check_markers(a, data[i + 1], diff) if left_label == BREAK and right_label == BREAK: nsingletons += 1 continue good.append(a) logging.debug("A total of {0} singleton markers removed.".format(nsingletons)) for a, b in pairwise(good): label, rr = check_markers(a, b, diff) if label == BREAK: print("\t".join(str(x) for x in rr))
[ "def", "breakpoint", "(", "args", ")", ":", "from", "jcvi", ".", "utils", ".", "iter", "import", "pairwise", "p", "=", "OptionParser", "(", "breakpoint", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--diff\"", ",", "default", "=", ".1", ",", "t...
%prog breakpoint mstmap.input > breakpoints.bed Find scaffold breakpoints using genetic map. Use variation.vcf.mstmap() to generate the input for this routine.
[ "%prog", "breakpoint", "mstmap", ".", "input", ">", "breakpoints", ".", "bed" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/geneticmap.py#L628-L668
train
200,796
tanghaibao/jcvi
jcvi/formats/agp.py
trimNs
def trimNs(seq, line, newagp): """ Test if the sequences contain dangling N's on both sides. This component needs to be adjusted to the 'actual' sequence range. """ start, end = line.component_beg, line.component_end size = end - start + 1 leftNs, rightNs = 0, 0 lid, lo = line.component_id, line.orientation for s in seq: if s in 'nN': leftNs += 1 else: break for s in seq[::-1]: if s in 'nN': rightNs += 1 else: break if lo == '-': trimstart = start + rightNs trimend = end - leftNs else: trimstart = start + leftNs trimend = end - rightNs trimrange = (trimstart, trimend) oldrange = (start, end) if trimrange != oldrange: logging.debug("{0} trimmed of N's: {1} => {2}".\ format(lid, oldrange, trimrange)) if leftNs: print("\t".join(str(x) for x in (line.object, 0, 0, 0, 'N', leftNs, "fragment", "yes", "")), file=newagp) if trimend > trimstart: print("\t".join(str(x) for x in (line.object, 0, 0, 0, line.component_type, lid, trimstart, trimend, lo)), file=newagp) if rightNs and rightNs != size: print("\t".join(str(x) for x in (line.object, 0, 0, 0, 'N', rightNs, "fragment", "yes", "")), file=newagp) else: print(line, file=newagp)
python
def trimNs(seq, line, newagp): """ Test if the sequences contain dangling N's on both sides. This component needs to be adjusted to the 'actual' sequence range. """ start, end = line.component_beg, line.component_end size = end - start + 1 leftNs, rightNs = 0, 0 lid, lo = line.component_id, line.orientation for s in seq: if s in 'nN': leftNs += 1 else: break for s in seq[::-1]: if s in 'nN': rightNs += 1 else: break if lo == '-': trimstart = start + rightNs trimend = end - leftNs else: trimstart = start + leftNs trimend = end - rightNs trimrange = (trimstart, trimend) oldrange = (start, end) if trimrange != oldrange: logging.debug("{0} trimmed of N's: {1} => {2}".\ format(lid, oldrange, trimrange)) if leftNs: print("\t".join(str(x) for x in (line.object, 0, 0, 0, 'N', leftNs, "fragment", "yes", "")), file=newagp) if trimend > trimstart: print("\t".join(str(x) for x in (line.object, 0, 0, 0, line.component_type, lid, trimstart, trimend, lo)), file=newagp) if rightNs and rightNs != size: print("\t".join(str(x) for x in (line.object, 0, 0, 0, 'N', rightNs, "fragment", "yes", "")), file=newagp) else: print(line, file=newagp)
[ "def", "trimNs", "(", "seq", ",", "line", ",", "newagp", ")", ":", "start", ",", "end", "=", "line", ".", "component_beg", ",", "line", ".", "component_end", "size", "=", "end", "-", "start", "+", "1", "leftNs", ",", "rightNs", "=", "0", ",", "0", ...
Test if the sequences contain dangling N's on both sides. This component needs to be adjusted to the 'actual' sequence range.
[ "Test", "if", "the", "sequences", "contain", "dangling", "N", "s", "on", "both", "sides", ".", "This", "component", "needs", "to", "be", "adjusted", "to", "the", "actual", "sequence", "range", "." ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/agp.py#L680-L724
train
200,797
tanghaibao/jcvi
jcvi/formats/agp.py
fromcsv
def fromcsv(args): """ %prog fromcsv contigs.fasta map.csv map.agp Convert csv which contains list of scaffolds/contigs to AGP file. """ import csv from jcvi.formats.sizes import Sizes p = OptionParser(fromcsv.__doc__) p.add_option("--evidence", default="map", help="Linkage evidence to add in AGP") opts, args = p.parse_args(args) if len(args) != 3: sys.exit(not p.print_help()) contigsfasta, mapcsv, mapagp = args reader = csv.reader(open(mapcsv)) sizes = Sizes(contigsfasta).mapping next(reader) # Header fwagp = must_open(mapagp, "w") o = OO() for row in reader: if len(row) == 2: object, ctg = row strand = '?' elif len(row) == 3: object, ctg, strand = row size = sizes[ctg] o.add(object, ctg, size, strand) o.write_AGP(fwagp, gapsize=100, gaptype="scaffold", phases={}, evidence=opts.evidence)
python
def fromcsv(args): """ %prog fromcsv contigs.fasta map.csv map.agp Convert csv which contains list of scaffolds/contigs to AGP file. """ import csv from jcvi.formats.sizes import Sizes p = OptionParser(fromcsv.__doc__) p.add_option("--evidence", default="map", help="Linkage evidence to add in AGP") opts, args = p.parse_args(args) if len(args) != 3: sys.exit(not p.print_help()) contigsfasta, mapcsv, mapagp = args reader = csv.reader(open(mapcsv)) sizes = Sizes(contigsfasta).mapping next(reader) # Header fwagp = must_open(mapagp, "w") o = OO() for row in reader: if len(row) == 2: object, ctg = row strand = '?' elif len(row) == 3: object, ctg, strand = row size = sizes[ctg] o.add(object, ctg, size, strand) o.write_AGP(fwagp, gapsize=100, gaptype="scaffold", phases={}, evidence=opts.evidence)
[ "def", "fromcsv", "(", "args", ")", ":", "import", "csv", "from", "jcvi", ".", "formats", ".", "sizes", "import", "Sizes", "p", "=", "OptionParser", "(", "fromcsv", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--evidence\"", ",", "default", "=", ...
%prog fromcsv contigs.fasta map.csv map.agp Convert csv which contains list of scaffolds/contigs to AGP file.
[ "%prog", "fromcsv", "contigs", ".", "fasta", "map", ".", "csv", "map", ".", "agp" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/agp.py#L757-L790
train
200,798
tanghaibao/jcvi
jcvi/formats/agp.py
compress
def compress(args): """ %prog compress a.agp b.agp Convert coordinates based on multiple AGP files. Useful to simplify multiple liftOvers to compress multiple chain files into a single chain file, in upgrading locations of genomic features. Example: `a.agp` could contain split scaffolds: scaffold_0.1 1 600309 1 W scaffold_0 1 600309 + `b.agp` could contain mapping to chromosomes: LG05 6435690 7035998 53 W scaffold_0.1 1 600309 + The final AGP we want is: LG05 6435690 7035998 53 W scaffold_0 1 600309 + """ p = OptionParser(compress.__doc__) p.set_outfile() opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) aagpfile, bagpfile = args # First AGP provides the mapping store = {} agp = AGP(aagpfile) for a in agp: if a.is_gap: continue # Ignore '?' in the mapping if a.sign == 0: a.sign = 1 store[(a.object, a.object_beg, a.object_end)] = \ (a.component_id, a.component_beg, a.component_end, a.sign) # Second AGP forms the backbone agp = AGP(bagpfile) fw = must_open(opts.outfile, "w") print("\n".join(agp.header), file=fw) for a in agp: if a.is_gap: print(a, file=fw) continue component_id, component_beg, component_end, sign = \ store[(a.component_id, a.component_beg, a.component_end)] orientation = {1: '+', -1: '-', 0: '?'}.get(sign * a.sign) atoms = (a.object, a.object_beg, a.object_end, a.part_number, a.component_type, component_id, component_beg, component_end, orientation) a = AGPLine("\t".join(str(x) for x in atoms)) print(a, file=fw)
python
def compress(args): """ %prog compress a.agp b.agp Convert coordinates based on multiple AGP files. Useful to simplify multiple liftOvers to compress multiple chain files into a single chain file, in upgrading locations of genomic features. Example: `a.agp` could contain split scaffolds: scaffold_0.1 1 600309 1 W scaffold_0 1 600309 + `b.agp` could contain mapping to chromosomes: LG05 6435690 7035998 53 W scaffold_0.1 1 600309 + The final AGP we want is: LG05 6435690 7035998 53 W scaffold_0 1 600309 + """ p = OptionParser(compress.__doc__) p.set_outfile() opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) aagpfile, bagpfile = args # First AGP provides the mapping store = {} agp = AGP(aagpfile) for a in agp: if a.is_gap: continue # Ignore '?' in the mapping if a.sign == 0: a.sign = 1 store[(a.object, a.object_beg, a.object_end)] = \ (a.component_id, a.component_beg, a.component_end, a.sign) # Second AGP forms the backbone agp = AGP(bagpfile) fw = must_open(opts.outfile, "w") print("\n".join(agp.header), file=fw) for a in agp: if a.is_gap: print(a, file=fw) continue component_id, component_beg, component_end, sign = \ store[(a.component_id, a.component_beg, a.component_end)] orientation = {1: '+', -1: '-', 0: '?'}.get(sign * a.sign) atoms = (a.object, a.object_beg, a.object_end, a.part_number, a.component_type, component_id, component_beg, component_end, orientation) a = AGPLine("\t".join(str(x) for x in atoms)) print(a, file=fw)
[ "def", "compress", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "compress", ".", "__doc__", ")", "p", ".", "set_outfile", "(", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", ...
%prog compress a.agp b.agp Convert coordinates based on multiple AGP files. Useful to simplify multiple liftOvers to compress multiple chain files into a single chain file, in upgrading locations of genomic features. Example: `a.agp` could contain split scaffolds: scaffold_0.1 1 600309 1 W scaffold_0 1 600309 + `b.agp` could contain mapping to chromosomes: LG05 6435690 7035998 53 W scaffold_0.1 1 600309 + The final AGP we want is: LG05 6435690 7035998 53 W scaffold_0 1 600309 +
[ "%prog", "compress", "a", ".", "agp", "b", ".", "agp" ]
d2e31a77b6ade7f41f3b321febc2b4744d1cdeca
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/agp.py#L793-L846
train
200,799