<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fasta(args): """ %prog fasta bedfile scf.fasta pseudomolecules.fasta Use OM bed to scaffold and create pseudomolecules. bedfile can be generated by running jcvi.assembly.opticalmap bed --blockonly """
    from jcvi.formats.sizes import Sizes
    from jcvi.formats.agp import OO, build

    p = OptionParser(fasta.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 3:
        sys.exit(not p.print_help())

    bedfile, scffasta, pmolfasta = args
    pf = bedfile.rsplit(".", 1)[0]
    bed = Bed(bedfile)
    selected = select_bed(bed)
    oo = OO()
    seen = set()
    sizes = Sizes(scffasta).mapping
    agpfile = pf + ".agp"
    agp = open(agpfile, "w")
    for b in selected:
        scf = range_parse(b.accn).seqid
        chr = b.seqid
        cs = (chr, scf)
        if cs not in seen:
            oo.add(chr, scf, sizes[scf], b.strand)
            seen.add(cs)
        else:
            logging.debug("Seen {0}, ignored.".format(cs))

    oo.write_AGP(agp, gaptype="contig")
    agp.close()
    build([agpfile, scffasta, pmolfasta])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def bed(args): """ %prog bed xmlfile Print summary of optical map alignment in BED format. """
    from jcvi.formats.bed import sort

    p = OptionParser(bed.__doc__)
    p.add_option("--blockonly", default=False, action="store_true",
                 help="Only print out large blocks, not fragments [default: %default]")
    p.add_option("--point", default=False, action="store_true",
                 help="Print accession as single point instead of interval")
    p.add_option("--scale", type="float",
                 help="Scale the OM distance by factor")
    p.add_option("--switch", default=False, action="store_true",
                 help="Switch reference and aligned map elements [default: %default]")
    p.add_option("--nosort", default=False, action="store_true",
                 help="Do not sort bed [default: %default]")
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    xmlfile, = args
    bedfile = xmlfile.rsplit(".", 1)[0] + ".bed"
    om = OpticalMap(xmlfile)
    om.write_bed(bedfile, point=opts.point, scale=opts.scale,
                 blockonly=opts.blockonly, switch=opts.switch)

    if not opts.nosort:
        sort([bedfile, "--inplace"])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def bam(args): """ %prog bam input.gsnap ref.fasta Convert GSNAP output to BAM. """
    from jcvi.formats.sizes import Sizes
    from jcvi.formats.sam import index

    p = OptionParser(bam.__doc__)
    p.set_home("eddyyeh")
    p.set_cpus()
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    gsnapfile, fastafile = args
    EYHOME = opts.eddyyeh_home
    pf = gsnapfile.rsplit(".", 1)[0]
    uniqsam = pf + ".unique.sam"
    samstats = uniqsam + ".stats"
    sizesfile = Sizes(fastafile).filename
    if need_update((gsnapfile, sizesfile), samstats):
        cmd = op.join(EYHOME, "gsnap2gff3.pl")
        cmd += " --format sam -i {0} -o {1}".format(gsnapfile, uniqsam)
        cmd += " -u -l {0} -p {1}".format(sizesfile, opts.cpus)
        sh(cmd)

    index([uniqsam])

    return uniqsam
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def index(args): """ %prog index database.fasta Wrapper for `gmap_build`. Same interface. """
    p = OptionParser(index.__doc__)
    p.add_option("--supercat", default=False, action="store_true",
                 help="Concatenate reference to speed up alignment")
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    dbfile, = args
    check_index(dbfile, supercat=opts.supercat)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def gmap(args): """ %prog gmap database.fasta fastafile Wrapper for `gmap`. """
    p = OptionParser(gmap.__doc__)
    p.add_option("--cross", default=False, action="store_true",
                 help="Cross-species alignment")
    p.add_option("--npaths", default=0, type="int",
                 help="Maximum number of paths to show."
                      " If set to 0, prints two paths if chimera"
                      " detected, else one.")
    p.set_cpus()
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    dbfile, fastafile = args
    assert op.exists(dbfile) and op.exists(fastafile)
    prefix = get_prefix(fastafile, dbfile)
    logfile = prefix + ".log"
    gmapfile = prefix + ".gmap.gff3"

    if not need_update((dbfile, fastafile), gmapfile):
        logging.error("`{0}` exists. `gmap` already run.".format(gmapfile))
    else:
        dbdir, dbname = check_index(dbfile)
        cmd = "gmap -D {0} -d {1}".format(dbdir, dbname)
        cmd += " -f 2 --intronlength=100000"  # Output format 2
        cmd += " -t {0}".format(opts.cpus)
        cmd += " --npaths {0}".format(opts.npaths)
        if opts.cross:
            cmd += " --cross-species"
        cmd += " " + fastafile

        sh(cmd, outfile=gmapfile, errfile=logfile)

    return gmapfile, logfile
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def align(args): """ %prog align database.fasta read1.fq read2.fq Wrapper for `gsnap` single-end or paired-end, depending on the number of args. """
    from jcvi.formats.fastq import guessoffset

    p = OptionParser(align.__doc__)
    p.add_option("--rnaseq", default=False, action="store_true",
                 help="Input is RNA-seq reads, turn splicing on")
    p.add_option("--native", default=False, action="store_true",
                 help="Convert GSNAP output to NATIVE format")
    p.set_home("eddyyeh")
    p.set_outdir()
    p.set_cpus()
    opts, args = p.parse_args(args)

    if len(args) == 2:
        logging.debug("Single-end alignment")
    elif len(args) == 3:
        logging.debug("Paired-end alignment")
    else:
        sys.exit(not p.print_help())

    dbfile, readfile = args[:2]
    outdir = opts.outdir
    assert op.exists(dbfile) and op.exists(readfile)
    prefix = get_prefix(readfile, dbfile)
    logfile = op.join(outdir, prefix + ".log")
    gsnapfile = op.join(outdir, prefix + ".gsnap")
    nativefile = gsnapfile.rsplit(".", 1)[0] + ".unique.native"

    if not need_update((dbfile, readfile), gsnapfile):
        logging.error("`{0}` exists. `gsnap` already run.".format(gsnapfile))
    else:
        dbdir, dbname = check_index(dbfile)
        cmd = "gsnap -D {0} -d {1}".format(dbdir, dbname)
        cmd += " -B 5 -m 0.1 -i 2 -n 3"  # memory, mismatch, indel penalty, nhits
        if opts.rnaseq:
            cmd += " -N 1"
        cmd += " -t {0}".format(opts.cpus)
        cmd += " --gmap-mode none --nofails"
        if readfile.endswith(".gz"):
            cmd += " --gunzip"
        try:
            offset = "sanger" if guessoffset([readfile]) == 33 else "illumina"
            cmd += " --quality-protocol {0}".format(offset)
        except AssertionError:
            pass
        cmd += " " + " ".join(args[1:])
        sh(cmd, outfile=gsnapfile, errfile=logfile)

    if opts.native:
        EYHOME = opts.eddyyeh_home
        if need_update(gsnapfile, nativefile):
            cmd = op.join(EYHOME, "convert2native.pl")
            cmd += " --gsnap {0} -o {1}".format(gsnapfile, nativefile)
            cmd += " -proc {0}".format(opts.cpus)
            sh(cmd)

    return gsnapfile, logfile
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_1D_overlap(eclusters, depth=1): """ Find blocks that are 1D overlapping, returns cliques of block ids that are in conflict """
    overlap_set = set()
    active = set()

    ends = []
    for i, (chr, left, right) in enumerate(eclusters):
        ends.append((chr, left, 0, i))  # 0/1 for left/right-ness
        ends.append((chr, right, 1, i))
    ends.sort()

    chr_last = ""
    for chr, pos, left_right, i in ends:
        if chr != chr_last:
            active.clear()
        if left_right == 0:
            active.add(i)
        else:
            active.remove(i)

        if len(active) > depth:
            overlap_set.add(tuple(sorted(active)))

        chr_last = chr

    return overlap_set
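As a quick sanity check of the sweep above, here is a tiny illustrative run; the three blocks below are hypothetical and not from the source:

    # Blocks 0 and 1 overlap on chr1 in their 1D projection; block 2 stands alone.
    eclusters = [("chr1", 100, 500), ("chr1", 400, 900), ("chr1", 1000, 1200)]
    print(get_1D_overlap(eclusters, depth=1))  # {(0, 1)}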
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def make_range(clusters, extend=0): """ Convert a list of anchors to interval ends. extend modifies the xmax, ymax boundary of the box, which can be positive or negative; very useful when we want to make the range as fuzzy as we specify """
    eclusters = []
    for cluster in clusters:
        xlist, ylist, scores = zip(*cluster)
        score = _score(cluster)

        xchr, xmin = min(xlist)
        xchr, xmax = max(xlist)
        ychr, ymin = min(ylist)
        ychr, ymax = max(ylist)

        # allow fuzziness to the boundary
        xmax += extend
        ymax += extend
        # because extend can be negative values, we don't want it to be less than min
        if xmax < xmin:
            xmin, xmax = xmax, xmin
        if ymax < ymin:
            ymin, ymax = ymax, ymin

        eclusters.append(((xchr, xmin, xmax), (ychr, ymin, ymax), score))

    return eclusters
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_constraints(clusters, quota=(1, 1), Nmax=0): """ Check pairwise cluster comparison, if they overlap then mark edge as conflict """
    qa, qb = quota
    eclusters = make_range(clusters, extend=-Nmax)
    # (1-based index, cluster score)
    nodes = [(i + 1, c[-1]) for i, c in enumerate(eclusters)]

    eclusters_x, eclusters_y, scores = zip(*eclusters)

    # represents the constraints over x-axis and y-axis
    constraints_x = get_1D_overlap(eclusters_x, qa)
    constraints_y = get_1D_overlap(eclusters_y, qb)

    return nodes, constraints_x, constraints_y
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def format_lp(nodes, constraints_x, qa, constraints_y, qb): """ Maximize 4 x1 + 2 x2 + 3 x3 + x4 Subject To x1 + x2 <= 1 End """
    lp_handle = cStringIO.StringIO()

    lp_handle.write("Maximize\n ")
    records = 0
    for i, score in nodes:
        lp_handle.write("+ %d x%d " % (score, i))
        # SCIP does not like really long string per row
        records += 1
        if records % 10 == 0:
            lp_handle.write("\n")
    lp_handle.write("\n")

    num_of_constraints = 0
    lp_handle.write("Subject To\n")
    for c in constraints_x:
        additions = " + ".join("x%d" % (x + 1) for x in c)
        lp_handle.write(" %s <= %d\n" % (additions, qa))
    num_of_constraints += len(constraints_x)

    # non-self
    if not (constraints_x is constraints_y):
        for c in constraints_y:
            additions = " + ".join("x%d" % (x + 1) for x in c)
            lp_handle.write(" %s <= %d\n" % (additions, qb))
        num_of_constraints += len(constraints_y)

    print("number of variables (%d), number of constraints (%d)" %
          (len(nodes), num_of_constraints), file=sys.stderr)

    lp_handle.write("Binary\n")
    for i, score in nodes:
        lp_handle.write(" x%d\n" % i)

    lp_handle.write("End\n")

    lp_data = lp_handle.getvalue()
    lp_handle.close()

    return lp_data
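To make the LP syntax concrete, here is an illustrative call on a toy instance; the two nodes and the single overlap constraint are hypothetical, and since the same constraint set is passed for both axes, the y-axis loop is skipped:

    # Two overlapping blocks with scores 50 and 40, competing under a 1:1 quota.
    nodes = [(1, 50), (2, 40)]
    constraints = set([(0, 1)])
    print(format_lp(nodes, constraints, 1, constraints, 1))
    # Maximize
    #  + 50 x1 + 40 x2
    # Subject To
    #  x1 + x2 <= 1
    # Binary
    #  x1
    #  x2
    # End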
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def solve_lp(clusters, quota, work_dir="work", Nmax=0, self_match=False, solver="SCIP", verbose=False): """ Solve the formatted LP instance """
    qb, qa = quota  # flip it
    nodes, constraints_x, constraints_y = get_constraints(
        clusters, (qa, qb), Nmax=Nmax)

    if self_match:
        constraints_x = constraints_y = constraints_x | constraints_y

    lp_data = format_lp(nodes, constraints_x, qa, constraints_y, qb)

    if solver == "SCIP":
        filtered_list = SCIPSolver(lp_data, work_dir, verbose=verbose).results
        if not filtered_list:
            print("SCIP fails... trying GLPK", file=sys.stderr)
            filtered_list = GLPKSolver(
                lp_data, work_dir, verbose=verbose).results
    elif solver == "GLPK":
        filtered_list = GLPKSolver(lp_data, work_dir, verbose=verbose).results
        if not filtered_list:
            print("GLPK fails... trying SCIP", file=sys.stderr)
            filtered_list = SCIPSolver(
                lp_data, work_dir, verbose=verbose).results

    return filtered_list
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def print_maps_by_type(map_type, number=None): """ Print all available maps of a given type. Parameters map_type : {'Sequential', 'Diverging', 'Qualitative'} Select map type to print. number : int, optional Filter output by number of defined colors. By default there is no numeric filtering. """
    map_type = map_type.lower().capitalize()
    if map_type not in MAP_TYPES:
        s = 'Invalid map type, must be one of {0}'.format(MAP_TYPES)
        raise ValueError(s)

    print(map_type)

    map_keys = sorted(COLOR_MAPS[map_type].keys())

    format_str = '{0:8} : {1}'
    for mk in map_keys:
        num_keys = sorted(COLOR_MAPS[map_type][mk].keys(), key=int)
        if not number or str(number) in num_keys:
            num_str = '{' + ', '.join(num_keys) + '}'
            print(format_str.format(mk, num_str))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_map(name, map_type, number, reverse=False): """ Return a `BrewerMap` representation of the specified color map. Parameters name : str Name of color map. Use `print_maps` to see available color maps. map_type : {'Sequential', 'Diverging', 'Qualitative'} Select color map type. number : int Number of defined colors in color map. reverse : bool, optional Set to True to get the reversed color map. """
    number = str(number)
    map_type = map_type.lower().capitalize()

    # check for valid type
    if map_type not in MAP_TYPES:
        s = 'Invalid map type, must be one of {0}'.format(MAP_TYPES)
        raise ValueError(s)

    # make a dict of lower case map name to map name so this can be
    # insensitive to case.
    # this would be a perfect spot for a dict comprehension but going to
    # wait on that to preserve 2.6 compatibility.
    # map_names = {k.lower(): k for k in COLOR_MAPS[map_type].iterkeys()}
    map_names = dict((k.lower(), k) for k in COLOR_MAPS[map_type].keys())

    # check for valid name
    if name.lower() not in map_names:
        s = 'Invalid color map name {0!r} for type {1!r}.\n'
        s = s.format(name, map_type)
        valid_names = [str(k) for k in COLOR_MAPS[map_type].keys()]
        valid_names.sort()
        s += 'Valid names are: {0}'.format(valid_names)
        raise ValueError(s)

    name = map_names[name.lower()]

    # check for valid number
    if number not in COLOR_MAPS[map_type][name]:
        s = 'Invalid number for map type {0!r} and name {1!r}.\n'
        s = s.format(map_type, str(name))
        valid_numbers = [int(k) for k in COLOR_MAPS[map_type][name].keys()]
        valid_numbers.sort()
        s += 'Valid numbers are : {0}'.format(valid_numbers)
        raise ValueError(s)

    colors = COLOR_MAPS[map_type][name][number]['Colors']

    if reverse:
        name += '_r'
        colors = [x for x in reversed(colors)]

    return BrewerMap(name, map_type, colors)
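For context, a minimal usage sketch (assuming the surrounding module with its COLOR_MAPS data is importable; 'Blues' is a standard ColorBrewer sequential scheme):

    bmap = get_map('Blues', 'Sequential', 5)
    print(bmap.name, len(bmap.colors))  # Blues 5
    # Lookup is case-insensitive, and reverse=True appends '_r' to the name
    rmap = get_map('blues', 'sequential', 5, reverse=True)
    print(rmap.name)  # Blues_r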
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _load_maps_by_type(map_type): """ Load all maps of a given type into a dictionary. Color maps are loaded as BrewerMap objects. Dictionary is keyed by map name and then integer numbers of defined colors. There is an additional 'max' key that points to the color map with the largest number of defined colors. Parameters map_type : {'Sequential', 'Diverging', 'Qualitative'} Returns ------- maps : dict of BrewerMap """
    seq_maps = COLOR_MAPS[map_type]

    loaded_maps = {}

    for map_name in seq_maps:
        loaded_maps[map_name] = {}

        for num in seq_maps[map_name]:
            inum = int(num)
            colors = seq_maps[map_name][num]['Colors']

            bmap = BrewerMap(map_name, map_type, colors)

            loaded_maps[map_name][inum] = bmap

        max_num = int(max(seq_maps[map_name].keys(), key=int))
        loaded_maps[map_name]['max'] = loaded_maps[map_name][max_num]

    return loaded_maps
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def mpl_colors(self): """ Colors expressed on the range 0-1 as used by matplotlib. """
    mc = []
    for color in self.colors:
        mc.append(tuple([x / 255. for x in color]))
    return mc
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_mpl_colormap(self, **kwargs): """ A color map that can be used in matplotlib plots. Requires matplotlib to be importable. Keyword arguments are passed to `matplotlib.colors.LinearSegmentedColormap.from_list`. """
    if not HAVE_MPL:    # pragma: no cover
        raise RuntimeError('matplotlib not available.')

    cmap = LinearSegmentedColormap.from_list(self.name,
                                             self.mpl_colors, **kwargs)

    return cmap
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def show_as_blocks(self, block_size=100): """ Show colors in the IPython Notebook using ipythonblocks. Parameters block_size : int, optional Size of displayed blocks. """
    from ipythonblocks import BlockGrid

    grid = BlockGrid(self.number, 1, block_size=block_size)

    for block, color in zip(grid, self.colors):
        block.rgb = color

    grid.show()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def colorbrewer2_url(self): """ URL that can be used to view the color map at colorbrewer2.org. """
    url = 'http://colorbrewer2.org/index.html?type={0}&scheme={1}&n={2}'
    return url.format(self.type.lower(), self.name, self.number)
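For example (illustrative only), a 9-class diverging RdBu map would produce:

    # get_map('RdBu', 'Diverging', 9).colorbrewer2_url
    # -> 'http://colorbrewer2.org/index.html?type=diverging&scheme=RdBu&n=9'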
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def summary(args): """ %prog summary old.new.chain old.fasta new.fasta Provide stats of the chain file. """
    from jcvi.formats.fasta import summary as fsummary
    from jcvi.utils.cbook import percentage, human_size

    p = OptionParser(summary.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 3:
        sys.exit(not p.print_help())

    chainfile, oldfasta, newfasta = args
    chain = Chain(chainfile)
    ungapped, dt, dq = chain.ungapped, chain.dt, chain.dq
    print("File `{0}` contains {1} chains.".
          format(chainfile, len(chain)), file=sys.stderr)
    print("ungapped={0} dt={1} dq={2}".
          format(human_size(ungapped), human_size(dt), human_size(dq)),
          file=sys.stderr)

    oldreal, oldnn, oldlen = fsummary([oldfasta, "--outfile=/dev/null"])
    print("Old fasta (`{0}`) mapped: {1}".
          format(oldfasta, percentage(ungapped, oldreal)), file=sys.stderr)

    newreal, newnn, newlen = fsummary([newfasta, "--outfile=/dev/null"])
    print("New fasta (`{0}`) mapped: {1}".
          format(newfasta, percentage(ungapped, newreal)), file=sys.stderr)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fromagp(args): """ %prog fromagp agpfile componentfasta objectfasta Generate chain file from AGP format. The components represent the old genome (target) and the objects represent the new genome (query). """
    from jcvi.formats.agp import AGP
    from jcvi.formats.sizes import Sizes

    p = OptionParser(fromagp.__doc__)
    p.add_option("--novalidate", default=False, action="store_true",
                 help="Do not validate AGP")
    opts, args = p.parse_args(args)

    if len(args) != 3:
        sys.exit(not p.print_help())

    agpfile, componentfasta, objectfasta = args
    chainfile = agpfile.rsplit(".", 1)[0] + ".chain"
    fw = open(chainfile, "w")
    agp = AGP(agpfile, validate=(not opts.novalidate))
    componentsizes = Sizes(componentfasta).mapping
    objectsizes = Sizes(objectfasta).mapping
    chain = "chain"
    score = 1000
    tStrand = "+"
    id = 0
    for a in agp:
        if a.is_gap:
            continue

        tName = a.component_id
        tSize = componentsizes[tName]
        tStart = a.component_beg
        tEnd = a.component_end
        tStart -= 1

        qName = a.object
        qSize = objectsizes[qName]
        qStrand = "-" if a.orientation == "-" else "+"
        qStart = a.object_beg
        qEnd = a.object_end
        if qStrand == '-':
            _qStart = qSize - qEnd + 1
            _qEnd = qSize - qStart + 1
            qStart, qEnd = _qStart, _qEnd
        qStart -= 1

        id += 1
        size = a.object_span
        headerline = "\t".join(str(x) for x in (
            chain, score, tName, tSize, tStrand, tStart,
            tEnd, qName, qSize, qStrand, qStart, qEnd, id
        ))
        alignmentline = size
        print(headerline, file=fw)
        print(alignmentline, file=fw)
        print(file=fw)

    fw.close()
    logging.debug("File written to `{0}`.".format(chainfile))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def blat(args): """ %prog blat old.fasta new.fasta Generate psl file using blat. """
    p = OptionParser(blat.__doc__)
    p.add_option("--minscore", default=100, type="int",
                 help="Matches minus mismatches gap penalty [default: %default]")
    p.add_option("--minid", default=98, type="int",
                 help="Minimum sequence identity [default: %default]")
    p.set_cpus()
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    oldfasta, newfasta = args
    twobitfiles = []
    for fastafile in args:
        tbfile = faToTwoBit(fastafile)
        twobitfiles.append(tbfile)
    oldtwobit, newtwobit = twobitfiles

    cmd = "pblat -threads={0}".format(opts.cpus) if which("pblat") else "blat"
    cmd += " {0} {1}".format(oldtwobit, newfasta)
    cmd += " -tileSize=12 -minScore={0} -minIdentity={1} ".\
        format(opts.minscore, opts.minid)
    pslfile = "{0}.{1}.psl".format(*(op.basename(x).split('.')[0]
                                     for x in (newfasta, oldfasta)))
    cmd += pslfile
    sh(cmd)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def frompsl(args): """ %prog frompsl old.new.psl old.fasta new.fasta Generate chain file from psl file. The pipeline is described in: <http://genomewiki.ucsc.edu/index.php/Minimal_Steps_For_LiftOver> """
    from jcvi.formats.sizes import Sizes

    p = OptionParser(frompsl.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 3:
        sys.exit(not p.print_help())

    pslfile, oldfasta, newfasta = args
    pf = oldfasta.split(".")[0]

    # Chain together alignments using axtChain
    chainfile = pf + ".chain"
    twobitfiles = []
    for fastafile in (oldfasta, newfasta):
        tbfile = faToTwoBit(fastafile)
        twobitfiles.append(tbfile)
    oldtwobit, newtwobit = twobitfiles

    if need_update(pslfile, chainfile):
        cmd = "axtChain -linearGap=medium -psl {0}".format(pslfile)
        cmd += " {0} {1} {2}".format(oldtwobit, newtwobit, chainfile)
        sh(cmd)

    # Sort chain files
    sortedchain = chainfile.rsplit(".", 1)[0] + ".sorted.chain"
    if need_update(chainfile, sortedchain):
        cmd = "chainSort {0} {1}".format(chainfile, sortedchain)
        sh(cmd)

    # Make alignment nets from chains
    netfile = pf + ".net"
    oldsizes = Sizes(oldfasta).filename
    newsizes = Sizes(newfasta).filename
    if need_update((sortedchain, oldsizes, newsizes), netfile):
        cmd = "chainNet {0} {1} {2}".format(sortedchain, oldsizes, newsizes)
        cmd += " {0} /dev/null".format(netfile)
        sh(cmd)

    # Create liftOver chain file
    liftoverfile = pf + ".liftover.chain"
    if need_update((netfile, sortedchain), liftoverfile):
        cmd = "netChainSubset {0} {1} {2}".\
            format(netfile, sortedchain, liftoverfile)
        sh(cmd)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def lastz_to_blast(row): """ Convert the lastz tabular to the blast tabular, see headers above Obsolete after LASTZ version 1.02.40 """
    atoms = row.strip().split("\t")
    name1, name2, coverage, identity, nmismatch, ngap, \
        start1, end1, strand1, start2, end2, strand2, score = atoms

    identity = identity.replace("%", "")
    hitlen = coverage.split("/")[1]
    score = float(score)

    same_strand = (strand1 == strand2)
    if not same_strand:
        start2, end2 = end2, start2

    evalue = blastz_score_to_ncbi_expectation(score)
    score = blastz_score_to_ncbi_bits(score)
    evalue, score = "%.2g" % evalue, "%.1f" % score

    return "\t".join((name1, name2, identity, hitlen, nmismatch, ngap,
                      start1, end1, start2, end2, evalue, score))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def lastz_2bit(t): """ Used for formats other than BLAST, i.e. lav, maf, etc. which requires the database file to contain a single FASTA record. """
    bfasta_fn, afasta_fn, outfile, lastz_bin, extra, mask, format = t
    ref_tags = [Darkspace]
    qry_tags = [Darkspace]
    ref_tags, qry_tags = add_mask(ref_tags, qry_tags, mask=mask)

    lastz_cmd = Lastz_template.format(lastz_bin, bfasta_fn, ref_tags,
                                      afasta_fn, qry_tags)
    if extra:
        lastz_cmd += " " + extra.strip()

    lastz_cmd += " --format={0}".format(format)
    proc = Popen(lastz_cmd)
    out_fh = open(outfile, "w")

    logging.debug("job <%d> started: %s" % (proc.pid, lastz_cmd))
    for row in proc.stdout:
        out_fh.write(row)
        out_fh.flush()
    logging.debug("job <%d> finished" % proc.pid)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def augustus(args): """ %prog augustus fastafile Run parallel AUGUSTUS. Final results can be reformatted using annotation.reformat.augustus(). """
    p = OptionParser(augustus.__doc__)
    p.add_option("--species", default="maize",
                 help="Use species model for prediction")
    p.add_option("--hintsfile", help="Hint-guided AUGUSTUS")
    p.add_option("--nogff3", default=False, action="store_true",
                 help="Turn --gff3=off")
    p.set_home("augustus")
    p.set_cpus()
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    fastafile, = args
    cpus = opts.cpus
    mhome = opts.augustus_home
    gff3 = not opts.nogff3
    suffix = ".gff3" if gff3 else ".out"
    cfgfile = op.join(mhome, "config/extrinsic/extrinsic.M.RM.E.W.cfg")

    outdir = mkdtemp(dir=".")
    fs = split([fastafile, outdir, str(cpus)])

    augustuswrap_params = partial(augustuswrap, species=opts.species,
                                  gff3=gff3, cfgfile=cfgfile,
                                  hintsfile=opts.hintsfile)
    g = Jobs(augustuswrap_params, fs.names)
    g.run()

    gff3files = [x.rsplit(".", 1)[0] + suffix for x in fs.names]
    outfile = fastafile.rsplit(".", 1)[0] + suffix
    FileMerger(gff3files, outfile=outfile).merge()
    shutil.rmtree(outdir)

    if gff3:
        from jcvi.annotation.reformat import augustus as reformat_augustus
        reformat_outfile = outfile.replace(".gff3", ".reformat.gff3")
        reformat_augustus([outfile, "--outfile={0}".format(reformat_outfile)])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def star(args): """ %prog star folder reference Run star on a folder with reads. """
    p = OptionParser(star.__doc__)
    p.add_option("--single", default=False, action="store_true",
                 help="Single end mapping")
    p.set_fastq_names()
    p.set_cpus()
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    folder, reference = args
    cpus = opts.cpus
    mm = MakeManager()
    num = 1 if opts.single else 2

    gd = "GenomeDir"
    mkdir(gd)
    STAR = "STAR --runThreadN {0} --genomeDir {1}".format(cpus, gd)

    # Step 0: build genome index
    genomeidx = op.join(gd, "Genome")
    if need_update(reference, genomeidx):
        cmd = STAR + " --runMode genomeGenerate"
        cmd += " --genomeFastaFiles {0}".format(reference)
        mm.add(reference, genomeidx, cmd)

    # Step 1: align
    for p, prefix in iter_project(folder, opts.names, num):
        pf = "{0}_star".format(prefix)
        bamfile = pf + "Aligned.sortedByCoord.out.bam"
        cmd = STAR + " --readFilesIn {0}".format(" ".join(p))
        if p[0].endswith(".gz"):
            cmd += " --readFilesCommand zcat"
        cmd += " --outSAMtype BAM SortedByCoordinate"
        cmd += " --outFileNamePrefix {0}".format(pf)
        cmd += " --twopassMode Basic"
        # Compatibility for cufflinks
        cmd += " --outSAMstrandField intronMotif"
        cmd += " --outFilterIntronMotifs RemoveNoncanonical"
        mm.add(p, bamfile, cmd)

    mm.write()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def cufflinks(args): """ %prog cufflinks folder reference Run cufflinks on a folder containing tophat results. """
    p = OptionParser(cufflinks.__doc__)
    p.add_option("--gtf", help="Reference annotation [default: %default]")
    p.set_cpus()
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    folder, reference = args
    cpus = opts.cpus
    gtf = opts.gtf
    transcripts = "transcripts.gtf"

    mm = MakeManager()
    gtfs = []
    for bam in iglob(folder, "*.bam"):
        pf = op.basename(bam).split(".")[0]
        outdir = pf + "_cufflinks"
        cmd = "cufflinks"
        cmd += " -o {0}".format(outdir)
        cmd += " -p {0}".format(cpus)
        if gtf:
            cmd += " -g {0}".format(gtf)
        cmd += " --frag-bias-correct {0}".format(reference)
        cmd += " --multi-read-correct"
        cmd += " {0}".format(bam)
        cgtf = op.join(outdir, transcripts)
        mm.add(bam, cgtf, cmd)
        gtfs.append(cgtf)

    assemblylist = "assembly_list.txt"
    cmd = 'find . -name "{0}" > {1}'.format(transcripts, assemblylist)
    mm.add(gtfs, assemblylist, cmd)

    mergedgtf = "merged/merged.gtf"
    cmd = "cuffmerge"
    cmd += " -o merged"
    cmd += " -p {0}".format(cpus)
    if gtf:
        cmd += " -g {0}".format(gtf)
    cmd += " -s {0}".format(reference)
    cmd += " {0}".format(assemblylist)
    mm.add(assemblylist, mergedgtf, cmd)

    mm.write()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def tophat(args): """ %prog tophat folder reference Run tophat on a folder of reads. """
    from jcvi.apps.bowtie import check_index
    from jcvi.formats.fastq import guessoffset

    p = OptionParser(tophat.__doc__)
    p.add_option("--gtf", help="Reference annotation [default: %default]")
    p.add_option("--single", default=False, action="store_true",
                 help="Single end mapping")
    p.add_option("--intron", default=15000, type="int",
                 help="Max intron size [default: %default]")
    p.add_option("--dist", default=-50, type="int",
                 help="Mate inner distance [default: %default]")
    p.add_option("--stdev", default=50, type="int",
                 help="Mate standard deviation [default: %default]")
    p.set_phred()
    p.set_cpus()
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    num = 1 if opts.single else 2
    folder, reference = args
    reference = check_index(reference)
    for p, prefix in iter_project(folder, n=num):
        outdir = "{0}_tophat".format(prefix)
        outfile = op.join(outdir, "accepted_hits.bam")
        if op.exists(outfile):
            logging.debug("File `{0}` found. Skipping.".format(outfile))
            continue

        cmd = "tophat -p {0}".format(opts.cpus)
        if opts.gtf:
            cmd += " -G {0}".format(opts.gtf)
        cmd += " -o {0}".format(outdir)

        if num == 1:  # Single-end
            a, = p
        else:  # Paired-end
            a, b = p
            cmd += " --max-intron-length {0}".format(opts.intron)
            cmd += " --mate-inner-dist {0}".format(opts.dist)
            cmd += " --mate-std-dev {0}".format(opts.stdev)

        phred = opts.phred or str(guessoffset([a]))
        if phred == "64":
            cmd += " --phred64-quals"
        cmd += " {0} {1}".format(reference, " ".join(p))
        sh(cmd)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def hmean_int(a, a_min=5778, a_max=1149851): """ Harmonic mean of an array, returns the closest int """
    from scipy.stats import hmean
    return int(round(hmean(np.clip(a, a_min, a_max))))
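A small illustrative call (the input values are hypothetical); note that both tails are clipped to [a_min, a_max] before the harmonic mean is taken:

    # 4000 is clipped up to 5778 and 2000000 is clipped down to 1149851
    print(hmean_int([4000, 10000, 2000000]))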
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def golden_array(a, phi=1.61803398875, lb=LB, ub=UB): """ Given list of ints, we aggregate similar values so that it becomes an array of multiples of phi, where phi is the golden ratio. phi ^ 14 = 843 phi ^ 33 = 7881196 So the array of counts goes from 843 to 7881196. One bit of trivia is that the exponents of phi get closer to integers as N grows. See interesting discussion here: <https://www.johndcook.com/blog/2017/03/22/golden-powers-are-nearly-integers/> """
    counts = np.zeros(BB, dtype=int)
    for x in a:
        c = int(round(math.log(x, phi)))
        if c < lb:
            c = lb
        if c > ub:
            c = ub
        counts[c - lb] += 1
    return counts
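The module-level constants LB, UB, and BB are not shown in this excerpt; judging from the docstring (phi^14 = 843, phi^33 = 7881196), a consistent assumption would be LB = 14, UB = 33, and BB = UB - LB + 1 = 20. Under that assumption, nearby values land in the same bin:

    import math

    # Assumed values, inferred from the docstring; the real module may differ.
    LB, UB = 14, 33
    BB = UB - LB + 1  # 20 bins

    # 1000 and 1050 share a bin: round(log_phi(1000)) == round(log_phi(1050)) == 14
    print(int(round(math.log(1000, 1.61803398875))))  # 14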
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def heatmap(args): """ %prog heatmap input.npy genome.json Plot heatmap based on .npy data file. The .npy stores a square matrix with bins of genome, and cells inside the matrix represent number of links between bin i and bin j. The `genome.json` contains the offsets of each contig/chr so that we know where to draw boundary lines, or extract per contig/chromosome heatmap. """
    p = OptionParser(heatmap.__doc__)
    p.add_option("--resolution", default=500000, type="int",
                 help="Resolution when counting the links")
    p.add_option("--chr", help="Plot this contig/chr only")
    p.add_option("--nobreaks", default=False, action="store_true",
                 help="Do not plot breaks (esp. if contigs are small)")
    opts, args, iopts = p.set_image_options(args, figsize="10x10",
                                            style="white", cmap="coolwarm",
                                            format="png", dpi=120)

    if len(args) != 2:
        sys.exit(not p.print_help())

    npyfile, jsonfile = args
    contig = opts.chr
    # Load contig/chromosome starts and sizes
    header = json.loads(open(jsonfile).read())
    resolution = header.get("resolution", opts.resolution)
    logging.debug("Resolution set to {}".format(resolution))
    # Load the matrix
    A = np.load(npyfile)

    # Select specific submatrix
    if contig:
        contig_start = header["starts"][contig]
        contig_size = header["sizes"][contig]
        contig_end = contig_start + contig_size
        A = A[contig_start:contig_end, contig_start:contig_end]

    # Several concerns in practice:
    # The diagonal counts may be too strong, this can either be resolved by
    # masking them. Or perform a log transform on the entire heatmap.
    B = A.astype("float64")
    B += 1.0
    B = np.log(B)
    vmin, vmax = 1, 7
    B[B < vmin] = vmin
    B[B > vmax] = vmax
    print(B)
    logging.debug("Matrix log-transformation and thresholding ({}-{}) done"
                  .format(vmin, vmax))

    # Canvas
    fig = plt.figure(1, (iopts.w, iopts.h))
    root = fig.add_axes([0, 0, 1, 1])      # whole canvas
    ax = fig.add_axes([.05, .05, .9, .9])  # just the heatmap

    breaks = header["starts"].values()
    breaks += [header["total_bins"]]  # This is actually discarded
    breaks = sorted(breaks)[1:]
    if contig or opts.nobreaks:
        breaks = []
    plot_heatmap(ax, B, breaks, iopts, binsize=resolution)

    # Title
    pf = npyfile.rsplit(".", 1)[0]
    title = pf
    if contig:
        title += "-{}".format(contig)
    root.text(.5, .98, title, color="darkslategray", size=18,
              ha="center", va="center")

    normalize_axes(root)
    image_name = title + "." + iopts.format
    # macOS sometimes has way too verbose output
    logging.getLogger().setLevel(logging.CRITICAL)
    savefig(image_name, dpi=iopts.dpi, iopts=iopts)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_seqstarts(bamfile, N): """ Go through the SQ headers and pull out all sequences with size greater than the resolution settings, i.e. contains at least a few cells """
    import pysam
    bamfile = pysam.AlignmentFile(bamfile, "rb")
    seqsize = {}
    for kv in bamfile.header["SQ"]:
        if kv["LN"] < 10 * N:
            continue
        seqsize[kv["SN"]] = kv["LN"] / N + 1

    allseqs = natsorted(seqsize.keys())
    allseqsizes = np.array([seqsize[x] for x in allseqs])
    seqstarts = np.cumsum(allseqsizes)
    seqstarts = np.roll(seqstarts, 1)
    total_bins = seqstarts[0]
    seqstarts[0] = 0
    seqstarts = dict(zip(allseqs, seqstarts))

    return seqstarts, seqsize, total_bins
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_distbins(start=100, bins=2500, ratio=1.01): """ Get exponentially sized distance bins. """
    b = np.ones(bins, dtype="float64")
    b[0] = start
    for i in range(1, bins):
        b[i] = b[i - 1] * ratio
    bins = np.around(b).astype(dtype="int")
    binsizes = np.diff(bins)
    return bins, binsizes
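Quick illustrative check of the binning (default start=100, ratio=1.01; over the full 2500 bins the edges grow from 100 to roughly 6e12):

    bins, binsizes = get_distbins(bins=5)
    print(bins)      # [100 101 102 103 104]
    print(binsizes)  # [1 1 1 1]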
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def simulate(args): """ %prog simulate test Simulate CLM and IDS files with given names. The simulator assumes several distributions: - Links are distributed uniformly across genome - Log10(link_size) are distributed normally - Genes are distributed uniformly """
    p = OptionParser(simulate.__doc__)
    p.add_option("--genomesize", default=10000000, type="int",
                 help="Genome size")
    p.add_option("--genes", default=1000, type="int",
                 help="Number of genes")
    p.add_option("--contigs", default=100, type="int",
                 help="Number of contigs")
    p.add_option("--coverage", default=10, type="int",
                 help="Link coverage")
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    pf, = args
    GenomeSize = opts.genomesize
    Genes = opts.genes
    Contigs = opts.contigs
    Coverage = opts.coverage
    PE = 500
    Links = int(GenomeSize * Coverage / PE)

    # Simulate the contig sizes that sum to GenomeSize
    # See also:
    # <https://en.wikipedia.org/wiki/User:Skinnerd/Simplex_Point_Picking>
    ContigSizes, = np.random.dirichlet([1] * Contigs, 1) * GenomeSize
    ContigSizes = np.array(np.round_(ContigSizes, decimals=0), dtype=int)
    ContigStarts = np.zeros(Contigs, dtype=int)
    ContigStarts[1:] = np.cumsum(ContigSizes)[:-1]

    # Write IDS file
    idsfile = pf + ".ids"
    fw = open(idsfile, "w")
    print("#Contig\tRECounts\tLength", file=fw)
    for i, s in enumerate(ContigSizes):
        print("tig{:04d}\t{}\t{}".format(i, s / (4 ** 4), s), file=fw)
    fw.close()

    # Simulate the gene positions
    GenePositions = np.sort(np.random.random_integers(0, GenomeSize - 1,
                                                      size=Genes))
    write_last_and_beds(pf, GenePositions, ContigStarts)

    # Simulate links, uniform start, with link distances following 1/x, where x
    # is the distance between the links. As an approximation, we have links
    # between [1e3, 1e7], so we map from uniform [1e-7, 1e-3]
    LinkStarts = np.sort(np.random.random_integers(0, GenomeSize - 1,
                                                   size=Links))
    a, b = 1e-7, 1e-3
    LinkSizes = np.array(np.round_(1 / ((b - a) * np.random.rand(Links) + a),
                                   decimals=0), dtype="int")
    LinkEnds = LinkStarts + LinkSizes

    # Find link to contig membership
    LinkStartContigs = np.searchsorted(ContigStarts, LinkStarts) - 1
    LinkEndContigs = np.searchsorted(ContigStarts, LinkEnds) - 1

    # Extract inter-contig links
    InterContigLinks = (LinkStartContigs != LinkEndContigs) & \
                       (LinkEndContigs != Contigs)
    ICLinkStartContigs = LinkStartContigs[InterContigLinks]
    ICLinkEndContigs = LinkEndContigs[InterContigLinks]
    ICLinkStarts = LinkStarts[InterContigLinks]
    ICLinkEnds = LinkEnds[InterContigLinks]

    # Write CLM file
    write_clm(pf, ICLinkStartContigs, ICLinkEndContigs,
              ICLinkStarts, ICLinkEnds,
              ContigStarts, ContigSizes)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def write_last_and_beds(pf, GenePositions, ContigStarts): """ Write LAST file, query and subject BED files. """
    qbedfile = pf + "tigs.bed"
    sbedfile = pf + "chr.bed"
    lastfile = "{}tigs.{}chr.last".format(pf, pf)
    qbedfw = open(qbedfile, "w")
    sbedfw = open(sbedfile, "w")
    lastfw = open(lastfile, "w")

    GeneContigs = np.searchsorted(ContigStarts, GenePositions) - 1
    for i, (c, gstart) in enumerate(zip(GeneContigs, GenePositions)):
        gene = "gene{:05d}".format(i)
        tig = "tig{:04d}".format(c)
        start = ContigStarts[c]
        cstart = gstart - start
        print("\t".join(str(x) for x in (tig, cstart, cstart + 1, gene)),
              file=qbedfw)
        print("\t".join(str(x) for x in ("chr1", gstart, gstart + 1, gene)),
              file=sbedfw)
        lastatoms = [gene, gene, 100] + [0] * 8 + [100]
        print("\t".join(str(x) for x in lastatoms), file=lastfw)

    qbedfw.close()
    sbedfw.close()
    lastfw.close()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def write_clm(pf, ICLinkStartContigs, ICLinkEndContigs, ICLinkStarts, ICLinkEnds, ContigStarts, ContigSizes): """ Write CLM file from simulated data. """
    clm = defaultdict(list)
    for start, end, linkstart, linkend in \
            zip(ICLinkStartContigs, ICLinkEndContigs,
                ICLinkStarts, ICLinkEnds):
        start_a = ContigStarts[start]
        start_b = start_a + ContigSizes[start]
        end_a = ContigStarts[end]
        end_b = end_a + ContigSizes[end]
        if linkend >= end_b:
            continue
        clm[(start, end)].append((linkstart - start_a, start_b - linkstart,
                                  linkend - end_a, end_b - linkend))

    clmfile = pf + ".clm"
    fw = open(clmfile, "w")

    def format_array(a):
        return [str(x) for x in sorted(a) if x > 0]

    for (start, end), links in sorted(clm.items()):
        start = "tig{:04d}".format(start)
        end = "tig{:04d}".format(end)
        nlinks = len(links)
        if not nlinks:
            continue
        ff = format_array([(b + c) for a, b, c, d in links])
        fr = format_array([(b + d) for a, b, c, d in links])
        rf = format_array([(a + c) for a, b, c, d in links])
        rr = format_array([(a + d) for a, b, c, d in links])
        print("{}+ {}+\t{}\t{}".format(start, end, nlinks, " ".join(ff)), file=fw)
        print("{}+ {}-\t{}\t{}".format(start, end, nlinks, " ".join(fr)), file=fw)
        print("{}- {}+\t{}\t{}".format(start, end, nlinks, " ".join(rf)), file=fw)
        print("{}- {}-\t{}\t{}".format(start, end, nlinks, " ".join(rr)), file=fw)
    fw.close()
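The four rows written per contig pair encode the four orientation combinations; an illustrative (hypothetical) fragment of the resulting CLM file might look like:

    # tig0001+ tig0002+    2    1200 3500
    # tig0001+ tig0002-    2    1800 4100
    # tig0001- tig0002+    2    900 2600
    # tig0001- tig0002-    2    1500 3200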
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def density(args): """ %prog density test.clm Estimate link density of contigs. """
    p = OptionParser(density.__doc__)
    p.add_option("--save", default=False, action="store_true",
                 help="Write log densities of contigs to file")
    p.set_cpus()
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    clmfile, = args
    clm = CLMFile(clmfile)
    pf = clmfile.rsplit(".", 1)[0]

    if opts.save:
        logdensities = clm.calculate_densities()
        densityfile = pf + ".density"
        fw = open(densityfile, "w")
        for name, logd in logdensities.items():
            s = clm.tig_to_size[name]
            print("\t".join(str(x) for x in (name, s, logd)), file=fw)
        fw.close()
        logging.debug("Density written to `{}`".format(densityfile))

    tourfile = clmfile.rsplit(".", 1)[0] + ".tour"
    tour = clm.activate(tourfile=tourfile, backuptour=False)
    clm.flip_all(tour)
    clm.flip_whole(tour)
    clm.flip_one(tour)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def optimize(args): """ %prog optimize test.clm Optimize the contig order and orientation, based on CLM file. """
    p = OptionParser(optimize.__doc__)
    p.add_option("--skiprecover", default=False, action="store_true",
                 help="Do not import 'recover' contigs")
    p.add_option("--startover", default=False, action="store_true",
                 help="Do not resume from existing tour file")
    p.add_option("--skipGA", default=False, action="store_true",
                 help="Skip GA step")
    p.set_outfile(outfile=None)
    p.set_cpus()
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    clmfile, = args
    startover = opts.startover
    runGA = not opts.skipGA
    cpus = opts.cpus

    # Load contact map
    clm = CLMFile(clmfile, skiprecover=opts.skiprecover)

    tourfile = opts.outfile or clmfile.rsplit(".", 1)[0] + ".tour"
    if startover:
        tourfile = None
    tour = clm.activate(tourfile=tourfile)

    fwtour = open(tourfile, "w")
    # Store INIT tour
    print_tour(fwtour, clm.tour, "INIT",
               clm.active_contigs, clm.oo, signs=clm.signs)

    if runGA:
        for phase in range(1, 3):
            tour = optimize_ordering(fwtour, clm, phase, cpus)
            tour = clm.prune_tour(tour, cpus)

    # Flip orientations
    phase = 1
    while True:
        tag1, tag2 = optimize_orientations(fwtour, clm, phase, cpus)
        if tag1 == REJECT and tag2 == REJECT:
            logging.debug("Terminating ... no more {}".format(ACCEPT))
            break
        phase += 1

    fwtour.close()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def optimize_orientations(fwtour, clm, phase, cpus): """ Optimize the orientations of contigs by using heuristic flipping. """
    # Prepare input files
    tour_contigs = clm.active_contigs
    tour = clm.tour
    oo = clm.oo

    print_tour(fwtour, tour, "FLIPALL{}".format(phase),
               tour_contigs, oo, signs=clm.signs)
    tag1 = clm.flip_whole(tour)
    print_tour(fwtour, tour, "FLIPWHOLE{}".format(phase),
               tour_contigs, oo, signs=clm.signs)
    tag2 = clm.flip_one(tour)
    print_tour(fwtour, tour, "FLIPONE{}".format(phase),
               tour_contigs, oo, signs=clm.signs)

    return tag1, tag2
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def iter_last_tour(tourfile, clm): """ Extract last tour from tourfile. The clm instance is also passed in to see if any contig is covered in the clm. """
    row = open(tourfile).readlines()[-1]
    _tour, _tour_o = separate_tour_and_o(row)
    tour = []
    tour_o = []
    for tc, to in zip(_tour, _tour_o):
        if tc not in clm.contigs:
            logging.debug("Contig `{}` in file `{}` not found in `{}`"
                          .format(tc, tourfile, clm.idsfile))
            continue
        tour.append(tc)
        tour_o.append(to)

    return tour, tour_o
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def iter_tours(tourfile, frames=1): """ Extract tours from tourfile. Tourfile contains a set of contig configurations, generated at each iteration of the genetic algorithm. Each configuration has two rows, first row contains iteration id and score, second row contains list of contigs, separated by comma. """
    fp = open(tourfile)
    i = 0
    for row in fp:
        if row[0] == '>':
            label = row[1:].strip()
            if label.startswith("GA"):
                pf, j, score = label.split("-", 2)
                j = int(j)
            else:
                j = 0
            i += 1
        else:
            if j % frames != 0:
                continue
            tour, tour_o = separate_tour_and_o(row)
            yield i, label, tour, tour_o
    fp.close()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def movie(args): """ %prog movie test.tour test.clm ref.contigs.last Plot optimization history. """
    p = OptionParser(movie.__doc__)
    p.add_option("--frames", default=500, type="int",
                 help="Only plot every N frames")
    p.add_option("--engine", default="ffmpeg", choices=("ffmpeg", "gifsicle"),
                 help="Movie engine, output MP4 or GIF")
    p.set_beds()
    opts, args, iopts = p.set_image_options(args, figsize="16x8",
                                            style="white", cmap="coolwarm",
                                            format="png", dpi=300)

    if len(args) != 3:
        sys.exit(not p.print_help())

    tourfile, clmfile, lastfile = args
    tourfile = op.abspath(tourfile)
    clmfile = op.abspath(clmfile)
    lastfile = op.abspath(lastfile)
    cwd = os.getcwd()
    odir = op.basename(tourfile).rsplit(".", 1)[0] + "-movie"
    anchorsfile, qbedfile, contig_to_beds = \
        prepare_synteny(tourfile, lastfile, odir, p, opts)

    args = []
    for i, label, tour, tour_o in iter_tours(tourfile, frames=opts.frames):
        padi = "{:06d}".format(i)
        # Make sure the anchorsfile and bedfile has the serial number in,
        # otherwise parallelization may fail
        a, b = op.basename(anchorsfile).split(".", 1)
        ianchorsfile = a + "_" + padi + "." + b
        symlink(anchorsfile, ianchorsfile)

        # Make BED file with new order
        qb = Bed()
        for contig, o in zip(tour, tour_o):
            if contig not in contig_to_beds:
                continue
            bedlines = contig_to_beds[contig][:]
            if o == '-':
                bedlines.reverse()
            for x in bedlines:
                qb.append(x)

        a, b = op.basename(qbedfile).split(".", 1)
        ibedfile = a + "_" + padi + "." + b
        qb.print_to_file(ibedfile)
        # Plot dot plot, but do not sort contigs by name (otherwise losing
        # order)
        image_name = padi + "." + iopts.format

        tour = ",".join(tour)
        args.append([[tour, clmfile, ianchorsfile,
                      "--outfile", image_name, "--label", label]])

    Jobs(movieframe, args).run()

    os.chdir(cwd)
    make_movie(odir, odir, engine=opts.engine, format=iopts.format)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def prepare_ec(oo, sizes, M): """ This prepares EC and converts from contig_id to an index. """
    tour = range(len(oo))
    tour_sizes = np.array([sizes.sizes[x] for x in oo])
    tour_M = M[oo, :][:, oo]
    return tour, tour_sizes, tour_M
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def score_evaluate(tour, tour_sizes=None, tour_M=None): """ SLOW python version of the evaluation function. For benchmarking purposes only. Do not use in production. """
    sizes_oo = np.array([tour_sizes[x] for x in tour])
    sizes_cum = np.cumsum(sizes_oo) - sizes_oo / 2
    s = 0
    size = len(tour)
    for ia in xrange(size):
        a = tour[ia]
        for ib in xrange(ia + 1, size):
            b = tour[ib]
            links = tour_M[a, b]
            dist = sizes_cum[ib] - sizes_cum[ia]
            if dist > 1e7:
                break
            s += links * 1. / dist
    return s,
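A tiny sanity check of the scorer (hypothetical two-contig input): each pair contributes links / distance, where distance is measured between contig mid-points:

    import numpy as np

    # Two contigs of size 100 with 5 links between them; mid-points are 100 apart.
    tour = [0, 1]
    tour_sizes = np.array([100, 100])
    tour_M = np.array([[0, 5], [5, 0]])
    print(score_evaluate(tour, tour_sizes, tour_M))  # (0.05,)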
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def movieframe(args): """ %prog movieframe tour test.clm contigs.ref.anchors Draw heatmap and synteny in the same plot. """
    p = OptionParser(movieframe.__doc__)
    p.add_option("--label", help="Figure title")
    p.set_beds()
    p.set_outfile(outfile=None)
    opts, args, iopts = p.set_image_options(args, figsize="16x8",
                                            style="white", cmap="coolwarm",
                                            format="png", dpi=120)

    if len(args) != 3:
        sys.exit(not p.print_help())

    tour, clmfile, anchorsfile = args
    tour = tour.split(",")
    image_name = opts.outfile or ("movieframe." + iopts.format)
    label = opts.label or op.basename(image_name).rsplit(".", 1)[0]

    clm = CLMFile(clmfile)
    totalbins, bins, breaks = make_bins(tour, clm.tig_to_size)
    M = read_clm(clm, totalbins, bins)

    fig = plt.figure(1, (iopts.w, iopts.h))
    root = fig.add_axes([0, 0, 1, 1])        # whole canvas
    ax1 = fig.add_axes([.05, .1, .4, .8])    # heatmap
    ax2 = fig.add_axes([.55, .1, .4, .8])    # dot plot
    ax2_root = fig.add_axes([.5, 0, .5, 1])  # dot plot canvas

    # Left axis: heatmap
    plot_heatmap(ax1, M, breaks, iopts)

    # Right axis: synteny
    qbed, sbed, qorder, sorder, is_self = check_beds(anchorsfile, p, opts,
                                                     sorted=False)
    dotplot(anchorsfile, qbed, sbed, fig, ax2_root, ax2, sep=False, title="")

    root.text(.5, .98, clm.name, color="g", ha="center", va="center")
    root.text(.5, .95, label, color="darkslategray", ha="center", va="center")
    normalize_axes(root)
    savefig(image_name, dpi=iopts.dpi, iopts=iopts)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def write_agp(self, obj, sizes, fw=sys.stdout, gapsize=100,
              gaptype="contig", evidence="map"):
    '''Converts the ContigOrdering file into AGP format
    '''
    contigorder = [(x.contig_name, x.strand) for x in self]
    order_to_agp(obj, contigorder, sizes, fw,
                 gapsize=gapsize, gaptype=gaptype, evidence=evidence)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def parse_ids(self, skiprecover):
    '''IDS file has a list of contigs that need to be ordered. 'recover',
    keyword, if available in the third column, is less confident.

    tig00015093  46912
    tig00035238  46779  recover
    tig00030900  119291
    '''
    idsfile = self.idsfile
    logging.debug("Parse idsfile `{}`".format(idsfile))
    fp = open(idsfile)
    tigs = []
    for row in fp:
        if row[0] == '#':  # Header
            continue
        atoms = row.split()
        tig, size = atoms[:2]
        size = int(size)
        if skiprecover and len(atoms) == 3 and atoms[2] == 'recover':
            continue
        tigs.append((tig, size))

    # Arrange contig names and sizes
    _tigs, _sizes = zip(*tigs)
    self.contigs = set(_tigs)
    self.sizes = np.array(_sizes)
    self.tig_to_size = dict(tigs)

    # Initially all contigs are considered active
    self.active = set(_tigs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def calculate_densities(self): """ Calculate the density of inter-contig links per base. Strong contigs are considered to have a high level of inter-contig links in the current partition. """
    active = self.active
    densities = defaultdict(int)
    for (at, bt), links in self.contacts.items():
        if not (at in active and bt in active):
            continue
        densities[at] += links
        densities[bt] += links

    logdensities = {}
    for x, d in densities.items():
        s = self.tig_to_size[x]
        logd = np.log10(d * 1. / min(s, 500000))
        logdensities[x] = logd

    return logdensities
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def evaluate_tour_M(self, tour): """ Use Cythonized version to evaluate the score of a current tour """
    from .chic import score_evaluate_M
    return score_evaluate_M(tour, self.active_sizes, self.M)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def evaluate_tour_P(self, tour): """ Use Cythonized version to evaluate the score of a current tour, with better precision on the distance of the contigs. """
    from .chic import score_evaluate_P
    return score_evaluate_P(tour, self.active_sizes, self.P)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def evaluate_tour_Q(self, tour): """ Use Cythonized version to evaluate the score of a current tour, taking orientation into consideration. This may be the most accurate evaluation under the right condition. """
    from .chic import score_evaluate_Q
    return score_evaluate_Q(tour, self.active_sizes, self.Q)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def flip_all(self, tour): """ Initialize the orientations based on pairwise O matrix. """
    if self.signs is None:  # First run
        score = 0
    else:
        old_signs = self.signs[:self.N]
        score, = self.evaluate_tour_Q(tour)

    # Remember we cannot have ambiguous orientation code (0 or '?') here
    self.signs = get_signs(self.O, validate=False, ambiguous=False)
    score_flipped, = self.evaluate_tour_Q(tour)
    if score_flipped >= score:
        tag = ACCEPT
    else:
        self.signs = old_signs[:]
        tag = REJECT
    self.flip_log("FLIPALL", score, score_flipped, tag)
    return tag
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def flip_whole(self, tour): """ Test flipping all contigs at the same time to see if score improves. """
    score, = self.evaluate_tour_Q(tour)
    self.signs = -self.signs
    score_flipped, = self.evaluate_tour_Q(tour)
    if score_flipped > score:
        tag = ACCEPT
    else:
        self.signs = -self.signs
        tag = REJECT
    self.flip_log("FLIPWHOLE", score, score_flipped, tag)
    return tag
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def flip_one(self, tour): """ Test flipping every single contig sequentially to see if score improves. """
    n_accepts = n_rejects = 0
    any_tag_ACCEPT = False
    for i, t in enumerate(tour):
        if i == 0:
            score, = self.evaluate_tour_Q(tour)
        self.signs[t] = -self.signs[t]
        score_flipped, = self.evaluate_tour_Q(tour)
        if score_flipped > score:
            n_accepts += 1
            tag = ACCEPT
        else:
            self.signs[t] = -self.signs[t]
            n_rejects += 1
            tag = REJECT
        self.flip_log("FLIPONE ({}/{})".format(i + 1, len(self.signs)),
                      score, score_flipped, tag)
        if tag == ACCEPT:
            any_tag_ACCEPT = True
            score = score_flipped
    logging.debug("FLIPONE: N_accepts={} N_rejects={}"
                  .format(n_accepts, n_rejects))
    return ACCEPT if any_tag_ACCEPT else REJECT
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def prune_tour(self, tour, cpus): """ Test deleting each contig and check the delta_score; tour here must be an array of ints. """
    while True:
        tour_score, = self.evaluate_tour_M(tour)
        logging.debug("Starting score: {}".format(tour_score))
        active_sizes = self.active_sizes
        M = self.M
        args = []
        for i, t in enumerate(tour):
            stour = tour[:i] + tour[i + 1:]
            args.append((t, stour, tour_score, active_sizes, M))

        # Parallel run
        p = Pool(processes=cpus)
        results = list(p.imap(prune_tour_worker, args))
        assert len(tour) == len(results), \
            "Array size mismatch, tour({}) != results({})"\
            .format(len(tour), len(results))

        # Identify outliers
        active_contigs = self.active_contigs
        idx, log10deltas = zip(*results)
        lb, ub = outlier_cutoff(log10deltas)
        logging.debug("Log10(delta_score) ~ [{}, {}]".format(lb, ub))
        remove = set(active_contigs[x] for (x, d) in results if d < lb)

        self.active -= remove
        self.report_active()

        tig_to_idx = self.tig_to_idx
        tour = [active_contigs[x] for x in tour]
        tour = array.array('i', [tig_to_idx[x] for x in tour
                                 if x not in remove])
        if not remove:
            break

    self.tour = tour
    self.flip_all(tour)

    return tour
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def M(self): """ Contact frequency matrix. Each cell contains the number of inter-contig links between the i-th and j-th contigs. """
    N = self.N
    tig_to_idx = self.tig_to_idx
    M = np.zeros((N, N), dtype=int)
    for (at, bt), links in self.contacts.items():
        if not (at in tig_to_idx and bt in tig_to_idx):
            continue
        ai = tig_to_idx[at]
        bi = tig_to_idx[bt]
        M[ai, bi] = M[bi, ai] = links
    return M
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def O(self): """ Pairwise strandedness matrix. Each cell contains +1 if the i-th and j-th contigs are in the same orientation, or -1 if they are in opposite orientations. """
N = self.N tig_to_idx = self.tig_to_idx O = np.zeros((N, N), dtype=int) for (at, bt), (strandedness, md, mh) in self.orientations.items(): if not (at in tig_to_idx and bt in tig_to_idx): continue ai = tig_to_idx[at] bi = tig_to_idx[bt] score = strandedness * md O[ai, bi] = O[bi, ai] = score return O
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def P(self): """ Contact frequency matrix with better precision on distance between contigs. In the matrix M, the distance is assumed to be the distance between mid-points of two contigs. In matrix P, however, we compute the harmonic mean of the links for the orientation configuration that is shortest. This offers better precision for the distance between big contigs. """
N = self.N tig_to_idx = self.tig_to_idx P = np.zeros((N, N, 2), dtype=int) for (at, bt), (strandedness, md, mh) in self.orientations.items(): if not (at in tig_to_idx and bt in tig_to_idx): continue ai = tig_to_idx[at] bi = tig_to_idx[bt] P[ai, bi, 0] = P[bi, ai, 0] = md P[ai, bi, 1] = P[bi, ai, 1] = mh return P
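The harmonic mean stored in P is what gives the docstring's "better precision": it is dominated by the short, informative link distances rather than by a few spanning outliers. A small numeric illustration with made-up distances:

import numpy as np

dists = np.array([1000, 1200, 50000])        # hypothetical link distances
arithmetic = dists.mean()                    # ~17400, dragged up by the outlier
harmonic = len(dists) / (1.0 / dists).sum()  # ~1600, close to the typical link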
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def Q(self): """ Contact frequency matrix when contigs are already oriented. This is a similar matrix to M, but rather than holding the number of links in each cell, it points to an array that has the actual distances. """
N = self.N
tig_to_idx = self.tig_to_idx
signs = self.signs
Q = np.ones((N, N, BB), dtype=int) * -1  # Use -1 as the sentinel
for (at, bt), k in self.contacts_oriented.items():
    if not (at in tig_to_idx and bt in tig_to_idx):
        continue
    ai = tig_to_idx[at]
    bi = tig_to_idx[bt]
    ao = signs[ai]
    bo = signs[bi]
    Q[ai, bi] = k[(ao, bo)]
return Q
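Any consumer of Q has to mask the -1 sentinel before using the distances, since each (i, j) cell is padded out to BB entries (presumably a module-level bound on links stored per pair). The real scoring lives in evaluate_tour_Q(); the sketch below only illustrates the masking step, and the scoring convention shown is an assumption:

import numpy as np

def score_pair_sketch(Q, ai, bi):
    # Drop the -1 padding; what remains are real link distances
    dists = Q[ai, bi]
    dists = dists[dists != -1]
    # One plausible convention: shorter total distance means a better score
    return -dists.sum() if dists.size else 0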
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def insertionpairs(args): """ %prog insertionpairs endpoints.bed Pair up the candidate endpoints. A candidate excision point would contain both left-end (LE) and right-end (RE) within a given distance. -------| |-------- (RE) (LE) """
p = OptionParser(insertionpairs.__doc__) p.add_option("--extend", default=10, type="int", help="Allow insertion sites to match up within distance") p.set_outfile() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) bedfile, = args mergedbedfile = mergeBed(bedfile, d=opts.extend, nms=True) bed = Bed(mergedbedfile) fw = must_open(opts.outfile, "w") support = lambda x: -x.reads for b in bed: names = b.accn.split(",") ends = [EndPoint(x) for x in names] REs = sorted([x for x in ends if x.leftright == "RE"], key=support) LEs = sorted([x for x in ends if x.leftright == "LE"], key=support) if not (REs and LEs): continue mRE, mLE = REs[0], LEs[0] pRE, pLE = mRE.position, mLE.position if pLE < pRE: b.start, b.end = pLE - 1, pRE else: b.start, b.end = pRE - 1, pLE b.accn = "{0}|{1}".format(mRE.label, mLE.label) b.score = pLE - pRE - 1 print(b, file=fw)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def insertion(args): """ %prog insertion mic.mac.bed Find IES based on mapping MIC reads to MAC genome. Output a bedfile with 'lesions' (stack of broken reads) in the MAC genome. """
p = OptionParser(insertion.__doc__) p.add_option("--mindepth", default=6, type="int", help="Minimum depth to call an insertion") p.set_outfile() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) bedfile, = args mindepth = opts.mindepth bed = Bed(bedfile) fw = must_open(opts.outfile, "w") for seqid, feats in bed.sub_beds(): left_ends = Counter([x.start for x in feats]) right_ends = Counter([x.end for x in feats]) selected = [] for le, count in left_ends.items(): if count >= mindepth: selected.append((seqid, le, "LE-{0}".format(le), count)) for re, count in right_ends.items(): if count >= mindepth: selected.append((seqid, re, "RE-{0}".format(re), count)) selected.sort() for seqid, pos, label, count in selected: label = "{0}-r{1}".format(label, count) print("\t".join((seqid, str(pos - 1), str(pos), label)), file=fw)
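The lesion calling is just read-end tallying with a depth threshold; a toy run of the same Counter logic (positions are hypothetical):

from collections import Counter

starts = [100, 100, 100, 100, 100, 100, 250]  # read start positions
left_ends = Counter(starts)
mindepth = 6
selected = [(pos, n) for pos, n in left_ends.items() if n >= mindepth]
# [(100, 6)] -> reported above with label "LE-100-r6"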
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_sim_options(p): """ Add options shared by eagle and wgsim. """
p.add_option("--distance", default=500, type="int", help="Outer distance between the two ends [default: %default]") p.add_option("--readlen", default=150, type="int", help="Length of the read") p.set_depth(depth=10) p.set_outfile(outfile=None)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def wgsim(args): """ %prog wgsim fastafile Run dwgsim on fastafile. """
p = OptionParser(wgsim.__doc__) p.add_option("--erate", default=.01, type="float", help="Base error rate of the read [default: %default]") p.add_option("--noerrors", default=False, action="store_true", help="Simulate reads with no errors [default: %default]") p.add_option("--genomesize", type="int", help="Genome size in Mb [default: estimate from data]") add_sim_options(p) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) fastafile, = args pf = op.basename(fastafile).split(".")[0] genomesize = opts.genomesize size = genomesize * 1000000 if genomesize else Fasta(fastafile).totalsize depth = opts.depth readlen = opts.readlen readnum = int(math.ceil(size * depth / (2 * readlen))) distance = opts.distance stdev = distance / 10 outpf = opts.outfile or "{0}.{1}bp.{2}x".format(pf, distance, depth) logging.debug("Total genome size: {0} bp".format(size)) logging.debug("Target depth: {0}x".format(depth)) logging.debug("Number of read pairs (2x{0}): {1}".format(readlen, readnum)) if opts.noerrors: opts.erate = 0 cmd = "dwgsim -e {0} -E {0}".format(opts.erate) if opts.noerrors: cmd += " -r 0 -R 0 -X 0 -y 0" cmd += " -d {0} -s {1}".format(distance, stdev) cmd += " -N {0} -1 {1} -2 {1}".format(readnum, readlen) cmd += " {0} {1}".format(fastafile, outpf) sh(cmd)
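The pair count follows readnum = size * depth / (2 * readlen), since each pair contributes two reads. A worked example on a hypothetical 10 Mb genome at the defaults above:

import math

size, depth, readlen = 10000000, 10, 150
readnum = int(math.ceil(size * depth / (2.0 * readlen)))
# 100,000,000 target bases / 300 bases per pair -> 333,334 read pairs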
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fig4(args): """ %prog fig4 layout data Napus Figure 4A displays an example deleted region for quartet chromosomes, showing read alignments from high GL and low GL lines. """
p = OptionParser(fig4.__doc__)
p.add_option("--gauge_step", default=200000, type="int",
             help="Step size for the base scale")
opts, args, iopts = p.set_image_options(args, figsize="9x7")

if len(args) != 2:
    sys.exit(not p.print_help())

layout, datadir = args
layout = F4ALayout(layout, datadir=datadir)

gs = opts.gauge_step
fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1])
block, napusbed, slayout = "r28.txt", "all.bed", "r28.layout"
s = Synteny(fig, root, block, napusbed, slayout, chr_label=False)
synteny_exts = [(x.xstart, x.xend) for x in s.rr]

h = .1
order = "bzh,yudal".split(",")
labels = (r"\textit{B. napus} A$\mathsf{_n}$2",
          r"\textit{B. rapa} A$\mathsf{_r}$2",
          r"\textit{B. oleracea} C$\mathsf{_o}$2",
          r"\textit{B. napus} C$\mathsf{_n}$2")
for t in layout:
    xstart, xend = synteny_exts[2 * t.i]
    canvas = [xstart, t.y, xend - xstart, h]
    root.text(xstart - h, t.y + h / 2, labels[t.i], ha="center", va="center")
    ch, ab = t.box_region.split(":")
    a, b = ab.split("-")
    vlines = [int(x) for x in (a, b)]
    Coverage(fig, root, canvas, t.seqid, (t.start, t.end), datadir,
             order=order, gauge="top", plot_chr_label=False,
             gauge_step=gs, palette="gray",
             cap=40, hlsuffix="regions.forhaibao", vlines=vlines)

# Highlight GSL biosynthesis genes
a, b = (3, "Bra029311"), (5, "Bo2g161590")
for gid in (a, b):
    start, end = s.gg[gid]
    xstart, ystart = start
    xend, yend = end
    x = (xstart + xend) / 2
    arrow = FancyArrowPatch(posA=(x, ystart - .04),
                            posB=(x, ystart - .005),
                            arrowstyle="fancy,head_width=6,head_length=8",
                            lw=3, fc='k', ec='k', zorder=20)
    root.add_patch(arrow)

root.set_xlim(0, 1)
root.set_ylim(0, 1)
root.set_axis_off()

image_name = "napus-fig4." + iopts.format
savefig(image_name, dpi=iopts.dpi, iopts=iopts)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def ploidy(args): """ %prog ploidy seqids layout Build a figure that calls graphics.karyotype to illustrate the high ploidy of B. napus genome. """
p = OptionParser(ploidy.__doc__)
opts, args, iopts = p.set_image_options(args, figsize="8x7")

if len(args) != 2:
    sys.exit(not p.print_help())

seqidsfile, klayout = args

fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1])

Karyotype(fig, root, seqidsfile, klayout)

fc = "darkslategrey"
radius = .012
ot = -.05  # use this to adjust vertical position of the left panel

TextCircle(root, .1, .9 + ot, r'$\gamma$', radius=radius, fc=fc)
root.text(.1, .88 + ot, r"$\times3$", ha="center", va="top", color=fc)
TextCircle(root, .08, .79 + ot, r'$\alpha$', radius=radius, fc=fc)
TextCircle(root, .12, .79 + ot, r'$\beta$', radius=radius, fc=fc)
root.text(.1, .77 + ot, r"$\times3\times2\times2$", ha="center",
          va="top", color=fc)
root.text(.1, .67 + ot, r"Brassica triplication", ha="center",
          va="top", color=fc, size=11)
root.text(.1, .65 + ot, r"$\times3\times2\times2\times3$", ha="center",
          va="top", color=fc)
root.text(.1, .42 + ot, r"Allo-tetraploidy", ha="center",
          va="top", color=fc, size=11)
root.text(.1, .4 + ot, r"$\times3\times2\times2\times3\times2$", ha="center",
          va="top", color=fc)

bb = dict(boxstyle="round,pad=.5", fc="w", ec="0.5", alpha=0.5)
root.text(.5, .2 + ot, r"\noindent\textit{Brassica napus}\\"
          "(A$\mathsf{_n}$C$\mathsf{_n}$ genome)", ha="center",
          size=16, color="k", bbox=bb)

root.set_xlim(0, 1)
root.set_ylim(0, 1)
root.set_axis_off()

pf = "napus"
image_name = pf + "." + iopts.format
savefig(image_name, dpi=iopts.dpi, iopts=iopts)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def pasteprepare(args): """ %prog pasteprepare bacs.fasta Prepare sequences for paste. """
p = OptionParser(pasteprepare.__doc__) p.add_option("--flank", default=5000, type="int", help="Get the seq of size on two ends [default: %default]") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) goodfasta, = args flank = opts.flank pf = goodfasta.rsplit(".", 1)[0] extbed = pf + ".ext.bed" sizes = Sizes(goodfasta) fw = open(extbed, "w") for bac, size in sizes.iter_sizes(): print("\t".join(str(x) for x in \ (bac, 0, min(flank, size), bac + "L")), file=fw) print("\t".join(str(x) for x in \ (bac, max(size - flank, 0), size, bac + "R")), file=fw) fw.close() fastaFromBed(extbed, goodfasta, name=True)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def paste(args): """ %prog paste flanks.bed flanks_vs_assembly.blast backbone.fasta Paste in good sequences in the final assembly. """
from jcvi.formats.bed import uniq

p = OptionParser(paste.__doc__)
p.add_option("--maxsize", default=300000, type="int",
             help="Maximum size of patchers to be replaced [default: %default]")
p.add_option("--prefix", help="Prefix of the new object [default: %default]")
p.set_rclip(rclip=1)
opts, args = p.parse_args(args)

if len(args) != 3:
    sys.exit(not p.print_help())

pbed, blastfile, bbfasta = args
maxsize = opts.maxsize  # Max DNA size to replace gap
order = Bed(pbed).order

beforebed, afterbed = blast_to_twobeds(blastfile, order, log=True,
                                       rclip=opts.rclip, maxsize=maxsize,
                                       flipbeds=True)
beforebed = uniq([beforebed])

afbed = Bed(beforebed)
bfbed = Bed(afterbed)

shuffle_twobeds(afbed, bfbed, bbfasta, prefix=opts.prefix)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def eject(args): """ %prog eject candidates.bed chr.fasta Eject scaffolds from assembly, using the range identified by closest(). """
p = OptionParser(eject.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) candidates, chrfasta = args sizesfile = Sizes(chrfasta).filename cbedfile = complementBed(candidates, sizesfile) cbed = Bed(cbedfile) for b in cbed: b.accn = b.seqid b.score = 1000 b.strand = '+' cbed.print_to_file()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def closest(args): """ %prog closest candidates.bed gaps.bed fastafile Identify the nearest gaps flanking suggested regions. """
p = OptionParser(closest.__doc__) p.add_option("--om", default=False, action="store_true", help="The bedfile is OM blocks [default: %default]") opts, args = p.parse_args(args) if len(args) != 3: sys.exit(not p.print_help()) candidates, gapsbed, fastafile = args sizes = Sizes(fastafile).mapping bed = Bed(candidates) ranges = [] for b in bed: r = range_parse(b.accn) if opts.om else b ranges.append([r.seqid, r.start, r.end]) gapsbed = Bed(gapsbed) granges = [(x.seqid, x.start, x.end) for x in gapsbed] ranges = range_merge(ranges) for r in ranges: a = range_closest(granges, r) b = range_closest(granges, r, left=False) seqid = r[0] if a is not None and a[0] != seqid: a = None if b is not None and b[0] != seqid: b = None mmin = 1 if a is None else a[1] mmax = sizes[seqid] if b is None else b[2] print("\t".join(str(x) for x in (seqid, mmin - 1, mmax)))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def insert(args): """ %prog insert candidates.bed gaps.bed chrs.fasta unplaced.fasta Insert scaffolds into assembly. """
from jcvi.formats.agp import mask, bed
from jcvi.formats.sizes import agp

p = OptionParser(insert.__doc__)
opts, args = p.parse_args(args)

if len(args) != 4:
    sys.exit(not p.print_help())

candidates, gapsbed, chrfasta, unplacedfasta = args
refinedbed = refine([candidates, gapsbed])
sizes = Sizes(unplacedfasta).mapping
cbed = Bed(candidates)
corder = cbed.order
gbed = Bed(gapsbed)
gorder = gbed.order

gpbed = Bed()
gappositions = {}  # (chr, start, end) => gapid

fp = open(refinedbed)
gap_to_scf = defaultdict(list)
seen = set()
for row in fp:
    atoms = row.split()
    if len(atoms) <= 6:
        continue
    unplaced = atoms[3]
    strand = atoms[5]
    gapid = atoms[9]
    if gapid not in seen:
        seen.add(gapid)
        gi, gb = gorder[gapid]
        gpbed.append(gb)
        gappositions[(gb.seqid, gb.start, gb.end)] = gapid
    gap_to_scf[gapid].append((unplaced, strand))

gpbedfile = "candidate.gaps.bed"
gpbed.print_to_file(gpbedfile, sorted=True)

agpfile = agp([chrfasta])
maskedagpfile = mask([agpfile, gpbedfile])
maskedbedfile = maskedagpfile.rsplit(".", 1)[0] + ".bed"
bed([maskedagpfile, "--outfile={0}".format(maskedbedfile)])

mbed = Bed(maskedbedfile)
finalbed = Bed()
for b in mbed:
    sid = b.seqid
    key = (sid, b.start, b.end)
    if key not in gappositions:
        finalbed.add("{0}\n".format(b))
        continue

    gapid = gappositions[key]
    scfs = gap_to_scf[gapid]

    # For scaffolds placed in the same gap, sort according to positions
    scfs.sort(key=lambda x: corder[x[0]][1].start + corder[x[0]][1].end)
    for scf, strand in scfs:
        size = sizes[scf]
        finalbed.add("\t".join(str(x) for x in
                               (scf, 0, size, sid, 1000, strand)))

finalbedfile = "final.bed"
finalbed.print_to_file(finalbedfile)

# Clean-up
toclean = [gpbedfile, agpfile, maskedagpfile, maskedbedfile]
FileShredder(toclean)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def gaps(args): """ %prog gaps OM.bed fastafile Create patches around OM gaps. """
from jcvi.formats.bed import uniq from jcvi.utils.iter import pairwise p = OptionParser(gaps.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) ombed, fastafile = args ombed = uniq([ombed]) bed = Bed(ombed) for a, b in pairwise(bed): om_a = (a.seqid, a.start, a.end, "+") om_b = (b.seqid, b.start, b.end, "+") ch_a = range_parse(a.accn) ch_b = range_parse(b.accn) ch_a = (ch_a.seqid, ch_a.start, ch_a.end, "+") ch_b = (ch_b.seqid, ch_b.start, ch_b.end, "+") om_dist, x = range_distance(om_a, om_b, distmode="ee") ch_dist, x = range_distance(ch_a, ch_b, distmode="ee") if om_dist <= 0 and ch_dist <= 0: continue print(a) print(b) print(om_dist, ch_dist)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def tips(args): """ %prog tips patchers.bed complements.bed original.fasta backbone.fasta Append telomeric sequences based on patchers and complements. """
p = OptionParser(tips.__doc__) opts, args = p.parse_args(args) if len(args) != 4: sys.exit(not p.print_help()) pbedfile, cbedfile, sizesfile, bbfasta = args pbed = Bed(pbedfile, sorted=False) cbed = Bed(cbedfile, sorted=False) complements = dict() for object, beds in groupby(cbed, key=lambda x: x.seqid): beds = list(beds) complements[object] = beds sizes = Sizes(sizesfile).mapping bbsizes = Sizes(bbfasta).mapping tbeds = [] for object, beds in groupby(pbed, key=lambda x: x.accn): beds = list(beds) startbed, endbed = beds[0], beds[-1] start_id, end_id = startbed.seqid, endbed.seqid if startbed.start == 1: start_id = None if endbed.end == sizes[end_id]: end_id = None print(object, start_id, end_id, file=sys.stderr) if start_id: b = complements[start_id][0] b.accn = object tbeds.append(b) tbeds.append(BedLine("\t".join(str(x) for x in \ (object, 0, bbsizes[object], object, 1000, "+")))) if end_id: b = complements[end_id][-1] b.accn = object tbeds.append(b) tbed = Bed() tbed.extend(tbeds) tbedfile = "tips.bed" tbed.print_to_file(tbedfile)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fill(args): """ %prog fill gaps.bed bad.fasta Perform gap filling of one assembly (bad) using sequences from another. """
p = OptionParser(fill.__doc__)
p.add_option("--extend", default=2000, type="int",
             help="Extend seq flanking the gaps [default: %default]")
opts, args = p.parse_args(args)

if len(args) != 2:
    sys.exit(not p.print_help())

gapsbed, badfasta = args
Ext = opts.extend

gapdist = 2 * Ext + 1  # This prevents neighboring replacement ranges from intersecting
gapsbed = mergeBed(gapsbed, d=gapdist, nms=True)

bed = Bed(gapsbed)
sizes = Sizes(badfasta).mapping
pf = gapsbed.rsplit(".", 1)[0]
extbed = pf + ".ext.bed"
fw = open(extbed, "w")
for b in bed:
    gapname = b.accn
    start, end = max(0, b.start - Ext - 1), b.start - 1
    print("\t".join(str(x) for x in
                    (b.seqid, start, end, gapname + "L")), file=fw)
    start, end = b.end, min(sizes[b.seqid], b.end + Ext)
    print("\t".join(str(x) for x in
                    (b.seqid, start, end, gapname + "R")), file=fw)
fw.close()

fastaFromBed(extbed, badfasta, name=True)
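The flank intervals written above use BED's 0-based, half-open convention, with clamping at both sequence boundaries. A quick check on a hypothetical gap near the start of a 5 kb scaffold:

Ext = 2000
size = 5000                                  # hypothetical scaffold length
start, end = 500, 800                        # gap coordinates from the Bed record
left = (max(0, start - Ext - 1), start - 1)  # (0, 499): clipped at the origin
right = (end, min(size, end + Ext))          # (800, 2800)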
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def install(args): """ %prog install patchers.bed patchers.fasta backbone.fasta alt.fasta Install patches into backbone, using sequences from alternative assembly. The patches sequences are generated via jcvi.assembly.patch.fill(). The output is a bedfile that can be converted to AGP using jcvi.formats.agp.frombed(). """
from jcvi.apps.align import blast
from jcvi.formats.fasta import SeqIO

p = OptionParser(install.__doc__)
p.set_rclip(rclip=1)
p.add_option("--maxsize", default=300000, type="int",
             help="Maximum size of patchers to be replaced [default: %default]")
p.add_option("--prefix", help="Prefix of the new object [default: %default]")
p.add_option("--strict", default=False, action="store_true",
             help="Only update if replacement has no gaps [default: %default]")
opts, args = p.parse_args(args)

if len(args) != 4:
    sys.exit(not p.print_help())

pbed, pfasta, bbfasta, altfasta = args
maxsize = opts.maxsize  # Max DNA size to replace gap
rclip = opts.rclip

blastfile = blast([altfasta, pfasta, "--wordsize=100", "--pctid=99"])
order = Bed(pbed).order
beforebed, afterbed = blast_to_twobeds(blastfile, order, rclip=rclip,
                                       maxsize=maxsize)

beforefasta = fastaFromBed(beforebed, bbfasta, name=True, stranded=True)
afterfasta = fastaFromBed(afterbed, altfasta, name=True, stranded=True)

# Exclude the replacements that contain more Ns than before
ah = SeqIO.parse(beforefasta, "fasta")
bh = SeqIO.parse(afterfasta, "fasta")
count_Ns = lambda x: x.seq.count('n') + x.seq.count('N')
exclude = set()
for arec, brec in zip(ah, bh):
    an = count_Ns(arec)
    bn = count_Ns(brec)
    if opts.strict:
        if bn == 0:
            continue
    elif bn < an:
        continue
    id = arec.id
    exclude.add(id)
logging.debug("Ignore {0} updates because of decreasing quality."
              .format(len(exclude)))

abed = Bed(beforebed, sorted=False)
bbed = Bed(afterbed, sorted=False)
abed = [x for x in abed if x.accn not in exclude]
bbed = [x for x in bbed if x.accn not in exclude]

abedfile = "before.filtered.bed"
bbedfile = "after.filtered.bed"
afbed = Bed()
afbed.extend(abed)
bfbed = Bed()
bfbed.extend(bbed)

afbed.print_to_file(abedfile)
bfbed.print_to_file(bbedfile)

shuffle_twobeds(afbed, bfbed, bbfasta, prefix=opts.prefix)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def refine(args): """ %prog refine breakpoints.bed gaps.bed Find gaps within or near breakpoint region. For breakpoint regions with no gaps, there are two options: - Break in the middle of the region - Break at the closest gap (--closest) """
p = OptionParser(refine.__doc__)
p.add_option("--closest", default=False, action="store_true",
             help="In case of no gaps, use closest [default: %default]")
opts, args = p.parse_args(args)

if len(args) != 2:
    sys.exit(not p.print_help())

breakpointsbed, gapsbed = args
ncols = len(open(breakpointsbed).next().split())
logging.debug("File {0} contains {1} columns.".format(breakpointsbed, ncols))
cmd = "intersectBed -wao -a {0} -b {1}".format(breakpointsbed, gapsbed)
pf = "{0}.{1}".format(breakpointsbed.split(".")[0], gapsbed.split(".")[0])
ingapsbed = pf + ".bed"
sh(cmd, outfile=ingapsbed)

fp = open(ingapsbed)
data = [x.split() for x in fp]

nogapsbed = pf + ".nogaps.bed"
largestgapsbed = pf + ".largestgaps.bed"
nogapsfw = open(nogapsbed, "w")
largestgapsfw = open(largestgapsbed, "w")
for b, gaps in groupby(data, key=lambda x: x[:ncols]):
    gaps = list(gaps)
    gap = gaps[0]
    if len(gaps) == 1 and gap[-1] == "0":
        assert gap[-3] == "."
        print("\t".join(b), file=nogapsfw)
        continue
    gaps = [(int(x[-1]), x) for x in gaps]
    maxgap = max(gaps)[1]
    print("\t".join(maxgap), file=largestgapsfw)
nogapsfw.close()
largestgapsfw.close()

beds = [largestgapsbed]
toclean = [nogapsbed, largestgapsbed]
if opts.closest:
    closestgapsbed = pf + ".closestgaps.bed"
    cmd = "closestBed -a {0} -b {1} -d".format(nogapsbed, gapsbed)
    sh(cmd, outfile=closestgapsbed)
    beds += [closestgapsbed]
    toclean += [closestgapsbed]
else:
    pointbed = pf + ".point.bed"
    pbed = Bed()
    bed = Bed(nogapsbed)
    for b in bed:
        pos = (b.start + b.end) / 2
        b.start, b.end = pos, pos
        pbed.append(b)
    pbed.print_to_file(pointbed)
    beds += [pointbed]
    toclean += [pointbed]

refinedbed = pf + ".refined.bed"
FileMerger(beds, outfile=refinedbed).merge()

# Clean-up
FileShredder(toclean)

return refinedbed
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def patcher(args): """ %prog patcher backbone.bed other.bed Given optical map alignment, prepare the patchers. Use --backbone to suggest which assembly is the major one, and the patchers will be extracted from another assembly. """
from jcvi.formats.bed import uniq

p = OptionParser(patcher.__doc__)
p.add_option("--backbone", default="OM",
             help="Prefix of the backbone assembly [default: %default]")
p.add_option("--object", default="object",
             help="New object name [default: %default]")
opts, args = p.parse_args(args)

if len(args) != 2:
    sys.exit(not p.print_help())

backbonebed, otherbed = args
backbonebed = uniq([backbonebed])
otherbed = uniq([otherbed])

pf = backbonebed.split(".")[0]
key = lambda x: (x.seqid, x.start, x.end)

# Make a uniq bed keeping backbone at redundant intervals
cmd = "intersectBed -v -wa"
cmd += " -a {0} -b {1}".format(otherbed, backbonebed)
outfile = otherbed.rsplit(".", 1)[0] + ".not." + backbonebed
sh(cmd, outfile=outfile)

uniqbed = Bed()
uniqbedfile = pf + ".merged.bed"
uniqbed.extend(Bed(backbonebed))
uniqbed.extend(Bed(outfile))
uniqbed.print_to_file(uniqbedfile, sorted=True)

# Condense adjacent intervals, allow some chaining
bed = uniqbed
key = lambda x: range_parse(x.accn).seqid

bed_fn = pf + ".patchers.bed"
bed_fw = open(bed_fn, "w")

for k, sb in groupby(bed, key=key):
    sb = list(sb)
    chr, start, end, strand = merge_ranges(sb)
    print("\t".join(str(x) for x in
                    (chr, start, end, opts.object, 1000, strand)), file=bed_fw)
bed_fw.close()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def treds(args): """ %prog treds hli.tred.tsv Compile allele_frequency for TREDs results. Write data.tsv, meta.tsv and mask.tsv in one go. """
p = OptionParser(treds.__doc__) p.add_option("--csv", default=False, action="store_true", help="Also write `meta.csv`") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) tredresults, = args df = pd.read_csv(tredresults, sep="\t") tredsfile = datafile("TREDs.meta.csv") tf = pd.read_csv(tredsfile) tds = list(tf["abbreviation"]) ids = list(tf["id"]) tags = ["SampleKey"] final_columns = ["SampleKey"] afs = [] for td, id in zip(tds, ids): tag1 = "{}.1".format(td) tag2 = "{}.2".format(td) if tag2 not in df: afs.append("{}") continue tags.append(tag2) final_columns.append(id) a = np.array(list(df[tag1]) + list(df[tag2])) counts = alleles_to_counts(a) af = counts_to_af(counts) afs.append(af) tf["allele_frequency"] = afs metafile = "TREDs_{}_SEARCH.meta.tsv".format(timestamp()) tf.to_csv(metafile, sep="\t", index=False) logging.debug("File `{}` written.".format(metafile)) if opts.csv: metacsvfile = metafile.rsplit(".", 1)[0] + ".csv" tf.to_csv(metacsvfile, index=False) logging.debug("File `{}` written.".format(metacsvfile)) pp = df[tags] pp.columns = final_columns datafile = "TREDs_{}_SEARCH.data.tsv".format(timestamp()) pp.to_csv(datafile, sep="\t", index=False) logging.debug("File `{}` written.".format(datafile)) mask([datafile, metafile])
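alleles_to_counts() and counts_to_af() are jcvi helpers defined elsewhere; the following is a minimal sketch of what the pair accomplishes, assuming missing calls are encoded as negative values (the real helper serializes the result to a string rather than returning a dict, and af_sketch is a hypothetical name):

from collections import Counter

def af_sketch(alleles):
    counts = Counter(a for a in alleles if a >= 0)  # drop missing calls
    total = float(sum(counts.values()))
    return {allele: n / total for allele, n in sorted(counts.items())}

af_sketch([12, 12, 13, -1, 13, 13])  # {12: 0.4, 13: 0.6}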
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def stutter(args): """ %prog stutter a.vcf.gz Extract info from lobSTR vcf file. Generates a file that has the following fields: CHR, POS, MOTIF, RL, ALLREADS, Q """
p = OptionParser(stutter.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) vcf, = args pf = op.basename(vcf).split(".")[0] execid, sampleid = pf.split("_") C = "vcftools --remove-filtered-all --min-meanDP 10" C += " --gzvcf {} --out {}".format(vcf, pf) C += " --indv {}".format(sampleid) info = pf + ".INFO" if need_update(vcf, info): cmd = C + " --get-INFO MOTIF --get-INFO RL" sh(cmd) allreads = pf + ".ALLREADS.FORMAT" if need_update(vcf, allreads): cmd = C + " --extract-FORMAT-info ALLREADS" sh(cmd) q = pf + ".Q.FORMAT" if need_update(vcf, q): cmd = C + " --extract-FORMAT-info Q" sh(cmd) outfile = pf + ".STUTTER" if need_update((info, allreads, q), outfile): cmd = "cut -f1,2,5,6 {}".format(info) cmd += r" | sed -e 's/\t/_/g'" cmd += " | paste - {} {}".format(allreads, q) cmd += " | cut -f1,4,7" sh(cmd, outfile=outfile)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def filtervcf(args): """ %prog filtervcf NA12878.hg38.vcf.gz Filter lobSTR VCF using script shipped in lobSTR. Input file can be a list of vcf files. """
p = OptionParser(filtervcf.__doc__) p.set_home("lobstr", default="/mnt/software/lobSTR") p.set_aws_opts(store="hli-mv-data-science/htang/str") p.set_cpus() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) samples, = args lhome = opts.lobstr_home store = opts.output_path if samples.endswith((".vcf", ".vcf.gz")): vcffiles = [samples] else: vcffiles = [x.strip() for x in must_open(samples)] vcffiles = [x for x in vcffiles if ".filtered." not in x] run_args = [(x, lhome, x.startswith("s3://") and store) for x in vcffiles] cpus = min(opts.cpus, len(run_args)) p = Pool(processes=cpus) for res in p.map_async(run_filter, run_args).get(): continue
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def meta(args): """ %prog meta data.bin samples STR.ids STR-exons.wo.bed Compute allele frequencies and prune sites based on missingness. Filter subset of loci that satisfy: 1. no redundancy (unique chr:pos) 2. variable (n_alleles > 1) 3. low level of missing data (>= 50% autosomal + X, > 25% for Y) Write meta file with the following info: 1. id 2. title 3. gene_name 4. variant_type 5. motif 6. allele_frequency `STR-exons.wo.bed` can be generated like this: $ tail -n 694105 /mnt/software/lobSTR/hg38/index.tab | cut -f1-3 > all-STR.bed $ intersectBed -a all-STR.bed -b all-exons.bed -wo > STR-exons.wo.bed """
p = OptionParser(meta.__doc__)
p.add_option("--cutoff", default=.5, type="float",
             help="Percent observed required (chrY half cutoff)")
p.set_cpus()
opts, args = p.parse_args(args)

if len(args) != 4:
    sys.exit(not p.print_help())

binfile, sampleids, strids, wobed = args
cutoff = opts.cutoff

af_file = "allele_freq"
if need_update(binfile, af_file):
    df, m, samples, loci = read_binfile(binfile, sampleids, strids)
    nalleles = len(samples)
    fw = must_open(af_file, "w")
    for i, locus in enumerate(loci):
        a = m[:, i]
        counts = alleles_to_counts(a)
        af = counts_to_af(counts)
        seqid = locus.split("_")[0]
        remove = counts_filter(counts, nalleles, seqid, cutoff=cutoff)
        print("\t".join((locus, af, remove)), file=fw)
    fw.close()

logging.debug("Load gene intersections from `{}`".format(wobed))
fp = open(wobed)
gene_map = defaultdict(set)
for row in fp:
    chr1, start1, end1, chr2, start2, end2, name, ov = row.split()
    gene_map[(chr1, start1)] |= set(name.split(","))
for k, v in gene_map.items():
    non_enst = sorted(x for x in v if not x.startswith("ENST"))
    #enst = sorted(x.rsplit(".", 1)[0] for x in v if x.startswith("ENST"))
    gene_map[k] = ",".join(non_enst)

TREDS, df = read_treds()

metafile = "STRs_{}_SEARCH.meta.tsv".format(timestamp())
write_meta(af_file, gene_map, TREDS, filename=metafile)
logging.debug("File `{}` written.".format(metafile))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def bin(args): """ %prog bin data.tsv Convert tsv to binary format. """
p = OptionParser(bin.__doc__)
p.add_option("--dtype", choices=("float32", "int32"),
             help="dtype of the matrix")
opts, args = p.parse_args(args)

if len(args) != 1:
    sys.exit(not p.print_help())

tsvfile, = args
dtype = opts.dtype
if dtype is None:  # Guess
    dtype = np.int32 if "data" in tsvfile else np.float32
else:
    dtype = np.int32 if dtype == "int32" else np.float32

print("dtype: {}".format(dtype), file=sys.stderr)
fp = open(tsvfile)
next(fp)
arrays = []
for i, row in enumerate(fp):
    a = np.fromstring(row, sep="\t", dtype=dtype)
    a = a[1:]
    arrays.append(a)
    print(i, a, file=sys.stderr)

print("Merging", file=sys.stderr)
b = np.concatenate(arrays)
print("Binary shape: {}".format(b.shape), file=sys.stderr)
binfile = tsvfile.rsplit(".", 1)[0] + ".bin"
b.tofile(binfile)
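tofile() writes a raw, shapeless stream, so whoever reads the .bin back must supply the dimensions and dtype; presumably read_binfile() does something along these lines (read_bin_sketch is a hypothetical name, not the jcvi helper):

import numpy as np

def read_bin_sketch(binfile, nrows, ncols, dtype=np.int32):
    m = np.fromfile(binfile, dtype=dtype)
    assert m.size == nrows * ncols, "size mismatch: wrong dtype or shape?"
    return m.reshape(nrows, ncols)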
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def data(args): """ %prog data data.bin samples.ids STR.ids meta.tsv Make data.tsv based on meta.tsv. """
p = OptionParser(data.__doc__)
p.add_option("--notsv", default=False, action="store_true",
             help="Do not write data.tsv")
opts, args = p.parse_args(args)

if len(args) != 4:
    sys.exit(not p.print_help())

databin, sampleids, strids, metafile = args
final_columns, percentiles = read_meta(metafile)
df, m, samples, loci = read_binfile(databin, sampleids, strids)

# Clean the data
m %= 1000            # Get the larger of the two alleles
m[m == 999] = -1     # Missing data

final = set(final_columns)
remove = []
for i, locus in enumerate(loci):
    if locus not in final:
        remove.append(locus)
        continue

pf = "STRs_{}_SEARCH".format(timestamp())
filteredstrids = "{}.STR.ids".format(pf)
fw = open(filteredstrids, "w")
print("\n".join(final_columns), file=fw)
fw.close()
logging.debug("Dropped {} columns; Retained {} columns (`{}`)"
              .format(len(remove), len(final_columns), filteredstrids))

# Remove low-quality columns!
df.drop(remove, inplace=True, axis=1)
df.columns = final_columns

filtered_bin = "{}.data.bin".format(pf)
if need_update(databin, filtered_bin):
    m = df.as_matrix()
    m.tofile(filtered_bin)
    logging.debug("Filtered binary matrix written to `{}`".format(filtered_bin))

# Write data output
filtered_tsv = "{}.data.tsv".format(pf)
if not opts.notsv and need_update(databin, filtered_tsv):
    df.to_csv(filtered_tsv, sep="\t", index_label="SampleKey")
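The m %= 1000 line and its comment suggest each cell packs a diploid genotype into one integer, with the larger allele in the low three digits and 999 marking a missing call. Under that assumption (not confirmed by the source), the decoding looks like:

packed = 12013                          # hypothetical cell: 12 * 1000 + 13
smaller, larger = divmod(packed, 1000)  # (12, 13); m %= 1000 keeps 13

missing = 999999
missing % 1000                          # 999, remapped to -1 just above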
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def mask(args): """ %prog mask data.bin samples.ids STR.ids meta.tsv OR %prog mask data.tsv meta.tsv Compute P-values based on meta and data. The `data.bin` should be the matrix containing filtered loci and the output mask.tsv will have the same dimension. """
p = OptionParser(mask.__doc__) opts, args = p.parse_args(args) if len(args) not in (2, 4): sys.exit(not p.print_help()) if len(args) == 4: databin, sampleids, strids, metafile = args df, m, samples, loci = read_binfile(databin, sampleids, strids) mode = "STRs" elif len(args) == 2: databin, metafile = args df = pd.read_csv(databin, sep="\t", index_col=0) m = df.as_matrix() samples = df.index loci = list(df.columns) mode = "TREDs" pf = "{}_{}_SEARCH".format(mode, timestamp()) final_columns, percentiles = read_meta(metafile) maskfile = pf + ".mask.tsv" run_args = [] for i, locus in enumerate(loci): a = m[:, i] percentile = percentiles[locus] run_args.append((i, a, percentile)) if mode == "TREDs" or need_update(databin, maskfile): cpus = min(8, len(run_args)) write_mask(cpus, samples, final_columns, run_args, filename=maskfile) logging.debug("File `{}` written.".format(maskfile))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def compilevcf(args): """ %prog compilevcf samples.csv Compile vcf results into master spreadsheet. """
p = OptionParser(compilevcf.__doc__) p.add_option("--db", default="hg38", help="Use these lobSTR db") p.add_option("--nofilter", default=False, action="store_true", help="Do not filter the variants") p.set_home("lobstr") p.set_cpus() p.set_aws_opts(store="hli-mv-data-science/htang/str-data") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) samples, = args workdir = opts.workdir store = opts.output_path cleanup = not opts.nocleanup filtered = not opts.nofilter dbs = opts.db.split(",") cwd = os.getcwd() mkdir(workdir) os.chdir(workdir) samples = op.join(cwd, samples) stridsfile = "STR.ids" if samples.endswith((".vcf", ".vcf.gz")): vcffiles = [samples] else: vcffiles = [x.strip() for x in must_open(samples)] if not op.exists(stridsfile): ids = [] for db in dbs: ids.extend(STRFile(opts.lobstr_home, db=db).ids) uids = uniqify(ids) logging.debug("Combined: {} Unique: {}".format(len(ids), len(uids))) fw = open(stridsfile, "w") print("\n".join(uids), file=fw) fw.close() run_args = [(x, filtered, cleanup, store) for x in vcffiles] cpus = min(opts.cpus, len(run_args)) p = Pool(processes=cpus) for res in p.map_async(run_compile, run_args).get(): continue
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def ystr(args): """ %prog ystr chrY.vcf Print out Y-STR info given VCF. Marker name extracted from tabfile. """
from jcvi.utils.table import write_csv

p = OptionParser(ystr.__doc__)
p.set_home("lobstr")
opts, args = p.parse_args(args)

if len(args) != 1:
    sys.exit(not p.print_help())

vcffile, = args
si = STRFile(opts.lobstr_home, db="hg38-named")
register = si.register

header = "Marker|Reads|Ref|Genotype|Motif".split("|")
contents = []

fp = must_open(vcffile)
reader = vcf.Reader(fp)
simple_register = {}
for record in reader:
    name = register[(record.CHROM, record.POS)]
    info = record.INFO
    ref = int(float(info["REF"]))
    rpa = info.get("RPA", ref)
    if isinstance(rpa, list):
        rpa = "|".join(str(int(float(x))) for x in rpa)
    ru = info["RU"]
    simple_register[name] = rpa
    for sample in record.samples:
        contents.append((name, sample["ALLREADS"], ref, rpa, ru))

# Multi-part markers
a, b, c = "DYS389I", "DYS389B.1", "DYS389B"
if a in simple_register and b in simple_register:
    simple_register[c] = int(simple_register[a]) + int(simple_register[b])

# Multi-copy markers
mm = ["DYS385", "DYS413", "YCAII"]
for m in mm:
    ma, mb = m + 'a', m + 'b'
    if ma not in simple_register or mb not in simple_register:
        simple_register[ma] = simple_register[mb] = None
        del simple_register[ma]
        del simple_register[mb]
        continue
    if simple_register[ma] > simple_register[mb]:
        simple_register[ma], simple_register[mb] = \
            simple_register[mb], simple_register[ma]

write_csv(header, contents, sep=" ")

print("[YSEARCH]")
build_ysearch_link(simple_register)
print("[YFILER]")
build_yhrd_link(simple_register, panel=YHRD_YFILER)
print("[YFILERPLUS]")
build_yhrd_link(simple_register, panel=YHRD_YFILERPLUS)
print("[YSTR-ALL]")
build_yhrd_link(simple_register, panel=USYSTR_ALL)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def liftover(args): """ %prog liftover lobstr_v3.0.2_hg38_ref.bed hg38.upper.fa LiftOver CODIS/Y-STR markers. """
p = OptionParser(liftover.__doc__) p.add_option("--checkvalid", default=False, action="store_true", help="Check minscore, period and length") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) refbed, fastafile = args genome = pyfasta.Fasta(fastafile) edits = [] fp = open(refbed) for i, row in enumerate(fp): s = STRLine(row) seq = genome[s.seqid][s.start - 1: s.end].upper() s.motif = get_motif(seq, len(s.motif)) s.fix_counts(seq) if opts.checkvalid and not s.is_valid(): continue edits.append(s) if i % 10000 == 0: print(i, "lines read", file=sys.stderr) edits = natsorted(edits, key=lambda x: (x.seqid, x.start)) for e in edits: print(str(e))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def trf(args): """ %prog trf outdir Run TRF on FASTA files. """
from jcvi.apps.base import iglob cparams = "1 1 2 80 5 200 2000" p = OptionParser(trf.__doc__) p.add_option("--mismatch", default=31, type="int", help="Mismatch and gap penalty") p.add_option("--minscore", default=MINSCORE, type="int", help="Minimum score to report") p.add_option("--period", default=6, type="int", help="Maximum period to report") p.add_option("--lobstr", default=False, action="store_true", help="Generate output for lobSTR") p.add_option("--telomeres", default=False, action="store_true", help="Run telomere search: minscore=140 period=7") p.add_option("--centromeres", default=False, action="store_true", help="Run centromere search: {}".format(cparams)) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) outdir, = args minlength = opts.minscore / 2 mm = MakeManager() if opts.telomeres: opts.minscore, opts.period = 140, 7 params = "2 {0} {0} 80 10 {1} {2}".\ format(opts.mismatch, opts.minscore, opts.period).split() if opts.centromeres: params = cparams.split() bedfiles = [] for fastafile in natsorted(iglob(outdir, "*.fa,*.fasta")): pf = op.basename(fastafile).split(".")[0] cmd1 = "trf {0} {1} -d -h".format(fastafile, " ".join(params)) datfile = op.basename(fastafile) + "." + ".".join(params) + ".dat" bedfile = "{0}.trf.bed".format(pf) cmd2 = "cat {} | grep -v ^Parameters".format(datfile) if opts.lobstr: cmd2 += " | awk '($8 >= {} && $8 <= {})'".\ format(minlength, READLEN - minlength) else: cmd2 += " | awk '($8 >= 0)'" cmd2 += " | sed 's/ /\\t/g'" cmd2 += " | awk '{{print \"{0}\\t\" $0}}' > {1}".format(pf, bedfile) mm.add(fastafile, datfile, cmd1) mm.add(datfile, bedfile, cmd2) bedfiles.append(bedfile) bedfile = "trf.bed" cmd = "cat {0} > {1}".format(" ".join(natsorted(bedfiles)), bedfile) mm.add(bedfiles, bedfile, cmd) mm.write()
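The seven positional TRF parameters are Match, Mismatch, Delta, PM (match probability), PI (indel probability), Minscore and MaxPeriod, per the TRF manual, so the telomere preset above requests alignments scoring at least 140 with period at most 7. Building one such command by hand (the input filename is a placeholder):

match, mismatch, delta = 2, 31, 31   # mismatch and gap penalty share opts.mismatch
pm, pi = 80, 10
minscore, maxperiod = 140, 7         # the --telomeres preset
params = "{} {} {} {} {} {} {}".format(
    match, mismatch, delta, pm, pi, minscore, maxperiod)
cmd = "trf chr1.fasta {} -d -h".format(params)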
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def batchlobstr(args): """ %prog batchlobstr samples.csv Run lobSTR sequentially on list of samples. Each line contains: sample-name,s3-location """
p = OptionParser(batchlobstr.__doc__) p.add_option("--sep", default=",", help="Separator for building commandline") p.set_home("lobstr", default="s3://hli-mv-data-science/htang/str-build/lobSTR/") p.set_aws_opts(store="hli-mv-data-science/htang/str-data") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) samplesfile, = args store = opts.output_path computed = ls_s3(store) fp = open(samplesfile) skipped = total = 0 for row in fp: total += 1 sample, s3file = row.strip().split(",")[:2] exec_id, sample_id = sample.split("_") bamfile = s3file.replace(".gz", "").replace(".vcf", ".bam") gzfile = sample + ".{0}.vcf.gz".format("hg38") if gzfile in computed: skipped += 1 continue print(opts.sep.join("python -m jcvi.variation.str lobstr".split() + \ ["hg38", "--input_bam_path", bamfile, "--output_path", store, "--sample_id", sample_id, "--workflow_execution_id", exec_id, "--lobstr_home", opts.lobstr_home, "--workdir", opts.workdir])) fp.close() logging.debug("Total skipped: {0}".format(percentage(skipped, total)))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def locus(args): """ %prog locus bamfile Extract selected locus from a list of TREDs for validation, and run lobSTR. """
from jcvi.formats.sam import get_minibam

# See `Format-lobSTR-database.ipynb` for a list of TREDs for validation
INCLUDE = ["HD", "SBMA", "SCA1", "SCA2", "SCA8", "SCA17", "DM1", "DM2",
           "FXTAS"]
db_choices = ("hg38", "hg19")

p = OptionParser(locus.__doc__)
p.add_option("--tred", choices=INCLUDE, help="TRED name")
p.add_option("--ref", choices=db_choices, default="hg38",
             help="Reference genome")
p.set_home("lobstr")
opts, args = p.parse_args(args)

if len(args) != 1:
    sys.exit(not p.print_help())

bamfile, = args
ref = opts.ref
lhome = opts.lobstr_home
tred = opts.tred

tredsfile = datafile("TREDs.meta.csv")
tf = pd.read_csv(tredsfile, index_col=0)
row = tf.ix[tred]
tag = "repeat_location"
ldb = "TREDs"
if ref == "hg19":
    tag += "." + ref
    ldb += "-" + ref
seqid, start_end = row[tag].split(":")

PAD = 1000
start, end = start_end.split('-')
start, end = int(start) - PAD, int(end) + PAD
region = "{}:{}-{}".format(seqid, start, end)

minibamfile = get_minibam(bamfile, region)
c = seqid.replace("chr", "")
cmd, vcf = allelotype_on_chr(minibamfile, c, lhome, ldb)
sh(cmd)

parser = LobSTRvcf(columnidsfile=None)
parser.parse(vcf, filtered=False)
items = parser.items()
if not items:
    print("No entry found!", file=sys.stderr)
    return

k, v = parser.items()[0]
print("{} => {}".format(tred, v.replace(',', '/')), file=sys.stderr)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def lobstrindex(args): """ %prog lobstrindex hg38.trf.bed hg38.upper.fa Make lobSTR index. Make sure the FASTA contains only upper case (use fasta.format --upper to convert from UCSC fasta). The bed file is generated by str(). """
p = OptionParser(lobstrindex.__doc__) p.add_option("--notreds", default=False, action="store_true", help="Remove TREDs from the bed file") p.set_home("lobstr") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) trfbed, fastafile = args pf = fastafile.split(".")[0] lhome = opts.lobstr_home mkdir(pf) if opts.notreds: newbedfile = trfbed + ".new" newbed = open(newbedfile, "w") fp = open(trfbed) retained = total = 0 seen = set() for row in fp: r = STRLine(row) total += 1 name = r.longname if name in seen: continue seen.add(name) print(r, file=newbed) retained += 1 newbed.close() logging.debug("Retained: {0}".format(percentage(retained, total))) else: newbedfile = trfbed mm = MakeManager() cmd = "python {0}/scripts/lobstr_index.py".format(lhome) cmd += " --str {0} --ref {1} --out {2}".format(newbedfile, fastafile, pf) mm.add((newbedfile, fastafile), op.join(pf, "lobSTR_ref.fasta.rsa"), cmd) tabfile = "{0}/index.tab".format(pf) cmd = "python {0}/scripts/GetSTRInfo.py".format(lhome) cmd += " {0} {1} > {2}".format(newbedfile, fastafile, tabfile) mm.add((newbedfile, fastafile), tabfile, cmd) infofile = "{0}/index.info".format(pf) cmd = "cp {0} {1}".format(newbedfile, infofile) mm.add(trfbed, infofile, cmd) mm.write()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def agp(args): """ %prog agp evidencefile contigs.fasta Convert SSPACE scaffold structure to AGP format. """
p = OptionParser(agp.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) evidencefile, contigs = args ef = EvidenceFile(evidencefile, contigs) agpfile = evidencefile.replace(".evidence", ".agp") ef.write_agp(agpfile)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def spades(args): """ %prog spades folder Run automated SPADES. """
from jcvi.formats.fastq import readlen

p = OptionParser(spades.__doc__)
opts, args = p.parse_args(args)

if len(args) == 0:
    sys.exit(not p.print_help())

folder, = args
for p, pf in iter_project(folder):
    rl = readlen([p[0], "--silent"])

    # <http://spades.bioinf.spbau.ru/release3.1.0/manual.html#sec3.4>
    # Check the longer read length first; otherwise the 250 bp branch is unreachable
    kmers = None
    if rl >= 250:
        kmers = "21,33,55,77,99,127"
    elif rl >= 150:
        kmers = "21,33,55,77"

    cmd = "spades.py"
    if kmers:
        cmd += " -k {0}".format(kmers)
    cmd += " --careful"
    cmd += " --pe1-1 {0} --pe1-2 {1}".format(*p)
    cmd += " -o {0}_spades".format(pf)
    print(cmd)
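With the branch order corrected, the k-mer ladder tracks read length as intended; a small hypothetical helper makes the mapping easy to test:

def choose_kmers(rl):
    # Longer reads support a taller k-mer ladder; None lets SPAdes pick defaults
    if rl >= 250:
        return "21,33,55,77,99,127"
    if rl >= 150:
        return "21,33,55,77"
    return None

assert choose_kmers(100) is None
assert choose_kmers(150) == "21,33,55,77"
assert choose_kmers(250) == "21,33,55,77,99,127"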
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def contamination(args): """ %prog contamination folder Ecoli.fasta Remove contaminated reads. The FASTQ files in the folder will be automatically paired and filtered against Ecoli.fasta to remove contaminants using BOWTIE2. """
from jcvi.apps.bowtie import align p = OptionParser(contamination.__doc__) p.add_option("--mapped", default=False, action="store_true", help="Retain contaminated reads instead [default: %default]") p.set_cutoff(cutoff=800) p.set_mateorientation(mateorientation="+-") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) folder, ecoli = args ecoli = get_abs_path(ecoli) tag = "--mapped" if opts.mapped else "--unmapped" for p, pf in iter_project(folder): align_opts = [ecoli] + p + [tag] align_opts += ["--cutoff={0}".format(opts.cutoff), "--null"] if opts.mateorientation: align_opts += ["--mateorientation={0}".format(opts.mateorientation)] samfile, logfile = align(align_opts)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def pairs(args): """ %prog pairs folder reference.fasta Estimate insert size distribution. Compatible with a variety of aligners, including BOWTIE and BWA. """
p = OptionParser(pairs.__doc__)
p.set_firstN()
p.set_mates()
p.set_aligner()
opts, args = p.parse_args(args)

if len(args) != 2:
    sys.exit(not p.print_help())

cwd = os.getcwd()
aligner = opts.aligner
work = "-".join(("pairs", aligner))
mkdir(work)

from jcvi.formats.sam import pairs as ps

if aligner == "bowtie":
    from jcvi.apps.bowtie import align
elif aligner == "bwa":
    from jcvi.apps.bwa import align

folder, ref = args
ref = get_abs_path(ref)
messages = []
for p, prefix in iter_project(folder):
    samplefq = []
    for i in range(2):
        samplefq.append(op.join(work, prefix + "_{0}.first.fastq".format(i + 1)))
        first([str(opts.firstN)] + [p[i]] + ["-o", samplefq[i]])

    os.chdir(work)
    align_args = [ref] + [op.basename(fq) for fq in samplefq]
    outfile, logfile = align(align_args)
    bedfile, stats = ps([outfile, "--rclip={0}".format(opts.rclip)])
    os.chdir(cwd)

    median = stats.median
    tag = "MP" if median > 1000 else "PE"
    median = str(median)
    pf, sf = median[:2], median[2:]
    if sf and int(sf) != 0:
        pf = str(int(pf) + 1)  # Get the first two effective digits
    lib = "{0}-{1}".format(tag, pf + '0' * len(sf))
    for i, xp in enumerate(p):
        suffix = "fastq.gz" if xp.endswith(".gz") else "fastq"
        link = "{0}-{1}.{2}.{3}".format(lib, prefix.replace("-", ""),
                                        i + 1, suffix)
        m = "\t".join(str(x) for x in (xp, link))
        messages.append(m)

messages = "\n".join(messages)
write_file("f.meta", messages, tee=True)
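The library name encodes the median insert size rounded up to two significant digits; a worked example with a hypothetical median:

median = 4873                    # hypothetical median insert size
tag = "MP" if median > 1000 else "PE"
s = str(median)
pf, sf = s[:2], s[2:]
if sf and int(sf) != 0:
    pf = str(int(pf) + 1)        # round up: 48xx -> 49
lib = "{0}-{1}".format(tag, pf + '0' * len(sf))
# lib == "MP-4900"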
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def allpaths(args): """ %prog allpaths folder1 folder2 ... Run automated ALLPATHS on list of dirs. """
p = OptionParser(allpaths.__doc__) p.add_option("--ploidy", default="1", choices=("1", "2"), help="Ploidy [default: %default]") opts, args = p.parse_args(args) if len(args) == 0: sys.exit(not p.print_help()) folders = args for pf in folders: if not op.isdir(pf): continue assemble_dir(pf, target=["final.contigs.fasta", "final.assembly.fasta"], ploidy=opts.ploidy)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def prepare(args): """ %prog prepare jira.txt Parse JIRA report and prepare input. Look for all FASTQ files in the report and get the prefix. Assign fastq to a folder and a new file name indicating the library type (e.g. PE-500, MP-5000, etc.). Note that JIRA report can also be a list of FASTQ files. """
p = OptionParser(prepare.__doc__) p.add_option("--first", default=0, type="int", help="Use only first N reads [default: %default]") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) jfile, = args metafile = jfile + ".meta" if need_update(jfile, metafile): fp = open(jfile) fastqfiles = [x.strip() for x in fp if ".fastq" in x] metas = [Meta(x) for x in fastqfiles] fw = open(metafile, "w") print("\n".join(str(x) for x in metas), file=fw) print("Now modify `{0}`, and restart this script.".\ format(metafile), file=sys.stderr) print("Each line is : genome library fastqfile", file=sys.stderr) fw.close() return mf = MetaFile(metafile) for m in mf: m.make_link(firstN=opts.first)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def assemble_pairs(p, pf, tag, target=["final.contigs.fasta"]): """ Take one pair of reads and assemble to contigs.fasta. """
slink(p, pf, tag) assemble_dir(pf, target)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def soap_trios(p, pf, tag, extra): """ Take one pair of reads and 'widow' reads after correction and run SOAP. """
from jcvi.assembly.soap import prepare logging.debug("Work on {0} ({1})".format(pf, ','.join(p))) asm = "{0}.closed.scafSeq".format(pf) if not need_update(p, asm): logging.debug("Assembly found: {0}. Skipped.".format(asm)) return slink(p, pf, tag, extra) cwd = os.getcwd() os.chdir(pf) prepare(sorted(glob("*.fastq") + glob("*.fastq.gz")) + \ ["--assemble_1st_rank_only", "-K 31"]) sh("./run.sh") sh("cp asm31.closed.scafSeq ../{0}".format(asm)) logging.debug("Assembly finished: {0}".format(asm)) os.chdir(cwd)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def correctX(args): """ %prog correctX folder tag Run ALLPATHS correction on a folder of paired reads and apply tag. """
p = OptionParser(correctX.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) folder, tag = args tag = tag.split(",") for p, pf in iter_project(folder): correct_pairs(p, pf, tag)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def allpathsX(args): """ %prog allpathsX folder tag Run assembly on a folder of paired reads and apply tag (PE-200, PE-500). Allow multiple tags separated by comma, e.g. PE-350,TT-1050 """
p = OptionParser(allpathsX.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) folder, tag = args tag = tag.split(",") for p, pf in iter_project(folder): assemble_pairs(p, pf, tag)