text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def stats(args):
    """
    %prog stats folder

    Generate a table summarizing the .stats files found under `folder`.
    """
    p = OptionParser(stats.__doc__)
    p.set_outfile()
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    folder, = args
    # Collect all .stats files under the given folder
    statsfiles = iglob(folder, "*.stats")
    # Extract the value after '=' in a 'key=value' token
    after_equal = lambda x: x.split("=")[-1]
    header = "Library Assembled_reads Contigs".split()
    contents = []
    # Each .stats file is scanned for its summary line, which looks like:
    # label=M0096 total=7443 cnts=948 mean=7.851 std=35.96
    for statsfile in statsfiles:
        fp = open(statsfile)  # NOTE(review): handle is never closed
        for row in fp:
            if row.startswith("label="):
                break
        label, total, cnts = row.split()[:3]
        label = after_equal(label)
        reads = int(after_equal(total))
        contigs = int(after_equal(cnts))
        contents.append((label, reads, contigs))

    # Append summary rows: SUM, AVERAGE and MEDIAN across all samples.
    # NOTE(review): all_labels is unpacked but unused.
    all_labels, all_reads, all_contigs = zip(*contents)
    contents.append(("SUM", sum(all_reads), sum(all_contigs)))
    contents.append(("AVERAGE (per sample)", \
            int(np.mean(all_reads)), int(np.mean(all_contigs))))
    contents.append(("MEDIAN (per sample)", \
            int(np.median(all_reads)), int(np.median(all_contigs))))
    write_csv(header, contents, filename=opts.outfile)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def stack(S):
    """
    From a list of (sequence, nreps) pairs at a site, tally base counts.

    Each column of the stacked sequences yields a frequency vector of
    length NBASES, where the count for each base is weighted by the
    number of dereplicated reads (nreps) backing that sequence.
    """
    S, nreps = zip(*S)
    S = np.array([list(x) for x in S])
    rows, cols = S.shape
    counts = []
    # BUGFIX: use range() instead of the Python2-only xrange(); this
    # module already relies on Python3 semantics (print(..., file=fw)).
    for c in range(cols):
        freq = [0] * NBASES
        for b, nrep in zip(S[:, c], nreps):
            # Weight each observed base by its dereplication count
            freq[BASES.index(b)] += nrep
        counts.append(freq)
    return counts
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_left_right(seq):
    """
    Locate the first and last non-gap base in `seq`.

    Returns a (leftjust, rightjust) tuple of indices into the original
    (gapped) sequence.
    """
    trimmed = seq.strip(GAPS)
    first_base, last_base = trimmed[0], trimmed[-1]
    return seq.index(first_base), seq.rindex(last_base)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cons(f, mindepth):
    """
    Generator: for each cluster in clust file `f`, yield the per-site
    base-count vectors of sites that pass the depth filter.
    """
    C = ClustFile(f)
    for data in C:
        names, seqs, nreps = zip(*data)
        total_nreps = sum(nreps)
        # Depth filter: skip clusters with insufficient total coverage
        if total_nreps < mindepth:
            continue

        S = []
        for name, seq, nrep in data:
            # Append sequence * number of dereps
            S.append([seq, nrep])

        # Make list of (base -> weighted count) for each site in sequences
        res = stack(S)
        # Keep only sites whose first four (A, C, G, T) counts reach
        # mindepth; x[:4] drops any trailing gap/ambiguous counts
        yield [x[:4] for x in res if sum(x[:4]) >= mindepth]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def estimateHE(args):
    """
    %prog estimateHE clustSfile

    Estimate heterozygosity (H) and error rate (E). Idea borrowed heavily
    from the PyRad paper.
    """
    p = OptionParser(estimateHE.__doc__)
    add_consensus_options(p)
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    clustSfile, = args
    HEfile = clustSfile.rsplit(".", 1)[0] + ".HE"
    # Skip recomputation when the output is newer than the input
    if not need_update(clustSfile, HEfile):
        logging.debug("File `{0}` found. Computation skipped.".format(HEfile))
        return HEfile

    # Gather per-site base counts across all depth-passing clusters
    D = []
    for d in cons(clustSfile, opts.mindepth):
        D.extend(d)

    logging.debug("Computing base frequencies ...")
    P = makeP(D)
    C = makeC(D)

    logging.debug("Solving log-likelihood function ...")
    x0 = [.01, .001]  # initial values for (H, E)
    # Downhill-simplex minimization of the negative log-likelihood LL
    H, E = scipy.optimize.fmin(LL, x0, args=(P, C))

    fw = must_open(HEfile, "w")
    print(H, E, file=fw)
    fw.close()

    return HEfile
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def alignfast(names, seqs):
    """
    Align a cluster of sequences and return the aligner output as a string.

    NOTE(review): despite the historical mention of MUSCLE elsewhere, this
    pipes a FASTA-like stream to `poa` (Partial Order Alignment) and
    returns its PIR-format output.
    """
    matfile = op.join(datadir, "blosum80.mat")
    cmd = "poa -read_fasta - -pir stdout {0} -tolower -silent -hb -fuse_all".format(matfile)
    p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
    # Interleave names and sequences, one per line, as poa's stdin
    s = ""
    for i, j in zip(names, seqs):
        s += "\n".join((i, j)) + "\n"
    # communicate() feeds stdin and returns (stdout, stderr);
    # stderr is merged into stdout by STDOUT above
    return p.communicate(s)[0]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cluster(args):
    """
    %prog cluster prefix fastqfiles

    Use `vsearch` to remove duplicate reads. This routine is heavily
    influenced by PyRAD: <https://github.com/dereneaton/pyrad>.
    """
    p = OptionParser(cluster.__doc__)
    add_consensus_options(p)
    p.set_align(pctid=95)
    p.set_outdir()
    p.set_cpus()
    opts, args = p.parse_args(args)

    if len(args) < 2:
        sys.exit(not p.print_help())

    prefix = args[0]
    fastqfiles = args[1:]
    cpus = opts.cpus
    pctid = opts.pctid
    mindepth = opts.mindepth
    minlength = opts.minlength

    # Convert fastq input to fasta (+qual) in the output directory
    fastafile, qualfile = fasta(fastqfiles + ["--seqtk",
                                "--outdir={0}".format(opts.outdir),
                                "--outfile={0}".format(prefix + ".fasta")])

    prefix = op.join(opts.outdir, prefix)
    pf = prefix + ".P{0}".format(pctid)

    # Each pipeline stage below is guarded by need_update() so it is
    # skipped when its output is already newer than its input.

    # 1. Dereplicate reads
    derepfile = prefix + ".derep"
    if need_update(fastafile, derepfile):
        derep(fastafile, derepfile, minlength, cpus)

    # 2. Cluster dereplicated reads at the requested percent identity
    userfile = pf + ".u"
    notmatchedfile = pf + ".notmatched"
    if need_update(derepfile, userfile):
        cluster_smallmem(derepfile, userfile, notmatchedfile,
                         minlength, pctid, cpus)

    # 3. Build .clust file from the clustering results
    clustfile = pf + ".clust"
    if need_update((derepfile, userfile, notmatchedfile), clustfile):
        makeclust(derepfile, userfile, notmatchedfile, clustfile,
                  mindepth=mindepth)

    # 4. Align each cluster (parallelized)
    clustSfile = pf + ".clustS"
    if need_update(clustfile, clustSfile):
        parallel_musclewrap(clustfile, cpus)

    # 5. Summarize cluster statistics
    statsfile = pf + ".stats"
    if need_update(clustSfile, statsfile):
        makestats(clustSfile, statsfile, mindepth=mindepth)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def align(args):
    """
    %prog align clustfile

    Align clustfile to clustSfile. Useful for benchmarking aligners.
    """
    p = OptionParser(align.__doc__)
    p.set_cpus()
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    clustfile = args[0]
    parallel_musclewrap(clustfile, opts.cpus)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def discrete_rainbow(N=7, cmap=cm.Set1, usepreset=True, shuffle=False, \
                     plot=False):
    """
    Return a discrete colormap and the set of colors.

    modified from
    <http://www.scipy.org/Cookbook/Matplotlib/ColormapTransformations>

    cmap: colormap instance, eg. cm.jet.
    N: Number of colors.

    See available matplotlib colormaps at:
    <http://dept.astro.lsa.umich.edu/~msshin/science/code/matplotlib_cm/>

    If N>20 the sampled colors might not be very distinctive.
    If you want to error and try anyway, set usepreset=False
    """
    import random
    from scipy import interpolate

    if usepreset:
        if 0 < N <= 5:
            cmap = cm.gist_rainbow
        elif N <= 20:
            cmap = cm.Set1
        else:
            sys.exit(discrete_rainbow.__doc__)

    cdict = cmap._segmentdata.copy()
    # N colors
    colors_i = np.linspace(0, 1., N)
    # N+1 indices
    indices = np.linspace(0, 1., N + 1)
    rgbs = []
    for key in ('red', 'green', 'blue'):
        # Find the N colors
        D = np.array(cdict[key])
        I = interpolate.interp1d(D[:, 0], D[:, 1])
        colors = I(colors_i)
        rgbs.append(colors)
        # Place these colors at the correct indices.
        A = np.zeros((N + 1, 3), float)
        A[:, 0] = indices
        A[1:, 1] = colors
        A[:-1, 2] = colors
        # Create a tuple for the dictionary.
        L = []
        for l in A:
            L.append(tuple(l))
        cdict[key] = tuple(L)

    # BUGFIX: zip() is a one-shot iterator on Python3 -- materialize it so
    # random.shuffle() works and print_colors() does not exhaust the
    # palette before it is returned to the caller.
    palette = list(zip(*rgbs))

    if shuffle:
        random.shuffle(palette)

    if plot:
        print_colors(palette)

    # Return (colormap object, RGB tuples)
    return mpl.colors.LinearSegmentedColormap('colormap', cdict, 1024), palette
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write_messages(ax, messages):
    """
    Write text lines on the canvas, anchored at the top right corner.
    Successive messages are stacked downward, 0.05 axes-units apart.
    """
    color = "gray"
    transform = ax.transAxes
    ypos = .95
    for msg in messages:
        ax.text(.95, ypos, msg, color=color, transform=transform, ha="right")
        ypos -= .05
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def quickplot(data, xmin, xmax, xlabel, title, ylabel="Counts",
              figname="plot.pdf", counts=True, print_stats=True):
    """
    Simple plotting function - given a dictionary of data, produce a bar
    plot with the counts shown on the plot.
    """
    plt.figure(1, (6, 6))
    left, height = zip(*sorted(data.items()))
    pad = max(height) * .01  # vertical offset for the count labels
    if counts:
        # Annotate each bar with its count, rotated vertically
        for l, h in zip(left, height):
            if xmax and l > xmax:
                break
            plt.text(l, h + pad, str(h), color="darkslategray", size=8,
                     ha="center", va="bottom", rotation=90)
    if xmax is None:
        xmax = max(left)

    plt.bar(left, height, align="center")
    plt.xlabel(markup(xlabel))
    plt.ylabel(markup(ylabel))
    plt.title(markup(title))
    plt.xlim((xmin - .5, xmax + .5))

    # Basic statistics
    messages = []
    counts_over_xmax = sum([v for k, v in data.items() if k > xmax])
    if counts_over_xmax:
        messages += ["Counts over xmax({0}): {1}".format(xmax, counts_over_xmax)]
    # Expand the histogram back to raw observations for the stats below
    kk = []
    for k, v in data.items():
        kk += [k] * v
    messages += ["Total: {0}".format(np.sum(height))]
    messages += ["Maximum: {0}".format(np.max(kk))]
    messages += ["Minimum: {0}".format(np.min(kk))]
    messages += ["Average: {0:.2f}".format(np.mean(kk))]
    messages += ["Median: {0}".format(np.median(kk))]
    ax = plt.gca()
    if print_stats:
        write_messages(ax, messages)

    set_human_axis(ax)
    set_ticklabels_helvetica(ax)
    savefig(figname)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_name_parts(au):
    """
    Split an author name into (last, first, initials).

    Example: 'Fares Z. Najar' => ('Najar', 'Fares', 'F.Z.')
    """
    tokens = au.split()
    first = tokens[0]
    # Dotted tokens are middle initials; the remaining tokens after the
    # first one make up the last name
    middle = "".join(t for t in tokens if t.endswith('.'))
    last = " ".join(t for t in tokens[1:] if not t.endswith('.'))
    initials = "{0}.{1}".format(first[0], middle)
    if first.endswith('.'):  # Some people use full middle name
        middle, last = last.split(None, 1)
        initials = "{0}.{1}.".format(first[0], middle)
    return last, first, initials
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def names(args):
    """
    %prog names namelist templatefile

    Generate name blocks from the `namelist` file. The `namelist` file is
    tab-delimited that contains >=4 columns of data. Three columns are
    mandatory. First name, middle initial and last name. First row is
    table header. For the extra columns, the first column will go in the
    `$N0` field in the template file, second to the `$N1` field, etc.

    In the alternative mode, the namelist just contains several sections.
    First row will go in the `$N0` in the template file, second to the
    `$N1` field. Useful hints for constructing the template file can be
    found in:
    <http://www.ncbi.nlm.nih.gov/IEB/ToolBox/CPP_DOC/asn_spec/seq.asn.html>

    Often the template file can be retrieved from web form:
    <http://www.ncbi.nlm.nih.gov/WebSub/template.cgi>
    """
    p = OptionParser(names.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 2:
        # BUGFIX: exit nonzero on usage error, consistent with the other
        # entry points in this module (`sys.exit(not p.print_help())`)
        sys.exit(not p.print_help())

    namelist, templatefile = args

    # First check the alternative (sectioned) format: file begins with '['
    if open(namelist).read()[0] == '[':
        out = parse_names(namelist)
        make_template(templatefile, out)
        return

    reader = csv.reader(open(namelist), delimiter="\t")
    header = next(reader)
    ncols = len(header)
    assert ncols > 3
    nextras = ncols - 3

    blocks = []
    bools = []
    for row in reader:
        first, middle, last = row[:3]
        extras = row[3:]
        # Each extra column holds Y/N flags selecting this author for list Ni
        bools.append([(x.upper() == 'Y') for x in extras])
        middle = middle.strip()
        if middle != "":
            middle = middle.rstrip('.') + '.'
        initials = "{0}.{1}".format(first[0], middle)
        suffix = ""
        nameblock = NameTemplate.format(last=last, first=first,
                                        initials=initials, suffix=suffix)
        blocks.append(nameblock)

    # Transpose: one boolean column per extra field
    selected_idx = zip(*bools)
    # BUGFIX: `[] * nextras` always evaluates to `[]`; use a plain list
    out = []
    for i, sbools in enumerate(selected_idx):
        selected = [b for b, ss in zip(blocks, sbools) if ss]
        bigblock = ",\n".join(selected)
        out.append(bigblock)
        logging.debug("List N{0} contains a total of {1} names.".format(i,
                      len(selected)))

    make_template(templatefile, out)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def main():
    """
    %prog scriptname.py

    create a minimal boilerplate for a new script
    """
    p = OptionParser(main.__doc__)
    p.add_option("-g", "--graphic", default=False, action="store_true",
                 help="Create boilerplate for a graphic script")
    opts, args = p.parse_args()

    if len(args) != 1:
        sys.exit(not p.print_help())

    script = args[0]
    graphic = opts.graphic
    # Pick the import block and app skeleton matching the script flavor
    imports = graphic_imports if graphic else default_imports
    app = graphic_app if graphic else default_app
    template = default_template.format(imports, app)
    write_file(script, template)

    message = "template writes to `{0}`".format(script)
    if graphic:
        message = "graphic " + message
    logging.debug(message[0].upper() + message[1:])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def unpack_ambiguous(s):
    """
    Expand a sequence containing IUPAC ambiguity codes into the list of
    all concrete sequences it can represent.
    """
    choices = [ambiguous_dna_values[base] for base in s]
    return ["".join(combo) for combo in product(*choices)]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def split(args):
    """
    %prog split barcodefile fastqfile1 ..

    Deconvolute fastq files into subsets of fastq reads, based on the
    barcodes in the barcodefile, which is a two-column file like:
    ID01 AGTCCAG

    Input fastqfiles can be several files. Output files are ID01.fastq,
    ID02.fastq, one file per line in barcodefile. When --paired is set,
    the number of input fastqfiles must be two. Output file (the
    deconvoluted reads) will be in interleaved format.
    """
    p = OptionParser(split.__doc__)
    p.set_outdir(outdir="deconv")
    p.add_option("--nocheckprefix", default=False, action="store_true",
                 help="Don't check shared prefix [default: %default]")
    p.add_option("--paired", default=False, action="store_true",
                 help="Paired-end data [default: %default]")
    p.add_option("--append", default=False, action="store_true",
                 help="Append barcode to 2nd read [default: %default]")
    p.set_cpus()
    opts, args = p.parse_args(args)

    if len(args) < 2:
        sys.exit(not p.print_help())

    barcodefile = args[0]
    fastqfile = args[1:]
    paired = opts.paired
    append = opts.append
    if append:
        assert paired, "--append only works with --paired"

    nfiles = len(fastqfile)

    # Expand IUPAC-ambiguous barcodes into all concrete sequences
    barcodes = []
    fp = open(barcodefile)
    for row in fp:
        id, seq = row.split()
        for s in unpack_ambiguous(seq):
            barcodes.append(BarcodeLine._make((id, s)))

    nbc = len(barcodes)
    logging.debug("Imported {0} barcodes (ambiguous codes expanded).".format(nbc))
    checkprefix = not opts.nocheckprefix
    if checkprefix:
        # Sanity check of shared prefix: a barcode that extends another
        # barcode would shadow it, so record it in an exclusion list
        excludebarcodes = []
        for bc in barcodes:
            exclude = []
            for s in barcodes:
                if bc.id == s.id:
                    continue

                assert bc.seq != s.seq
                if s.seq.startswith(bc.seq) and len(s.seq) > len(bc.seq):
                    logging.error("{0} shares same prefix as {1}.".format(s, bc))
                    exclude.append(s)
            excludebarcodes.append(exclude)
    else:
        excludebarcodes = nbc * [[]]

    outdir = opts.outdir
    mkdir(outdir)

    cpus = opts.cpus
    logging.debug("Create a pool of {0} workers.".format(cpus))
    pool = Pool(cpus)

    if paired:
        assert nfiles == 2, "You asked for --paired, but sent in {0} files".\
                format(nfiles)
        split_fun = append_barcode_paired if append else split_barcode_paired
        mode = "paired"
    else:
        split_fun = split_barcode
        mode = "single"

    logging.debug("Mode: {0}".format(mode))
    # One worker task per barcode; each task scans all input fastq files
    pool.map(split_fun, \
             zip(barcodes, excludebarcodes,
                 nbc * [outdir], nbc * [fastqfile]))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def merge(args):
    """
    %prog merge folder1 ...

    Consolidate split contents in the folders. The folders can be
    generated by the split() process and several samples may be in
    separate fastq files. This program merges them.
    """
    p = OptionParser(merge.__doc__)
    p.set_outdir(outdir="outdir")
    opts, args = p.parse_args(args)

    if len(args) < 1:
        sys.exit(not p.print_help())

    folders = args
    outdir = opts.outdir
    mkdir(outdir)

    files = flatten(glob("{0}/*.*.fastq".format(x)) for x in folders)
    files = list(files)
    # Group files by sample ID: the basename part before the first '.'
    key = lambda x: op.basename(x).split(".")[0]
    files.sort(key=key)  # groupby() requires input sorted on the same key
    for id, fns in groupby(files, key=key):
        fns = list(fns)
        outfile = op.join(outdir, "{0}.fastq".format(id))
        FileMerger(fns, outfile=outfile).merge(checkexists=True)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def expand_alleles(p, tolerance=0):
    """
    Return the expanded allele set given the tolerance: every allele
    within +/- `tolerance` of an observed allele is considered possible.
    """
    expanded = set()
    for allele in p:
        expanded.update(range(allele - tolerance, allele + tolerance + 1))
    return expanded
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_progenies(p1, p2, x_linked=False, tolerance=0):
    """
    Return the set of possible progeny genotypes in a trio, as sorted
    2-tuples of alleles drawn one from each parent (tolerance applied).
    """
    alleles1 = expand_alleles(p1, tolerance=tolerance)
    alleles2 = expand_alleles(p2, tolerance=tolerance)
    progenies = {tuple(sorted(pair)) for pair in product(alleles1, alleles2)}
    if x_linked:
        # Add all hemizygotes: a male carries a single X allele
        progenies |= {(allele, allele) for allele in alleles1 | alleles2}
    return progenies
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mendelian_errors2(args):
    """
    %prog mendelian_errors2 Trios.summary.csv

    Plot Mendelian errors as calculated by mendelian(). File
    `Trios.summary.csv` looks like:

    Name,Motif,Inheritance,N_Correct,N_Error,N_missing,ErrorRate [N_Error / (N_Correct + N_Error))]
    DM1,CTG,AD,790,12,0,1.5%
    DM2,CCTG,AD,757,45,0,5.6%
    DRPLA,CAG,AD,791,11,0,1.4%
    """
    p = OptionParser(mendelian_errors2.__doc__)
    opts, args, iopts = p.set_image_options(args, figsize="7x7", format="png")

    if len(args) != 1:
        sys.exit(not p.print_help())

    csvfile, = args
    fig, ax = plt.subplots(ncols=1, nrows=1,
                           figsize=(iopts.w, iopts.h))
    root = fig.add_axes([0, 0, 1, 1])
    ymin = -.2
    df = pd.read_csv(csvfile)
    data = []
    for i, d in df.iterrows():
        tred = d['Name']
        motif = d['Motif']
        if tred in ignore:
            # BUGFIX: this CSV has no 'TRED' column (it is named 'Name');
            # the old d['TRED'] lookup raised a KeyError when triggered
            logging.debug("Ignore {}".format(tred))
            continue
        if len(motif) > 6:
            # Abbreviate long motifs for the x-axis labels
            if "/" in motif:  # CTG/CAG
                motif = motif.split("/")[0]
            else:
                motif = motif[:6] + ".."
        xtred = "{} {}".format(tred, motif)
        # BUGFIX: use positional .iloc for the last column (the error
        # rate); the d[-1] label-fallback is removed in modern pandas
        accuracy = d.iloc[-1]
        data.append((xtred, accuracy))

    # Sort ascending by numeric error rate (strip the trailing '%')
    key = lambda x: float(x.rstrip('%'))
    data.sort(key=lambda x: key(x[-1]))
    print(data)
    treds, accuracies = zip(*data)
    ntreds = len(treds)
    ticks = range(ntreds)

    accuracies = [key(x) for x in accuracies]
    # Draw a stem from the baseline to each error-rate marker
    for tick, accuracy in zip(ticks, accuracies):
        ax.plot([tick, tick], [ymin, accuracy],
                "-", lw=2, color='lightslategray')
    trios, = ax.plot(accuracies, "o", mfc='w', mec='b')
    ax.set_title("Mendelian errors based on STR calls in trios in HLI samples")
    ntrios = "Mendelian errors in 802 trios"
    ax.legend([trios], [ntrios], loc='best')

    ax.set_xticks(ticks)
    ax.set_xticklabels(treds, rotation=45, ha="right", size=8)
    ax.set_yticklabels([int(x) for x in ax.get_yticks()], family='Helvetica')
    ax.set_ylabel("Mendelian errors (\%)")
    ax.set_ylim(ymin, 100)

    normalize_axes(root)

    image_name = "mendelian_errors2." + iopts.format
    savefig(image_name, dpi=iopts.dpi, iopts=iopts)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mendelian_check(tp1, tp2, tpp, is_xlinked=False):
    """
    Compare TRED calls for Parent1, Parent2 and Proband.

    Each of tp1/tp2/tpp is a (sex, call, ...) record where `call` is a
    pipe-separated allele string such as "12|13" (missing alleles are
    "." and absent from the parsed tuple). Returns one of "Missing",
    "Correct" or "Error".
    """
    def call_to_ints(call):
        return tuple(int(a) for a in call.split("|") if a != ".")

    tp1_sex, tp1_call = tp1[:2]
    tp2_sex, tp2_call = tp2[:2]
    tpp_sex, tpp_call = tpp[:2]
    tp1_call = call_to_ints(tp1_call)
    tp2_call = call_to_ints(tp2_call)
    tpp_call = call_to_ints(tpp_call)

    # All allele combinations the proband could inherit, order-normalized
    possible_progenies = {tuple(sorted(combo))
                          for combo in product(tp1_call, tp2_call)}
    if is_xlinked and tpp_sex == "Male":
        # X-linked male proband is hemizygous: single allele from mother
        possible_progenies = {(allele,) for allele in tp1_call}

    if -1 in tp1_call or -1 in tp2_call or -1 in tpp_call:
        return "Missing"
    return "Correct" if tpp_call in possible_progenies else "Error"
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def in_region(rname, rstart, target_chr, target_start, target_end):
    """
    Quick check whether point (rname, rstart) lies within the target
    region [target_start, target_end] on target_chr.
    """
    if rname != target_chr:
        return False
    return target_start <= rstart <= target_end
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mendelian_errors(args):
    """
    %prog mendelian_errors STR-Mendelian-errors.csv

    Plot Mendelian errors as calculated by mendelian(). File
    `STR-Mendelian-errors.csv` looks like:

    ,Duos - Mendelian errors,Trios - Mendelian errors
    SCA36,1.40%,0.60%
    ULD,0.30%,1.50%
    BPES,0.00%,1.80%

    One TRED disease per line, followed by duo errors and trio errors.
    """
    p = OptionParser(mendelian_errors.__doc__)
    opts, args, iopts = p.set_image_options(args, figsize="6x6")

    if len(args) != 1:
        sys.exit(not p.print_help())

    csvfile, = args
    fig, ax = plt.subplots(ncols=1, nrows=1,
                           figsize=(iopts.w, iopts.h))
    root = fig.add_axes([0, 0, 1, 1])
    ymin = -.2
    df = pd.read_csv(csvfile)
    data = []
    for i, d in df.iterrows():
        # NOTE(review): assumes the first column is labeled 'TRED' in the
        # actual input file - the docstring example shows it unnamed;
        # confirm against a real file
        if d['TRED'].split()[0] in ignore:
            logging.debug("Ignore {}".format(d['TRED']))
            continue
        data.append(d)
    treds, duos, trios = zip(*data)
    ntreds = len(treds)
    ticks = range(ntreds)

    # Keep only the disease abbreviation; parse "x.x%" strings to floats
    treds = [x.split()[0] for x in treds]
    duos = [float(x.rstrip('%')) for x in duos]
    trios = [float(x.rstrip('%')) for x in trios]

    # Draw a stem from the baseline up to the larger of duo/trio error
    for tick, duo, trio in zip(ticks, duos, trios):
        m = max(duo, trio)
        ax.plot([tick, tick], [ymin, m], "-", lw=2, color='lightslategray')

    duos, = ax.plot(duos, "o", mfc='w', mec='g')
    trios, = ax.plot(trios, "o", mfc='w', mec='b')
    ax.set_title("Mendelian errors based on trios and duos in HLI samples")
    nduos = "Mendelian errors in 362 duos"
    ntrios = "Mendelian errors in 339 trios"
    ax.legend([trios, duos], [ntrios, nduos], loc='best')

    ax.set_xticks(ticks)
    ax.set_xticklabels(treds, rotation=45, ha="right", size=8)
    yticklabels = [int(x) for x in ax.get_yticks()]
    ax.set_yticklabels(yticklabels, family='Helvetica')
    ax.set_ylabel("Mendelian errors (\%)")
    ax.set_ylim(ymin, 20)

    normalize_axes(root)

    image_name = "mendelian_errors." + iopts.format
    savefig(image_name, dpi=iopts.dpi, iopts=iopts)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def read_tred_tsv(tsvfile):
    """
    Read the TRED tab-separated table into a dataframe, indexed by its
    first column. The SampleKey column is forced to string so that keys
    which look numeric are preserved verbatim.
    """
    return pd.read_csv(tsvfile, sep="\t", index_col=0,
                       dtype={"SampleKey": str})
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mendelian(args):
    """
    %prog mendelian trios_candidate.json hli.20170424.tred.tsv

    Calculate Mendelian errors based on trios and duos.
    """
    p = OptionParser(mendelian.__doc__)
    # NOTE(review): "Tolernace" typo in the user-facing help text
    p.add_option("--tolerance", default=0, type="int",
                 help="Tolernace for differences")
    p.set_verbose()
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    triosjson, tredtsv = args
    verbose = opts.verbose
    tolerance = opts.tolerance

    js = json.load(open(triosjson))
    allterms = set()
    duos = set()
    trios = set()
    # Partition families into duos (one parent) and trios (both parents)
    for v in js:
        allterms |= set(v.keys())
        for trio_or_duo in extract_trios(v):
            assert len(trio_or_duo) in (2, 3)
            if len(trio_or_duo) == 2:
                duos.add(trio_or_duo)
            else:
                trios.add(trio_or_duo)
    # print "\n".join(allterms)
    print("A total of {} families imported".format(len(js)))

    # Read in all data
    df = read_tred_tsv(tredtsv)

    ids, treds = read_treds()
    table = {}
    for tred, inheritance in zip(treds["abbreviation"], treds["inheritance"]):
        x_linked = inheritance[0] == 'X'  # X-linked
        name = tred
        if x_linked:
            name += " (X-linked)"
        print("[TRED] {}".format(name))

        # Tally Mendelian errors across all duos
        n_total = len(duos)
        n_error = 0
        for duo in duos:
            n_error += duo.check_mendelian(df, tred, tolerance=tolerance,
                                           x_linked=x_linked, verbose=verbose)
        tag = "Duos - Mendelian errors"
        print("{}: {}".format(tag, percentage(n_error, n_total)))
        duo_error = percentage(n_error, n_total, mode=2)
        table[(name, tag)] = "{0:.1f}%".format(duo_error)

        # Tally Mendelian errors across all trios
        n_total = len(trios)
        n_error = 0
        for trio in trios:
            n_error += trio.check_mendelian(df, tred, tolerance=tolerance,
                                            x_linked=x_linked, verbose=verbose)
        tag = "Trios - Mendelian errors"
        print("{}: {}".format(tag, percentage(n_error, n_total)))
        trio_error = percentage(n_error, n_total, mode=2)
        table[(name, tag)] = "{0:.1f}%".format(trio_error)

    # Summarize
    print(tabulate(table))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mini(args):
    """
    %prog mini bamfile minibamfile

    Prepare mini-BAMs that contain only the STR loci.
    """
    p = OptionParser(mini.__doc__)
    # BUGFIX: corrected user-facing typo "reigons" -> "regions"
    p.add_option("--pad", default=20000, type="int",
                 help="Add padding to the STR regions")
    p.add_option("--treds", default=None,
                 help="Extract specific treds, use comma to separate")
    p.set_outfile()
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    bamfile, minibam = args
    treds = opts.treds.split(",") if opts.treds else None
    pad = opts.pad

    # Build a BED of padded STR regions, then subset the BAM to it
    bedfile = make_STR_bed(pad=pad, treds=treds)
    get_minibam_bed(bamfile, bedfile, minibam)
    logging.debug("Mini-BAM written to `{}`".format(minibam))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def likelihood2(args):
    """
    %prog likelihood2 100_20.json

    Plot the likelihood surface and marginal distributions.
    """
    from matplotlib import gridspec

    p = OptionParser(likelihood2.__doc__)
    opts, args, iopts = p.set_image_options(args, figsize="10x5",
                                            style="white", cmap="coolwarm")

    if len(args) != 1:
        sys.exit(not p.print_help())

    jsonfile, = args
    fig = plt.figure(figsize=(iopts.w, iopts.h))
    # Left column: joint surface; right column: two stacked marginals
    gs = gridspec.GridSpec(2, 2)
    ax1 = fig.add_subplot(gs[:, 0])
    ax2 = fig.add_subplot(gs[0, 1])
    ax3 = fig.add_subplot(gs[1, 1])
    plt.tight_layout(pad=3)
    pf = plot_panel(jsonfile, ax1, ax2, ax3, opts.cmap)

    root = fig.add_axes([0, 0, 1, 1])
    normalize_axes(root)

    # Output name carries the prefix returned by plot_panel()
    image_name = "likelihood2.{}.".format(pf) + iopts.format
    savefig(image_name, dpi=iopts.dpi, iopts=iopts)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def likelihood3(args):
    """
    %prog likelihood3 140_20.json 140_70.json

    Plot the likelihood surface and marginal distributions for two settings.
    """
    from matplotlib import gridspec

    p = OptionParser(likelihood3.__doc__)
    opts, args, iopts = p.set_image_options(args, figsize="10x10",
                                            style="white", cmap="coolwarm")

    if len(args) != 2:
        sys.exit(not p.print_help())

    jsonfile1, jsonfile2 = args
    fig = plt.figure(figsize=(iopts.w, iopts.h))
    # 9-row grid: rows 0-3 hold panel A, row 4 is a spacer, rows 5-8
    # hold panel B; each panel = 1 surface plot + 2 marginals
    gs = gridspec.GridSpec(9, 2)
    ax1 = fig.add_subplot(gs[:4, 0])
    ax2 = fig.add_subplot(gs[:2, 1])
    ax3 = fig.add_subplot(gs[2:4, 1])
    ax4 = fig.add_subplot(gs[5:, 0])
    ax5 = fig.add_subplot(gs[5:7, 1])
    ax6 = fig.add_subplot(gs[7:, 1])
    plt.tight_layout(pad=2)
    plot_panel(jsonfile1, ax1, ax2, ax3, opts.cmap)
    plot_panel(jsonfile2, ax4, ax5, ax6, opts.cmap)

    root = fig.add_axes([0, 0, 1, 1])
    pad = .02
    panel_labels(root, ((pad, 1 - pad, "A"), (pad, 4. / 9, "B")))
    normalize_axes(root)

    image_name = "likelihood3." + iopts.format
    savefig(image_name, dpi=iopts.dpi, iopts=iopts)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def allelefreqall(args):
    """
    %prog allelefreqall HN_Platinum_Gold.20180525.tsv.report.txt

    Plot all 30 STR allele frequencies, six distributions per page, then
    concatenate the pages into a single PDF.
    """
    p = OptionParser(allelefreqall.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    reportfile, = args
    treds, df = read_treds(reportfile)
    # Prepare pages, each page with 6 distributions
    treds = sorted(treds)
    count = 6
    pdfs = []
    # BUGFIX: xrange() and true-division are Python2-isms that break on
    # Python3 - use range() with floor division
    for page in range(len(treds) // count + 1):
        start = page * count
        page_treds = treds[start: start + count]
        if not page_treds:
            break
        allelefreq([",".join(page_treds), "--usereport", reportfile,
                    "--nopanels", "--figsize", "12x16"])
        outpdf = "allelefreq.{}.pdf".format(page)
        sh("mv allelefreq.pdf {}".format(outpdf))
        pdfs.append(outpdf)

    from jcvi.formats.pdf import cat
    pf = op.basename(reportfile).split(".")[0]
    finalpdf = pf + ".allelefreq.pdf"
    logging.debug("Merging pdfs into `{}`".format(finalpdf))
    cat(pdfs + ["-o", finalpdf, "--cleanup"])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def allelefreq(args):
    """
    %prog allelefreq HD,DM1,SCA1,SCA17,FXTAS,FRAXE

    Plot the allele frequencies of some STRs.
    """
    p = OptionParser(allelefreq.__doc__)
    p.add_option("--nopanels", default=False, action="store_true",
                 help="No panel labels A, B, ...")
    p.add_option("--usereport", help="Use allele frequency in report file")
    opts, args, iopts = p.set_image_options(args, figsize="9x13")

    if len(args) != 1:
        sys.exit(not p.print_help())

    loci, = args
    # 3x2 grid: up to six loci per figure
    fig, ((ax1, ax2), (ax3, ax4), (ax5, ax6)) = plt.subplots(ncols=2, nrows=3,
                                                             figsize=(iopts.w, iopts.h))
    plt.tight_layout(pad=4)
    if opts.usereport:
        treds, df = read_treds(tredsfile=opts.usereport)
    else:
        treds, df = read_treds()

    df = df.set_index(["abbreviation"])

    axes = (ax1, ax2, ax3, ax4, ax5, ax6)
    loci = loci.split(",")
    for ax, locus in zip(axes, loci):
        plot_allelefreq(ax, df, locus)

    # Blank out axes left unused when fewer than six loci were given
    for ax in axes[len(loci):]:
        ax.set_axis_off()

    root = fig.add_axes([0, 0, 1, 1])
    pad = .03
    if not opts.nopanels:
        panel_labels(root, ((pad / 2, 1 - pad, "A"), (.5 + pad, 1 - pad, "B"),
                            (pad / 2, 2 / 3. - pad / 2, "C"), (.5 + pad, 2 / 3. - pad / 2, "D"),
                            (pad / 2, 1 / 3., "E"), (.5 + pad, 1 / 3., "F"),
                            ))
    normalize_axes(root)

    image_name = "allelefreq." + iopts.format
    savefig(image_name, dpi=iopts.dpi, iopts=iopts)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def simulate(args):
    """
    %prog simulate run_dir 1 300

    Simulate BAMs with varying inserts with dwgsim. The above command will
    simulate between 1 to 300 CAGs in the HD region, in a directory called
    `run_dir`.
    """
    p = OptionParser(simulate.__doc__)
    p.add_option("--method", choices=("wgsim", "eagle"), default="eagle",
                 help="Read simulator")
    p.add_option("--ref", default="hg38", choices=("hg38", "hg19"),
                 help="Reference genome version")
    p.add_option("--tred", default="HD", help="TRED locus")
    add_simulate_options(p)
    opts, args = p.parse_args(args)

    if len(args) != 3:
        sys.exit(not p.print_help())

    rundir, startunits, endunits = args
    ref = opts.ref
    # NOTE(review): reference FASTA location is hard-coded to /mnt/ref
    ref_fasta = "/mnt/ref/{}.upper.fa".format(ref)
    startunits, endunits = int(startunits), int(endunits)
    basecwd = os.getcwd()
    mkdir(rundir)
    os.chdir(rundir)
    cwd = os.getcwd()

    # TRED region (e.g. Huntington); flanking sequence is taken on both
    # sides of the repeat so reads can span the locus
    pad_left, pad_right = 1000, 10000
    repo = TREDsRepo(ref=ref)
    tred = repo[opts.tred]
    chr, start, end = tred.chr, tred.repeat_start, tred.repeat_end
    logging.debug("Simulating {}".format(tred))

    fasta = Fasta(ref_fasta)
    seq_left = fasta[chr][start - pad_left:start - 1]
    seq_right = fasta[chr][end: end + pad_right]
    motif = tred.repeat
    simulate_method = wgsim if opts.method == "wgsim" else eagle

    # One subdirectory per repeat count: write a fake haplotype
    # (left flank + N x motif + right flank), simulate reads, align,
    # then keep only the indexed BAM
    for units in range(startunits, endunits + 1):
        pf = str(units)
        mkdir(pf)
        os.chdir(pf)
        seq = str(seq_left) + motif * units + str(seq_right)
        fastafile = pf + ".fasta"
        make_fasta(seq, fastafile, id=chr.upper())

        # Simulate reads on it
        simulate_method([fastafile, "--depth={}".format(opts.depth),
                         "--readlen={}".format(opts.readlen),
                         "--distance={}".format(opts.distance),
                         "--outfile={}".format(pf)])

        read1 = pf + ".bwa.read1.fastq"
        read2 = pf + ".bwa.read2.fastq"
        samfile, _ = align([ref_fasta, read1, read2])
        indexed_samfile = index([samfile])

        sh("mv {} ../{}.bam".format(indexed_samfile, pf))
        sh("mv {}.bai ../{}.bam.bai".format(indexed_samfile, pf))

        os.chdir(cwd)
        # Remove the per-unit working directory; only the BAM is kept
        shutil.rmtree(pf)

    os.chdir(basecwd)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def batchlobstr(args):
    """
    %prog batchlobstr bamlist

    Run lobSTR on a list of BAMs. The corresponding batch command for
    TREDPARSE:
    $ tred.py bamlist --haploid chr4 --workdir tredparse_results
    """
    parser = OptionParser(batchlobstr.__doc__)
    parser.add_option("--haploid", default="chrY,chrM",
                      help="Use haploid model for these chromosomes")
    parser.set_cpus()
    opts, args = parser.parse_args(args)

    if len(args) != 1:
        sys.exit(not parser.print_help())

    bamlist, = args
    # One lobSTR command per BAM listed in the input file
    template = "python -m jcvi.variation.str lobstr TREDs"
    template += " --input_bam_path {}"
    template += " --haploid {}".format(opts.haploid)
    template += " --simulation"
    commands = [template.format(line.strip()) for line in open(bamlist)]
    Parallel(commands, cpus=opts.cpus).run()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def compilevcf(args):
    """
    %prog compilevcf dir

    Compile vcf outputs into lists.
    """
    from jcvi.variation.str import LobSTRvcf

    p = OptionParser(compilevcf.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    folder, = args
    vcf_files = iglob(folder, "*.vcf,*.vcf.gz")
    for vcf_file in vcf_files:
        try:
            # Try parsing as a lobSTR VCF first. Note `p` is rebound here,
            # shadowing the OptionParser above (no longer needed).
            p = LobSTRvcf(columnidsfile=None)
            p.parse(vcf_file, filtered=False)
            res = p.items()
            if res:
                k, v = res[0]
                # Genotype stored as "a,b" -> report as "a/b"
                res = v.replace(',', '/')
            else:
                # No calls in this VCF
                res = "-1/-1"
            num = op.basename(vcf_file).split(".")[0]
            print(num, res)
        except (TypeError, AttributeError) as e:
            # Not a lobSTR VCF - fall back to the TREDPARSE format
            p = TREDPARSEvcf(vcf_file)
            continue
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def draw_jointplot(figname, x, y, data=None, kind="reg", color=None,
                   xlim=None, ylim=None, format="pdf"):
    """
    Wraps around sns.jointplot
    """
    import seaborn as sns

    sns.set_context('talk')
    plt.clf()

    # Friendly axis labels for known column names; unknown columns
    # fall back to their raw name
    axis_labels = {
        "MeanCoverage": "Sample Mean Coverage",
        "HD.FDP": "Depth of full spanning reads",
        "HD.PDP": "Depth of partial spanning reads",
        "HD.PEDP": "Depth of paired-end reads",
        "HD.2": "Repeat size of the longer allele",
    }

    grid = sns.jointplot(x, y, data=data, kind=kind, color=color,
                         xlim=xlim, ylim=ylim)
    grid.ax_joint.set_xlabel(axis_labels.get(x, x))
    grid.ax_joint.set_ylabel(axis_labels.get(y, y))
    savefig(figname + "." + format, cleanup=False)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_lo_hi_from_CI(s, exclude=None):
    """
    Parse the confidence interval from CI. (40, 60)
    """
    # Input looks like "lo1-hi1|lo2-hi2"; take the max of the two lows
    # and the max of the two highs, optionally dropping `exclude` first
    first, second = s.split("|")
    lo1, hi1 = first.split("-")
    lo2, hi2 = second.split("-")
    los = [int(lo1), int(lo2)]
    his = [int(hi1), int(hi2)]
    for bounds in (los, his):
        if exclude and exclude in bounds:
            bounds.remove(exclude)
    return max(los), max(his)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def compare(args):
    """
    %prog compare Evaluation.csv

    Compare performances of various variant callers on simulated STR datasets.
    """
    p = OptionParser(compare.__doc__)
    opts, args, iopts = p.set_image_options(args, figsize="10x10")

    if len(args) != 1:
        sys.exit(not p.print_help())

    datafile, = args
    pf = datafile.rsplit(".", 1)[0]
    fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(ncols=2, nrows=2,
                                                 figsize=(iopts.w, iopts.h))
    plt.tight_layout(pad=3)
    bbox = {'facecolor': 'tomato', 'alpha': .2, 'ec': 'w'}
    pad = 2

    # Read benchmark data.
    # Bug fix: use the file given on the command line; the filename used to
    # be hard-coded as "Evaluation.csv", ignoring the parsed argument.
    df = pd.read_csv(datafile)
    truth = df["Truth"]

    # One panel per caller, each plotted against the known truth
    axes = (ax1, ax2, ax3, ax4)
    progs = ("Manta", "Isaac", "GATK", "lobSTR")
    markers = ("bx-", "yo-", "md-", "c+-")

    for ax, prog, marker in zip(axes, progs, markers):
        ax.plot(truth, df[prog], marker)
        ax.plot(truth, truth, 'k--')  # to show diagonal
        ax.axhline(infected_thr, color='tomato')
        ax.text(max(truth) - pad, infected_thr + pad, 'Risk threshold',
                bbox=bbox, ha="right")
        ax.axhline(ref_thr, color='tomato')
        ax.text(max(truth) - pad, ref_thr - pad, 'Reference repeat count',
                bbox=bbox, ha="right", va="top")
        ax.set_title(SIMULATED_HAPLOID)
        ax.set_xlabel(r'Num of CAG repeats inserted ($\mathit{h}$)')
        ax.set_ylabel('Num of CAG repeats called')
        ax.legend([prog, 'Truth'], loc='best')

    root = fig.add_axes([0, 0, 1, 1])
    pad = .03
    panel_labels(root, ((pad / 2, 1 - pad, "A"), (1 / 2., 1 - pad, "B"),
                        (pad / 2, 1 / 2., "C"), (1 / 2., 1 / 2., "D")))
    normalize_axes(root)

    image_name = pf + "." + iopts.format
    savefig(image_name, dpi=iopts.dpi, iopts=iopts)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def stem_leaf_plot(data, vmin, vmax, bins, digit=1, title=None):
    '''
    Generate stem and leaf plot given a collection of numbers

    Prints an ASCII histogram of `data` binned into `bins` intervals over
    [vmin, vmax], and returns (bin_edges, hist).
    '''
    assert bins > 0
    # Fix: the original named this variable `range`, shadowing the builtin
    span = vmax - vmin
    step = span * 1. / bins
    if isinstance(span, int):
        step = int(ceil(step))

    step = step or 1  # guard against a zero-width step

    bins = np.arange(vmin, vmax + step, step)
    hist, bin_edges = np.histogram(data, bins=bins)
    # By default, len(bin_edges) = len(hist) + 1; drop the trailing edge
    bin_edges = bin_edges[:len(hist)]
    asciiplot(bin_edges, hist, digit=digit, title=title)
    print("Last bin ends in {0}, inclusive.".format(vmax), file=sys.stderr)
    return bin_edges, hist
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def prepare(args):
    """
    %prog prepare barcode_key.csv reference.fasta

    Prepare TASSEL pipeline.
    """
    # Bug fix: a "|" was missing after "PstI-MspI", which fused it with
    # "PstI-TaqI" into a single invalid enzyme choice
    valid_enzymes = "ApeKI|ApoI|BamHI|EcoT22I|HinP1I|HpaII|MseI|MspI|" \
                    "NdeI|PasI|PstI|Sau3AI|SbfI|AsiSI-MspI|BssHII-MspI|" \
                    "FseI-MspI|PaeR7I-HhaI|PstI-ApeKI|PstI-EcoT22I|PstI-MspI|" \
                    "PstI-TaqI|SalI-MspI|SbfI-MspI".split("|")
    p = OptionParser(prepare.__doc__)
    p.add_option("--enzyme", default="ApeKI", choices=valid_enzymes,
                 help="Restriction enzyme used [default: %default]")
    p.set_home("tassel")
    p.set_aligner(aligner="bwa")
    p.set_cpus()
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    barcode, reference = args
    thome = opts.tassel_home
    reference = get_abs_path(reference)
    # Directory layout expected by the TASSEL plugins below
    folders = ("fastq", "tagCounts", "mergedTagCounts", "topm",
               "tbt", "mergedTBT", "hapmap", "hapmap/raw",
               "hapmap/mergedSNPs", "hapmap/filt", "hapmap/bpec")
    for f in folders:
        mkdir(f)

    # Build the pipeline as a list of shell commands, written to run.sh
    runsh = []
    o = "-i fastq -k {0} -e {1} -o tagCounts".format(barcode, opts.enzyme)
    cmd = run_pipeline(thome, "FastqToTagCountPlugin", o)
    runsh.append(cmd)

    o = "-i tagCounts -o mergedTagCounts/myMasterTags.cnt"
    o += " -c 5 -t mergedTagCounts/myMasterTags.cnt.fq"
    cmd = run_pipeline(thome, "MergeMultipleTagCountPlugin", o)
    runsh.append(cmd)

    # Align the merged tags against the reference with the chosen aligner
    runsh.append("cd mergedTagCounts")
    cmd = "python -m jcvi.apps.{0} align --cpus {1}".\
        format(opts.aligner, opts.cpus)
    cmd += " {0} myMasterTags.cnt.fq".format(reference)
    runsh.append(cmd)
    runsh.append("cd ..")

    o = "-i mergedTagCounts/*.sam -o topm/myMasterTags.topm"
    cmd = run_pipeline(thome, "SAMConverterPlugin", o)
    runsh.append(cmd)

    o = "-i mergedTBT/myStudy.tbt.byte -y -m topm/myMasterTags.topm"
    o += " -mUpd topm/myMasterTagsWithVariants.topm"
    o += " -o hapmap/raw/myGBSGenos_chr+.hmp.txt"
    o += " -mnF 0.8 -p myPedigreeFile.ped -mnMAF 0.02 -mnMAC 100000"
    o += " -ref {0} -sC 1 -eC 10".format(reference)
    cmd = run_pipeline(thome, "TagsToSNPByAlignmentPlugin", o)
    runsh.append(cmd)

    o = "-hmp hapmap/raw/myGBSGenos_chr+.hmp.txt"
    o += " -o hapmap/mergedSNPs/myGBSGenos_mergedSNPs_chr+.hmp.txt"
    o += " -misMat 0.1 -p myPedigreeFile.ped -callHets -sC 1 -eC 10"
    cmd = run_pipeline(thome, "MergeDuplicateSNPsPlugin", o)
    runsh.append(cmd)

    o = "-hmp hapmap/mergedSNPs/myGBSGenos_mergedSNPs_chr+.hmp.txt"
    o += " -o hapmap/filt/myGBSGenos_mergedSNPsFilt_chr+.hmp.txt"
    o += " -mnTCov 0.01 -mnSCov 0.2 -mnMAF 0.01 -sC 1 -eC 10"
    #o += "-hLD -mnR2 0.2 -mnBonP 0.005"
    cmd = run_pipeline(thome, "GBSHapMapFiltersPlugin", o)
    runsh.append(cmd)

    runfile = "run.sh"
    write_file(runfile, "\n".join(runsh))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def batch(args):
    """
    %prog batch database.fasta project_dir output_dir

    Run bwa in batch mode.
    """
    # Fix: the usage line used to read "%proj", breaking the help text
    p = OptionParser(batch.__doc__)
    set_align_options(p)
    p.set_sam_options()
    opts, args = p.parse_args(args)

    if len(args) != 3:
        sys.exit(not p.print_help())

    ref_fasta, proj_dir, outdir = args
    outdir = outdir.rstrip("/")
    s3dir = None
    # An s3:// target is mirrored locally, then copied up after alignment
    if outdir.startswith("s3://"):
        s3dir = outdir
        outdir = op.basename(outdir)
    mkdir(outdir)

    mm = MakeManager()
    # `pfiles` renamed from `p`, which shadowed the OptionParser above
    for pfiles, pf in iter_project(proj_dir):
        targs = [ref_fasta] + pfiles
        cmd1, bamfile = mem(targs, opts)
        if cmd1:
            cmd1 = output_bam(cmd1, bamfile)
        nbamfile = op.join(outdir, bamfile)
        cmd2 = "mv {} {}".format(bamfile, nbamfile)
        cmds = [cmd1, cmd2]

        if s3dir:
            cmd = "aws s3 cp {} {} --sse".format(nbamfile,
                                                 op.join(s3dir, bamfile))
            cmds.append(cmd)

        mm.add(pfiles, nbamfile, cmds)

    mm.write()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def index(args):
    """
    %prog index database.fasta

    Wrapper for `bwa index`. Same interface.
    """
    parser = OptionParser(index.__doc__)
    opts, args = parser.parse_args(args)

    if len(args) != 1:
        sys.exit(not parser.print_help())

    dbfile, = args
    # check_index builds the BWA index only when it is missing/stale
    check_index(dbfile)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def samse(args, opts):
    """
    %prog samse database.fasta short_read.fastq

    Wrapper for `bwa samse`. Output will be short_read.sam.
    """
    dbfile, readfile = args
    dbfile = check_index(dbfile)
    saifile = check_aln(dbfile, readfile, cpus=opts.cpus)

    samfile, _, unmapped = get_samfile(readfile, dbfile,
                                       bam=opts.bam, unmapped=opts.unmapped)
    # Skip when the SAM is newer than both inputs
    if not need_update((dbfile, saifile), samfile):
        logging.error("`{0}` exists. `bwa samse` already run.".format(samfile))
        return "", samfile

    # Assemble the bwa samse command line
    cmd = " ".join(["bwa samse", dbfile, saifile, readfile])
    cmd += " " + opts.extra
    if opts.uniq:
        cmd += " -n 1"  # report unique hits only

    return cmd, samfile
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sampe(args, opts):
    """
    %prog sampe database.fasta read1.fq read2.fq

    Wrapper for `bwa sampe`. Output will be read1.sam.
    """
    dbfile, read1file, read2file = args
    dbfile = check_index(dbfile)
    sai1file = check_aln(dbfile, read1file, cpus=opts.cpus)
    sai2file = check_aln(dbfile, read2file, cpus=opts.cpus)

    samfile, _, unmapped = get_samfile(read1file, dbfile,
                                       bam=opts.bam, unmapped=opts.unmapped)
    if not need_update((dbfile, sai1file, sai2file), samfile):
        # Bug fix: the message used to say `bwa samse` in this paired wrapper
        logging.error("`{0}` exists. `bwa sampe` already run.".format(samfile))
        return "", samfile

    cmd = "bwa sampe " + " ".join((dbfile, sai1file, sai2file,
                                   read1file, read2file))
    cmd += " " + opts.extra
    if opts.cutoff:
        cmd += " -a {0}".format(opts.cutoff)  # max insert size
    if opts.uniq:
        cmd += " -n 1"  # report unique hits only

    return cmd, samfile
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bwasw(args, opts):
    """
    %prog bwasw database.fasta long_read.fastq

    Wrapper for `bwa bwasw`. Output will be long_read.sam.
    """
    dbfile, readfile = args
    dbfile = check_index(dbfile)
    samfile, _, unmapped = get_samfile(readfile, dbfile,
                                       bam=opts.bam, unmapped=opts.unmapped)
    if not need_update(dbfile, samfile):
        logging.error("`{0}` exists. `bwa bwasw` already run.".format(samfile))
        return "", samfile

    # bwasw takes the database and the read file directly
    cmd = " ".join(["bwa bwasw"] + list(args))
    cmd += " -t {0}".format(opts.cpus)
    cmd += " " + opts.extra
    return cmd, samfile
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def link(args):
    """
    %prog link metafile

    Link source to target based on a tabular file.
    """
    from jcvi.apps.base import mkdir

    parser = OptionParser(link.__doc__)
    parser.add_option("--dir",
                      help="Place links in a subdirectory [default: %default]")
    opts, args = parser.parse_args(args)

    if len(args) != 1:
        sys.exit(not parser.print_help())

    meta, = args
    subdir = opts.dir
    if subdir:
        mkdir(subdir)

    # Sources are resolved relative to the metafile's own directory
    cwd = op.dirname(get_abs_path(meta))
    fp = open(meta)
    for line in fp:
        source, target = line.split()
        source = op.join(cwd, source)
        if subdir:
            target = op.join(subdir, target)
        lnsf(source, target, log=True)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def touch(args):
    """
    find . -type l | %prog touch

    Linux commands `touch` wouldn't modify mtime for links, this script can.
    Use find to pipe in all the symlinks.
    """
    p = OptionParser(touch.__doc__)
    opts, args = p.parse_args(args)

    for line in sys.stdin:
        link_name = line.strip()
        # Only existing, non-dangling symlinks are re-created
        if not (op.islink(link_name) and op.exists(link_name)):
            continue
        lnsf(get_abs_path(link_name), link_name)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cp(args):
    """
    find folder -type l | %prog cp

    Copy all the softlinks to the current folder, using absolute paths
    """
    p = OptionParser(cp.__doc__)

    for line in sys.stdin:
        entry = line.strip()
        if not op.exists(entry):
            continue
        source = get_abs_path(entry)
        local_name = op.basename(entry)
        # Re-link into the current folder unless a file already sits there
        if not op.exists(local_name):
            os.symlink(source, local_name)
        logging.debug(" => ".join((source, local_name)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def size(args):
    """
    find folder -type l | %prog size

    Get the size for all the paths that are pointed by the links
    """
    from jcvi.utils.cbook import human_size

    p = OptionParser(size.__doc__)

    entries = []
    for line in sys.stdin:
        link_name = line.strip()
        if not op.islink(link_name):
            continue
        target = get_abs_path(link_name)
        entries.append((op.getsize(target), op.basename(link_name)))

    # sort by descending file size
    for filesize, link_name in sorted(entries, reverse=True):
        pretty = human_size(filesize, a_kilobyte_is_1024_bytes=True)
        print("%10s\t%s" % (pretty, link_name), file=sys.stderr)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def nucmer(args):
    """
    %prog nucmer mappings.bed MTR.fasta assembly.fasta chr1 3

    Select specific chromosome region based on MTR mapping. The above command
    will extract chr1:2,000,001-3,000,000.
    """
    p = OptionParser(nucmer.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 5:
        sys.exit(not p.print_help())

    mapbed, mtrfasta, asmfasta, chr, idx = args
    idx = int(idx)
    m1 = 1000000  # window size: 1 Mb
    bedfile = "sample.bed"
    bed = Bed()
    # Single window [(idx - 1) Mb, idx Mb) on the requested chromosome
    bed.add("\t".join(str(x) for x in (chr, (idx - 1) * m1, idx * m1)))
    bed.print_to_file(bedfile)

    # IDs of the assembly contigs whose mappings overlap the window
    cmd = "intersectBed -a {0} -b {1} -nonamecheck -sorted | cut -f4".\
        format(mapbed, bedfile)
    idsfile = "query.ids"
    sh(cmd, outfile=idsfile)

    # Reference slice of the window, and the matching assembly contigs
    sfasta = fastaFromBed(bedfile, mtrfasta)
    qfasta = "query.fasta"
    cmd = "faSomeRecords {0} {1} {2}".format(asmfasta, idsfile, qfasta)
    sh(cmd)

    # Align contigs against the reference window and plot the alignment
    cmd = "nucmer {0} {1}".format(sfasta, qfasta)
    sh(cmd)

    mummerplot_main(["out.delta", "--refcov=0"])
    sh("mv out.pdf {0}.{1}.pdf".format(chr, idx))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def validate_term(term, so=None, method="verify"):
    """
    Validate an SO term against so.obo

    `so` is a pre-loaded DAG of SO terms (loaded on demand when None).
    With the default method ("verify"), an unknown term is a fatal error.
    When `method` contains "resolve", one underscore-delimited token is
    stripped and the lookup retried recursively: with "prefix" in `method`
    the LAST token is dropped (keeping the prefix), otherwise the FIRST.
    Returns the resolved term name, or None if resolution fails.
    """
    if so is None:
        so = load_GODag()

    oterm = term  # remember the original spelling for the log message
    if term not in so.valid_names:
        if "resolve" in method:
            if "_" in term:
                tparts = deque(term.split("_"))
                # Drop one token and retry with the shortened name
                tparts.pop() if "prefix" in method else tparts.popleft()
                nterm = "_".join(tparts).strip()
                term = validate_term(nterm, so=so, method=method)
            if term is None:
                return None
        else:
            logging.error("Term `{0}` does not exist".format(term))
            sys.exit(1)

    if oterm != term:
        logging.debug("Resolved term `{0}` to `{1}`".format(oterm, term))

    return term
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def agp(args):
    """
    %prog agp Siirt_Female_pistachio_23May2017_table.txt

    The table file, as prepared by Dovetail Genomics, is not immediately
    useful to convert gene model coordinates, as assumed by
    formats.chain.fromagp(). This is a quick script to do such conversion.

    The file structure of this table file is described in the .manifest file
    shipped in the same package::

        pistachio_b_23May2017_MeyIy.table.txt
        Tab-delimited table describing positions of input assembly scaffolds
        in the Hirise scaffolds. The table has the following format:

            1. HiRise scaffold name
            2. Input sequence name
            3. Starting base (zero-based) of the input sequence
            4. Ending base of the input sequence
            5. Strand (- or +) of the input sequence in the scaffold
            6. Starting base (zero-based) in the HiRise scaffold
            7. Ending base in the HiRise scaffold

    where '-' in the strand column indicates that the sequence is reverse
    complemented relative to the input assembly.

    CAUTION: This is NOT a proper AGP format since it does not have gaps in
    them.
    """
    p = OptionParser(agp.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    tablefile, = args
    fp = open(tablefile)
    for row in fp:
        atoms = row.split()
        hr = atoms[0]                    # HiRise scaffold name
        scaf = atoms[1]                  # input scaffold name
        scaf_start = int(atoms[2]) + 1   # 0-based -> 1-based start
        scaf_end = int(atoms[3])
        strand = atoms[4]
        hr_start = int(atoms[5]) + 1     # 0-based -> 1-based start
        hr_end = int(atoms[6])
        # Emit a 9-column AGP-like line; 'W' marks a WGS contig component
        print("\t".join(str(x) for x in \
                (hr, hr_start, hr_end, 1, 'W',
                 scaf, scaf_start, scaf_end, strand)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def traits(args):
    """
    %prog traits directory

    Make HTML page that reports eye and skin color.
    """
    p = OptionParser(traits.__doc__)
    opts, args = p.parse_args(args)

    if len(args) < 1:
        sys.exit(not p.print_help())

    samples = []
    for folder in args:
        targets = iglob(folder, "*-traits.json")
        if not targets:
            continue
        filename = targets[0]
        js = json.load(open(filename))
        # Convert the LAB color coordinates in the JSON into displayable
        # RGB strings for the HTML template
        js["skin_rgb"] = make_rgb(
            js["traits"]["skin-color"]["L"],
            js["traits"]["skin-color"]["A"],
            js["traits"]["skin-color"]["B"])
        js["eye_rgb"] = make_rgb(
            js["traits"]["eye-color"]["L"],
            js["traits"]["eye-color"]["A"],
            js["traits"]["eye-color"]["B"])
        samples.append(js)

    # Render all samples into a single static HTML report
    template = Template(traits_template)
    fw = open("report.html", "w")
    print(template.render(samples=samples), file=fw)
    logging.debug("Report written to `{}`".format(fw.name))
    fw.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def regression(args):
    """
    %prog regression postgenomic-s.tsv

    Plot chronological vs. predicted age.
    """
    p = OptionParser(regression.__doc__)
    opts, args, iopts = p.set_image_options(args, figsize="8x8")

    if len(args) != 1:
        sys.exit(not p.print_help())

    tsvfile, = args
    df = pd.read_csv(tsvfile, sep="\t")
    chrono = "Chronological age (yr)"
    pred = "Predicted age (yr)"
    # Two-column frame so the joint plot gets friendly axis names
    resdf = pd.DataFrame({chrono: df["hli_calc_age_sample_taken"], pred: df["Predicted Age"]})
    g = sns.jointplot(chrono, pred, resdf, joint_kws={"s": 6},
                      xlim=(0, 100), ylim=(0, 80))
    g.fig.set_figwidth(iopts.w)
    g.fig.set_figheight(iopts.h)
    outfile = tsvfile.rsplit(".", 1)[0] + ".regression.pdf"
    savefig(outfile)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def composite_correlation(df, size=(12, 8)):
    """
    Plot composite correlation figure

    Draws a 2x2 grid of age scatter plots: telomere length (A), chrX copy
    number (B), TCR-alpha deletions (C), chrY copy number (D). Points are
    colored by sequencing chemistry.
    """
    fig = plt.figure(1, size)
    ax1 = plt.subplot2grid((2, 2), (0, 0))
    ax2 = plt.subplot2grid((2, 2), (0, 1))
    ax3 = plt.subplot2grid((2, 2), (1, 0))
    ax4 = plt.subplot2grid((2, 2), (1, 1))
    # nan entry catches samples with missing chemistry annotation
    chemistry = ["V1", "V2", "V2.5", float("nan")]
    colors = sns.color_palette("Set2", 8)
    color_map = dict(zip(chemistry, colors))
    age_label = "Chronological age (yr)"
    # Panel A: telomere length
    ax1.scatter(df["hli_calc_age_sample_taken"], df["teloLength"],
                s=10, marker='.',
                color=df["Chemistry"].map(color_map))
    ax1.set_ylim(0, 15)
    ax1.set_ylabel("Telomere length (Kb)")
    # Panel B: chrX copy number
    ax2.scatter(df["hli_calc_age_sample_taken"], df["ccn.chrX"],
                s=10, marker='.',
                color=df["Chemistry"].map(color_map))
    ax2.set_ylim(1.8, 2.1)
    ax2.set_ylabel("ChrX copy number")
    # Panel D: chrY copy number
    ax4.scatter(df["hli_calc_age_sample_taken"], df["ccn.chrY"],
                s=10, marker='.',
                color=df["Chemistry"].map(color_map))
    ax4.set_ylim(0.8, 1.1)
    ax4.set_ylabel("ChrY copy number")
    # Panel C: TCR-alpha deletions
    ax3.scatter(df["hli_calc_age_sample_taken"], df["TRA.PPM"],
                s=10, marker='.',
                color=df["Chemistry"].map(color_map))
    ax3.set_ylim(0, 250)
    ax3.set_ylabel("$TCR-\\alpha$ deletions (count per million reads)")
    from matplotlib.lines import Line2D
    # NOTE(review): zip(...)[:3] requires Python 2, where zip returns a
    # list; under Python 3 this line would need list(zip(...))[:3]
    legend_elements = [Line2D([0], [0], marker='.', color='w', label=chem,
                              markerfacecolor=color, markersize=16) \
                       for (chem, color) in zip(chemistry, colors)[:3]]
    for ax in (ax1, ax2, ax3, ax4):
        ax.set_xlabel(age_label)
        ax.legend(handles=legend_elements, loc="upper right")
    plt.tight_layout()

    root = fig.add_axes((0, 0, 1, 1))
    labels = ((.02, .98, "A"),
              (.52, .98, "B"),
              (.02, .5, "C"),
              (.52, .5, "D"))
    panel_labels(root, labels)
    root.set_xlim(0, 1)
    root.set_ylim(0, 1)
    root.set_axis_off()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def correlation(args):
    """
    %prog correlation postgenomic-s.tsv

    Plot correlation of age vs. postgenomic features.
    """
    parser = OptionParser(correlation.__doc__)
    opts, args, iopts = parser.set_image_options(args, figsize="12x8")

    if len(args) != 1:
        sys.exit(not parser.print_help())

    tsvfile, = args
    frame = pd.read_csv(tsvfile, sep="\t")
    # Delegate the actual drawing to the composite figure helper
    composite_correlation(frame, size=(iopts.w, iopts.h))
    prefix = tsvfile.rsplit(".", 1)[0]
    savefig(prefix + ".correlation.pdf")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def extract_twin_values(triples, traits, gender=None):
    """Calculate the heritability of certain traits in triplets.

    Parameters
    ==========
    triples: (a, b, "Female/Male") triples. The sample IDs are then used to
    query the traits dictionary.
    traits: sample_id => value dictionary

    Returns
    =======
    tuples of size 2, that contain paired trait values of the twins
    """
    # Tally why pairs get dropped, for the summary line at the end
    traitValuesAbsent = 0
    nanValues = 0
    genderSkipped = 0
    twinValues = []
    for id_a, id_b, pair_gender in triples:
        if gender is not None and pair_gender != gender:
            genderSkipped += 1
            continue
        if id_a not in traits or id_b not in traits:
            traitValuesAbsent += 1
            continue
        value_a, value_b = traits[id_a], traits[id_b]
        if np.isnan(value_a) or np.isnan(value_b):
            nanValues += 1
            continue
        twinValues.append((value_a, value_b))

    print("A total of {} pairs extracted ({} absent; {} nan; {} genderSkipped)"\
        .format(len(twinValues), traitValuesAbsent, nanValues, genderSkipped))
    return twinValues
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def heritability(args):
    """
    %prog pg.tsv MZ-twins.csv DZ-twins.csv

    Plot composite figures ABCD on absolute difference of 4 traits, EFGH on
    heritability of 4 traits. The 4 traits are: telomere length, ccn.chrX,
    ccn.chrY, TRA.PPM
    """
    p = OptionParser(heritability.__doc__)
    opts, args, iopts = p.set_image_options(args, figsize="12x18")

    if len(args) != 3:
        sys.exit(not p.print_help())

    combined, mz, dz = args

    # Prepare twins data
    def get_pairs(filename):
        # Each line: comma-separated sample IDs (plus gender in column 3)
        with open(filename) as fp:
            for row in fp:
                yield row.strip().split(",")

    MZ = list(get_pairs(mz))
    DZ = list(get_pairs(dz))
    print(len(MZ), "monozygotic twins")
    print(len(DZ), "dizygotic twins")

    df = pd.read_csv(combined, sep="\t", index_col=0)
    # NOTE(review): np.str is removed in modern numpy; confirm the numpy
    # version before upgrading this code
    df["Sample name"] = np.array(df["Sample name"], dtype=np.str)
    gender = extract_trait(df, "Sample name", "hli_calc_gender")
    # Keep only same-gender pairs so the X/Y copy numbers are comparable
    sameGenderMZ = list(filter_same_gender(MZ, gender))
    sameGenderDZ = list(filter_same_gender(DZ, gender))

    composite(df, sameGenderMZ, sameGenderDZ, size=(iopts.w, iopts.h))
    # Silence further logging so savefig output stays clean
    logging.getLogger().setLevel(logging.CRITICAL)
    savefig("heritability.pdf")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def compile(args):
    """
    %prog compile directory

    Extract telomere length and ccn.
    """
    p = OptionParser(compile.__doc__)
    p.set_outfile(outfile="age.tsv")
    opts, args = p.parse_args(args)

    if len(args) < 1:
        sys.exit(not p.print_help())

    dfs = []
    for folder in args:
        ofolder = os.listdir(folder)

        # telomeres
        subdir = [x for x in ofolder if x.startswith("telomeres")][0]
        subdir = op.join(folder, subdir)
        filename = op.join(subdir, "tel_lengths.txt")
        df = pd.read_csv(filename, sep="\t")
        # First row of the telomere table as a dict.
        # Fix: .iloc replaces .ix, which is removed in modern pandas;
        # with the default RangeIndex the two are equivalent here.
        d1 = df.iloc[0].to_dict()

        # ccn
        subdir = [x for x in ofolder if x.startswith("ccn")][0]
        subdir = op.join(folder, subdir)
        filename = iglob(subdir, "*.ccn.json")[0]
        js = json.load(open(filename))
        d1.update(js)

        df = pd.DataFrame(d1, index=[0])
        dfs.append(df)

    df = pd.concat(dfs, ignore_index=True)
    df.to_csv(opts.outfile, sep="\t", index=False)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def simulate_one(fw, name, size):
    """
    Simulate a random sequence with name and size
    """
    from random import choice

    # Uniform random bases over A/C/G/T
    bases = [choice('ACGT') for _ in xrange(size)]
    record = SeqRecord(Seq(''.join(bases)), id=name,
                       description="Fake sequence")
    SeqIO.write([record], fw, "fasta")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def simulate(args):
    """
    %prog simulate idsfile

    Simulate random FASTA file based on idsfile, which is a two-column
    tab-separated file with sequence name and size.
    """
    p = OptionParser(simulate.__doc__)
    p.set_outfile()
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    idsfile, = args
    fw = must_open(opts.outfile, "w")
    fp = open(idsfile)
    # One fake record per (name, size) line
    for line in fp:
        name, size = line.split()
        simulate_one(fw, name, int(size))
    fp.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def gc(args):
    """
    %prog gc fastafile

    Plot G+C content distribution.
    """
    p = OptionParser(gc.__doc__)
    p.add_option("--binsize", default=500, type="int",
                 help="Bin size to use")
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    fastafile, = args
    binsize = opts.binsize
    allbins = []
    for name, seq in parse_fasta(fastafile):
        # Floor division keeps the original Python 2 integer semantics
        # while also working under Python 3 (plain `/` yields a float)
        for i in range(len(seq) // binsize):
            atcnt = gccnt = 0
            for c in seq[i * binsize: (i + 1) * binsize].upper():
                if c in "AT":
                    atcnt += 1
                elif c in "GC":
                    gccnt += 1
            totalcnt = atcnt + gccnt
            if totalcnt == 0:
                # Window is all Ns/ambiguous bases - skip it
                continue
            gcpct = gccnt * 100 // totalcnt
            allbins.append(gcpct)

    from jcvi.graphics.base import asciiplot
    from collections import Counter

    title = "Total number of bins={}".format(len(allbins))
    c = Counter(allbins)
    x, y = zip(*sorted(c.items()))
    asciiplot(x, y, title=title)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def trimsplit(args):
    """
    %prog trimsplit fastafile

    Split sequences at lower-cased letters and stretch of Ns. This is useful
    at cleaning up the low quality bases for the QUIVER output.
    """
    from jcvi.utils.cbook import SummaryStats

    p = OptionParser(trimsplit.__doc__)
    p.add_option("--minlength", default=1000, type="int",
                 help="Min length of contigs to keep")
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    fastafile, = args
    minlength = opts.minlength

    fw = must_open(fastafile.rsplit(".", 1)[0] + ".split.fasta", "w")
    ntotal = 0
    removed = []   # lengths of lower-cased or too-short stretches dropped
    Ns = []        # lengths of N-runs (or too-short fragments) dropped
    for name, seq in parse_fasta(fastafile):
        stretches = []
        ntotal += len(seq)
        # First pass: split on case; lower-case marks low-quality bases
        for lower, stretch in groupby(seq, key=lambda x: x.islower()):
            stretch = "".join(stretch)
            if lower or len(stretch) < minlength:
                removed.append(len(stretch))
                continue
            # Second pass: split each surviving stretch on runs of N/n
            for isN, s in groupby(stretch, key=lambda x: x in "Nn"):
                s = "".join(s)
                if isN or len(s) < minlength:
                    Ns.append(len(s))
                    continue
                stretches.append(s)
        # Emit surviving sub-contigs as name_0, name_1, ...
        for i, seq in enumerate(stretches):
            id = "{0}_{1}".format(name.split("|")[0], i)
            s = SeqRecord(Seq(seq), id=id, description="")
            SeqIO.write([s], fw, "fasta")
    fw.close()

    # Reporting
    if removed:
        logging.debug("Total bases removed: {0}".\
            format(percentage(sum(removed), ntotal)))
        print(SummaryStats(removed), file=sys.stderr)
    if Ns:
        logging.debug("Total Ns removed: {0}".\
            format(percentage(sum(Ns), ntotal)))
        print(SummaryStats(Ns), file=sys.stderr)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def qual(args):
    """
    %prog qual fastafile

    Generate dummy .qual file based on FASTA file.
    """
    from jcvi.formats.sizes import Sizes

    p = OptionParser(qual.__doc__)
    p.add_option("--qv", default=31, type="int",
                 help="Dummy qv score for extended bases")
    p.set_outfile()
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    fastafile, = args
    sizes = Sizes(fastafile)
    qvchar = str(opts.qv)
    fw = must_open(opts.outfile, "w")
    total = 0
    # One qual record per sequence: the same score repeated seqlen times
    for name, seqlen in sizes.iter_sizes():
        print(">" + name, file=fw)
        print(" ".join([qvchar] * seqlen), file=fw)
        total += 1
    fw.close()
    logging.debug("Written {0} records in `{1}`.".format(total, opts.outfile))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fromtab(args):
    """ %prog fromtab tabfile fastafile Convert 2-column sequence file to FASTA format. One usage for this is to generate a `adapters.fasta` for TRIMMOMATIC. """
    p = OptionParser(fromtab.__doc__)
    p.set_sep(sep=None)
    p.add_option("--noheader", default=False, action="store_true",
                 help="Ignore first line")
    p.add_option("--replace",
                 help="Replace spaces in name to char [default: %default]")
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    tabfile, fastafile = args
    sep = opts.sep
    replace = opts.replace
    fp = must_open(tabfile)
    fw = must_open(fastafile, "w")
    nseq = 0
    if opts.noheader:
        # Fixed: next(fp, None) instead of next(fp) so an empty input
        # file no longer crashes with StopIteration
        next(fp, None)
    for row in fp:
        row = row.strip()
        # skip blank lines and comments
        if not row or row[0] == '#':
            continue

        # name may itself contain the separator; split from the right
        name, seq = row.rsplit(sep, 1)
        if replace:
            name = name.replace(" ", replace)
        print(">{0}\n{1}".format(name, seq), file=fw)
        nseq += 1
    fw.close()

    logging.debug("A total of {0} sequences written to `{1}`.".\
            format(nseq, fastafile))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def longestorf(args):
    """ %prog longestorf fastafile Find longest ORF for each sequence in fastafile. """
    p = OptionParser(longestorf.__doc__)
    p.add_option("--ids", action="store_true",
                 help="Generate table with ORF info [default: %default]")
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    fastafile, = args
    pf = fastafile.rsplit(".", 1)[0]
    orffile = pf + ".orf.fasta"
    idsfile = None
    fwids = None
    if opts.ids:
        idsfile = pf + ".orf.ids"
        fwids = open(idsfile, "w")

    f = Fasta(fastafile, lazy=True)
    fw = must_open(orffile, "w")

    # Track total bases before/after ORF extraction for the summary line
    total_in = total_out = 0
    for name, rec in f.iteritems_ordered():
        cds = rec.seq
        total_in += len(cds)
        # Try all six frames
        finder = ORFFinder(cds)
        orfseq = Seq(finder.get_longest_orf())
        total_out += len(orfseq)
        newrec = SeqRecord(orfseq, id=name, description=rec.description)
        SeqIO.write([newrec], fw, "fasta")
        if idsfile:
            print("\t".join((name, finder.info)), file=fwids)

    fw.close()
    if idsfile:
        fwids.close()

    logging.debug("Longest ORFs written to `{0}` ({1}).".\
            format(orffile, percentage(total_out, total_in)))

    return orffile
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ispcr(args):
    """ %prog ispcr fastafile Reformat paired primers into isPcr query format, which is three column format: name, forward, reverse """
    from jcvi.utils.iter import grouper

    p = OptionParser(ispcr.__doc__)
    p.add_option("-r", dest="rclip", default=1, type="int",
                 help="pair ID is derived from rstrip N chars [default: %default]")
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    fastafile, = args
    ispcrfile = fastafile + ".isPcr"
    fw = open(ispcrfile, "w")

    N = opts.rclip
    # Bug fix: the original `lambda x: x[:-N] if N else str` returned the
    # builtin `str` *type* when N == 0, so every pair ID printed as
    # "<class 'str'>"; use the identity function instead.
    strip_name = (lambda x: x[:-N]) if N else (lambda x: x)

    npairs = 0
    fastaiter = SeqIO.parse(fastafile, "fasta")
    # Records are assumed to come in adjacent forward/reverse pairs
    for a, b in grouper(fastaiter, 2):
        aid, bid = [strip_name(x) for x in (a.id, b.id)]
        assert aid == bid, "Name mismatch {0}".format((aid, bid))

        print("\t".join((aid, str(a.seq), str(b.seq))), file=fw)
        npairs += 1

    fw.close()
    logging.debug("A total of {0} pairs written to `{1}`.".\
            format(npairs, ispcrfile))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_fasta(infile, upper=False):
    '''
    Parse a fasta-formatted file (path or open file-like object) and yield
    (header, sequence) tuples; handles multi-record files.
    '''
    try:
        fp = must_open(infile)
    except Exception:
        # Fixed: narrowed the bare `except:` (which also swallowed
        # SystemExit/KeyboardInterrupt); infile is already file-like here
        fp = infile
    # group lines into alternating runs of header lines / sequence lines
    fa_iter = (x[1] for x in groupby(fp, lambda row: row[0] == '>'))
    for header in fa_iter:
        header = next(header)
        if header[0] != '>':
            # leading junk before the first '>' record
            continue
        # drop '>' and trailing whitespace; keep the full defline
        header = header.strip()[1:]
        # stitch the sequence lines together
        seq = "".join(s.strip() for s in next(fa_iter))
        if upper:
            seq = seq.upper()
        yield header, seq
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def clean(args):
    """ %prog clean fastafile Remove irregular chars in FASTA seqs. """
    p = OptionParser(clean.__doc__)
    p.add_option("--fancy", default=False, action="store_true",
                 help="Pretty print the sequence [default: %default]")
    p.add_option("--canonical", default=False, action="store_true",
                 help="Use only acgtnACGTN [default: %default]")
    p.set_outfile()
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    fastafile, = args
    fw = must_open(opts.outfile, "w")

    if opts.fancy:
        # Fancy mode writes header + pretty-printed sequence and stops here
        for header, seq in iter_clean_fasta(fastafile):
            print(">" + header, file=fw)
            fancyprint(fw, seq)
        return 0

    iterator = iter_canonical_fasta if opts.canonical else iter_clean_fasta
    for header, seq in iterator(fastafile):
        record = SeqRecord(Seq(seq), id=header, description="")
        SeqIO.write([record], fw, "fasta")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def filter(args):
    """ %prog filter fastafile 100 Filter the FASTA file to contain records with size >= or <= certain cutoff. """
    p = OptionParser(filter.__doc__)
    p.add_option("--less", default=False, action="store_true",
                 help="filter the sizes < certain cutoff [default: >=]")
    p.set_outfile()
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    fastafile, cutoff = args
    try:
        cutoff = int(cutoff)
    except ValueError:
        sys.exit(not p.print_help())

    f = Fasta(fastafile, lazy=True)
    fw = must_open(opts.outfile, "w")
    for name, rec in f.iteritems_ordered():
        size = len(rec)
        # --less keeps records strictly below the cutoff; default keeps >=
        keep = (size < cutoff) if opts.less else (size >= cutoff)
        if not keep:
            continue
        SeqIO.write([rec], fw, "fasta")
        fw.flush()

    return fw.name
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pool(args):
    """ %prog pool fastafiles > pool.fasta Pool a bunch of FASTA files, and add prefix to each record based on filenames. File names are simplified to longest unique prefix to avoid collisions after getting shortened. """
    from jcvi.formats.base import longest_unique_prefix

    p = OptionParser(pool.__doc__)
    p.add_option("--sep", default=".", help="Separator between prefix and name")
    p.add_option("--sequential", default=False, action="store_true",
                 help="Add sequential IDs")
    opts, args = p.parse_args(args)

    if len(args) < 1:
        sys.exit(not p.print_help())

    for fastafile in args:
        pf = longest_unique_prefix(fastafile, args)
        print(fastafile, "=>", pf, file=sys.stderr)
        # Delegate renaming/writing to the `format` subcommand
        cmd_args = [fastafile, "stdout",
                    "--prefix={0}{1}".format(pf, opts.sep)]
        if opts.sequential:
            cmd_args.append("--sequential=replace")
        format(cmd_args)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ids(args):
    """ %prog ids fastafiles Generate the FASTA headers without the '>'. """
    p = OptionParser(ids.__doc__)
    p.add_option("--until", default=None,
                 help="Truncate the name and description at words [default: %default]")
    p.add_option("--description", default=False, action="store_true",
                 help="Generate a second column with description [default: %default]")
    p.set_outfile()
    opts, args = p.parse_args(args)

    if len(args) < 1:
        sys.exit(not p.print_help())

    until = opts.until
    fw = must_open(opts.outfile, "w")
    for row in must_open(args):
        # only deflines are of interest
        if row[0] != ">":
            continue
        row = row[1:].rstrip()
        if until:
            row = row.split(until)[0]

        atoms = row.split(None, 1)
        print("\t".join(atoms) if opts.description else atoms[0], file=fw)

    fw.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sort(args):
    """ %prog sort fastafile Sort a list of sequences and output with sorted IDs, etc. """
    p = OptionParser(sort.__doc__)
    p.add_option("--sizes", default=False, action="store_true",
                 help="Sort by decreasing size [default: %default]")

    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(p.print_help())

    fastafile, = args
    sortedfastafile = fastafile.rsplit(".", 1)[0] + ".sorted.fasta"

    f = Fasta(fastafile, index=False)
    fw = must_open(sortedfastafile, "w")
    if opts.sizes:
        # Largest first; ties broken by name
        bysize = sorted(f.itersizes(), key=lambda x: (-x[1], x[0]))
        logging.debug("Sort by size: max: {0}, min: {1}".\
                format(bysize[0], bysize[-1]))
        keys = [name for name, size in bysize]
    else:
        keys = sorted(f.iterkeys())

    for key in keys:
        SeqIO.write([f[key]], fw, "fasta")

    logging.debug("Sorted file written to `{0}`.".format(sortedfastafile))
    fw.close()

    return sortedfastafile
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def print_first_difference(arec, brec, ignore_case=False, ignore_N=False, rc=False, report_match=True):
    """ Compare two records and report the first differing nucleotide;
    optionally retries on the Minus strand when the Plus strand fails. """
    plus_match = _print_first_difference(arec, brec, ignore_case=ignore_case,
            ignore_N=ignore_N, report_match=report_match)
    if plus_match or not rc:
        return plus_match

    # Plus strand failed and rc requested: retry against reverse complement
    logging.debug("trying reverse complement of %s" % brec.id)
    brec.seq = brec.seq.reverse_complement()
    return _print_first_difference(arec, brec, ignore_case=ignore_case,
            ignore_N=ignore_N, report_match=report_match)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _print_first_difference(arec, brec, ignore_case=False, ignore_N=False, report_match=True):
""" Returns the first different nucleotide in two sequence comparisons """ |
aseq, bseq = arec.seq, brec.seq
asize, bsize = len(aseq), len(bseq)
matched = True
for i, (a, b) in enumerate(zip_longest(aseq, bseq)):
if ignore_case and None not in (a, b):
a, b = a.upper(), b.upper()
if ignore_N and ('N' in (a, b) or 'X' in (a, b)):
continue
if a != b:
matched = False
break
if i + 1 == asize and matched:
if report_match:
print(green("Two sequences match"))
match = True
else:
print(red("Two sequences do not match"))
snippet_size = 20 # show the context of the difference
print(red("Sequence start to differ at position %d:" % (i + 1)))
begin = max(i - snippet_size, 0)
aend = min(i + snippet_size, asize)
bend = min(i + snippet_size, bsize)
print(red(aseq[begin:i] + "|" + aseq[i:aend]))
print(red(bseq[begin:i] + "|" + bseq[i:bend]))
match = False
return match |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def hash_fasta(seq, ignore_case=False, ignore_N=False, ignore_stop=False, checksum="MD5"):
    """ Generates checksum of input sequence element """
    if ignore_stop:
        seq = seq.rstrip("*")
    if ignore_case:
        seq = seq.upper()
    if ignore_N:
        # Sequences made purely of ATGCN (any case) look like nucleotides:
        # strip 'N'; anything else is treated as protein-like: strip 'X'
        looks_nucleotide = all(c.upper() in 'ATGCN' for c in seq)
        seq = re.sub('N' if looks_nucleotide else 'X', '', seq)
    if checksum == "MD5":
        hashed = md5(seq).hexdigest()
    elif checksum == "GCG":
        hashed = seguid(seq)
    return hashed
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_qual(fastafile, suffix=QUALSUFFIX, check=True):
    """ Check if current folder contains a qual file associated with the fastafile """
    stem_qual = fastafile.rsplit(".", 1)[0] + suffix
    full_qual = fastafile + suffix

    if not check:
        # no existence check: caller wants the conventional name
        return stem_qual

    # prefer <stem><suffix>, fall back to <fastafile><suffix>
    for candidate in (stem_qual, full_qual):
        if op.exists(candidate):
            logging.debug("qual file `{0}` found".format(candidate))
            return candidate

    logging.warning("qual file not found")
    return None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def some(args):
    """ %prog some fastafile listfile outfastafile generate a subset of fastafile, based on a list """
    p = OptionParser(some.__doc__)
    p.add_option("--exclude", default=False, action="store_true",
                 help="Output sequences not in the list file [default: %default]")
    p.add_option("--uniprot", default=False, action="store_true",
                 help="Header is from uniprot [default: %default]")

    opts, args = p.parse_args(args)

    if len(args) != 3:
        sys.exit(p.print_help())

    fastafile, listfile, outfastafile = args
    outfastahandle = must_open(outfastafile, "w")
    qualfile = get_qual(fastafile)

    names = set(x.strip() for x in open(listfile))
    if qualfile:
        outqualfile = outfastafile + ".qual"
        outqualhandle = open(outqualfile, "w")
        parser = iter_fasta_qual(fastafile, qualfile)
    else:
        parser = SeqIO.parse(fastafile, "fasta")

    num_records = 0
    for rec in parser:
        name = rec.id
        if opts.uniprot:
            name = name.split("|")[-1]

        # keep a record iff list membership agrees with the include/exclude
        # mode: --exclude drops listed names, default drops unlisted names
        if (name in names) == opts.exclude:
            continue

        SeqIO.write([rec], outfastahandle, "fasta")
        if qualfile:
            SeqIO.write([rec], outqualhandle, "qual")
        num_records += 1

    logging.debug("A total of %d records written to `%s`" % \
            (num_records, outfastafile))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fastq(args):
    """ %prog fastq fastafile Generate fastqfile by combining fastafile and fastafile.qual. Also check --qv option to use a default qv score. """
    from jcvi.formats.fastq import FastqLite

    p = OptionParser(fastq.__doc__)
    # Fixed typo in user-visible help text: "dafault" -> "default"
    p.add_option("--qv", type="int",
                 help="Use generic qv value [default: %default]")

    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    fastafile, = args
    fastqfile = fastafile.rsplit(".", 1)[0] + ".fastq"
    fastqhandle = open(fastqfile, "w")
    num_records = 0

    if opts.qv is not None:
        # Phred score encoded as a single FASTQ quality character
        qv = chr(ord('!') + opts.qv)
        logging.debug("QV char '{0}' ({1})".format(qv, opts.qv))
    else:
        qv = None

    if qv:
        # Uniform quality: emit lightweight FASTQ records directly
        f = Fasta(fastafile, lazy=True)
        for name, rec in f.iteritems_ordered():
            seq = str(rec.seq).upper()
            r = FastqLite("@" + name, seq, qv * len(seq))
            print(r, file=fastqhandle)
            num_records += 1
    else:
        # Per-base qualities come from the companion .qual file
        qualfile = get_qual(fastafile)
        for rec in iter_fasta_qual(fastafile, qualfile):
            SeqIO.write([rec], fastqhandle, "fastq")
            num_records += 1

    fastqhandle.close()
    logging.debug("A total of %d records written to `%s`" % \
            (num_records, fastqfile))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pair(args):
""" %prog pair fastafile Generate .pairs.fasta and .fragments.fasta by matching records into the pairs and the rest go to fragments. """ |
p = OptionParser(pair.__doc__)
p.set_sep(sep=None, help="Separator in name to reduce to clone id" +\
"e.g. GFNQ33242/1 use /, BOT01-2453H.b1 use .")
p.add_option("-m", dest="matepairs", default=False, action="store_true",
help="generate .matepairs file [often used for Celera Assembler]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(p.print_help())
fastafile, = args
qualfile = get_qual(fastafile)
prefix = fastafile.rsplit(".", 1)[0]
pairsfile = prefix + ".pairs.fasta"
fragsfile = prefix + ".frags.fasta"
pairsfw = open(pairsfile, "w")
fragsfw = open(fragsfile, "w")
#TODO: need a class to handle coupled fasta and qual iterating and indexing
if opts.matepairs:
matepairsfile = prefix + ".matepairs"
matepairsfw = open(matepairsfile, "w")
# when a companion .qual file exists, mirror every fasta output with a qual output
if qualfile:
pairsqualfile = pairsfile + ".qual"
pairsqualhandle = open(pairsqualfile, "w")
fragsqualfile = fragsfile + ".qual"
fragsqualhandle = open(fragsqualfile, "w")
f = Fasta(fastafile)
if qualfile:
q = SeqIO.index(qualfile, "qual")
all_keys = list(f.keys())
all_keys.sort()
sep = opts.sep
# key_fun reduces a read name to its clone id: split at the first separator,
# or (no separator given) drop the last character
if sep:
key_fun = lambda x: x.split(sep, 1)[0]
else:
key_fun = lambda x: x[:-1]
# group sorted read names by clone id; exactly two variants => a pair
for key, variants in groupby(all_keys, key=key_fun):
variants = list(variants)
paired = (len(variants) == 2)
if paired and opts.matepairs:
print("\t".join(("%s/1" % key, "%s/2" % key)), file=matepairsfw)
fw = pairsfw if paired else fragsfw
if qualfile:
qualfw = pairsqualhandle if paired else fragsqualhandle
for i, var in enumerate(variants):
rec = f[var]
if qualfile:
recqual = q[var]
# rename members to <clone>/1, <clone>/2 and strip descriptions
newid = "%s/%d" % (key, i + 1)
rec.id = newid
rec.description = ""
SeqIO.write([rec], fw, "fasta")
if qualfile:
recqual.id = newid
recqual.description = ""
SeqIO.write([recqual], qualfw, "qual")
logging.debug("sequences written to `%s` and `%s`" % \
(pairsfile, fragsfile))
if opts.matepairs:
logging.debug("mates written to `%s`" % matepairsfile)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pairinplace(args):
    """ %prog pairinplace bulk.fasta Pair up the records in bulk.fasta by comparing the names for adjacent records. If they match, print to bulk.pairs.fasta, else print to bulk.frags.fasta. """
    from jcvi.utils.iter import pairwise

    p = OptionParser(pairinplace.__doc__)
    p.add_option("-r", dest="rclip", default=1, type="int",
                 help="pair ID is derived from rstrip N chars [default: %default]")
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    fastafile, = args
    base = op.basename(fastafile).split(".")[0]

    frags = base + ".frags.fasta"
    pairs = base + ".pairs.fasta"
    if fastafile.endswith(".gz"):
        frags += ".gz"
        pairs += ".gz"

    fragsfw = must_open(frags, "w")
    pairsfw = must_open(pairs, "w")

    N = opts.rclip
    # Bug fix: with N == 0 the original `lambda x: x[:-N] if N else str`
    # returned the builtin `str` *type* for every name, so all adjacent
    # records spuriously compared as paired; use the identity instead.
    strip_name = (lambda x: x[:-N]) if N else (lambda x: x)

    skipflag = False  # controls the iterator skip
    fastaiter = SeqIO.parse(fastafile, "fasta")
    for a, b in pairwise(fastaiter):
        aid, bid = [strip_name(x) for x in (a.id, b.id)]
        if skipflag:
            # `a` was already written as the second member of a pair
            skipflag = False
            continue

        if aid == bid:
            SeqIO.write([a, b], pairsfw, "fasta")
            skipflag = True
        else:
            SeqIO.write([a], fragsfw, "fasta")

    # don't forget the last one, when b is None
    if not skipflag:
        SeqIO.write([a], fragsfw, "fasta")

    logging.debug("Reads paired into `%s` and `%s`" % (pairs, frags))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _uniq_rec(fastafile, seq=False):
    """ Yield records, skipping any whose id (or full sequence, when
    seq=True) has already been seen. """
    seen = set()
    for rec in SeqIO.parse(fastafile, "fasta"):
        key = str(rec.seq) if seq else rec.id
        if key in seen:
            logging.debug("ignore {0}".format(rec.id))
            continue

        seen.add(key)
        yield rec
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def uniq(args):
    """ %prog uniq fasta uniq.fasta remove fasta records that are the same """
    p = OptionParser(uniq.__doc__)
    p.add_option("--seq", default=False, action="store_true",
                 help="Uniqify the sequences [default: %default]")
    p.add_option("-t", "--trimname", dest="trimname",
                 action="store_true", default=False,
                 help="turn on the defline trim to first space [default: %default]")

    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(p.print_help())

    fastafile, uniqfastafile = args
    fw = must_open(uniqfastafile, "w")

    # _uniq_rec does the dedup; we only optionally strip the description
    for rec in _uniq_rec(fastafile, seq=opts.seq):
        if opts.trimname:
            rec.description = ""
        SeqIO.write([rec], fw, "fasta")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def random(args):
    """ %prog random fasta 100 > random100.fasta Take number of records randomly from fasta """
    from random import sample

    p = OptionParser(random.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    fastafile, N = args
    N = int(N)
    assert N > 0

    f = Fasta(fastafile)
    fw = must_open("stdout", "w")

    # draw N keys without replacement and emit their records
    for key in sample(f.keys(), N):
        SeqIO.write([f[key]], fw, "fasta")

    fw.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def iter_fasta_qual(fastafile, qualfile, defaultqual=OKQUAL, modify=False):
    """ used by trim, emits one SeqRecord with quality values in it """
    from Bio.SeqIO.QualityIO import PairedFastaQualIterator

    if not qualfile:
        # no qual file on disk: synthesize one with a uniform default score
        qualfile = make_qual(fastafile, score=defaultqual)

    for rec in PairedFastaQualIterator(open(fastafile), open(qualfile)):
        yield modify_qual(rec) if modify else rec
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def trim(args):
""" %prog trim fasta.screen newfasta take the screen output from `cross_match` (against a vector db, for example), then trim the sequences to remove X's. Will also perform quality trim if fasta.screen.qual is found. The trimming algorithm is based on finding the subarray that maximize the sum """ |
from jcvi.algorithms.maxsum import max_sum
p = OptionParser(trim.__doc__)
p.add_option("-c", dest="min_length", type="int", default=64,
help="minimum sequence length after trimming")
p.add_option("-s", dest="score", default=QUAL,
help="quality trimming cutoff [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(p.print_help())
fastafile, newfastafile = args
qualfile = get_qual(fastafile)
# conventional qual name for the output; no existence check needed
newqualfile = get_qual(newfastafile, check=False)
logging.debug("Trim bad sequence from fasta file `%s` to `%s`" % \
(fastafile, newfastafile))
fw = must_open(newfastafile, "w")
fw_qual = open(newqualfile, "w")
dropped = trimmed = 0
for rec in iter_fasta_qual(fastafile, qualfile, modify=True):
# score each base against the cutoff; max_sum then finds the window
# [trim_start, trim_end] with the best total quality
qv = [x - opts.score for x in \
rec.letter_annotations["phred_quality"]]
msum, trim_start, trim_end = max_sum(qv)
score = trim_end - trim_start + 1
if score < opts.min_length:
# trimmed read would be too short: drop it entirely
dropped += 1
continue
if score < len(rec):
# only count as "trimmed" if bases were actually removed
trimmed += 1
rec = rec[trim_start:trim_end + 1]
write_fasta_qual(rec, fw, fw_qual)
print("A total of %d sequences modified." % trimmed, file=sys.stderr)
print("A total of %d sequences dropped (length < %d)." % \
(dropped, opts.min_length), file=sys.stderr)
fw.close()
fw_qual.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sequin(args):
""" %prog sequin inputfasta Generate a gapped fasta format with known gap sizes embedded. suitable for Sequin submission. A gapped sequence represents a newer method for describing non-contiguous sequences, but only requires a single sequence identifier. A gap is represented by a line that starts with >? and is immediately followed by either a length (for gaps of known length) or "unk100" for gaps of unknown length. For example, ">?200". The next sequence segment continues on the next line, with no separate definition line or identifier. The difference between a gapped sequence and a segmented sequence is that the gapped sequence uses a single identifier and can specify known length gaps. Gapped sequences are preferred over segmented sequences. A sample gapped sequence file is shown here: ATGGAGCATACATATCAATATTCATGGATCATACCGTTTGTGCCACTTCCAATTCCTATTTTAATAGGAA TTGGACTCCTACTTTTTCCGACGGCAACAAAAAATCTTCGTCGTATGTGGGCTCTTCCCAATATTTTATT >?200 GGTATAATAACAGTATTATTAGGGGCTACTTTAGCTCTTGC TCAAAAAGATATTAAGAGGGGTTTAGCCTATTCTACAATGTCCCAACTGGGTTATATGATGTTAGCTCTA >?unk100 TCAATAAAACTATGGGGTAAAGAAGAACAAAAAATAATTAACAGAAATTTTCGTTTATCTCCTTTATTAA TATTAACGATGAATAATAATGAGAAGCCATATAGAATTGGTGATAATGTAAAAAAAGGGGCTCTTATTAC """ |
p = OptionParser(sequin.__doc__)
p.add_option("--unk", default=100, type="int",
help="The size for unknown gaps [default: %default]")
p.add_option("--newid", default=None,
help="Use this identifier instead [default: %default]")
p.add_option("--chromosome", default=None,
help="Add [chromosome= ] to FASTA header [default: %default]")
p.add_option("--clone", default=None,
help="Add [clone= ] to FASTA header [default: %default]")
p.set_mingap(default=100)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
inputfasta, = args
unk = opts.unk
outputfasta = inputfasta.rsplit(".", 1)[0] + ".split"
# only the first record of the input file is processed
rec = next(SeqIO.parse(must_open(inputfasta), "fasta"))
seq = ""
unknowns, knowns = 0, 0
# walk alternating runs of N vs non-N bases; qualifying N-runs are
# rewritten as Sequin ">?..." gap lines embedded in the sequence
for gap, gap_group in groupby(rec.seq, lambda x: x.upper() == 'N'):
subseq = "".join(gap_group)
if gap:
gap_length = len(subseq)
if gap_length == unk:
# a run exactly --unk long is declared an unknown-length gap
subseq = "\n>?unk{0}\n".format(unk)
unknowns += 1
elif gap_length >= opts.mingap:
# long-enough runs become known-length gaps; shorter runs
# are kept verbatim as Ns
subseq = "\n>?{0}\n".format(gap_length)
knowns += 1
seq += subseq
fw = must_open(outputfasta, "w")
id = opts.newid or rec.id
fastaheader = ">{0}".format(id)
# optional source qualifiers in the defline, per Sequin conventions
if opts.chromosome:
fastaheader += " [chromosome={0}]".format(opts.chromosome)
if opts.clone:
fastaheader += " [clone={0}]".format(opts.clone)
print(fastaheader, file=fw)
print(seq, file=fw)
fw.close()
logging.debug("Sequin FASTA written to `{0}` (gaps: {1} unknowns, {2} knowns).".\
format(outputfasta, unknowns, knowns))
return outputfasta, unknowns + knowns
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def tidy(args):
    """ %prog tidy fastafile Trim terminal Ns, normalize gap sizes and remove small components. """
    p = OptionParser(tidy.__doc__)
    p.add_option("--gapsize", dest="gapsize", default=0, type="int",
                 help="Set all gaps to the same size [default: %default]")
    p.add_option("--minlen", dest="minlen", default=100, type="int",
                 help="Minimum component size [default: %default]")

    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    fastafile, = args
    gapsize = opts.gapsize
    minlen = opts.minlen

    tidyfastafile = fastafile.rsplit(".", 1)[0] + ".tidy.fasta"
    outfw = must_open(tidyfastafile, "w")

    ndiscarded = nnormalized = 0
    fasta = Fasta(fastafile, lazy=True)
    for name, rec in fasta.iteritems_ordered():
        rec.seq = rec.seq.upper()
        # order matters: drop tiny components first, then trim terminal Ns,
        # then rewrite the remaining gaps to a uniform size
        if minlen:
            ndiscarded += remove_small_components(rec, minlen)
        trim_terminal_Ns(rec)
        if gapsize:
            nnormalized += normalize_gaps(rec, gapsize)

        if len(rec) == 0:
            logging.debug("Drop seq {0}".format(rec.id))
            continue
        SeqIO.write([rec], outfw, "fasta")

    # Print statistics
    if ndiscarded:
        logging.debug("Total discarded bases: {0}".format(ndiscarded))
    if nnormalized:
        logging.debug("Gaps normalized: {0}".format(nnormalized))

    logging.debug("Tidy FASTA written to `{0}`.".format(tidyfastafile))
    outfw.close()

    return tidyfastafile
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def gaps(args):
""" %prog gaps fastafile Print out a list of gaps in BED format (.gaps.bed). """ |
from jcvi.formats.sizes import agp
from jcvi.formats.agp import mask, build
p = OptionParser(gaps.__doc__)
p.add_option("--split", default=False, action="store_true",
help="Generate .split.fasta [default: %default]")
p.set_mingap(default=100)
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
inputfasta, = args
mingap = opts.mingap
split = opts.split
prefix = inputfasta.rsplit(".", 1)[0]
bedfile = prefix + ".gaps.bed"
# regenerate the gaps BED only if the fasta is newer than it
if need_update(inputfasta, bedfile):
write_gaps_bed(inputfasta, prefix, mingap, opts.cpus)
if split:
# --split: additionally break the fasta at gaps and emit two AGP
# views (object-level and component-level) plus the split fasta
splitfile = prefix + ".split.fasta"
oagpfile = prefix + ".splitobject.agp"
cagpfile = prefix + ".splitcomponent.agp"
if need_update((inputfasta, bedfile), splitfile):
sizesagpfile = agp([inputfasta])
# mask the gaps out of the trivial sizes AGP, once per view
maskedagpfile = mask([sizesagpfile, bedfile, "--splitobject"])
shutil.move(maskedagpfile, oagpfile)
logging.debug("AGP file written to `{0}`.".format(oagpfile))
maskedagpfile = mask([sizesagpfile, bedfile, "--splitcomponent"])
shutil.move(maskedagpfile, cagpfile)
logging.debug("AGP file written to `{0}`.".format(cagpfile))
# materialize the split fasta from the object AGP
build([oagpfile, inputfasta, splitfile])
os.remove(sizesagpfile)
return splitfile, oagpfile, cagpfile
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def scan_sequence(self, frame, direction):
    """ Search in one reading frame """
    orf_start = None
    for codon, pos in self.codons(frame):
        is_stop = codon in self.stop
        if orf_start is None:
            # open an ORF at a start codon (or at any non-stop codon
            # when no start set is configured)
            if not is_stop and (codon in self.start or not self.start):
                orf_start = pos
        elif is_stop:
            # close the open ORF, including the stop codon itself
            self._update_longest(orf_start, pos + 3, direction, frame)
            orf_start = None
    if orf_start is not None:
        # ORF ran off the end of the sequence
        self._update_longest(orf_start, pos + 3, direction, frame)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bed_to_bedpe(bedfile, bedpefile, pairsbedfile=None, matesfile=None, ca=False, strand=False):
    """ This converts the bedfile to bedpefile, assuming the reads are from CA. """
    fp = must_open(bedfile)
    fw = must_open(bedpefile, "w")
    if pairsbedfile:
        fwpairs = must_open(pairsbedfile, "w")

    # bucket BED lines by clone name
    clones = defaultdict(list)
    for row in fp:
        b = BedLine(row)
        name = b.accn
        clonename = clone_name(name, ca=ca)
        clones[clonename].append(b)

    if matesfile:
        fp = open(matesfile)
        libraryline = next(fp)
        # 'library bes 37896 126916'
        lib, name, smin, smax = libraryline.split()
        assert lib == "library"
        smin, smax = int(smin), int(smax)
        logging.debug("Happy mates for lib {0} fall between {1} - {2}".\
                format(name, smin, smax))

    nbedpe = 0
    nspan = 0
    for clonename, blines in clones.items():
        nlines = len(blines)
        if nlines == 2:
            a, b = blines
            aseqid, astart, aend = a.seqid, a.start, a.end
            bseqid, bstart, bend = b.seqid, b.start, b.end
            outcols = [aseqid, astart - 1, aend, bseqid, bstart - 1, bend, clonename]
            if strand:
                outcols.extend([0, a.strand, b.strand])
            print("\t".join(str(x) for x in outcols), file=fw)
            nbedpe += 1
        elif nlines == 1:
            a, = blines
            aseqid, astart, aend = a.seqid, a.start, a.end
            bseqid, bstart, bend = 0, 0, 0
        else:  # More than two lines per pair
            # Bug fix: the original fell through here, so the pairs-bed
            # section below reused coordinates left over from the
            # *previous* clone (or raised NameError on the first one);
            # skip ambiguous clones entirely.
            continue

        if pairsbedfile:
            start = min(astart, bstart) if bstart > 0 else astart
            end = max(aend, bend) if bend > 0 else aend
            if aseqid != bseqid:
                continue

            span = end - start + 1
            # without a mates file, accept all spans; otherwise only
            # spans inside the library's happy range
            if (not matesfile) or (smin <= span <= smax):
                print("\t".join(str(x) for x in \
                        (aseqid, start - 1, end, clonename)), file=fwpairs)
                nspan += 1

    fw.close()
    logging.debug("A total of {0} bedpe written to `{1}`.".\
            format(nbedpe, bedpefile))
    if pairsbedfile:
        fwpairs.close()
        logging.debug("A total of {0} spans written to `{1}`.".\
                format(nspan, pairsbedfile))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def posmap(args):
""" %prog posmap frgscf.sorted scf.fasta scfID Perform QC on the selected scfID, generate multiple BED files for plotting. """ |
p = OptionParser(posmap.__doc__)
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(p.print_help())
frgscffile, fastafile, scf = args
# fasta: extract the single scaffold of interest
cmd = "faOneRecord {0} {1}".format(fastafile, scf)
scffastafile = scf + ".fasta"
if not op.exists(scffastafile):
sh(cmd, outfile=scffastafile)
# sizes
sizesfile = scffastafile + ".sizes"
sizes = Sizes(scffastafile).mapping
scfsize = sizes[scf]
logging.debug("`{0}` has length of {1}.".format(scf, scfsize))
# gaps.bed: locate N-runs >= 100 bp in the scaffold
gapsbedfile = scf + ".gaps.bed"
if not op.exists(gapsbedfile):
args = [scffastafile, "--bed", "--mingap=100"]
gaps(args)
# reads frgscf posmap: subset of the posmap limited to this scaffold
posmapfile = scf + ".posmap"
if not op.exists(posmapfile):
args = [frgscffile, scf]
query(args)
# reads bed: convert the posmap subset to BED
bedfile = scf + ".bed"
if not op.exists(bedfile):
args = [posmapfile]
bed(args)
# reads bedpe: pair up reads; also emit clone spans as pairs.bed
bedpefile = scf + ".bedpe"
pairsbedfile = scf + ".pairs.bed"
if not (op.exists(bedpefile) and op.exists(pairsbedfile)):
bed_to_bedpe(bedfile, bedpefile, pairsbedfile=pairsbedfile, ca=True)
# base coverage: per-base depth for reads and for clone spans
Coverage(bedfile, sizesfile)
Coverage(pairsbedfile, sizesfile)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update(self, pbar):
    '''Updates the widget to show the next marker or the first marker when
    finished'''
    if pbar.finished:
        return self.markers[0]
    # advance cyclically through the marker sequence
    self.curmark = (self.curmark + 1) % len(self.markers)
    return self.markers[self.curmark]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def _format_line(self):
'Joins the widgets and justifies the line'
widgets = ''.join(self._format_widgets())
if self.left_justify: return widgets.ljust(self.term_width)
else: return widgets.rjust(self.term_width) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def _update_widgets(self):
'Checks all widgets for the time sensitive bit'
self._time_sensitive = any(getattr(w, 'TIME_SENSITIVE', False)
for w in self.widgets) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def prepare(bedfile):
    """
    Remove prepended tags in gene names.

    Strips recognized method prefixes (``liftOver:``, ``GMAP:`` or an empty
    prefix) from each accession, then writes two derived bed files next to
    `bedfile`: ``<pf>.a.bed`` with one record per deduplicated accession,
    and ``<pf>.b.bed`` with the cleaned accessions rejoined by ";" on a
    single record.
    """
    stem = bedfile.rsplit(".", 1)[0]
    out_a = stem + ".a.bed"
    out_b = stem + ".b.bed"
    fwa = open(out_a, "w")
    fwb = open(out_b, "w")

    seen = set()
    for record in Bed(bedfile):
        cleaned = []
        for name in record.accn.split(";"):
            if ":" in name:
                tag, remainder = name.split(":", 1)
                if tag in ("liftOver", "GMAP", ""):
                    name = remainder
            if name in seen:
                logging.error("Duplicate id {0} found. Ignored.".format(name))
                continue
            cleaned.append(name)
            # Emit one record per individual accession to the .a.bed file
            record.accn = name
            print(record, file=fwa)
            seen.add(name)
        # The .b.bed file keeps the accessions grouped on one record
        record.accn = ";".join(cleaned)
        print(record, file=fwb)

    fwa.close()
    fwb.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def renumber(args):
    """
    %prog renumber Mt35.consolidated.bed > tagged.bed

    Renumber genes for annotation updates.
    """
    from jcvi.algorithms.lis import longest_increasing_subsequence
    from jcvi.utils.grouper import Grouper
    p = OptionParser(renumber.__doc__)
    p.set_annot_reformat_opts()
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())
    bedfile, = args
    pf = bedfile.rsplit(".", 1)[0]
    abedfile = pf + ".a.bed"
    bbedfile = pf + ".b.bed"
    # Regenerate the split .a.bed/.b.bed files (see prepare()) when stale
    if need_update(bedfile, (abedfile, bbedfile)):
        prepare(bedfile)
    mbed = Bed(bbedfile)
    g = Grouper()
    for s in mbed:
        accn = s.accn
        # Accessions joined by ";" on one .b.bed record form one overlap group
        g.join(*accn.split(";"))
    bed = Bed(abedfile)
    for chr, sbed in bed.sub_beds():
        current_chr = chr_number(chr)
        if not current_chr:
            continue
        ranks = []
        gg = set()
        for s in sbed:
            accn = s.accn
            achr, arank = atg_name(accn)
            # Skip models whose name disagrees with the chromosome they sit on
            if achr != current_chr:
                continue
            ranks.append(arank)
            gg.add(accn)
        # Genes on the longest increasing run of ranks keep their numbering
        lranks = longest_increasing_subsequence(ranks)
        print(current_chr, len(sbed), "==>", len(ranks), \
                "==>", len(lranks), file=sys.stderr)
        # Accepted names, in both the plain and the "te"-separated flavor
        granks = set(gene_name(current_chr, x, prefix=opts.prefix, \
                     pad0=opts.pad0, uc=opts.uc) for x in lranks) | \
                 set(gene_name(current_chr, x, prefix=opts.prefix, \
                     pad0=opts.pad0, sep="te", uc=opts.uc) for x in lranks)
        tagstore = {}
        for s in sbed:
            achr, arank = atg_name(s.accn)
            accn = s.accn
            if accn in granks:
                tag = (accn, FRAME)    # name consistent with its position
            elif accn in gg:
                tag = (accn, RETAIN)   # right chromosome, keep the id
            else:
                tag = (".", NEW)       # needs a brand-new id
            tagstore[accn] = tag
        # Find cases where genes overlap
        for s in sbed:
            accn = s.accn
            gaccn = g[accn]
            tags = [((tagstore[x][-1] if x in tagstore else NEW), x) for x in gaccn]
            # Lowest PRIORITY index wins within an overlapping group
            group = [(PRIORITY.index(tag), x) for tag, x in tags]
            best = min(group)[-1]
            if accn != best:
                tag = (best, OVERLAP)
            else:
                tag = tagstore[accn]
            print("\t".join((str(s), "|".join(tag))))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def publocus(args):
    """
    %prog publocus idsfile > idsfiles.publocus

    Given a list of model identifiers, convert each into a GenBank approved
    pub_locus.

    Example output:
    Medtr1g007020.1     MTR_1g007020
    Medtr1g007030.1     MTR_1g007030
    Medtr1g007060.1     MTR_1g007060A
    Medtr1g007060.2     MTR_1g007060B
    """
    p = OptionParser(publocus.__doc__)
    p.add_option("--locus_tag", default="MTR_",
                 help="GenBank locus tag [default: %default]")
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())

    locus_tag = opts.locus_tag
    index = AutoVivification()
    idsfile, = args
    for row in must_open(idsfile):
        locus, chrom, sep, rank, iso = atg_name(row, retval="locus,chr,sep,rank,iso")
        if None in (locus, chrom, sep, rank, iso):
            logging.warning("{0} is not a valid gene model identifier".format(row))
            continue
        if locus not in index.keys():
            index[locus]['pub_locus'] = gene_name(chrom, rank, prefix=locus_tag, sep=sep)
            index[locus]['isos'] = set()
        index[locus]['isos'].add(int(iso))

    for locus in index:
        pub_locus = index[locus]['pub_locus']
        isos = sorted(index[locus]['isos'])
        index[locus]['isos'] = isos
        if len(isos) > 1:
            # Multiple isoforms: suffix pub_locus with A, B, C, ... (only the
            # first 26 isoforms get a letter)
            letters = [chr(n + 64) for n in isos if n < 27]
            for iso_num, letter in zip(isos, letters):
                print("\t".join(("{0}.{1}".format(locus, iso_num),
                                 "{0}{1}".format(pub_locus, letter))))
        else:
            print("\t".join(("{0}.{1}".format(locus, isos[0]), pub_locus)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def augustus(args):
    """
    %prog augustus augustus.gff3 > reformatted.gff3

    AUGUSTUS does generate a gff3 (--gff3=on) but need some refinement.
    """
    from jcvi.formats.gff import Gff

    p = OptionParser(augustus.__doc__)
    p.set_outfile()
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())

    ingff3, = args
    fw = must_open(opts.outfile, "w")
    seen = defaultdict(int)
    for rec in Gff(ingff3):
        # Only gene / transcript / CDS features are carried over
        if rec.type not in ("gene", "transcript", "CDS"):
            continue
        if rec.type == "transcript":
            rec.type = "mRNA"

        # Prefix ids with the contig name to make them globally unique;
        # repeats get a numeric "-N" suffix
        prefix = rec.seqid + "_"
        pid = prefix + rec.id
        newid = "{0}-{1}".format(pid, seen[pid]) if pid in seen else pid
        seen[pid] += 1
        rec.attributes["ID"] = [newid]
        rec.attributes["Parent"] = [(prefix + parent) for parent in rec.attributes["Parent"]]
        rec.update_attributes()
        print(rec, file=fw)
    fw.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def tRNAscan(args):
    """
    %prog tRNAscan all.trna > all.trna.gff3

    Convert tRNAscan-SE output into gff3 format.

    Sequence  tRNA    Bounds          tRNA  Anti  Intron Bounds  Cove
    Name      tRNA #  Begin    End    Type  Codon Begin  End     Score
    --------  ------  -----    ---    ----  ----- -----  ---     -----
    23231     1       335355   335440 Tyr   GTA   335392 335404  69.21
    23231     2       1076190  1076270 Leu  AAG   0      0       66.33

    Conversion based on PERL one-liner in:
    <https://github.com/sujaikumar/assemblage/blob/master/README-annotation.md>
    """
    from jcvi.formats.gff import sort

    p = OptionParser(tRNAscan.__doc__)
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())

    trnaout, = args
    gffout = trnaout + ".gff3"
    fp = open(trnaout)
    fw = open(gffout, "w")

    # Skip the two header lines; the third must be the dashed separator
    next(fp)
    next(fp)
    row = next(fp)
    assert row.startswith("--------")

    for line in fp:
        fields = [f.strip() for f in line.split("\t")]
        (contig, trnanum, start, end, aa, codon,
         intron_start, intron_end, score) = fields
        start, end = int(start), int(end)
        # tRNAscan reports minus-strand hits with begin > end
        if start > end:
            start, end = end, start
            orientation = '-'
        else:
            orientation = '+'
        if codon == "???":
            codon = "XXX"

        attrs = "ID={0}.tRNA.{1};Name=tRNA-{2} (anticodon: {3})".format(
                contig, trnanum, aa, codon)
        cols = (contig, "tRNAscan", "tRNA", start, end, score,
                orientation, ".", attrs)
        print("\t".join(str(c) for c in cols), file=fw)

    fw.close()
    sort([gffout, "-i"])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def summary(args):
    """
    %prog summary fastafile

    Report the number of bases and sequences masked.
    """
    p = OptionParser(summary.__doc__)
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())

    fastafile, = args
    f = Fasta(fastafile, index=False)

    cutoff = 50  # percent masked above which a sequence counts as half-masked
    halfmaskedseqs = set()
    allmasked = 0
    allbases = 0
    for name, seq in f.iteritems():
        # Anything outside the unambiguous uppercase alphabet is "masked"
        masked = sum(1 for base in seq if base not in "AGCT")
        seqlen = len(seq)
        if masked * 100. / seqlen > cutoff:
            halfmaskedseqs.add(name)
        allmasked += masked
        allbases += seqlen

    seqnum = len(f)
    maskedseqnum = len(halfmaskedseqs)

    print("Total masked bases: {0}".\
            format(percentage(allmasked, allbases)), file=sys.stderr)
    print("Total masked sequences (contain > {0}% masked): {1}".\
            format(cutoff, percentage(maskedseqnum, seqnum)), file=sys.stderr)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_synteny_region(query, sbed, data, window, cutoff, colinear=False):
    """
    Get all synteny blocks for a query; algorithm is single linkage,
    anchors are a window centered on query.

    Two categories of syntenic regions depending on what query is:
    (Syntelog): syntenic region is denoted by the syntelog
    (Gray gene): syntenic region is marked by the closest flanker

    NOTE(review): `orientation` is only assigned inside the `colinear`
    branch, yet it is used when building `syn_region` below — verify that
    callers always pass colinear=True or that a module-level `orientation`
    exists.
    """
    regions = []
    ysorted = sorted(data, key=lambda x: x[1])
    g = Grouper()

    # Single-linkage: chain consecutive anchors (sorted by subject position)
    # that fall within `window` on the same subject seqid
    a, b = tee(ysorted)
    next(b, None)
    for ia, ib in izip(a, b):
        pos1, pos2 = ia[1], ib[1]
        if pos2 - pos1 < window and sbed[pos1].seqid == sbed[pos2].seqid:
            g.join(ia, ib)

    for group in sorted(g):
        (qflanker, syntelog), (far_flanker, far_syntelog), flanked = \
            get_flanker(group, query)

        # run a mini-dagchainer here, take the direction that gives us most anchors
        if colinear:
            y_indexed_group = [(y, i) for i, (x, y) in enumerate(group)]
            lis = longest_increasing_subsequence(y_indexed_group)
            lds = longest_decreasing_subsequence(y_indexed_group)

            if len(lis) >= len(lds):
                track = lis
                orientation = "+"
            else:
                track = lds
                orientation = "-"

            group = [group[i] for (y, i) in track]

        xpos, ypos = zip(*group)
        # Score = number of distinct anchors in the weaker dimension
        score = min(len(set(xpos)), len(set(ypos)))

        if qflanker == query:
            gray = "S"
        else:
            gray = "G" if not flanked else "F"
            score -= 1  # slight penalty for not finding syntelog

        if score < cutoff:
            continue

        # y-boundary of the block
        left, right = group[0][1], group[-1][1]

        # this characterizes a syntenic region (left, right).
        # syntelog is -1 if it's a gray gene
        syn_region = (syntelog, far_syntelog, left,
                      right, gray, orientation, score)
        regions.append(syn_region)

    # Highest-scoring regions first
    return sorted(regions, key=lambda x: -x[-1])
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.