_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3
values | text stringlengths 75 19.8k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
async def run(self, *args, data):
    """Look up the command named in ``data.text`` and execute it.

    Parameters
    ----------
    *args
        Positional arguments forwarded to the matched command.
    data
        Incoming event; ``data.text`` is scanned for a command name.

    Returns the awaited result of the command, or None when no command
    matched or an error occurred (the error is logged, not raised).
    """
    cmd = self._get(data.text)
    try:
        if cmd is not None:
            command = self[cmd](*args, data=data)
            return await peony.utils.execute(command)
    # was a bare ``except:`` -- that would also swallow SystemExit,
    # KeyboardInterrupt and CancelledError; catch only real errors
    except Exception:
        fmt = "Error occurred while running function {cmd}:"
        peony.utils.log_error(fmt.format(cmd=cmd))
def simplified_edges(self):
    """
    A generator for getting all of the edges without consuming extra
    memory.
    """
    # Only the (u, v) node pair is needed: the group keys and the edge
    # metadata dicts were unpacked but unused, so iterate .values().
    for edgelist in self.edges.values():
        for u, v, _meta in edgelist:
            yield (u, v)
def has_edge_within_group(self, group):
    """
    Checks whether there are within-group edges or not.

    Returns True as soon as one edge joins two nodes of ``group``;
    otherwise returns an explicit False (the original fell through and
    implicitly returned None).
    """
    assert group in self.nodes,\
        "{0} not one of the group of nodes".format(group)
    nodelist = self.nodes[group]
    for n1, n2 in self.simplified_edges():
        if n1 in nodelist and n2 in nodelist:
            return True
    return False
def plot_axis(self, rs, theta):
    """Draw one hive-plot axis as a faint black line."""
    axis_xs, axis_ys = get_cartesian(rs, theta)
    self.ax.plot(axis_xs, axis_ys, 'black', alpha=0.3)
def plot_nodes(self, nodelist, theta, group):
    """Render each node of ``group`` as a circle along the axis at ``theta``."""
    for position, node in enumerate(nodelist):
        # nodes are spaced outward from the internal radius
        radial = self.internal_radius + position * self.scale
        x, y = get_cartesian(radial, theta)
        patch = plt.Circle(
            xy=(x, y),
            radius=self.dot_radius,
            color=self.node_colormap[group],
            linewidth=0,
        )
        self.ax.add_patch(patch)
def group_theta(self, group):
    """
    Computes the theta along which a group's nodes are aligned.
    """
    # positional index of ``group`` in the insertion order of self.nodes
    for i, g in enumerate(self.nodes.keys()):
        if g == group:
            break
    # NOTE(review): if ``group`` is not present the loop falls through and
    # the angle of the *last* group is silently returned (NameError on an
    # empty dict) -- confirm callers always pass a valid group.
    return i * self.major_angle
def find_node_group_membership(self, node):
    """Return the group whose node list contains ``node`` (None if absent)."""
    for group_name, members in self.nodes.items():
        if node in members:
            return group_name
def get_idx(self, node):
    """Position of ``node`` within its owning group's node list."""
    owning_group = self.find_node_group_membership(node)
    return self.nodes[owning_group].index(node)
def node_radius(self, node):
    """Radial distance of ``node`` from the plot centre."""
    offset = self.get_idx(node) * self.scale
    return offset + self.internal_radius
def node_theta(self, node):
    """Angle of the axis on which ``node`` sits (its group's theta)."""
    return self.group_theta(self.find_node_group_membership(node))
def add_edges(self):
    """Render every edge of the graph, group by group."""
    for group_name, edgelist in self.edges.items():
        for u, v, meta in edgelist:
            self.draw_edge(u, v, meta, group_name)
def draw(self):
    """
    The master function that is called that draws everything.

    Sets symmetric axis limits, draws axes+nodes, then edges, and hides
    the matplotlib frame.
    """
    # hoist: plot_radius() was recomputed four times for the same value
    r = self.plot_radius()
    self.ax.set_xlim(-r, r)
    self.ax.set_ylim(-r, r)
    self.add_axes_and_nodes()
    self.add_edges()
    self.ax.axis('off')
def adjust_angles(self, start_node, start_angle, end_node, end_angle):
    """
    This function adjusts the start and end angles to correct for
    duplicated axes.
    """
    start_group = self.find_node_group_membership(start_node)
    end_group = self.find_node_group_membership(end_node)
    # NOTE(review): groups are compared against integer positions (0 and
    # len-1) here, while elsewhere groups are the keys of self.nodes --
    # presumably the keys are integers; verify against callers.
    # Case 1: edge wraps from the first group to the last group.
    if start_group == 0 and end_group == len(self.nodes.keys())-1:
        if self.has_edge_within_group(start_group):
            start_angle = correct_negative_angle(start_angle -
                                                 self.minor_angle)
        if self.has_edge_within_group(end_group):
            end_angle = correct_negative_angle(end_angle +
                                               self.minor_angle)
    # Case 2: edge wraps from the last group back to the first group.
    elif start_group == len(self.nodes.keys())-1 and end_group == 0:
        if self.has_edge_within_group(start_group):
            start_angle = correct_negative_angle(start_angle +
                                                 self.minor_angle)
        if self.has_edge_within_group(end_group):
            end_angle = correct_negative_angle(end_angle -
                                               self.minor_angle)
    # Case 3: forward edge between groups in increasing order.
    elif start_group < end_group:
        if self.has_edge_within_group(end_group):
            end_angle = correct_negative_angle(end_angle -
                                               self.minor_angle)
        if self.has_edge_within_group(start_group):
            start_angle = correct_negative_angle(start_angle +
                                                 self.minor_angle)
    # Case 4: backward edge between groups in decreasing order.
    elif end_group < start_group:
        if self.has_edge_within_group(start_group):
            start_angle = correct_negative_angle(start_angle -
                                                 self.minor_angle)
        if self.has_edge_within_group(end_group):
            end_angle = correct_negative_angle(end_angle +
                                               self.minor_angle)
    return start_angle, end_angle
def mods_genre(self):
    """Map this entry's type onto a MODS XML genre string.

    Unknown types are passed through lower-cased unchanged.
    """
    genre_map = {
        'conference': 'conference publication',
        'book chapter': 'bibliography',
        'unpublished': 'article',
    }
    lowered = str(self.type).lower()
    return genre_map.get(lowered, lowered)
def get_publications(context, template='publications/publications.html'):
    """
    Get all publications.

    Renders ``template`` with every non-external publication whose type is
    not hidden, newest first; returns '' when there are none.
    """
    # visible (non-hidden) publication types only
    types = Type.objects.filter(hidden=False)
    publications = Publication.objects.select_related()
    publications = publications.filter(external=False, type__in=types)
    publications = publications.order_by('-year', '-month', '-id')
    if not publications:
        return ''
    # load custom links and files
    populate(publications)
    return render_template(template, context['request'], {'publications': publications})
def get_publication(context, id):
    """
    Get a single publication.

    Renders the publication with primary key ``id``, or returns '' when
    it does not exist.
    """
    pbl = Publication.objects.filter(pk=int(id))
    if len(pbl) < 1:
        return ''
    # Index the queryset once instead of re-indexing ``pbl[0]`` three
    # times: clearer, and safe even if the queryset cache were bypassed
    # (repeated [0] on an unevaluated queryset issues fresh queries).
    publication = pbl[0]
    publication.links = publication.customlink_set.all()
    publication.files = publication.customfile_set.all()
    return render_template(
        'publications/publication.html', context['request'], {'publication': publication})
def get_publication_list(context, list, template='publications/publications.html'):
    """
    Get a publication list.

    Looks up the List whose name matches ``list`` (case-insensitive) and
    renders its publications, newest first; returns '' when the list is
    missing or empty.
    """
    # NOTE(review): the parameter shadows the builtin ``list`` and is then
    # rebound to a queryset and a model instance below; renaming it would
    # change the keyword interface, so it is left as-is.
    list = List.objects.filter(list__iexact=list)
    if not list:
        return ''
    list = list[0]
    publications = list.publication_set.all()
    publications = publications.order_by('-year', '-month', '-id')
    if not publications:
        return ''
    # load custom links and files
    populate(publications)
    return render_template(
        template, context['request'], {'list': list, 'publications': publications})
def tex_parse(string):
    """
    Renders some basic TeX math to HTML.

    Escapes the input, then converts $...$ spans: ^x / _x become
    sup/sub tags and backslash-Greek names become HTML entities.
    """
    # strip TeX grouping braces up front
    string = string.replace('{', '').replace('}', '')
    def tex_replace(match):
        # NOTE(review): because braces are removed above, the \^{...} and
        # \_{...} branches below can never match -- confirm whether brace
        # stripping was meant to happen after this substitution.
        return \
            sub(r'\^(\w)', r'<sup>\1</sup>',
            sub(r'\^\{(.*?)\}', r'<sup>\1</sup>',
            sub(r'\_(\w)', r'<sub>\1</sub>',
            sub(r'\_\{(.*?)\}', r'<sub>\1</sub>',
            sub(r'\\(' + GREEK_LETTERS + ')', r'&\1;', match.group(1))))))
    return mark_safe(sub(r'\$([^\$]*)\$', tex_replace, escape(string)))
def parse(string):
    """
    Takes a string in BibTex format and returns a list of BibTex entries, where
    each entry is a dictionary containing the entries' key-value pairs.

    @type string: string
    @param string: bibliography in BibTex format
    @rtype: list
    @return: a list of dictionaries representing a bibliography
    """
    # bibliography
    bib = []
    # make sure we are dealing with unicode strings
    if not isinstance(string, six.text_type):
        string = string.decode('utf-8')
    # replace special characters
    for key, value in special_chars:
        string = string.replace(key, value)
    # drop single-letter TeX accent macros such as \c{c} -> c
    string = re.sub(r'\\[cuHvs]{?([a-zA-Z])}?', r'\1', string)
    # split into BibTex entries: @type{key, field = value, ...}
    entries = re.findall(
        r'(?u)@(\w+)[ \t]?{[ \t]*([^,\s]*)[ \t]*,?\s*((?:[^=,\s]+\s*\=\s*(?:"[^"]*"|{(?:[^{}]*|{[^{}]*})*}|[^,}]*),?\s*?)+)\s*}',
        string)
    for entry in entries:
        # parse entry into (field, raw value) pairs
        pairs = re.findall(r'(?u)([^=,\s]+)\s*\=\s*("[^"]*"|{(?:[^{}]*|{[^{}]*})*}|[^,]*)', entry[2])
        # add to bibliography
        bib.append({'type': entry[0].lower(), 'key': entry[1]})
        for key, value in pairs:
            # post-process key and value
            key = key.lower()
            # strip one level of surrounding quotes or braces
            if value and value[0] == '"' and value[-1] == '"':
                value = value[1:-1]
            if value and value[0] == '{' and value[-1] == '}':
                value = value[1:-1]
            # titles keep inner braces (BibTeX case protection); every other
            # field has braces removed entirely
            if key not in ['booktitle', 'title']:
                value = value.replace('}', '').replace('{', '')
            else:
                if value.startswith('{') and value.endswith('}'):
                    value = value[1:]
                    value = value[:-1]
            value = value.strip()
            # collapse internal whitespace runs
            value = re.sub(r'\s+', ' ', value)
            # store pair in bibliography
            bib[-1][key] = value
    return bib
def swap(self, qs):
    """
    Swap the positions of this object with a reference object.

    ``qs`` must be pre-filtered/ordered so its first element is the swap
    target; an empty queryset is a no-op (already first/last).
    """
    try:
        replacement = qs[0]
    except IndexError:
        # already first/last
        return
    if not self._valid_ordering_reference(replacement):
        raise ValueError(
            "%r can only be swapped with instances of %r which %s equals %r." % (
                self, self.__class__, self.order_with_respect_to,
                self._get_order_with_respect_to()
            )
        )
    # exchange ``order`` values and persist both rows
    self.order, replacement.order = replacement.order, self.order
    self.save()
    replacement.save()
def up(self):
    """
    Move this object up one position.
    """
    # nearest object with a smaller ``order``: descending sort puts the
    # immediate predecessor first in the queryset handed to swap()
    self.swap(self.get_ordering_queryset().filter(order__lt=self.order).order_by('-order'))
def down(self):
    """
    Move this object down one position.
    """
    # NOTE(review): unlike up(), no explicit order_by -- this presumably
    # relies on the queryset's default ascending ``order`` ordering to put
    # the immediate successor first; verify the model's Meta.ordering.
    self.swap(self.get_ordering_queryset().filter(order__gt=self.order))
def to(self, order):
    """
    Move object to a certain position, updating all affected objects to move accordingly up or down.
    """
    if order is None or self.order == order:
        # object is already at desired position
        return
    qs = self.get_ordering_queryset()
    # shift every row between the old and new position by one, opposite
    # to the direction of the move, then claim the freed target slot
    if self.order > order:
        qs.filter(order__lt=self.order, order__gte=order).update(order=F('order') + 1)
    else:
        qs.filter(order__gt=self.order, order__lte=order).update(order=F('order') - 1)
    self.order = order
    self.save()
def above(self, ref):
    """
    Move this object above the referenced object.
    """
    if not self._valid_ordering_reference(ref):
        raise ValueError(
            "%r can only be moved above instances of %r which %s equals %r." % (
                self, self.__class__, self.order_with_respect_to,
                self._get_order_with_respect_to()
            )
        )
    if self.order == ref.order:
        return
    if self.order > ref.order:
        # moving upward: take the reference's slot directly
        o = ref.order
    else:
        # moving downward: land on the highest order value strictly below
        # the reference (0 when nothing sits below it)
        o = self.get_ordering_queryset().filter(order__lt=ref.order).aggregate(Max('order')).get('order__max') or 0
    self.to(o)
def below(self, ref):
    """
    Move this object below the referenced object.
    """
    if not self._valid_ordering_reference(ref):
        raise ValueError(
            "%r can only be moved below instances of %r which %s equals %r." % (
                self, self.__class__, self.order_with_respect_to,
                self._get_order_with_respect_to()
            )
        )
    if self.order == ref.order:
        return
    if self.order > ref.order:
        # moving upward: land on the lowest order value strictly above
        # the reference (0 when nothing sits above it)
        o = self.get_ordering_queryset().filter(order__gt=ref.order).aggregate(Min('order')).get('order__min') or 0
    else:
        # moving downward: take the reference's slot directly
        o = ref.order
    self.to(o)
def top(self):
    """
    Move this object to the top of the ordered stack.
    """
    # smallest existing order value is the destination; to() no-ops when
    # the queryset is empty (aggregate returns None)
    o = self.get_ordering_queryset().aggregate(Min('order')).get('order__min')
    self.to(o)
def bottom(self):
    """
    Move this object to the bottom of the ordered stack.
    """
    # largest existing order value is the destination; to() no-ops when
    # the queryset is empty (aggregate returns None)
    o = self.get_ordering_queryset().aggregate(Max('order')).get('order__max')
    self.to(o)
def populate(publications):
    """Attach each publication's custom links and files in two bulk queries.

    Mutates the given publications in place, adding ``links`` and
    ``files`` list attributes.
    """
    links = CustomLink.objects.filter(publication__in=publications)
    files = CustomFile.objects.filter(publication__in=publications)
    # index publications by primary key, initialising empty collections
    by_id = {}
    for publication in publications:
        publication.links = []
        publication.files = []
        by_id[publication.id] = publication
    # fan the bulk-fetched rows back out onto their owners
    for link in links:
        by_id[link.publication_id].links.append(link)
    for custom_file in files:
        by_id[custom_file.publication_id].files.append(custom_file)
def worker(self):
    """
    Calculates the quartet weights for the test at a random
    subsampled chunk of loci.
    """
    ## subsample loci
    fullseqs = self.sample_loci()
    ## find all iterations of samples for this quartet
    liters = itertools.product(*self.imap.values())
    ## run tree inference for each iteration of sampledict
    hashval = uuid.uuid4().hex
    weights = []
    for ridx, lidx in enumerate(liters):
        ## get subalignment for this iteration and make to nex
        # NOTE(review): only ``a`` is used below (for the alignment length);
        # b, c, d are unpacked but unused.
        a,b,c,d = lidx
        sub = {}
        # map each sampled individual onto its quartet slot via rmap
        for i in lidx:
            if self.rmap[i] == "p1":
                sub["A"] = fullseqs[i]
            elif self.rmap[i] == "p2":
                sub["B"] = fullseqs[i]
            elif self.rmap[i] == "p3":
                sub["C"] = fullseqs[i]
            else:
                sub["D"] = fullseqs[i]
        ## write as nexus file
        nex = []
        for tax in list("ABCD"):
            nex.append(">{} {}".format(tax, sub[tax]))
        ## check for too much missing or lack of variants
        nsites, nvar = count_var(nex)
        ## only run test if there's variation present
        if nvar > self.minsnps:
            ## format as nexus file
            nexus = "{} {}\n".format(4, len(fullseqs[a])) + "\n".join(nex)
            ## infer ML tree
            treeorder = self.run_tree_inference(nexus, "{}.{}".format(hashval, ridx))
            ## add to list
            weights.append(treeorder)
    ## cleanup - remove all files with the hash val
    rfiles = glob.glob(os.path.join(tempfile.tempdir, "*{}*".format(hashval)))
    for rfile in rfiles:
        if os.path.exists(rfile):
            os.remove(rfile)
    ## return result as weights for the set topologies.
    trees = ["ABCD", "ACBD", "ADBC"]
    # NOTE(review): ZeroDivisionError if no iteration passed the minsnps
    # filter (weights empty) -- confirm callers guarantee variation.
    wdict = {i:float(weights.count(i))/len(weights) for i in trees}
    return wdict
def get_order(tre):
    """
    return tree order
    """
    # locate the tip labelled ">A" (ete3/toytree `&` tip-search syntax)
    anode = tre.tree&">A"
    sister = anode.get_sisters()[0]
    # strip the leading ">" from the tip names
    sisters = (anode.name[1:], sister.name[1:])
    others = [i for i in list("ABCD") if i not in sisters]
    # canonical order: A's clade pair first, then the remaining pair
    return sorted(sisters) + sorted(others)
def count_var(nex):
    """Count fully-covered sites and variable (SNP) sites.

    ``nex`` holds lines like ``">A ACGT"``; the last whitespace field of
    each line is one aligned sequence. Columns containing an "N" are
    discarded before counting.
    """
    seqs = np.array([list(line.split()[-1]) for line in nex])
    # keep only columns with no missing data
    full_cov = seqs[:, ~np.any(seqs == "N", axis=0)]
    # a column is variable when not every row matches the first row
    variable = ~np.all(full_cov == full_cov[0, :], axis=0)
    return full_cov.shape[1], variable.sum()
def sample_loci(self):
    """ finds loci with sufficient sampling for this test"""
    ## store idx of passing loci
    # random draw (with replacement) of locus indices for this replicate
    idxs = np.random.choice(self.idxs, self.ntests)
    ## open handle, make a proper generator to reduce mem
    with open(self.data) as indata:
        # NOTE(review): despite the comment this is NOT a generator --
        # read() loads the whole file and split() returns a list.
        liter = (indata.read().strip().split("|\n"))
    ## store data as dict
    seqdata = {i:"" for i in self.samples}
    ## put chunks into a list
    for idx, loc in enumerate(liter):
        if idx in idxs:
            ## parse chunk
            lines = loc.split("\n")[:-1]
            names = [i.split()[0] for i in lines]
            seqs = [i.split()[1] for i in lines]
            dd = {i:j for i,j in zip(names, seqs)}
            ## add data to concatenated seqdict
            for name in seqdata:
                if name in names:
                    seqdata[name] += dd[name]
                else:
                    # pad absent samples with Ns to keep the alignment ragged-free
                    seqdata[name] += "N"*len(seqs[0])
    ## concatenate into a phylip file
    return seqdata
def run_tree_inference(self, nexus, idx):
    """
    Write nexus to tmpfile, runs raxml tree inference, and parses
    and returns the resulting tree order as a 4-letter string.
    """
    ## create a tmpdir for this test
    tmpdir = tempfile.tempdir
    # BUG FIX: the original wrapped NamedTemporaryFile in os.path.join(),
    # which raises TypeError (a file object is not a path component);
    # the file object itself is what we need here.
    tmpfile = tempfile.NamedTemporaryFile(
        delete=False,
        prefix=str(idx),
        dir=tmpdir,
    )
    ## write nexus to tmpfile
    tmpfile.write(nexus)
    tmpfile.flush()
    ## infer the tree
    rax = raxml(name=str(idx), data=tmpfile.name, workdir=tmpdir, N=1, T=2)
    rax.run(force=True, block=True, quiet=True)
    ## clean up
    tmpfile.close()
    ## return tree order
    order = get_order(toytree.tree(rax.trees.bestTree))
    return "".join(order)
def plot(self):
    """
    return a toyplot barplot of the results table, or the string
    "no results found" when no results exist.
    """
    # BUG FIX: ``results_table == None`` broadcasts elementwise on a
    # pandas object and the resulting frame is ambiguous in boolean
    # context; identity comparison is the correct test.
    if self.results_table is None:
        return "no results found"
    # most-supported topology first
    bb = self.results_table.sort_values(
        by=["ABCD", "ACBD"],
        ascending=[False, True],
    )
    ## make a barplot
    import toyplot
    c = toyplot.Canvas(width=600, height=200)
    a = c.cartesian()
    m = a.bars(bb)
    return c, a, m
def plot_pairwise_dist(self, labels=None, ax=None, cmap=None, cdict=None, metric="euclidean"):
    """
    Plot pairwise distances between all samples

    labels: bool or list
        by default labels aren't included. If labels == True, then labels are read in
        from the vcf file. Alternatively, labels can be passed in as a list, should
        be same length as the number of samples.
    """
    allele_counts = self.genotypes.to_n_alt()
    dist = allel.pairwise_distance(allele_counts, metric=metric)
    # identity check: ``not ax`` would also reject any falsy axes object
    if ax is None:
        fig = plt.figure(figsize=(5, 5))
        ax = fig.add_subplot(1, 1, 1)
    if isinstance(labels, bool):
        if labels:
            labels = list(self.samples_vcforder)
    # ``labels is None`` replaces the isinstance(labels, type(None)) anti-idiom
    elif labels is None:
        pass
    else:
        ## If not bool or None (default), then check to make sure the list passed in
        ## is the right length
        if not len(labels) == len(self.samples_vcforder):
            raise IPyradError(LABELS_LENGTH_ERROR.format(len(labels), len(self.samples_vcforder)))
    allel.plot.pairwise_distance(dist, labels=labels, ax=ax, colorbar=False)
def copy(self):
    """ returns a copy of the pca analysis object """
    duplicate = copy.deepcopy(self)
    # deep-copy the genotype array explicitly so the two objects share nothing
    duplicate.genotypes = allel.GenotypeArray(self.genotypes, copy=True)
    return duplicate
def loci2migrate(name, locifile, popdict, mindict=1):
    """
    A function to build an input file for the program migrate from an ipyrad
    .loci file, and a dictionary grouping Samples into populations.

    Parameters:
    -----------
    name: (str)
        The name prefix for the migrate formatted output file.
    locifile: (str)
        The path to the .loci file produced by ipyrad.
    popdict: (dict)
        A Python dictionary grouping Samples into Populations.

    Examples:
    ---------
    You can create the population dictionary by hand, and pass in the path
    to your .loci file as a string.
    >> popdict = {'A': ['a', 'b', 'c'], 'B': ['d', 'e', 'f']}
    >> loci2migrate("outfile.migrate", "./mydata.loci", popdict)

    Or, if you load your ipyrad.Assembly object from it's JSON file, you can
    access the loci file path and population information from there directly.
    >> data = ip.load_json("mydata.json")
    >> loci2migrate("outfile.migrate", data.outfiles.loci, data.populations)
    """
    ## I/O
    outfile = open(name+".migrate", 'w')
    infile = open(locifile, 'r')
    ## minhits dictionary can be an int (all same) or a dictionary (set each)
    if isinstance(mindict, int):
        mindict = {pop: mindict for pop in popdict}
    else:
        mindict = mindict
    ## filter data to only the loci that have data for mindict setting
    keep = []
    # NOTE(review): ``taxa`` and ``minhits`` are undefined in this scope
    # (``popdict``/``mindict`` appear to be intended), as is ``data`` used
    # below -- as written this function raises NameError. The ``print >>``
    # statements are Python-2-only syntax as well.
    MINS = zip(taxa.keys(), minhits)
    ## read in data to sample names
    loci = infile.read().strip().split("|")[:-1]
    for loc in loci:
        samps = [i.split()[0].replace(">","") for i in loc.split("\n") if ">" in i]
        ## filter for coverage
        GG = []
        for group,mins in MINS:
            GG.append( sum([i in samps for i in taxa[group]]) >= int(mins) )
        if all(GG):
            keep.append(loc)
    ## print data to file
    print >>outfile, len(taxa), len(keep), "( npops nloci for data set", data.name+".loci",")"
    ## print all data for each population at a time
    done = 0
    for group in taxa:
        ## print a list of lengths of each locus
        if not done:
            loclens = [len(loc.split("\n")[1].split()[-1].replace("x","n").replace("n","")) for loc in keep]
            print >>outfile, " ".join(map(str,loclens))
            done += 1
        ## print a list of number of individuals in each locus
        indslist = []
        for loc in keep:
            samps = [i.split()[0].replace(">","") for i in loc.split("\n") if ">" in i]
            inds = sum([i in samps for i in taxa[group]])
            indslist.append(inds)
        print >>outfile, " ".join(map(str,indslist)), group
        ## print sample id, spaces, and sequence data
        #for loc in range(len(keep)):
        for loc in range(len(keep)):
            seqs = [i.split()[-1] for i in keep[loc].split("\n") if \
                    i.split()[0].replace(">","") in taxa[group]]
            for i in range(len(seqs)):
                print >>outfile, group[0:8]+"_"+str(i)+\
                    (" "*(10-len(group[0:8]+"_"+str(i))))+seqs[i].replace("x","n").replace("n","")
    outfile.close()
def update(assembly, idict, count):
    """ updates dictionary with the next .5M reads from the super long string
    phylip file. Makes for faster reading. """
    data = iter(open(os.path.join(assembly.dirs.outfiles,
                                  assembly.name+".phy"), 'r'))
    # NOTE(review): ``data.next()`` is Python-2-only; Python 3 needs
    # ``next(data)``. The header line holds the taxa/char counts.
    ntax, nchar = data.next().strip().split()
    ## read in max N bp at a time
    for line in data:
        tax, seq = line.strip().split()
        # slide a 100 kb window: drop the consumed prefix, append the next chunk
        idict[tax] = idict[tax][100000:]
        idict[tax] += seq[count:count+100000]
        del line
    return idict
def make(assembly, samples):
    """ Make phylip and nexus formats. This is hackish since I'm recycling the
    code whole-hog from pyrad V3. Probably could be good to go back through
    and clean up the conversion code some time.
    """
    ## longest sample name determines the name-column padding
    longname = max(len(key) for key in assembly.samples.keys())
    names = [sample.name for sample in samples]
    ## phylip writer returns the partition info consumed by the nexus writer
    partitions = makephy(assembly, samples, longname)
    makenex(assembly, names, longname, partitions)
def sample_cleanup(data, sample):
    """
    Clean up a bunch of loose files.

    Best-effort: files that are already gone are silently skipped.
    """
    edits = data.dirs.edits
    refmapping = data.dirs.refmapping
    tmpfiles = [
        os.path.join(edits, sample.name + "-tmp-umap1.fastq"),
        os.path.join(edits, sample.name + "-tmp-umap2.fastq"),
        os.path.join(refmapping, sample.name + "-unmapped.bam"),
        os.path.join(refmapping, sample.name + ".sam"),
        os.path.join(edits, sample.name + "-split1.fastq"),
        os.path.join(edits, sample.name + "-split2.fastq"),
        os.path.join(edits, sample.name + "-refmap_derep.fastq"),
    ]
    for path in tmpfiles:
        try:
            os.remove(path)
        # narrowed from a bare ``except:`` -- only ignore filesystem errors
        except OSError:
            pass
def index_reference_sequence(data, force=False):
    """
    Index the reference sequence, unless it already exists. Also make a mapping
    of scaffolds to index numbers for later user in steps 5-6.
    """
    ## get ref file from params
    refseq_file = data.paramsdict['reference_sequence']
    index_files = []
    ## Check for existence of index files. Default to bwa unless you specify smalt
    if "smalt" in data._hackersonly["aligner"]:
        # These are smalt index files. Only referenced here to ensure they exist
        index_files.extend([".sma", ".smi"])
    else:
        index_files.extend([".amb", ".ann", ".bwt", ".pac", ".sa"])
    ## samtools specific index
    index_files.extend([".fai"])
    ## If reference sequence already exists then bail out of this func
    if not force:
        if all([os.path.isfile(refseq_file+i) for i in index_files]):
            return
    #if data._headers:
    #    print(INDEX_MSG.format(data._hackersonly["aligner"]))
    if "smalt" in data._hackersonly["aligner"]:
        ## Create smalt index for mapping
        ## smalt index [-k <wordlen>] [-s <stepsiz>] <index_name> <reference_file>
        cmd1 = [ipyrad.bins.smalt, "index",
                "-k", str(data._hackersonly["smalt_index_wordlen"]),
                refseq_file,
                refseq_file]
    else:
        ## bwa index <reference_file>
        cmd1 = [ipyrad.bins.bwa, "index", refseq_file]
    ## call the command
    LOGGER.info(" ".join(cmd1))
    proc1 = sps.Popen(cmd1, stderr=sps.STDOUT, stdout=sps.PIPE)
    error1 = proc1.communicate()[0]
    ## simple samtools index for grabbing ref seqs
    cmd2 = [ipyrad.bins.samtools, "faidx", refseq_file]
    LOGGER.info(" ".join(cmd2))
    proc2 = sps.Popen(cmd2, stderr=sps.STDOUT, stdout=sps.PIPE)
    error2 = proc2.communicate()[0]
    ## error handling
    if proc1.returncode:
        raise IPyradWarningExit(error1)
    # NOTE(review): cmd2 failure is detected via non-empty *output* rather
    # than the return code (stderr is merged into stdout), so benign
    # warnings could spuriously raise -- confirm intended.
    if error2:
        if "please use bgzip" in error2:
            raise IPyradWarningExit(NO_ZIP_BINS.format(refseq_file))
        else:
            raise IPyradWarningExit(error2)
def fetch_cluster_se(data, samfile, chrom, rstart, rend):
    """
    Builds a single end cluster from the refmapped data.
    """
    ## If SE then we enforce the minimum overlap distance to avoid the
    ## staircase syndrome of multiple reads overlapping just a little.
    overlap_buffer = data._hackersonly["min_SE_refmap_overlap"]
    ## the *_buff variables here are because we have to play patty
    ## cake here with the rstart/rend vals because we want pysam to
    ## enforce the buffer for SE, but we want the reference sequence
    ## start and end positions to print correctly for downstream.
    rstart_buff = rstart + overlap_buffer
    rend_buff = rend - overlap_buffer
    ## Reads that map to only very short segements of the reference
    ## sequence will return buffer end values that are before the
    ## start values causing pysam to complain. Very short mappings.
    if rstart_buff > rend_buff:
        tmp = rstart_buff
        rstart_buff = rend_buff
        rend_buff = tmp
    ## Buffering can't make start and end equal or pysam returns nothing.
    if rstart_buff == rend_buff:
        rend_buff += 1
    ## store pairs
    rdict = {}
    clust = []
    iterreg = []
    iterreg = samfile.fetch(chrom, rstart_buff, rend_buff)
    ## use dict to match up read pairs
    for read in iterreg:
        if read.qname not in rdict:
            rdict[read.qname] = read
    ## sort dict keys so highest derep is first ('seed')
    # read names carry a ";size=N;" dereplication count used for ranking
    sfunc = lambda x: int(x.split(";size=")[1].split(";")[0])
    rkeys = sorted(rdict.keys(), key=sfunc, reverse=True)
    ## get blocks from the seed for filtering, bail out if seed is not paired
    try:
        read1 = rdict[rkeys[0]]
    # NOTE(review): a dict lookup raises KeyError and rkeys[0] raises
    # IndexError when empty -- this ValueError handler likely never fires;
    # also the failure path returns "" while the normal path returns a
    # list. Confirm the intended failure mode.
    except ValueError:
        LOGGER.error("Found bad cluster, skipping - key:{} rdict:{}".format(rkeys[0], rdict))
        return ""
    ## the starting blocks for the seed
    poss = read1.get_reference_positions(full_length=True)
    seed_r1start = min(poss)
    seed_r1end = max(poss)
    ## store the seed -------------------------------------------
    if read1.is_reverse:
        seq = revcomp(read1.seq)
    else:
        seq = read1.seq
    ## store, could write orient but just + for now.
    size = sfunc(rkeys[0])
    clust.append(">{}:{}:{};size={};*\n{}"\
        .format(chrom, seed_r1start, seed_r1end, size, seq))
    ## If there's only one hit in this region then rkeys will only have
    ## one element and the call to `rkeys[1:]` will raise. Test for this.
    if len(rkeys) > 1:
        ## store the hits to the seed -------------------------------
        for key in rkeys[1:]:
            skip = False
            try:
                read1 = rdict[key]
            except ValueError:
                ## enter values that will make this read get skipped
                read1 = rdict[key][0]
                skip = True
            ## orient reads only if not skipping
            if not skip:
                poss = read1.get_reference_positions(full_length=True)
                minpos = min(poss)
                maxpos = max(poss)
                ## store the seq
                if read1.is_reverse:
                    seq = revcomp(read1.seq)
                else:
                    seq = read1.seq
                ## store, could write orient but just + for now.
                size = sfunc(key)
                clust.append(">{}:{}:{};size={};+\n{}"\
                    .format(chrom, minpos, maxpos, size, seq))
            else:
                ## seq is excluded, though, we could save it and return
                ## it as a separate cluster that will be aligned separately.
                pass
    return clust
def ref_build_and_muscle_chunk(data, sample):
    """
    1. Run bedtools to get all overlapping regions
    2. Parse out reads from regions using pysam and dump into chunk files.
    We measure it out to create 10 chunk files per sample.
    3. If we really wanted to speed this up, though it is pretty fast already,
    we could parallelize it since we can easily break the regions into
    a list of chunks.
    """
    ## get regions using bedtools
    regions = bedtools_merge(data, sample).strip().split("\n")
    nregions = len(regions)
    # NOTE(review): integer division is assumed here -- under Python 3 the
    # ``/`` yields a float and the ``nclusts == chunksize`` test below would
    # never be True; this code targets Python 2.
    chunksize = (nregions / 10) + (nregions % 10)
    LOGGER.debug("nregions {} chunksize {}".format(nregions, chunksize))
    ## create an output file to write clusters to
    idx = 0
    tmpfile = os.path.join(data.tmpdir, sample.name+"_chunk_{}.ali")
    ## remove old files if they exist to avoid append errors
    for i in range(11):
        if os.path.exists(tmpfile.format(i)):
            os.remove(tmpfile.format(i))
    fopen = open
    ## If reference+denovo we drop the reads back into clust.gz
    ## and let the muscle_chunker do it's thing back in cluster_within
    if data.paramsdict["assembly_method"] == "denovo+reference":
        tmpfile = os.path.join(data.dirs.clusts, sample.name+".clust.gz")
        fopen = gzip.open
    ## build clusters for aligning with muscle from the sorted bam file
    samfile = pysam.AlignmentFile(sample.files.mapped_reads, 'rb')
    #"./tortas_refmapping/PZ70-mapped-sorted.bam", "rb")
    ## fill clusts list and dump periodically
    clusts = []
    nclusts = 0
    for region in regions:
        chrom, pos1, pos2 = region.split()
        try:
            ## fetches pairs quickly but then goes slow to merge them.
            if "pair" in data.paramsdict["datatype"]:
                clust = fetch_cluster_pairs(data, samfile, chrom, int(pos1), int(pos2))
            ## fetch but no need to merge
            else:
                clust = fetch_cluster_se(data, samfile, chrom, int(pos1), int(pos2))
        except IndexError as inst:
            LOGGER.error("Bad region chrom:start-end {}:{}-{}".format(chrom, pos1, pos2))
            continue
        if clust:
            clusts.append("\n".join(clust))
            nclusts += 1
            if nclusts == chunksize:
                ## write to file
                tmphandle = tmpfile.format(idx)
                with fopen(tmphandle, 'a') as tmp:
                    #LOGGER.debug("Writing tmpfile - {}".format(tmpfile.format(idx)))
                    #if data.paramsdict["assembly_method"] == "denovo+reference":
                    # ## This is dumb, but for this method you need to prepend the
                    # ## separator to maintain proper formatting of clust.gz
                    tmp.write("\n//\n//\n".join(clusts)+"\n//\n//\n")
                idx += 1
                nclusts = 0
                clusts = []
    if clusts:
        ## write remaining to file
        with fopen(tmpfile.format(idx), 'a') as tmp:
            #tmp.write("\n//\n//\n" + ("\n//\n//\n".join(clusts)))
            tmp.write("\n//\n//\n".join(clusts)+"\n//\n//\n")
        clusts = []
    if not data.paramsdict["assembly_method"] == "denovo+reference":
        chunkfiles = glob.glob(os.path.join(data.tmpdir, sample.name+"_chunk_*.ali"))
        LOGGER.info("created chunks %s", chunkfiles)
    ## cleanup
    samfile.close()
def ref_muscle_chunker(data, sample):
    """
    Run bedtools to get all overlapping regions, then pass that list to
    'get_overlapping_reads' which writes fastq chunks to the clust.gz file.

    1) Run bedtools merge to get a list of all contiguous blocks of bases
    in the reference sequence where one or more of our reads overlap.
    The output will look like this:
            1       45230754        45230783
            1       74956568        74956596
            ...
            1       116202035       116202060
    """
    LOGGER.info('entering ref_muscle_chunker')

    ## newline-delimited (chrom, start, end) regions from bedtools merge
    overlaps = bedtools_merge(data, sample)
    if not len(overlaps):
        LOGGER.warn(
            "No reads mapped to reference sequence - {}".format(sample.name))
    else:
        ## this calls bam_region_to_fasta once for every region
        get_overlapping_reads(data, sample, overlaps)
def check_insert_size(data, sample):
    """
    Check the mean insert size for this sample and update
    hackersonly.max_inner_mate_distance if need be. This value controls
    how far apart mate pairs can be to still be considered for bedtools
    merging downstream.
    """
    ## run `samtools stats` and keep only the summary ("SN") lines
    statcmd = [ipyrad.bins.samtools, "stats", sample.files.mapped_reads]
    grepcmd = ["grep", "SN"]
    pipe1 = sps.Popen(statcmd, stderr=sps.STDOUT, stdout=sps.PIPE)
    pipe2 = sps.Popen(grepcmd, stderr=sps.STDOUT, stdout=sps.PIPE,
                      stdin=pipe1.stdout)
    res = pipe2.communicate()[0]
    if pipe2.returncode:
        raise IPyradWarningExit("error in %s: %s", grepcmd, res)

    ## parse mean insert size, its stdev, and mean read length
    avg_insert, stdv_insert, avg_len = 0, 0, 0
    for line in res.split("\n"):
        value = line.split(":")[-1].strip()
        if "insert size average" in line:
            avg_insert = float(value)
        elif "insert size standard deviation" in line:
            ## hack to fix sim data when stdv is 0.0. Shouldn't
            ## impact real data bcz stdv gets rounded up below
            stdv_insert = float(value) + 0.1
        elif "average length" in line:
            avg_len = float(value)

    LOGGER.debug("avg {} stdv {} avg_len {}"\
                 .format(avg_insert, stdv_insert, avg_len))

    ## avg_insert is the average length of R1+R2+inner mate distance,
    ## while avg_len is the average length of one read. We want the
    ## right value for `bedtools merge -d`: the max distance between
    ## reads.
    if not all([avg_insert, stdv_insert, avg_len]):
        ## If something fsck then set a relatively conservative distance
        data._hackersonly["max_inner_mate_distance"] = 300
    else:
        if stdv_insert < 5:
            stdv_insert = 5.
        if (2 * avg_len) < avg_insert:
            ## most reads DO NOT overlap
            hack = avg_insert + (3 * np.math.ceil(stdv_insert)) \
                   - (2 * avg_len)
        else:
            ## most reads DO overlap, so the inner mate distance is
            ## calculated a little differently
            hack = (avg_insert - avg_len) + (3 * np.math.ceil(stdv_insert))
        LOGGER.info("stdv: hacked insert size is %s", hack)
        data._hackersonly["max_inner_mate_distance"] = int(np.math.ceil(hack))

    LOGGER.debug("inner mate distance for {} - {}".format(
        sample.name, data._hackersonly["max_inner_mate_distance"]))
def bedtools_merge(data, sample):
    """
    Get all contiguous genomic regions with one or more overlapping
    reads. This is the shell command we'll eventually run

        bedtools bamtobed -i 1A_0.sorted.bam | bedtools merge [-d 100]
        -i <input_bam>  :   specifies the input file to bed'ize
        -d <int>        :   For PE set max distance between reads
    """
    LOGGER.info("Entering bedtools_merge: %s", sample.name)
    mappedreads = os.path.join(data.dirs.refmapping,
                               sample.name + "-mapped-sorted.bam")

    ## Usage:   bedtools bamtobed [OPTIONS] -i <bam>
    ## Usage:   bedtools merge [OPTIONS] -i <bam>
    cmd1 = [ipyrad.bins.bedtools, "bamtobed", "-i", mappedreads]

    ## For PE, `-d` tells bedtools how far apart mate pairs may be.
    ## For SE, a negative `-d` requires reads to overlap by at least
    ## that many bp. This prevents the stairstep syndrome when a + and
    ## - read both extend from the same cutsite.
    if 'pair' in data.paramsdict["datatype"]:
        check_insert_size(data, sample)
        gap = str(data._hackersonly["max_inner_mate_distance"])
    else:
        gap = str(-1 * data._hackersonly["min_SE_refmap_overlap"])
    cmd2 = [ipyrad.bins.bedtools, "merge", "-d", gap, "-i", "-"]

    ## pipe output from bamtobed into merge
    LOGGER.info("stdv: bedtools merge cmds: %s %s", cmd1, cmd2)
    proc1 = sps.Popen(cmd1, stderr=sps.STDOUT, stdout=sps.PIPE)
    proc2 = sps.Popen(cmd2, stderr=sps.STDOUT, stdout=sps.PIPE,
                      stdin=proc1.stdout)
    result = proc2.communicate()[0]
    proc1.stdout.close()

    ## check for errors and do cleanup
    if proc2.returncode:
        raise IPyradWarningExit("error in %s: %s", cmd2, result)

    ## Write the bedfile out, because it's useful sometimes.
    if os.path.exists(ipyrad.__debugflag__):
        bedfile = os.path.join(data.dirs.refmapping, sample.name + ".bed")
        with open(bedfile, 'w') as outfile:
            outfile.write(result)

    ## Report the number of regions we're returning
    LOGGER.info("bedtools_merge: Got # regions: %s",
                len(result.strip().split("\n")))
    return result
def refmap_stats(data, sample):
    """
    Get the number of mapped and unmapped reads for a sample
    and update sample.stats
    """
    ## run `samtools flagstat` on both the unmapped and mapped bam files
    umapf = os.path.join(data.dirs.refmapping, sample.name + "-unmapped.bam")
    mapf = os.path.join(data.dirs.refmapping,
                        sample.name + "-mapped-sorted.bam")
    result1 = sps.Popen([ipyrad.bins.samtools, "flagstat", umapf],
                        stderr=sps.STDOUT, stdout=sps.PIPE).communicate()[0]
    result2 = sps.Popen([ipyrad.bins.samtools, "flagstat", mapf],
                        stderr=sps.STDOUT, stdout=sps.PIPE).communicate()[0]

    ## If PE, samtools reports the _actual_ number of reads mapped, both
    ## R1 and R2, so here if PE divide the results by 2 to stay consistent
    ## with how we've been reporting R1 and R2 as one "read pair"
    if "pair" in data.paramsdict["datatype"]:
        sample.stats["refseq_unmapped_reads"] = int(result1.split()[0]) / 2
        sample.stats["refseq_mapped_reads"] = int(result2.split()[0]) / 2
    else:
        sample.stats["refseq_unmapped_reads"] = int(result1.split()[0])
        sample.stats["refseq_mapped_reads"] = int(result2.split()[0])

    sample_cleanup(data, sample)
def refmap_init(data, sample, force):
    """ Create persistent file handles for the refmap reads files. """
    ## fastq of this sample's dereplicated (unmapped) reads
    sample.files.unmapped_reads = os.path.join(
        data.dirs.edits, "{}-refmap_derep.fastq".format(sample.name))
    ## sorted bam of the reads that mapped to the reference
    sample.files.mapped_reads = os.path.join(
        data.dirs.refmapping, "{}-mapped-sorted.bam".format(sample.name))
q259248 | Treemix._subsample | validation | def _subsample(self):
""" returns a subsample of unlinked snp sites """
spans = self.maparr
samp = np.zeros(spans.shape[0], dtype=np.uint64)
for i in xrange(spans.shape[0]):
samp[i] = np.random.randint(spans[i, 0], spans[i, 1], 1)
return samp | python | {
"resource": ""
} |
def draw(self, axes):
    """
    Returns a treemix plot on a toyplot.axes object.
    """
    ## create a toytree object from the treemix tree result
    ttree = toytree.tree(newick=self.results.tree)
    ttree.draw(
        axes=axes,
        use_edge_lengths=True,
        tree_style='c',
        tip_labels_align=True,
        edge_align_style={"stroke-width": 1}
    )

    ## add one edge + sink point per inferred admixture event
    for event in self.results.admixture:
        ## (parent idx, dist along parent, child idx, dist, weight)
        pidx, pdist, cidx, cdist, weight = event
        src = _get_admix_point(ttree, pidx, pdist)
        snk = _get_admix_point(ttree, cidx, cdist)

        ## admixture edge; line width scales with the migration weight
        axes.plot(
            a=(src[0], snk[0]),
            b=(src[1], snk[1]),
            style={"stroke-width": 10 * weight,
                   "stroke-opacity": 0.95,
                   "stroke-linecap": "round"}
        )

        ## point marking the admixture sink
        axes.scatterplot(
            a=(snk[0]),
            b=(snk[1]),
            size=8,
            title="weight: {}".format(weight),
        )

    ## add scale bar for edge lengths
    axes.y.show = False
    axes.x.ticks.show = True
    axes.x.label.text = "Drift parameter"
    return axes
def _resolveambig(subseq):
    """
    Randomly resolves iupac hetero codes. This is a shortcut
    for now, we could instead use the phased alleles in RAD loci.
    """
    resolved = []
    for col in subseq:
        ## a single coin flip picks allele 0 or 1 for this whole slice
        flip = np.random.binomial(1, 0.5)
        resolved.append([_AMBIGS[base][flip] for base in col])
    return np.array(resolved)
q259251 | _count_PIS | validation | def _count_PIS(seqsamp, N):
""" filters for loci with >= N PIS """
counts = [Counter(col) for col in seqsamp.T if not ("-" in col or "N" in col)]
pis = [i.most_common(2)[1][1] > 1 for i in counts if len(i.most_common(2))>1]
if sum(pis) >= N:
return sum(pis)
else:
return 0 | python | {
"resource": ""
} |
def _write_nex(self, mdict, nlocus):
    """
    Write a dict mapping names to sequences as a NEXUS file (with a
    mrbayes analysis block built from the stored mcmc params) for locus
    number `nlocus`.

    Fix: `mdict.values()[0]` fails on Python 3 where values() is a view;
    wrap in list() (works identically on Python 2).

    Parameters
    ----------
    mdict : dict
        Maps taxon name -> aligned sequence string (all same length).
    nlocus : int
        Locus index; used to name the output file "<nlocus>.nex".
    """
    ## pad names so sequences line up in the matrix block
    max_name_len = max([len(i) for i in mdict])
    namestring = "{:<" + str(max_name_len + 1) + "} {}\n"
    matrix = ""
    for name, seq in mdict.items():
        matrix += namestring.format(name, seq)

    ## ensure output dir exists
    minidir = os.path.realpath(os.path.join(self.workdir, self.name))
    if not os.path.exists(minidir):
        os.makedirs(minidir)

    ## write nexus block; list() so indexing works on py2 and py3
    nchar = len(list(mdict.values())[0])
    handle = os.path.join(minidir, "{}.nex".format(nlocus))
    with open(handle, 'w') as outnex:
        outnex.write(NEXBLOCK.format(**{
            "ntax": len(mdict),
            "nchar": nchar,
            "matrix": matrix,
            "ngen": self.params.mb_mcmc_ngen,
            "sfreq": self.params.mb_mcmc_sample_freq,
            "burnin": self.params.mb_mcmc_burnin,
        }))
q259253 | _read_sample_names | validation | def _read_sample_names(fname):
""" Read in sample names from a plain text file. This is a convenience
function for branching so if you have tons of sample names you can
pass in a file rather than having to set all the names at the command
line.
"""
try:
with open(fname, 'r') as infile:
subsamples = [x.split()[0] for x in infile.readlines() if x.strip()]
except Exception as inst:
print("Failed to read input file with sample names.\n{}".format(inst))
raise inst
return subsamples | python | {
"resource": ""
} |
q259254 | _bufcountlines | validation | def _bufcountlines(filename, gzipped):
"""
fast line counter. Used to quickly sum number of input reads when running
link_fastqs to append files. """
if gzipped:
fin = gzip.open(filename)
else:
fin = open(filename)
nlines = 0
buf_size = 1024 * 1024
read_f = fin.read # loop optimization
buf = read_f(buf_size)
while buf:
nlines += buf.count('\n')
buf = read_f(buf_size)
fin.close()
return nlines | python | {
"resource": ""
} |
def _zbufcountlines(filename, gzipped):
    """ faster line counter (streams the file through shell `wc`) """
    ## decompress on the fly for gzipped input
    if gzipped:
        catcmd = ["gunzip", "-c", filename]
    else:
        catcmd = ["cat", filename]
    pipe1 = sps.Popen(catcmd, stdout=sps.PIPE, stderr=sps.PIPE)
    pipe2 = sps.Popen(["wc"], stdin=pipe1.stdout,
                      stdout=sps.PIPE, stderr=sps.PIPE)
    res = pipe2.communicate()[0]
    if pipe2.returncode:
        raise IPyradWarningExit("error zbufcountlines {}:".format(res))
    LOGGER.info(res)
    ## the first field of `wc` output is the line count
    return int(res.split()[0])
q259256 | _tuplecheck | validation | def _tuplecheck(newvalue, dtype=str):
"""
Takes a string argument and returns value as a tuple.
Needed for paramfile conversion from CLI to set_params args
"""
if isinstance(newvalue, list):
newvalue = tuple(newvalue)
if isinstance(newvalue, str):
newvalue = newvalue.rstrip(")").strip("(")
try:
newvalue = tuple([dtype(i.strip()) for i in newvalue.split(",")])
## Type error is thrown by tuple if it's applied to a non-iterable.
except TypeError:
newvalue = tuple(dtype(newvalue))
## If dtype fails to cast any element of newvalue
except ValueError:
LOGGER.info("Assembly.tuplecheck() failed to cast to {} - {}"\
.format(dtype, newvalue))
raise
except Exception as inst:
LOGGER.info(inst)
raise SystemExit(\
"\nError: Param`{}` is not formatted correctly.\n({})\n"\
.format(newvalue, inst))
return newvalue | python | {
"resource": ""
} |
def stats(self):
    """ Returns a data frame with Sample data and state.

    Fix: `self.samples.keys()` followed by `.sort()` fails on Python 3,
    where keys() returns a view with no sort method; `sorted()` behaves
    identically on both versions.
    """
    nameordered = sorted(self.samples.keys())
    ## Set pandas to display all samples instead of truncating
    pd.options.display.max_rows = len(self.samples)
    statdat = pd.DataFrame([self.samples[i].stats for i in nameordered],
                           index=nameordered).dropna(axis=1, how='all')
    ## ensure non h,e columns print as ints
    for column in statdat:
        if column not in ["hetero_est", "error_est"]:
            statdat[column] = np.nan_to_num(statdat[column]).astype(int)
    return statdat
def files(self):
    """ Returns a data frame with Sample files. Not very readable...

    Fix: `self.samples.keys()` followed by `.sort()` fails on Python 3
    (keys() is a view there); `sorted()` works the same on py2 and py3.
    """
    nameordered = sorted(self.samples.keys())
    ## replace curdir with . for shorter printing
    #fullcurdir = os.path.realpath(os.path.curdir)
    return pd.DataFrame([self.samples[i].files for i in nameordered],
                        index=nameordered).dropna(axis=1, how='all')
q259259 | Assembly._build_stat | validation | def _build_stat(self, idx):
""" Returns a data frame with Sample stats for each step """
nameordered = self.samples.keys()
nameordered.sort()
newdat = pd.DataFrame([self.samples[i].stats_dfs[idx] \
for i in nameordered], index=nameordered)\
.dropna(axis=1, how='all')
return newdat | python | {
"resource": ""
} |
def get_params(self, param=""):
    """
    Pretty-print all params when called with no argument; otherwise
    return the value for one param selected by index or by name.

    Fix: `self.paramsdict.values()[i]` fails on Python 3 where values()
    is a view; wrap in list() (identical behavior on Python 2).
    """
    fullcurdir = os.path.realpath(os.path.curdir)
    if not param:
        ## print every param as "<idx> <name> <value>"
        for index, (key, value) in enumerate(self.paramsdict.items()):
            if isinstance(value, str):
                value = value.replace(fullcurdir + "/", "./")
            sys.stdout.write("{}{:<4}{:<28}{:<45}\n"\
                .format(self._spacer, index, key, value))
    else:
        try:
            ## numeric lookup. NOTE: param "0" is falsy here and falls
            ## through to return None -- a long-standing quirk kept for
            ## backward compatibility.
            if int(param):
                return list(self.paramsdict.values())[int(param)]
        except (ValueError, TypeError, NameError, IndexError):
            ## name lookup, else a friendly miss message
            try:
                return self.paramsdict[param]
            except KeyError:
                return 'key not recognized'
def set_params(self, param, newvalue):
    """
    Set a parameter to a new value. Raises error if newvalue is wrong type.

    Fix: on Python 3 `dict.keys()` is a view, so concatenating it with a
    list (and indexing it) raised TypeError; wrapping in list() behaves
    identically on Python 2.

    Note
    ----
    Use [Assembly].get_params() to see the parameter values currently
    linked to the Assembly object.

    Parameters
    ----------
    param : int or str
        The index (e.g., 1) or string name (e.g., "project_dir")
        for the parameter that will be changed.

    newvalue : int, str, or tuple
        The new value for the parameter selected for `param`. Use
        `ipyrad.get_params_info()` to get further information about
        a given parameter. If the wrong type is entered for newvalue
        (e.g., a str when it should be an int), an error will be raised.
        Further information about each parameter is also available
        in the documentation.

    Examples
    --------
    ## param 'project_dir' takes only a str as input
    [Assembly].set_params('project_dir', 'new_directory')

    ## param 'restriction_overhang' must be a tuple or str, if str it is
    ## converted to a tuple with the second entry empty.
    [Assembly].set_params('restriction_overhang', ('CTGCAG', 'CCGG')

    ## param 'max_shared_Hs_locus' can be an int or a float:
    [Assembly].set_params('max_shared_Hs_locus', 0.25)
    """
    ## this includes current params and some legacy params for conversion
    legacy_params = ["edit_cutsites", "trim_overhang"]
    current_params = list(self.paramsdict.keys())
    allowed_params = current_params + legacy_params

    ## require parameter recognition
    if not param in allowed_params:
        raise IPyradParamsError("Parameter key not recognized: {}"\
                                .format(param))

    ## make string
    param = str(param)

    ## get index if param is keyword arg (this index is now zero based!)
    if len(param) < 3:
        param = current_params[int(param)]

    ## run assertions on new param
    try:
        self = _paramschecker(self, param, newvalue)

    except Exception as inst:
        raise IPyradWarningExit(BAD_PARAMETER\
                                .format(param, inst, newvalue))
def branch(self, newname, subsamples=None, infile=None):
    """
    Returns a copy of the Assembly object. Does not allow Assembly
    object names to be replicated in namespace or path.
    """
    ## subsample by removal or keeping.
    remove = 0

    ## refuse to clobber an existing assembly name on disk or in namespace
    existing = os.path.join(self.paramsdict["project_dir"],
                            newname + ".assembly")
    if (newname == self.name) or os.path.exists(existing):
        print("{}Assembly object named {} already exists"\
              .format(self._spacer, newname))
        return

    ## Make sure the new name doesn't have any wacky characters
    self._check_name(newname)

    ## Bozo-check. Carve off 'params-' if it's in the new name.
    if newname.startswith("params-"):
        newname = newname.split("params-")[1]

    ## create a copy of the Assembly obj
    newobj = copy.deepcopy(self)
    newobj.name = newname
    newobj.paramsdict["assembly_name"] = newname

    if subsamples and infile:
        print(BRANCH_NAMES_AND_INPUT)

    if infile:
        ## a leading "-" means drop the listed samples instead of keeping
        if infile[0] == "-":
            remove = 1
            infile = infile[1:]
        if os.path.exists(infile):
            subsamples = _read_sample_names(infile)

    ## if remove then swap the samples
    if remove:
        subsamples = list(set(self.samples.keys()) - set(subsamples))

    ## create copies of each subsampled Sample obj
    if subsamples:
        for sname in subsamples:
            if sname in self.samples:
                newobj.samples[sname] = copy.deepcopy(self.samples[sname])
            else:
                print("Sample name not found: {}".format(sname))
        ## reload sample dict w/o non subsamples
        newobj.samples = {name: sample for name, sample in \
                          newobj.samples.items() if name in subsamples}
    else:
        for sample in self.samples:
            newobj.samples[sample] = copy.deepcopy(self.samples[sample])

    ## save json of new obj and return object
    newobj.save()
    return newobj
def _step1func(self, force, ipyclient):
    """ hidden wrapped function to start step 1 """
    ## input may be demultiplexed (sorted) fastqs or raw fastqs
    sfiles = self.paramsdict["sorted_fastq_path"]
    rfiles = self.paramsdict["raw_fastq_path"]

    ## do not allow both a sorted_fastq_path and a raw_fastq
    if sfiles and rfiles:
        raise IPyradWarningExit(NOT_TWO_PATHS)
    ## but also require that at least one exists
    if not (sfiles or rfiles):
        raise IPyradWarningExit(NO_SEQ_PATH_FOUND)

    ## print headers
    if self._headers:
        if sfiles:
            print("\n{}Step 1: Loading sorted fastq data to Samples"\
                  .format(self._spacer))
        else:
            print("\n{}Step 1: Demultiplexing fastq data to Samples"\
                  .format(self._spacer))

    ## if Samples already exist then no demultiplexing unless forced
    if self.samples:
        if not force:
            print(SAMPLES_EXIST.format(len(self.samples), self.name))
            return
        ## overwrite existing data else do demux
        if glob.glob(sfiles):
            self._link_fastqs(ipyclient=ipyclient, force=force)
        else:
            assemble.demultiplex.run2(self, ipyclient, force)
    ## Creating new Samples
    else:
        ## first check if demultiplexed files exist in sorted path
        if glob.glob(sfiles):
            self._link_fastqs(ipyclient=ipyclient)
        ## otherwise do the demultiplexing
        else:
            assemble.demultiplex.run2(self, ipyclient, force)
def _step2func(self, samples, force, ipyclient):
    """ hidden wrapped function to start step 2 """
    if self._headers:
        print("\n  Step 2: Filtering reads ")

    ## If no samples in this assembly then it means you skipped step1,
    if not self.samples.keys():
        raise IPyradWarningExit(FIRST_RUN_1)

    ## Get sample objects from list of strings, if API.
    samples = _get_samples(self, samples)

    ## skip (with a warning) when every sample already finished step 2
    if (not force) and all([i.stats.state >= 2 for i in samples]):
        print(EDITS_EXIST.format(len(samples)))
        return

    ## Run samples through rawedit
    assemble.rawedit.run2(self, samples, force, ipyclient)
def _step4func(self, samples, force, ipyclient):
    """ hidden wrapped function to start step 4 """
    if self._headers:
        print("\n  Step 4: Joint estimation of error rate and heterozygosity")

    ## Get sample objects from list of strings
    samples = _get_samples(self, samples)

    ## Check if all/none in the right state
    if not self._samples_precheck(samples, 4, force):
        raise IPyradError(FIRST_RUN_3)

    ## skip when every sample already finished step 4 and not forcing
    if (not force) and all([i.stats.state >= 4 for i in samples]):
        print(JOINTS_EXIST.format(len(samples)))
        return

    ## send to function
    assemble.jointestimate.run(self, samples, force, ipyclient)
def _step5func(self, samples, force, ipyclient):
    """ hidden wrapped function to start step 5 """
    if self._headers:
        print("\n  Step 5: Consensus base calling ")

    ## Get sample objects from list of strings
    samples = _get_samples(self, samples)

    ## Check if all/none in the right state
    if not self._samples_precheck(samples, 5, force):
        raise IPyradError(FIRST_RUN_4)

    ## skip when every sample already finished step 5 and not forcing
    if (not force) and all([i.stats.state >= 5 for i in samples]):
        print(CONSENS_EXIST.format(len(samples)))
        return

    ## pass samples to rawedit
    assemble.consens_se.run(self, samples, force, ipyclient)
def _step6func(self, samples, noreverse, force, randomseed, ipyclient,
               **kwargs):
    """
    Hidden function to start Step 6.
    """
    ## Get sample objects from list of strings
    samples = _get_samples(self, samples)

    ## remove samples that aren't ready
    csamples = self._samples_precheck(samples, 6, force)

    ## print CLI header
    if self._headers:
        print("\n  Step 6: Clustering at {} similarity across {} samples".\
              format(self.paramsdict["clust_threshold"], len(csamples)))

    ## Check if all/none in the right state
    if not csamples:
        raise IPyradError(FIRST_RUN_5)

    ## skip when every ready sample already finished step 6
    if (not force) and all([i.stats.state >= 6 for i in csamples]):
        print(DATABASE_EXISTS.format(len(samples)))
        return

    ## run if this point is reached. We no longer check for existing
    ## h5 file, since checking Sample states should suffice.
    assemble.cluster_across.run(
        self,
        csamples,
        noreverse,
        force,
        randomseed,
        ipyclient,
        **kwargs)
q259268 | Assembly._samples_precheck | validation | def _samples_precheck(self, samples, mystep, force):
""" Return a list of samples that are actually ready for the next step.
Each step runs this prior to calling run, makes it easier to
centralize and normalize how each step is checking sample states.
mystep is the state produced by the current step.
"""
subsample = []
## filter by state
for sample in samples:
if sample.stats.state < mystep - 1:
LOGGER.debug("Sample {} not in proper state."\
.format(sample.name))
else:
subsample.append(sample)
return subsample | python | {
"resource": ""
} |
def combinefiles(filepath):
    """ Joins first and second read file names as (R1, R2) pairs. """
    ## all files matching the glob pattern
    fastqs = glob.glob(filepath)

    ## keep only first-read files; their names must carry the _R1_ tag
    firsts = [name for name in fastqs if "_R1_" in name]
    if not firsts:
        raise IPyradWarningExit("First read files names must contain '_R1_'.")

    ## the matching R2 file differs only by the _R1_/_R2_ tag
    return zip(firsts, [name.replace("_R1_", "_R2_") for name in firsts])
def get_barcode_func(data, longbar):
    """ Return the fastest barcode-extraction func for this data & longbar. """
    if longbar[1] != 'same':
        ## variable-length barcodes require an actual search
        def _variable(cutters, read1, longbar):
            """ finds barcode for variable barcode lengths"""
            return findbcode(cutters, longbar, read1)
        return _variable

    if data.paramsdict["datatype"] == '2brad':
        ## 2bRAD barcodes sit at the 3' end, just inside the cut site
        def _twobrad(cutters, read1, longbar):
            """ find barcode for 2bRAD data """
            return read1[1][:-(len(cutters[0][0]) + 1)][-longbar[0]:]
        return _twobrad

    ## fixed-length barcodes are simply the first longbar[0] bases
    def _fixed(_, read1, longbar):
        """ finds barcode for invariable length barcode data """
        return read1[1][:longbar[0]]
    return _fixed
def get_quart_iter(tups):
    """
    Return (generator, handle1, handle2) where the generator yields one
    fastq record (4 lines) at a time from tups[0], paired with the
    matching record from tups[1] if given, else paired with an endless
    stream of 0s.

    Fix: `itertools.izip` does not exist on Python 3; fall back to the
    builtin `zip`, which is equally lazy there.
    """
    ## lazy zipper on both versions: izip on py2, builtin zip on py3
    try:
        zipper = itertools.izip
    except AttributeError:
        zipper = zip

    if tups[0].endswith(".gz"):
        ofunc = gzip.open
    else:
        ofunc = open

    ## zipping an iterator with itself 4x yields 4-line records
    ofile1 = ofunc(tups[0], 'r')
    fr1 = iter(ofile1)
    quart1 = zipper(fr1, fr1, fr1, fr1)
    if tups[1]:
        ofile2 = ofunc(tups[1], 'r')
        fr2 = iter(ofile2)
        quart2 = zipper(fr2, fr2, fr2, fr2)
        quarts = zipper(quart1, quart2)
    else:
        ofile2 = 0
        ## iter(int, 1) is an endless stream of 0s standing in for R2
        quarts = zipper(quart1, iter(int, 1))

    ## make a generator
    def feedme(quarts):
        for quart in quarts:
            yield quart
    genquarts = feedme(quarts)

    ## return generator and handles
    return genquarts, ofile1, ofile2
def writetofastq(data, dsort, read):
    """
    Append the sorted reads in `dsort` ({sample_name: [fastq strings]})
    to each sample's tmp fastq file for read 1 or read 2.
    """
    tag = "R1" if read == 1 else "R2"
    for sname, reads in dsort.items():
        ## append to this sample's tmp file (created on first write)
        handle = os.path.join(data.dirs.fastqs,
                              "{}_{}_.fastq".format(sname, tag))
        with open(handle, 'a') as out:
            out.write("".join(reads))
def collate_files(data, sname, tmp1s, tmp2s):
    """
    Collate temp fastq files in tmp-dir into 1 gzipped sample.
    """
    ## prefer pigz (parallel) for compression when it is on the PATH
    proc = sps.Popen(['which', 'pigz'],
                     stderr=sps.PIPE, stdout=sps.PIPE).communicate()
    if proc[0].strip():
        compress = ["pigz"]
    else:
        compress = ["gzip"]

    def _collate(tmps, readtag):
        """ cat the tmp chunks, stream through the compressor, clean up """
        outpath = os.path.join(data.dirs.fastqs,
                               "{}_{}_.fastq.gz".format(sname, readtag))
        out = io.BufferedWriter(gzip.open(outpath, 'w'))
        catcmd = ['cat'] + list(tmps)
        proc1 = sps.Popen(catcmd, stderr=sps.PIPE, stdout=sps.PIPE)
        proc2 = sps.Popen(compress, stdin=proc1.stdout,
                          stderr=sps.PIPE, stdout=out)
        err = proc2.communicate()
        if proc2.returncode:
            raise IPyradWarningExit(
                "error in collate_files {} %s".format(readtag), err)
        proc1.stdout.close()
        out.close()
        ## then cleanup
        for tmpfile in tmps:
            os.remove(tmpfile)

    _collate(tmp1s, "R1")
    if 'pair' in data.paramsdict["datatype"]:
        _collate(tmp2s, "R2")
def estimate_optim(data, testfile, ipyclient):
    """
    Estimate a reasonable optim value by grabbing a chunk of sequences,
    decompressing and counting them, to estimate the full file size.
    """
    ## total size of the input file on disk
    insize = os.path.getsize(testfile)
    chunkpath = os.path.join(data.paramsdict["project_dir"],
                             "tmp-step1-count.fq")

    ## copy the first 10000 reads (40000 lines) into a tmp file of the
    ## same compression type so its on-disk size is comparable
    gzipped = testfile.endswith(".gz")
    if gzipped:
        src = gzip.open(testfile)
        dst = gzip.open(chunkpath, 'wb', compresslevel=5)
    else:
        src = open(testfile)
        dst = open(chunkpath, 'w')
    dst.write("".join(itertools.islice(src, 40000)))
    dst.close()
    src.close()

    ## the full-size / chunk-size ratio approximates nreads / 10000
    chunksize = os.path.getsize(chunkpath)
    os.remove(chunkpath)
    return int(insize / chunksize) * 10000
q259275 | _cleanup_and_die | validation | def _cleanup_and_die(data):
""" cleanup func for step 1 """
tmpfiles = glob.glob(os.path.join(data.dirs.fastqs, "tmp_*_R*.fastq"))
tmpfiles += glob.glob(os.path.join(data.dirs.fastqs, "tmp_*.p"))
for tmpf in tmpfiles:
os.remove(tmpf) | python | {
"resource": ""
} |
def splitfiles(data, raws, ipyclient):
    """ sends raws to be chunked"""
    ## create a tmpdir for chunked_files and a chunk optimizer
    tmpdir = os.path.join(data.paramsdict["project_dir"],
                          "tmp-chunks-" + data.name)
    if os.path.exists(tmpdir):
        shutil.rmtree(tmpdir)
    os.makedirs(tmpdir)

    ## chunk into 8M reads
    totalreads = estimate_optim(data, raws[0][0], ipyclient)
    optim = int(8e6)
    njobs = int(totalreads / (optim / 4.)) * len(raws)

    ## if more files than cpus (or too little data): skip chunking
    nosplit = (len(raws) > len(ipyclient)) or (totalreads < optim)

    ## map each input file name to its list of chunk tuples
    start = time.time()
    chunkfiles = {}
    for fidx, tups in enumerate(raws):
        handle = os.path.splitext(os.path.basename(tups[0]))[0]
        if nosplit:
            ## pass the whole (unchunked) file pair straight through
            chunkfiles[handle] = [tups]
        else:
            ## chunk the file using zcat_make_temps
            chunkfiles[handle] = zcat_make_temps(
                data, tups, fidx, tmpdir, optim, njobs, start)

    if not nosplit:
        print("")
    return chunkfiles
def putstats(pfile, handle, statdicts):
    """
    Merge the stats pickled in `pfile` into the running totals held in
    `statdicts` and return the updated tuple.

    Fix: the pickle file was opened in text mode ('r'), which breaks
    unpickling on Python 3; 'rb' is required there and is equivalent on
    Python 2.

    Parameters
    ----------
    pfile : str
        Path to a pickle containing (filestats, samplestats).
    handle : str
        Key in the per-file stats dict to accumulate filestats into.
    statdicts : tuple
        (perfile, fsamplehits, fbarhits, fmisses, fdbars) accumulators.
    """
    ## load in stats; binary mode so unpickling works on py2 and py3
    with open(pfile, 'rb') as infile:
        filestats, samplestats = pickle.load(infile)

    ## get dicts from statdicts tuple
    perfile, fsamplehits, fbarhits, fmisses, fdbars = statdicts

    ## pull new stats
    perfile[handle] += filestats

    ## update sample stats
    samplehits, barhits, misses, dbars = samplestats
    fsamplehits.update(samplehits)
    fbarhits.update(barhits)
    fmisses.update(misses)
    fdbars.update(dbars)

    ## repack the tuple and return
    return (perfile, fsamplehits, fbarhits, fmisses, fdbars)
def _countmatrix(lxs):
    """
    Fill a pairwise data-sharing matrix.

    Parameters
    ----------
    lxs : numpy.ndarray
        2-D array (nsamples x nloci) of per-locus coverage counts.

    Returns
    -------
    numpy.ndarray
        Square (nsamples x nsamples) array: upper-triangle cell [i, j]
        holds the summed coverage of sample i at loci also covered in
        sample j; the diagonal holds each sample's total coverage; the
        lower triangle is left at zero (as before).
    """
    nsamples = lxs.shape[0]
    share = np.zeros((nsamples, nsamples))
    ## bug fix: the original wrapped this in a redundant `for row in lxs`
    ## loop that recomputed every pairwise combination nsamples times;
    ## one pass over the combinations is sufficient. Also range() for py3.
    for samp1, samp2 in itertools.combinations(range(nsamples), 2):
        share[samp1, samp2] = lxs[samp1, lxs[samp2] > 0].sum()
    ## fill diagonal with total sample coverage
    for idx in range(nsamples):
        share[idx, idx] = lxs[idx].sum()
    return share
"resource": ""
} |
def paramname(param=""):
    """
    Return the parameter name for a params-file key/number.

    Looks up str(param) in the module-level `pinfo` dict and extracts the
    name token from its description string.

    Raises
    ------
    KeyError, ValueError
        Re-raised when the key is not recognized (after printing it).
    """
    try:
        name = pinfo[str(param)][0].strip().split(" ")[1]
    except (KeyError, ValueError) as err:
        ## bug fix: the original message had no {} placeholder, so the
        ## unrecognized key was silently dropped from the output.
        print("\tKey name/number not recognized - {}".format(param), err)
        raise
    return name
"resource": ""
} |
def save_json2(data):
    """
    Assemble a JSON-ready OrderedDict of an assembly's output info.

    NOTE(review): this builds the dict but neither returns nor writes it;
    it appears to be an unfinished/legacy variant of save_json.
    """
    attrs = data.__dict__
    datadict = OrderedDict()
    datadict["outfiles"] = attrs["outfiles"]
    datadict["stats_files"] = dict(attrs["stats_files"])
    datadict["stats_dfs"] = attrs["stats_dfs"]
"resource": ""
} |
q259281 | save_json | validation | def save_json(data):
""" Save assembly and samples as json """
## data as dict
#### skip _ipcluster because it's made new
#### skip _headers because it's loaded new
#### statsfiles save only keys
#### samples save only keys
datadict = OrderedDict([
("_version", data.__dict__["_version"]),
("_checkpoint", data.__dict__["_checkpoint"]),
("name", data.__dict__["name"]),
("dirs", data.__dict__["dirs"]),
("paramsdict", data.__dict__["paramsdict"]),
("samples", data.__dict__["samples"].keys()),
("populations", data.__dict__["populations"]),
("database", data.__dict__["database"]),
("clust_database", data.__dict__["clust_database"]),
("outfiles", data.__dict__["outfiles"]),
("barcodes", data.__dict__["barcodes"]),
("stats_files", data.__dict__["stats_files"]),
("_hackersonly", data.__dict__["_hackersonly"]),
])
## sample dict
## NOTE(review): iteritems is python2-only; each Sample serializes itself
sampledict = OrderedDict([])
for key, sample in data.samples.iteritems():
sampledict[key] = sample._to_fulldict()
## json format it using cumstom Encoder class
fulldumps = json.dumps({
"assembly": datadict,
"samples": sampledict
},
cls=Encoder,
sort_keys=False, indent=4, separators=(",", ":"),
)
## save to file
assemblypath = os.path.join(data.dirs.project, data.name+".json")
if not os.path.exists(data.dirs.project):
os.mkdir(data.dirs.project)
## protect save from interruption
## retry until the full string is written so a Ctrl-C mid-write cannot
## leave a truncated JSON file on disk
done = 0
while not done:
try:
with open(assemblypath, 'w') as jout:
jout.write(fulldumps)
done = 1
except (KeyboardInterrupt, SystemExit):
print('.')
continue
"resource": ""
} |
def encode(self, obj):
    """
    Encode `obj` to a JSON string, tagging tuples for round-tripping.

    Tuples are indistinguishable from lists once serialized, so each
    tuple is replaced by {'__tuple__': True, 'items': <tuple>} before
    delegating to the standard JSONEncoder.
    """
    def hint_tuples(item):
        """Recursively embed __tuple__ hints inside containers."""
        if isinstance(item, tuple):
            return {'__tuple__': True, 'items': item}
        if isinstance(item, list):
            return [hint_tuples(e) for e in item]
        if isinstance(item, dict):
            ## fix: .items() works on python2 and python3 alike
            ## (iteritems is python2-only)
            return {
                key: hint_tuples(val) for key, val in item.items()
            }
        else:
            return item
    return super(Encoder, self).encode(hint_tuples(obj))
"resource": ""
} |
q259283 | depthplot | validation | def depthplot(data, samples=None, dims=(None,None), canvas=(None,None),
xmax=50, log=False, outprefix=None, use_maxdepth=False):
""" plots histogram of coverages across clusters"""
## select samples to be plotted, requires depths info
if not samples:
samples = data.samples.keys()
samples.sort()
subsamples = OrderedDict([(i, data.samples[i]) for i in samples])
## get canvas dimensions based on n-samples
if any(dims):
## user-supplied dimensions (...)
print("userdims")
else:
if len(subsamples) <= 4:
## set dimension to N samples
dims = (1, len(subsamples))
else:
## NOTE(review): integer division intended here (python2 `/`)
dims = (len(subsamples)/4, 4)
## create canvas
if any(canvas):
print("usercanvas")
canvas = toyplot.Canvas(width=canvas[0], height=canvas[1])
else:
canvas = toyplot.Canvas(width=200*dims[1], height=150*dims[0])
## get all of the data arrays
for panel, sample in enumerate(subsamples):
## statistical called bins
statdat = subsamples[sample].depths
statdat = statdat[statdat >= data.paramsdict["mindepth_statistical"]]
if use_maxdepth:
## NOTE(review): this branch builds a dict comprehension from the
## depths array, unlike the boolean-mask style used for the other
## two panels below -- looks inconsistent, confirm intent
statdat = {i:j for (i, j) in statdat if \
i < data.paramsdict["maxdepth"]}
sdat = np.histogram(statdat, range(50))
## majrule called bins
statdat = subsamples[sample].depths
statdat = statdat[statdat < data.paramsdict["mindepth_statistical"]]
statdat = statdat[statdat >= data.paramsdict["mindepth_majrule"]]
if use_maxdepth:
statdat = statdat[statdat < data.paramsdict["maxdepth"]]
mdat = np.histogram(statdat, range(50))
## excluded bins
tots = data.samples[sample].depths
tots = tots[tots < data.paramsdict["mindepth_majrule"]]
if use_maxdepth:
tots = tots[tots < data.paramsdict["maxdepth"]]
edat = np.histogram(tots, range(50))
## fill in each panel of canvas with a sample
axes = canvas.cartesian(grid=(dims[0], dims[1], panel), gutter=25)
axes.x.domain.xmax = xmax
axes.label.text = sample
if log:
axes.y.scale = "log"
# heights = np.column_stack((sdat,mdat,edat))
axes.bars(sdat)
axes.bars(edat)
axes.bars(mdat)
## return objects to be saved...
if outprefix:
toyplot.html.render(canvas, fobj=outprefix+".html")
toyplot.svg.render(canvas, fobj=outprefix+".svg")
"resource": ""
} |
def _parse_00(ofile):
    """
    Parse a bpp algorithm-00 outfile and return the results-summary
    table as a pandas DataFrame (parameters as the index after the
    transpose).
    """
    with open(ofile) as infile:
        ## the summary table is everything after the header sentinel;
        ## a leading " " pads the top-left corner cell of the table
        arr = np.array(
            [" "] + infile.read().split("Summary of MCMC results\n\n\n")[1:][0]\
            .strip().split())
    ## reshape into the fixed 12-row summary table
    rows = 12
    ## bug fix: floor division -- true division yields a float under
    ## python3 and makes reshape() raise
    cols = (arr.shape[0] + 1) // rows
    arr = arr.reshape(rows, cols)
    ## label rows/columns, then transpose so parameters index the frame
    df = pd.DataFrame(
        data=arr[1:, 1:],
        columns=arr[0, 1:],
        index=arr[1:, 0],
    ).T
    return df
"resource": ""
} |
def _parse_01(ofiles, individual=False):
    """
    Parse bpp algorithm-01 (species delimitation) outfiles.

    Parameters
    ----------
    ofiles : list of str
        Paths to replicate outfiles.
    individual : bool
        If True return one DataFrame per replicate; otherwise return a
        single DataFrame of posteriors averaged across replicates.
    """
    ## collect the posterior column from each replicate outfile
    cols = []
    for ofile in ofiles:
        ## parse file
        with open(ofile) as infile:
            dat = infile.read()
        lastbits = dat.split(".mcmc.txt\n\n")[1:]
        results = lastbits[0].split("\n\n")[0].split()
        ## bug fix: floor division so the shape stays integral on python3
        shape = (((len(results) - 3) // 4), 4)
        dat = np.array(results[3:]).reshape(shape)
        cols.append(dat[:, 3].astype(float))
    if not individual:
        ## mean posterior across replicates
        cols = np.array(cols)
        cols = cols.sum(axis=0) / len(ofiles)
        dat[:, 3] = cols.astype(str)
        ## format as a DF
        df = pd.DataFrame(dat[:, 1:])
        df.columns = ["delim", "prior", "posterior"]
        nspecies = 1 + np.array([list(i) for i in dat[:, 1]], dtype=int).sum(axis=1)
        df["nspecies"] = nspecies
        return df
    else:
        ## one DataFrame per replicate; as before, the delim/prior columns
        ## come from the last parsed file
        res = []
        for idx in range(len(cols)):
            x = dat
            x[:, 3] = cols[idx].astype(str)
            x = pd.DataFrame(x[:, 1:])
            x.columns = ['delim', 'prior', 'posterior']
            nspecies = 1 + np.array([list(d) for d in dat[:, 1]], dtype=int).sum(axis=1)
            x["nspecies"] = nspecies
            res.append(x)
        return res
"resource": ""
} |
def _load_existing_results(self, name, workdir):
    """
    Scan this object's workdir for result files from previous replicate
    runs and register any that are not already tracked in self.files.
    Parameter settings are NOT reloaded.

    NOTE: the `name`/`workdir` arguments are not used by the search
    (self.name/self.workdir are); they are kept for interface
    compatibility.
    """
    prefix = os.path.realpath(os.path.join(self.workdir, self.name))
    found = {
        "mcmcfiles": glob.glob(prefix + "_r*.mcmc.txt"),
        "outfiles": glob.glob(prefix + "_r*.out.txt"),
        "treefiles": glob.glob(prefix + "_r*.tre"),
    }
    for attr, paths in found.items():
        known = getattr(self.files, attr)
        for path in paths:
            if path not in known:
                known.append(path)
"resource": ""
} |
def summarize_results(self, individual_results=False):
    """
    Summarize results across replicate runs.

    With individual_results=True a list of per-replicate DataFrames is
    returned; otherwise replicates are combined into a single summary
    table. Only algorithms 00 and 01 are supported so far.
    """
    delimit = self.params.infer_delimit
    sptree = self.params.infer_sptree
    ## algorithm 00: neither delimitation nor species-tree inference
    if not delimit and not sptree:
        if individual_results:
            ## one parsed table per replicate outfile
            return [_parse_00(ofile) for ofile in self.files.outfiles]
        ## concatenate the replicate mcmc tables and describe them
        frames = [
            pd.read_csv(mcmc, sep='\t', index_col=0)
            for mcmc in self.files.mcmcfiles
        ]
        return pd.concat(frames).describe().T
    ## algorithm 01: delimitation without species-tree inference
    if delimit and not sptree:
        return _parse_01(self.files.outfiles, individual=individual_results)
    ## remaining algorithm types are not handled yet
    return "summary function not yet ready for this type of result"
"resource": ""
} |
q259288 | multi_muscle_align | validation | def multi_muscle_align(data, samples, ipyclient):
"""
Sends the cluster bits to nprocessors for muscle alignment. They return
with indel.h5 handles to be concatenated into a joint h5.
"""
LOGGER.info("starting alignments")
## get client
lbview = ipyclient.load_balanced_view()
start = time.time()
printstr = " aligning clusters | {} | s6 |"
elapsed = datetime.timedelta(seconds=int(time.time()-start))
progressbar(20, 0, printstr.format(elapsed), spacer=data._spacer)
## submit clustbits as jobs to engines. The chunkfiles are removed when they
## are finished so this job can even be restarted if it was half finished,
## though that is probably rare.
path = os.path.join(data.tmpdir, data.name + ".chunk_*")
clustbits = glob.glob(path)
jobs = {}
for idx in xrange(len(clustbits)):
args = [data, samples, clustbits[idx]]
jobs[idx] = lbview.apply(persistent_popen_align3, *args)
allwait = len(jobs)
elapsed = datetime.timedelta(seconds=int(time.time()-start))
progressbar(20, 0, printstr.format(elapsed), spacer=data._spacer)
## print progress while bits are aligning
while 1:
finished = [i.ready() for i in jobs.values()]
fwait = sum(finished)
elapsed = datetime.timedelta(seconds=int(time.time()-start))
progressbar(allwait, fwait, printstr.format(elapsed), spacer=data._spacer)
time.sleep(0.1)
if all(finished):
break
## check for errors in muscle_align_across
## any failed engine job aborts the whole step with its exception text
keys = jobs.keys()
for idx in keys:
if not jobs[idx].successful():
LOGGER.error("error in persistent_popen_align %s", jobs[idx].exception())
raise IPyradWarningExit("error in step 6 {}".format(jobs[idx].exception()))
del jobs[idx]
print("")
"resource": ""
} |
def concatclusts(outhandle, alignbits):
    """
    Concatenate sorted aligned cluster tmpfiles into one gzipped file,
    appending the "//\\n//\\n" cluster separator after each chunk.

    Parameters
    ----------
    outhandle : str
        Path of the gzip file to write.
    alignbits : iterable of str
        Paths of the chunk files to concatenate, in order.
    """
    with gzip.open(outhandle, 'wb') as out:
        for fname in alignbits:
            ## bug fix: read the chunk as bytes -- mixing a text-mode
            ## read with a binary gzip write raises on python3
            ## (identical behavior on python2, where str is bytes)
            with open(fname, 'rb') as infile:
                out.write(infile.read() + b"//\n//\n")
"resource": ""
} |
q259290 | fill_dups_arr | validation | def fill_dups_arr(data):
"""
fills the duplicates array from the multi_muscle_align tmp files
"""
## build the duplicates array
duplefiles = glob.glob(os.path.join(data.tmpdir, "duples_*.tmp.npy"))
## sort chunks numerically by their end index; the ".tmp.npy" suffix is
## 8 characters, so [:-8] leaves the integer embedded in the filename
duplefiles.sort(key=lambda x: int(x.rsplit("_", 1)[-1][:-8]))
## enter the duplicates filter into super h5 array
io5 = h5py.File(data.clust_database, 'r+')
dfilter = io5["duplicates"]
## enter all duple arrays into full duplicates array
init = 0
for dupf in duplefiles:
end = int(dupf.rsplit("_", 1)[-1][:-8])
inarr = np.load(dupf)
dfilter[init:end] = inarr
init += end-init
#os.remove(dupf)
#del inarr
## continued progress bar
LOGGER.info("all duplicates: %s", dfilter[:].sum())
io5.close()
"resource": ""
} |
def build_tmp_h5(data, samples):
    """
    Build the temporary h5 arrays (seeds/hits) used for quickly indexing
    consens reads from catg files; written as an int binary file.
    """
    ## sorted sample names define the column order in the arrays
    snames = sorted(i.name for i in samples)
    uhandle = os.path.join(data.dirs.across, data.name + ".utemp.sort")
    bseeds = os.path.join(data.dirs.across, data.name + ".tmparrs.h5")
    get_seeds_and_hits(uhandle, bseeds, snames)
"resource": ""
} |
def get_nloci(data):
    """Return the number of loci recorded in the tmp h5 seeds array."""
    bseeds = os.path.join(data.dirs.across, data.name + ".tmparrs.h5")
    with h5py.File(bseeds) as io5:
        nloci = io5["seedsarr"].shape[0]
    return nloci
"resource": ""
} |
q259293 | singlecat | validation | def singlecat(data, sample, bseeds, sidx, nloci):
"""
Orders catg data for each sample into the final locus order. This allows
all of the individual catgs to simply be combined later. They are also in
the same order as the indels array, so indels are inserted from the indel
array that is passed in.
"""
LOGGER.info("in single cat here")
## enter ref data?
isref = 'reference' in data.paramsdict["assembly_method"]
## grab seeds and hits info for this sample
with h5py.File(bseeds, 'r') as io5:
## get hits just for this sample and sort them by sample order index
hits = io5["uarr"][:]
hits = hits[hits[:, 1] == sidx, :]
#hits = hits[hits[:, 2].argsort()]
## get seeds just for this sample and sort them by sample order index
seeds = io5["seedsarr"][:]
seeds = seeds[seeds[:, 1] == sidx, :]
#seeds = seeds[seeds[:, 2].argsort()]
## combined rows sorted by locus index; presumably columns are
## [locus idx, sample idx, row in the sample's catg] -- TODO confirm
full = np.concatenate((seeds, hits))
full = full[full[:, 0].argsort()]
## still using max+20 len limit, rare longer merged reads get trimmed
## we need to allow room for indels to be added too
maxlen = data._hackersonly["max_fragment_length"] + 20
## we'll fill a new catg and alleles arr for this sample in locus order,
## which is known from seeds and hits
ocatg = np.zeros((nloci, maxlen, 4), dtype=np.uint32)
onall = np.zeros(nloci, dtype=np.uint8)
ochrom = np.zeros((nloci, 3), dtype=np.int64)
## grab the sample's data and write to ocatg and onall
if not sample.files.database:
raise IPyradWarningExit("missing catg file - {}".format(sample.name))
with h5py.File(sample.files.database, 'r') as io5:
## get it and delete it
catarr = io5["catg"][:]
tmp = catarr[full[:, 2], :maxlen, :]
del catarr
## scatter this sample's rows into their final locus positions
ocatg[full[:, 0], :tmp.shape[1], :] = tmp
del tmp
## get it and delete it
nall = io5["nalleles"][:]
onall[full[:, 0]] = nall[full[:, 2]]
del nall
## fill the reference data
if isref:
chrom = io5["chroms"][:]
ochrom[full[:, 0]] = chrom[full[:, 2]]
del chrom
## get indel locations for this sample
ipath = os.path.join(data.dirs.across, data.name+".tmp.indels.hdf5")
with h5py.File(ipath, 'r') as ih5:
indels = ih5["indels"][sidx, :, :maxlen]
## insert indels into ocatg
newcatg = inserted_indels(indels, ocatg)
del ocatg, indels
## save individual tmp h5 data
smpio = os.path.join(data.dirs.across, sample.name+'.tmp.h5')
with h5py.File(smpio, 'w') as oh5:
oh5.create_dataset("icatg", data=newcatg, dtype=np.uint32)
oh5.create_dataset("inall", data=onall, dtype=np.uint8)
if isref:
oh5.create_dataset("ichrom", data=ochrom, dtype=np.int64)
"resource": ""
} |
q259294 | write_to_fullarr | validation | def write_to_fullarr(data, sample, sidx):
""" writes arrays to h5 disk """
## enter ref data?
#isref = 'reference' in data.paramsdict["assembly_method"]
LOGGER.info("writing fullarr %s %s", sample.name, sidx)
## save big arrays to disk temporarily
with h5py.File(data.clust_database, 'r+') as io5:
## open views into the arrays we plan to fill
chunk = io5["catgs"].attrs["chunksize"][0]
catg = io5["catgs"]
nall = io5["nalleles"]
## adding an axis to newcatg makes it write about 1000X faster.
smpio = os.path.join(data.dirs.across, sample.name+'.tmp.h5')
with h5py.File(smpio) as indat:
## grab all of the data from this sample's arrays
newcatg = indat["icatg"] #[:]
onall = indat["inall"] #[:]
## enter it into the full array one chunk at a time, writing this
## sample's column (sidx) of the catgs array
for cidx in xrange(0, catg.shape[0], chunk):
end = cidx + chunk
catg[cidx:end, sidx:sidx+1, :] = np.expand_dims(newcatg[cidx:end, :], axis=1)
nall[:, sidx:sidx+1] = np.expand_dims(onall, axis=1)
"resource": ""
} |
q259295 | dask_chroms | validation | def dask_chroms(data, samples):
"""
A dask relay function to fill chroms for all samples
"""
## example concatenating with dask
h5s = [os.path.join(data.dirs.across, s.name+".tmp.h5") for s in samples]
handles = [h5py.File(i) for i in h5s]
dsets = [i['/ichrom'] for i in handles]
arrays = [da.from_array(dset, chunks=(10000, 3)) for dset in dsets]
stack = da.stack(arrays, axis=2)
## max chrom (should we check for variable hits? if so, things can get wonk)
maxchrom = da.max(stack, axis=2)[:, 0]
## max pos
maxpos = da.max(stack, axis=2)[:, 2]
## min pos
## zeros mean "no data for this sample", so replace them with int64 max
## before taking the min so empty cells can't win
mask = stack == 0
stack[mask] = 9223372036854775807 ## max int64 value
minpos = da.min(stack, axis=2)[:, 1]
final = da.stack([maxchrom, minpos, maxpos], axis=1)
final.to_hdf5(data.clust_database, "/chroms")
## close the h5 handles
_ = [i.close() for i in handles]
"resource": ""
} |
def inserted_indels(indels, ocatg):
    """
    Return a copy of the catg array with zero rows inserted at indel
    positions.

    Parameters
    ----------
    indels : numpy.ndarray
        (nloci, maxlen) boolean array marking indel sites per locus.
    ocatg : numpy.ndarray
        (nloci, maxlen, 4) catg depth array.

    Returns
    -------
    numpy.ndarray
        Array of ocatg.shape where, per locus, the original site rows are
        shifted past any indel positions (indel rows remain all zero).
    """
    newcatg = np.zeros(ocatg.shape, dtype=np.uint32)
    for iloc in range(ocatg.shape[0]):
        ## positions of indels in this locus
        indidx = np.where(indels[iloc, :])[0]
        ## bug fix: test the number of indices, not their values --
        ## np.any([0]) is False, so a locus whose only indel sat at
        ## position 0 was previously treated as having no indels
        if indidx.size:
            ## boolean mask of the non-indel positions (vectorized in
            ## place of the per-index python loop)
            allrows = np.arange(ocatg.shape[1])
            mask = np.ones(allrows.shape[0], dtype=np.bool_)
            mask[indidx] = False
            not_idx = allrows[mask]
            ## shift the original data into the non-indel slots
            newcatg[iloc][not_idx] = ocatg[iloc, :not_idx.shape[0]]
        else:
            newcatg[iloc] = ocatg[iloc]
    return newcatg
"resource": ""
} |
def count_seeds(usort):
    """
    Count distinct seeds in a sorted utemp file by piping its second
    column through `cut -f 2 | uniq | wc`.
    """
    with open(usort, 'r') as insort:
        cut = sps.Popen(["cut", "-f", "2"], stdin=insort,
                        stdout=sps.PIPE, close_fds=True)
        uniq = sps.Popen(["uniq"], stdin=cut.stdout,
                         stdout=sps.PIPE, close_fds=True)
        wc = sps.Popen(["wc"], stdin=uniq.stdout,
                       stdout=sps.PIPE, close_fds=True)
        out = wc.communicate()[0]
    ## first wc field is the line count == number of unique seeds
    nseeds = int(out.split()[0])
    for proc in (cut, uniq, wc):
        proc.stdout.close()
    return nseeds
"resource": ""
} |
def sort_seeds(uhandle, usort):
    """Sort the utemp cluster hits by seed (field 2), writing to usort."""
    sorter = sps.Popen(
        ["sort", "-k", "2", uhandle, "-o", usort],
        close_fds=True,
    )
    sorter.communicate()
"resource": ""
} |
q259299 | build_clustbits | validation | def build_clustbits(data, ipyclient, force):
"""
Reconstitutes clusters from .utemp and htemp files and writes them
to chunked files for aligning in muscle.
"""
## If you run this step then we clear all tmp .fa and .indel.h5 files
if os.path.exists(data.tmpdir):
shutil.rmtree(data.tmpdir)
os.mkdir(data.tmpdir)
## parallel client
lbview = ipyclient.load_balanced_view()
start = time.time()
printstr = " building clusters | {} | s6 |"
elapsed = datetime.timedelta(seconds=int(time.time()-start))
progressbar(3, 0, printstr.format(elapsed), spacer=data._spacer)
uhandle = os.path.join(data.dirs.across, data.name+".utemp")
usort = os.path.join(data.dirs.across, data.name+".utemp.sort")
## async1 stays "" when the sort step is skipped (see error check below)
async1 = ""
## skip usorting if not force and already exists
if not os.path.exists(usort) or force:
## send sort job to engines. Sorted seeds allows us to work through
## the utemp file one locus at a time instead of reading all into mem.
LOGGER.info("building reads file -- loading utemp file into mem")
async1 = lbview.apply(sort_seeds, *(uhandle, usort))
while 1:
elapsed = datetime.timedelta(seconds=int(time.time()-start))
progressbar(3, 0, printstr.format(elapsed), spacer=data._spacer)
if async1.ready():
break
else:
time.sleep(0.1)
## send count seeds job to engines.
async2 = lbview.apply(count_seeds, usort)
while 1:
elapsed = datetime.timedelta(seconds=int(time.time()-start))
progressbar(3, 1, printstr.format(elapsed), spacer=data._spacer)
if async2.ready():
break
else:
time.sleep(0.1)
## wait for both to finish while printing progress timer
nseeds = async2.result()
## send the clust bit building job to work and track progress
async3 = lbview.apply(sub_build_clustbits, *(data, usort, nseeds))
while 1:
elapsed = datetime.timedelta(seconds=int(time.time()-start))
progressbar(3, 2, printstr.format(elapsed), spacer=data._spacer)
if async3.ready():
break
else:
time.sleep(0.1)
elapsed = datetime.timedelta(seconds=int(time.time()-start))
progressbar(3, 3, printstr.format(elapsed), spacer=data._spacer)
print("")
## check for errors
for job in [async1, async2, async3]:
try:
if not job.successful():
raise IPyradWarningExit(job.result())
except AttributeError:
## If we skip usorting then async1 == "" so the call to
## successful() raises, but we can ignore it.
pass
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.