_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q259300
sub_build_clustbits
validation
def sub_build_clustbits(data, usort, nseeds):
    """
    A subfunction of build_clustbits to allow progress tracking. This func
    splits the unaligned clusters into bits for aligning on separate cores.

    NOTE(review): Python-2-only code (itertools.izip, iterator .next(),
    tuple-in-for-comprehension syntax). Returns (clustbits, loci): the list
    of chunk file paths written to data.tmpdir and the number of loci written.
    """
    ## load FULL concat fasta file into a dict. This could cause RAM issues.
    ## this file has iupac codes in it, not ambigs resolved, and is gzipped.
    LOGGER.info("loading full _catcons file into memory")
    allcons = {}
    conshandle = os.path.join(data.dirs.across, data.name+"_catcons.tmp")
    with gzip.open(conshandle, 'rb') as iocons:
        ## pair up lines two at a time: (name line, sequence line)
        cons = itertools.izip(*[iter(iocons)]*2)
        for namestr, seq in cons:
            nnn, sss = [i.strip() for i in namestr, seq]
            ## drop the leading ">" from the fasta name when keying
            allcons[nnn[1:]] = sss

    ## set optim to approximately 4 chunks per core. Smaller allows for a bit
    ## cleaner looking progress bar. 40 cores will make 160 files.
    optim = ((nseeds // (data.cpus*4)) + (nseeds % (data.cpus*4)))
    LOGGER.info("building clustbits, optim=%s, nseeds=%s, cpus=%s",
                optim, nseeds, data.cpus)

    ## iterate through usort grabbing seeds and matches
    with open(usort, 'rb') as insort:
        ## iterator, seed null, and seqlist null
        isort = iter(insort)
        loci = 0
        lastseed = 0
        fseqs = []
        seqlist = []
        seqsize = 0
        while 1:
            ## grab the next line; usort rows are (hit, seed, orientation)
            try:
                hit, seed, ori = isort.next().strip().split()
            except StopIteration:
                break

            try:
                ## if same seed, append match
                if seed != lastseed:
                    ## store the last fseq, count it, and clear it
                    if fseqs:
                        seqlist.append("\n".join(fseqs))
                        seqsize += 1
                        fseqs = []
                    ## occasionally write to file
                    if seqsize >= optim:
                        if seqlist:
                            loci += seqsize
                            with open(os.path.join(data.tmpdir,
                                data.name+".chunk_{}".format(loci)), 'w') as clustsout:
                                LOGGER.debug("writing chunk - seqsize {} loci {} {}"\
                                             .format(seqsize, loci, clustsout.name))
                                clustsout.write("\n//\n//\n".join(seqlist)+"\n//\n//\n")
                            ## reset list and counter
                            seqlist = []
                            seqsize = 0
                    ## store the new seed on top of fseq
                    fseqs.append(">{}\n{}".format(seed, allcons[seed]))
                    lastseed = seed

                ## add match to the seed
                seq = allcons[hit]
                ## revcomp if orientation is reversed
                if ori == "-":
                    seq = fullcomp(seq)[::-1]
                fseqs.append(">{}\n{}".format(hit, seq))
            except KeyError as inst:
                ## Caught bad seed or hit? Log and continue.
                LOGGER.error("Bad Seed/Hit: seqsize {}\tloci {}\tseed {}\thit {}"\
                             .format(seqsize, loci, seed, hit))

    ## write whatever is left over to the clusts file
    if fseqs:
        seqlist.append("\n".join(fseqs))
        seqsize += 1
        loci += seqsize
    if seqlist:
        with open(os.path.join(data.tmpdir,
            data.name+".chunk_{}".format(loci)), 'w') as clustsout:
            clustsout.write("\n//\n//\n".join(seqlist)+"\n//\n//\n")

    ## final progress and cleanup; free the (potentially huge) cons dict
    del allcons
    clustbits = glob.glob(os.path.join(data.tmpdir, data.name+".chunk_*"))

    ## return stuff
    return clustbits, loci
python
{ "resource": "" }
q259301
cleanup_tempfiles
validation
def cleanup_tempfiles(data):
    """
    Remove stale temporary files from earlier steps. Called either in
    substep 1 or after the final substep, so tempfiles survive long enough
    to restart interrupted jobs but are purged once no longer needed.
    """
    ## alignment-related temp files living in the tmp directory
    for tmpfile in (glob.glob(os.path.join(data.tmpdir, "*.fa")) +
                    glob.glob(os.path.join(data.tmpdir, "*.npy"))):
        if os.path.exists(tmpfile):
            os.remove(tmpfile)

    ## cluster-related files in the across directory, by known suffix
    suffixes = [
        ".utemp",
        ".htemp",
        "_catcons.tmp",
        "_cathaps.tmp",
        "_catshuf.tmp",
        "_catsort.tmp",
        ".tmparrs.h5",
        ".tmp.indels.hdf5",
    ]
    for suffix in suffixes:
        target = os.path.join(data.dirs.across, data.name + suffix)
        if os.path.exists(target):
            os.remove(target)

    ## singlecat-related h5 files
    for smpio in glob.glob(os.path.join(data.dirs.across, '*.tmp.h5')):
        if os.path.exists(smpio):
            os.remove(smpio)
python
{ "resource": "" }
q259302
assembly_cleanup
validation
def assembly_cleanup(data):
    """
    Build and write the step-2 (rawedit) stats summary for an Assembly.

    Parameters:
        data -- Assembly object; reads data._build_stat("s2") and writes
                the result table to <data.dirs.edits>/s2_rawedit_stats.txt.

    FIX: `np.int` was removed in NumPy 1.24; the builtin `int` is the
    drop-in equivalent (np.int was always an alias for it).
    """
    ## build s2 results data frame
    data.stats_dfs.s2 = data._build_stat("s2")
    data.stats_files.s2 = os.path.join(data.dirs.edits, 's2_rawedit_stats.txt')

    ## write stats for all samples; NaN -> 0 so counts print as integers
    with io.open(data.stats_files.s2, 'w', encoding='utf-8') as outfile:
        data.stats_dfs.s2.fillna(value=0).astype(int).to_string(outfile)
python
{ "resource": "" }
q259303
parse_single_results
validation
def parse_single_results(data, sample, res1):
    """ parse results from cutadapt into sample data"""
    ## reset the stat fields we are about to (re)fill
    stats = sample.stats_dfs.s2
    stats["trim_adapter_bp_read1"] = 0
    stats["trim_quality_bp_read1"] = 0
    stats["reads_filtered_by_Ns"] = 0
    stats["reads_filtered_by_minlen"] = 0
    stats["reads_passed_filter"] = 0

    ## (marker substring, word index in line, stat key) for the cutadapt report
    targets = [
        ("Total reads processed:", 3, "reads_raw"),
        ("Reads with adapters:", 3, "trim_adapter_bp_read1"),
        ("Quality-trimmed", 1, "trim_quality_bp_read1"),
        ("Reads that were too short", 5, "reads_filtered_by_minlen"),
        ("Reads with too many N", 5, "reads_filtered_by_Ns"),
        ("Reads written (passing filters):", 4, "reads_passed_filter"),
    ]

    ## scan the report line by line; numbers carry thousands separators
    for line in res1.strip().split("\n"):
        for marker, widx, key in targets:
            if marker in line:
                stats[key] = int(line.split()[widx].replace(",", ""))

    ## save to stats summary
    if stats.reads_passed_filter:
        sample.stats.state = 2
        sample.stats.reads_passed_filter = stats.reads_passed_filter
        sample.files.edits = [
            (OPJ(data.dirs.edits, sample.name+".trimmed_R1_.fastq.gz"), 0)]
        ## write the long form output to the log file.
        LOGGER.info(res1)
    else:
        print("{}No reads passed filtering in Sample: {}".format(data._spacer, sample.name))
python
{ "resource": "" }
q259304
run2
validation
def run2(data, samples, force, ipyclient): """ Filter for samples that are already finished with this step, allow others to run, pass them to parallel client function to filter with cutadapt. """ ## create output directories data.dirs.edits = os.path.join(os.path.realpath( data.paramsdict["project_dir"]), data.name+"_edits") if not os.path.exists(data.dirs.edits): os.makedirs(data.dirs.edits) ## get samples subsamples = choose_samples(samples, force) ## only allow extra adapters in filters==3, ## and add poly repeats if not in list of adapters if int(data.paramsdict["filter_adapters"]) == 3: if not data._hackersonly["p3_adapters_extra"]: for poly in ["A"*8, "T"*8, "C"*8, "G"*8]: data._hackersonly["p3_adapters_extra"].append(poly) if not data._hackersonly["p5_adapters_extra"]: for poly in ["A"*8, "T"*8, "C"*8, "G"*8]: data._hackersonly["p5_adapters_extra"].append(poly) else: data._hackersonly["p5_adapters_extra"] = [] data._hackersonly["p3_adapters_extra"] = [] ## concat is not parallelized (since it's disk limited, generally) subsamples = concat_reads(data, subsamples, ipyclient) ## cutadapt is parallelized by ncores/2 because cutadapt spawns threads lbview = ipyclient.load_balanced_view(targets=ipyclient.ids[::2]) run_cutadapt(data, subsamples, lbview) ## cleanup is ... assembly_cleanup(data)
python
{ "resource": "" }
q259305
concat_reads
validation
def concat_reads(data, subsamples, ipyclient):
    """
    Concatenate if multiple input files for a single sample.

    FIX: the result-collection loop variable was named `async`, which is a
    reserved keyword in Python 3.7+ (SyntaxError); renamed to `sname`.
    Behavior is otherwise unchanged.
    """
    ## concatenate reads if they come from merged assemblies.
    if any([len(i.files.fastqs) > 1 for i in subsamples]):
        ## run on single engine for now
        start = time.time()
        printstr = " concatenating inputs | {} | s2 |"
        finished = 0
        catjobs = {}
        for sample in subsamples:
            if len(sample.files.fastqs) > 1:
                catjobs[sample.name] = ipyclient[0].apply(
                    concat_multiple_inputs, *(data, sample))
            else:
                sample.files.concat = sample.files.fastqs

        ## wait for all to finish, updating the progress bar as we go
        while 1:
            finished = sum([i.ready() for i in catjobs.values()])
            elapsed = datetime.timedelta(seconds=int(time.time()-start))
            progressbar(len(catjobs), finished, printstr.format(elapsed), spacer=data._spacer)
            time.sleep(0.1)
            if finished == len(catjobs):
                print("")
                break

        ## collect results, which are concat file handles.
        for sname in catjobs:
            if catjobs[sname].successful():
                data.samples[sname].files.concat = catjobs[sname].result()
            else:
                error = catjobs[sname].result()
                LOGGER.error("error in step2 concat %s", error)
                raise IPyradWarningExit("error in step2 concat: {}".format(error))
    else:
        for sample in subsamples:
            ## just copy fastqs handles to concat attribute
            sample.files.concat = sample.files.fastqs

    return subsamples
python
{ "resource": "" }
q259306
run_cutadapt
validation
def run_cutadapt(data, subsamples, lbview):
    """
    Sends fastq files to cutadapt.

    FIXES: (1) the result-collection loop used `async` as its variable,
    a reserved keyword in Python 3.7+; renamed to `sname`. (2) removed the
    stray pre-loop `async = sample.name`, which raised NameError when
    subsamples was empty and was immediately overwritten otherwise.
    """
    ## choose cutadapt function based on datatype
    start = time.time()
    printstr = " processing reads | {} | s2 |"
    finished = 0
    rawedits = {}

    ## sort subsamples so that the biggest files get submitted first
    subsamples.sort(key=lambda x: x.stats.reads_raw, reverse=True)
    LOGGER.info([i.stats.reads_raw for i in subsamples])

    ## send samples to cutadapt filtering
    if "pair" in data.paramsdict["datatype"]:
        for sample in subsamples:
            rawedits[sample.name] = lbview.apply(cutadaptit_pairs, *(data, sample))
    else:
        for sample in subsamples:
            rawedits[sample.name] = lbview.apply(cutadaptit_single, *(data, sample))

    ## wait for all to finish, updating the progress bar as we go
    while 1:
        finished = sum([i.ready() for i in rawedits.values()])
        elapsed = datetime.timedelta(seconds=int(time.time()-start))
        progressbar(len(rawedits), finished, printstr.format(elapsed), spacer=data._spacer)
        time.sleep(0.1)
        if finished == len(rawedits):
            print("")
            break

    ## collect results, report failures, and store stats.
    for sname in rawedits:
        if rawedits[sname].successful():
            res = rawedits[sname].result()
            ## if single cleanup is easy
            if "pair" not in data.paramsdict["datatype"]:
                parse_single_results(data, data.samples[sname], res)
            else:
                parse_pair_results(data, data.samples[sname], res)
        else:
            print(" found an error in step2; see ipyrad_log.txt")
            LOGGER.error("error in run_cutadapt(): %s", rawedits[sname].exception())
python
{ "resource": "" }
q259307
concat_multiple_inputs
validation
def concat_multiple_inputs(data, sample):
    """
    If multiple fastq files were appended into the list of fastqs for samples
    then we merge them here before proceeding.
    """
    ## only act when there is more than one input tuple
    if len(sample.files.fastqs) > 1:
        ## keep the .gz suffix when inputs are gzipped (cat handles both)
        isgzip = ".gz" if sample.files.fastqs[0][0].endswith(".gz") else ""

        ## cat all R1 files (index 0 of each tuple) into one new handle
        cmd1 = ["cat"] + [pair[0] for pair in sample.files.fastqs]
        conc1 = os.path.join(data.dirs.edits, sample.name+"_R1_concat.fq{}".format(isgzip))
        with open(conc1, 'w') as cout1:
            proc1 = sps.Popen(cmd1, stderr=sps.STDOUT, stdout=cout1, close_fds=True)
            res1 = proc1.communicate()[0]
        if proc1.returncode:
            raise IPyradWarningExit("error in: {}, {}".format(cmd1, res1))

        ## Only set conc2 if R2 actually exists
        conc2 = 0
        if "pair" in data.paramsdict["datatype"]:
            cmd2 = ["cat"] + [pair[1] for pair in sample.files.fastqs]
            conc2 = os.path.join(data.dirs.edits, sample.name+"_R2_concat.fq{}".format(isgzip))
            with open(conc2, 'w') as cout2:
                proc2 = sps.Popen(cmd2, stderr=sps.STDOUT, stdout=cout2, close_fds=True)
                res2 = proc2.communicate()[0]
            if proc2.returncode:
                raise IPyradWarningExit("Error concatenating fastq files. Make sure all "
                    + "these files exist: {}\nError message: {}".format(cmd2, proc2.returncode))

        ## store new file handles
        sample.files.concat = [(conc1, conc2)]
    return sample.files.concat
python
{ "resource": "" }
q259308
make
validation
def make(data, samples):
    """
    Convert vcf from step6 to .loci format to facilitate downstream
    format conversion.
    """
    ## step6 consensus vcf in, .loci out; delegate the heavy lifting
    vcfin = os.path.join(data.dirs.consens, data.name + ".vcf")
    lociout = os.path.join(data.dirs.outfiles, data.name + ".loci")
    importvcf(vcfin, lociout)
python
{ "resource": "" }
q259309
importvcf
validation
def importvcf(vcffile, locifile):
    """
    Function for importing a vcf file into loci format. Arguments
    are the input vcffile and the loci file to write out.

    FIX: the file was opened with the undefined name `invcffile` instead of
    the `vcffile` parameter, so every call raised NameError, which the broad
    except silently swallowed. Now the parameter is used.
    """
    try:
        ## Get names of all individuals in the vcf
        with open(vcffile, 'r') as invcf:
            for line in invcf:
                if line.split()[0] == "#CHROM":
                    ## This is maybe a little clever. The names in the vcf are
                    ## everything after the "FORMAT" column, so find that
                    ## index, then slice everything after it.
                    names_col = line.split().index("FORMAT") + 1
                    names = line.split()[names_col:]
                    LOGGER.debug("Got names - %s", names)
                    break
        print("wat")
        ## Get the column to start reading at
    except Exception:
        ## NOTE(review): conversion below the header parse is unimplemented;
        ## the broad except still masks real errors — consider narrowing.
        print("wat")
python
{ "resource": "" }
q259310
get_targets
validation
def get_targets(ipyclient):
    """
    A function to find 2 engines per hostname on the ipyclient. We'll assume
    that the CPUs are hyperthreaded, which is why we grab two. If they are
    not then no foul. Two multi-threaded jobs will be run on each of the 2
    engines per host.

    FIX: `zip(...)` is materialized with `list(...)`; in Python 3 the bare
    zip iterator would be exhausted after the first host in the dict
    comprehension below, silently dropping all other hosts' engines.
    """
    ## fill hosts with async[gethostname]
    hosts = []
    for eid in ipyclient.ids:
        engine = ipyclient[eid]
        if not engine.outstanding:
            hosts.append(engine.apply(socket.gethostname))

    ## capture results of asyncs
    hosts = [i.get() for i in hosts]
    hostset = set(hosts)
    hostzip = list(zip(hosts, ipyclient.ids))
    hostdict = {host: [i[1] for i in hostzip if i[0] == host] for host in hostset}
    targets = list(itertools.chain(*[hostdict[i][:2] for i in hostdict]))

    ## return first two engines from each host
    return targets
python
{ "resource": "" }
q259311
compute_tree_stats
validation
def compute_tree_stats(self, ipyclient):
    """
    compute stats for stats file and NHX tree features

    Builds the (consensus) tree, writes the bootstrap-split stats file,
    then annotates every node with quartets_total / quartets_sampled
    features (computed in parallel) and writes the NHX tree.
    """
    ## get name indices
    names = self.samples

    ## get majority rule consensus tree of weighted Q bootstrap trees
    if self.params.nboots:
        ## Tree object
        fulltre = ete3.Tree(self.trees.tree, format=0)
        fulltre.unroot()

        ## only grab as many boots as the last option said was max
        with open(self.trees.boots, 'r') as inboots:
            bb = [ete3.Tree(i.strip(), format=0) for i in inboots.readlines()]
            wboots = [fulltre] + bb[-self.params.nboots:]

        ## infer consensus tree and write to file
        wctre, wcounts = consensus_tree(wboots, names=names)
        self.trees.cons = os.path.join(self.dirs, self.name + ".cons")
        with open(self.trees.cons, 'w') as ocons:
            ocons.write(wctre.write(format=0))
    else:
        ## no bootstraps: just use the full inferred tree
        wctre = ete3.Tree(self.trees.tree, format=0)
        wctre.unroot()

    ## build stats file and write trees
    self.trees.nhx = os.path.join(self.dirs, self.name + ".nhx")
    with open(self.files.stats, 'w') as ostats:
        ## print Tetrad info
        #ostats.write(STATS_STRING.format(**self.stats))

        ## print bootstrap splits
        if self.params.nboots:
            ostats.write("## splits observed in {} trees\n".format(len(wboots)))
            for i, j in enumerate(self.samples):
                ostats.write("{:<3} {}\n".format(i, j))
            ostats.write("\n")
            ## only record informative splits (more than one taxon on a side)
            for split, freq in wcounts:
                if split.count('1') > 1:
                    ostats.write("{} {:.2f}\n".format(split, round(freq, 2)))
            ostats.write("\n")

    ## parallelized this function because it can be slogging
    lbview = ipyclient.load_balanced_view()

    ## store results in dicts
    qtots = {}
    qsamp = {}
    tots = sum(1 for i in wctre.iter_leaves())
    totn = set(wctre.get_leaf_names())

    ## iterate over node traversal.
    for node in wctre.traverse():
        ## this is slow, needs to look at every sampled quartet
        ## so we send it be processed on an engine
        qtots[node] = lbview.apply(_get_total, *(tots, node))
        qsamp[node] = lbview.apply(_get_sampled, *(self, totn, node))

    ## wait for jobs to finish
    ipyclient.wait()

    ## put results into tree
    for node in wctre.traverse():
        ## this is fast, just calcs n_choose_k
        total = qtots[node].result()
        sampled = qsamp[node].result()
        ## store the results to the tree
        node.add_feature("quartets_total", total)
        node.add_feature("quartets_sampled", sampled)
    features = ["quartets_total", "quartets_sampled"]

    ## return as NHX format with extra info
    with open(self.trees.nhx, 'w') as outtre:
        outtre.write(wctre.write(format=0, features=features))
python
{ "resource": "" }
q259312
random_product
validation
def random_product(iter1, iter2):
    """ random sampler for equal_splits func"""
    ## draw two distinct items from each pool, preserving the draw order
    first_pair = random.sample(tuple(iter1), 2)
    second_pair = random.sample(tuple(iter2), 2)
    return tuple(first_pair + second_pair)
python
{ "resource": "" }
q259313
n_choose_k
validation
def n_choose_k(n, k):
    """
    get the number of quartets as n-choose-k. This is used in equal splits
    to decide whether a split should be exhaustively sampled or randomly
    sampled. Edges near tips can be exhaustive while highly nested edges
    probably have too many quartets
    """
    ## multiply the k fractions (n-i)/(i+1); Fraction keeps the running
    ## product exact, so the final value is an exact integer
    result = Fraction(1)
    for i in range(k):
        result *= Fraction(n - i, i + 1)
    return int(result)
python
{ "resource": "" }
q259314
count_snps
validation
def count_snps(mat): """ get dstats from the count array and return as a float tuple """ ## get [aabb, baba, abba, aaab] snps = np.zeros(4, dtype=np.uint32) ## get concordant (aabb) pis sites snps[0] = np.uint32(\ mat[0, 5] + mat[0, 10] + mat[0, 15] + \ mat[5, 0] + mat[5, 10] + mat[5, 15] + \ mat[10, 0] + mat[10, 5] + mat[10, 15] + \ mat[15, 0] + mat[15, 5] + mat[15, 10]) ## get discordant (baba) sites for i in range(16): if i % 5: snps[1] += mat[i, i] ## get discordant (abba) sites snps[2] = mat[1, 4] + mat[2, 8] + mat[3, 12] +\ mat[4, 1] + mat[6, 9] + mat[7, 13] +\ mat[8, 2] + mat[9, 6] + mat[11, 14] +\ mat[12, 3] + mat[13, 7] + mat[14, 11] ## get autapomorphy sites snps[3] = (mat.sum() - np.diag(mat).sum()) - snps[2] return snps
python
{ "resource": "" }
q259315
chunk_to_matrices
validation
def chunk_to_matrices(narr, mapcol, nmask): """ numba compiled code to get matrix fast. arr is a 4 x N seq matrix converted to np.int8 I convert the numbers for ATGC into their respective index for the MAT matrix, and leave all others as high numbers, i.e., -==45, N==78. """ ## get seq alignment and create an empty array for filling mats = np.zeros((3, 16, 16), dtype=np.uint32) ## replace ints with small ints that index their place in the ## 16x16. This no longer checks for big ints to exclude, so resolve=True ## is now the default, TODO. last_loc = -1 for idx in xrange(mapcol.shape[0]): if not nmask[idx]: if not mapcol[idx] == last_loc: i = narr[:, idx] mats[0, (4*i[0])+i[1], (4*i[2])+i[3]] += 1 last_loc = mapcol[idx] ## fill the alternates x = np.uint8(0) for y in np.array([0, 4, 8, 12], dtype=np.uint8): for z in np.array([0, 4, 8, 12], dtype=np.uint8): mats[1, y:y+np.uint8(4), z:z+np.uint8(4)] = mats[0, x].reshape(4, 4) mats[2, y:y+np.uint8(4), z:z+np.uint8(4)] = mats[0, x].reshape(4, 4).T x += np.uint8(1) return mats
python
{ "resource": "" }
q259316
calculate
validation
def calculate(seqnon, mapcol, nmask, tests):
    """ groups together several numba compiled funcs """
    ## count site-pattern matrices for the three quartet arrangements
    mats = chunk_to_matrices(seqnon, mapcol, nmask)

    ## svd singular values and matrix ranks for each arrangement
    svds = np.zeros((3, 16), dtype=np.float64)
    qscores = np.zeros(3, dtype=np.float64)
    ranks = np.zeros(3, dtype=np.float64)
    for arr in range(3):
        dmat = mats[arr].astype(np.float64)
        svds[arr] = np.linalg.svd(dmat)[1]
        ranks[arr] = np.linalg.matrix_rank(dmat)

    ## get minrank, or 11
    minrank = int(min(11, ranks.min()))

    ## score = residual mass in the singular values past minrank
    for arr in range(3):
        qscores[arr] = np.sqrt(np.sum(svds[arr, minrank:] ** 2))

    ## lowest score marks the best-supported arrangement
    best = np.where(qscores == qscores.min())[0]
    bidx = tests[best][0]
    qsnps = count_snps(mats[best][0])

    return bidx, qsnps
python
{ "resource": "" }
q259317
nworker
validation
def nworker(data, smpchunk, tests):
    """
    The workhorse function. Not numba.

    FIX: replaced the Python-2-only `xrange` with `range` (identical
    semantics here, works on both Python 2 and 3).
    """
    ## tell engines to limit threads
    #numba.config.NUMBA_DEFAULT_NUM_THREADS = 1

    ## open the seqarray view, the modified array is in bootsarr
    with h5py.File(data.database.input, 'r') as io5:
        seqview = io5["bootsarr"][:]
        maparr = io5["bootsmap"][:]

    ## create an N-mask array of all seq cols (this isn't really too slow)
    ## 78 is ord('N')
    nall_mask = seqview[:] == 78

    ## tried numba compiling everythign below here, but was not faster
    ## than making nmask w/ axis arg in numpy

    ## get the input arrays ready
    rquartets = np.zeros((smpchunk.shape[0], 4), dtype=np.uint16)
    rweights = None
    #rweights = np.ones(smpchunk.shape[0], dtype=np.float64)
    rdstats = np.zeros((smpchunk.shape[0], 4), dtype=np.uint32)

    ## fill arrays with results using numba funcs
    for idx in range(smpchunk.shape[0]):
        ## get seqchunk for 4 samples (4, ncols)
        sidx = smpchunk[idx]
        seqchunk = seqview[sidx]

        ## get N-containing columns in 4-array, and invariant sites.
        nmask = np.any(nall_mask[sidx], axis=0)
        nmask += np.all(seqchunk == seqchunk[0], axis=0)  ## <- do we need this?

        ## get matrices if there are any shared SNPs
        ## returns best-tree index and qstats
        bidx, qstats = calculate(seqchunk, maparr[:, 0], nmask, tests)

        ## Only save to file if the quartet has information
        rdstats[idx] = qstats
        rquartets[idx] = smpchunk[idx][bidx]

    return rquartets, rweights, rdstats
python
{ "resource": "" }
q259318
shuffle_cols
validation
def shuffle_cols(seqarr, newarr, cols):
    """
    used in bootstrap resampling without a map file

    Copies seqarr's columns into newarr in the order given by `cols` and
    returns newarr (mutated in place).

    FIX: replaced the Python-2-only `xrange` with `range` (same semantics,
    and still supported by numba if this is jit-compiled).
    """
    for idx in range(cols.shape[0]):
        newarr[:, idx] = seqarr[:, cols[idx]]
    return newarr
python
{ "resource": "" }
q259319
resolve_ambigs
validation
def resolve_ambigs(tmpseq):
    """
    returns a seq array with 'RSKYWM' randomly replaced with resolved bases

    Mutates and returns `tmpseq` (uint8 base codes).

    FIX: replaced the Python-2-only `xrange` with `range`.
    """
    ## iterate over the bases 'RSKWYM': [82, 83, 75, 87, 89, 77]
    for ambig in np.uint8([82, 83, 75, 87, 89, 77]):
        ## get all site in this ambig
        idx, idy = np.where(tmpseq == ambig)
        ## get the two resolutions of the ambig
        ## NOTE(review): AMBIGS is a module-level table keyed by the byte
        ## view of the code — confirm key type under Python 3.
        res1, res2 = AMBIGS[ambig.view("S1")]
        ## randomly sample half those sites
        halfmask = np.random.choice([True, False], idx.shape[0])
        ## replace ambig bases with their resolutions
        for i in range(halfmask.shape[0]):
            if halfmask[i]:
                tmpseq[idx[i], idy[i]] = np.array(res1).view(np.uint8)
            else:
                tmpseq[idx[i], idy[i]] = np.array(res2).view(np.uint8)
    return tmpseq
python
{ "resource": "" }
q259320
get_spans
validation
def get_spans(maparr, spans):
    """
    get span distance for each locus in original seqarray

    Returns a (nloci, 2) uint64 array of [start, end) column indices per
    locus; the `spans` argument is ignored and rebuilt (kept for signature
    compatibility with callers).

    FIXES: replaced the Python-2-only `xrange` with `range`; removed the
    unused local `idy`.
    """
    ## start at 0, finds change at 1-index of map file
    bidx = 1
    spans = np.zeros((maparr[-1, 0], 2), np.uint64)

    ## read through marr and record when locus id changes
    for idx in range(1, maparr.shape[0]):
        cur = maparr[idx, 0]
        if cur != bidx:
            spans[cur-2, 1] = idx
            spans[cur-1, 0] = idx
            bidx = cur
    ## NOTE(review): the final span end comes from the map's last value in
    ## its last column — presumably the total site count; confirm.
    spans[-1, 1] = maparr[-1, -1]
    return spans
python
{ "resource": "" }
q259321
get_shape
validation
def get_shape(spans, loci):
    """
    get shape of new bootstrap resampled locus array

    Sums the span widths of the sampled loci to give the total number of
    columns the bootstrap array will need.

    FIX: replaced the Python-2-only `xrange` with `range`.
    """
    width = 0
    for idx in range(loci.shape[0]):
        width += spans[loci[idx], 1] - spans[loci[idx], 0]
    return width
python
{ "resource": "" }
q259322
fill_boot
validation
def fill_boot(seqarr, newboot, newmap, spans, loci):
    """
    fills the new bootstrap resampled array

    For each sampled locus, copies its columns from seqarr (shuffled within
    the locus) into newboot and records 1-indexed locus ids in newmap.

    FIX: replaced the Python-2-only `xrange` with `range`.
    """
    ## column index
    cidx = 0

    ## resample each locus
    for i in range(loci.shape[0]):
        ## grab a random locus's columns
        x1 = spans[loci[i]][0]
        x2 = spans[loci[i]][1]
        cols = seqarr[:, x1:x2]

        ## randomize columns within colsq
        cord = np.random.choice(cols.shape[1], cols.shape[1], replace=False)
        rcols = cols[:, cord]

        ## fill bootarr with n columns from seqarr
        ## the required length was already measured
        newboot[:, cidx:cidx+cols.shape[1]] = rcols

        ## fill bootmap with new map info (locus ids are 1-indexed)
        newmap[cidx: cidx+cols.shape[1], 0] = i+1

        ## advance column index
        cidx += cols.shape[1]

    ## return the concatenated cols
    return newboot, newmap
python
{ "resource": "" }
q259323
_byteify
validation
def _byteify(data, ignore_dicts=False):
    """
    converts unicode to utf-8 when reading in json files

    Recursively walks lists and dicts returned by json.load and encodes
    every unicode string to a utf-8 byte string. Python 2 only: relies on
    the `unicode` builtin and dict.iteritems, neither of which exists in
    Python 3 (where json already returns str).
    """
    ## leaf case: encode a unicode string
    if isinstance(data, unicode):
        return data.encode("utf-8")
    ## recurse into lists
    if isinstance(data, list):
        return [_byteify(item, ignore_dicts=True) for item in data]
    ## recurse into dicts (both keys and values); ignore_dicts prevents
    ## re-processing dicts already handled by the object_hook
    if isinstance(data, dict) and not ignore_dicts:
        return {
            _byteify(key, ignore_dicts=True): _byteify(value, ignore_dicts=True)
            for key, value in data.iteritems()
        }
    ## anything else (int, float, None, already-bytes) passes through
    return data
python
{ "resource": "" }
q259324
Tetrad._parse_names
validation
def _parse_names(self):
    """
    Parse sample names from the sequence file.

    Skips the first (header) line of self.files.data, then takes the first
    whitespace-separated token of every following line as a sample name,
    filling self.samples.

    FIXES: replaced the Python-2-only `infile.next()` with the `next()`
    builtin and a plain for-loop (the `with iter(open(...))` wrapper was
    redundant — iter(file) returns the file itself); blank lines are now
    skipped instead of raising IndexError.
    """
    self.samples = []
    with open(self.files.data, 'r') as infile:
        ## discard the header line (e.g. a phylip "ntax nchar" line)
        next(infile).strip().split()
        for line in infile:
            tokens = line.split()
            if tokens:
                self.samples.append(tokens[0])
python
{ "resource": "" }
q259325
Tetrad._run_qmc
validation
def _run_qmc(self, boot): """ runs quartet max-cut on a quartets file """ ## convert to txt file for wQMC self._tmp = os.path.join(self.dirs, ".tmpwtre") cmd = [ip.bins.qmc, "qrtt="+self.files.qdump, "otre="+self._tmp] ## run them proc = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE) res = proc.communicate() if proc.returncode: #LOGGER.error("Error in QMC: \n({}).".format(res)) LOGGER.error(res) raise IPyradWarningExit(res[1]) ## read in the tmp files since qmc does not pipe with open(self._tmp) as intree: ## convert int names back to str names renamer returns a newick str #tmp = toytree.tree(intree.read().strip()) tmp = ete3.Tree(intree.read().strip()) tmpwtre = self._renamer(tmp)#.tree) ## save the tree if boot: self.trees.boots = os.path.join(self.dirs, self.name+".boots") with open(self.trees.boots, 'a') as outboot: outboot.write(tmpwtre+"\n") else: self.trees.tree = os.path.join(self.dirs, self.name+".tree") with open(self.trees.tree, 'w') as outtree: outtree.write(tmpwtre) ## save JSON file checkpoint self._save()
python
{ "resource": "" }
q259326
Tetrad._dump_qmc
validation
def _dump_qmc(self):
    """
    Makes a reduced array that excludes quartets with no information and
    prints the quartets and weights to a file formatted for wQMC

    FIX: replaced the Python-2-only `xrange` with `range`.
    """
    ## open the h5 database
    io5 = h5py.File(self.database.output, 'r')

    ## create an output file for writing
    self.files.qdump = os.path.join(self.dirs, self.name+".quartets.txt")
    LOGGER.info("qdump file %s", self.files.qdump)
    outfile = open(self.files.qdump, 'w')

    ## todo: should pull quarts order in randomly? or doesn't matter?
    for idx in range(0, self.params.nquartets, self._chunksize):
        ## weight-based masking is currently disabled; kept for reference
        #mask = io5["weights"][idx:idx+self.chunksize] != 0
        #weight = io5["weights"][idx:idx+self.chunksize][mask]
        masked_quartets = io5["quartets"][idx:idx+self._chunksize, :]#[mask, :]
        quarts = [list(j) for j in masked_quartets]

        ## format and print in wQMC "a,b|c,d" quartet syntax
        chunk = ["{},{}|{},{}".format(*i) for i in quarts]
        outfile.write("\n".join(chunk)+"\n")

    ## close output file and h5 database
    outfile.close()
    io5.close()
python
{ "resource": "" }
q259327
Tetrad._renamer
validation
def _renamer(self, tre):
    """ renames newick from numbers to sample names"""
    ## tip labels arrive as string indices into self.samples; swap each
    ## numeric label for the real sample name
    for leaf in tre.get_leaves():
        leaf.name = self.samples[int(leaf.name)]

    ## newick with topology and leaf names only (ete3 format 9)
    return tre.write(format=9)
python
{ "resource": "" }
q259328
Tetrad._finalize_stats
validation
def _finalize_stats(self, ipyclient): """ write final tree files """ ## print stats file location: #print(STATSOUT.format(opr(self.files.stats))) ## print finished tree information --------------------- print(FINALTREES.format(opr(self.trees.tree))) ## print bootstrap information -------------------------- if self.params.nboots: ## get consensus, map values to tree edges, record stats file self._compute_tree_stats(ipyclient) ## print bootstrap info print(BOOTTREES.format(opr(self.trees.cons), opr(self.trees.boots))) ## print the ASCII tree only if its small if len(self.samples) < 20: if self.params.nboots: wctre = ete3.Tree(self.trees.cons, format=0) wctre.ladderize() print(wctre.get_ascii(show_internal=True, attributes=["dist", "name"])) print("") else: qtre = ete3.Tree(self.trees.tree, format=0) qtre.ladderize() #qtre = toytree.tree(self.trees.tree, format=0) #qtre.tree.unroot() print(qtre.get_ascii()) print("") ## print PDF filename & tips ----------------------------- docslink = "https://toytree.readthedocs.io/" citelink = "https://ipyrad.readthedocs.io/tetrad.html" print(LINKS.format(docslink, citelink))
python
{ "resource": "" }
q259329
Tetrad._save
validation
def _save(self):
    """ save a JSON file representation of Tetrad Class for checkpoint"""
    ## serialize the object dict, flattening Params instances to plain dicts
    state = copy.deepcopy(self.__dict__)
    for key, val in state.items():
        if isinstance(val, Params):
            state[key] = val.__dict__
    dumps = json.dumps(state,
                       sort_keys=False,
                       indent=4,
                       separators=(",", ":"),
                       )

    ## save to file, make dir if it wasn't made earlier
    assemblypath = os.path.join(self.dirs, self.name+".tet.json")
    if not os.path.exists(self.dirs):
        os.mkdir(self.dirs)

    ## protect save from interruption: retry until the write completes so a
    ## partial JSON checkpoint is never left behind
    while True:
        try:
            with open(assemblypath, 'w') as jout:
                jout.write(dumps)
            return
        except (KeyboardInterrupt, SystemExit):
            print('.')
            continue
python
{ "resource": "" }
q259330
Tetrad._insert_to_array
validation
def _insert_to_array(self, start, results):
    """ inputs results from workers into hdf4 array """
    ## unpack the worker's (quartets, weights, stats) triple
    qrts, wgts, qsts = results

    with h5py.File(self.database.output, 'r+') as out:
        stop = start + self._chunksize
        out['quartets'][start:stop] = qrts
        ## weights are currently unused
        ##out['weights'][start:stop] = wgts

        ## bootstrap stats go into their 0-indexed replicate dataset,
        ## otherwise into the main qstats dataset
        if self.checkpoint.boots:
            key = "qboots/b{}".format(self.checkpoint.boots-1)
            out[key][start:stop] = qsts
        else:
            out["qstats"][start:stop] = qsts
python
{ "resource": "" }
q259331
select_samples
validation
def select_samples(dbsamples, samples, pidx=None):
    """
    Get the row index of samples that are included. If samples are in the
    'excluded' they were already filtered out of 'samples' during
    _get_samples.
    """
    ## pull names, then map each onto its position in dbsamples
    names = [sample.name for sample in samples]
    lookup = list(dbsamples[pidx]) if pidx else list(dbsamples)
    return sorted(lookup.index(name) for name in names)
python
{ "resource": "" }
q259332
padnames
validation
def padnames(names):
    """ pads names for loci output """
    ## the longest name fixes the alignment column; 5 extra spaces separate
    ## names from sequences
    longest = max(len(name) for name in names)
    spacing = 5
    pnames = [name.ljust(longest + spacing) for name in names]
    snppad = "//" + " " * (longest - 2 + spacing)
    return np.array(pnames), snppad
python
{ "resource": "" }
q259333
locichunk
validation
def locichunk(args): """ Function from make_loci to apply to chunks. smask is sample mask. """ ## parse args data, optim, pnames, snppad, smask, start, samplecov, locuscov, upper = args ## this slice hslice = [start, start+optim] ## get filter db info co5 = h5py.File(data.database, 'r') afilt = co5["filters"][hslice[0]:hslice[1], ] aedge = co5["edges"][hslice[0]:hslice[1], ] asnps = co5["snps"][hslice[0]:hslice[1], ] ## get seqs db io5 = h5py.File(data.clust_database, 'r') if upper: aseqs = np.char.upper(io5["seqs"][hslice[0]:hslice[1], ]) else: aseqs = io5["seqs"][hslice[0]:hslice[1], ] ## which loci passed all filters keep = np.where(np.sum(afilt, axis=1) == 0)[0] store = [] ## write loci that passed after trimming edges, then write snp string for iloc in keep: edg = aedge[iloc] #LOGGER.info("!!!!!! iloc edg %s, %s", iloc, edg) args = [iloc, pnames, snppad, edg, aseqs, asnps, smask, samplecov, locuscov, start] if edg[4]: outstr, samplecov, locuscov = enter_pairs(*args) store.append(outstr) else: outstr, samplecov, locuscov = enter_singles(*args) store.append(outstr) ## write to file and clear store tmpo = os.path.join(data.dirs.outfiles, data.name+".loci.{}".format(start)) with open(tmpo, 'w') as tmpout: tmpout.write("\n".join(store) + "\n") ## close handles io5.close() co5.close() ## return sample counter return samplecov, locuscov, start
python
{ "resource": "" }
q259334
enter_pairs
validation
def enter_pairs(iloc, pnames, snppad, edg, aseqs, asnps, smask, samplecov, locuscov, start): """ enters funcs for pairs """ ## snps was created using only the selected samples. LOGGER.info("edges in enter_pairs %s", edg) seq1 = aseqs[iloc, :, edg[0]:edg[1]+1] snp1 = asnps[iloc, edg[0]:edg[1]+1, ] ## the 2nd read edges are +5 for the spacer seq2 = aseqs[iloc, :, edg[2]:edg[3]+1] snp2 = asnps[iloc, edg[2]:edg[3]+1, ] ## remove rows with all Ns, seq has only selected samples nalln = np.all(seq1 == "N", axis=1) ## make mask of removed rows and excluded samples. Use the inverse ## of this to save the coverage for samples nsidx = nalln + smask LOGGER.info("nsidx %s, nalln %s, smask %s", nsidx, nalln, smask) samplecov = samplecov + np.invert(nsidx).astype(np.int32) LOGGER.info("samplecov %s", samplecov) idx = np.sum(np.invert(nsidx).astype(np.int32)) LOGGER.info("idx %s", idx) locuscov[idx] += 1 ## select the remaining names in order seq1 = seq1[~nsidx, ] seq2 = seq2[~nsidx, ] names = pnames[~nsidx] ## save string for printing, excluding names not in samples outstr = "\n".join(\ [name + s1.tostring()+"nnnn"+s2.tostring() for name, s1, s2 in \ zip(names, seq1, seq2)]) #LOGGER.info("s1 %s", s1.tostring()) #LOGGER.info("s2 %s", s2.tostring()) ## get snp string and add to store snpstring1 = ["-" if snp1[i, 0] else \ "*" if snp1[i, 1] else \ " " for i in range(len(snp1))] snpstring2 = ["-" if snp2[i, 0] else \ "*" if snp2[i, 1] else \ " " for i in range(len(snp2))] #npis = str(snpstring1+snpstring2).count("*") #nvars = str(snpstring1+snpstring2).count("-") + npis outstr += "\n" + snppad + "".join(snpstring1)+\ " "+"".join(snpstring2)+"|{}|".format(iloc+start) #"|LOCID={},DBID={},NVAR={},NPIS={}|"\ #.format(1+iloc+start, iloc, nvars, npis) return outstr, samplecov, locuscov
python
{ "resource": "" }
q259335
enter_singles
validation
def enter_singles(iloc, pnames, snppad, edg, aseqs, asnps, smask, samplecov, locuscov, start): """ enter funcs for SE or merged data """ ## grab all seqs between edges seq = aseqs[iloc, :, edg[0]:edg[1]+1] ## snps was created using only the selected samples, and is edge masked. ## The mask is for counting snps quickly, but trimming is still needed here ## to make the snps line up with the seqs in the snp string. snp = asnps[iloc, edg[0]:edg[1]+1, ] ## remove rows with all Ns, seq has only selected samples nalln = np.all(seq == "N", axis=1) ## make mask of removed rows and excluded samples. Use the inverse ## of this to save the coverage for samples nsidx = nalln + smask samplecov = samplecov + np.invert(nsidx).astype(np.int32) idx = np.sum(np.invert(nsidx).astype(np.int32)) locuscov[idx] += 1 ## select the remaining names in order seq = seq[~nsidx, ] names = pnames[~nsidx] ## save string for printing, excluding names not in samples outstr = "\n".join(\ [name + s.tostring() for name, s in zip(names, seq)]) ## get snp string and add to store snpstring = ["-" if snp[i, 0] else \ "*" if snp[i, 1] else \ " " for i in range(len(snp))] outstr += "\n" + snppad + "".join(snpstring) + "|{}|".format(iloc+start) #LOGGER.info("outstr %s", outstr) return outstr, samplecov, locuscov
python
{ "resource": "" }
q259336
init_arrays
validation
def init_arrays(data): """ Create database file for storing final filtered snps data as hdf5 array. Copies splits and duplicates info from clust_database to database. """ ## get stats from step6 h5 and create new h5 co5 = h5py.File(data.clust_database, 'r') io5 = h5py.File(data.database, 'w') ## get maxlen and chunk len maxlen = data._hackersonly["max_fragment_length"] + 20 chunks = co5["seqs"].attrs["chunksize"][0] nloci = co5["seqs"].shape[0] ## make array for snp string, 2 cols, - and * snps = io5.create_dataset("snps", (nloci, maxlen, 2), dtype=np.bool, chunks=(chunks, maxlen, 2), compression='gzip') snps.attrs["chunksize"] = chunks snps.attrs["names"] = ["-", "*"] ## array for filters that will be applied in step7 filters = io5.create_dataset("filters", (nloci, 6), dtype=np.bool) filters.attrs["filters"] = ["duplicates", "max_indels", "max_snps", "max_shared_hets", "min_samps", "max_alleles"] ## array for edgetrimming edges = io5.create_dataset("edges", (nloci, 5), dtype=np.uint16, chunks=(chunks, 5), compression="gzip") edges.attrs["chunksize"] = chunks edges.attrs["names"] = ["R1_L", "R1_R", "R2_L", "R2_R", "sep"] ## xfer data from clustdb to finaldb edges[:, 4] = co5["splits"][:] filters[:, 0] = co5["duplicates"][:] ## close h5s io5.close() co5.close()
python
{ "resource": "" }
q259337
snpcount_numba
validation
def snpcount_numba(superints, snpsarr): """ Used to count the number of unique bases in a site for snpstring. """ ## iterate over all loci for iloc in xrange(superints.shape[0]): for site in xrange(superints.shape[2]): ## make new array catg = np.zeros(4, dtype=np.int16) ## a list for only catgs ncol = superints[iloc, :, site] for idx in range(ncol.shape[0]): if ncol[idx] == 67: #C catg[0] += 1 elif ncol[idx] == 65: #A catg[1] += 1 elif ncol[idx] == 84: #T catg[2] += 1 elif ncol[idx] == 71: #G catg[3] += 1 elif ncol[idx] == 82: #R catg[1] += 1 #A catg[3] += 1 #G elif ncol[idx] == 75: #K catg[2] += 1 #T catg[3] += 1 #G elif ncol[idx] == 83: #S catg[0] += 1 #C catg[3] += 1 #G elif ncol[idx] == 89: #Y catg[0] += 1 #C catg[2] += 1 #T elif ncol[idx] == 87: #W catg[1] += 1 #A catg[2] += 1 #T elif ncol[idx] == 77: #M catg[0] += 1 #C catg[1] += 1 #A ## get second most common site catg.sort() ## if invariant e.g., [0, 0, 0, 9], then nothing (" ") if not catg[2]: pass else: if catg[2] > 1: snpsarr[iloc, site, 1] = True else: snpsarr[iloc, site, 0] = True return snpsarr
python
{ "resource": "" }
q259338
maxind_numba
validation
def maxind_numba(block):
    """
    Return the largest count of internal indels ("-", ascii 45) found in
    any row of an aligned block.

    Terminal gaps are excluded: only gaps strictly between the first and
    last non-gap positions of a row are counted. A row that is entirely
    gaps is scored 100 so it always fails any reasonable indel filter.
    """
    inds = 0
    ## BUGFIX: xrange is Python-2-only; range is equivalent here and also
    ## works under Python 3 and numba's nopython mode.
    for row in range(block.shape[0]):
        ## positions holding real bases (not "-")
        where = np.where(block[row] != 45)[0]
        if len(where) == 0:
            ## all-gap row: treat as maximally bad
            obs = 100
        else:
            ## count gaps inside the aligned (non-terminal) region
            left = np.min(where)
            right = np.max(where)
            obs = np.sum(block[row, left:right] == 45)
        if obs > inds:
            inds = obs
    return inds
python
{ "resource": "" }
q259339
write_snps_map
validation
def write_snps_map(data): """ write a map file with linkage information for SNPs file""" ## grab map data from tmparr start = time.time() tmparrs = os.path.join(data.dirs.outfiles, "tmp-{}.h5".format(data.name)) with h5py.File(tmparrs, 'r') as io5: maparr = io5["maparr"][:] ## get last data end = np.where(np.all(maparr[:] == 0, axis=1))[0] if np.any(end): end = end.min() else: end = maparr.shape[0] ## write to map file (this is too slow...) outchunk = [] with open(data.outfiles.snpsmap, 'w') as out: for idx in xrange(end): ## build to list line = maparr[idx, :] #print(line) outchunk.append(\ "{}\trad{}_snp{}\t{}\t{}\n"\ .format(line[0], line[1], line[2], 0, line[3])) ## clear list if not idx % 10000: out.write("".join(outchunk)) outchunk = [] ## write remaining out.write("".join(outchunk)) LOGGER.debug("finished writing snps_map in: %s", time.time() - start)
python
{ "resource": "" }
q259340
write_usnps
validation
def write_usnps(data, sidx, pnames): """ write the bisnp string """ ## grab bis data from tmparr tmparrs = os.path.join(data.dirs.outfiles, "tmp-{}.h5".format(data.name)) with h5py.File(tmparrs, 'r') as io5: bisarr = io5["bisarr"] ## trim to size b/c it was made longer than actual end = np.where(np.all(bisarr[:] == "", axis=0))[0] if np.any(end): end = end.min() else: end = bisarr.shape[1] ## write to usnps file with open(data.outfiles.usnpsphy, 'w') as out: out.write("{} {}\n".format(bisarr.shape[0], end)) for idx, name in enumerate(pnames): out.write("{}{}\n".format(name, "".join(bisarr[idx, :end])))
python
{ "resource": "" }
q259341
write_str
validation
def write_str(data, sidx, pnames): """ Write STRUCTURE format for all SNPs and unlinked SNPs """ ## grab snp and bis data from tmparr start = time.time() tmparrs = os.path.join(data.dirs.outfiles, "tmp-{}.h5".format(data.name)) with h5py.File(tmparrs, 'r') as io5: snparr = io5["snparr"] bisarr = io5["bisarr"] ## trim to size b/c it was made longer than actual bend = np.where(np.all(bisarr[:] == "", axis=0))[0] if np.any(bend): bend = bend.min() else: bend = bisarr.shape[1] send = np.where(np.all(snparr[:] == "", axis=0))[0] if np.any(send): send = send.min() else: send = snparr.shape[1] ## write to str and ustr out1 = open(data.outfiles.str, 'w') out2 = open(data.outfiles.ustr, 'w') numdict = {'A': '0', 'T': '1', 'G': '2', 'C': '3', 'N': '-9', '-': '-9'} if data.paramsdict["max_alleles_consens"] > 1: for idx, name in enumerate(pnames): out1.write("{}\t\t\t\t\t{}\n"\ .format(name, "\t".join([numdict[DUCT[i][0]] for i in snparr[idx, :send]]))) out1.write("{}\t\t\t\t\t{}\n"\ .format(name, "\t".join([numdict[DUCT[i][1]] for i in snparr[idx, :send]]))) out2.write("{}\t\t\t\t\t{}\n"\ .format(name, "\t".join([numdict[DUCT[i][0]] for i in bisarr[idx, :bend]]))) out2.write("{}\t\t\t\t\t{}\n"\ .format(name, "\t".join([numdict[DUCT[i][1]] for i in bisarr[idx, :bend]]))) else: ## haploid output for idx, name in enumerate(pnames): out1.write("{}\t\t\t\t\t{}\n"\ .format(name, "\t".join([numdict[DUCT[i][0]] for i in snparr[idx, :send]]))) out2.write("{}\t\t\t\t\t{}\n"\ .format(name, "\t".join([numdict[DUCT[i][0]] for i in bisarr[idx, :bend]]))) out1.close() out2.close() LOGGER.debug("finished writing str in: %s", time.time() - start)
python
{ "resource": "" }
q259342
concat_vcf
validation
def concat_vcf(data, names, full): """ Sorts, concatenates, and gzips VCF chunks. Also cleans up chunks. """ ## open handle and write headers if not full: writer = open(data.outfiles.vcf, 'w') else: writer = gzip.open(data.outfiles.VCF, 'w') vcfheader(data, names, writer) writer.close() ## get vcf chunks vcfchunks = glob.glob(data.outfiles.vcf+".*") vcfchunks.sort(key=lambda x: int(x.rsplit(".")[-1])) ## concatenate if not full: writer = open(data.outfiles.vcf, 'a') else: writer = gzip.open(data.outfiles.VCF, 'a') ## what order do users want? The order in the original ref file? ## Sorted by the size of chroms? that is the order in faidx. ## If reference mapping then it's nice to sort the vcf data by ## CHROM and POS. This is doing a very naive sort right now, so the ## CHROM will be ordered, but not the pos within each chrom. if data.paramsdict["assembly_method"] in ["reference", "denovo+reference"]: ## Some unix sorting magic to get POS sorted within CHROM ## First you sort by POS (-k 2,2), then you do a `stable` sort ## by CHROM. You end up with POS ordered and grouped correctly by CHROM ## but relatively unordered CHROMs (locus105 will be before locus11). cmd = ["cat"] + vcfchunks + [" | sort -k 2,2 -n | sort -k 1,1 -s"] cmd = " ".join(cmd) proc = sps.Popen(cmd, shell=True, stderr=sps.STDOUT, stdout=writer, close_fds=True) else: proc = sps.Popen(["cat"] + vcfchunks, stderr=sps.STDOUT, stdout=writer, close_fds=True) err = proc.communicate()[0] if proc.returncode: raise IPyradWarningExit("err in concat_vcf: %s", err) writer.close() for chunk in vcfchunks: os.remove(chunk)
python
{ "resource": "" }
q259343
reftrick
validation
def reftrick(iseq, consdict):
    """
    Return the most common base at each site, in order.

    iseq: 2-D array of ascii base codes (samples x sites).
    consdict: rows of (ambiguity_code, base1, base2) used to expand
    ambiguity codes into their two constituent bases before counting.

    Returns an (nsites, 4) uint8 array: [ref, alt, 3rd, 4th] base codes.
    The alt column defaults to "." (ascii 46) when no alt is observed.
    """
    altrefs = np.zeros((iseq.shape[1], 4), dtype=np.uint8)
    altrefs[:, 1] = 46

    ## range (not py2-only xrange) so the function also runs under py3
    for col in range(iseq.shape[1]):
        ## count occurrences of each ascii code at this site
        fcounts = np.zeros(111, dtype=np.int64)
        counts = np.bincount(iseq[:, col])#, minlength=90)
        fcounts[:counts.shape[0]] = counts
        ## set N and - to zero, wish numba supported minlen arg
        fcounts[78] = 0
        fcounts[45] = 0

        ## add ambig counts to true bases.
        ## PERF: add the whole count at once instead of the old
        ## one-increment-per-observation loop (same result, O(1) per code)
        for aidx in range(consdict.shape[0]):
            nbases = fcounts[consdict[aidx, 0]]
            fcounts[consdict[aidx, 1]] += nbases
            fcounts[consdict[aidx, 2]] += nbases
            fcounts[consdict[aidx, 0]] = 0

        ## most common base becomes the reference
        who = np.argmax(fcounts)
        altrefs[col, 0] = who
        fcounts[who] = 0

        ## if an alt allele fill over the "." placeholder
        who = np.argmax(fcounts)
        if who:
            altrefs[col, 1] = who
            fcounts[who] = 0

        ## if 3rd or 4th alleles observed then add to arr
        who = np.argmax(fcounts)
        altrefs[col, 2] = who
        fcounts[who] = 0

        who = np.argmax(fcounts)
        altrefs[col, 3] = who

    return altrefs
python
{ "resource": "" }
q259344
_collapse_outgroup
validation
def _collapse_outgroup(tree, taxdicts): """ collapse outgroup in ete Tree for easier viewing """ ## check that all tests have the same outgroup outg = taxdicts[0]["p4"] if not all([i["p4"] == outg for i in taxdicts]): raise Exception("no good") ## prune tree, keep only one sample from outgroup tre = ete.Tree(tree.write(format=1)) #tree.copy(method="deepcopy") alltax = [i for i in tre.get_leaf_names() if i not in outg] alltax += [outg[0]] tre.prune(alltax) tre.search_nodes(name=outg[0])[0].name = "outgroup" tre.ladderize() ## remove other ougroups from taxdicts taxd = copy.deepcopy(taxdicts) newtaxdicts = [] for test in taxd: #test["p4"] = [outg[0]] test["p4"] = ["outgroup"] newtaxdicts.append(test) return tre, newtaxdicts
python
{ "resource": "" }
q259345
Tree.draw
validation
def draw( self, show_tip_labels=True, show_node_support=False, use_edge_lengths=False, orient="right", print_args=False, *args, **kwargs): """ plot the tree using toyplot.graph. Parameters: ----------- show_tip_labels: bool Show tip names from tree. use_edge_lengths: bool Use edge lengths from newick tree. show_node_support: bool Show support values at nodes using a set of default options. ... """ ## re-decompose tree for new orient and edges args self._decompose_tree(orient=orient, use_edge_lengths=use_edge_lengths) ## update kwargs with entered args and all other kwargs dwargs = {} dwargs["show_tip_labels"] = show_tip_labels dwargs["show_node_support"] = show_node_support dwargs.update(kwargs) ## pass to panel plotter canvas, axes, panel = tree_panel_plot(self, print_args, **dwargs) return canvas, axes, panel
python
{ "resource": "" }
q259346
get_quick_depths
validation
def get_quick_depths(data, sample): """ iterate over clustS files to get data """ ## use existing sample cluster path if it exists, since this ## func can be used in step 4 and that can occur after merging ## assemblies after step3, and if we then referenced by data.dirs.clusts ## the path would be broken. ## ## If branching at step 3 to test different clust thresholds, the ## branched samples will retain the samples.files.clusters of the ## parent (which have the clust_threshold value of the parent), so ## it will look like nothing has changed. If we call this func ## from step 3 then it indicates we are in a branch and should ## reset the sample.files.clusters handle to point to the correct ## data.dirs.clusts directory. See issue #229. ## Easier to just always trust that samples.files.clusters is right, ## no matter what step? #if sample.files.clusters and not sample.stats.state == 3: # pass #else: # ## set cluster file handles sample.files.clusters = os.path.join( data.dirs.clusts, sample.name+".clustS.gz") ## get new clustered loci fclust = data.samples[sample.name].files.clusters clusters = gzip.open(fclust, 'r') pairdealer = itertools.izip(*[iter(clusters)]*2) ## storage depths = [] maxlen = [] ## start with cluster 0 tdepth = 0 tlen = 0 ## iterate until empty while 1: ## grab next try: name, seq = pairdealer.next() except StopIteration: break ## if not the end of a cluster #print name.strip(), seq.strip() if name.strip() == seq.strip(): depths.append(tdepth) maxlen.append(tlen) tlen = 0 tdepth = 0 else: tdepth += int(name.split(";")[-2][5:]) tlen = len(seq) ## return clusters.close() return np.array(maxlen), np.array(depths)
python
{ "resource": "" }
q259347
align_and_parse
validation
def align_and_parse(handle, max_internal_indels=5, is_gbs=False): """ much faster implementation for aligning chunks """ ## data are already chunked, read in the whole thing. bail if no data. try: with open(handle, 'rb') as infile: clusts = infile.read().split("//\n//\n") ## remove any empty spots clusts = [i for i in clusts if i] ## Skip entirely empty chunks if not clusts: raise IPyradError except (IOError, IPyradError): LOGGER.debug("skipping empty chunk - {}".format(handle)) return 0 ## count discarded clusters for printing to stats later highindels = 0 ## iterate over clusters sending each to muscle, splits and aligns pairs try: aligned = persistent_popen_align3(clusts, 200, is_gbs) except Exception as inst: LOGGER.debug("Error in handle - {} - {}".format(handle, inst)) #raise IPyradWarningExit("error hrere {}".format(inst)) aligned = [] ## store good alignments to be written to file refined = [] ## filter and trim alignments for clust in aligned: ## check for too many internal indels filtered = aligned_indel_filter(clust, max_internal_indels) ## reverse complement matches. No longer implemented. #filtered = overshoot_filter(clust) ## finally, add to outstack if alignment is good if not filtered: refined.append(clust)#"\n".join(stack)) else: highindels += 1 ## write to file after if refined: outhandle = handle.rsplit(".", 1)[0]+".aligned" with open(outhandle, 'wb') as outfile: outfile.write("\n//\n//\n".join(refined)+"\n") ## remove the old tmp file log_level = logging.getLevelName(LOGGER.getEffectiveLevel()) if not log_level == "DEBUG": os.remove(handle) return highindels
python
{ "resource": "" }
q259348
aligned_indel_filter
validation
def aligned_indel_filter(clust, max_internal_indels):
    """
    Check a muscle-aligned cluster for too many internal indels.

    Returns 1 if any read (or read half, for paired data joined by the
    "nnnn" spacer) contains more than max_internal_indels gaps after
    terminal gaps are stripped; otherwise returns 0.
    """
    ## split cluster into alternating name/sequence tokens
    tokens = clust.split()
    seqs = tokens[1::2]

    try:
        ## paired data: count internal gaps on each side of the spacer
        left = [s.split("nnnn")[0] for s in seqs]
        right = [s.split("nnnn")[1] for s in seqs]
        gaps = [s.rstrip("-").lstrip("-").count("-") for s in left]
        gaps += [s.rstrip("-").lstrip("-").count("-") for s in right]
        if max(gaps) > max_internal_indels:
            return 1
    except IndexError:
        ## unpaired data: no spacer present, count gaps across whole reads
        gaps = [s.rstrip("-").lstrip("-").count("-") for s in seqs]
        if max(gaps) > max_internal_indels:
            return 1
    return 0
python
{ "resource": "" }
q259349
setup_dirs
validation
def setup_dirs(data):
    """
    Set up the output directories used by step3.

    Creates the within-sample clusters directory, a tmp directory for
    alignment chunk files, and (for reference-based assemblies only) a
    refmapping output directory. Stores the paths on the data object.
    """
    ## root directory for this project
    project = os.path.realpath(data.paramsdict["project_dir"])

    ## directory holding within-sample clusters, named by clust threshold
    data.dirs.clusts = os.path.join(
        project,
        "{}_clust_{}".format(data.name, data.paramsdict["clust_threshold"]))
    if not os.path.exists(data.dirs.clusts):
        os.mkdir(data.dirs.clusts)

    ## temporary working space for alignment chunk files
    data.tmpdir = os.path.abspath(os.path.expanduser(
        os.path.join(project, data.name + '-tmpalign')))
    if not os.path.exists(data.tmpdir):
        os.mkdir(data.tmpdir)

    ## reference-based assemblies also get a read-mapping output directory
    if data.paramsdict["assembly_method"] != "denovo":
        data.dirs.refmapping = os.path.join(
            project, "{}_refmapping".format(data.name))
        if not os.path.exists(data.dirs.refmapping):
            os.mkdir(data.dirs.refmapping)
python
{ "resource": "" }
q259350
build_dag
validation
def build_dag(data, samples): """ build a directed acyclic graph describing jobs to be run in order. """ ## Create DAGs for the assembly method being used, store jobs in nodes snames = [i.name for i in samples] dag = nx.DiGraph() ## get list of pre-align jobs from globals based on assembly method joborder = JOBORDER[data.paramsdict["assembly_method"]] ## WHICH JOBS TO RUN: iterate over the sample names for sname in snames: ## append pre-align job for each sample to nodes list for func in joborder: dag.add_node("{}-{}-{}".format(func, 0, sname)) ## append align func jobs, each will have max 10 for chunk in xrange(10): dag.add_node("{}-{}-{}".format("muscle_align", chunk, sname)) ## append final reconcat jobs dag.add_node("{}-{}-{}".format("reconcat", 0, sname)) ## ORDER OF JOBS: add edges/dependency between jobs: (first-this, then-that) for sname in snames: for sname2 in snames: ## enforce that clust/map cannot start until derep is done for ALL ## samples. This is b/c... dag.add_edge("{}-{}-{}".format(joborder[0], 0, sname2), "{}-{}-{}".format(joborder[1], 0, sname)) ## add remaining pre-align jobs for idx in xrange(2, len(joborder)): dag.add_edge("{}-{}-{}".format(joborder[idx-1], 0, sname), "{}-{}-{}".format(joborder[idx], 0, sname)) ## Add 10 align jobs, none of which can start until all chunker jobs ## are finished. Similarly, reconcat jobs cannot start until all align ## jobs are finished. for sname2 in snames: for chunk in range(10): dag.add_edge("{}-{}-{}".format("muscle_chunker", 0, sname2), "{}-{}-{}".format("muscle_align", chunk, sname)) ## add that the final reconcat job can't start until after ## each chunk of its own sample has finished aligning. dag.add_edge("{}-{}-{}".format("muscle_align", chunk, sname), "{}-{}-{}".format("reconcat", 0, sname)) ## return the dag return dag, joborder
python
{ "resource": "" }
q259351
_plot_dag
validation
def _plot_dag(dag, results, snames):
    """
    makes plot to help visualize the DAG setup. For developers only.

    Writes dag_layout.png (graph structure) and dag_starttimes.png
    (job start times vs. sample, colored by engine) to the cwd.
    Failures are logged, never raised, since this is a debug aid.
    """
    try:
        import matplotlib.pyplot as plt
        from matplotlib.dates import date2num
        from matplotlib.cm import gist_rainbow

        ## first figure is dag layout
        plt.figure("dag_layout", figsize=(10, 10))
        nx.draw(dag,
                pos=nx.spring_layout(dag),
                node_color='pink',
                with_labels=True)
        plt.savefig("./dag_layout.png", bbox_inches='tight', dpi=200)

        ## second figure is times for steps
        pos = {}
        colors = {}

        for node in dag:
            #jobkey = "{}-{}".format(node, sample)
            mtd = results[node].metadata
            start = date2num(mtd.started)
            #runtime = date2num(md.completed)# - start
            ## sample id to separate samples on x-axis
            _, _, sname = node.split("-", 2)
            sid = snames.index(sname)
            ## 1e6 to separate on y-axis
            pos[node] = (start+sid, start*1e6)
            colors[node] = mtd.engine_id

        ## x just spaces out samples;
        ## y is start time of each job with edge leading to next job
        ## color is the engine that ran the job
        ## all jobs were submitted as 3 second wait times
        plt.figure("dag_starttimes", figsize=(10, 16))
        nx.draw(dag, pos, node_list=colors.keys(),
                node_color=colors.values(),
                cmap=gist_rainbow,
                with_labels=True)
        plt.savefig("./dag_starttimes.png", bbox_inches='tight', dpi=200)

    except Exception as inst:
        LOGGER.warning(inst)
python
{ "resource": "" }
q259352
trackjobs
validation
def trackjobs(func, results, spacer): """ Blocks and prints progress for just the func being requested from a list of submitted engine jobs. Returns whether any of the jobs failed. func = str results = dict of asyncs """ ## TODO: try to insert a better way to break on KBD here. LOGGER.info("inside trackjobs of %s", func) ## get just the jobs from results that are relevant to this func asyncs = [(i, results[i]) for i in results if i.split("-", 2)[0] == func] ## progress bar start = time.time() while 1: ## how many of this func have finished so far ready = [i[1].ready() for i in asyncs] elapsed = datetime.timedelta(seconds=int(time.time()-start)) printstr = " {} | {} | s3 |".format(PRINTSTR[func], elapsed) progressbar(len(ready), sum(ready), printstr, spacer=spacer) time.sleep(0.1) if len(ready) == sum(ready): print("") break sfails = [] errmsgs = [] for job in asyncs: if not job[1].successful(): sfails.append(job[0]) errmsgs.append(job[1].result()) return func, sfails, errmsgs
python
{ "resource": "" }
q259353
concat_multiple_edits
validation
def concat_multiple_edits(data, sample):
    """
    If multiple fastq files were appended into the list of fastqs for this
    sample then merge (concatenate) them here before proceeding.

    Returns the updated sample.files.edits, which afterwards holds a single
    (R1, R2) tuple of concatenated file handles (R2 is 0 when not paired).
    """
    ## if more than one tuple in fastq list
    if len(sample.files.edits) > 1:
        ## create a cat command to append them all (doesn't matter if they
        ## are gzipped, cat still works). Grab index 0 of tuples for R1s.
        cmd1 = ["cat"] + [i[0] for i in sample.files.edits]

        ## write to new concat handle. Plain open() is correct even for
        ## gzipped inputs: concatenated gzip members form a valid gzip file.
        conc1 = os.path.join(data.dirs.edits, sample.name+"_R1_concatedit.fq.gz")
        with open(conc1, 'w') as cout1:
            proc1 = sps.Popen(cmd1, stderr=sps.STDOUT, stdout=cout1,
                              close_fds=True)
            res1 = proc1.communicate()[0]
        if proc1.returncode:
            raise IPyradWarningExit("error in: %s, %s", cmd1, res1)

        ## Only set conc2 if R2 actually exists
        conc2 = 0
        if os.path.exists(str(sample.files.edits[0][1])):
            cmd2 = ["cat"] + [i[1] for i in sample.files.edits]
            conc2 = os.path.join(data.dirs.edits, sample.name+"_R2_concatedit.fq.gz")
            ## BUGFIX: use plain open() like the R1 branch above. The old
            ## code used gzip.open() here, but Popen writes through the raw
            ## file descriptor, bypassing the gzip wrapper, which combined
            ## with the wrapper's own gzip header produced a corrupt file.
            with open(conc2, 'w') as cout2:
                proc2 = sps.Popen(cmd2, stderr=sps.STDOUT, stdout=cout2,
                                  close_fds=True)
                res2 = proc2.communicate()[0]
            if proc2.returncode:
                raise IPyradWarningExit("error in: %s, %s", cmd2, res2)

        ## store new file handles
        sample.files.edits = [(conc1, conc2)]
    return sample.files.edits
python
{ "resource": "" }
q259354
cluster
validation
def cluster(data, sample, nthreads, force):
    """
    Calls vsearch for clustering. cov varies by data type, values were
    chosen based on experience, but could be edited by users
    """
    ## get the dereplicated reads
    if "reference" in data.paramsdict["assembly_method"]:
        derephandle = os.path.join(data.dirs.edits,
                                   sample.name+"-refmap_derep.fastq")
        ## In the event all reads for all samples map successfully then
        ## clustering the unmapped reads makes no sense, so just bail out.
        if not os.stat(derephandle).st_size:
            ## In this case you do have to create empty, dummy vsearch output
            ## files so building_clusters will not fail.
            uhandle = os.path.join(data.dirs.clusts, sample.name+".utemp")
            usort = os.path.join(data.dirs.clusts, sample.name+".utemp.sort")
            hhandle = os.path.join(data.dirs.clusts, sample.name+".htemp")
            for f in [uhandle, usort, hhandle]:
                open(f, 'a').close()
            return
    else:
        derephandle = os.path.join(data.dirs.edits,
                                   sample.name+"_derep.fastq")

    ## create handles for the outfiles
    uhandle = os.path.join(data.dirs.clusts, sample.name+".utemp")
    temphandle = os.path.join(data.dirs.clusts, sample.name+".htemp")

    ## If derep file doesn't exist then bail out
    if not os.path.isfile(derephandle):
        LOGGER.warn("Bad derephandle - {}".format(derephandle))
        raise IPyradError("Input file for clustering doesn't exist - {}"\
                          .format(derephandle))

    ## testing one sample fail
    #if sample.name == "1C_0":
    #    x

    ## datatype specific optimization
    ## minsl: the percentage of the seed that must be matched
    ##    smaller values for RAD/ddRAD where we might want to combine, say 50bp
    ##    reads and 100bp reads in the same analysis.
    ## query_cov: the percentage of the query sequence that must match seed
    ##    smaller values are needed for gbs where only the tips might overlap
    ##    larger values for pairgbs where they should overlap near completely
    ##    small minsl and high query cov allows trimmed reads to match to untrim
    ##    seed for rad/ddrad/pairddrad.
    strand = "plus"
    cov = 0.75
    minsl = 0.5
    if data.paramsdict["datatype"] in ["gbs", "2brad"]:
        strand = "both"
        cov = 0.5
        minsl = 0.5
    elif data.paramsdict["datatype"] == 'pairgbs':
        strand = "both"
        cov = 0.75
        minsl = 0.75

    ## If this value is not null (which is the default) then override query cov
    if data._hackersonly["query_cov"]:
        cov = str(data._hackersonly["query_cov"])
        assert float(cov) <= 1, "query_cov must be <= 1.0"

    ## get call string
    cmd = [ipyrad.bins.vsearch,
           "-cluster_smallmem", derephandle,
           "-strand", strand,
           "-query_cov", str(cov),
           "-id", str(data.paramsdict["clust_threshold"]),
           "-minsl", str(minsl),
           "-userout", uhandle,
           "-userfields", "query+target+id+gaps+qstrand+qcov",
           "-maxaccepts", "1",
           "-maxrejects", "0",
           "-threads", str(nthreads),
           "-notmatched", temphandle,
           "-fasta_width", "0",
           "-fastq_qmax", "100",
           "-fulldp",
           "-usersort"]

    ## not sure what the benefit of this option is exactly, needs testing,
    ## might improve indel detection on left side, but we don't want to enforce
    ## aligning on left side if not necessarily, since quality trimmed reads
    ## might lose bases on left side in step2 and no longer align.
    #if data.paramsdict["datatype"] in ["rad", "ddrad", "pairddrad"]:
    #    cmd += ["-leftjust"]

    ## run vsearch
    LOGGER.debug("%s", cmd)
    proc = sps.Popen(cmd, stderr=sps.STDOUT, stdout=sps.PIPE, close_fds=True)

    ## This is long running so we wrap it to make sure we can kill it
    try:
        res = proc.communicate()[0]
    except KeyboardInterrupt:
        proc.kill()
        raise KeyboardInterrupt

    ## check for errors
    if proc.returncode:
        LOGGER.error("error %s: %s", cmd, res)
        raise IPyradWarningExit("cmd {}: {}".format(cmd, res))
python
{ "resource": "" }
q259355
muscle_chunker
validation
def muscle_chunker(data, sample): """ Splits the muscle alignment into chunks. Each chunk is run on a separate computing core. Because the largest clusters are at the beginning of the clusters file, assigning equal clusters to each file would put all of the large cluster, that take longer to align, near the top. So instead we randomly distribute the clusters among the files. If assembly method is reference then this step is just a placeholder and nothing happens. """ ## log our location for debugging LOGGER.info("inside muscle_chunker") ## only chunk up denovo data, refdata has its own chunking method which ## makes equal size chunks, instead of uneven chunks like in denovo if data.paramsdict["assembly_method"] != "reference": ## get the number of clusters clustfile = os.path.join(data.dirs.clusts, sample.name+".clust.gz") with iter(gzip.open(clustfile, 'rb')) as clustio: nloci = sum(1 for i in clustio if "//" in i) // 2 #tclust = clustio.read().count("//")//2 optim = (nloci//20) + (nloci%20) LOGGER.info("optim for align chunks: %s", optim) ## write optim clusters to each tmp file clustio = gzip.open(clustfile, 'rb') inclusts = iter(clustio.read().strip().split("//\n//\n")) ## splitting loci so first file is smaller and last file is bigger inc = optim // 10 for idx in range(10): ## how big is this chunk? this = optim + (idx * inc) left = nloci-this if idx == 9: ## grab everything left grabchunk = list(itertools.islice(inclusts, int(1e9))) else: ## grab next chunks-worth of data grabchunk = list(itertools.islice(inclusts, this)) nloci = left ## write the chunk to file tmpfile = os.path.join(data.tmpdir, sample.name+"_chunk_{}.ali".format(idx)) with open(tmpfile, 'wb') as out: out.write("//\n//\n".join(grabchunk)) ## write the chunk to file #grabchunk = list(itertools.islice(inclusts, left)) #if grabchunk: # tmpfile = os.path.join(data.tmpdir, sample.name+"_chunk_9.ali") # with open(tmpfile, 'a') as out: # out.write("\n//\n//\n".join(grabchunk)) clustio.close()
python
{ "resource": "" }
q259356
derep_concat_split
validation
def derep_concat_split(data, sample, nthreads, force): """ Running on remote Engine. Refmaps, then merges, then dereplicates, then denovo clusters reads. """ ## report location for debugging LOGGER.info("INSIDE derep %s", sample.name) ## MERGED ASSEMBIES ONLY: ## concatenate edits files within Samples. Returns a new sample.files.edits ## with the concat file. No change if not merged Assembly. mergefile = os.path.join(data.dirs.edits, sample.name+"_merged_.fastq") if not force: if not os.path.exists(mergefile): sample.files.edits = concat_multiple_edits(data, sample) else: LOGGER.info("skipped concat_multiple_edits: {} exists"\ .format(mergefile)) else: sample.files.edits = concat_multiple_edits(data, sample) ## PAIRED DATA ONLY: ## Denovo: merge or concat fastq pairs [sample.files.pairs] ## Reference: only concat fastq pairs [] ## Denovo + Reference: ... if 'pair' in data.paramsdict['datatype']: ## the output file handle for merged reads ## modify behavior of merging vs concating if reference if "reference" in data.paramsdict["assembly_method"]: nmerged = merge_pairs(data, sample.files.edits, mergefile, 0, 0) else: nmerged = merge_pairs(data, sample.files.edits, mergefile, 1, 1) ## store results sample.files.edits = [(mergefile, )] sample.stats.reads_merged = nmerged ## 3rad uses random adapters to identify pcr duplicates. We will ## remove pcr dupes here. Basically append the radom adapter to ## each sequence, do a regular old vsearch derep, then trim ## off the adapter, and push it down the pipeline. This will ## remove all identical seqs with identical random i5 adapters. if "3rad" in data.paramsdict["datatype"]: declone_3rad(data, sample) derep_and_sort(data, os.path.join(data.dirs.edits, sample.name+"_declone.fastq"), os.path.join(data.dirs.edits, sample.name+"_derep.fastq"), nthreads) else: ## convert fastq to fasta, then derep and sort reads by their size. ## we pass in only one file b/c paired should be merged by now. 
derep_and_sort(data, sample.files.edits[0][0], os.path.join(data.dirs.edits, sample.name+"_derep.fastq"), nthreads)
python
{ "resource": "" }
q259357
branch_assembly
validation
def branch_assembly(args, parsedict): """ Load the passed in assembly and create a branch. Copy it to a new assembly, and also write out the appropriate params.txt """ ## Get the current assembly data = getassembly(args, parsedict) ## get arguments to branch command bargs = args.branch ## get new name, trim off .txt if it was accidentally added newname = bargs[0] if newname.endswith(".txt"): newname = newname[:-4] ## look for subsamples if len(bargs) > 1: ## Branching and subsampling at step 6 is a bad idea, it messes up ## indexing into the hdf5 cluster file. Warn against this. if any([x.stats.state == 6 for x in data.samples.values()]): pass ## TODODODODODO #print("wat") ## are we removing or keeping listed samples? subsamples = bargs[1:] ## drop the matching samples if bargs[1] == "-": ## check drop names fails = [i for i in subsamples[1:] if i not in data.samples.keys()] if any(fails): raise IPyradWarningExit("\ \n Failed: unrecognized names requested, check spelling:\n {}"\ .format("\n ".join([i for i in fails]))) print(" dropping {} samples".format(len(subsamples)-1)) subsamples = list(set(data.samples.keys()) - set(subsamples)) ## If the arg after the new param name is a file that exists if os.path.exists(bargs[1]): new_data = data.branch(newname, infile=bargs[1]) else: new_data = data.branch(newname, subsamples) ## keeping all samples else: new_data = data.branch(newname, None) print(" creating a new branch called '{}' with {} Samples".\ format(new_data.name, len(new_data.samples))) print(" writing new params file to {}"\ .format("params-"+new_data.name+".txt\n")) new_data.write_params("params-"+new_data.name+".txt", force=args.force)
python
{ "resource": "" }
q259358
getassembly
validation
def getassembly(args, parsedict): """ loads assembly or creates a new one and set its params from parsedict. Does not launch ipcluster. """ ## Creating an assembly with a full path in the name will "work" ## but it is potentially dangerous, so here we have assembly_name ## and assembly_file, name is used for creating new in cwd, file is ## used for loading existing. ## ## Be nice if the user includes the extension. #project_dir = ip.core.assembly._expander(parsedict['1']) #assembly_name = parsedict['0'] project_dir = ip.core.assembly._expander(parsedict['project_dir']) assembly_name = parsedict['assembly_name'] assembly_file = os.path.join(project_dir, assembly_name) ## Assembly creation will handle error checking on ## the format of the assembly_name ## make sure the working directory exists. if not os.path.exists(project_dir): os.mkdir(project_dir) try: ## If 1 and force then go ahead and create a new assembly if ('1' in args.steps) and args.force: data = ip.Assembly(assembly_name, cli=True) else: data = ip.load_json(assembly_file, cli=True) data._cli = True except IPyradWarningExit as _: ## if no assembly is found then go ahead and make one if '1' not in args.steps: raise IPyradWarningExit(\ " Error: You must first run step 1 on the assembly: {}"\ .format(assembly_file)) else: ## create a new assembly object data = ip.Assembly(assembly_name, cli=True) ## for entering some params... for param in parsedict: ## trap assignment of assembly_name since it is immutable. if param == "assembly_name": ## Raise error if user tried to change assembly name if parsedict[param] != data.name: data.set_params(param, parsedict[param]) else: ## all other params should be handled by set_params try: data.set_params(param, parsedict[param]) except IndexError as _: print(" Malformed params file: {}".format(args.params)) print(" Bad parameter {} - {}".format(param, parsedict[param])) sys.exit(-1) return data
python
{ "resource": "" }
q259359
get_binom
validation
def get_binom(base1, base2, estE, estH):
    """
    Return (is_het, best_prob) for a site given the read counts of its two
    most frequent bases.

    Compares the binomial likelihood that the site is a true heterozygote
    (reads split 50/50 between two alleles) against the likelihood that the
    minor-base reads are sequencing errors on a homozygous site, weighting
    each model by its prior.

    Parameters
    ----------
    base1, base2 : int
        read counts of the two most frequent bases at the site.
    estE : float
        estimated sequencing error rate.
    estH : float
        estimated heterozygosity; prior probability of a hetero site.

    Returns
    -------
    (bool, float)
        True if the heterozygote model beats the homozygote model, and the
        normalized posterior weight of the best of the three models.
    """
    ## priors: remaining probability mass split between the two homozygotes
    prior_homo = (1. - estH) / 2.
    prior_hete = estH

    ## calculate likelihoods under each model
    bsum = base1 + base2
    ## heterozygote: each read is a fair coin flip between the two alleles.
    ## scipy.misc.comb was deprecated and removed from scipy;
    ## scipy.special.comb is the supported equivalent.
    hetprob = scipy.special.comb(bsum, base1) / (2. ** (bsum))
    homoa = scipy.stats.binom.pmf(base2, bsum, estE)
    homob = scipy.stats.binom.pmf(base1, bsum, estE)

    ## weight likelihoods by their priors
    hetprob *= prior_hete
    homoa *= prior_homo
    homob *= prior_homo

    ## normalized posterior weight of the best-supported model
    probabilities = [homoa, homob, hetprob]
    bestprob = max(probabilities) / float(sum(probabilities))

    ## het call if the het model beats the major-base homozygote model
    if hetprob > homoa:
        return True, bestprob
    else:
        return False, bestprob
python
{ "resource": "" }
q259360
basecaller
validation
def basecaller(arrayed, mindepth_majrule, mindepth_statistical, estH, estE): """ call all sites in a locus array. """ ## an array to fill with consensus site calls cons = np.zeros(arrayed.shape[1], dtype=np.uint8) cons.fill(78) arr = arrayed.view(np.uint8) ## iterate over columns for col in xrange(arr.shape[1]): ## the site of focus carr = arr[:, col] ## make mask of N and - sites mask = carr == 45 mask += carr == 78 marr = carr[~mask] ## skip if only empties (e.g., N-) if not marr.shape[0]: cons[col] = 78 ## skip if not variable elif np.all(marr == marr[0]): cons[col] = marr[0] ## estimate variable site call else: ## get allele freqs (first-most, second, third = p, q, r) counts = np.bincount(marr) pbase = np.argmax(counts) nump = counts[pbase] counts[pbase] = 0 qbase = np.argmax(counts) numq = counts[qbase] counts[qbase] = 0 rbase = np.argmax(counts) numr = counts[rbase] ## based on biallelic depth bidepth = nump + numq if bidepth < mindepth_majrule: cons[col] = 78 else: ## if depth is too high, reduce to sampled int if bidepth > 500: base1 = int(500 * (nump / float(bidepth))) base2 = int(500 * (numq / float(bidepth))) else: base1 = nump base2 = numq ## make statistical base call if bidepth >= mindepth_statistical: ishet, prob = get_binom(base1, base2, estE, estH) #LOGGER.info("ishet, prob, b1, b2: %s %s %s %s", ishet, prob, base1, base2) if prob < 0.95: cons[col] = 78 else: if ishet: cons[col] = TRANS[(pbase, qbase)] else: cons[col] = pbase ## make majrule base call else: #if bidepth >= mindepth_majrule: if nump == numq: cons[col] = TRANS[(pbase, qbase)] else: cons[col] = pbase return cons.view("S1")
python
{ "resource": "" }
q259361
nfilter1
validation
def nfilter1(data, reps):
    """ applies read depths filter

    Returns 1 when the summed replicate depth falls within the
    [mindepth_majrule, maxdepth] window set in data.paramsdict, else 0.
    """
    ## total read depth across replicates
    total = sum(reps)
    lower = data.paramsdict["mindepth_majrule"]
    upper = data.paramsdict["maxdepth"]
    ## pass (1) when depth is inside the inclusive window, fail (0) otherwise
    return 1 if lower <= total <= upper else 0
python
{ "resource": "" }
q259362
storealleles
validation
def storealleles(consens, hidx, alleles):
    """ store phased allele data for diploids

    Lowercases hetero sites in `consens` (a mutable list of bases) whose
    priority base is not on the same allele as the first hetero site's
    priority base, encoding phase in letter case.
    """
    ## priority base at the first hetero site (ordering: CATG);
    ## e.g., for W the priority base is A, not T
    bigbase = PRIORITY[consens[hidx[0]]]

    ## pick the allele whose first base matches the priority base
    bigallele = None
    for allele in alleles:
        if allele[0] == bigbase:
            bigallele = allele
            break

    ## uplow other bases relative to this one and the priority list
    ## e.g., if there are two hetero sites (WY) and the two alleles are
    ## AT and TC, then since bigbase of (W) is A second hetero site should
    ## be stored as y, since the ordering is swapped in this case; the priority
    ## base (C versus T) is C, but C goes with the minor base at h site 1.
    for hsite, pbase in zip(hidx[1:], bigallele[1:]):
        if PRIORITY[consens[hsite]] != pbase:
            consens[hsite] = consens[hsite].lower()

    ## return consens
    return consens
python
{ "resource": "" }
q259363
chunk_clusters
validation
def chunk_clusters(data, sample): """ split job into bits and pass to the client """ ## counter for split job submission num = 0 ## set optim size for chunks in N clusters. The first few chunks take longer ## because they contain larger clusters, so we create 4X as many chunks as ## processors so that they are split more evenly. optim = int((sample.stats.clusters_total // data.cpus) + \ (sample.stats.clusters_total % data.cpus)) ## break up the file into smaller tmp files for each engine ## chunking by cluster is a bit trickier than chunking by N lines chunkslist = [] ## open to clusters with gzip.open(sample.files.clusters, 'rb') as clusters: ## create iterator to sample 2 lines at a time pairdealer = itertools.izip(*[iter(clusters)]*2) ## Use iterator to sample til end of cluster done = 0 while not done: ## grab optim clusters and write to file. done, chunk = clustdealer(pairdealer, optim) chunkhandle = os.path.join(data.dirs.clusts, "tmp_"+str(sample.name)+"."+str(num*optim)) if chunk: chunkslist.append((optim, chunkhandle)) with open(chunkhandle, 'wb') as outchunk: outchunk.write("//\n//\n".join(chunk)+"//\n//\n") num += 1 return chunkslist
python
{ "resource": "" }
q259364
run
validation
def run(data, samples, force, ipyclient): """ checks if the sample should be run and passes the args """ ## prepare dirs data.dirs.consens = os.path.join(data.dirs.project, data.name+"_consens") if not os.path.exists(data.dirs.consens): os.mkdir(data.dirs.consens) ## zap any tmp files that might be leftover tmpcons = glob.glob(os.path.join(data.dirs.consens, "*_tmpcons.*")) tmpcats = glob.glob(os.path.join(data.dirs.consens, "*_tmpcats.*")) for tmpfile in tmpcons+tmpcats: os.remove(tmpfile) ## filter through samples for those ready samples = get_subsamples(data, samples, force) ## set up parallel client: how many cores? lbview = ipyclient.load_balanced_view() data.cpus = data._ipcluster["cores"] if not data.cpus: data.cpus = len(ipyclient.ids) ## wrap everything to ensure destruction of temp files inst = "" try: ## calculate depths, if they changed. samples = calculate_depths(data, samples, lbview) ## chunk clusters into bits for parallel processing lasyncs = make_chunks(data, samples, lbview) ## process chunks and cleanup process_chunks(data, samples, lasyncs, lbview) except KeyboardInterrupt as inst: raise inst finally: ## if process failed at any point delete tmp files tmpcons = glob.glob(os.path.join(data.dirs.clusts, "tmp_*.[0-9]*")) tmpcons += glob.glob(os.path.join(data.dirs.consens, "*_tmpcons.*")) tmpcons += glob.glob(os.path.join(data.dirs.consens, "*_tmpcats.*")) for tmpchunk in tmpcons: os.remove(tmpchunk) ## Finished step 5. Set step 6 checkpoint to 0 to force ## re-running from scratch. data._checkpoint = 0
python
{ "resource": "" }
q259365
calculate_depths
validation
def calculate_depths(data, samples, lbview): """ check whether mindepth has changed, and thus whether clusters_hidepth needs to be recalculated, and get new maxlen for new highdepth clusts. if mindepth not changed then nothing changes. """ ## send jobs to be processed on engines start = time.time() printstr = " calculating depths | {} | s5 |" recaljobs = {} maxlens = [] for sample in samples: recaljobs[sample.name] = lbview.apply(recal_hidepth, *(data, sample)) ## block until finished while 1: ready = [i.ready() for i in recaljobs.values()] elapsed = datetime.timedelta(seconds=int(time.time()-start)) progressbar(len(ready), sum(ready), printstr.format(elapsed), spacer=data._spacer) time.sleep(0.1) if len(ready) == sum(ready): print("") break ## check for failures and collect results modsamples = [] for sample in samples: if not recaljobs[sample.name].successful(): LOGGER.error(" sample %s failed: %s", sample.name, recaljobs[sample.name].exception()) else: modsample, _, maxlen, _, _ = recaljobs[sample.name].result() modsamples.append(modsample) maxlens.append(maxlen) ## reset global maxlen if something changed data._hackersonly["max_fragment_length"] = int(max(maxlens)) + 4 return samples
python
{ "resource": "" }
q259366
make_chunks
validation
def make_chunks(data, samples, lbview): """ calls chunk_clusters and tracks progress. """ ## first progress bar start = time.time() printstr = " chunking clusters | {} | s5 |" elapsed = datetime.timedelta(seconds=int(time.time()-start)) progressbar(10, 0, printstr.format(elapsed), spacer=data._spacer) ## send off samples to be chunked lasyncs = {} for sample in samples: lasyncs[sample.name] = lbview.apply(chunk_clusters, *(data, sample)) ## block until finished while 1: ready = [i.ready() for i in lasyncs.values()] elapsed = datetime.timedelta(seconds=int(time.time()-start)) progressbar(len(ready), sum(ready), printstr.format(elapsed), spacer=data._spacer) time.sleep(0.1) if len(ready) == sum(ready): print("") break ## check for failures for sample in samples: if not lasyncs[sample.name].successful(): LOGGER.error(" sample %s failed: %s", sample.name, lasyncs[sample.name].exception()) return lasyncs
python
{ "resource": "" }
q259367
make
validation
def make(data, samples): """ reads in .loci and builds alleles from case characters """ #read in loci file outfile = open(os.path.join(data.dirs.outfiles, data.name+".alleles"), 'w') lines = open(os.path.join(data.dirs.outfiles, data.name+".loci"), 'r') ## Get the longest sample name for pretty printing longname = max(len(x) for x in data.samples.keys()) ## Padding between name and sequence in output file. This should be the ## same as write_outfiles.write_tmp_loci.name_padding name_padding = 5 writing = [] loc = 0 for line in lines: if ">" in line: name, seq = line.split(" ")[0], line.split(" ")[-1] allele1, allele2 = splitalleles(seq.strip()) ## Format the output string. the "-2" below accounts for the additional ## 2 characters added to the sample name that don't get added to the ## snpsites line, so you gotta bump this line back 2 to make it ## line up right. writing.append(name+"_0"+" "*(longname-len(name)-2+name_padding)+allele1) writing.append(name+"_1"+" "*(longname-len(name)-2+name_padding)+allele2) else: writing.append(line.strip()) loc += 1 ## print every 10K loci " if not loc % 10000: outfile.write("\n".join(writing)+"\n") writing = [] outfile.write("\n".join(writing)) outfile.close()
python
{ "resource": "" }
q259368
cluster_info
validation
def cluster_info(ipyclient, spacer=""):
    """ reports host and engine info for an ipyclient """
    ## ask each idle engine for its hostname (async); busy engines skipped
    pending = []
    for eid in ipyclient.ids:
        engine = ipyclient[eid]
        if not engine.outstanding:
            pending.append(engine.apply(_socket.gethostname))

    ## collect the hostnames and summarize core counts per unique host
    hostnames = [job.get() for job in pending]
    lines = []
    for host in set(hostnames):
        lines.append("{}host compute node: [{} cores] on {}"\
            .format(spacer, hostnames.count(host), host))
    print("\n".join(lines))
python
{ "resource": "" }
q259369
_set_debug_dict
validation
def _set_debug_dict(__loglevel__):
    """ set the debug dict

    Configures the package logger (named __name__) to append records to
    __debugfile__ at the given level via logging.config.dictConfig.
    """
    _lconfig.dictConfig({
        'version': 1,
        'disable_existing_loggers': False,

        'formatters': {
            'standard': {
                'format': "%(asctime)s \t"\
                         +"pid=%(process)d \t"\
                         +"[%(filename)s]\t"\
                         +"%(levelname)s \t"\
                         +"%(message)s"
            },
        },
        'handlers': {
            __name__: {
                'level':__loglevel__,
                'class':'logging.FileHandler',
                'filename':__debugfile__,
                'formatter':"standard",
                'mode':'a+'
            }
        },
        'loggers':{
            __name__: {
                'handlers': [__name__],
                'level': __loglevel__,
                ## fixed typo: was 'propogate', a key dictConfig silently
                ## ignores; the schema key is 'propagate'
                'propagate': True
            }
        }
    })
python
{ "resource": "" }
q259370
_debug_off
validation
def _debug_off():
    """ turns off debugging by removing hidden tmp file """
    if _os.path.exists(__debugflag__):
        _os.remove(__debugflag__)
    ## NOTE(review): this creates a function-local __loglevel__; the
    ## module-level __loglevel__ is NOT changed — only the value passed
    ## to _set_debug_dict below. Confirm that is the intent.
    __loglevel__ = "ERROR"
    _LOGGER.info("debugging turned off")
    _set_debug_dict(__loglevel__)
python
{ "resource": "" }
q259371
_cmd_exists
validation
def _cmd_exists(cmd): """ check if dependency program is there """ return _subprocess.call("type " + cmd, shell=True, stdout=_subprocess.PIPE, stderr=_subprocess.PIPE) == 0
python
{ "resource": "" }
q259372
_getbins
validation
def _getbins():
    """ gets the right version of vsearch, muscle, and smalt
    depending on linux vs osx

    Returns a 7-tuple of absolute paths:
    (vsearch, muscle, smalt, bwa, samtools, bedtools, qmc).
    Exits on 32-bit systems; asserts that each required binary exists.
    """

    # Return error if system is 32-bit arch.
    # This is straight from the python docs:
    # https://docs.python.org/2/library/platform.html#cross-platform
    if not _sys.maxsize > 2**32:
        _sys.exit("ipyrad requires 64bit architecture")

    ## get platform mac or linux
    _platform = _sys.platform

    ## get current location of the package (or the active virtualenv root)
    if 'VIRTUAL_ENV' in _os.environ:
        ipyrad_path = _os.environ['VIRTUAL_ENV']
    else:
        path = _os.path.abspath(_os.path.dirname(__file__))
        ipyrad_path = _os.path.dirname(path)

    ## find bin directory
    ## BUGFIX: a stray `ipyrad_path = _os.path.dirname(path)` used to follow
    ## here, which raised NameError when VIRTUAL_ENV was set (path was never
    ## defined on that branch) and clobbered the venv path otherwise.
    bin_path = _os.path.join(ipyrad_path, "bin")

    ## get the correct binaries for this platform
    suffix = "linux-x86_64" if 'linux' in _platform else "osx-x86_64"
    bin_abs = _os.path.abspath(bin_path)
    vsearch = _os.path.join(bin_abs, "vsearch-"+suffix)
    muscle = _os.path.join(bin_abs, "muscle-"+suffix)
    smalt = _os.path.join(bin_abs, "smalt-"+suffix)
    bwa = _os.path.join(bin_abs, "bwa-"+suffix)
    samtools = _os.path.join(bin_abs, "samtools-"+suffix)
    bedtools = _os.path.join(bin_abs, "bedtools-"+suffix)
    ## only one compiled version available per platform, works for all?
    qmc = _os.path.join(bin_abs, "QMC-"+suffix)

    # Test for existence of binaries
    assert _cmd_exists(muscle), "muscle not found here: "+muscle
    assert _cmd_exists(vsearch), "vsearch not found here: "+vsearch
    assert _cmd_exists(smalt), "smalt not found here: "+smalt
    assert _cmd_exists(bwa), "bwa not found here: "+bwa
    assert _cmd_exists(samtools), "samtools not found here: "+samtools
    assert _cmd_exists(bedtools), "bedtools not found here: "+bedtools
    #assert _cmd_exists(qmc), "wQMC not found here: "+qmc
    return vsearch, muscle, smalt, bwa, samtools, bedtools, qmc
python
{ "resource": "" }
q259373
nworker
validation
def nworker(data, chunk): """ Worker to distribute work to jit funcs. Wraps everything on an engine to run single-threaded to maximize efficiency for multi-processing. """ ## set the thread limit on the remote engine oldlimit = set_mkl_thread_limit(1) ## open seqarray view, the modified arr is in bootstarr with h5py.File(data.database.input, 'r') as io5: seqview = io5["bootsarr"][:] maparr = io5["bootsmap"][:, 0] smps = io5["quartets"][chunk:chunk+data._chunksize] ## create an N-mask array of all seq cols nall_mask = seqview[:] == 78 ## init arrays to fill with results rquartets = np.zeros((smps.shape[0], 4), dtype=np.uint16) rinvariants = np.zeros((smps.shape[0], 16, 16), dtype=np.uint16) ## fill arrays with results as we compute them. This iterates ## over all of the quartet sets in this sample chunk. It would ## be nice to have this all numbified. for idx in xrange(smps.shape[0]): sidx = smps[idx] seqs = seqview[sidx] ## these axis calls cannot be numbafied, but I can't ## find a faster way that is JIT compiled, and I've ## really, really, really tried. Tried again now that ## numba supports axis args for np.sum. Still can't ## get speed improvements by numbifying this loop. nmask = np.any(nall_mask[sidx], axis=0) nmask += np.all(seqs == seqs[0], axis=0) ## here are the jitted funcs bidx, invar = calculate(seqs, maparr, nmask, TESTS) ## store results rquartets[idx] = smps[idx][bidx] rinvariants[idx] = invar ## reset thread limit set_mkl_thread_limit(oldlimit) ## return results... return rquartets, rinvariants
python
{ "resource": "" }
q259374
store_all
validation
def store_all(self):
    """
    Populate array with all possible quartets. This allows us to
    sample from the total, and also to continue from a checkpoint
    """
    with h5py.File(self.database.input, 'a') as io5:
        fillsets = io5["quartets"]

        ## generator for all quartet sets
        qiter = itertools.combinations(xrange(len(self.samples)), 4)
        i = 0
        while i < self.params.nquartets:
            ## sample a chunk of the next ordered N set of quartets
            dat = np.array(list(itertools.islice(qiter, self._chunksize)))
            ## clamp the write window in case the final chunk is short
            end = min(self.params.nquartets, dat.shape[0]+i)
            fillsets[i:end] = dat[:end-i]
            i += self._chunksize

            ## send progress update to stdout on engine
            print(min(i, self.params.nquartets))
python
{ "resource": "" }
q259375
store_random
validation
def store_random(self):
    """
    Populate array with random quartets sampled from a generator.
    Holding all sets in memory might take a lot, but holding a very
    large list of random numbers for which ones to sample will fit
    into memory for most reasonable sized sets. So we'll load a
    list of random numbers in the range of the length of total sets
    that can be generated, then only keep sets from the set generator
    if they are in the int list. I did several tests to check that
    random pairs are as likely as 0 & 1 to come up together in a
    random quartet set.
    """
    with h5py.File(self.database.input, 'a') as io5:
        fillsets = io5["quartets"]

        ## set generators: ordered quartets plus a shuffled index list
        qiter = itertools.combinations(xrange(len(self.samples)), 4)
        rand = np.arange(0, n_choose_k(len(self.samples), 4))
        np.random.shuffle(rand)
        rslice = rand[:self.params.nquartets]
        rss = np.sort(rslice)
        riter = iter(rss)
        del rand, rslice

        ## print progress update 1 to the engine stdout
        print(self._chunksize)

        ## set to store
        rando = riter.next()
        tmpr = np.zeros((self.params.nquartets, 4), dtype=np.uint16)
        tidx = 0
        while 1:
            try:
                ## walk the ordered generator, keeping only the sets whose
                ## ordinal position was drawn; riter.next() raises
                ## StopIteration once all draws are consumed
                for i, j in enumerate(qiter):
                    if i == rando:
                        tmpr[tidx] = j
                        tidx += 1
                        rando = riter.next()

                    ## print progress bar update to engine stdout
                    if not i % self._chunksize:
                        print(min(i, self.params.nquartets))

            except StopIteration:
                break

        ## store into database
        fillsets[:] = tmpr
        del tmpr
python
{ "resource": "" }
q259376
random_combination
validation
def random_combination(nsets, n, k):
    """
    Returns nsets unique random quartet sets sampled from
    n-choose-k without replacement combinations.
    """
    seen = set()
    ## keep drawing sorted k-subsets of range(n) until nsets unique
    ## combinations have been collected
    while len(seen) < nsets:
        draw = np.random.choice(n, k, replace=False)
        seen.add(tuple(sorted(draw)))
    return tuple(seen)
python
{ "resource": "" }
q259377
random_product
validation
def random_product(iter1, iter2):
    """ Random sampler for equal_splits functions

    Draws two distinct elements from each input and returns them
    concatenated as a length-4 numpy array.
    """
    half1 = np.random.choice(iter1, 2, replace=False)
    half2 = np.random.choice(iter2, 2, replace=False)
    return np.concatenate([half1, half2])
python
{ "resource": "" }
q259378
resolve_ambigs
validation
def resolve_ambigs(tmpseq): """ Randomly resolve ambiguous bases. This is applied to each boot replicate so that over reps the random resolutions don't matter. Sites are randomly resolved, so best for unlinked SNPs since otherwise linked SNPs are losing their linkage information... though it's not like we're using it anyways. """ ## the order of rows in GETCONS for aidx in xrange(6): #np.uint([82, 75, 83, 89, 87, 77]): ambig, res1, res2 = GETCONS[aidx] ## get true wherever tmpseq is ambig idx, idy = np.where(tmpseq == ambig) halfmask = np.random.choice(np.array([True, False]), idx.shape[0]) for col in xrange(idx.shape[0]): if halfmask[col]: tmpseq[idx[col], idy[col]] = res1 else: tmpseq[idx[col], idy[col]] = res2 return tmpseq
python
{ "resource": "" }
q259379
set_mkl_thread_limit
validation
def set_mkl_thread_limit(cores):
    """
    set mkl thread limit and return old value so we can reset
    when finished.

    Loads the Intel MKL runtime directly via ctypes (.so on linux,
    .dylib elsewhere — assumed to be osx).
    """
    if "linux" in sys.platform:
        mkl_rt = ctypes.CDLL('libmkl_rt.so')
    else:
        mkl_rt = ctypes.CDLL('libmkl_rt.dylib')
    oldlimit = mkl_rt.mkl_get_max_threads()
    ## mkl_set_num_threads takes an int* in the MKL C API
    mkl_rt.mkl_set_num_threads(ctypes.byref(ctypes.c_int(cores)))
    return oldlimit
python
{ "resource": "" }
q259380
get_total
validation
def get_total(tots, node):
    """ get total number of quartets possible for a split

    tots is the total number of tips in the tree; returns the product of
    the four clade sizes around the edge above `node` (0 for leaves/root).
    """
    if (node.is_leaf() or node.is_root()):
        return 0
    else:
        ## Get counts on down edges.
        ## How to treat polytomies here?
        ## NOTE(review): for polytomies the extra children are merged into
        ## down_l via `+=` — confirm the tree library supports this.
        if len(node.children) > 2:
            down_r = node.children[0]
            down_l = node.children[1]
            for child in node.children[2:]:
                down_l += child
        else:
            down_r, down_l = node.children
        lendr = sum(1 for i in down_r.iter_leaves())
        lendl = sum(1 for i in down_l.iter_leaves())

        ## get count on up edge sister
        up_r = node.get_sisters()[0]
        lenur = sum(1 for i in up_r.iter_leaves())

        ## everyone else
        lenul = tots - (lendr + lendl + lenur)

        ## return product
        return lendr * lendl * lenur * lenul
python
{ "resource": "" }
q259381
get_sampled
validation
def get_sampled(data, totn, node): """ get total number of quartets sampled for a split""" ## convert tip names to ints names = sorted(totn) cdict = {name: idx for idx, name in enumerate(names)} ## skip some nodes if (node.is_leaf() or node.is_root()): return 0 else: ## get counts on down edges if len(node.children) > 2: down_r = node.children[0] down_l = node.children[1] for child in node.children[2:]: down_l += child else: down_r, down_l = node.children lendr = set(cdict[i] for i in down_r.get_leaf_names()) lendl = set(cdict[i] for i in down_l.get_leaf_names()) ## get count on up edge sister up_r = node.get_sisters()[0] lenur = set(cdict[i] for i in up_r.get_leaf_names()) ## everyone else lenul = set(cdict[i] for i in totn) - set.union(lendr, lendl, lenur) idx = 0 sampled = 0 with h5py.File(data.database.output, 'r') as io5: end = io5["quartets"].shape[0] while 1: ## break condition if idx >= end: break ## counts matches qrts = io5["quartets"][idx:idx+data._chunksize] for qrt in qrts: sqrt = set(qrt) if all([sqrt.intersection(i) for i in [lendr, lendl, lenur, lenul]]): sampled += 1 ## increase span idx += data._chunksize return sampled
python
{ "resource": "" }
q259382
Tetrad._run_qmc
validation
def _run_qmc(self, boot): """ Runs quartet max-cut QMC on the quartets qdump file. """ ## build command self._tmp = os.path.join(self.dirs, ".tmptre") cmd = [ip.bins.qmc, "qrtt="+self.files.qdump, "otre="+self._tmp] ## run it proc = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE) res = proc.communicate() if proc.returncode: raise IPyradWarningExit(res[1]) ## parse tmp file written by qmc into a tree and rename it with open(self._tmp, 'r') as intree: tre = ete3.Tree(intree.read().strip()) names = tre.get_leaves() for name in names: name.name = self.samples[int(name.name)] tmptre = tre.write(format=9) ## save the tree to file if boot: self.trees.boots = os.path.join(self.dirs, self.name+".boots") with open(self.trees.boots, 'a') as outboot: outboot.write(tmptre+"\n") else: self.trees.tree = os.path.join(self.dirs, self.name+".tree") with open(self.trees.tree, 'w') as outtree: outtree.write(tmptre) ## save the file self._save()
python
{ "resource": "" }
q259383
Tetrad._insert_to_array
validation
def _insert_to_array(self, chunk, results): """ Enters results arrays into the HDF5 database. """ ## two result arrs chunksize = self._chunksize qrts, invs = results ## enter into db with h5py.File(self.database.output, 'r+') as io5: io5['quartets'][chunk:chunk+chunksize] = qrts ## entered as 0-indexed ! if self.params.save_invariants: if self.checkpoint.boots: key = "invariants/boot{}".format(self.checkpoint.boots) io5[key][chunk:chunk+chunksize] = invs else: io5["invariants/boot0"][chunk:chunk+chunksize] = invs
python
{ "resource": "" }
q259384
get_client
validation
def get_client(cluster_id, profile, engines, timeout, cores, quiet, spacer, **kwargs):
    """
    Creates a client to view ipcluster engines for a given profile and
    returns it with at least one engine spun up and ready to go. If no
    engines are found after nwait amount of time then an error is raised.
    If engines==MPI it waits a bit longer to find engines. If the number
    of engines is set then it waits even longer to try to find that number
    of engines.
    """
    ## save stds for later, we're gonna hide them to prevent external printing
    save_stdout = sys.stdout
    save_stderr = sys.stderr
    sys.stdout = cStringIO.StringIO()
    sys.stderr = cStringIO.StringIO()

    ## get cluster_info print string
    connection_string = "{}establishing parallel connection:".format(spacer)

    ## wrapped search for ipcluster
    try:
        ## are we looking for a running ipcluster instance?
        if profile not in [None, "default"]:
            args = {'profile': profile, "timeout": timeout}
        else:
            clusterargs = [cluster_id, profile, timeout]
            argnames = ["cluster_id", "profile", "timeout"]
            args = {key:value for key, value in zip(argnames, clusterargs)}

        ## get connection within timeout window of wait time and hide messages
        ipyclient = ipp.Client(**args)
        sys.stdout = save_stdout
        sys.stderr = save_stderr

        ## check that all engines have connected
        if (engines == "MPI") or ("ipyrad-cli-" in cluster_id):
            if not quiet:
                print(connection_string)

        ## poll up to 60s (6000 * 0.01) for engines to register
        for _ in range(6000):
            initid = len(ipyclient)
            time.sleep(0.01)
            ## If MPI then wait for all engines to start so we can report
            ## how many cores are on each host. If Local then only wait for
            ## one engine to be ready and then just go.
            if (engines == "MPI") or ("ipyrad-cli-" in cluster_id):
                ## wait for cores to be connected
                if cores:
                    time.sleep(0.1)
                    if initid == cores:
                        break
                if initid:
                    ## engine count stable for 3s => assume all are up
                    time.sleep(3)
                    if len(ipyclient) == initid:
                        break
            else:
                if cores:
                    if initid == cores:
                        break
                else:
                    if initid:
                        break

    except KeyboardInterrupt as inst:
        ## ensure stdout is reset even if Exception was raised
        sys.stdout = save_stdout
        sys.stderr = save_stderr
        raise inst

    ## This is raised if ipcluster is not running ------------
    except IOError as inst:
        ## ensure stdout is reset even if Exception was raised
        sys.stdout = save_stdout
        sys.stderr = save_stderr
        if "ipyrad-cli-" in cluster_id:
            raise IPyradWarningExit(NO_IPCLUSTER_CLI)
        else:
            raise IPyradWarningExit(NO_IPCLUSTER_API)

    except (ipp.TimeoutError, ipp.NoEnginesRegistered) as inst:
        ## raised by ipp if no connection file is found for 'nwait' seconds
        sys.stdout = save_stdout
        sys.stderr = save_stderr
        raise inst

    except Exception as inst:
        ## if any other exceptions were missed...
        sys.stdout = save_stdout
        sys.stderr = save_stderr
        raise inst

    finally:
        ## ensure that no matter what we reset the stds
        sys.stdout = save_stdout
        sys.stderr = save_stderr

    return ipyclient
python
{ "resource": "" }
q259385
memoize
validation
def memoize(func):
    """ Memoization decorator for a function taking one or more arguments.

    Returns a callable that caches results keyed on the argument tuple;
    each distinct argument tuple invokes `func` at most once.
    """
    class _Cache(dict):
        """ dict keyed on the argument tuple; fills itself on first miss """
        def __getitem__(self, *args):
            return dict.__getitem__(self, args)

        def __missing__(self, args):
            ## compute once, store, and return the cached value
            result = func(*args)
            self[args] = result
            return result

    return _Cache().__getitem__
python
{ "resource": "" }
q259386
ambigcutters
validation
def ambigcutters(seq): """ Returns both resolutions of a cut site that has an ambiguous base in it, else the single cut site """ resos = [] if any([i in list("RKSYWM") for i in seq]): for base in list("RKSYWM"): if base in seq: resos.append(seq.replace(base, AMBIGS[base][0])) resos.append(seq.replace(base, AMBIGS[base][1])) return resos else: return [seq, ""]
python
{ "resource": "" }
q259387
splitalleles
validation
def splitalleles(consensus): """ takes diploid consensus alleles with phase data stored as a mixture of upper and lower case characters and splits it into 2 alleles """ ## store two alleles, allele1 will start with bigbase allele1 = list(consensus) allele2 = list(consensus) hidx = [i for (i, j) in enumerate(consensus) if j in "RKSWYMrkswym"] ## do remaining h sites for idx in hidx: hsite = consensus[idx] if hsite.isupper(): allele1[idx] = PRIORITY[hsite] allele2[idx] = MINOR[hsite] else: allele1[idx] = MINOR[hsite.upper()] allele2[idx] = PRIORITY[hsite.upper()] ## convert back to strings allele1 = "".join(allele1) allele2 = "".join(allele2) return allele1, allele2
python
{ "resource": "" }
q259388
comp
validation
def comp(seq): """ returns a seq with complement. Preserves little n's for splitters.""" ## makes base to its small complement then makes upper return seq.replace("A", 't')\ .replace('T', 'a')\ .replace('C', 'g')\ .replace('G', 'c')\ .replace('n', 'Z')\ .upper()\ .replace("Z", "n")
python
{ "resource": "" }
q259389
fullcomp
validation
def fullcomp(seq): """ returns complement of sequence including ambiguity characters, and saves lower case info for multiple hetero sequences""" ## this is surely not the most efficient... seq = seq.replace("A", 'u')\ .replace('T', 'v')\ .replace('C', 'p')\ .replace('G', 'z')\ .replace('u', 'T')\ .replace('v', 'A')\ .replace('p', 'G')\ .replace('z', 'C') ## No complement for S & W b/c complements are S & W, respectively seq = seq.replace('R', 'u')\ .replace('K', 'v')\ .replace('Y', 'b')\ .replace('M', 'o')\ .replace('u', 'Y')\ .replace('v', 'M')\ .replace('b', 'R')\ .replace('o', 'K') seq = seq.replace('r', 'u')\ .replace('k', 'v')\ .replace('y', 'b')\ .replace('m', 'o')\ .replace('u', 'y')\ .replace('v', 'm')\ .replace('b', 'r')\ .replace('o', 'k') return seq
python
{ "resource": "" }
q259390
fastq_touchup_for_vsearch_merge
validation
def fastq_touchup_for_vsearch_merge(read, outfile, reverse=False): """ option to change orientation of reads and sets Qscore to B """ counts = 0 with open(outfile, 'w') as out: ## read in paired end read files 4 lines at a time if read.endswith(".gz"): fr1 = gzip.open(read, 'rb') else: fr1 = open(read, 'rb') quarts = itertools.izip(*[iter(fr1)]*4) ## a list to store until writing writing = [] while 1: try: lines = quarts.next() except StopIteration: break if reverse: seq = lines[1].strip()[::-1] else: seq = lines[1].strip() writing.append("".join([ lines[0], seq+"\n", lines[2], "B"*len(seq) ])) ## write to disk counts += 1 if not counts % 1000: out.write("\n".join(writing)+"\n") writing = [] if writing: out.write("\n".join(writing)) out.close() fr1.close()
python
{ "resource": "" }
q259391
revcomp
validation
def revcomp(sequence): "returns reverse complement of a string" sequence = sequence[::-1].strip()\ .replace("A", "t")\ .replace("T", "a")\ .replace("C", "g")\ .replace("G", "c").upper() return sequence
python
{ "resource": "" }
q259392
clustdealer
validation
def clustdealer(pairdealer, optim): """ return optim clusters given iterators, and whether it got all or not""" ccnt = 0 chunk = [] while ccnt < optim: ## try refreshing taker, else quit try: taker = itertools.takewhile(lambda x: x[0] != "//\n", pairdealer) oneclust = ["".join(taker.next())] except StopIteration: #LOGGER.debug('last chunk %s', chunk) return 1, chunk ## load one cluster while 1: try: oneclust.append("".join(taker.next())) except StopIteration: break chunk.append("".join(oneclust)) ccnt += 1 return 0, chunk
python
{ "resource": "" }
q259393
progressbar
validation
def progressbar(njobs, finished, msg="", spacer=" "): """ prints a progress bar """ if njobs: progress = 100*(finished / float(njobs)) else: progress = 100 hashes = '#'*int(progress/5.) nohash = ' '*int(20-len(hashes)) if not ipyrad.__interactive__: msg = msg.rsplit("|", 2)[0] args = [spacer, hashes+nohash, int(progress), msg] print("\r{}[{}] {:>3}% {} ".format(*args), end="") sys.stdout.flush()
python
{ "resource": "" }
q259394
get_threaded_view
validation
def get_threaded_view(ipyclient, split=True): """ gets optimum threaded view of ids given the host setup """ ## engine ids ## e.g., [0, 1, 2, 3, 4, 5, 6, 7, 8] eids = ipyclient.ids ## get host names ## e.g., ['a', 'a', 'b', 'b', 'a', 'c', 'c', 'c', 'c'] dview = ipyclient.direct_view() hosts = dview.apply_sync(socket.gethostname) ## group ids into a dict by their hostnames ## e.g., {a: [0, 1, 4], b: [2, 3], c: [5, 6, 7, 8]} hostdict = defaultdict(list) for host, eid in zip(hosts, eids): hostdict[host].append(eid) ## Now split threads on the same host into separate proc if there are many hostdictkeys = hostdict.keys() for key in hostdictkeys: gids = hostdict[key] maxt = len(gids) if len(gids) >= 4: maxt = 2 ## if 4 nodes and 4 ppn, put one sample per host if (len(gids) == 4) and (len(hosts) >= 4): maxt = 4 if len(gids) >= 6: maxt = 3 if len(gids) >= 8: maxt = 4 if len(gids) >= 16: maxt = 4 ## split ids into groups of maxt threaded = [gids[i:i+maxt] for i in xrange(0, len(gids), maxt)] lth = len(threaded) ## if anything was split (lth>1) update hostdict with new proc if lth > 1: hostdict.pop(key) for hostid in range(lth): hostdict[str(key)+"_"+str(hostid)] = threaded[hostid] ## make sure split numbering is correct #threaded = hostdict.values() #assert len(ipyclient.ids) <= len(list(itertools.chain(*threaded))) LOGGER.info("threaded_view: %s", dict(hostdict)) return hostdict
python
{ "resource": "" }
q259395
detect_cpus
validation
def detect_cpus(): """ Detects the number of CPUs on a system. This is better than asking ipyparallel since ipp has to wait for Engines to spin up. """ # Linux, Unix and MacOS: if hasattr(os, "sysconf"): if os.sysconf_names.has_key("SC_NPROCESSORS_ONLN"): # Linux & Unix: ncpus = os.sysconf("SC_NPROCESSORS_ONLN") if isinstance(ncpus, int) and ncpus > 0: return ncpus else: # OSX: return int(os.popen2("sysctl -n hw.ncpu")[1].read()) # Windows: if os.environ.has_key("NUMBER_OF_PROCESSORS"): ncpus = int(os.environ["NUMBER_OF_PROCESSORS"]) if ncpus > 0: return ncpus return 1
python
{ "resource": "" }
q259396
_call_structure
validation
def _call_structure(mname, ename, sname, name, workdir, seed, ntaxa, nsites, kpop, rep): """ make the subprocess call to structure """ ## create call string outname = os.path.join(workdir, "{}-K-{}-rep-{}".format(name, kpop, rep)) cmd = ["structure", "-m", mname, "-e", ename, "-K", str(kpop), "-D", str(seed), "-N", str(ntaxa), "-L", str(nsites), "-i", sname, "-o", outname] ## call the shell function proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) comm = proc.communicate() ## cleanup oldfiles = [mname, ename, sname] for oldfile in oldfiles: if os.path.exists(oldfile): os.remove(oldfile) return comm
python
{ "resource": "" }
q259397
_get_clumpp_table
validation
def _get_clumpp_table(self, kpop, max_var_multiple, quiet): """ private function to clumpp results""" ## concat results for k=x reps, excluded = _concat_reps(self, kpop, max_var_multiple, quiet) if reps: ninds = reps[0].inds nreps = len(reps) else: ninds = nreps = 0 if not reps: return "no result files found" clumphandle = os.path.join(self.workdir, "tmp.clumppparams.txt") self.clumppparams.kpop = kpop self.clumppparams.c = ninds self.clumppparams.r = nreps with open(clumphandle, 'w') as tmp_c: tmp_c.write(self.clumppparams._asfile()) ## create CLUMPP args string outfile = os.path.join(self.workdir, "{}-K-{}.outfile".format(self.name, kpop)) indfile = os.path.join(self.workdir, "{}-K-{}.indfile".format(self.name, kpop)) miscfile = os.path.join(self.workdir, "{}-K-{}.miscfile".format(self.name, kpop)) cmd = ["CLUMPP", clumphandle, "-i", indfile, "-o", outfile, "-j", miscfile, "-r", str(nreps), "-c", str(ninds), "-k", str(kpop)] ## call clumpp proc = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE) _ = proc.communicate() ## cleanup for rfile in [indfile, miscfile]: if os.path.exists(rfile): os.remove(rfile) ## parse clumpp results file ofile = os.path.join(self.workdir, "{}-K-{}.outfile".format(self.name, kpop)) if os.path.exists(ofile): csvtable = pd.read_csv(ofile, delim_whitespace=True, header=None) table = csvtable.loc[:, 5:] ## apply names to cols and rows table.columns = range(table.shape[1]) table.index = self.labels if not quiet: sys.stderr.write( "[K{}] {}/{} results permuted across replicates (max_var={}).\n"\ .format(kpop, nreps, nreps+excluded, max_var_multiple)) return table else: sys.stderr.write("No files ready for {}-K-{} in {}\n"\ .format(self.name, kpop, self.workdir)) return
python
{ "resource": "" }
q259398
_get_evanno_table
validation
def _get_evanno_table(self, kpops, max_var_multiple, quiet): """ Calculates Evanno method K value scores for a series of permuted clumpp results. """ ## iterate across k-vals kpops = sorted(kpops) replnliks = [] for kpop in kpops: ## concat results for k=x reps, excluded = _concat_reps(self, kpop, max_var_multiple, quiet) ## report if some results were excluded if excluded: if not quiet: sys.stderr.write( "[K{}] {} reps excluded (not converged) see 'max_var_multiple'.\n"\ .format(kpop, excluded)) if reps: ninds = reps[0].inds nreps = len(reps) else: ninds = nreps = 0 if not reps: print "no result files found" ## all we really need is the lnlik replnliks.append([i.est_lnlik for i in reps]) ## compare lnlik and var of results if len(replnliks) > 1: lnmean = [np.mean(i) for i in replnliks] lnstds = [np.std(i, ddof=1) for i in replnliks] else: lnmean = replnliks lnstds = np.nan tab = pd.DataFrame( index=kpops, data={ "Nreps": [len(i) for i in replnliks], "lnPK": [0] * len(kpops), "lnPPK": [0] * len(kpops), "deltaK": [0] * len(kpops), "estLnProbMean": lnmean, "estLnProbStdev": lnstds, } ) ## calculate Evanno's for kpop in kpops[1:]: tab.loc[kpop, "lnPK"] = tab.loc[kpop, "estLnProbMean"] \ - tab.loc[kpop-1, "estLnProbMean"] for kpop in kpops[1:-1]: tab.loc[kpop, "lnPPK"] = abs(tab.loc[kpop+1, "lnPK"] - tab.loc[kpop, "lnPK"]) tab.loc[kpop, "deltaK"] = (abs( tab.loc[kpop+1, "estLnProbMean"] - \ 2.0 * tab.loc[kpop, "estLnProbMean"] + \ tab.loc[kpop-1, "estLnProbMean"]) / \ tab.loc[kpop, "estLnProbStdev"]) ## return table return tab
python
{ "resource": "" }
q259399
Structure.result_files
validation
def result_files(self): """ returns a list of files that have finished structure """ reps = OPJ(self.workdir, self.name+"-K-*-rep-*_f") repfiles = glob.glob(reps) return repfiles
python
{ "resource": "" }