text
stringlengths
81
112k
def check_bed_file(fname):
    """Check if the inputfile is a valid bed-file.

    Exits the process with an error message if the file does not exist,
    if a data line has fewer than three tab-separated columns, or if the
    start/end coordinates are not valid integers.

    Parameters
    ----------
    fname : str
        Name of the input file in BED (or narrowPeak) format.
    """
    if not os.path.exists(fname):
        logger.error("Inputfile %s does not exist!", fname)
        sys.exit(1)

    # Use a context manager so the file handle is always closed
    # (the original leaked the handle).
    with open(fname) as f:
        for i, line in enumerate(f):
            # Skip comments and BED-specific header lines.
            if line.startswith(("#", "track", "browser")):
                continue

            vals = line.strip().split("\t")
            if len(vals) < 3:
                logger.error(
                    "Expecting tab-separated values "
                    "(chromosome<tab>start<tab>end) on line %s of file %s",
                    i + 1, fname)
                sys.exit(1)
            try:
                int(vals[1]), int(vals[2])
            except ValueError:
                logger.error(
                    "No valid integer coordinates on line %s of file %s",
                    i + 1, fname)
                sys.exit(1)
            # NOTE: the original contained a no-op check that tried
            # float(vals[3]) and silently ignored failure; it had no
            # observable effect and was removed.
def check_denovo_input(inputfile, params):
    """Check if an input file is valid, which means BED, narrowPeak or FASTA.

    Returns the detected input type and the list of background types that
    are valid for it; exits with an error for unrecognized formats or when
    no valid background remains.
    """
    background = params["background"]
    input_type = determine_file_type(inputfile)

    if input_type == "fasta":
        valid_bg = FA_VALID_BGS
    elif input_type in ("bed", "narrowpeak"):
        genome = params["genome"]
        valid_bg = BED_VALID_BGS
        if "genomic" in background or "gc" in background:
            # Make sure the genome is available before using it.
            Genome(genome)
        # BED-specific validation; also works for narrowPeak files.
        check_bed_file(inputfile)
    else:
        sys.stderr.write("Format of inputfile {} not recognized.\n".format(inputfile))
        sys.stderr.write("Input should be FASTA, BED or narrowPeak.\n")
        sys.stderr.write("See https://genome.ucsc.edu/FAQ/FAQformat.html for specifications.\n")
        sys.exit(1)

    for bg in background:
        if bg not in valid_bg:
            logger.info("Input type is %s, ignoring background type '%s'",
                        input_type, bg)

    background = [bg for bg in background if bg in valid_bg]
    if not background:
        logger.error("No valid backgrounds specified!")
        sys.exit(1)

    return input_type, background
def scan_to_best_match(fname, motifs, ncpus=None, genome=None, score=False):
    """Scan a FASTA file with motifs.

    Scan a FASTA file and return a dictionary with the best match per motif.

    Parameters
    ----------
    fname : str
        Filename of a sequence file in FASTA format.
    motifs : list
        List of motif instances.

    Returns
    -------
    result : dict
        Dictionary with motif scanning results.
    """
    # Set up the scanner; a threshold of 0.0 guarantees every sequence
    # produces a best match for every motif.
    scanner = Scanner(ncpus=ncpus)
    scanner.set_motifs(motifs)
    scanner.set_threshold(threshold=0.0)
    if genome:
        scanner.set_genome(genome)

    # A motif filename may be passed instead of a list of instances.
    if isinstance(motifs, six.string_types):
        motifs = read_motifs(motifs)

    logger.debug("scanning %s...", fname)
    result = {m.id: [] for m in motifs}

    matches_iter = scanner.best_score(fname) if score else scanner.best_match(fname)
    for per_seq in matches_iter:
        for motif, best in zip(motifs, per_seq):
            result[motif.id].append(best)

    # Close the pool and reclaim memory.
    del scanner
    return result
def set_background(self, fname=None, genome=None, length=200, nseq=10000):
    """Set the background to use for FPR and z-score calculations.

    Background can be specified either as a genome name or as the
    name of a FASTA file.

    Parameters
    ----------
    fname : str, optional
        Name of FASTA file to use as background.
    genome : str, optional
        Name of genome to use to retrieve random sequences.
    length : int, optional
        Length of genomic sequences to retrieve. The default is 200.
    nseq : int, optional
        Number of genomic sequences to retrieve.

    Raises
    ------
    ValueError
        If both `genome` and `fname` are given, or if neither is given
        and no default genome is set on this instance.
    IOError
        If `fname` does not exist.
    """
    length = int(length)

    # The two background sources are mutually exclusive.
    # BUG FIX: the original message ("Need either genome or filename")
    # was misleading here, since this branch fires when BOTH are given.
    if genome and fname:
        raise ValueError(
            "Need either genome or filename for background, not both.")

    if fname:
        if not os.path.exists(fname):
            raise IOError(
                "Background file {} does not exist!".format(fname))
        self.background = Fasta(fname)
        self.background_hash = file_checksum(fname)
        return

    if not genome:
        if self.genome:
            # Fall back to the genome configured on this instance.
            genome = self.genome
            logger.info(
                "Using default background: genome {} with length {}".format(
                    genome, length))
        else:
            raise ValueError(
                "Need either genome or filename for background.")

    logger.info(
        "Using background: genome {} with length {}".format(genome, length))
    with Cache(CACHE_DIR) as cache:
        # Cache key combines genome and fragment length.
        self.background_hash = "{}\{}".format(genome, int(length))
        fa = cache.get(self.background_hash)
        if not fa:
            fa = RandomGenomicFasta(genome, length, nseq)
            cache.set(self.background_hash, fa)
        self.background = fa
def set_threshold(self, fpr=None, threshold=None):
    """Set motif scanning threshold based on background sequences.

    Parameters
    ----------
    fpr : float, optional
        Desired FPR, between 0.0 and 1.0.

    threshold : float or str, optional
        Desired motif threshold, expressed as the fraction of the
        difference between minimum and maximum score of the PWM.
        Should either be a float between 0.0 and 1.0 or a filename
        with thresholds as created by 'gimme threshold'.
    """
    # fpr and threshold are mutually exclusive.
    if threshold and fpr:
        raise ValueError("Need either fpr or threshold.")

    if fpr:
        fpr = float(fpr)
        if not (0.0 < fpr < 1.0):
            raise ValueError("Parameter fpr should be between 0 and 1")

    if not self.motifs:
        raise ValueError("please run set_motifs() first")

    thresholds = {}
    motifs = read_motifs(self.motifs)

    if threshold is not None:
        # Explicit threshold given: parse it directly, no background needed.
        self.threshold = parse_threshold_values(self.motifs, threshold)
        return

    if not self.background:
        try:
            self.set_background()
        except:  # NOTE(review): bare except hides the original failure cause
            raise ValueError("please run set_background() first")

    seqs = self.background.seqs

    with Cache(CACHE_DIR) as cache:
        scan_motifs = []
        for motif in motifs:
            # Cache key combines motif identity, background and FPR.
            k = "{}|{}|{:.4f}".format(motif.hash(), self.background_hash, fpr)
            threshold = cache.get(k)
            if threshold is None:
                # Not cached yet; determine threshold from sequences below.
                scan_motifs.append(motif)
            else:
                # A threshold equal to the PWM max means nothing can ever
                # match (stored as None); equal to the min means everything
                # matches (stored as 0.0).
                if np.isclose(threshold, motif.pwm_max_score()):
                    thresholds[motif.id] = None
                elif np.isclose(threshold, motif.pwm_min_score()):
                    thresholds[motif.id] = 0.0
                else:
                    thresholds[motif.id] = threshold

        if len(scan_motifs) > 0:
            logger.info("Determining FPR-based threshold")
            for motif, threshold in self._threshold_from_seqs(scan_motifs, seqs, fpr):
                k = "{}|{}|{:.4f}".format(motif.hash(),
                    self.background_hash, fpr)
                # Store the freshly computed threshold for next time.
                cache.set(k, threshold)
                if np.isclose(threshold, motif.pwm_max_score()):
                    thresholds[motif.id] = None
                elif np.isclose(threshold, motif.pwm_min_score()):
                    thresholds[motif.id] = 0.0
                else:
                    thresholds[motif.id] = threshold

    # NOTE(review): `threshold` here is the last loop value (or the explicit
    # argument when no scan happened) — confirm this string is intended.
    self.threshold_str = "{}_{}_{}".format(fpr, threshold, self.background_hash)
    self.threshold = thresholds
def count(self, seqs, nreport=100, scan_rc=True):
    """Count the number of matches above the cutoff.

    Returns an iterator of lists containing integer counts,
    one list per sequence with one count per motif.
    """
    for match_lists in self.scan(seqs, nreport, scan_rc):
        yield [len(matches) for matches in match_lists]
def total_count(self, seqs, nreport=100, scan_rc=True):
    """Count the number of matches above the cutoff, summed over all sequences.

    Returns an array with one total count per motif.
    """
    count_table = list(self.count(seqs, nreport, scan_rc))
    return np.sum(np.array(count_table), 0)
def best_score(self, seqs, scan_rc=True, normalize=False):
    """Give the score of the best match of each motif in each sequence.

    Returns an iterator of arrays containing floats, one array per
    sequence with one score per motif.

    Parameters
    ----------
    seqs : list or str
        Sequences or regions to scan.
    scan_rc : bool, optional
        Also scan the reverse complement.
    normalize : bool, optional
        Z-score normalize scores using per-motif mean/std.
    """
    self.set_threshold(threshold=0.0)

    # BUG FIX: mean/std lookup is now done only when normalizing, and is
    # always (re)computed in that case — the original either crashed with
    # a KeyError when normalize=False and meanstd was empty, or left
    # means/stds undefined when meanstd was already populated.
    if normalize:
        if len(self.meanstd) == 0:
            self.set_meanstd()
        means = np.array([self.meanstd[m][0] for m in self.motif_ids])
        stds = np.array([self.meanstd[m][1] for m in self.motif_ids])

    for matches in self.scan(seqs, 1, scan_rc):
        # nreport=1, so each match list holds at most one (best) hit.
        scores = np.array(
            [sorted(m, key=lambda x: x[0])[0][0] for m in matches if len(m) > 0]
        )
        if normalize:
            scores = (scores - means) / stds
        yield scores
def best_match(self, seqs, scan_rc=True):
    """Give the best match of each motif in each sequence.

    Returns an iterator of nested lists containing tuples:
    (score, position, strand).
    """
    # Threshold 0.0 ensures every motif has at least one reported match.
    self.set_threshold(threshold=0.0)
    for match_lists in self.scan(seqs, 1, scan_rc):
        yield [matches[0] for matches in match_lists]
def scan(self, seqs, nreport=100, scan_rc=True, normalize=False):
    """Scan a set of regions / sequences.

    Yields, per sequence, a list (one entry per motif) of match tuples
    (score, position, strand); scores are optionally z-normalized.
    """
    if not self.threshold:
        sys.stderr.write(
            "Using default threshold of 0.95. "
            "This is likely not optimal!\n"
        )
        self.set_threshold(threshold=0.95)

    seqs = as_fasta(seqs, genome=self.genome)
    results = self._scan_sequences(seqs.seqs, nreport, scan_rc)

    if not normalize:
        for result in results:
            yield result
        return

    # Look up per-motif mean/std once, before iterating.
    if len(self.meanstd) == 0:
        self.set_meanstd()
    stats = [self.meanstd.get(m_id) for m_id in self.motif_ids]
    means = [s[0] for s in stats]
    stds = [s[1] for s in stats]

    for result in results:
        normalized = []
        for idx, matches in enumerate(result):
            normalized.append(
                [((m[0] - means[idx]) / stds[idx], m[1], m[2]) for m in matches]
            )
        yield normalized
def roc(args):
    """Calculate ROC_AUC and other metrics and optionally plot ROC curve.

    Parameters
    ----------
    args : argparse.Namespace
        Parsed command-line arguments: outfile, pwmfile, ids, sample,
        background, genome, ncpus, outdir.
    """
    outputfile = args.outfile
    # Default extension for image
    if outputfile and not outputfile.endswith(".png"):
        outputfile += ".png"

    motifs = read_motifs(args.pwmfile, fmt="pwm")

    # Restrict to explicitly requested motif ids, if any.
    ids = []
    if args.ids:
        ids = args.ids.split(",")
    else:
        ids = [m.id for m in motifs]
    motifs = [m for m in motifs if (m.id in ids)]

    stats = [
        "phyper_at_fpr",
        "roc_auc",
        "pr_auc",
        "enr_at_fpr",
        "recall_at_fdr",
        "roc_values",
        "matches_at_fpr",
    ]

    plot_x = []
    plot_y = []
    legend = []

    # Report goes to stdout unless an output directory was requested.
    f_out = sys.stdout
    if args.outdir:
        if not os.path.exists(args.outdir):
            os.makedirs(args.outdir)
        f_out = open(args.outdir + "/gimme.roc.report.txt", "w")

    # Print the metrics
    f_out.write("Motif\t# matches\t# matches background\tP-value\tlog10 P-value\tROC AUC\tPR AUC\tEnr. at 1% FPR\tRecall at 10% FDR\n")
    for motif_stats in calc_stats_iterator(motifs, args.sample, args.background,
            genome=args.genome, stats=stats, ncpus=args.ncpus):
        for motif in motifs:
            if str(motif) in motif_stats:
                if outputfile:
                    x, y = motif_stats[str(motif)]["roc_values"]
                    plot_x.append(x)
                    plot_y.append(y)
                    legend.append(motif.id)
                # Guard against log10(0) for extremely small p-values.
                log_pvalue = np.inf
                if motif_stats[str(motif)]["phyper_at_fpr"] > 0:
                    log_pvalue = -np.log10(motif_stats[str(motif)]["phyper_at_fpr"])
                f_out.write("{}\t{:d}\t{:d}\t{:.2e}\t{:.3f}\t{:.3f}\t{:.3f}\t{:.2f}\t{:0.4f}\n".format(
                    motif.id,
                    motif_stats[str(motif)]["matches_at_fpr"][0],
                    motif_stats[str(motif)]["matches_at_fpr"][1],
                    motif_stats[str(motif)]["phyper_at_fpr"],
                    log_pvalue,
                    motif_stats[str(motif)]["roc_auc"],
                    motif_stats[str(motif)]["pr_auc"],
                    motif_stats[str(motif)]["enr_at_fpr"],
                    motif_stats[str(motif)]["recall_at_fdr"],
                ))

    # BUG FIX: only close the report file when we opened one — the
    # original unconditionally called f_out.close(), which closed
    # sys.stdout when no output directory was given.
    if args.outdir:
        f_out.close()
        html_report(
            args.outdir,
            args.outdir + "/gimme.roc.report.txt",
            args.pwmfile,
            0.01,
        )

    # Plot the ROC curve
    if outputfile:
        roc_plot(outputfile, plot_x, plot_y, ids=legend)
def ssd(p1, p2):
    """Calculates motif position similarity based on sum of squared distances.

    Parameters
    ----------
    p1 : list
        Motif position 1.
    p2 : list
        Motif position 2.

    Returns
    -------
    score : float
    """
    # Higher is more similar: 2 minus the summed squared differences.
    total = 0.0
    for a, b in zip(p1, p2):
        diff = a - b
        total += diff * diff
    return 2 - total
def seqcor(m1, m2, seq=None):
    """Calculates motif similarity based on Pearson correlation of scores.

    Based on Kielbasa (2015) and Grau (2015). Scores are calculated based
    on scanning a de Bruijn sequence of 7-mers. This sequence is taken
    from ShortCAKE (Orenstein & Shamir, 2015). Optionally another sequence
    can be given as an argument.

    Parameters
    ----------
    m1 : Motif instance
        Motif 1 to compare.
    m2 : Motif instance
        Motif 2 to compare.
    seq : str, optional
        Sequence to use for scanning instead of k=7 de Bruijn sequence.

    Returns
    -------
    score, position, strand
    """
    l1 = len(m1)
    l2 = len(m2)
    l = max(l1, l2)

    if seq is None:
        seq = RCDB  # default scanning sequence (de Bruijn, per docstring)
    L = len(seq)

    # Scan RC de Bruijn sequence
    result1 = pfmscan(seq, m1.pwm, m1.pwm_min_score(), len(seq), False, True)
    result2 = pfmscan(seq, m2.pwm, m2.pwm_min_score(), len(seq), False, True)
    # Reverse complement of motif 2
    result3 = pfmscan(seq, m2.rc().pwm, m2.rc().pwm_min_score(), len(seq), False, True)

    result1 = np.array(result1)
    result2 = np.array(result2)
    result3 = np.array(result3)

    # Return maximum correlation
    c = []
    # Shift m2 (and its reverse complement) relative to m1 by up to
    # two-thirds of m1's length; score each offset by Pearson correlation
    # (distance.correlation is 1 - Pearson r).
    for i in range(l1 - l1 // 3):
        c.append([1 - distance.correlation(result1[:L-l-i], result2[i:L-l]), i, 1])
        c.append([1 - distance.correlation(result1[:L-l-i], result3[i:L-l]), i, -1])
    # Shift m1 relative to m2 (negative offsets).
    for i in range(l2 - l2 // 3):
        c.append([1 - distance.correlation(result1[i:L-l], result2[:L-l-i]), -i, 1])
        c.append([1 - distance.correlation(result1[i:L-l], result3[:L-l-i]), -i, -1])

    # Best (highest-correlation) alignment wins.
    return sorted(c, key=lambda x: x[0])[-1]
def compare_motifs(self, m1, m2, match="total", metric="wic", combine="mean", pval=False):
    """Compare two motifs.

    The similarity metric can be any of seqcor, pcc, ed, distance, wic,
    chisq, akl or ssd. If match is 'total' the similarity score is
    calculated for the whole match, including positions that are not
    present in both motifs. If match is partial or subtotal, only the
    matching positions are used to calculate the score. The score of
    individual positions is combined using either the mean or the sum.

    Note that the match and combine parameters have no effect on the
    seqcor similarity metric.

    Parameters
    ----------
    m1 : Motif instance
        Motif instance 1.
    m2 : Motif instance
        Motif instance 2.
    match : str, optional
        Match can be "partial", "subtotal" or "total". Not all metrics use this.
    metric : str, optional
        Distance metric.
    combine : str, optional
        Combine positional scores using "mean" or "sum". Not all metrics use this.
    pval : bool, optional
        Calculate p-value of match.

    Returns
    -------
    score, position, strand
    """
    # seqcor ignores match/combine entirely.
    if metric == "seqcor":
        return seqcor(m1, m2)
    elif match == "partial":
        if pval:
            # NOTE(review): this passes the literal "total" to pvalue()
            # instead of `match` ("partial") — confirm this is intentional.
            return self.pvalue(m1, m2, "total", metric, combine,
                self.max_partial(m1.pwm, m2.pwm, metric, combine))
        elif metric in ["pcc", "ed", "distance", "wic", "chisq", "ssd"]:
            # These metrics operate on the weight matrices (pwm).
            return self.max_partial(m1.pwm, m2.pwm, metric, combine)
        else:
            # Remaining metrics operate on the frequency matrices (pfm).
            return self.max_partial(m1.pfm, m2.pfm, metric, combine)

    elif match == "total":
        if pval:
            return self.pvalue(m1, m2, match, metric, combine,
                self.max_total(m1.pwm, m2.pwm, metric, combine))
        elif metric in ["pcc", 'akl']:
            # Slightly randomize the weight matrix
            return self.max_total(m1.wiggle_pwm(), m2.wiggle_pwm(), metric, combine)
        elif metric in ["ed", "distance", "wic", "chisq", "pcc", "ssd"]:
            return self.max_total(m1.pwm, m2.pwm, metric, combine)
        else:
            return self.max_total(m1.pfm, m2.pfm, metric, combine)

    elif match == "subtotal":
        if metric in ["pcc", "ed", "distance", "wic", "chisq", "ssd"]:
            return self.max_subtotal(m1.pwm, m2.pwm, metric, combine)
        else:
            return self.max_subtotal(m1.pfm, m2.pfm, metric, combine)
    # NOTE(review): an unrecognized `match` value falls through and
    # returns None implicitly.
def get_all_scores(self, motifs, dbmotifs, match, metric, combine,
        pval=False, parallel=True, trim=None, ncpus=None):
    """Pairwise comparison of a set of motifs compared to reference motifs.

    Parameters
    ----------
    motifs : list
        List of Motif instances.
    dbmotifs : list
        List of Motif instances.
    match : str
        Match can be "partial", "subtotal" or "total". Not all metrics use this.
    metric : str
        Distance metric.
    combine : str
        Combine positional scores using "mean" or "sum". Not all metrics use this.
    pval : bool, optional
        Calculate p-value of match.
    parallel : bool, optional
        Use multiprocessing for parallel execution. True by default.
    trim : float or None
        If a float value is specified, motifs are trimmed using this IC
        cutoff before comparison.
    ncpus : int or None
        Specifies the number of cores to use for parallel execution.

    Returns
    -------
    scores : dict
        Dictionary with scores.
    """
    # trim motifs first, if specified
    if trim:
        for m in motifs:
            m.trim(trim)
        for m in dbmotifs:
            m.trim(trim)

    # hash of result scores
    scores = {}

    if parallel:
        # Divide the job into big chunks, to keep parallel overhead to minimum
        # Number of chunks = number of processors available
        if ncpus is None:
            ncpus = int(MotifConfig().get_default_params()["ncpus"])
        pool = Pool(processes=ncpus, maxtasksperchild=1000)

        batch_len = len(dbmotifs) // ncpus
        if batch_len <= 0:
            batch_len = 1
        jobs = []
        for i in range(0, len(dbmotifs), batch_len):
            # submit jobs to the job server
            p = pool.apply_async(_get_all_scores,
                args=(self, motifs, dbmotifs[i: i + batch_len], match, metric, combine, pval))
            jobs.append(p)

        pool.close()
        for job in jobs:
            # Get the job result
            result = job.get()
            # and update the result score
            for m1, v in result.items():
                for m2, s in v.items():
                    if m1 not in scores:
                        scores[m1] = {}
                    scores[m1][m2] = s

        pool.join()
    else:
        # Do the whole thing at once if we don't want parallel
        scores = _get_all_scores(self, motifs, dbmotifs, match, metric, combine, pval)

    return scores
def get_closest_match(self, motifs, dbmotifs=None, match="partial",
        metric="wic", combine="mean", parallel=True, ncpus=None):
    """Return best match in database for motifs.

    Parameters
    ----------
    motifs : list or str
        Filename of motifs or list of motifs.
    dbmotifs : list or str, optional
        Database motifs, default will be used if not specified.
    match : str, optional
    metric : str, optional
    combine : str, optional
    ncpus : int, optional
        Number of threads to use.

    Returns
    -------
    closest_match : dict
    """
    if dbmotifs is None:
        # Fall back to the configured default motif database.
        pwm = self.config.get_default_params()["motif_db"]
        pwmdir = self.config.get_motif_dir()
        dbmotifs = os.path.join(pwmdir, pwm)

    motifs = parse_motifs(motifs)
    dbmotifs = parse_motifs(dbmotifs)

    dbmotif_lookup = {m.id: m for m in dbmotifs}

    scores = self.get_all_scores(motifs, dbmotifs, match, metric, combine,
            parallel=parallel, ncpus=ncpus)

    # Keep only the best-scoring database motif per query motif
    # (sorted[-1] preserves the original tie-breaking behavior).
    for query_id in scores:
        scores[query_id] = sorted(
            scores[query_id].items(),
            key=lambda x: x[1][0]
        )[-1]

    # Re-run the best comparison with pval=True to attach a p-value.
    for motif in motifs:
        dbmotif, score = scores[motif.id]
        pval, pos, orient = self.compare_motifs(
            motif, dbmotif_lookup[dbmotif], match, metric, combine, True)
        scores[motif.id] = [dbmotif, (list(score) + [pval])]

    return scores
def list_regions(service):
    """List regions for the service.

    Parameters
    ----------
    service : object
        Service wrapper exposing a ``regions()`` iterable of region
        objects with ``name`` and ``endpoint`` attributes.
    """
    for region in service.regions():
        # BUG FIX: parenthesized print is valid under both Python 2 and 3;
        # the original print statement is a syntax error on Python 3.
        print('%(name)s: %(endpoint)s' % {
            'name': region.name,
            'endpoint': region.endpoint,
        })
def elb_table(balancers):
    """Build a nice looking table of information from a list of load balancers.

    Returns the populated PrettyTable.
    """
    table = prettytable.PrettyTable(['Name', 'DNS', 'Ports', 'Zones', 'Created'])
    table.align = 'l'
    for balancer in balancers:
        listener_lines = [
            '%s: %s -> %s' % (l[2], l[0], l[1]) for l in balancer.listeners
        ]
        table.add_row([
            balancer.name,
            balancer.dns_name,
            '\n'.join(listener_lines),
            '\n'.join(balancer.availability_zones),
            balancer.created_time,
        ])
    return table
def ec2_table(instances):
    """Build a nice looking table of information from a list of instances.

    Returns the populated PrettyTable.
    """
    table = prettytable.PrettyTable(['ID', 'State', 'Monitored', 'Image',
                                     'Name', 'Type', 'SSH key', 'DNS'])
    table.align = 'l'
    for instance in instances:
        # The Name tag may be absent; default to an empty string.
        label = instance.tags.get('Name', '')
        table.add_row([
            instance.id,
            instance.state,
            instance.monitored,
            instance.image_id,
            label,
            instance.instance_type,
            instance.key_name,
            instance.dns_name,
        ])
    return table
def ec2_image_table(images):
    """Build a nice looking table of information from images.

    Returns the populated PrettyTable.
    """
    table = prettytable.PrettyTable(['ID', 'State', 'Name', 'Owner',
                                     'Root device', 'Is public', 'Description'])
    table.align = 'l'
    for image in images:
        table.add_row([
            image.id,
            image.state,
            image.name,
            image.ownerId,
            image.root_device_type,
            image.is_public,
            image.description,
        ])
    return table
def ec2_fab(service, args):
    """Run Fabric commands against EC2 instances.

    Parameters
    ----------
    service : object
        EC2 service wrapper used to list instances and resolve hostnames.
    args : argparse.Namespace
        Parsed arguments (elb, instances, file, methods).
    """
    instance_ids = args.instances
    instances = service.list(elb=args.elb, instance_ids=instance_ids)
    hosts = service.resolve_hosts(instances)

    fab.env.hosts = hosts
    fab.env.key_filename = settings.get('SSH', 'KEY_FILE')
    fab.env.user = settings.get('SSH', 'USER', getpass.getuser())
    fab.env.parallel = True

    fabfile = find_fabfile(args.file)
    if not fabfile:
        # Parenthesized print is valid on both Python 2 and 3.
        print('Couldn\'t find any fabfiles!')
        return

    # NOTE: attribute name 'real_fabile' (sic) kept for compatibility.
    fab.env.real_fabile = fabfile
    docstring, callables, default = load_fabfile(fabfile)
    fab_state.commands.update(callables)

    commands_to_run = parse_arguments(args.methods)
    # BUG FIX: the loop variable was renamed from `args` to `cmd_args`
    # to stop it shadowing the function parameter of the same name.
    for name, cmd_args, cmd_kwargs, arg_hosts, arg_roles, arg_exclude_hosts in commands_to_run:
        fab.execute(name, hosts=arg_hosts, roles=arg_roles,
                    exclude_hosts=arg_exclude_hosts, *cmd_args, **cmd_kwargs)
def main():
    """AWS support script's main method.

    Builds the `aws` argument parser with one sub-parser per Amazon
    service (Auto Scaling, EC2, ELB), each with its own action
    sub-commands, then dispatches to the handler bound via
    ``set_defaults(func=...)``.
    """
    # NOTE(review): the `version=` keyword of ArgumentParser was removed
    # in Python 3 argparse — this code appears to target Python 2.
    p = argparse.ArgumentParser(description='Manage Amazon AWS services',
                                prog='aws', version=__version__)
    subparsers = p.add_subparsers(help='Select Amazon AWS service to use')

    # Auto Scaling
    as_service = subparsers.add_parser('as', help='Amazon Auto Scaling')
    as_subparsers = as_service.add_subparsers(help='Perform action')
    as_service_list = as_subparsers.add_parser('list', help='List Auto Scaling groups')
    as_service_list.set_defaults(func=as_list_handler)

    # Elastic Cloud Computing
    ec2_service = subparsers.add_parser('ec2', help='Amazon Elastic Compute Cloud')
    ec2_subparsers = ec2_service.add_subparsers(help='Perform action')
    # ec2 list
    ec2_service_list = ec2_subparsers.add_parser('list', help='List items')
    ec2_service_list.add_argument('--elb', '-e', help='Filter instances inside this ELB instance')
    ec2_service_list.add_argument('--instances', '-i', nargs='*', metavar=('id', 'id'), help='List of instance IDs to use as filter')
    ec2_service_list.add_argument('--type', default='instances', choices=['instances', 'regions', 'images'], help='List items of this type')
    ec2_service_list.set_defaults(func=ec2_list_handler)
    # ec2 fab
    ec2_service_fab = ec2_subparsers.add_parser('fab', help='Run Fabric commands')
    ec2_service_fab.add_argument('--elb', '-e', help='Run against EC2 instances for this ELB')
    ec2_service_fab.add_argument('--instances', '-i', nargs='*', metavar=('id', 'id'), help='List of instance IDs to use as filter')
    ec2_service_fab.add_argument('--file', '-f', nargs='+', help='Define fabfile to use')
    ec2_service_fab.add_argument('methods', metavar='method:arg1,arg2=val2,host=foo,hosts=\'h1;h2\',', nargs='+', help='Specify one or more methods to execute.')
    ec2_service_fab.set_defaults(func=ec2_fab_handler)
    # ec2 create / start / stop / terminate
    ec2_service_create = ec2_subparsers.add_parser('create', help='Create and start new instances')
    ec2_service_create.set_defaults(func=ec2_create_handler)
    ec2_service_start = ec2_subparsers.add_parser('start', help='Start existing instances')
    ec2_service_start.add_argument('instance', nargs='+', help='ID of an instance to start')
    ec2_service_start.set_defaults(func=ec2_start_handler)
    ec2_service_stop = ec2_subparsers.add_parser('stop', help='Stop instances')
    ec2_service_stop.add_argument('instance', nargs='+', help='ID of an instance to stop')
    ec2_service_stop.add_argument('--force', '-f', action='store_true', help='Force stop')
    ec2_service_stop.set_defaults(func=ec2_stop_handler)
    ec2_service_terminate = ec2_subparsers.add_parser('terminate', help='Terminate instances')
    ec2_service_terminate.add_argument('instance', nargs='+', help='ID of an instance to terminate')
    ec2_service_terminate.set_defaults(func=ec2_terminate_handler)
    # ec2 images / create-image
    ec2_service_images = ec2_subparsers.add_parser('images', help='List AMI images')
    ec2_service_images.add_argument('image', nargs='*', help='Image ID to use as filter')
    ec2_service_images.set_defaults(func=ec2_images_handler)
    ec2_service_create_image = ec2_subparsers.add_parser('create-image', help='Create AMI image from instance')
    ec2_service_create_image.add_argument('instance', help='ID of an instance to image')
    ec2_service_create_image.add_argument('name', help='The name of the image')
    ec2_service_create_image.add_argument('--description', '-d', help='Optional description for the image')
    ec2_service_create_image.add_argument('--noreboot', action='store_true', default=False, help='Do not shutdown the instance before creating image. ' + 'Note: System integrity might suffer if used.')
    ec2_service_create_image.set_defaults(func=ec2_create_image_handler)

    # Elastic Load Balancing
    elb_service = subparsers.add_parser('elb', help='Amazon Elastic Load Balancing')
    elb_subparsers = elb_service.add_subparsers(help='Perform action')
    elb_service_list = elb_subparsers.add_parser('list', help='List items')
    elb_service_list.add_argument('--type', default='balancers', choices=['balancers', 'regions'], help='List items of this type')
    elb_service_list.set_defaults(func=elb_list_handler)
    elb_service_instances = elb_subparsers.add_parser('instances', help='List registered instances')
    elb_service_instances.add_argument('balancer', help='Name of the Load Balancer')
    elb_service_instances.set_defaults(func=elb_instances_handler)
    elb_service_register = elb_subparsers.add_parser('register', help='Register instances to balancer')
    elb_service_register.add_argument('balancer', help='Name of the load balancer')
    elb_service_register.add_argument('instance', nargs='+', help='ID of an instance to register')
    elb_service_register.set_defaults(func=elb_register_handler)
    elb_service_deregister = elb_subparsers.add_parser('deregister', help='Deregister instances of balancer')
    elb_service_deregister.add_argument('balancer', help='Name of the Load Balancer')
    elb_service_deregister.add_argument('instance', nargs='+', help='ID of an instance to deregister')
    elb_service_deregister.set_defaults(func=elb_deregister_handler)
    elb_service_zones = elb_subparsers.add_parser('zones', help='Enable or disable availability zones')
    elb_service_zones.add_argument('balancer', help='Name of the Load Balancer')
    elb_service_zones.add_argument('zone', nargs='+', help='Name of the availability zone')
    elb_service_zones.add_argument('status', help='Disable of enable zones', choices=['enable', 'disable'])
    elb_service_zones.set_defaults(func=elb_zones_handler)
    elb_service_delete = elb_subparsers.add_parser('delete', help='Delete Load Balancer')
    elb_service_delete.add_argument('balancer', help='Name of the Load Balancer')
    elb_service_delete.set_defaults(func=elb_delete_handler)

    # elb_service_create = elb_subparsers.add_parser('create', help='Create new Load Balancer')
    # elb_service_delete = elb_subparsers.add_parser('delete', help='Delete Load Balancer')
    # elb_service_register = elb_subparsers.add_parser('register', help='Register EC2 instance')
    # elb_service_zone = elb_subparsers.add_parser('zone', help='Enable or disable region')

    # Parse and dispatch to the selected handler.
    arguments = p.parse_args()
    arguments.func(p, arguments)
def buffer_stream(stream, buffer_size, partial=False, axis=None):
    '''Buffer "data" from an stream into one data object.

    Parameters
    ----------
    stream : stream
        The stream to buffer
    buffer_size : int > 0
        The number of examples to retain per batch.
    partial : bool, default=False
        If True, yield a final partial batch on under-run.
    axis : int or None
        If `None` (default), concatenate data along a new 0th axis.
        Otherwise, concatenation is performed along the specified axis.

    Yields
    ------
    batch
        A batch of size at most `buffer_size`

    Raises
    ------
    DataError
        If the stream contains items that are not data-like.
    '''
    batch = []
    for item in stream:
        batch.append(item)
        # Keep accumulating until a full batch is collected.
        if len(batch) < buffer_size:
            continue
        try:
            yield __stack_data(batch, axis=axis)
        except (TypeError, AttributeError):
            raise DataError("Malformed data stream: {}".format(batch))
        finally:
            # Always start a fresh batch, even after an error.
            batch = []

    # Emit whatever is left over, if requested.
    if batch and partial:
        yield __stack_data(batch, axis=axis)
def tuples(stream, *keys):
    """Reformat data as tuples.

    Parameters
    ----------
    stream : iterable
        Stream of data objects.
    *keys : strings
        Keys to use for ordering data.

    Yields
    ------
    items : tuple of np.ndarrays
        Data object reformated as a tuple.

    Raises
    ------
    DataError
        If the stream contains items that are not data-like.
    KeyError
        If a data object does not contain the requested key.
    """
    if not keys:
        raise PescadorError('Unable to generate tuples from '
                            'an empty item set')

    for record in stream:
        try:
            # Project each record down to the requested keys, in order.
            yield tuple(record[key] for key in keys)
        except TypeError:
            raise DataError("Malformed data stream: {}".format(record))
def keras_tuples(stream, inputs=None, outputs=None):
    """Reformat data objects as keras-compatible `(x, y)` tuples.

    For more detail: https://keras.io/models/model/#fit

    Parameters
    ----------
    stream : iterable
        Stream of data objects.

    inputs : string or iterable of strings, None
        Keys to use for ordered input data.
        If not specified, returns `None` in its place.

    outputs : string or iterable of strings, default=None
        Keys to use for ordered output data.
        If not specified, returns `None` in its place.

    Yields
    ------
    x : np.ndarray, list of np.ndarray, or None
        Single array if `inputs` was a single string, a list of arrays if
        it was an iterable, or None if it was not given.

    y : np.ndarray, list of np.ndarray, or None
        Same convention as `x`, driven by `outputs`.

    Raises
    ------
    PescadorError
        If neither `inputs` nor `outputs` yields any key.

    DataError
        If the stream contains items that are not data-like.
    """
    # Remember whether a bare string was passed so the single value can be
    # unwrapped from its list again when yielding.
    squeeze_x = False
    squeeze_y = False

    if inputs and isinstance(inputs, six.string_types):
        inputs = [inputs]
        squeeze_x = True
    if outputs and isinstance(outputs, six.string_types):
        outputs = [outputs]
        squeeze_y = True

    inputs = inputs or []
    outputs = outputs or []
    if not inputs + outputs:
        raise PescadorError('At least one key must be given for '
                            '`inputs` or `outputs`')

    for record in stream:
        try:
            x = [record[key] for key in inputs] or None
            if squeeze_x and len(inputs) == 1:
                x = x[0]

            y = [record[key] for key in outputs] or None
            if squeeze_y and len(outputs) == 1:
                y = y[0]

            yield (x, y)
        except TypeError:
            raise DataError("Malformed data stream: {}".format(record))
def location(args):
    """Create a histogram of motif locations.

    Parameters
    ----------
    args : argparse object
        Command line arguments (fastafile, pwmfile, width, ids, cutoff).
    """
    fastafile = args.fastafile
    pwmfile = args.pwmfile

    lwidth = args.width
    if not lwidth:
        # Default to the length of the first sequence in the FASTA file.
        f = Fasta(fastafile)
        lwidth = len(f.items()[0][1])
        f = None

    jobs = []
    motifs = pwmfile_to_motifs(pwmfile)
    ids = [motif.id for motif in motifs]
    if args.ids:
        ids = args.ids.split(",")

    n_cpus = int(MotifConfig().get_default_params()["ncpus"])
    pool = Pool(processes=n_cpus, maxtasksperchild=1000)
    try:
        for motif in motifs:
            if motif.id in ids:
                outfile = os.path.join("%s_histogram" % motif.id)
                jobs.append(
                    pool.apply_async(
                        motif_localization,
                        (fastafile, motif, lwidth, outfile, args.cutoff)
                    ))

        # No further tasks will be submitted: close the pool so workers can
        # exit once their queue drains (previously the pool was never
        # closed/joined, leaking worker processes).
        pool.close()
        for job in jobs:
            job.get()
        pool.join()
    except Exception:
        # Don't leave worker processes lingering if any job fails.
        pool.terminate()
        pool.join()
        raise
def which(fname):
    """Find the location of an executable, like the shell `which`.

    Tries `fname` itself and each directory on PATH; if nothing matches,
    falls back to querying the `locate` database. Returns the path of the
    executable, or None when it cannot be found.
    """
    path = os.environ.get("PATH") or os.defpath

    # Check the name as given, then every PATH directory.
    candidates = [fname] + [os.path.join(d, fname) for d in path.split(os.pathsep)]
    for candidate in candidates:
        candidate = os.path.abspath(candidate)
        if os.access(candidate, os.X_OK) and not os.path.isdir(candidate):
            return candidate

    # Fall back to the locate database (best-effort; may not be installed).
    proc = sp.Popen("locate %s" % fname, shell=True, stdout=sp.PIPE, stderr=sp.PIPE)
    stdout, stderr = proc.communicate()
    if stderr:
        return None
    for candidate in stdout.decode().split("\n"):
        if (os.path.basename(candidate) == fname
                and os.access(candidate, os.X_OK)
                and not os.path.isdir(candidate)):
            return candidate
def find_by_ext(dirname, ext):
    """Return the paths of all files in `dirname` whose extension is in `ext`."""
    try:
        entries = os.listdir(dirname)
    except OSError:
        if not os.path.exists(dirname):
            raise
        # Directory exists but listdir failed; fall back to `find`.
        cmd = "find {0} -maxdepth 1 -name \"*\"".format(dirname)
        proc = sp.Popen(cmd, shell=True, stdout=sp.PIPE, stderr=sp.PIPE)
        stdout, _ = proc.communicate()
        entries = [os.path.basename(p) for p in stdout.decode().splitlines()]

    return [os.path.join(dirname, name)
            for name in entries
            if os.path.splitext(name)[-1] in ext]
def default_motifs():
    """Return a list of Motif instances from the default motif database.

    Raises
    ------
    ValueError
        If no default motif database is configured.
    """
    config = MotifConfig()
    motif_dir = config.get_motif_dir()
    db_name = config.get_default_params()['motif_db']

    if not (motif_dir and db_name):
        raise ValueError("default motif database not configured")

    with open(os.path.join(motif_dir, db_name)) as handle:
        return read_motifs(handle)
def motif_from_align(align):
    """Build a Motif from a list of aligned sequences.

    All sequences must be the same length and use only A, C, G, T.

    Parameters
    ----------
    align : list
        List with sequences (A,C,G,T).

    Returns
    -------
    m : Motif instance
        Motif created from the aligned sequences.
    """
    nuc_index = {"A": 0, "C": 1, "G": 2, "T": 3}
    width = len(align[0])

    # Count nucleotide occurrences per position.
    pfm = [[0, 0, 0, 0] for _ in range(width)]
    for seq in align:
        for pos, nuc in enumerate(seq):
            pfm[pos][nuc_index[nuc]] += 1

    motif = Motif(pfm)
    motif.align = align[:]
    return motif
def motif_from_consensus(cons, n=12):
    """Build a Motif from an IUPAC consensus sequence.

    The per-position count `n` is divided evenly over the nucleotides
    encoded by each IUPAC letter.

    Parameters
    ----------
    cons : str
        Consensus sequence using the IUPAC alphabet.

    n : int , optional
        Count used to convert the sequence to a PFM.

    Returns
    -------
    m : Motif instance
        Motif created from the consensus.
    """
    nuc_index = {"A": 0, "C": 1, "G": 2, "T": 3}
    pfm = [[0, 0, 0, 0] for _ in range(len(cons))]

    # A bare Motif is only needed here for its IUPAC translation table.
    lookup = Motif()
    for pos, letter in enumerate(cons):
        nucs = lookup.iupac[letter.upper()]
        for nuc in nucs:
            pfm[pos][nuc_index[nuc]] = n / len(nucs)

    motif = Motif(pfm)
    motif.id = cons
    return motif
def parse_motifs(motifs):
    """Parse motifs in a variety of formats to return a list of motifs.

    Parameters
    ----------
    motifs : list or str
        Filename of motif, list of motifs or single Motif instance.

    Returns
    -------
    motifs : list
        List of Motif instances.

    Raises
    ------
    ValueError
        If an iterable does not contain Motif instances.
    """
    if isinstance(motifs, six.string_types):
        with open(motifs) as f:
            if motifs.endswith("pwm") or motifs.endswith("pfm"):
                motifs = read_motifs(f, fmt="pwm")
            elif motifs.endswith("transfac"):
                motifs = read_motifs(f, fmt="transfac")
            else:
                motifs = read_motifs(f)
        return list(motifs)

    if isinstance(motifs, Motif):
        return [motifs]

    # Materialize exactly once: the original called list(motifs) twice,
    # which silently returned [] for generators/iterators.
    motifs = list(motifs)
    if not isinstance(motifs[0], Motif):
        raise ValueError("Not a list of motifs")
    return motifs
def _read_motifs_from_filehandle(handle, fmt):
    """
    Read motifs from a file-like object.

    Parameters
    ----------
    handle : file-like object
        Motifs.
    fmt : string, optional
        Motif format, can be 'pwm', 'transfac', 'xxmotif', 'jaspar' or 'align'.

    Returns
    -------
    motifs : list
        List of Motif instances.

    Raises
    ------
    ValueError
        If `fmt` is not a recognized format.
    """
    readers = {
        "pwm": _read_motifs_pwm,
        "transfac": _read_motifs_transfac,
        "xxmotif": _read_motifs_xxmotif,
        "align": _read_motifs_align,
        "jaspar": _read_motifs_jaspar,
    }
    fmt = fmt.lower()
    if fmt not in readers:
        # Previously an unrecognized format fell through to a NameError on
        # `motifs`; fail with a clear message instead.
        raise ValueError("Unknown motif format: {}".format(fmt))
    motifs = readers[fmt](handle)

    # Objects such as StringIO have no `name` attribute; only look for a
    # motif2factors mapping next to real files.
    name = getattr(handle, "name", None)
    if name:
        base = os.path.splitext(name)[0]
        map_file = base + ".motif2factors.txt"
        if os.path.exists(map_file):
            m2f_direct = {}
            m2f_indirect = {}
            for line in open(map_file):
                try:
                    motif, *factor_info = line.strip().split("\t")
                    if len(factor_info) == 1:
                        # Two-column format: comma-separated factor list.
                        m2f_direct[motif] = factor_info[0].split(",")
                    elif len(factor_info) == 3:
                        # Four-column format: last column flags direct evidence.
                        if factor_info[2] == "Y":
                            m2f_direct[motif] = m2f_direct.get(motif, []) + [factor_info[0]]
                        else:
                            m2f_indirect[motif] = m2f_indirect.get(motif, []) + [factor_info[0]]
                except ValueError:
                    # Skip malformed lines (was a bare `except: pass`).
                    pass

            for motif in motifs:
                if motif.id in m2f_direct:
                    motif.factors[DIRECT_NAME] = m2f_direct[motif.id]
                if motif.id in m2f_indirect:
                    motif.factors[INDIRECT_NAME] = m2f_indirect[motif.id]

    # Deduplicate factor annotations.
    for motif in motifs:
        for n in [DIRECT_NAME, INDIRECT_NAME]:
            motif.factors[n] = list(set(motif.factors[n]))

    return motifs
def read_motifs(infile=None, fmt="pwm", as_dict=False):
    """
    Read motifs from a file, filename or the default database.

    Parameters
    ----------
    infile : string or file-like object, optional
        Motif database, filename of motif file or file-like object. If
        infile is not specified the default motifs as specified in the
        config file will be returned.

    fmt : string, optional
        Motif format, can be 'pwm', 'transfac', 'xxmotif', 'jaspar' or 'align'.

    as_dict : boolean, optional
        Return motifs as a dictionary with motif_id, motif pairs.

    Returns
    -------
    motifs : list or dict
        List of Motif instances, or an id->Motif dict when `as_dict`.
    """
    if infile is None or isinstance(infile, six.string_types):
        # Resolve None / name / path to an actual motif file location.
        with open(pwmfile_location(infile)) as handle:
            motifs = _read_motifs_from_filehandle(handle, fmt)
    else:
        motifs = _read_motifs_from_filehandle(infile, fmt)

    if as_dict:
        return {m.id: m for m in motifs}
    return motifs
Return the total information content of the motif. Return ------ ic : float Motif information content. def information_content(self): """Return the total information content of the motif. Return ------ ic : float Motif information content. """ ic = 0 for row in self.pwm: ic += 2.0 + np.sum([row[x] * log(row[x])/log(2) for x in range(4) if row[x] > 0]) return ic
Return the minimum PWM score. Returns ------- score : float Minimum PWM score. def pwm_min_score(self): """Return the minimum PWM score. Returns ------- score : float Minimum PWM score. """ if self.min_score is None: score = 0 for row in self.pwm: score += log(min(row) / 0.25 + 0.01) self.min_score = score return self.min_score
Return the maximum PWM score. Returns ------- score : float Maximum PWM score. def pwm_max_score(self): """Return the maximum PWM score. Returns ------- score : float Maximum PWM score. """ if self.max_score is None: score = 0 for row in self.pwm: score += log(max(row) / 0.25 + 0.01) self.max_score = score return self.max_score
Calculate the log-odds score for a specific k-mer. Parameters ---------- kmer : str String representing a kmer. Should be the same length as the motif. Returns ------- score : float Log-odd score. def score_kmer(self, kmer): """Calculate the log-odds score for a specific k-mer. Parameters ---------- kmer : str String representing a kmer. Should be the same length as the motif. Returns ------- score : float Log-odd score. """ if len(kmer) != len(self.pwm): raise Exception("incorrect k-mer length") score = 0.0 d = {"A":0, "C":1, "G":2, "T":3} for nuc, row in zip(kmer.upper(), self.pwm): score += log(row[d[nuc]] / 0.25 + 0.01) return score
Convert PFM with counts to a PFM with fractions. Parameters ---------- pfm : list 2-dimensional list with counts. pseudo : float Pseudocount used in conversion. Returns ------- pwm : list 2-dimensional list with fractions. def pfm_to_pwm(self, pfm, pseudo=0.001): """Convert PFM with counts to a PFM with fractions. Parameters ---------- pfm : list 2-dimensional list with counts. pseudo : float Pseudocount used in conversion. Returns ------- pwm : list 2-dimensional list with fractions. """ return [[(x + pseudo)/(float(np.sum(row)) + pseudo * 4) for x in row] for row in pfm]
Return motif formatted in MotEvo (TRANSFAC-like) format Returns ------- m : str String of motif in MotEvo format. def to_motevo(self): """Return motif formatted in MotEvo (TRANSFAC-like) format Returns ------- m : str String of motif in MotEvo format. """ m = "//\n" m += "NA {}\n".format(self.id) m += "P0\tA\tC\tG\tT\n" for i, row in enumerate(self.pfm): m += "{}\t{}\n".format(i, "\t".join([str(int(x)) for x in row])) m += "//" return m
Return motif formatted in TRANSFAC format Returns ------- m : str String of motif in TRANSFAC format. def to_transfac(self): """Return motif formatted in TRANSFAC format Returns ------- m : str String of motif in TRANSFAC format. """ m = "%s\t%s\t%s\n" % ("DE", self.id, "unknown") for i, (row, cons) in enumerate(zip(self.pfm, self.to_consensus())): m += "%i\t%s\t%s\n" % (i, "\t".join([str(int(x)) for x in row]), cons) m += "XX" return m
Return motif formatted in MEME format Returns ------- m : str String of motif in MEME format. def to_meme(self): """Return motif formatted in MEME format Returns ------- m : str String of motif in MEME format. """ motif_id = self.id.replace(" ", "_") m = "MOTIF %s\n" % motif_id m += "BL MOTIF %s width=0 seqs=0\n"% motif_id m += "letter-probability matrix: alength= 4 w= %s nsites= %s E= 0\n" % (len(self), np.sum(self.pfm[0])) m +="\n".join(["\t".join(["%s" % x for x in row]) for row in self.pwm]) return m
Calculate the information content of one position. Returns ------- score : float Information content. def ic_pos(self, row1, row2=None): """Calculate the information content of one position. Returns ------- score : float Information content. """ if row2 is None: row2 = [0.25,0.25,0.25,0.25] score = 0 for a,b in zip(row1, row2): if a > 0: score += a * log(a / b) / log(2) return score
Calculate the Pearson correlation coefficient of one position compared to another position. Returns ------- score : float Pearson correlation coefficient. def pcc_pos(self, row1, row2): """Calculate the Pearson correlation coefficient of one position compared to another position. Returns ------- score : float Pearson correlation coefficient. """ mean1 = np.mean(row1) mean2 = np.mean(row2) a = 0 x = 0 y = 0 for n1, n2 in zip(row1, row2): a += (n1 - mean1) * (n2 - mean2) x += (n1 - mean1) ** 2 y += (n2 - mean2) ** 2 if a == 0: return 0 else: return a / sqrt(x * y)
Return the reverse complemented motif. Returns ------- m : Motif instance New Motif instance with the reverse complement of the input motif. def rc(self): """Return the reverse complemented motif. Returns ------- m : Motif instance New Motif instance with the reverse complement of the input motif. """ m = Motif() m.pfm = [row[::-1] for row in self.pfm[::-1]] m.pwm = [row[::-1] for row in self.pwm[::-1]] m.id = self.id + "_revcomp" return m
    def trim(self, edge_ic_cutoff=0.4):
        """Trim positions with an information content lower than the threshold.

        The default threshold is set to 0.4. The Motif will be changed
        in-place.

        Parameters
        ----------
        edge_ic_cutoff : float, optional
            Information content threshold. All motif positions at the flanks
            with an information content lower than this will be removed.

        Returns
        -------
        m : Motif instance
            This motif (modified in-place).
        """
        # Work on a copy so that slicing self.pwm/self.pfm inside the loops
        # stays in lockstep with the positions still being inspected.
        pwm = self.pwm[:]
        # Strip low-information positions from the left flank.
        while len(pwm) > 0 and self.ic_pos(pwm[0]) < edge_ic_cutoff:
            pwm = pwm[1:]
            self.pwm = self.pwm[1:]
            self.pfm = self.pfm[1:]

        # Strip low-information positions from the right flank.
        while len(pwm) > 0 and self.ic_pos(pwm[-1]) < edge_ic_cutoff:
            pwm = pwm[:-1]
            self.pwm = self.pwm[:-1]
            self.pfm = self.pfm[:-1]

        # Invalidate cached values that depend on the motif length.
        self.consensus = None
        self.min_score = None
        self.max_score = None
        self.wiggled_pwm = None

        return self
Scan FASTA with the motif as a consensus sequence. Parameters ---------- fa : Fasta object Fasta object to scan Returns ------- matches : dict Dictionaru with matches. def consensus_scan(self, fa): """Scan FASTA with the motif as a consensus sequence. Parameters ---------- fa : Fasta object Fasta object to scan Returns ------- matches : dict Dictionaru with matches. """ regexp = "".join(["[" + "".join(self.iupac[x.upper()]) + "]" for x in self.to_consensusv2()]) p = re.compile(regexp) matches = {} for name,seq in fa.items(): matches[name] = [] for match in p.finditer(seq): middle = (match.span()[1] + match.span()[0]) / 2 matches[name].append(middle) return matches
Scan sequences with this motif. Scan sequences from a FASTA object with this motif. Less efficient than using a Scanner object. By setting the cutoff to 0.0 and nreport to 1, the best match for every sequence will be returned. Only the position of the matches is returned. Parameters ---------- fa : Fasta object Fasta object to scan. cutoff : float , optional Cutoff to use for motif scanning. This cutoff is not specifically optimized and the strictness will vary a lot with motif lengh. nreport : int , optional Maximum number of matches to report. scan_rc : bool , optional Scan the reverse complement. True by default. Returns ------- matches : dict Dictionary with motif matches. Only the position of the matches is returned. def pwm_scan(self, fa, cutoff=0.9, nreport=50, scan_rc=True): """Scan sequences with this motif. Scan sequences from a FASTA object with this motif. Less efficient than using a Scanner object. By setting the cutoff to 0.0 and nreport to 1, the best match for every sequence will be returned. Only the position of the matches is returned. Parameters ---------- fa : Fasta object Fasta object to scan. cutoff : float , optional Cutoff to use for motif scanning. This cutoff is not specifically optimized and the strictness will vary a lot with motif lengh. nreport : int , optional Maximum number of matches to report. scan_rc : bool , optional Scan the reverse complement. True by default. Returns ------- matches : dict Dictionary with motif matches. Only the position of the matches is returned. """ c = self.pwm_min_score() + ( self.pwm_max_score() - self.pwm_min_score()) * cutoff pwm = self.pwm matches = {} for name, seq in fa.items(): matches[name] = [] result = pfmscan(seq.upper(), pwm, c, nreport, scan_rc) for _,pos,_ in result: matches[name].append(pos) return matches
Scan sequences with this motif. Scan sequences from a FASTA object with this motif. Less efficient than using a Scanner object. By setting the cutoff to 0.0 and nreport to 1, the best match for every sequence will be returned. The score, position and strand for every match is returned. Parameters ---------- fa : Fasta object Fasta object to scan. cutoff : float , optional Cutoff to use for motif scanning. This cutoff is not specifically optimized and the strictness will vary a lot with motif lengh. nreport : int , optional Maximum number of matches to report. scan_rc : bool , optional Scan the reverse complement. True by default. Returns ------- matches : dict Dictionary with motif matches. The score, position and strand for every match is returned. def pwm_scan_all(self, fa, cutoff=0.9, nreport=50, scan_rc=True): """Scan sequences with this motif. Scan sequences from a FASTA object with this motif. Less efficient than using a Scanner object. By setting the cutoff to 0.0 and nreport to 1, the best match for every sequence will be returned. The score, position and strand for every match is returned. Parameters ---------- fa : Fasta object Fasta object to scan. cutoff : float , optional Cutoff to use for motif scanning. This cutoff is not specifically optimized and the strictness will vary a lot with motif lengh. nreport : int , optional Maximum number of matches to report. scan_rc : bool , optional Scan the reverse complement. True by default. Returns ------- matches : dict Dictionary with motif matches. The score, position and strand for every match is returned. """ c = self.pwm_min_score() + (self.pwm_max_score() - self.pwm_min_score()) * cutoff pwm = self.pwm matches = {} for name, seq in fa.items(): matches[name] = [] result = pfmscan(seq.upper(), pwm, c, nreport, scan_rc) for score,pos,strand in result: matches[name].append((pos,score,strand)) return matches
Scan sequences with this motif and save to a GFF file. Scan sequences from a FASTA object with this motif. Less efficient than using a Scanner object. By setting the cutoff to 0.0 and nreport to 1, the best match for every sequence will be returned. The output is save to a file in GFF format. Parameters ---------- fa : Fasta object Fasta object to scan. gfffile : str Filename of GFF output file. cutoff : float , optional Cutoff to use for motif scanning. This cutoff is not specifically optimized and the strictness will vary a lot with motif lengh. nreport : int , optional Maximum number of matches to report. scan_rc : bool , optional Scan the reverse complement. True by default. append : bool , optional Append to GFF file instead of overwriting it. False by default. def pwm_scan_to_gff(self, fa, gfffile, cutoff=0.9, nreport=50, scan_rc=True, append=False): """Scan sequences with this motif and save to a GFF file. Scan sequences from a FASTA object with this motif. Less efficient than using a Scanner object. By setting the cutoff to 0.0 and nreport to 1, the best match for every sequence will be returned. The output is save to a file in GFF format. Parameters ---------- fa : Fasta object Fasta object to scan. gfffile : str Filename of GFF output file. cutoff : float , optional Cutoff to use for motif scanning. This cutoff is not specifically optimized and the strictness will vary a lot with motif lengh. nreport : int , optional Maximum number of matches to report. scan_rc : bool , optional Scan the reverse complement. True by default. append : bool , optional Append to GFF file instead of overwriting it. False by default. 
""" if append: out = open(gfffile, "a") else: out = open(gfffile, "w") c = self.pwm_min_score() + (self.pwm_max_score() - self.pwm_min_score()) * cutoff pwm = self.pwm strandmap = {-1:"-","-1":"-","-":"-","1":"+",1:"+","+":"+"} gff_line = ("{}\tpfmscan\tmisc_feature\t{}\t{}\t{:.3f}\t{}\t.\t" "motif_name \"{}\" ; motif_instance \"{}\"\n") for name, seq in fa.items(): result = pfmscan(seq.upper(), pwm, c, nreport, scan_rc) for score, pos, strand in result: out.write(gff_line.format( name, pos, pos + len(pwm), score, strandmap[strand], self.id, seq[pos:pos + len(pwm)] )) out.close()
    def average_motifs(self, other, pos, orientation, include_bg=False):
        """Return the average of two motifs.

        Combine this motif with another motif and return the average as a new
        Motif object. The position and orientation need to be supplied. The
        pos parameter is the position of the second motif relative to this
        motif.

        For example, take the following two motifs:
        Motif 1: CATGYT
        Motif 2: GGCTTGY

        With position -2, the motifs are averaged as follows:
        xxCATGYT
        GGCTTGYx

        Parameters
        ----------
        other : Motif object
            Other Motif object.

        pos : int
            Position of the second motif relative to this motif.

        orientation : int
            Orientation, should be 1 or -1. If the orientation is -1 then the
            reverse complement of the other motif is used for averaging.

        include_bg : bool , optional
            Extend both motifs with background frequencies (0.25) before
            averaging. False by default.

        Returns
        -------
        motif : motif object
            New Motif object containing average motif.
        """
        # xxCATGYT
        # GGCTTGYx
        # pos = -2

        pfm1 = self.pfm[:]
        pfm2 = other.pfm[:]

        # Orientation -1: use the reverse complement of the other motif.
        if orientation < 0:
            pfm2 = [row[::-1] for row in pfm2[::-1]]

        # Total counts per position (taken from the first position).
        pfm1_count = float(np.sum(pfm1[0]))
        pfm2_count = float(np.sum(pfm2[0]))

        if include_bg:
            # Pad the shorter motif on the right with uniform background
            # counts so that both matrices span the same positions.
            if len(pfm1) > len(pfm2) + pos:
                pfm2 += [[pfm2_count / 4.0 for x in range(4)]
                         for i in range(-(len(pfm1) - len(pfm2) - pos), 0)]
            elif len(pfm2) + pos > len(pfm1):
                pfm1 += [[pfm1_count / 4.0 for x in range(4)]
                         for i in range(-(len(pfm2) - len(pfm1) + pos), 0)]

            # Pad on the left, depending on the sign of the offset.
            if pos < 0:
                pfm1 = [[pfm1_count / 4.0 for x in range(4)]
                        for i in range(-pos)] + pfm1
            elif pos > 0:
                pfm2 = [[pfm2_count / 4.0 for x in range(4)]
                        for i in range(pos)] + pfm2
        else:
            # Pad using the other motif's frequencies, rescaled to this
            # motif's count total, instead of a flat background.
            if len(pfm1) > len(pfm2) + pos:
                pfm2 += [[pfm1[i][x] / pfm1_count * (pfm2_count) for x in range(4)]
                         for i in range(-(len(pfm1) - len(pfm2) - pos), 0)]
            elif len(pfm2) + pos > len(pfm1):
                pfm1 += [[pfm2[i][x] / pfm2_count * (pfm1_count) for x in range(4)]
                         for i in range(-(len(pfm2) - len(pfm1) + pos), 0)]

            if pos < 0:
                pfm1 = [[pfm2[i][x] / pfm2_count * (pfm1_count) for x in range(4)]
                        for i in range(-pos)] + pfm1
            elif pos > 0:
                pfm2 = [[pfm1[i][x] / pfm1_count * (pfm2_count) for x in range(4)]
                        for i in range(pos)] + pfm2

        # Element-wise sum of the two (now equally sized) count matrices.
        pfm = [[a + b for a, b in zip(x, y)] for x, y in zip(pfm1, pfm2)]

        m = Motif(pfm)
        m.id = m.to_consensus()
        return m
Return string representation of pwm. Parameters ---------- precision : int, optional, default 4 Floating-point precision. Returns ------- pwm_string : str def _pwm_to_str(self, precision=4): """Return string representation of pwm. Parameters ---------- precision : int, optional, default 4 Floating-point precision. Returns ------- pwm_string : str """ if not self.pwm: return "" fmt = "{{:.{:d}f}}".format(precision) return "\n".join( ["\t".join([fmt.format(p) for p in row]) for row in self.pwm] )
Return pwm as string. Parameters ---------- precision : int, optional, default 4 Floating-point precision. extra_str |: str, optional Extra text to include with motif id line. Returns ------- motif_str : str Motif formatted in PWM format. def to_pwm(self, precision=4, extra_str=""): """Return pwm as string. Parameters ---------- precision : int, optional, default 4 Floating-point precision. extra_str |: str, optional Extra text to include with motif id line. Returns ------- motif_str : str Motif formatted in PWM format. """ motif_id = self.id if extra_str: motif_id += "_%s" % extra_str if not self.pwm: self.pwm = [self.iupac_pwm[char]for char in self.consensus.upper()] return ">%s\n%s" % ( motif_id, self._pwm_to_str(precision) )
    def to_img(self, fname, fmt="PNG", add_left=0, seqlogo=None, height=6):
        """Create a sequence logo using seqlogo.

        Create a sequence logo and save it to a file. Valid formats are:
        PNG, EPS, GIF and PDF.

        Parameters
        ----------
        fname : str
            Output filename.

        fmt : str , optional
            Output format (case-insensitive). Valid formats are PNG, EPS,
            GIF and PDF.

        add_left : int , optional
            Pad motif with empty positions on the left side.

        seqlogo : str
            Location of the seqlogo executable. By default the seqlogo
            version that is included with GimmeMotifs is used.

        height : float
            Height of the image

        Raises
        ------
        ValueError
            If no seqlogo executable is given or configured.
        """
        if not seqlogo:
            seqlogo = self.seqlogo
        if not seqlogo:
            raise ValueError("seqlogo not specified or configured")

        #TODO: split to_align function
        VALID_FORMATS = ["EPS", "GIF", "PDF", "PNG"]
        # Number of synthetic sequences used to approximate the frequencies.
        N = 1000
        fmt = fmt.upper()
        if not fmt in VALID_FORMATS:
            sys.stderr.write("Invalid motif format\n")
            return

        # seqlogo appends the extension itself; strip it if already present.
        if fname[-4:].upper() == (".%s" % fmt):
            fname = fname[:-4]

        seqs = []
        if add_left == 0:
            seqs = ["" for i in range(N)]
        else:
            # Left-pad with equal quarters of each nucleotide.
            for nuc in ["A", "C", "T", "G"]:
                seqs += [nuc * add_left for i in range(N // 4)]

        # For each position, convert the weights into cumulative counts and
        # distribute nucleotides over the N synthetic sequences accordingly.
        for pos in range(len(self.pwm)):
            vals = [self.pwm[pos][0] * N]
            for i in range(1, 4):
                vals.append(vals[i - 1] + self.pwm[pos][i] * N)
            if vals[3] - N != 0:
                # Weights did not sum exactly to 1; clamp the last boundary.
                #print "Motif weights don't add up to 1! Error of %s%%" % ((vals[3] - n)/ n * 100)
                vals[3] = N

            for i in range(N):
                if i <= vals[0]:
                    seqs[i] += "A"
                elif i <= vals[1]:
                    seqs[i] += "C"
                elif i <= vals[2]:
                    seqs[i] += "G"
                elif i <= vals[3]:
                    seqs[i] += "T"

        # Write the synthetic alignment to a temp file for seqlogo to read.
        f = NamedTemporaryFile(mode="w", dir=mytmpdir())
        for seq in seqs:
            f.write("%s\n" % seq)
        f.flush()
        makelogo = "{0} -f {1} -F {2} -c -a -h {3} -w {4} -o {5} -b -n -Y"
        cmd = makelogo.format(
            seqlogo,
            f.name,
            fmt,
            height,
            len(self) + add_left,
            fname)
        sp.call(cmd, shell=True)
Create a new motif with shuffled positions. Shuffle the positions of this motif and return a new Motif instance. Returns ------- m : Motif instance Motif instance with shuffled positions. def randomize(self): """Create a new motif with shuffled positions. Shuffle the positions of this motif and return a new Motif instance. Returns ------- m : Motif instance Motif instance with shuffled positions. """ random_pfm = [[c for c in row] for row in self.pfm] random.shuffle(random_pfm) m = Motif(pfm=random_pfm) m.id = "random" return m
def maelstrom(args):
    """Run the maelstrom method from parsed command-line arguments.

    Raises
    ------
    ValueError
        If the input file does not exist.
    """
    infile = args.inputfile
    genome = args.genome
    outdir = args.outdir
    pwmfile = args.pwmfile
    methods = args.methods
    ncpus = args.ncpus

    if not os.path.exists(infile):
        raise ValueError("file {} does not exist".format(infile))

    # Comma-separated list on the command line -> list of method names.
    if methods:
        methods = [m.strip() for m in methods.split(",")]

    run_maelstrom(infile, genome, outdir, pwmfile,
                  methods=methods, ncpus=ncpus)
def zmq_send_data(socket, data, flags=0, copy=True, track=False):
    """Send data, e.g. {key: np.ndarray}, with metadata.

    A JSON header frame (dtype, shape, key, alignment per array) is
    followed by one raw-buffer frame per array, key-sorted.

    Raises
    ------
    DataError
        If any value in `data` is not an ndarray.
    """
    header, payload = [], []

    for key in sorted(data.keys()):
        arr = data[key]

        if not isinstance(arr, np.ndarray):
            raise DataError('Only ndarray types can be serialized')

        header.append(dict(dtype=str(arr.dtype),
                           shape=arr.shape,
                           key=key,
                           aligned=arr.flags['ALIGNED']))
        # Force contiguity: zmq transmits the raw buffer, which must be
        # C-contiguous to round-trip correctly. (The original comment
        # documented this intent but never applied the conversion.)
        payload.append(np.ascontiguousarray(arr))

    # Send the header, then one frame per array.
    msg = [json.dumps(header).encode('ascii')]
    msg.extend(payload)
    return socket.send_multipart(msg, flags, copy=copy, track=track)
def zmq_recv_data(socket, flags=0, copy=True, track=False):
    """Receive data over a socket.

    Expects the multipart layout produced by `zmq_send_data`: a JSON
    header frame followed by one buffer frame per array.

    Raises
    ------
    StopIteration
        If an empty header is received (end-of-stream sentinel).
    """
    data = dict()

    msg = socket.recv_multipart(flags=flags, copy=copy, track=track)

    headers = json.loads(msg[0].decode('ascii'))

    if len(headers) == 0:
        raise StopIteration

    for header, payload in zip(headers, msg[1:]):
        # `payload` supports the buffer protocol (bytes or zmq frame), so
        # hand it to frombuffer directly. The original wrapped it in the
        # Python-2-only `buffer()` builtin, which raises NameError on
        # Python 3.
        arr = np.frombuffer(payload, dtype=header['dtype'])
        arr.shape = header['shape']

        if sys.version_info[0] == 2:
            # Legacy python won't let us preserve alignment, skip this step
            data[header['key']] = arr
            continue

        arr.flags['ALIGNED'] = header['aligned']
        data[header['key']] = arr

    return data
def iterate(self, max_iter=None):
    """Iterate over data produced by a worker process over zmq.

    Note: A ZMQStreamer does not activate its stream,
    but allows the zmq_worker to do that.

    Yields
    ------
    data : dict
        Data drawn from `streamer(max_iter)`.
    """
    context = zmq.Context()

    if six.PY2:
        warnings.warn('zmq_stream cannot preserve numpy array alignment '
                      'in Python 2', RuntimeWarning)

    try:
        # PAIR socket: exactly one worker connects back on this port.
        socket = context.socket(zmq.PAIR)

        port = socket.bind_to_random_port('tcp://*',
                                          min_port=self.min_port,
                                          max_port=self.max_port,
                                          max_tries=self.max_tries)
        terminate = mp.Event()

        worker = mp.Process(target=SafeFunction(zmq_worker),
                            args=[port, self.streamer, terminate],
                            kwargs=dict(copy=self.copy,
                                        max_iter=max_iter))

        # Daemonize so the worker dies with the parent process.
        worker.daemon = True
        worker.start()

        # Yield from the queue as long as it's open
        # (zmq_recv_data raises StopIteration on the end-of-stream sentinel).
        while True:
            yield zmq_recv_data(socket)

    except StopIteration:
        pass

    except:  # pylint: disable-msg=W0702
        six.reraise(*sys.exc_info())

    finally:
        # NOTE(review): if an exception fires before `terminate`/`worker`
        # are assigned (e.g. socket creation fails), this cleanup raises
        # NameError -- confirm whether that path can occur in practice.
        terminate.set()
        worker.join(self.timeout)
        if worker.is_alive():
            worker.terminate()
        context.destroy()
def hardmask(self):
    """Mask all lowercase nucleotides with N's.

    Returns
    -------
    self
        This object, with every lowercase a/c/g/t/n replaced by 'N'.
    """
    lowercase_nuc = re.compile("a|c|g|t|n")
    for name in self.fasta_dict.keys():
        self.fasta_dict[name] = lowercase_nuc.sub("N", self.fasta_dict[name])
    return self
def get_random(self, n, l=None):
    """Return n random sequences from this Fasta object.

    If `l` is given, each returned sequence is a random fragment of length
    `l` taken from a distinct sequence; sequences shorter than `l` are
    skipped and None is returned (with a message on stderr) when not
    enough long-enough sequences exist.
    """
    random_f = Fasta()

    if l:
        # Sample length-l fragments from randomly chosen sequences.
        candidates = self.ids[:]
        random.shuffle(candidates)

        picked = 0
        while picked < n and candidates:
            seq_id = candidates.pop()
            if len(self[seq_id]) >= l:
                start = random.randint(0, len(self[seq_id]) - l)
                random_f["random%s" % (picked + 1)] = self[seq_id][start:start + l]
                picked += 1

        if len(random_f) != n:
            sys.stderr.write("Not enough sequences of required length")
            return
        return random_f

    # No length given: sample whole sequences without replacement.
    for seq_id in random.sample(self.ids, n):
        random_f[seq_id] = self[seq_id]
    return random_f
def writefasta(self, fname):
    """Write sequences to a FASTA formatted file.

    Parameters
    ----------
    fname : str
        Output file name.
    """
    fa_str = "\n".join(">%s\n%s" % (seq_id, self._format_seq(seq))
                       for seq_id, seq in self.items())
    # Context manager guarantees the handle is closed even if the write
    # fails (the original left the file open on error).
    with open(fname, "w") as f:
        f.write(fa_str)
def cluster_motifs(motifs, match="total", metric="wic", combine="mean", pval=True, threshold=0.95, trim_edges=False, edge_ic_cutoff=0.2, include_bg=True, progress=True, ncpus=None):
    """Cluster a set of sequence motifs by hierarchical agglomeration.

    Parameters
    ----------
    motifs : str or list
        A file with positional frequency matrices, or a list of Motif
        instances.
    match : str
        'total' for the total alignment or 'subtotal' for the maximum
        scoring subsequence of the alignment.
    metric : str
        Any metric defined in MotifComparer: 'pcc', 'ed', 'distance',
        'wic' or 'chisq'.
    combine : str
        How the total score is calculated from per-position scores:
        'sum' or 'mean'.
    pval : bool
        Convert the score to an empirical p-value.
    threshold : float
        Score (or p-value) cutoff used when checking merges.
    trim_edges : bool
        Remove motif edges with IC below `edge_ic_cutoff` before clustering.
    edge_ic_cutoff : float
        Information-content cutoff used for trimming.
    include_bg : bool
        When averaging two motifs, keep single-motif positions as-is
        (True) instead of averaging them with background frequencies.
    progress : bool
        Write progress to stderr.
    ncpus : int or None
        Number of CPUs for the initial scoring.

    Returns
    -------
    root : MotifTree
        Root node of the resulting cluster tree.
    """
    # First read pfm or pfm formatted motiffile
    if type([]) != type(motifs):
        motifs = read_motifs(motifs, fmt="pwm")

    mc = MotifComparer()

    # Trim edges with low information content
    if trim_edges:
        for motif in motifs:
            motif.trim(edge_ic_cutoff)

    # Make a MotifTree node for every motif
    nodes = [MotifTree(m) for m in motifs]

    # Determine all pairwise scores and maxscore per motif
    scores = {}
    motif_nodes = dict([(n.motif.id, n) for n in nodes])
    motifs = [n.motif for n in nodes]

    if progress:
        sys.stderr.write("Calculating initial scores\n")
    result = mc.get_all_scores(motifs, motifs, match, metric, combine, pval, parallel=True, ncpus=ncpus)

    for m1, other_motifs in result.items():
        for m2, score in other_motifs.items():
            if m1 == m2:
                # Self-comparison: best possible score for this motif.
                if pval:
                    motif_nodes[m1].maxscore = 1 - score[0]
                else:
                    motif_nodes[m1].maxscore = score[0]
            else:
                # With pval=True, flip so that higher is always better.
                if pval:
                    score = [1 - score[0]] + score[1:]
                scores[(motif_nodes[m1], motif_nodes[m2])] = score

    cluster_nodes = [node for node in nodes]
    ave_count = 1

    total = len(cluster_nodes)

    # Agglomerative loop: repeatedly merge the best-scoring pair of
    # still-unmerged nodes until a single root remains.
    while len(cluster_nodes) > 1:
        l = sorted(scores.keys(), key=lambda x: scores[x][0])
        i = -1
        (n1, n2) = l[i]
        # Walk back from the best score until both nodes are still mergeable.
        while n1 not in cluster_nodes or n2 not in cluster_nodes:
            i -= 1
            (n1, n2) = l[i]

        # NOTE(review): if either motif is empty this iteration performs
        # no merge and `cluster_nodes` does not shrink -- looks like a
        # potential infinite loop; confirm empty motifs cannot occur here.
        if len(n1.motif) > 0 and len(n2.motif) > 0:
            (score, pos, orientation) = scores[(n1, n2)]
            ave_motif = n1.motif.average_motifs(n2.motif, pos, orientation, include_bg=include_bg)

            ave_motif.trim(edge_ic_cutoff)

            # Check if the motif is not empty
            if len(ave_motif) == 0:
                ave_motif = Motif([[0.25, 0.25, 0.25, 0.25]])

            ave_motif.id = "Average_%s" % ave_count
            ave_count += 1

            new_node = MotifTree(ave_motif)
            # Self-score of the merged motif defines its maxscore.
            if pval:
                new_node.maxscore = 1 - mc.compare_motifs(new_node.motif, new_node.motif, match, metric, combine, pval)[0]
            else:
                new_node.maxscore = mc.compare_motifs(new_node.motif, new_node.motif, match, metric, combine, pval)[0]

            new_node.mergescore = score
            #print "%s + %s = %s with score %s" % (n1.motif.id, n2.motif.id, ave_motif.id, score)
            n1.parent = new_node
            n2.parent = new_node
            new_node.left = n1
            new_node.right = n2

            cmp_nodes = dict([(node.motif, node) for node in nodes if not node.parent])

            if progress:
                # NOTE(review): `progress` is rebound from bool to float
                # here; the final `if progress:` below then tests the
                # float -- quirky but intentional-looking.
                progress = (1 - len(cmp_nodes) / float(total)) * 100
                sys.stderr.write('\rClustering [{0}{1}] {2}%'.format(
                    '#' * (int(progress) // 10),
                    " " * (10 - int(progress) // 10),
                    int(progress)))

            # Score the merged motif against all remaining cluster roots.
            result = mc.get_all_scores(
                [new_node.motif],
                list(cmp_nodes.keys()),
                match,
                metric,
                combine,
                pval,
                parallel=True)

            for motif, n in cmp_nodes.items():
                x = result[new_node.motif.id][motif.id]
                if pval:
                    x = [1 - x[0]] + x[1:]
                scores[(new_node, n)] = x

            nodes.append(new_node)

        cluster_nodes = [node for node in nodes if not node.parent]

    if progress:
        sys.stderr.write("\n")

    # The last node appended is the root of the tree.
    root = nodes[-1]
    for node in [node for node in nodes if not node.left]:
        node.parent.checkMerge(root, threshold)

    return root
def batch_length(batch):
    '''Determine the number of samples in a batch.

    Parameters
    ----------
    batch : dict
        A batch dictionary.  Each value must implement `len`.
        All values must have the same `len`.

    Returns
    -------
    n : int >= 0 or None
        The number of samples in this batch.
        If the batch has no fields, n is None.

    Raises
    ------
    PescadorError
        If some two values have unequal length
    '''
    n = None

    # Every field must agree on its length; remember the first one seen.
    for value in batch.values():
        if n is None:
            n = len(value)
        elif len(value) != n:
            raise PescadorError('Unequal field lengths')

    return n
Activates a number of streams def _activate(self): """Activates a number of streams""" self.distribution_ = 1. / self.n_streams * np.ones(self.n_streams) self.valid_streams_ = np.ones(self.n_streams, dtype=bool) self.streams_ = [None] * self.k self.stream_weights_ = np.zeros(self.k) self.stream_counts_ = np.zeros(self.k, dtype=int) # Array of pointers into `self.streamers` self.stream_idxs_ = np.zeros(self.k, dtype=int) for idx in range(self.k): if not (self.distribution_ > 0).any(): break self.stream_idxs_[idx] = self.rng.choice( self.n_streams, p=self.distribution_) self.streams_[idx], self.stream_weights_[idx] = ( self._new_stream(self.stream_idxs_[idx])) self.weight_norm_ = np.sum(self.stream_weights_)
Randomly select and create a stream. Parameters ---------- idx : int, [0:n_streams - 1] The stream index to replace def _new_stream(self, idx): '''Randomly select and create a stream. Parameters ---------- idx : int, [0:n_streams - 1] The stream index to replace ''' # instantiate if self.rate is not None: n_stream = 1 + self.rng.poisson(lam=self.rate) else: n_stream = None # If we're sampling without replacement, zero this one out # This effectively disables this stream as soon as it is chosen, # preventing it from being chosen again (unless it is revived) if not self.with_replacement: self.distribution_[idx] = 0.0 # Correct the distribution if (self.distribution_ > 0).any(): self.distribution_[:] /= np.sum(self.distribution_) return (self.streamers[idx].iterate(max_iter=n_stream), self.weights[idx])
def iterate(self, max_iter=None):
    """Yields items from the mux, and handles stream exhaustion and
    replacement.

    Parameters
    ----------
    max_iter : int or None
        Maximum number of samples to yield; unlimited when None.

    Yields
    ------
    Samples drawn from the currently active streams.
    """
    if max_iter is None:
        max_iter = np.inf

    # Calls Streamer's __enter__, which calls activate()
    with self as active_mux:
        # Main sampling loop
        n = 0

        while n < max_iter and active_mux._streamers_available():
            # Pick a stream from the active set
            idx = active_mux._next_sample_index()

            # Can we sample from it?
            try:
                # Then yield the sample
                yield six.advance_iterator(active_mux.streams_[idx])

                # Increment the sample counter
                n += 1
                active_mux.stream_counts_[idx] += 1

            except StopIteration:
                # Oops, this stream is exhausted.

                # Call child-class exhausted-stream behavior
                active_mux._on_stream_exhausted(idx)

                # Setup a new stream for this index
                active_mux._replace_stream(idx)
StochasticMux chooses its next sample stream randomly def _next_sample_index(self): """StochasticMux chooses its next sample stream randomly""" return self.rng.choice(self.n_active, p=(self.stream_weights_ / self.weight_norm_))
Randomly select and create a stream. StochasticMux adds mode handling to _activate_stream, making it so that if we're not sampling "with_replacement", the distribution for this chosen streamer is set to 0, causing the streamer not to be available until it is exhausted. Parameters ---------- idx : int, [0:n_streams - 1] The stream index to replace def _activate_stream(self, idx): '''Randomly select and create a stream. StochasticMux adds mode handling to _activate_stream, making it so that if we're not sampling "with_replacement", the distribution for this chosen streamer is set to 0, causing the streamer not to be available until it is exhausted. Parameters ---------- idx : int, [0:n_streams - 1] The stream index to replace ''' # Get the number of samples for this streamer. n_samples_to_stream = None if self.rate is not None: n_samples_to_stream = 1 + self.rng.poisson(lam=self.rate) # instantiate a new streamer streamer = self.streamers[idx].iterate(max_iter=n_samples_to_stream) weight = self.weights[idx] # If we're sampling without replacement, zero this one out # This effectively disables this stream as soon as it is chosen, # preventing it from being chosen again (unless it is revived) # if not self.with_replacement: if self.mode != "with_replacement": self.distribution_[idx] = 0.0 # Correct the distribution if (self.distribution_ > 0).any(): self.distribution_[:] /= np.sum(self.distribution_) return streamer, weight
Randomly select and create a new stream. Parameters ---------- idx : int, [0:n_streams - 1] The stream index to replace def _new_stream(self, idx): '''Randomly select and create a new stream. Parameters ---------- idx : int, [0:n_streams - 1] The stream index to replace ''' # Choose the stream index from the candidate pool self.stream_idxs_[idx] = self.rng.choice( self.n_streams, p=self.distribution_) # Activate the Streamer, and get the weights self.streams_[idx], self.stream_weights_[idx] = self._activate_stream( self.stream_idxs_[idx]) # Reset the sample count to zero self.stream_counts_[idx] = 0
ShuffledMux's activate is similar to StochasticMux, but there is no 'n_active', since all the streams are always available. def _activate(self): """ShuffledMux's activate is similar to StochasticMux, but there is no 'n_active', since all the streams are always available. """ self.streams_ = [None] * self.n_streams # Weights of the active streams. # Once a stream is exhausted, it is set to 0. # Upon activation, this is just a copy of self.weights. self.stream_weights_ = np.array(self.weights, dtype=float) # How many samples have been drawn from each (active) stream. self.stream_counts_ = np.zeros(self.n_streams, dtype=int) # Initialize each active stream. for idx in range(self.n_streams): # Setup a new streamer at this index. self._new_stream(idx) self.weight_norm_ = np.sum(self.stream_weights_)
ShuffledMux chooses its next sample stream randomly, conditioned on the stream weights. def _next_sample_index(self): """ShuffledMux chooses its next sample stream randomly, conditioned on the stream weights. """ return self.rng.choice(self.n_streams, p=(self.stream_weights_ / self.weight_norm_))
Randomly select and create a new stream. Parameters ---------- idx : int, [0:n_streams - 1] The stream index to replace def _new_stream(self, idx): '''Randomly select and create a new stream. Parameters ---------- idx : int, [0:n_streams - 1] The stream index to replace ''' # Don't activate the stream if the weight is 0 or None if self.stream_weights_[idx]: self.streams_[idx] = self.streamers[idx].iterate() else: self.streams_[idx] = None # Reset the sample count to zero self.stream_counts_[idx] = 0
Rotates through each active sampler by incrementing the index def _next_sample_index(self): """Rotates through each active sampler by incrementing the index""" # Return the next streamer index where the streamer is not None, # wrapping around. idx = self.active_index_ self.active_index_ += 1 if self.active_index_ >= len(self.streams_): self.active_index_ = 0 # Continue to increment if this streamer is exhausted (None) # This should never be infinite looping; # the `_streamers_available` check happens immediately # before this, so there should always be at least one not-None # streamer. while self.streams_[idx] is None: idx = self.active_index_ self.active_index_ += 1 if self.active_index_ >= len(self.streams_): self.active_index_ = 0 return idx
Activate a new stream, given the index into the stream pool. BaseMux's _new_stream simply chooses a new stream and activates it. For special behavior (ie Weighted streams), you must override this in a child class. Parameters ---------- idx : int, [0:n_streams - 1] The stream index to replace def _new_stream(self, idx): """Activate a new stream, given the index into the stream pool. BaseMux's _new_stream simply chooses a new stream and activates it. For special behavior (ie Weighted streams), you must override this in a child class. Parameters ---------- idx : int, [0:n_streams - 1] The stream index to replace """ # Get the stream index from the candidate pool stream_index = self.stream_idxs_[idx] # Activate the Streamer, and get the weights self.streams_[idx] = self.streamers[stream_index].iterate() # Reset the sample count to zero self.stream_counts_[idx] = 0
Called by `BaseMux`'s iterate() when a stream is exhausted. Set the stream to None so it is ignored once exhausted. Parameters ---------- idx : int or None Raises ------ StopIteration If all streams are consumed, and `mode`=="exahustive" def _replace_stream(self, idx=None): """Called by `BaseMux`'s iterate() when a stream is exhausted. Set the stream to None so it is ignored once exhausted. Parameters ---------- idx : int or None Raises ------ StopIteration If all streams are consumed, and `mode`=="exahustive" """ self.streams_[idx] = None # Check if we've now exhausted all the streams. if not self._streamers_available(): if self.mode == 'exhaustive': pass elif self.mode == "cycle": self._setup_streams(permute=False) elif self.mode == "permuted_cycle": self._setup_streams(permute=True)
def _new_stream(self):
    '''Grab the next stream from the input streamers, and start it.

    Raises
    ------
    StopIteration
        When the input list or generator of streamers is complete, will
        raise a StopIteration. If `mode == cycle`, it will instead
        restart iterating from the beginning of the sequence.
    '''
    try:
        # Advance the stream_generator_ to get the next available stream.
        # If successful, this will make self.chain_streamer_.active True
        next_stream = six.advance_iterator(self.stream_generator_)
    except StopIteration:
        # If running with cycle, restart the chain_streamer_ after
        # exhaustion.
        if self.mode == "cycle":
            self.stream_generator_ = self.chain_streamer_.iterate()

            # Try again to get the next stream;
            # if it fails this time, just let it raise the StopIteration;
            # this means the streams are probably dead or empty.
            next_stream = six.advance_iterator(self.stream_generator_)

        # If running in exhaustive mode
        else:
            # self.chain_streamer_ should no longer be active, so
            # the outer loop should fall out without running.
            next_stream = None

    if next_stream is not None:
        # Start that stream, and return it.
        streamer = next_stream.iterate()

        # Activate the Streamer (ChainMux only ever has one slot).
        self.streams_[0] = streamer

        # Reset the sample count to zero
        self.stream_counts_[0] = 0
def split_and_save_datasets(X, Y, paths):
    """Shuffle X and Y into n / len(paths) datasets, and save them to
    disk at the locations provided in paths.
    """
    n_splits = len(paths)
    order = np.random.permutation(np.arange(len(X)))

    for split, path in enumerate(paths):
        # Strided selection: split i takes items [i::n_splits] of the
        # shuffled order, giving near-equal, disjoint partitions.
        idxs = order[split::n_splits]
        np.savez(path, X=X[idxs], Y=Y[idxs])
def npz_generator(npz_path):
    """Generate data from an npz file.

    Yields {'X': row, 'Y': row} dicts, sampling rows uniformly at
    random, forever.
    """
    archive = np.load(npz_path)
    X = archive['X']
    # Y is a binary matrix with shape=(n, k); each y will have shape=(k,)
    Y = archive['Y']
    n_samples = X.shape[0]

    while True:
        row = np.random.randint(0, n_samples)
        yield {'X': X[row], 'Y': Y[row]}
def phyper(k, good, bad, N):
    """Upper-tail hypergeometric probability, P(X > k).

    Current hypergeometric implementation in scipy is broken, so here's
    the correct version.
    """
    # Sum the point probabilities of every outcome strictly greater than k.
    tail = [phyper_single(x, good, bad, N) for x in range(k + 1, N + 1)]
    return np.sum(tail)
def write_equalwidth_bedfile(bedfile, width, outfile):
    """Read input from <bedfile>, set the width of all entries to <width>
    and write the result to <outfile>.

    Input file needs to be in BED or WIG format. Comment, 'track' and
    'browser' lines are skipped and are NOT copied to the output.
    Extra columns (beyond chrom/start/end) are preserved.

    Parameters
    ----------
    bedfile : str
        Input BED/WIG file name.
    width : int
        Desired width of every output feature.
    outfile : str
        Output file name.
    """
    # Context managers replace the original open/close + manual
    # readlines(BUFSIZE) chunking; iterating the handle streams lines
    # without loading the file into memory.
    with open(bedfile) as f, open(outfile, "w") as out:
        for line_count, line in enumerate(f, start=1):
            if line.startswith(("#", "track", "browser")):
                continue
            vals = line.strip().split("\t")
            try:
                start, end = int(vals[1]), int(vals[2])
            except ValueError:
                print("Error on line %s while reading %s. Is the file in BED or WIG format?" % (line_count, bedfile))
                sys.exit(1)
            # Re-center on the midpoint and apply the requested width.
            # Clamping at 0 shifts the center for features near the
            # chromosome start, but guarantees the width is identical.
            start = (start + end) // 2 - (width // 2)
            if start < 0:
                start = 0
            end = start + width
            # Keep all the other information in the bedfile if it's there
            if len(vals) > 3:
                out.write("%s\t%s\t%s\t%s\n" % (vals[0], start, end, "\t".join(vals[3:])))
            else:
                out.write("%s\t%s\t%s\n" % (vals[0], start, end))
def calc_motif_enrichment(sample, background, mtc=None, len_sample=None, len_back=None):
    """Calculate enrichment based on hypergeometric distribution.

    Parameters
    ----------
    sample, background
        Objects with a `motifs` dict (motif id -> matches) and a
        `seqn()` method returning the number of sequences.
    mtc : str or None
        Multiple testing correction: None, "Bonferroni",
        "Benjamini-Hochberg" or "None".
    len_sample, len_back : int or None
        Override the sequence counts; taken from `seqn()` when falsy.

    Returns
    -------
    (sig, p_value, n_sample, n_back) : tuple of dict
        Per-motif -log10(p) scores (or the string "Inf"), p-values,
        sample match counts and background match counts.
    """
    # Sentinel used when p == 0 (score would be infinite).
    INF = "Inf"

    if mtc not in [None, "Bonferroni", "Benjamini-Hochberg", "None"]:
        raise RuntimeError("Unknown correction: %s" % mtc)

    sig = {}
    p_value = {}
    n_sample = {}
    n_back = {}

    if not(len_sample):
        len_sample = sample.seqn()
    if not(len_back):
        len_back = background.seqn()

    for motif in sample.motifs.keys():
        p = "NA"
        s = "NA"
        q = len(sample.motifs[motif])
        m = 0
        if(background.motifs.get(motif)):
            m = len(background.motifs[motif])
            n = len_back - m
            k = len_sample
            # Upper-tail hypergeometric test: P(X >= q).
            p = phyper(q - 1, m, n, k)
            if p != 0:
                s = -(log(p)/log(10))
            else:
                s = INF
        else:
            # Motif absent from the background: maximally enriched.
            s = INF
            p = 0.0
        sig[motif] = s
        p_value[motif] = p
        n_sample[motif] = q
        n_back[motif] = m

    if mtc == "Bonferroni":
        for motif in p_value.keys():
            if p_value[motif] != "NA":
                p_value[motif] = p_value[motif] * len(p_value.keys())
                if p_value[motif] > 1:
                    p_value[motif] = 1
    elif mtc == "Benjamini-Hochberg":
        # NOTE(review): sorting with -p_value[x] raises TypeError on
        # Python 3 if any value is the string "NA" -- confirm this
        # combination cannot occur.
        motifs = sorted(p_value.keys(), key=lambda x: -p_value[x])
        l = len(p_value)
        c = l
        for m in motifs:
            if p_value[m] != "NA":
                p_value[m] = p_value[m] * l / c
            c -= 1

    return (sig, p_value, n_sample, n_back)
def parse_cutoff(motifs, cutoff, default=0.9):
    """Provide either a file with one cutoff per motif or a single cutoff.

    Parameters
    ----------
    motifs : list
        Motif instances (only the `id` attribute is used).
    cutoff : str or float
        Either the name of a tab-separated file with
        `motif<tab>score<tab>cutoff` lines, or a single numeric cutoff
        applied to every motif.
    default : float, optional
        Cutoff used for motifs missing from the cutoff file.

    Returns
    -------
    cutoffs : dict
        Motif id as key and cutoff as value.
    """
    cutoffs = {}
    if os.path.isfile(str(cutoff)):
        # One cutoff per motif, read from file; skip the header line.
        for i, line in enumerate(open(cutoff)):
            if line != "Motif\tScore\tCutoff\n":
                try:
                    motif, _, c = line.strip().split("\t")
                    c = float(c)
                    cutoffs[motif] = c
                except Exception as e:
                    # Bug fix: the original had the format arguments
                    # swapped, printing the exception as the line number.
                    sys.stderr.write("Error parsing cutoff file, line {0}: {1}\n".format(i + 1, e))
                    sys.exit(1)
    else:
        # A single numeric cutoff for all motifs.
        for motif in motifs:
            cutoffs[motif.id] = float(cutoff)

    # Fall back to the default for any motif not covered above.
    for motif in motifs:
        if not motif.id in cutoffs:
            sys.stderr.write("No cutoff found for {0}, using default {1}\n".format(motif.id, default))
            cutoffs[motif.id] = default
    return cutoffs
def determine_file_type(fname):
    """
    Detect file type.

    The following file types are supported:
    BED, narrowPeak, FASTA, list of chr:start-end regions

    If the extension is bed, fa, fasta or narrowPeak, we will believe
    this without checking!

    Parameters
    ----------
    fname : str
        File name.

    Returns
    -------
    filetype : str
        Detected type: "bed", "narrowpeak", "fasta", "region" or
        "unknown".
    """
    # NOTE(review): `unicode` only exists on Python 2; on Python 3 a
    # non-str argument raises NameError here instead of ValueError --
    # confirm. Also, the format placeholders below are never filled in
    # (missing .format call).
    if not (isinstance(fname, str) or isinstance(fname, unicode)):
        raise ValueError("{} is not a file name!", fname)
    if not os.path.isfile(fname):
        raise ValueError("{} is not a file!", fname)

    # Trust well-known extensions without inspecting the content.
    ext = os.path.splitext(fname)[1].lower()
    if ext in ["bed"]:
        return "bed"
    elif ext in ["fa", "fasta"]:
        return "fasta"
    elif ext in ["narrowpeak"]:
        return "narrowpeak"

    # If it parses as FASTA, it is FASTA.
    try:
        Fasta(fname)
        return "fasta"
    except:
        pass

    # Read first line that is not a comment or an UCSC-specific line
    p = re.compile(r'^(#|track|browser)')
    with open(fname) as f:
        for line in f.readlines():
            line = line.strip()
            if not p.search(line):
                break

    region_p = re.compile(r'^(.+):(\d+)-(\d+)$')
    if region_p.search(line):
        return "region"
    else:
        vals = line.split("\t")
        # BED requires at least chrom, start, end with integer coords.
        if len(vals) >= 3:
            try:
                _, _ = int(vals[1]), int(vals[2])
            except ValueError:
                return "unknown"
        # narrowPeak is 10 columns with integer score and peak offset.
        if len(vals) == 10:
            try:
                _, _ = int(vals[4]), int(vals[9])
                return "narrowpeak"
            except ValueError:
                # As far as I know there is no 10-column BED format
                return "unknown"
            pass
        return "bed"

    # Catch-all
    return "unknown"
def get_seqs_type(seqs):
    """
    Automagically determine the input type.

    The following types are detected:
        - Fasta object
        - FASTA file
        - list of regions
        - region file
        - BED file

    Parameters
    ----------
    seqs : Fasta, list or str
        Sequences, a list of region strings, or a file name.

    Returns
    -------
    str
        One of "fasta", "regions", or "<filetype>file" (e.g. "bedfile").

    Raises
    ------
    ValueError
        If the type cannot be determined or the file does not exist.
    """
    region_p = re.compile(r'^(.+):(\d+)-(\d+)$')
    if isinstance(seqs, Fasta):
        return "fasta"
    elif isinstance(seqs, list):
        if len(seqs) == 0:
            raise ValueError("empty list of sequences to scan")
        else:
            # Only the first element is checked; assumes a homogeneous list.
            if region_p.search(seqs[0]):
                return "regions"
            else:
                raise ValueError("unknown region type")
    # NOTE(review): `unicode` only exists on Python 2; on Python 3 a
    # non-str, non-list argument raises NameError here -- confirm.
    elif isinstance(seqs, str) or isinstance(seqs, unicode):
        if os.path.isfile(seqs):
            ftype = determine_file_type(seqs)
            if ftype == "unknown":
                raise ValueError("unknown type")
            elif ftype == "narrowpeak":
                raise ValueError("narrowPeak not yet supported in this function")
            else:
                return ftype + "file"
        else:
            raise ValueError("no file found with name {}".format(seqs))
    else:
        raise ValueError("unknown type {}".format(type(seqs).__name__))
def file_checksum(fname):
    """Return md5 checksum of file.

    Note: only works for files < 4GB.

    Parameters
    ----------
    fname : str
        File used to calculate checksum.

    Returns
    -------
    checksum : str
        Hexadecimal md5 digest of the file contents.
    """
    size = os.path.getsize(fname)
    # mmap rejects zero-length maps; handle the empty file directly.
    if size == 0:
        return hashlib.md5(b"").hexdigest()
    # Open read-only in binary mode: the original "r+" required write
    # permission (failing on read-only files) for a purely read-only
    # operation. ACCESS_READ makes the mapping itself read-only too.
    with open(fname, "rb") as f:
        checksum = hashlib.md5(
            mmap.mmap(f.fileno(), size, access=mmap.ACCESS_READ)).hexdigest()
    return checksum
def download_annotation(genomebuild, gene_file):
    """
    Download gene annotation from UCSC based on genomebuild.

    Will check UCSC, Ensembl and RefSeq annotation.

    Parameters
    ----------
    genomebuild : str
        UCSC genome name.
    gene_file : str
        Output file name.
    """
    # The external genePredToBed tool converts genePred tables to BED.
    pred_bin = "genePredToBed"
    pred = find_executable(pred_bin)
    if not pred:
        sys.stderr.write("{} not found in path!\n".format(pred_bin))
        sys.exit(1)

    # NOTE(review): delete=False means this temp file is never removed --
    # confirm whether that is intentional (e.g. for debugging).
    tmp = NamedTemporaryFile(delete=False, suffix=".gz")

    # Scrape the UCSC listing for available *.Gene.txt.gz tables.
    anno = []
    f = urlopen(UCSC_GENE_URL.format(genomebuild))
    p = re.compile(r'\w+.Gene.txt.gz')
    for line in f.readlines():
        m = p.search(line.decode())
        if m:
            anno.append(m.group(0))

    sys.stderr.write("Retrieving gene annotation for {}\n".format(genomebuild))
    # Pick the first annotation source (in ANNOS preference order) that
    # is actually available for this genome build.
    url = ""
    for a in ANNOS:
        if a in anno:
            url = UCSC_GENE_URL.format(genomebuild) + a
            break
    if url:
        sys.stderr.write("Using {}\n".format(url))
        urlretrieve(
            url,
            tmp.name
        )
        # Locate the strand column (+/-) to work out which columns hold
        # the genePred fields, then slice those columns for conversion.
        with gzip.open(tmp.name) as f:
            cols = f.readline().decode(errors='ignore').split("\t")
        start_col = 1
        for i,col in enumerate(cols):
            if col == "+" or col == "-":
                start_col = i - 1
                break
        end_col = start_col + 10
        cmd = "zcat {} | cut -f{}-{} | {} /dev/stdin {}"
        print(cmd.format(tmp.name, start_col, end_col, pred, gene_file))
        sp.call(cmd.format(
            tmp.name, start_col, end_col, pred, gene_file),
            shell=True)
    else:
        sys.stderr.write("No annotation found!")
Check if dir exists, if not: give warning and die def _check_dir(self, dirname): """ Check if dir exists, if not: give warning and die""" if not os.path.exists(dirname): print("Directory %s does not exist!" % dirname) sys.exit(1)
Index a single, one-sequence fasta-file def _make_index(self, fasta, index): """ Index a single, one-sequence fasta-file""" out = open(index, "wb") f = open(fasta) # Skip first line of fasta-file line = f.readline() offset = f.tell() line = f.readline() while line: out.write(pack(self.pack_char, offset)) offset = f.tell() line = f.readline() f.close() out.close()
def create_index(self, fasta_dir=None, index_dir=None):
    """Index all fasta-files in fasta_dir (one sequence per file!) and
    store the results in index_dir.

    Writes a parameter file (per-sequence file locations and sizes), a
    size file and one binary .index file per sequence, then re-reads the
    parameter file so the index is immediately usable.
    """
    # Use default directories if they are not supplied
    if not fasta_dir:
        fasta_dir = self.fasta_dir
    if not index_dir:
        index_dir = self.index_dir

    # Can't continue if we still don't have an index_dir or fasta_dir
    if not fasta_dir:
        print("fasta_dir not defined!")
        sys.exit(1)
    if not index_dir:
        print("index_dir not defined!")
        sys.exit(1)

    index_dir = os.path.abspath(index_dir)
    fasta_dir = os.path.abspath(fasta_dir)
    self.index_dir = index_dir

    # Prepare index directory
    if not os.path.exists(index_dir):
        try:
            os.mkdir(index_dir)
        except OSError as e:
            # errno 13 == EACCES (permission denied)
            if e.args[0] == 13:
                sys.stderr.write("No permission to create index directory. Superuser access needed?\n")
                sys.exit()
            else:
                # NOTE(review): writing an exception object (not a str)
                # raises TypeError here -- probably meant str(e); confirm.
                sys.stderr.write(e)

    # Directories need to exist
    self._check_dir(fasta_dir)
    self._check_dir(index_dir)

    # Get all fasta-files
    fastafiles = find_by_ext(fasta_dir, FASTA_EXT)
    if not(fastafiles):
        msg = "No fastafiles found in {} with extension in {}".format(
            fasta_dir, ",".join(FASTA_EXT))
        raise IOError(msg)

    # param_file will hold all the information about the location of the
    # fasta-files, indeces and length of the sequences
    param_file = os.path.join(index_dir, self.param_file)
    size_file = os.path.join(index_dir, self.size_file)
    try:
        out = open(param_file, "w")
    except IOError as e:
        if e.args[0] == 13:
            sys.stderr.write("No permission to create files in index directory. Superuser access needed?\n")
            sys.exit()
        else:
            sys.stderr.write(e)

    s_out = open(size_file, "w")
    for fasta_file in fastafiles:
        #sys.stderr.write("Indexing %s\n" % fasta_file)
        f = open(fasta_file)
        line = f.readline()
        if not line.startswith(">"):
            sys.stderr.write("%s is not a valid FASTA file, expected > at first line\n" % fasta_file)
            sys.exit()
        seqname = line.strip().replace(">", "")

        # line_size is taken from the first sequence line; assumes all
        # sequence lines (except possibly the last) share this width.
        line = f.readline()
        line_size = len(line.strip())
        total_size = 0
        while line:
            line = line.strip()
            # Only one sequence per file is supported.
            if line.startswith(">"):
                sys.stderr.write("Sorry, can only index genomes with "
                    "one sequence per FASTA file\n%s contains multiple "
                    "sequences\n" % fasta_file)
                sys.exit()
            total_size += len(line)
            line = f.readline()
        index_file = os.path.join(index_dir, "%s.index" % seqname)
        out.write("{}\t{}\t{}\t{}\t{}\n".format(
            seqname, fasta_file, index_file, line_size, total_size))
        s_out.write("{}\t{}\n".format(seqname, total_size))
        self._make_index(fasta_file, index_file)
        f.close()
    out.close()
    s_out.close()

    # Read the index we just made so we can immediately use it
    self._read_index_file()
read the param_file, index_dir should already be set def _read_index_file(self): """read the param_file, index_dir should already be set """ param_file = os.path.join(self.index_dir, self.param_file) with open(param_file) as f: for line in f.readlines(): (name, fasta_file, index_file, line_size, total_size) = line.strip().split("\t") self.size[name] = int(total_size) self.fasta_file[name] = fasta_file self.index_file[name] = index_file self.line_size[name] = int(line_size)
retrieve a number of lines from a fasta file-object, starting at offset def _read_seq_from_fasta(self, fasta, offset, nr_lines): """ retrieve a number of lines from a fasta file-object, starting at offset""" fasta.seek(offset) lines = [fasta.readline().strip() for _ in range(nr_lines)] return "".join(lines)
def get_sequences(self, chr, coords):
    """Retrieve multiple sequences from the same chromosome.

    coords is a list of coordinate sets; each set is a list of
    (start, end) tuples whose pieces are concatenated into one sequence.
    Reverse complement is not possible yet.
    """
    # Check if we have an index_dir
    if not self.index_dir:
        print("Index dir is not defined!")
        sys.exit()

    # Per-chromosome bookkeeping from the index
    fasta_file = self.fasta_file[chr]
    index_file = self.index_file[chr]
    line_size = self.line_size[chr]
    total_size = self.size[chr]

    index = open(index_file, "rb")
    fasta = open(fasta_file)

    seqs = []
    for coordset in coords:
        pieces = []
        for start, end in coordset:
            # Validate each interval against the sequence length
            if start > total_size:
                raise ValueError("%s: %s, invalid start, greater than sequence length!" % (chr, start))
            if start < 0:
                raise ValueError("Invalid start, < 0!")
            if end > total_size:
                raise ValueError("Invalid end, greater than sequence length!")
            pieces.append(self._read(index, fasta, start, end, line_size))
        seqs.append("".join(pieces))

    index.close()
    fasta.close()
    return seqs
def get_sequence(self, chrom, start, end, strand=None):
    """Retrieve the sequence chrom:start-end.

    If strand is "-" the reverse complement is returned.
    """
    # Check if we have an index_dir
    if not self.index_dir:
        print("Index dir is not defined!")
        sys.exit()

    # Per-chromosome bookkeeping from the index
    fasta_file = self.fasta_file[chrom]
    index_file = self.index_file[chrom]
    line_size = self.line_size[chrom]
    total_size = self.size[chrom]

    # Validate coordinates before touching any file
    if start > total_size:
        raise ValueError(
                "Invalid start {0}, greater than sequence length {1} of {2}!".format(start, total_size, chrom))
    if start < 0:
        raise ValueError("Invalid start, < 0!")
    if end > total_size:
        raise ValueError(
                "Invalid end {0}, greater than sequence length {1} of {2}!".format(end, total_size, chrom))

    index = open(index_file, "rb")
    fasta = open(fasta_file)
    seq = self._read(index, fasta, start, end, line_size)
    index.close()
    fasta.close()

    if strand and strand == "-":
        seq = rc(seq)
    return seq
def get_size(self, chrom=None):
    """Return the summed size of all sequences in the index, or the size
    of a single chromosome if chrom is given.

    Raises
    ------
    LookupError
        If the index contains no chromosomes.
    KeyError
        If chrom is not present in the index.
    """
    if not self.size:
        raise LookupError("no chromosomes in index, is the index correct?")

    if chrom:
        try:
            return self.size[chrom]
        except KeyError:
            raise KeyError("chromosome {} not in index".format(chrom))

    return sum(self.size.values())
def get_tool(name):
    """Return an instance of the motif prediction tool with this name.

    Parameters
    ----------
    name : str
        Name of the tool (case-insensitive).

    Returns
    -------
    tool : MotifProgram instance

    Raises
    ------
    ValueError
        If the tool name is not known.
    """
    key = name.lower()
    if key not in __tools__:
        raise ValueError("Tool {0} not found!\n".format(name))

    tool = __tools__[key]()
    # Warn on stderr, but still return the instance, when it is not usable yet
    if not tool.is_installed():
        sys.stderr.write("Tool {0} not installed!\n".format(key))
    if not tool.is_configured():
        sys.stderr.write("Tool {0} not configured!\n".format(key))
    return tool
def locate_tool(name, verbose=True):
    """Return the path to the binary of a tool, or None when not found.

    Parameters
    ----------
    name : str
        Name of the tool (case-insensitive).
    verbose : bool, optional
        Print a message when the binary is found.

    Returns
    -------
    tool_bin : str
        Binary of tool.
    """
    tool = get_tool(name)
    tool_bin = which(tool.cmd)
    if not tool_bin:
        print("Couldn't find {}".format(tool.name))
        return None
    if verbose:
        print("Found {} in {}".format(tool.name, tool_bin))
    return tool_bin
def bin(self):
    """Return the system command used to run this tool.

    Returns
    -------
    command : str
        The tool system command.
    """
    # A locally configured binary takes precedence over the global config
    return self.local_bin if self.local_bin else self.config.bin(self.name)
def is_installed(self):
    """Check whether the tool is configured and its binary is executable.

    Returns
    -------
    is_installed : bool
        True if the tool is installed.
    """
    if not self.is_configured():
        return False
    return os.access(self.bin(), os.X_OK)
def run(self, fastafile, params=None, tmp=None):
    """Run the tool and predict motifs from a FASTA file.

    Parameters
    ----------
    fastafile : str
        Name of the FASTA input file.
    params : dict, optional
        Optional parameters. For some of the tools required parameters
        are passed using this dictionary.
    tmp : str, optional
        Directory to use for creation of temporary files.

    Returns
    -------
    motifs : list of Motif instances
        The predicted motifs.
    stdout : str
        Standard out of the tool.
    stderr : str
        Standard error of the tool.

    Raises
    ------
    ValueError
        If the tool is not configured or not installed.
    """
    # Refuse to run an unusable tool
    if not self.is_configured():
        raise ValueError("%s is not configured" % self.name)
    if not self.is_installed():
        raise ValueError("%s is not installed or not correctly configured" % self.name)

    fastafile = os.path.abspath(fastafile)
    # Every invocation gets its own scratch directory
    self.tmpdir = mkdtemp(prefix="{0}.".format(self.name), dir=tmp)

    try:
        return self._run_program(self.bin(), fastafile, params)
    except KeyboardInterrupt:
        # Return an empty result instead of propagating Ctrl-C
        return ([], "Killed", "Killed")