repo
stringlengths
7
55
path
stringlengths
4
223
func_name
stringlengths
1
134
original_string
stringlengths
75
104k
language
stringclasses
1 value
code
stringlengths
75
104k
code_tokens
listlengths
19
28.4k
docstring
stringlengths
1
46.9k
docstring_tokens
listlengths
1
1.97k
sha
stringlengths
40
40
url
stringlengths
87
315
partition
stringclasses
1 value
vanheeringen-lab/gimmemotifs
gimmemotifs/validation.py
check_bed_file
def check_bed_file(fname): """ Check if the inputfile is a valid bed-file """ if not os.path.exists(fname): logger.error("Inputfile %s does not exist!", fname) sys.exit(1) for i, line in enumerate(open(fname)): if line.startswith("#") or line.startswith("track") or line.startswith("browser"): # comment or BED specific stuff pass else: vals = line.strip().split("\t") if len(vals) < 3: logger.error("Expecting tab-seperated values (chromosome<tab>start<tab>end) on line %s of file %s", i + 1, fname) sys.exit(1) try: start, end = int(vals[1]), int(vals[2]) except ValueError: logger.error("No valid integer coordinates on line %s of file %s", i + 1, fname) sys.exit(1) if len(vals) > 3: try: float(vals[3]) except ValueError: pass
python
def check_bed_file(fname):
    """Check whether *fname* is a valid BED file.

    Exits the program (exit code 1) when the file does not exist, when a
    data line has fewer than three tab-separated columns, or when the
    start/end columns are not valid integers.

    Parameters
    ----------
    fname : str
        Path of the BED file to validate.
    """
    if not os.path.exists(fname):
        logger.error("Inputfile %s does not exist!", fname)
        sys.exit(1)

    # Use a context manager so the file handle is always closed.
    with open(fname) as bed:
        for i, line in enumerate(bed):
            # Skip comments and BED track/browser definition lines.
            if line.startswith(("#", "track", "browser")):
                continue
            vals = line.strip().split("\t")
            if len(vals) < 3:
                # BUGFIX: "tab-seperated" typo corrected in the message.
                logger.error(
                    "Expecting tab-separated values "
                    "(chromosome<tab>start<tab>end) on line %s of file %s",
                    i + 1,
                    fname,
                )
                sys.exit(1)
            try:
                int(vals[1]), int(vals[2])
            except ValueError:
                logger.error(
                    "No valid integer coordinates on line %s of file %s",
                    i + 1,
                    fname,
                )
                sys.exit(1)
            # NOTE(review): the original code additionally tried
            # float(vals[3]) and silently ignored a ValueError, which had
            # no observable effect; that dead check was removed.
[ "def", "check_bed_file", "(", "fname", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "fname", ")", ":", "logger", ".", "error", "(", "\"Inputfile %s does not exist!\"", ",", "fname", ")", "sys", ".", "exit", "(", "1", ")", "for", "i", ",", "line", "in", "enumerate", "(", "open", "(", "fname", ")", ")", ":", "if", "line", ".", "startswith", "(", "\"#\"", ")", "or", "line", ".", "startswith", "(", "\"track\"", ")", "or", "line", ".", "startswith", "(", "\"browser\"", ")", ":", "# comment or BED specific stuff", "pass", "else", ":", "vals", "=", "line", ".", "strip", "(", ")", ".", "split", "(", "\"\\t\"", ")", "if", "len", "(", "vals", ")", "<", "3", ":", "logger", ".", "error", "(", "\"Expecting tab-seperated values (chromosome<tab>start<tab>end) on line %s of file %s\"", ",", "i", "+", "1", ",", "fname", ")", "sys", ".", "exit", "(", "1", ")", "try", ":", "start", ",", "end", "=", "int", "(", "vals", "[", "1", "]", ")", ",", "int", "(", "vals", "[", "2", "]", ")", "except", "ValueError", ":", "logger", ".", "error", "(", "\"No valid integer coordinates on line %s of file %s\"", ",", "i", "+", "1", ",", "fname", ")", "sys", ".", "exit", "(", "1", ")", "if", "len", "(", "vals", ")", ">", "3", ":", "try", ":", "float", "(", "vals", "[", "3", "]", ")", "except", "ValueError", ":", "pass" ]
Check if the inputfile is a valid bed-file
[ "Check", "if", "the", "inputfile", "is", "a", "valid", "bed", "-", "file" ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/validation.py#L13-L37
train
vanheeringen-lab/gimmemotifs
gimmemotifs/validation.py
check_denovo_input
def check_denovo_input(inputfile, params): """ Check if an input file is valid, which means BED, narrowPeak or FASTA """ background = params["background"] input_type = determine_file_type(inputfile) if input_type == "fasta": valid_bg = FA_VALID_BGS elif input_type in ["bed", "narrowpeak"]: genome = params["genome"] valid_bg = BED_VALID_BGS if "genomic" in background or "gc" in background: Genome(genome) # is it a valid bed-file etc. check_bed_file(inputfile) # bed-specific, will also work for narrowPeak else: sys.stderr.write("Format of inputfile {} not recognized.\n".format(inputfile)) sys.stderr.write("Input should be FASTA, BED or narrowPeak.\n") sys.stderr.write("See https://genome.ucsc.edu/FAQ/FAQformat.html for specifications.\n") sys.exit(1) for bg in background: if not bg in valid_bg: logger.info("Input type is %s, ignoring background type '%s'", input_type, bg) background = [bg for bg in background if bg in valid_bg] if len(background) == 0: logger.error("No valid backgrounds specified!") sys.exit(1) return input_type, background
python
def check_denovo_input(inputfile, params):
    """Check that a de novo input file is valid (BED, narrowPeak or FASTA).

    Exits the program (exit code 1) when the file format is not
    recognized or when none of the requested background types is valid
    for the detected input type.

    Parameters
    ----------
    inputfile : str
        Path of the input file.
    params : dict
        Parameter dict; must contain "background" and, for BED/narrowPeak
        input, "genome".

    Returns
    -------
    tuple
        (input_type, background) where input_type is "fasta", "bed" or
        "narrowpeak" and background is the subset of the requested
        background types valid for this input type.
    """
    background = params["background"]

    input_type = determine_file_type(inputfile)

    if input_type == "fasta":
        valid_bg = FA_VALID_BGS
    elif input_type in ["bed", "narrowpeak"]:
        genome = params["genome"]
        valid_bg = BED_VALID_BGS
        if "genomic" in background or "gc" in background:
            # Raises if the genome is not installed / available.
            Genome(genome)
        # Validate coordinates; this also works for narrowPeak input.
        check_bed_file(inputfile)
    else:
        sys.stderr.write("Format of inputfile {} not recognized.\n".format(inputfile))
        sys.stderr.write("Input should be FASTA, BED or narrowPeak.\n")
        sys.stderr.write(
            "See https://genome.ucsc.edu/FAQ/FAQformat.html for specifications.\n"
        )
        sys.exit(1)

    # Warn about, then drop, background types that do not apply to this
    # input type.
    for bg in background:
        if bg not in valid_bg:
            logger.info(
                "Input type is %s, ignoring background type '%s'", input_type, bg
            )
    background = [bg for bg in background if bg in valid_bg]

    if not background:
        logger.error("No valid backgrounds specified!")
        sys.exit(1)

    return input_type, background
[ "def", "check_denovo_input", "(", "inputfile", ",", "params", ")", ":", "background", "=", "params", "[", "\"background\"", "]", "input_type", "=", "determine_file_type", "(", "inputfile", ")", "if", "input_type", "==", "\"fasta\"", ":", "valid_bg", "=", "FA_VALID_BGS", "elif", "input_type", "in", "[", "\"bed\"", ",", "\"narrowpeak\"", "]", ":", "genome", "=", "params", "[", "\"genome\"", "]", "valid_bg", "=", "BED_VALID_BGS", "if", "\"genomic\"", "in", "background", "or", "\"gc\"", "in", "background", ":", "Genome", "(", "genome", ")", "# is it a valid bed-file etc.", "check_bed_file", "(", "inputfile", ")", "# bed-specific, will also work for narrowPeak", "else", ":", "sys", ".", "stderr", ".", "write", "(", "\"Format of inputfile {} not recognized.\\n\"", ".", "format", "(", "inputfile", ")", ")", "sys", ".", "stderr", ".", "write", "(", "\"Input should be FASTA, BED or narrowPeak.\\n\"", ")", "sys", ".", "stderr", ".", "write", "(", "\"See https://genome.ucsc.edu/FAQ/FAQformat.html for specifications.\\n\"", ")", "sys", ".", "exit", "(", "1", ")", "for", "bg", "in", "background", ":", "if", "not", "bg", "in", "valid_bg", ":", "logger", ".", "info", "(", "\"Input type is %s, ignoring background type '%s'\"", ",", "input_type", ",", "bg", ")", "background", "=", "[", "bg", "for", "bg", "in", "background", "if", "bg", "in", "valid_bg", "]", "if", "len", "(", "background", ")", "==", "0", ":", "logger", ".", "error", "(", "\"No valid backgrounds specified!\"", ")", "sys", ".", "exit", "(", "1", ")", "return", "input_type", ",", "background" ]
Check if an input file is valid, which means BED, narrowPeak or FASTA
[ "Check", "if", "an", "input", "file", "is", "valid", "which", "means", "BED", "narrowPeak", "or", "FASTA" ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/validation.py#L41-L74
train
vanheeringen-lab/gimmemotifs
gimmemotifs/scanner.py
scan_to_best_match
def scan_to_best_match(fname, motifs, ncpus=None, genome=None, score=False): """Scan a FASTA file with motifs. Scan a FASTA file and return a dictionary with the best match per motif. Parameters ---------- fname : str Filename of a sequence file in FASTA format. motifs : list List of motif instances. Returns ------- result : dict Dictionary with motif scanning results. """ # Initialize scanner s = Scanner(ncpus=ncpus) s.set_motifs(motifs) s.set_threshold(threshold=0.0) if genome: s.set_genome(genome) if isinstance(motifs, six.string_types): motifs = read_motifs(motifs) logger.debug("scanning %s...", fname) result = dict([(m.id, []) for m in motifs]) if score: it = s.best_score(fname) else: it = s.best_match(fname) for scores in it: for motif,score in zip(motifs, scores): result[motif.id].append(score) # Close the pool and reclaim memory del s return result
python
def scan_to_best_match(fname, motifs, ncpus=None, genome=None, score=False):
    """Scan a FASTA file with motifs.

    Scan a FASTA file and return a dictionary with the best match per
    motif.

    Parameters
    ----------
    fname : str
        Filename of a sequence file in FASTA format.
    motifs : list or str
        List of motif instances, or the name of a motif file.
    ncpus : int, optional
        Number of CPUs to use while scanning.
    genome : str, optional
        Genome name, passed on to the scanner.
    score : bool, optional
        If True, report only the best score per motif instead of the
        best match tuple.

    Returns
    -------
    result : dict
        Dictionary with motif scanning results, keyed by motif id.
    """
    # Initialize scanner
    s = Scanner(ncpus=ncpus)
    s.set_motifs(motifs)
    s.set_threshold(threshold=0.0)
    if genome:
        s.set_genome(genome)

    # The scanner accepts a motif filename directly; resolve it to motif
    # instances here so the result dict can be keyed by motif id.
    if isinstance(motifs, six.string_types):
        motifs = read_motifs(motifs)

    logger.debug("scanning %s...", fname)
    result = {m.id: [] for m in motifs}
    it = s.best_score(fname) if score else s.best_match(fname)
    for per_seq in it:
        # BUGFIX: the loop variable was named `score`, shadowing the
        # boolean `score` parameter; renamed to `best`.
        for motif, best in zip(motifs, per_seq):
            result[motif.id].append(best)

    # Close the pool and reclaim memory
    del s

    return result
[ "def", "scan_to_best_match", "(", "fname", ",", "motifs", ",", "ncpus", "=", "None", ",", "genome", "=", "None", ",", "score", "=", "False", ")", ":", "# Initialize scanner", "s", "=", "Scanner", "(", "ncpus", "=", "ncpus", ")", "s", ".", "set_motifs", "(", "motifs", ")", "s", ".", "set_threshold", "(", "threshold", "=", "0.0", ")", "if", "genome", ":", "s", ".", "set_genome", "(", "genome", ")", "if", "isinstance", "(", "motifs", ",", "six", ".", "string_types", ")", ":", "motifs", "=", "read_motifs", "(", "motifs", ")", "logger", ".", "debug", "(", "\"scanning %s...\"", ",", "fname", ")", "result", "=", "dict", "(", "[", "(", "m", ".", "id", ",", "[", "]", ")", "for", "m", "in", "motifs", "]", ")", "if", "score", ":", "it", "=", "s", ".", "best_score", "(", "fname", ")", "else", ":", "it", "=", "s", ".", "best_match", "(", "fname", ")", "for", "scores", "in", "it", ":", "for", "motif", ",", "score", "in", "zip", "(", "motifs", ",", "scores", ")", ":", "result", "[", "motif", ".", "id", "]", ".", "append", "(", "score", ")", "# Close the pool and reclaim memory", "del", "s", "return", "result" ]
Scan a FASTA file with motifs. Scan a FASTA file and return a dictionary with the best match per motif. Parameters ---------- fname : str Filename of a sequence file in FASTA format. motifs : list List of motif instances. Returns ------- result : dict Dictionary with motif scanning results.
[ "Scan", "a", "FASTA", "file", "with", "motifs", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/scanner.py#L55-L96
train
vanheeringen-lab/gimmemotifs
gimmemotifs/scanner.py
Scanner.set_background
def set_background(self, fname=None, genome=None, length=200, nseq=10000): """Set the background to use for FPR and z-score calculations. Background can be specified either as a genome name or as the name of a FASTA file. Parameters ---------- fname : str, optional Name of FASTA file to use as background. genome : str, optional Name of genome to use to retrieve random sequences. length : int, optional Length of genomic sequences to retrieve. The default is 200. nseq : int, optional Number of genomic sequences to retrieve. """ length = int(length) if genome and fname: raise ValueError("Need either genome or filename for background.") if fname: if not os.path.exists(fname): raise IOError("Background file {} does not exist!".format(fname)) self.background = Fasta(fname) self.background_hash = file_checksum(fname) return if not genome: if self.genome: genome = self.genome logger.info("Using default background: genome {} with length {}".format( genome, length)) else: raise ValueError("Need either genome or filename for background.") logger.info("Using background: genome {} with length {}".format(genome, length)) with Cache(CACHE_DIR) as cache: self.background_hash = "{}\{}".format(genome, int(length)) fa = cache.get(self.background_hash) if not fa: fa = RandomGenomicFasta(genome, length, nseq) cache.set(self.background_hash, fa) self.background = fa
python
def set_background(self, fname=None, genome=None, length=200, nseq=10000):
    """Set the background to use for FPR and z-score calculations.

    Background can be specified either as a genome name or as the
    name of a FASTA file.

    Parameters
    ----------
    fname : str, optional
        Name of FASTA file to use as background.
    genome : str, optional
        Name of genome to use to retrieve random sequences.
    length : int, optional
        Length of genomic sequences to retrieve. The default
        is 200.
    nseq : int, optional
        Number of genomic sequences to retrieve.

    Raises
    ------
    ValueError
        If both `fname` and `genome` are given, or neither is given and
        no default genome is set.
    IOError
        If `fname` does not exist.
    """
    length = int(length)

    if genome and fname:
        raise ValueError("Need either genome or filename for background.")

    if fname:
        if not os.path.exists(fname):
            raise IOError("Background file {} does not exist!".format(fname))

        self.background = Fasta(fname)
        self.background_hash = file_checksum(fname)
        return

    if not genome:
        if self.genome:
            # Fall back to the genome previously set on the scanner.
            genome = self.genome
            logger.info(
                "Using default background: genome {} with length {}".format(
                    genome, length
                )
            )
        else:
            raise ValueError("Need either genome or filename for background.")

    logger.info("Using background: genome {} with length {}".format(genome, length))
    with Cache(CACHE_DIR) as cache:
        # BUGFIX: the cache key now includes nseq; previously a cached
        # background generated with a different number of sequences could
        # be returned. (The old key also joined genome and length with a
        # literal backslash.)
        self.background_hash = "{}|{}|{}".format(genome, length, int(nseq))
        fa = cache.get(self.background_hash)
        if not fa:
            fa = RandomGenomicFasta(genome, length, nseq)
            cache.set(self.background_hash, fa)
        self.background = fa
[ "def", "set_background", "(", "self", ",", "fname", "=", "None", ",", "genome", "=", "None", ",", "length", "=", "200", ",", "nseq", "=", "10000", ")", ":", "length", "=", "int", "(", "length", ")", "if", "genome", "and", "fname", ":", "raise", "ValueError", "(", "\"Need either genome or filename for background.\"", ")", "if", "fname", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "fname", ")", ":", "raise", "IOError", "(", "\"Background file {} does not exist!\"", ".", "format", "(", "fname", ")", ")", "self", ".", "background", "=", "Fasta", "(", "fname", ")", "self", ".", "background_hash", "=", "file_checksum", "(", "fname", ")", "return", "if", "not", "genome", ":", "if", "self", ".", "genome", ":", "genome", "=", "self", ".", "genome", "logger", ".", "info", "(", "\"Using default background: genome {} with length {}\"", ".", "format", "(", "genome", ",", "length", ")", ")", "else", ":", "raise", "ValueError", "(", "\"Need either genome or filename for background.\"", ")", "logger", ".", "info", "(", "\"Using background: genome {} with length {}\"", ".", "format", "(", "genome", ",", "length", ")", ")", "with", "Cache", "(", "CACHE_DIR", ")", "as", "cache", ":", "self", ".", "background_hash", "=", "\"{}\\{}\"", ".", "format", "(", "genome", ",", "int", "(", "length", ")", ")", "fa", "=", "cache", ".", "get", "(", "self", ".", "background_hash", ")", "if", "not", "fa", ":", "fa", "=", "RandomGenomicFasta", "(", "genome", ",", "length", ",", "nseq", ")", "cache", ".", "set", "(", "self", ".", "background_hash", ",", "fa", ")", "self", ".", "background", "=", "fa" ]
Set the background to use for FPR and z-score calculations. Background can be specified either as a genome name or as the name of a FASTA file. Parameters ---------- fname : str, optional Name of FASTA file to use as background. genome : str, optional Name of genome to use to retrieve random sequences. length : int, optional Length of genomic sequences to retrieve. The default is 200. nseq : int, optional Number of genomic sequences to retrieve.
[ "Set", "the", "background", "to", "use", "for", "FPR", "and", "z", "-", "score", "calculations", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/scanner.py#L378-L428
train
vanheeringen-lab/gimmemotifs
gimmemotifs/scanner.py
Scanner.set_threshold
def set_threshold(self, fpr=None, threshold=None): """Set motif scanning threshold based on background sequences. Parameters ---------- fpr : float, optional Desired FPR, between 0.0 and 1.0. threshold : float or str, optional Desired motif threshold, expressed as the fraction of the difference between minimum and maximum score of the PWM. Should either be a float between 0.0 and 1.0 or a filename with thresholds as created by 'gimme threshold'. """ if threshold and fpr: raise ValueError("Need either fpr or threshold.") if fpr: fpr = float(fpr) if not (0.0 < fpr < 1.0): raise ValueError("Parameter fpr should be between 0 and 1") if not self.motifs: raise ValueError("please run set_motifs() first") thresholds = {} motifs = read_motifs(self.motifs) if threshold is not None: self.threshold = parse_threshold_values(self.motifs, threshold) return if not self.background: try: self.set_background() except: raise ValueError("please run set_background() first") seqs = self.background.seqs with Cache(CACHE_DIR) as cache: scan_motifs = [] for motif in motifs: k = "{}|{}|{:.4f}".format(motif.hash(), self.background_hash, fpr) threshold = cache.get(k) if threshold is None: scan_motifs.append(motif) else: if np.isclose(threshold, motif.pwm_max_score()): thresholds[motif.id] = None elif np.isclose(threshold, motif.pwm_min_score()): thresholds[motif.id] = 0.0 else: thresholds[motif.id] = threshold if len(scan_motifs) > 0: logger.info("Determining FPR-based threshold") for motif, threshold in self._threshold_from_seqs(scan_motifs, seqs, fpr): k = "{}|{}|{:.4f}".format(motif.hash(), self.background_hash, fpr) cache.set(k, threshold) if np.isclose(threshold, motif.pwm_max_score()): thresholds[motif.id] = None elif np.isclose(threshold, motif.pwm_min_score()): thresholds[motif.id] = 0.0 else: thresholds[motif.id] = threshold self.threshold_str = "{}_{}_{}".format(fpr, threshold, self.background_hash) self.threshold = thresholds
python
def set_threshold(self, fpr=None, threshold=None):
    """Set motif scanning threshold based on background sequences.

    Parameters
    ----------
    fpr : float, optional
        Desired FPR, between 0.0 and 1.0.
    threshold : float or str, optional
        Desired motif threshold, expressed as the fraction of the
        difference between minimum and maximum score of the PWM.
        Should either be a float between 0.0 and 1.0 or a filename
        with thresholds as created by 'gimme threshold'.

    Raises
    ------
    ValueError
        If both or neither of `fpr` and `threshold` are given, if `fpr`
        is out of range, or if no motifs have been set.
    """
    if threshold and fpr:
        raise ValueError("Need either fpr or threshold.")
    # BUGFIX: previously, calling this method with neither argument
    # crashed much later with a TypeError while formatting the cache key.
    if fpr is None and threshold is None:
        raise ValueError("Need either fpr or threshold.")

    if fpr:
        fpr = float(fpr)
        if not (0.0 < fpr < 1.0):
            raise ValueError("Parameter fpr should be between 0 and 1")

    if not self.motifs:
        raise ValueError("please run set_motifs() first")

    thresholds = {}
    motifs = read_motifs(self.motifs)

    if threshold is not None:
        self.threshold = parse_threshold_values(self.motifs, threshold)
        return

    if not self.background:
        try:
            self.set_background()
        # BUGFIX: narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt and SystemExit.
        except Exception:
            raise ValueError("please run set_background() first")

    seqs = self.background.seqs

    def _stored_threshold(motif, t):
        # Map a raw threshold to its stored form: the PWM maximum means
        # "never matches" (stored as None), the minimum means "always
        # matches" (stored as 0.0).
        if np.isclose(t, motif.pwm_max_score()):
            return None
        if np.isclose(t, motif.pwm_min_score()):
            return 0.0
        return t

    with Cache(CACHE_DIR) as cache:
        scan_motifs = []
        for motif in motifs:
            k = "{}|{}|{:.4f}".format(motif.hash(), self.background_hash, fpr)
            threshold = cache.get(k)
            if threshold is None:
                # Not cached yet; determine it from the background below.
                scan_motifs.append(motif)
            else:
                thresholds[motif.id] = _stored_threshold(motif, threshold)

        if len(scan_motifs) > 0:
            logger.info("Determining FPR-based threshold")
            for motif, threshold in self._threshold_from_seqs(scan_motifs, seqs, fpr):
                k = "{}|{}|{:.4f}".format(motif.hash(), self.background_hash, fpr)
                cache.set(k, threshold)
                thresholds[motif.id] = _stored_threshold(motif, threshold)

    self.threshold_str = "{}_{}_{}".format(fpr, threshold, self.background_hash)
    self.threshold = thresholds
[ "def", "set_threshold", "(", "self", ",", "fpr", "=", "None", ",", "threshold", "=", "None", ")", ":", "if", "threshold", "and", "fpr", ":", "raise", "ValueError", "(", "\"Need either fpr or threshold.\"", ")", "if", "fpr", ":", "fpr", "=", "float", "(", "fpr", ")", "if", "not", "(", "0.0", "<", "fpr", "<", "1.0", ")", ":", "raise", "ValueError", "(", "\"Parameter fpr should be between 0 and 1\"", ")", "if", "not", "self", ".", "motifs", ":", "raise", "ValueError", "(", "\"please run set_motifs() first\"", ")", "thresholds", "=", "{", "}", "motifs", "=", "read_motifs", "(", "self", ".", "motifs", ")", "if", "threshold", "is", "not", "None", ":", "self", ".", "threshold", "=", "parse_threshold_values", "(", "self", ".", "motifs", ",", "threshold", ")", "return", "if", "not", "self", ".", "background", ":", "try", ":", "self", ".", "set_background", "(", ")", "except", ":", "raise", "ValueError", "(", "\"please run set_background() first\"", ")", "seqs", "=", "self", ".", "background", ".", "seqs", "with", "Cache", "(", "CACHE_DIR", ")", "as", "cache", ":", "scan_motifs", "=", "[", "]", "for", "motif", "in", "motifs", ":", "k", "=", "\"{}|{}|{:.4f}\"", ".", "format", "(", "motif", ".", "hash", "(", ")", ",", "self", ".", "background_hash", ",", "fpr", ")", "threshold", "=", "cache", ".", "get", "(", "k", ")", "if", "threshold", "is", "None", ":", "scan_motifs", ".", "append", "(", "motif", ")", "else", ":", "if", "np", ".", "isclose", "(", "threshold", ",", "motif", ".", "pwm_max_score", "(", ")", ")", ":", "thresholds", "[", "motif", ".", "id", "]", "=", "None", "elif", "np", ".", "isclose", "(", "threshold", ",", "motif", ".", "pwm_min_score", "(", ")", ")", ":", "thresholds", "[", "motif", ".", "id", "]", "=", "0.0", "else", ":", "thresholds", "[", "motif", ".", "id", "]", "=", "threshold", "if", "len", "(", "scan_motifs", ")", ">", "0", ":", "logger", ".", "info", "(", "\"Determining FPR-based threshold\"", ")", "for", "motif", ",", "threshold", "in", "self", 
".", "_threshold_from_seqs", "(", "scan_motifs", ",", "seqs", ",", "fpr", ")", ":", "k", "=", "\"{}|{}|{:.4f}\"", ".", "format", "(", "motif", ".", "hash", "(", ")", ",", "self", ".", "background_hash", ",", "fpr", ")", "cache", ".", "set", "(", "k", ",", "threshold", ")", "if", "np", ".", "isclose", "(", "threshold", ",", "motif", ".", "pwm_max_score", "(", ")", ")", ":", "thresholds", "[", "motif", ".", "id", "]", "=", "None", "elif", "np", ".", "isclose", "(", "threshold", ",", "motif", ".", "pwm_min_score", "(", ")", ")", ":", "thresholds", "[", "motif", ".", "id", "]", "=", "0.0", "else", ":", "thresholds", "[", "motif", ".", "id", "]", "=", "threshold", "self", ".", "threshold_str", "=", "\"{}_{}_{}\"", ".", "format", "(", "fpr", ",", "threshold", ",", "self", ".", "background_hash", ")", "self", ".", "threshold", "=", "thresholds" ]
Set motif scanning threshold based on background sequences. Parameters ---------- fpr : float, optional Desired FPR, between 0.0 and 1.0. threshold : float or str, optional Desired motif threshold, expressed as the fraction of the difference between minimum and maximum score of the PWM. Should either be a float between 0.0 and 1.0 or a filename with thresholds as created by 'gimme threshold'.
[ "Set", "motif", "scanning", "threshold", "based", "on", "background", "sequences", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/scanner.py#L430-L499
train
vanheeringen-lab/gimmemotifs
gimmemotifs/scanner.py
Scanner.count
def count(self, seqs, nreport=100, scan_rc=True): """ count the number of matches above the cutoff returns an iterator of lists containing integer counts """ for matches in self.scan(seqs, nreport, scan_rc): counts = [len(m) for m in matches] yield counts
python
def count(self, seqs, nreport=100, scan_rc=True):
    """
    count the number of matches above the cutoff
    returns an iterator of lists containing integer counts
    """
    # One list per sequence; each entry is the match count for a motif.
    for match_lists in self.scan(seqs, nreport, scan_rc):
        yield list(map(len, match_lists))
[ "def", "count", "(", "self", ",", "seqs", ",", "nreport", "=", "100", ",", "scan_rc", "=", "True", ")", ":", "for", "matches", "in", "self", ".", "scan", "(", "seqs", ",", "nreport", ",", "scan_rc", ")", ":", "counts", "=", "[", "len", "(", "m", ")", "for", "m", "in", "matches", "]", "yield", "counts" ]
count the number of matches above the cutoff returns an iterator of lists containing integer counts
[ "count", "the", "number", "of", "matches", "above", "the", "cutoff", "returns", "an", "iterator", "of", "lists", "containing", "integer", "counts" ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/scanner.py#L515-L522
train
vanheeringen-lab/gimmemotifs
gimmemotifs/scanner.py
Scanner.total_count
def total_count(self, seqs, nreport=100, scan_rc=True): """ count the number of matches above the cutoff returns an iterator of lists containing integer counts """ count_table = [counts for counts in self.count(seqs, nreport, scan_rc)] return np.sum(np.array(count_table), 0)
python
def total_count(self, seqs, nreport=100, scan_rc=True):
    """Count the number of matches above the cutoff.

    Returns an array with one entry per motif: the total number of
    matches summed over all sequences.

    Parameters
    ----------
    seqs
        Regions / sequences to scan (same accepted forms as `scan`).
    nreport : int, optional
        Maximum number of matches to report per sequence per motif.
    scan_rc : bool, optional
        Also scan the reverse complement.

    Returns
    -------
    numpy.ndarray
        Per-motif total match counts.
    """
    # NOTE: the original docstring claimed this returns an iterator;
    # it actually returns the column-wise sum of the count table.
    count_table = list(self.count(seqs, nreport, scan_rc))
    return np.sum(np.array(count_table), 0)
[ "def", "total_count", "(", "self", ",", "seqs", ",", "nreport", "=", "100", ",", "scan_rc", "=", "True", ")", ":", "count_table", "=", "[", "counts", "for", "counts", "in", "self", ".", "count", "(", "seqs", ",", "nreport", ",", "scan_rc", ")", "]", "return", "np", ".", "sum", "(", "np", ".", "array", "(", "count_table", ")", ",", "0", ")" ]
count the number of matches above the cutoff returns an iterator of lists containing integer counts
[ "count", "the", "number", "of", "matches", "above", "the", "cutoff", "returns", "an", "iterator", "of", "lists", "containing", "integer", "counts" ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/scanner.py#L524-L531
train
vanheeringen-lab/gimmemotifs
gimmemotifs/scanner.py
Scanner.best_score
def best_score(self, seqs, scan_rc=True, normalize=False): """ give the score of the best match of each motif in each sequence returns an iterator of lists containing floats """ self.set_threshold(threshold=0.0) if normalize and len(self.meanstd) == 0: self.set_meanstd() means = np.array([self.meanstd[m][0] for m in self.motif_ids]) stds = np.array([self.meanstd[m][1] for m in self.motif_ids]) for matches in self.scan(seqs, 1, scan_rc): scores = np.array([sorted(m, key=lambda x: x[0])[0][0] for m in matches if len(m) > 0]) if normalize: scores = (scores - means) / stds yield scores
python
def best_score(self, seqs, scan_rc=True, normalize=False):
    """
    give the score of the best match of each motif in each sequence
    returns an iterator of lists containing floats
    """
    # Threshold 0.0 guarantees every motif reports a (best) match.
    self.set_threshold(threshold=0.0)

    means = stds = None
    if normalize:
        # BUGFIX: means/stds are only needed (and self.meanstd is only
        # guaranteed to be populated) when normalization is requested;
        # previously they were computed unconditionally, which raised a
        # KeyError when meanstd was empty.
        if len(self.meanstd) == 0:
            self.set_meanstd()
        means = np.array([self.meanstd[m][0] for m in self.motif_ids])
        stds = np.array([self.meanstd[m][1] for m in self.motif_ids])

    for matches in self.scan(seqs, 1, scan_rc):
        scores = np.array(
            [sorted(m, key=lambda x: x[0])[0][0] for m in matches if len(m) > 0]
        )
        if normalize:
            # Convert raw scores to z-scores.
            scores = (scores - means) / stds
        yield scores
[ "def", "best_score", "(", "self", ",", "seqs", ",", "scan_rc", "=", "True", ",", "normalize", "=", "False", ")", ":", "self", ".", "set_threshold", "(", "threshold", "=", "0.0", ")", "if", "normalize", "and", "len", "(", "self", ".", "meanstd", ")", "==", "0", ":", "self", ".", "set_meanstd", "(", ")", "means", "=", "np", ".", "array", "(", "[", "self", ".", "meanstd", "[", "m", "]", "[", "0", "]", "for", "m", "in", "self", ".", "motif_ids", "]", ")", "stds", "=", "np", ".", "array", "(", "[", "self", ".", "meanstd", "[", "m", "]", "[", "1", "]", "for", "m", "in", "self", ".", "motif_ids", "]", ")", "for", "matches", "in", "self", ".", "scan", "(", "seqs", ",", "1", ",", "scan_rc", ")", ":", "scores", "=", "np", ".", "array", "(", "[", "sorted", "(", "m", ",", "key", "=", "lambda", "x", ":", "x", "[", "0", "]", ")", "[", "0", "]", "[", "0", "]", "for", "m", "in", "matches", "if", "len", "(", "m", ")", ">", "0", "]", ")", "if", "normalize", ":", "scores", "=", "(", "scores", "-", "means", ")", "/", "stds", "yield", "scores" ]
give the score of the best match of each motif in each sequence returns an iterator of lists containing floats
[ "give", "the", "score", "of", "the", "best", "match", "of", "each", "motif", "in", "each", "sequence", "returns", "an", "iterator", "of", "lists", "containing", "floats" ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/scanner.py#L533-L548
train
vanheeringen-lab/gimmemotifs
gimmemotifs/scanner.py
Scanner.best_match
def best_match(self, seqs, scan_rc=True): """ give the best match of each motif in each sequence returns an iterator of nested lists containing tuples: (score, position, strand) """ self.set_threshold(threshold=0.0) for matches in self.scan(seqs, 1, scan_rc): yield [m[0] for m in matches]
python
def best_match(self, seqs, scan_rc=True):
    """
    give the best match of each motif in each sequence
    returns an iterator of nested lists containing tuples:
    (score, position, strand)
    """
    # With threshold 0.0 every motif yields at least one match, so the
    # first reported match (nreport=1) is the best one.
    self.set_threshold(threshold=0.0)
    for per_motif_matches in self.scan(seqs, 1, scan_rc):
        yield [matches[0] for matches in per_motif_matches]
[ "def", "best_match", "(", "self", ",", "seqs", ",", "scan_rc", "=", "True", ")", ":", "self", ".", "set_threshold", "(", "threshold", "=", "0.0", ")", "for", "matches", "in", "self", ".", "scan", "(", "seqs", ",", "1", ",", "scan_rc", ")", ":", "yield", "[", "m", "[", "0", "]", "for", "m", "in", "matches", "]" ]
give the best match of each motif in each sequence returns an iterator of nested lists containing tuples: (score, position, strand)
[ "give", "the", "best", "match", "of", "each", "motif", "in", "each", "sequence", "returns", "an", "iterator", "of", "nested", "lists", "containing", "tuples", ":", "(", "score", "position", "strand", ")" ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/scanner.py#L550-L558
train
vanheeringen-lab/gimmemotifs
gimmemotifs/scanner.py
Scanner.scan
def scan(self, seqs, nreport=100, scan_rc=True, normalize=False): """ scan a set of regions / sequences """ if not self.threshold: sys.stderr.write( "Using default threshold of 0.95. " "This is likely not optimal!\n" ) self.set_threshold(threshold=0.95) seqs = as_fasta(seqs, genome=self.genome) it = self._scan_sequences(seqs.seqs, nreport, scan_rc) if normalize: if len(self.meanstd) == 0: self.set_meanstd() mean_std = [self.meanstd.get(m_id) for m_id in self.motif_ids] means = [x[0] for x in mean_std] stds = [x[1] for x in mean_std] for result in it: if normalize: zresult = [] for i,mrow in enumerate(result): mrow = [((x[0] - means[i]) / stds[i], x[1], x[2]) for x in mrow] zresult.append(mrow) yield zresult else: yield result
python
def scan(self, seqs, nreport=100, scan_rc=True, normalize=False): """ scan a set of regions / sequences """ if not self.threshold: sys.stderr.write( "Using default threshold of 0.95. " "This is likely not optimal!\n" ) self.set_threshold(threshold=0.95) seqs = as_fasta(seqs, genome=self.genome) it = self._scan_sequences(seqs.seqs, nreport, scan_rc) if normalize: if len(self.meanstd) == 0: self.set_meanstd() mean_std = [self.meanstd.get(m_id) for m_id in self.motif_ids] means = [x[0] for x in mean_std] stds = [x[1] for x in mean_std] for result in it: if normalize: zresult = [] for i,mrow in enumerate(result): mrow = [((x[0] - means[i]) / stds[i], x[1], x[2]) for x in mrow] zresult.append(mrow) yield zresult else: yield result
[ "def", "scan", "(", "self", ",", "seqs", ",", "nreport", "=", "100", ",", "scan_rc", "=", "True", ",", "normalize", "=", "False", ")", ":", "if", "not", "self", ".", "threshold", ":", "sys", ".", "stderr", ".", "write", "(", "\"Using default threshold of 0.95. \"", "\"This is likely not optimal!\\n\"", ")", "self", ".", "set_threshold", "(", "threshold", "=", "0.95", ")", "seqs", "=", "as_fasta", "(", "seqs", ",", "genome", "=", "self", ".", "genome", ")", "it", "=", "self", ".", "_scan_sequences", "(", "seqs", ".", "seqs", ",", "nreport", ",", "scan_rc", ")", "if", "normalize", ":", "if", "len", "(", "self", ".", "meanstd", ")", "==", "0", ":", "self", ".", "set_meanstd", "(", ")", "mean_std", "=", "[", "self", ".", "meanstd", ".", "get", "(", "m_id", ")", "for", "m_id", "in", "self", ".", "motif_ids", "]", "means", "=", "[", "x", "[", "0", "]", "for", "x", "in", "mean_std", "]", "stds", "=", "[", "x", "[", "1", "]", "for", "x", "in", "mean_std", "]", "for", "result", "in", "it", ":", "if", "normalize", ":", "zresult", "=", "[", "]", "for", "i", ",", "mrow", "in", "enumerate", "(", "result", ")", ":", "mrow", "=", "[", "(", "(", "x", "[", "0", "]", "-", "means", "[", "i", "]", ")", "/", "stds", "[", "i", "]", ",", "x", "[", "1", "]", ",", "x", "[", "2", "]", ")", "for", "x", "in", "mrow", "]", "zresult", ".", "append", "(", "mrow", ")", "yield", "zresult", "else", ":", "yield", "result" ]
scan a set of regions / sequences
[ "scan", "a", "set", "of", "regions", "/", "sequences" ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/scanner.py#L560-L593
train
vanheeringen-lab/gimmemotifs
gimmemotifs/commands/roc.py
roc
def roc(args): """ Calculate ROC_AUC and other metrics and optionally plot ROC curve.""" outputfile = args.outfile # Default extension for image if outputfile and not outputfile.endswith(".png"): outputfile += ".png" motifs = read_motifs(args.pwmfile, fmt="pwm") ids = [] if args.ids: ids = args.ids.split(",") else: ids = [m.id for m in motifs] motifs = [m for m in motifs if (m.id in ids)] stats = [ "phyper_at_fpr", "roc_auc", "pr_auc", "enr_at_fpr", "recall_at_fdr", "roc_values", "matches_at_fpr", ] plot_x = [] plot_y = [] legend = [] f_out = sys.stdout if args.outdir: if not os.path.exists(args.outdir): os.makedirs(args.outdir) f_out = open(args.outdir + "/gimme.roc.report.txt", "w") # Print the metrics f_out.write("Motif\t# matches\t# matches background\tP-value\tlog10 P-value\tROC AUC\tPR AUC\tEnr. at 1% FPR\tRecall at 10% FDR\n") for motif_stats in calc_stats_iterator(motifs, args.sample, args.background, genome=args.genome, stats=stats, ncpus=args.ncpus): for motif in motifs: if str(motif) in motif_stats: if outputfile: x, y = motif_stats[str(motif)]["roc_values"] plot_x.append(x) plot_y.append(y) legend.append(motif.id) log_pvalue = np.inf if motif_stats[str(motif)]["phyper_at_fpr"] > 0: log_pvalue = -np.log10(motif_stats[str(motif)]["phyper_at_fpr"]) f_out.write("{}\t{:d}\t{:d}\t{:.2e}\t{:.3f}\t{:.3f}\t{:.3f}\t{:.2f}\t{:0.4f}\n".format( motif.id, motif_stats[str(motif)]["matches_at_fpr"][0], motif_stats[str(motif)]["matches_at_fpr"][1], motif_stats[str(motif)]["phyper_at_fpr"], log_pvalue, motif_stats[str(motif)]["roc_auc"], motif_stats[str(motif)]["pr_auc"], motif_stats[str(motif)]["enr_at_fpr"], motif_stats[str(motif)]["recall_at_fdr"], )) f_out.close() if args.outdir: html_report( args.outdir, args.outdir + "/gimme.roc.report.txt", args.pwmfile, 0.01, ) # Plot the ROC curve if outputfile: roc_plot(outputfile, plot_x, plot_y, ids=legend)
python
def roc(args): """ Calculate ROC_AUC and other metrics and optionally plot ROC curve.""" outputfile = args.outfile # Default extension for image if outputfile and not outputfile.endswith(".png"): outputfile += ".png" motifs = read_motifs(args.pwmfile, fmt="pwm") ids = [] if args.ids: ids = args.ids.split(",") else: ids = [m.id for m in motifs] motifs = [m for m in motifs if (m.id in ids)] stats = [ "phyper_at_fpr", "roc_auc", "pr_auc", "enr_at_fpr", "recall_at_fdr", "roc_values", "matches_at_fpr", ] plot_x = [] plot_y = [] legend = [] f_out = sys.stdout if args.outdir: if not os.path.exists(args.outdir): os.makedirs(args.outdir) f_out = open(args.outdir + "/gimme.roc.report.txt", "w") # Print the metrics f_out.write("Motif\t# matches\t# matches background\tP-value\tlog10 P-value\tROC AUC\tPR AUC\tEnr. at 1% FPR\tRecall at 10% FDR\n") for motif_stats in calc_stats_iterator(motifs, args.sample, args.background, genome=args.genome, stats=stats, ncpus=args.ncpus): for motif in motifs: if str(motif) in motif_stats: if outputfile: x, y = motif_stats[str(motif)]["roc_values"] plot_x.append(x) plot_y.append(y) legend.append(motif.id) log_pvalue = np.inf if motif_stats[str(motif)]["phyper_at_fpr"] > 0: log_pvalue = -np.log10(motif_stats[str(motif)]["phyper_at_fpr"]) f_out.write("{}\t{:d}\t{:d}\t{:.2e}\t{:.3f}\t{:.3f}\t{:.3f}\t{:.2f}\t{:0.4f}\n".format( motif.id, motif_stats[str(motif)]["matches_at_fpr"][0], motif_stats[str(motif)]["matches_at_fpr"][1], motif_stats[str(motif)]["phyper_at_fpr"], log_pvalue, motif_stats[str(motif)]["roc_auc"], motif_stats[str(motif)]["pr_auc"], motif_stats[str(motif)]["enr_at_fpr"], motif_stats[str(motif)]["recall_at_fdr"], )) f_out.close() if args.outdir: html_report( args.outdir, args.outdir + "/gimme.roc.report.txt", args.pwmfile, 0.01, ) # Plot the ROC curve if outputfile: roc_plot(outputfile, plot_x, plot_y, ids=legend)
[ "def", "roc", "(", "args", ")", ":", "outputfile", "=", "args", ".", "outfile", "# Default extension for image", "if", "outputfile", "and", "not", "outputfile", ".", "endswith", "(", "\".png\"", ")", ":", "outputfile", "+=", "\".png\"", "motifs", "=", "read_motifs", "(", "args", ".", "pwmfile", ",", "fmt", "=", "\"pwm\"", ")", "ids", "=", "[", "]", "if", "args", ".", "ids", ":", "ids", "=", "args", ".", "ids", ".", "split", "(", "\",\"", ")", "else", ":", "ids", "=", "[", "m", ".", "id", "for", "m", "in", "motifs", "]", "motifs", "=", "[", "m", "for", "m", "in", "motifs", "if", "(", "m", ".", "id", "in", "ids", ")", "]", "stats", "=", "[", "\"phyper_at_fpr\"", ",", "\"roc_auc\"", ",", "\"pr_auc\"", ",", "\"enr_at_fpr\"", ",", "\"recall_at_fdr\"", ",", "\"roc_values\"", ",", "\"matches_at_fpr\"", ",", "]", "plot_x", "=", "[", "]", "plot_y", "=", "[", "]", "legend", "=", "[", "]", "f_out", "=", "sys", ".", "stdout", "if", "args", ".", "outdir", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "args", ".", "outdir", ")", ":", "os", ".", "makedirs", "(", "args", ".", "outdir", ")", "f_out", "=", "open", "(", "args", ".", "outdir", "+", "\"/gimme.roc.report.txt\"", ",", "\"w\"", ")", "# Print the metrics", "f_out", ".", "write", "(", "\"Motif\\t# matches\\t# matches background\\tP-value\\tlog10 P-value\\tROC AUC\\tPR AUC\\tEnr. 
at 1% FPR\\tRecall at 10% FDR\\n\"", ")", "for", "motif_stats", "in", "calc_stats_iterator", "(", "motifs", ",", "args", ".", "sample", ",", "args", ".", "background", ",", "genome", "=", "args", ".", "genome", ",", "stats", "=", "stats", ",", "ncpus", "=", "args", ".", "ncpus", ")", ":", "for", "motif", "in", "motifs", ":", "if", "str", "(", "motif", ")", "in", "motif_stats", ":", "if", "outputfile", ":", "x", ",", "y", "=", "motif_stats", "[", "str", "(", "motif", ")", "]", "[", "\"roc_values\"", "]", "plot_x", ".", "append", "(", "x", ")", "plot_y", ".", "append", "(", "y", ")", "legend", ".", "append", "(", "motif", ".", "id", ")", "log_pvalue", "=", "np", ".", "inf", "if", "motif_stats", "[", "str", "(", "motif", ")", "]", "[", "\"phyper_at_fpr\"", "]", ">", "0", ":", "log_pvalue", "=", "-", "np", ".", "log10", "(", "motif_stats", "[", "str", "(", "motif", ")", "]", "[", "\"phyper_at_fpr\"", "]", ")", "f_out", ".", "write", "(", "\"{}\\t{:d}\\t{:d}\\t{:.2e}\\t{:.3f}\\t{:.3f}\\t{:.3f}\\t{:.2f}\\t{:0.4f}\\n\"", ".", "format", "(", "motif", ".", "id", ",", "motif_stats", "[", "str", "(", "motif", ")", "]", "[", "\"matches_at_fpr\"", "]", "[", "0", "]", ",", "motif_stats", "[", "str", "(", "motif", ")", "]", "[", "\"matches_at_fpr\"", "]", "[", "1", "]", ",", "motif_stats", "[", "str", "(", "motif", ")", "]", "[", "\"phyper_at_fpr\"", "]", ",", "log_pvalue", ",", "motif_stats", "[", "str", "(", "motif", ")", "]", "[", "\"roc_auc\"", "]", ",", "motif_stats", "[", "str", "(", "motif", ")", "]", "[", "\"pr_auc\"", "]", ",", "motif_stats", "[", "str", "(", "motif", ")", "]", "[", "\"enr_at_fpr\"", "]", ",", "motif_stats", "[", "str", "(", "motif", ")", "]", "[", "\"recall_at_fdr\"", "]", ",", ")", ")", "f_out", ".", "close", "(", ")", "if", "args", ".", "outdir", ":", "html_report", "(", "args", ".", "outdir", ",", "args", ".", "outdir", "+", "\"/gimme.roc.report.txt\"", ",", "args", ".", "pwmfile", ",", "0.01", ",", ")", "# Plot the ROC curve", "if", "outputfile", 
":", "roc_plot", "(", "outputfile", ",", "plot_x", ",", "plot_y", ",", "ids", "=", "legend", ")" ]
Calculate ROC_AUC and other metrics and optionally plot ROC curve.
[ "Calculate", "ROC_AUC", "and", "other", "metrics", "and", "optionally", "plot", "ROC", "curve", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/commands/roc.py#L85-L161
train
vanheeringen-lab/gimmemotifs
gimmemotifs/comparison.py
ssd
def ssd(p1, p2): """Calculates motif position similarity based on sum of squared distances. Parameters ---------- p1 : list Motif position 1. p2 : list Motif position 2. Returns ------- score : float """ return 2 - np.sum([(a-b)**2 for a,b in zip(p1,p2)])
python
def ssd(p1, p2): """Calculates motif position similarity based on sum of squared distances. Parameters ---------- p1 : list Motif position 1. p2 : list Motif position 2. Returns ------- score : float """ return 2 - np.sum([(a-b)**2 for a,b in zip(p1,p2)])
[ "def", "ssd", "(", "p1", ",", "p2", ")", ":", "return", "2", "-", "np", ".", "sum", "(", "[", "(", "a", "-", "b", ")", "**", "2", "for", "a", ",", "b", "in", "zip", "(", "p1", ",", "p2", ")", "]", ")" ]
Calculates motif position similarity based on sum of squared distances. Parameters ---------- p1 : list Motif position 1. p2 : list Motif position 2. Returns ------- score : float
[ "Calculates", "motif", "position", "similarity", "based", "on", "sum", "of", "squared", "distances", ".", "Parameters", "----------", "p1", ":", "list", "Motif", "position", "1", ".", "p2", ":", "list", "Motif", "position", "2", ".", "Returns", "-------", "score", ":", "float" ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/comparison.py#L196-L211
train
vanheeringen-lab/gimmemotifs
gimmemotifs/comparison.py
seqcor
def seqcor(m1, m2, seq=None): """Calculates motif similarity based on Pearson correlation of scores. Based on Kielbasa (2015) and Grau (2015). Scores are calculated based on scanning a de Bruijn sequence of 7-mers. This sequence is taken from ShortCAKE (Orenstein & Shamir, 2015). Optionally another sequence can be given as an argument. Parameters ---------- m1 : Motif instance Motif 1 to compare. m2 : Motif instance Motif 2 to compare. seq : str, optional Sequence to use for scanning instead of k=7 de Bruijn sequence. Returns ------- score, position, strand """ l1 = len(m1) l2 = len(m2) l = max(l1, l2) if seq is None: seq = RCDB L = len(seq) # Scan RC de Bruijn sequence result1 = pfmscan(seq, m1.pwm, m1.pwm_min_score(), len(seq), False, True) result2 = pfmscan(seq, m2.pwm, m2.pwm_min_score(), len(seq), False, True) # Reverse complement of motif 2 result3 = pfmscan(seq, m2.rc().pwm, m2.rc().pwm_min_score(), len(seq), False, True) result1 = np.array(result1) result2 = np.array(result2) result3 = np.array(result3) # Return maximum correlation c = [] for i in range(l1 - l1 // 3): c.append([1 - distance.correlation(result1[:L-l-i],result2[i:L-l]), i, 1]) c.append([1 - distance.correlation(result1[:L-l-i],result3[i:L-l]), i, -1]) for i in range(l2 - l2 // 3): c.append([1 - distance.correlation(result1[i:L-l],result2[:L-l-i]), -i, 1]) c.append([1 - distance.correlation(result1[i:L-l],result3[:L-l-i]), -i, -1]) return sorted(c, key=lambda x: x[0])[-1]
python
def seqcor(m1, m2, seq=None): """Calculates motif similarity based on Pearson correlation of scores. Based on Kielbasa (2015) and Grau (2015). Scores are calculated based on scanning a de Bruijn sequence of 7-mers. This sequence is taken from ShortCAKE (Orenstein & Shamir, 2015). Optionally another sequence can be given as an argument. Parameters ---------- m1 : Motif instance Motif 1 to compare. m2 : Motif instance Motif 2 to compare. seq : str, optional Sequence to use for scanning instead of k=7 de Bruijn sequence. Returns ------- score, position, strand """ l1 = len(m1) l2 = len(m2) l = max(l1, l2) if seq is None: seq = RCDB L = len(seq) # Scan RC de Bruijn sequence result1 = pfmscan(seq, m1.pwm, m1.pwm_min_score(), len(seq), False, True) result2 = pfmscan(seq, m2.pwm, m2.pwm_min_score(), len(seq), False, True) # Reverse complement of motif 2 result3 = pfmscan(seq, m2.rc().pwm, m2.rc().pwm_min_score(), len(seq), False, True) result1 = np.array(result1) result2 = np.array(result2) result3 = np.array(result3) # Return maximum correlation c = [] for i in range(l1 - l1 // 3): c.append([1 - distance.correlation(result1[:L-l-i],result2[i:L-l]), i, 1]) c.append([1 - distance.correlation(result1[:L-l-i],result3[i:L-l]), i, -1]) for i in range(l2 - l2 // 3): c.append([1 - distance.correlation(result1[i:L-l],result2[:L-l-i]), -i, 1]) c.append([1 - distance.correlation(result1[i:L-l],result3[:L-l-i]), -i, -1]) return sorted(c, key=lambda x: x[0])[-1]
[ "def", "seqcor", "(", "m1", ",", "m2", ",", "seq", "=", "None", ")", ":", "l1", "=", "len", "(", "m1", ")", "l2", "=", "len", "(", "m2", ")", "l", "=", "max", "(", "l1", ",", "l2", ")", "if", "seq", "is", "None", ":", "seq", "=", "RCDB", "L", "=", "len", "(", "seq", ")", "# Scan RC de Bruijn sequence", "result1", "=", "pfmscan", "(", "seq", ",", "m1", ".", "pwm", ",", "m1", ".", "pwm_min_score", "(", ")", ",", "len", "(", "seq", ")", ",", "False", ",", "True", ")", "result2", "=", "pfmscan", "(", "seq", ",", "m2", ".", "pwm", ",", "m2", ".", "pwm_min_score", "(", ")", ",", "len", "(", "seq", ")", ",", "False", ",", "True", ")", "# Reverse complement of motif 2", "result3", "=", "pfmscan", "(", "seq", ",", "m2", ".", "rc", "(", ")", ".", "pwm", ",", "m2", ".", "rc", "(", ")", ".", "pwm_min_score", "(", ")", ",", "len", "(", "seq", ")", ",", "False", ",", "True", ")", "result1", "=", "np", ".", "array", "(", "result1", ")", "result2", "=", "np", ".", "array", "(", "result2", ")", "result3", "=", "np", ".", "array", "(", "result3", ")", "# Return maximum correlation", "c", "=", "[", "]", "for", "i", "in", "range", "(", "l1", "-", "l1", "//", "3", ")", ":", "c", ".", "append", "(", "[", "1", "-", "distance", ".", "correlation", "(", "result1", "[", ":", "L", "-", "l", "-", "i", "]", ",", "result2", "[", "i", ":", "L", "-", "l", "]", ")", ",", "i", ",", "1", "]", ")", "c", ".", "append", "(", "[", "1", "-", "distance", ".", "correlation", "(", "result1", "[", ":", "L", "-", "l", "-", "i", "]", ",", "result3", "[", "i", ":", "L", "-", "l", "]", ")", ",", "i", ",", "-", "1", "]", ")", "for", "i", "in", "range", "(", "l2", "-", "l2", "//", "3", ")", ":", "c", ".", "append", "(", "[", "1", "-", "distance", ".", "correlation", "(", "result1", "[", "i", ":", "L", "-", "l", "]", ",", "result2", "[", ":", "L", "-", "l", "-", "i", "]", ")", ",", "-", "i", ",", "1", "]", ")", "c", ".", "append", "(", "[", "1", "-", "distance", ".", "correlation", "(", "result1", "[", "i", ":", 
"L", "-", "l", "]", ",", "result3", "[", ":", "L", "-", "l", "-", "i", "]", ")", ",", "-", "i", ",", "-", "1", "]", ")", "return", "sorted", "(", "c", ",", "key", "=", "lambda", "x", ":", "x", "[", "0", "]", ")", "[", "-", "1", "]" ]
Calculates motif similarity based on Pearson correlation of scores. Based on Kielbasa (2015) and Grau (2015). Scores are calculated based on scanning a de Bruijn sequence of 7-mers. This sequence is taken from ShortCAKE (Orenstein & Shamir, 2015). Optionally another sequence can be given as an argument. Parameters ---------- m1 : Motif instance Motif 1 to compare. m2 : Motif instance Motif 2 to compare. seq : str, optional Sequence to use for scanning instead of k=7 de Bruijn sequence. Returns ------- score, position, strand
[ "Calculates", "motif", "similarity", "based", "on", "Pearson", "correlation", "of", "scores", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/comparison.py#L213-L266
train
vanheeringen-lab/gimmemotifs
gimmemotifs/comparison.py
MotifComparer.compare_motifs
def compare_motifs(self, m1, m2, match="total", metric="wic", combine="mean", pval=False): """Compare two motifs. The similarity metric can be any of seqcor, pcc, ed, distance, wic, chisq, akl or ssd. If match is 'total' the similarity score is calculated for the whole match, including positions that are not present in both motifs. If match is partial or subtotal, only the matching psotiions are used to calculate the score. The score of individual position is combined using either the mean or the sum. Note that the match and combine parameters have no effect on the seqcor similarity metric. Parameters ---------- m1 : Motif instance Motif instance 1. m2 : Motif instance Motif instance 2. match : str, optional Match can be "partial", "subtotal" or "total". Not all metrics use this. metric : str, optional Distance metric. combine : str, optional Combine positional scores using "mean" or "sum". Not all metrics use this. pval : bool, optional Calculate p-vale of match. Returns ------- score, position, strand """ if metric == "seqcor": return seqcor(m1, m2) elif match == "partial": if pval: return self.pvalue(m1, m2, "total", metric, combine, self.max_partial(m1.pwm, m2.pwm, metric, combine)) elif metric in ["pcc", "ed", "distance", "wic", "chisq", "ssd"]: return self.max_partial(m1.pwm, m2.pwm, metric, combine) else: return self.max_partial(m1.pfm, m2.pfm, metric, combine) elif match == "total": if pval: return self.pvalue(m1, m2, match, metric, combine, self.max_total(m1.pwm, m2.pwm, metric, combine)) elif metric in ["pcc", 'akl']: # Slightly randomize the weight matrix return self.max_total(m1.wiggle_pwm(), m2.wiggle_pwm(), metric, combine) elif metric in ["ed", "distance", "wic", "chisq", "pcc", "ssd"]: return self.max_total(m1.pwm, m2.pwm, metric, combine) else: return self.max_total(m1.pfm, m2.pfm, metric, combine) elif match == "subtotal": if metric in ["pcc", "ed", "distance", "wic", "chisq", "ssd"]: return self.max_subtotal(m1.pwm, m2.pwm, metric, combine) else: 
return self.max_subtotal(m1.pfm, m2.pfm, metric, combine)
python
def compare_motifs(self, m1, m2, match="total", metric="wic", combine="mean", pval=False): """Compare two motifs. The similarity metric can be any of seqcor, pcc, ed, distance, wic, chisq, akl or ssd. If match is 'total' the similarity score is calculated for the whole match, including positions that are not present in both motifs. If match is partial or subtotal, only the matching psotiions are used to calculate the score. The score of individual position is combined using either the mean or the sum. Note that the match and combine parameters have no effect on the seqcor similarity metric. Parameters ---------- m1 : Motif instance Motif instance 1. m2 : Motif instance Motif instance 2. match : str, optional Match can be "partial", "subtotal" or "total". Not all metrics use this. metric : str, optional Distance metric. combine : str, optional Combine positional scores using "mean" or "sum". Not all metrics use this. pval : bool, optional Calculate p-vale of match. Returns ------- score, position, strand """ if metric == "seqcor": return seqcor(m1, m2) elif match == "partial": if pval: return self.pvalue(m1, m2, "total", metric, combine, self.max_partial(m1.pwm, m2.pwm, metric, combine)) elif metric in ["pcc", "ed", "distance", "wic", "chisq", "ssd"]: return self.max_partial(m1.pwm, m2.pwm, metric, combine) else: return self.max_partial(m1.pfm, m2.pfm, metric, combine) elif match == "total": if pval: return self.pvalue(m1, m2, match, metric, combine, self.max_total(m1.pwm, m2.pwm, metric, combine)) elif metric in ["pcc", 'akl']: # Slightly randomize the weight matrix return self.max_total(m1.wiggle_pwm(), m2.wiggle_pwm(), metric, combine) elif metric in ["ed", "distance", "wic", "chisq", "pcc", "ssd"]: return self.max_total(m1.pwm, m2.pwm, metric, combine) else: return self.max_total(m1.pfm, m2.pfm, metric, combine) elif match == "subtotal": if metric in ["pcc", "ed", "distance", "wic", "chisq", "ssd"]: return self.max_subtotal(m1.pwm, m2.pwm, metric, combine) else: 
return self.max_subtotal(m1.pfm, m2.pfm, metric, combine)
[ "def", "compare_motifs", "(", "self", ",", "m1", ",", "m2", ",", "match", "=", "\"total\"", ",", "metric", "=", "\"wic\"", ",", "combine", "=", "\"mean\"", ",", "pval", "=", "False", ")", ":", "if", "metric", "==", "\"seqcor\"", ":", "return", "seqcor", "(", "m1", ",", "m2", ")", "elif", "match", "==", "\"partial\"", ":", "if", "pval", ":", "return", "self", ".", "pvalue", "(", "m1", ",", "m2", ",", "\"total\"", ",", "metric", ",", "combine", ",", "self", ".", "max_partial", "(", "m1", ".", "pwm", ",", "m2", ".", "pwm", ",", "metric", ",", "combine", ")", ")", "elif", "metric", "in", "[", "\"pcc\"", ",", "\"ed\"", ",", "\"distance\"", ",", "\"wic\"", ",", "\"chisq\"", ",", "\"ssd\"", "]", ":", "return", "self", ".", "max_partial", "(", "m1", ".", "pwm", ",", "m2", ".", "pwm", ",", "metric", ",", "combine", ")", "else", ":", "return", "self", ".", "max_partial", "(", "m1", ".", "pfm", ",", "m2", ".", "pfm", ",", "metric", ",", "combine", ")", "elif", "match", "==", "\"total\"", ":", "if", "pval", ":", "return", "self", ".", "pvalue", "(", "m1", ",", "m2", ",", "match", ",", "metric", ",", "combine", ",", "self", ".", "max_total", "(", "m1", ".", "pwm", ",", "m2", ".", "pwm", ",", "metric", ",", "combine", ")", ")", "elif", "metric", "in", "[", "\"pcc\"", ",", "'akl'", "]", ":", "# Slightly randomize the weight matrix", "return", "self", ".", "max_total", "(", "m1", ".", "wiggle_pwm", "(", ")", ",", "m2", ".", "wiggle_pwm", "(", ")", ",", "metric", ",", "combine", ")", "elif", "metric", "in", "[", "\"ed\"", ",", "\"distance\"", ",", "\"wic\"", ",", "\"chisq\"", ",", "\"pcc\"", ",", "\"ssd\"", "]", ":", "return", "self", ".", "max_total", "(", "m1", ".", "pwm", ",", "m2", ".", "pwm", ",", "metric", ",", "combine", ")", "else", ":", "return", "self", ".", "max_total", "(", "m1", ".", "pfm", ",", "m2", ".", "pfm", ",", "metric", ",", "combine", ")", "elif", "match", "==", "\"subtotal\"", ":", "if", "metric", "in", "[", "\"pcc\"", ",", "\"ed\"", ",", "\"distance\"", 
",", "\"wic\"", ",", "\"chisq\"", ",", "\"ssd\"", "]", ":", "return", "self", ".", "max_subtotal", "(", "m1", ".", "pwm", ",", "m2", ".", "pwm", ",", "metric", ",", "combine", ")", "else", ":", "return", "self", ".", "max_subtotal", "(", "m1", ".", "pfm", ",", "m2", ".", "pfm", ",", "metric", ",", "combine", ")" ]
Compare two motifs. The similarity metric can be any of seqcor, pcc, ed, distance, wic, chisq, akl or ssd. If match is 'total' the similarity score is calculated for the whole match, including positions that are not present in both motifs. If match is partial or subtotal, only the matching psotiions are used to calculate the score. The score of individual position is combined using either the mean or the sum. Note that the match and combine parameters have no effect on the seqcor similarity metric. Parameters ---------- m1 : Motif instance Motif instance 1. m2 : Motif instance Motif instance 2. match : str, optional Match can be "partial", "subtotal" or "total". Not all metrics use this. metric : str, optional Distance metric. combine : str, optional Combine positional scores using "mean" or "sum". Not all metrics use this. pval : bool, optional Calculate p-vale of match. Returns ------- score, position, strand
[ "Compare", "two", "motifs", ".", "The", "similarity", "metric", "can", "be", "any", "of", "seqcor", "pcc", "ed", "distance", "wic", "chisq", "akl", "or", "ssd", ".", "If", "match", "is", "total", "the", "similarity", "score", "is", "calculated", "for", "the", "whole", "match", "including", "positions", "that", "are", "not", "present", "in", "both", "motifs", ".", "If", "match", "is", "partial", "or", "subtotal", "only", "the", "matching", "psotiions", "are", "used", "to", "calculate", "the", "score", ".", "The", "score", "of", "individual", "position", "is", "combined", "using", "either", "the", "mean", "or", "the", "sum", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/comparison.py#L322-L386
train
vanheeringen-lab/gimmemotifs
gimmemotifs/comparison.py
MotifComparer.get_all_scores
def get_all_scores(self, motifs, dbmotifs, match, metric, combine, pval=False, parallel=True, trim=None, ncpus=None): """Pairwise comparison of a set of motifs compared to reference motifs. Parameters ---------- motifs : list List of Motif instances. dbmotifs : list List of Motif instances. match : str Match can be "partial", "subtotal" or "total". Not all metrics use this. metric : str Distance metric. combine : str Combine positional scores using "mean" or "sum". Not all metrics use this. pval : bool , optional Calculate p-vale of match. parallel : bool , optional Use multiprocessing for parallel execution. True by default. trim : float or None If a float value is specified, motifs are trimmed used this IC cutoff before comparison. ncpus : int or None Specifies the number of cores to use for parallel execution. Returns ------- scores : dict Dictionary with scores. """ # trim motifs first, if specified if trim: for m in motifs: m.trim(trim) for m in dbmotifs: m.trim(trim) # hash of result scores scores = {} if parallel: # Divide the job into big chunks, to keep parallel overhead to minimum # Number of chunks = number of processors available if ncpus is None: ncpus = int(MotifConfig().get_default_params()["ncpus"]) pool = Pool(processes=ncpus, maxtasksperchild=1000) batch_len = len(dbmotifs) // ncpus if batch_len <= 0: batch_len = 1 jobs = [] for i in range(0, len(dbmotifs), batch_len): # submit jobs to the job server p = pool.apply_async(_get_all_scores, args=(self, motifs, dbmotifs[i: i + batch_len], match, metric, combine, pval)) jobs.append(p) pool.close() for job in jobs: # Get the job result result = job.get() # and update the result score for m1,v in result.items(): for m2, s in v.items(): if m1 not in scores: scores[m1] = {} scores[m1][m2] = s pool.join() else: # Do the whole thing at once if we don't want parallel scores = _get_all_scores(self, motifs, dbmotifs, match, metric, combine, pval) return scores
python
def get_all_scores(self, motifs, dbmotifs, match, metric, combine, pval=False, parallel=True, trim=None, ncpus=None): """Pairwise comparison of a set of motifs compared to reference motifs. Parameters ---------- motifs : list List of Motif instances. dbmotifs : list List of Motif instances. match : str Match can be "partial", "subtotal" or "total". Not all metrics use this. metric : str Distance metric. combine : str Combine positional scores using "mean" or "sum". Not all metrics use this. pval : bool , optional Calculate p-vale of match. parallel : bool , optional Use multiprocessing for parallel execution. True by default. trim : float or None If a float value is specified, motifs are trimmed used this IC cutoff before comparison. ncpus : int or None Specifies the number of cores to use for parallel execution. Returns ------- scores : dict Dictionary with scores. """ # trim motifs first, if specified if trim: for m in motifs: m.trim(trim) for m in dbmotifs: m.trim(trim) # hash of result scores scores = {} if parallel: # Divide the job into big chunks, to keep parallel overhead to minimum # Number of chunks = number of processors available if ncpus is None: ncpus = int(MotifConfig().get_default_params()["ncpus"]) pool = Pool(processes=ncpus, maxtasksperchild=1000) batch_len = len(dbmotifs) // ncpus if batch_len <= 0: batch_len = 1 jobs = [] for i in range(0, len(dbmotifs), batch_len): # submit jobs to the job server p = pool.apply_async(_get_all_scores, args=(self, motifs, dbmotifs[i: i + batch_len], match, metric, combine, pval)) jobs.append(p) pool.close() for job in jobs: # Get the job result result = job.get() # and update the result score for m1,v in result.items(): for m2, s in v.items(): if m1 not in scores: scores[m1] = {} scores[m1][m2] = s pool.join() else: # Do the whole thing at once if we don't want parallel scores = _get_all_scores(self, motifs, dbmotifs, match, metric, combine, pval) return scores
[ "def", "get_all_scores", "(", "self", ",", "motifs", ",", "dbmotifs", ",", "match", ",", "metric", ",", "combine", ",", "pval", "=", "False", ",", "parallel", "=", "True", ",", "trim", "=", "None", ",", "ncpus", "=", "None", ")", ":", "# trim motifs first, if specified", "if", "trim", ":", "for", "m", "in", "motifs", ":", "m", ".", "trim", "(", "trim", ")", "for", "m", "in", "dbmotifs", ":", "m", ".", "trim", "(", "trim", ")", "# hash of result scores", "scores", "=", "{", "}", "if", "parallel", ":", "# Divide the job into big chunks, to keep parallel overhead to minimum", "# Number of chunks = number of processors available", "if", "ncpus", "is", "None", ":", "ncpus", "=", "int", "(", "MotifConfig", "(", ")", ".", "get_default_params", "(", ")", "[", "\"ncpus\"", "]", ")", "pool", "=", "Pool", "(", "processes", "=", "ncpus", ",", "maxtasksperchild", "=", "1000", ")", "batch_len", "=", "len", "(", "dbmotifs", ")", "//", "ncpus", "if", "batch_len", "<=", "0", ":", "batch_len", "=", "1", "jobs", "=", "[", "]", "for", "i", "in", "range", "(", "0", ",", "len", "(", "dbmotifs", ")", ",", "batch_len", ")", ":", "# submit jobs to the job server", "p", "=", "pool", ".", "apply_async", "(", "_get_all_scores", ",", "args", "=", "(", "self", ",", "motifs", ",", "dbmotifs", "[", "i", ":", "i", "+", "batch_len", "]", ",", "match", ",", "metric", ",", "combine", ",", "pval", ")", ")", "jobs", ".", "append", "(", "p", ")", "pool", ".", "close", "(", ")", "for", "job", "in", "jobs", ":", "# Get the job result", "result", "=", "job", ".", "get", "(", ")", "# and update the result score", "for", "m1", ",", "v", "in", "result", ".", "items", "(", ")", ":", "for", "m2", ",", "s", "in", "v", ".", "items", "(", ")", ":", "if", "m1", "not", "in", "scores", ":", "scores", "[", "m1", "]", "=", "{", "}", "scores", "[", "m1", "]", "[", "m2", "]", "=", "s", "pool", ".", "join", "(", ")", "else", ":", "# Do the whole thing at once if we don't want parallel", "scores", "=", "_get_all_scores", 
"(", "self", ",", "motifs", ",", "dbmotifs", ",", "match", ",", "metric", ",", "combine", ",", "pval", ")", "return", "scores" ]
Pairwise comparison of a set of motifs compared to reference motifs. Parameters ---------- motifs : list List of Motif instances. dbmotifs : list List of Motif instances. match : str Match can be "partial", "subtotal" or "total". Not all metrics use this. metric : str Distance metric. combine : str Combine positional scores using "mean" or "sum". Not all metrics use this. pval : bool , optional Calculate p-vale of match. parallel : bool , optional Use multiprocessing for parallel execution. True by default. trim : float or None If a float value is specified, motifs are trimmed used this IC cutoff before comparison. ncpus : int or None Specifies the number of cores to use for parallel execution. Returns ------- scores : dict Dictionary with scores.
[ "Pairwise", "comparison", "of", "a", "set", "of", "motifs", "compared", "to", "reference", "motifs", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/comparison.py#L574-L660
train
vanheeringen-lab/gimmemotifs
gimmemotifs/comparison.py
MotifComparer.get_closest_match
def get_closest_match(self, motifs, dbmotifs=None, match="partial", metric="wic",combine="mean", parallel=True, ncpus=None): """Return best match in database for motifs. Parameters ---------- motifs : list or str Filename of motifs or list of motifs. dbmotifs : list or str, optional Database motifs, default will be used if not specified. match : str, optional metric : str, optional combine : str, optional ncpus : int, optional Number of threads to use. Returns ------- closest_match : dict """ if dbmotifs is None: pwm = self.config.get_default_params()["motif_db"] pwmdir = self.config.get_motif_dir() dbmotifs = os.path.join(pwmdir, pwm) motifs = parse_motifs(motifs) dbmotifs = parse_motifs(dbmotifs) dbmotif_lookup = dict([(m.id, m) for m in dbmotifs]) scores = self.get_all_scores(motifs, dbmotifs, match, metric, combine, parallel=parallel, ncpus=ncpus) for motif in scores: scores[motif] = sorted( scores[motif].items(), key=lambda x:x[1][0] )[-1] for motif in motifs: dbmotif, score = scores[motif.id] pval, pos, orient = self.compare_motifs( motif, dbmotif_lookup[dbmotif], match, metric, combine, True) scores[motif.id] = [dbmotif, (list(score) + [pval])] return scores
python
def get_closest_match(self, motifs, dbmotifs=None, match="partial", metric="wic",combine="mean", parallel=True, ncpus=None): """Return best match in database for motifs. Parameters ---------- motifs : list or str Filename of motifs or list of motifs. dbmotifs : list or str, optional Database motifs, default will be used if not specified. match : str, optional metric : str, optional combine : str, optional ncpus : int, optional Number of threads to use. Returns ------- closest_match : dict """ if dbmotifs is None: pwm = self.config.get_default_params()["motif_db"] pwmdir = self.config.get_motif_dir() dbmotifs = os.path.join(pwmdir, pwm) motifs = parse_motifs(motifs) dbmotifs = parse_motifs(dbmotifs) dbmotif_lookup = dict([(m.id, m) for m in dbmotifs]) scores = self.get_all_scores(motifs, dbmotifs, match, metric, combine, parallel=parallel, ncpus=ncpus) for motif in scores: scores[motif] = sorted( scores[motif].items(), key=lambda x:x[1][0] )[-1] for motif in motifs: dbmotif, score = scores[motif.id] pval, pos, orient = self.compare_motifs( motif, dbmotif_lookup[dbmotif], match, metric, combine, True) scores[motif.id] = [dbmotif, (list(score) + [pval])] return scores
[ "def", "get_closest_match", "(", "self", ",", "motifs", ",", "dbmotifs", "=", "None", ",", "match", "=", "\"partial\"", ",", "metric", "=", "\"wic\"", ",", "combine", "=", "\"mean\"", ",", "parallel", "=", "True", ",", "ncpus", "=", "None", ")", ":", "if", "dbmotifs", "is", "None", ":", "pwm", "=", "self", ".", "config", ".", "get_default_params", "(", ")", "[", "\"motif_db\"", "]", "pwmdir", "=", "self", ".", "config", ".", "get_motif_dir", "(", ")", "dbmotifs", "=", "os", ".", "path", ".", "join", "(", "pwmdir", ",", "pwm", ")", "motifs", "=", "parse_motifs", "(", "motifs", ")", "dbmotifs", "=", "parse_motifs", "(", "dbmotifs", ")", "dbmotif_lookup", "=", "dict", "(", "[", "(", "m", ".", "id", ",", "m", ")", "for", "m", "in", "dbmotifs", "]", ")", "scores", "=", "self", ".", "get_all_scores", "(", "motifs", ",", "dbmotifs", ",", "match", ",", "metric", ",", "combine", ",", "parallel", "=", "parallel", ",", "ncpus", "=", "ncpus", ")", "for", "motif", "in", "scores", ":", "scores", "[", "motif", "]", "=", "sorted", "(", "scores", "[", "motif", "]", ".", "items", "(", ")", ",", "key", "=", "lambda", "x", ":", "x", "[", "1", "]", "[", "0", "]", ")", "[", "-", "1", "]", "for", "motif", "in", "motifs", ":", "dbmotif", ",", "score", "=", "scores", "[", "motif", ".", "id", "]", "pval", ",", "pos", ",", "orient", "=", "self", ".", "compare_motifs", "(", "motif", ",", "dbmotif_lookup", "[", "dbmotif", "]", ",", "match", ",", "metric", ",", "combine", ",", "True", ")", "scores", "[", "motif", ".", "id", "]", "=", "[", "dbmotif", ",", "(", "list", "(", "score", ")", "+", "[", "pval", "]", ")", "]", "return", "scores" ]
Return best match in database for motifs. Parameters ---------- motifs : list or str Filename of motifs or list of motifs. dbmotifs : list or str, optional Database motifs, default will be used if not specified. match : str, optional metric : str, optional combine : str, optional ncpus : int, optional Number of threads to use. Returns ------- closest_match : dict
[ "Return", "best", "match", "in", "database", "for", "motifs", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/comparison.py#L662-L711
train
eofs/aws
aws/main.py
list_regions
def list_regions(service): """ List regions for the service """ for region in service.regions(): print '%(name)s: %(endpoint)s' % { 'name': region.name, 'endpoint': region.endpoint, }
python
def list_regions(service): """ List regions for the service """ for region in service.regions(): print '%(name)s: %(endpoint)s' % { 'name': region.name, 'endpoint': region.endpoint, }
[ "def", "list_regions", "(", "service", ")", ":", "for", "region", "in", "service", ".", "regions", "(", ")", ":", "print", "'%(name)s: %(endpoint)s'", "%", "{", "'name'", ":", "region", ".", "name", ",", "'endpoint'", ":", "region", ".", "endpoint", ",", "}" ]
List regions for the service
[ "List", "regions", "for", "the", "service" ]
479cbe27a9f289b43f32f8e3de7d048a4a8993fe
https://github.com/eofs/aws/blob/479cbe27a9f289b43f32f8e3de7d048a4a8993fe/aws/main.py#L18-L26
train
eofs/aws
aws/main.py
elb_table
def elb_table(balancers): """ Print nice looking table of information from list of load balancers """ t = prettytable.PrettyTable(['Name', 'DNS', 'Ports', 'Zones', 'Created']) t.align = 'l' for b in balancers: ports = ['%s: %s -> %s' % (l[2], l[0], l[1]) for l in b.listeners] ports = '\n'.join(ports) zones = '\n'.join(b.availability_zones) t.add_row([b.name, b.dns_name, ports, zones, b.created_time]) return t
python
def elb_table(balancers): """ Print nice looking table of information from list of load balancers """ t = prettytable.PrettyTable(['Name', 'DNS', 'Ports', 'Zones', 'Created']) t.align = 'l' for b in balancers: ports = ['%s: %s -> %s' % (l[2], l[0], l[1]) for l in b.listeners] ports = '\n'.join(ports) zones = '\n'.join(b.availability_zones) t.add_row([b.name, b.dns_name, ports, zones, b.created_time]) return t
[ "def", "elb_table", "(", "balancers", ")", ":", "t", "=", "prettytable", ".", "PrettyTable", "(", "[", "'Name'", ",", "'DNS'", ",", "'Ports'", ",", "'Zones'", ",", "'Created'", "]", ")", "t", ".", "align", "=", "'l'", "for", "b", "in", "balancers", ":", "ports", "=", "[", "'%s: %s -> %s'", "%", "(", "l", "[", "2", "]", ",", "l", "[", "0", "]", ",", "l", "[", "1", "]", ")", "for", "l", "in", "b", ".", "listeners", "]", "ports", "=", "'\\n'", ".", "join", "(", "ports", ")", "zones", "=", "'\\n'", ".", "join", "(", "b", ".", "availability_zones", ")", "t", ".", "add_row", "(", "[", "b", ".", "name", ",", "b", ".", "dns_name", ",", "ports", ",", "zones", ",", "b", ".", "created_time", "]", ")", "return", "t" ]
Print nice looking table of information from list of load balancers
[ "Print", "nice", "looking", "table", "of", "information", "from", "list", "of", "load", "balancers" ]
479cbe27a9f289b43f32f8e3de7d048a4a8993fe
https://github.com/eofs/aws/blob/479cbe27a9f289b43f32f8e3de7d048a4a8993fe/aws/main.py#L29-L40
train
eofs/aws
aws/main.py
ec2_table
def ec2_table(instances): """ Print nice looking table of information from list of instances """ t = prettytable.PrettyTable(['ID', 'State', 'Monitored', 'Image', 'Name', 'Type', 'SSH key', 'DNS']) t.align = 'l' for i in instances: name = i.tags.get('Name', '') t.add_row([i.id, i.state, i.monitored, i.image_id, name, i.instance_type, i.key_name, i.dns_name]) return t
python
def ec2_table(instances): """ Print nice looking table of information from list of instances """ t = prettytable.PrettyTable(['ID', 'State', 'Monitored', 'Image', 'Name', 'Type', 'SSH key', 'DNS']) t.align = 'l' for i in instances: name = i.tags.get('Name', '') t.add_row([i.id, i.state, i.monitored, i.image_id, name, i.instance_type, i.key_name, i.dns_name]) return t
[ "def", "ec2_table", "(", "instances", ")", ":", "t", "=", "prettytable", ".", "PrettyTable", "(", "[", "'ID'", ",", "'State'", ",", "'Monitored'", ",", "'Image'", ",", "'Name'", ",", "'Type'", ",", "'SSH key'", ",", "'DNS'", "]", ")", "t", ".", "align", "=", "'l'", "for", "i", "in", "instances", ":", "name", "=", "i", ".", "tags", ".", "get", "(", "'Name'", ",", "''", ")", "t", ".", "add_row", "(", "[", "i", ".", "id", ",", "i", ".", "state", ",", "i", ".", "monitored", ",", "i", ".", "image_id", ",", "name", ",", "i", ".", "instance_type", ",", "i", ".", "key_name", ",", "i", ".", "dns_name", "]", ")", "return", "t" ]
Print nice looking table of information from list of instances
[ "Print", "nice", "looking", "table", "of", "information", "from", "list", "of", "instances" ]
479cbe27a9f289b43f32f8e3de7d048a4a8993fe
https://github.com/eofs/aws/blob/479cbe27a9f289b43f32f8e3de7d048a4a8993fe/aws/main.py#L43-L52
train
eofs/aws
aws/main.py
ec2_image_table
def ec2_image_table(images): """ Print nice looking table of information from images """ t = prettytable.PrettyTable(['ID', 'State', 'Name', 'Owner', 'Root device', 'Is public', 'Description']) t.align = 'l' for i in images: t.add_row([i.id, i.state, i.name, i.ownerId, i.root_device_type, i.is_public, i.description]) return t
python
def ec2_image_table(images): """ Print nice looking table of information from images """ t = prettytable.PrettyTable(['ID', 'State', 'Name', 'Owner', 'Root device', 'Is public', 'Description']) t.align = 'l' for i in images: t.add_row([i.id, i.state, i.name, i.ownerId, i.root_device_type, i.is_public, i.description]) return t
[ "def", "ec2_image_table", "(", "images", ")", ":", "t", "=", "prettytable", ".", "PrettyTable", "(", "[", "'ID'", ",", "'State'", ",", "'Name'", ",", "'Owner'", ",", "'Root device'", ",", "'Is public'", ",", "'Description'", "]", ")", "t", ".", "align", "=", "'l'", "for", "i", "in", "images", ":", "t", ".", "add_row", "(", "[", "i", ".", "id", ",", "i", ".", "state", ",", "i", ".", "name", ",", "i", ".", "ownerId", ",", "i", ".", "root_device_type", ",", "i", ".", "is_public", ",", "i", ".", "description", "]", ")", "return", "t" ]
Print nice looking table of information from images
[ "Print", "nice", "looking", "table", "of", "information", "from", "images" ]
479cbe27a9f289b43f32f8e3de7d048a4a8993fe
https://github.com/eofs/aws/blob/479cbe27a9f289b43f32f8e3de7d048a4a8993fe/aws/main.py#L54-L62
train
eofs/aws
aws/main.py
ec2_fab
def ec2_fab(service, args): """ Run Fabric commands against EC2 instances """ instance_ids = args.instances instances = service.list(elb=args.elb, instance_ids=instance_ids) hosts = service.resolve_hosts(instances) fab.env.hosts = hosts fab.env.key_filename = settings.get('SSH', 'KEY_FILE') fab.env.user = settings.get('SSH', 'USER', getpass.getuser()) fab.env.parallel = True fabfile = find_fabfile(args.file) if not fabfile: print 'Couldn\'t find any fabfiles!' return fab.env.real_fabile = fabfile docstring, callables, default = load_fabfile(fabfile) fab_state.commands.update(callables) commands_to_run = parse_arguments(args.methods) for name, args, kwargs, arg_hosts, arg_roles, arg_exclude_hosts in commands_to_run: fab.execute(name, hosts=arg_hosts, roles=arg_roles, exclude_hosts=arg_exclude_hosts, *args, **kwargs)
python
def ec2_fab(service, args): """ Run Fabric commands against EC2 instances """ instance_ids = args.instances instances = service.list(elb=args.elb, instance_ids=instance_ids) hosts = service.resolve_hosts(instances) fab.env.hosts = hosts fab.env.key_filename = settings.get('SSH', 'KEY_FILE') fab.env.user = settings.get('SSH', 'USER', getpass.getuser()) fab.env.parallel = True fabfile = find_fabfile(args.file) if not fabfile: print 'Couldn\'t find any fabfiles!' return fab.env.real_fabile = fabfile docstring, callables, default = load_fabfile(fabfile) fab_state.commands.update(callables) commands_to_run = parse_arguments(args.methods) for name, args, kwargs, arg_hosts, arg_roles, arg_exclude_hosts in commands_to_run: fab.execute(name, hosts=arg_hosts, roles=arg_roles, exclude_hosts=arg_exclude_hosts, *args, **kwargs)
[ "def", "ec2_fab", "(", "service", ",", "args", ")", ":", "instance_ids", "=", "args", ".", "instances", "instances", "=", "service", ".", "list", "(", "elb", "=", "args", ".", "elb", ",", "instance_ids", "=", "instance_ids", ")", "hosts", "=", "service", ".", "resolve_hosts", "(", "instances", ")", "fab", ".", "env", ".", "hosts", "=", "hosts", "fab", ".", "env", ".", "key_filename", "=", "settings", ".", "get", "(", "'SSH'", ",", "'KEY_FILE'", ")", "fab", ".", "env", ".", "user", "=", "settings", ".", "get", "(", "'SSH'", ",", "'USER'", ",", "getpass", ".", "getuser", "(", ")", ")", "fab", ".", "env", ".", "parallel", "=", "True", "fabfile", "=", "find_fabfile", "(", "args", ".", "file", ")", "if", "not", "fabfile", ":", "print", "'Couldn\\'t find any fabfiles!'", "return", "fab", ".", "env", ".", "real_fabile", "=", "fabfile", "docstring", ",", "callables", ",", "default", "=", "load_fabfile", "(", "fabfile", ")", "fab_state", ".", "commands", ".", "update", "(", "callables", ")", "commands_to_run", "=", "parse_arguments", "(", "args", ".", "methods", ")", "for", "name", ",", "args", ",", "kwargs", ",", "arg_hosts", ",", "arg_roles", ",", "arg_exclude_hosts", "in", "commands_to_run", ":", "fab", ".", "execute", "(", "name", ",", "hosts", "=", "arg_hosts", ",", "roles", "=", "arg_roles", ",", "exclude_hosts", "=", "arg_exclude_hosts", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Run Fabric commands against EC2 instances
[ "Run", "Fabric", "commands", "against", "EC2", "instances" ]
479cbe27a9f289b43f32f8e3de7d048a4a8993fe
https://github.com/eofs/aws/blob/479cbe27a9f289b43f32f8e3de7d048a4a8993fe/aws/main.py#L64-L92
train
eofs/aws
aws/main.py
main
def main(): """ AWS support script's main method """ p = argparse.ArgumentParser(description='Manage Amazon AWS services', prog='aws', version=__version__) subparsers = p.add_subparsers(help='Select Amazon AWS service to use') # Auto Scaling as_service = subparsers.add_parser('as', help='Amazon Auto Scaling') as_subparsers = as_service.add_subparsers(help='Perform action') as_service_list = as_subparsers.add_parser('list', help='List Auto Scaling groups') as_service_list.set_defaults(func=as_list_handler) # Elastic Cloud Computing ec2_service = subparsers.add_parser('ec2', help='Amazon Elastic Compute Cloud') ec2_subparsers = ec2_service.add_subparsers(help='Perform action') ec2_service_list = ec2_subparsers.add_parser('list', help='List items') ec2_service_list.add_argument('--elb', '-e', help='Filter instances inside this ELB instance') ec2_service_list.add_argument('--instances', '-i', nargs='*', metavar=('id', 'id'), help='List of instance IDs to use as filter') ec2_service_list.add_argument('--type', default='instances', choices=['instances', 'regions', 'images'], help='List items of this type') ec2_service_list.set_defaults(func=ec2_list_handler) ec2_service_fab = ec2_subparsers.add_parser('fab', help='Run Fabric commands') ec2_service_fab.add_argument('--elb', '-e', help='Run against EC2 instances for this ELB') ec2_service_fab.add_argument('--instances', '-i', nargs='*', metavar=('id', 'id'), help='List of instance IDs to use as filter') ec2_service_fab.add_argument('--file', '-f', nargs='+', help='Define fabfile to use') ec2_service_fab.add_argument('methods', metavar='method:arg1,arg2=val2,host=foo,hosts=\'h1;h2\',', nargs='+', help='Specify one or more methods to execute.') ec2_service_fab.set_defaults(func=ec2_fab_handler) ec2_service_create = ec2_subparsers.add_parser('create', help='Create and start new instances') ec2_service_create.set_defaults(func=ec2_create_handler) ec2_service_start = ec2_subparsers.add_parser('start', help='Start existing 
instances') ec2_service_start.add_argument('instance', nargs='+', help='ID of an instance to start') ec2_service_start.set_defaults(func=ec2_start_handler) ec2_service_stop = ec2_subparsers.add_parser('stop', help='Stop instances') ec2_service_stop.add_argument('instance', nargs='+', help='ID of an instance to stop') ec2_service_stop.add_argument('--force', '-f', action='store_true', help='Force stop') ec2_service_stop.set_defaults(func=ec2_stop_handler) ec2_service_terminate = ec2_subparsers.add_parser('terminate', help='Terminate instances') ec2_service_terminate.add_argument('instance', nargs='+', help='ID of an instance to terminate') ec2_service_terminate.set_defaults(func=ec2_terminate_handler) ec2_service_images = ec2_subparsers.add_parser('images', help='List AMI images') ec2_service_images.add_argument('image', nargs='*', help='Image ID to use as filter') ec2_service_images.set_defaults(func=ec2_images_handler) ec2_service_create_image = ec2_subparsers.add_parser('create-image', help='Create AMI image from instance') ec2_service_create_image.add_argument('instance', help='ID of an instance to image') ec2_service_create_image.add_argument('name', help='The name of the image') ec2_service_create_image.add_argument('--description', '-d', help='Optional description for the image') ec2_service_create_image.add_argument('--noreboot', action='store_true', default=False, help='Do not shutdown the instance before creating image. 
' + 'Note: System integrity might suffer if used.') ec2_service_create_image.set_defaults(func=ec2_create_image_handler) # Elastic Load Balancing elb_service = subparsers.add_parser('elb', help='Amazon Elastic Load Balancing') elb_subparsers = elb_service.add_subparsers(help='Perform action') elb_service_list = elb_subparsers.add_parser('list', help='List items') elb_service_list.add_argument('--type', default='balancers', choices=['balancers', 'regions'], help='List items of this type') elb_service_list.set_defaults(func=elb_list_handler) elb_service_instances = elb_subparsers.add_parser('instances', help='List registered instances') elb_service_instances.add_argument('balancer', help='Name of the Load Balancer') elb_service_instances.set_defaults(func=elb_instances_handler) elb_service_register = elb_subparsers.add_parser('register', help='Register instances to balancer') elb_service_register.add_argument('balancer', help='Name of the load balancer') elb_service_register.add_argument('instance', nargs='+', help='ID of an instance to register') elb_service_register.set_defaults(func=elb_register_handler) elb_service_deregister = elb_subparsers.add_parser('deregister', help='Deregister instances of balancer') elb_service_deregister.add_argument('balancer', help='Name of the Load Balancer') elb_service_deregister.add_argument('instance', nargs='+', help='ID of an instance to deregister') elb_service_deregister.set_defaults(func=elb_deregister_handler) elb_service_zones = elb_subparsers.add_parser('zones', help='Enable or disable availability zones') elb_service_zones.add_argument('balancer', help='Name of the Load Balancer') elb_service_zones.add_argument('zone', nargs='+', help='Name of the availability zone') elb_service_zones.add_argument('status', help='Disable of enable zones', choices=['enable', 'disable']) elb_service_zones.set_defaults(func=elb_zones_handler) elb_service_delete = elb_subparsers.add_parser('delete', help='Delete Load Balancer') 
elb_service_delete.add_argument('balancer', help='Name of the Load Balancer') elb_service_delete.set_defaults(func=elb_delete_handler) # elb_service_create = elb_subparsers.add_parser('create', help='Create new Load Balancer') # elb_service_delete = elb_subparsers.add_parser('delete', help='Delete Load Balancer') # elb_service_register = elb_subparsers.add_parser('register', help='Register EC2 instance') # elb_service_zone = elb_subparsers.add_parser('zone', help='Enable or disable region') arguments = p.parse_args() arguments.func(p, arguments)
python
def main(): """ AWS support script's main method """ p = argparse.ArgumentParser(description='Manage Amazon AWS services', prog='aws', version=__version__) subparsers = p.add_subparsers(help='Select Amazon AWS service to use') # Auto Scaling as_service = subparsers.add_parser('as', help='Amazon Auto Scaling') as_subparsers = as_service.add_subparsers(help='Perform action') as_service_list = as_subparsers.add_parser('list', help='List Auto Scaling groups') as_service_list.set_defaults(func=as_list_handler) # Elastic Cloud Computing ec2_service = subparsers.add_parser('ec2', help='Amazon Elastic Compute Cloud') ec2_subparsers = ec2_service.add_subparsers(help='Perform action') ec2_service_list = ec2_subparsers.add_parser('list', help='List items') ec2_service_list.add_argument('--elb', '-e', help='Filter instances inside this ELB instance') ec2_service_list.add_argument('--instances', '-i', nargs='*', metavar=('id', 'id'), help='List of instance IDs to use as filter') ec2_service_list.add_argument('--type', default='instances', choices=['instances', 'regions', 'images'], help='List items of this type') ec2_service_list.set_defaults(func=ec2_list_handler) ec2_service_fab = ec2_subparsers.add_parser('fab', help='Run Fabric commands') ec2_service_fab.add_argument('--elb', '-e', help='Run against EC2 instances for this ELB') ec2_service_fab.add_argument('--instances', '-i', nargs='*', metavar=('id', 'id'), help='List of instance IDs to use as filter') ec2_service_fab.add_argument('--file', '-f', nargs='+', help='Define fabfile to use') ec2_service_fab.add_argument('methods', metavar='method:arg1,arg2=val2,host=foo,hosts=\'h1;h2\',', nargs='+', help='Specify one or more methods to execute.') ec2_service_fab.set_defaults(func=ec2_fab_handler) ec2_service_create = ec2_subparsers.add_parser('create', help='Create and start new instances') ec2_service_create.set_defaults(func=ec2_create_handler) ec2_service_start = ec2_subparsers.add_parser('start', help='Start existing 
instances') ec2_service_start.add_argument('instance', nargs='+', help='ID of an instance to start') ec2_service_start.set_defaults(func=ec2_start_handler) ec2_service_stop = ec2_subparsers.add_parser('stop', help='Stop instances') ec2_service_stop.add_argument('instance', nargs='+', help='ID of an instance to stop') ec2_service_stop.add_argument('--force', '-f', action='store_true', help='Force stop') ec2_service_stop.set_defaults(func=ec2_stop_handler) ec2_service_terminate = ec2_subparsers.add_parser('terminate', help='Terminate instances') ec2_service_terminate.add_argument('instance', nargs='+', help='ID of an instance to terminate') ec2_service_terminate.set_defaults(func=ec2_terminate_handler) ec2_service_images = ec2_subparsers.add_parser('images', help='List AMI images') ec2_service_images.add_argument('image', nargs='*', help='Image ID to use as filter') ec2_service_images.set_defaults(func=ec2_images_handler) ec2_service_create_image = ec2_subparsers.add_parser('create-image', help='Create AMI image from instance') ec2_service_create_image.add_argument('instance', help='ID of an instance to image') ec2_service_create_image.add_argument('name', help='The name of the image') ec2_service_create_image.add_argument('--description', '-d', help='Optional description for the image') ec2_service_create_image.add_argument('--noreboot', action='store_true', default=False, help='Do not shutdown the instance before creating image. 
' + 'Note: System integrity might suffer if used.') ec2_service_create_image.set_defaults(func=ec2_create_image_handler) # Elastic Load Balancing elb_service = subparsers.add_parser('elb', help='Amazon Elastic Load Balancing') elb_subparsers = elb_service.add_subparsers(help='Perform action') elb_service_list = elb_subparsers.add_parser('list', help='List items') elb_service_list.add_argument('--type', default='balancers', choices=['balancers', 'regions'], help='List items of this type') elb_service_list.set_defaults(func=elb_list_handler) elb_service_instances = elb_subparsers.add_parser('instances', help='List registered instances') elb_service_instances.add_argument('balancer', help='Name of the Load Balancer') elb_service_instances.set_defaults(func=elb_instances_handler) elb_service_register = elb_subparsers.add_parser('register', help='Register instances to balancer') elb_service_register.add_argument('balancer', help='Name of the load balancer') elb_service_register.add_argument('instance', nargs='+', help='ID of an instance to register') elb_service_register.set_defaults(func=elb_register_handler) elb_service_deregister = elb_subparsers.add_parser('deregister', help='Deregister instances of balancer') elb_service_deregister.add_argument('balancer', help='Name of the Load Balancer') elb_service_deregister.add_argument('instance', nargs='+', help='ID of an instance to deregister') elb_service_deregister.set_defaults(func=elb_deregister_handler) elb_service_zones = elb_subparsers.add_parser('zones', help='Enable or disable availability zones') elb_service_zones.add_argument('balancer', help='Name of the Load Balancer') elb_service_zones.add_argument('zone', nargs='+', help='Name of the availability zone') elb_service_zones.add_argument('status', help='Disable of enable zones', choices=['enable', 'disable']) elb_service_zones.set_defaults(func=elb_zones_handler) elb_service_delete = elb_subparsers.add_parser('delete', help='Delete Load Balancer') 
elb_service_delete.add_argument('balancer', help='Name of the Load Balancer') elb_service_delete.set_defaults(func=elb_delete_handler) # elb_service_create = elb_subparsers.add_parser('create', help='Create new Load Balancer') # elb_service_delete = elb_subparsers.add_parser('delete', help='Delete Load Balancer') # elb_service_register = elb_subparsers.add_parser('register', help='Register EC2 instance') # elb_service_zone = elb_subparsers.add_parser('zone', help='Enable or disable region') arguments = p.parse_args() arguments.func(p, arguments)
[ "def", "main", "(", ")", ":", "p", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'Manage Amazon AWS services'", ",", "prog", "=", "'aws'", ",", "version", "=", "__version__", ")", "subparsers", "=", "p", ".", "add_subparsers", "(", "help", "=", "'Select Amazon AWS service to use'", ")", "# Auto Scaling", "as_service", "=", "subparsers", ".", "add_parser", "(", "'as'", ",", "help", "=", "'Amazon Auto Scaling'", ")", "as_subparsers", "=", "as_service", ".", "add_subparsers", "(", "help", "=", "'Perform action'", ")", "as_service_list", "=", "as_subparsers", ".", "add_parser", "(", "'list'", ",", "help", "=", "'List Auto Scaling groups'", ")", "as_service_list", ".", "set_defaults", "(", "func", "=", "as_list_handler", ")", "# Elastic Cloud Computing", "ec2_service", "=", "subparsers", ".", "add_parser", "(", "'ec2'", ",", "help", "=", "'Amazon Elastic Compute Cloud'", ")", "ec2_subparsers", "=", "ec2_service", ".", "add_subparsers", "(", "help", "=", "'Perform action'", ")", "ec2_service_list", "=", "ec2_subparsers", ".", "add_parser", "(", "'list'", ",", "help", "=", "'List items'", ")", "ec2_service_list", ".", "add_argument", "(", "'--elb'", ",", "'-e'", ",", "help", "=", "'Filter instances inside this ELB instance'", ")", "ec2_service_list", ".", "add_argument", "(", "'--instances'", ",", "'-i'", ",", "nargs", "=", "'*'", ",", "metavar", "=", "(", "'id'", ",", "'id'", ")", ",", "help", "=", "'List of instance IDs to use as filter'", ")", "ec2_service_list", ".", "add_argument", "(", "'--type'", ",", "default", "=", "'instances'", ",", "choices", "=", "[", "'instances'", ",", "'regions'", ",", "'images'", "]", ",", "help", "=", "'List items of this type'", ")", "ec2_service_list", ".", "set_defaults", "(", "func", "=", "ec2_list_handler", ")", "ec2_service_fab", "=", "ec2_subparsers", ".", "add_parser", "(", "'fab'", ",", "help", "=", "'Run Fabric commands'", ")", "ec2_service_fab", ".", "add_argument", "(", "'--elb'", ",", "'-e'", ",", 
"help", "=", "'Run against EC2 instances for this ELB'", ")", "ec2_service_fab", ".", "add_argument", "(", "'--instances'", ",", "'-i'", ",", "nargs", "=", "'*'", ",", "metavar", "=", "(", "'id'", ",", "'id'", ")", ",", "help", "=", "'List of instance IDs to use as filter'", ")", "ec2_service_fab", ".", "add_argument", "(", "'--file'", ",", "'-f'", ",", "nargs", "=", "'+'", ",", "help", "=", "'Define fabfile to use'", ")", "ec2_service_fab", ".", "add_argument", "(", "'methods'", ",", "metavar", "=", "'method:arg1,arg2=val2,host=foo,hosts=\\'h1;h2\\','", ",", "nargs", "=", "'+'", ",", "help", "=", "'Specify one or more methods to execute.'", ")", "ec2_service_fab", ".", "set_defaults", "(", "func", "=", "ec2_fab_handler", ")", "ec2_service_create", "=", "ec2_subparsers", ".", "add_parser", "(", "'create'", ",", "help", "=", "'Create and start new instances'", ")", "ec2_service_create", ".", "set_defaults", "(", "func", "=", "ec2_create_handler", ")", "ec2_service_start", "=", "ec2_subparsers", ".", "add_parser", "(", "'start'", ",", "help", "=", "'Start existing instances'", ")", "ec2_service_start", ".", "add_argument", "(", "'instance'", ",", "nargs", "=", "'+'", ",", "help", "=", "'ID of an instance to start'", ")", "ec2_service_start", ".", "set_defaults", "(", "func", "=", "ec2_start_handler", ")", "ec2_service_stop", "=", "ec2_subparsers", ".", "add_parser", "(", "'stop'", ",", "help", "=", "'Stop instances'", ")", "ec2_service_stop", ".", "add_argument", "(", "'instance'", ",", "nargs", "=", "'+'", ",", "help", "=", "'ID of an instance to stop'", ")", "ec2_service_stop", ".", "add_argument", "(", "'--force'", ",", "'-f'", ",", "action", "=", "'store_true'", ",", "help", "=", "'Force stop'", ")", "ec2_service_stop", ".", "set_defaults", "(", "func", "=", "ec2_stop_handler", ")", "ec2_service_terminate", "=", "ec2_subparsers", ".", "add_parser", "(", "'terminate'", ",", "help", "=", "'Terminate instances'", ")", "ec2_service_terminate", ".", "add_argument", 
"(", "'instance'", ",", "nargs", "=", "'+'", ",", "help", "=", "'ID of an instance to terminate'", ")", "ec2_service_terminate", ".", "set_defaults", "(", "func", "=", "ec2_terminate_handler", ")", "ec2_service_images", "=", "ec2_subparsers", ".", "add_parser", "(", "'images'", ",", "help", "=", "'List AMI images'", ")", "ec2_service_images", ".", "add_argument", "(", "'image'", ",", "nargs", "=", "'*'", ",", "help", "=", "'Image ID to use as filter'", ")", "ec2_service_images", ".", "set_defaults", "(", "func", "=", "ec2_images_handler", ")", "ec2_service_create_image", "=", "ec2_subparsers", ".", "add_parser", "(", "'create-image'", ",", "help", "=", "'Create AMI image from instance'", ")", "ec2_service_create_image", ".", "add_argument", "(", "'instance'", ",", "help", "=", "'ID of an instance to image'", ")", "ec2_service_create_image", ".", "add_argument", "(", "'name'", ",", "help", "=", "'The name of the image'", ")", "ec2_service_create_image", ".", "add_argument", "(", "'--description'", ",", "'-d'", ",", "help", "=", "'Optional description for the image'", ")", "ec2_service_create_image", ".", "add_argument", "(", "'--noreboot'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "'Do not shutdown the instance before creating image. 
'", "+", "'Note: System integrity might suffer if used.'", ")", "ec2_service_create_image", ".", "set_defaults", "(", "func", "=", "ec2_create_image_handler", ")", "# Elastic Load Balancing", "elb_service", "=", "subparsers", ".", "add_parser", "(", "'elb'", ",", "help", "=", "'Amazon Elastic Load Balancing'", ")", "elb_subparsers", "=", "elb_service", ".", "add_subparsers", "(", "help", "=", "'Perform action'", ")", "elb_service_list", "=", "elb_subparsers", ".", "add_parser", "(", "'list'", ",", "help", "=", "'List items'", ")", "elb_service_list", ".", "add_argument", "(", "'--type'", ",", "default", "=", "'balancers'", ",", "choices", "=", "[", "'balancers'", ",", "'regions'", "]", ",", "help", "=", "'List items of this type'", ")", "elb_service_list", ".", "set_defaults", "(", "func", "=", "elb_list_handler", ")", "elb_service_instances", "=", "elb_subparsers", ".", "add_parser", "(", "'instances'", ",", "help", "=", "'List registered instances'", ")", "elb_service_instances", ".", "add_argument", "(", "'balancer'", ",", "help", "=", "'Name of the Load Balancer'", ")", "elb_service_instances", ".", "set_defaults", "(", "func", "=", "elb_instances_handler", ")", "elb_service_register", "=", "elb_subparsers", ".", "add_parser", "(", "'register'", ",", "help", "=", "'Register instances to balancer'", ")", "elb_service_register", ".", "add_argument", "(", "'balancer'", ",", "help", "=", "'Name of the load balancer'", ")", "elb_service_register", ".", "add_argument", "(", "'instance'", ",", "nargs", "=", "'+'", ",", "help", "=", "'ID of an instance to register'", ")", "elb_service_register", ".", "set_defaults", "(", "func", "=", "elb_register_handler", ")", "elb_service_deregister", "=", "elb_subparsers", ".", "add_parser", "(", "'deregister'", ",", "help", "=", "'Deregister instances of balancer'", ")", "elb_service_deregister", ".", "add_argument", "(", "'balancer'", ",", "help", "=", "'Name of the Load Balancer'", ")", "elb_service_deregister", ".", 
"add_argument", "(", "'instance'", ",", "nargs", "=", "'+'", ",", "help", "=", "'ID of an instance to deregister'", ")", "elb_service_deregister", ".", "set_defaults", "(", "func", "=", "elb_deregister_handler", ")", "elb_service_zones", "=", "elb_subparsers", ".", "add_parser", "(", "'zones'", ",", "help", "=", "'Enable or disable availability zones'", ")", "elb_service_zones", ".", "add_argument", "(", "'balancer'", ",", "help", "=", "'Name of the Load Balancer'", ")", "elb_service_zones", ".", "add_argument", "(", "'zone'", ",", "nargs", "=", "'+'", ",", "help", "=", "'Name of the availability zone'", ")", "elb_service_zones", ".", "add_argument", "(", "'status'", ",", "help", "=", "'Disable of enable zones'", ",", "choices", "=", "[", "'enable'", ",", "'disable'", "]", ")", "elb_service_zones", ".", "set_defaults", "(", "func", "=", "elb_zones_handler", ")", "elb_service_delete", "=", "elb_subparsers", ".", "add_parser", "(", "'delete'", ",", "help", "=", "'Delete Load Balancer'", ")", "elb_service_delete", ".", "add_argument", "(", "'balancer'", ",", "help", "=", "'Name of the Load Balancer'", ")", "elb_service_delete", ".", "set_defaults", "(", "func", "=", "elb_delete_handler", ")", "# elb_service_create = elb_subparsers.add_parser('create', help='Create new Load Balancer')", "# elb_service_delete = elb_subparsers.add_parser('delete', help='Delete Load Balancer')", "# elb_service_register = elb_subparsers.add_parser('register', help='Register EC2 instance')", "# elb_service_zone = elb_subparsers.add_parser('zone', help='Enable or disable region')", "arguments", "=", "p", ".", "parse_args", "(", ")", "arguments", ".", "func", "(", "p", ",", "arguments", ")" ]
AWS support script's main method
[ "AWS", "support", "script", "s", "main", "method" ]
479cbe27a9f289b43f32f8e3de7d048a4a8993fe
https://github.com/eofs/aws/blob/479cbe27a9f289b43f32f8e3de7d048a4a8993fe/aws/main.py#L222-L330
train
pescadores/pescador
pescador/maps.py
buffer_stream
def buffer_stream(stream, buffer_size, partial=False, axis=None): '''Buffer "data" from an stream into one data object. Parameters ---------- stream : stream The stream to buffer buffer_size : int > 0 The number of examples to retain per batch. partial : bool, default=False If True, yield a final partial batch on under-run. axis : int or None If `None` (default), concatenate data along a new 0th axis. Otherwise, concatenation is performed along the specified axis. This is primarily useful when combining data that already has a dimension for buffer index, e.g., when buffering buffers. Yields ------ batch A batch of size at most `buffer_size` Raises ------ DataError If the stream contains items that are not data-like. ''' data = [] count = 0 for item in stream: data.append(item) count += 1 if count < buffer_size: continue try: yield __stack_data(data, axis=axis) except (TypeError, AttributeError): raise DataError("Malformed data stream: {}".format(data)) finally: data = [] count = 0 if data and partial: yield __stack_data(data, axis=axis)
python
def buffer_stream(stream, buffer_size, partial=False, axis=None): '''Buffer "data" from an stream into one data object. Parameters ---------- stream : stream The stream to buffer buffer_size : int > 0 The number of examples to retain per batch. partial : bool, default=False If True, yield a final partial batch on under-run. axis : int or None If `None` (default), concatenate data along a new 0th axis. Otherwise, concatenation is performed along the specified axis. This is primarily useful when combining data that already has a dimension for buffer index, e.g., when buffering buffers. Yields ------ batch A batch of size at most `buffer_size` Raises ------ DataError If the stream contains items that are not data-like. ''' data = [] count = 0 for item in stream: data.append(item) count += 1 if count < buffer_size: continue try: yield __stack_data(data, axis=axis) except (TypeError, AttributeError): raise DataError("Malformed data stream: {}".format(data)) finally: data = [] count = 0 if data and partial: yield __stack_data(data, axis=axis)
[ "def", "buffer_stream", "(", "stream", ",", "buffer_size", ",", "partial", "=", "False", ",", "axis", "=", "None", ")", ":", "data", "=", "[", "]", "count", "=", "0", "for", "item", "in", "stream", ":", "data", ".", "append", "(", "item", ")", "count", "+=", "1", "if", "count", "<", "buffer_size", ":", "continue", "try", ":", "yield", "__stack_data", "(", "data", ",", "axis", "=", "axis", ")", "except", "(", "TypeError", ",", "AttributeError", ")", ":", "raise", "DataError", "(", "\"Malformed data stream: {}\"", ".", "format", "(", "data", ")", ")", "finally", ":", "data", "=", "[", "]", "count", "=", "0", "if", "data", "and", "partial", ":", "yield", "__stack_data", "(", "data", ",", "axis", "=", "axis", ")" ]
Buffer "data" from an stream into one data object. Parameters ---------- stream : stream The stream to buffer buffer_size : int > 0 The number of examples to retain per batch. partial : bool, default=False If True, yield a final partial batch on under-run. axis : int or None If `None` (default), concatenate data along a new 0th axis. Otherwise, concatenation is performed along the specified axis. This is primarily useful when combining data that already has a dimension for buffer index, e.g., when buffering buffers. Yields ------ batch A batch of size at most `buffer_size` Raises ------ DataError If the stream contains items that are not data-like.
[ "Buffer", "data", "from", "an", "stream", "into", "one", "data", "object", "." ]
786e2b5f882d13ea563769fbc7ad0a0a10c3553d
https://github.com/pescadores/pescador/blob/786e2b5f882d13ea563769fbc7ad0a0a10c3553d/pescador/maps.py#L34-L84
train
pescadores/pescador
pescador/maps.py
tuples
def tuples(stream, *keys): """Reformat data as tuples. Parameters ---------- stream : iterable Stream of data objects. *keys : strings Keys to use for ordering data. Yields ------ items : tuple of np.ndarrays Data object reformated as a tuple. Raises ------ DataError If the stream contains items that are not data-like. KeyError If a data object does not contain the requested key. """ if not keys: raise PescadorError('Unable to generate tuples from ' 'an empty item set') for data in stream: try: yield tuple(data[key] for key in keys) except TypeError: raise DataError("Malformed data stream: {}".format(data))
python
def tuples(stream, *keys): """Reformat data as tuples. Parameters ---------- stream : iterable Stream of data objects. *keys : strings Keys to use for ordering data. Yields ------ items : tuple of np.ndarrays Data object reformated as a tuple. Raises ------ DataError If the stream contains items that are not data-like. KeyError If a data object does not contain the requested key. """ if not keys: raise PescadorError('Unable to generate tuples from ' 'an empty item set') for data in stream: try: yield tuple(data[key] for key in keys) except TypeError: raise DataError("Malformed data stream: {}".format(data))
[ "def", "tuples", "(", "stream", ",", "*", "keys", ")", ":", "if", "not", "keys", ":", "raise", "PescadorError", "(", "'Unable to generate tuples from '", "'an empty item set'", ")", "for", "data", "in", "stream", ":", "try", ":", "yield", "tuple", "(", "data", "[", "key", "]", "for", "key", "in", "keys", ")", "except", "TypeError", ":", "raise", "DataError", "(", "\"Malformed data stream: {}\"", ".", "format", "(", "data", ")", ")" ]
Reformat data as tuples. Parameters ---------- stream : iterable Stream of data objects. *keys : strings Keys to use for ordering data. Yields ------ items : tuple of np.ndarrays Data object reformated as a tuple. Raises ------ DataError If the stream contains items that are not data-like. KeyError If a data object does not contain the requested key.
[ "Reformat", "data", "as", "tuples", "." ]
786e2b5f882d13ea563769fbc7ad0a0a10c3553d
https://github.com/pescadores/pescador/blob/786e2b5f882d13ea563769fbc7ad0a0a10c3553d/pescador/maps.py#L87-L117
train
pescadores/pescador
pescador/maps.py
keras_tuples
def keras_tuples(stream, inputs=None, outputs=None): """Reformat data objects as keras-compatible tuples. For more detail: https://keras.io/models/model/#fit Parameters ---------- stream : iterable Stream of data objects. inputs : string or iterable of strings, None Keys to use for ordered input data. If not specified, returns `None` in its place. outputs : string or iterable of strings, default=None Keys to use for ordered output data. If not specified, returns `None` in its place. Yields ------ x : np.ndarray, list of np.ndarray, or None If `inputs` is a string, `x` is a single np.ndarray. If `inputs` is an iterable of strings, `x` is a list of np.ndarrays. If `inputs` is a null type, `x` is None. y : np.ndarray, list of np.ndarray, or None If `outputs` is a string, `y` is a single np.ndarray. If `outputs` is an iterable of strings, `y` is a list of np.ndarrays. If `outputs` is a null type, `y` is None. Raises ------ DataError If the stream contains items that are not data-like. """ flatten_inputs, flatten_outputs = False, False if inputs and isinstance(inputs, six.string_types): inputs = [inputs] flatten_inputs = True if outputs and isinstance(outputs, six.string_types): outputs = [outputs] flatten_outputs = True inputs, outputs = (inputs or []), (outputs or []) if not inputs + outputs: raise PescadorError('At least one key must be given for ' '`inputs` or `outputs`') for data in stream: try: x = list(data[key] for key in inputs) or None if len(inputs) == 1 and flatten_inputs: x = x[0] y = list(data[key] for key in outputs) or None if len(outputs) == 1 and flatten_outputs: y = y[0] yield (x, y) except TypeError: raise DataError("Malformed data stream: {}".format(data))
python
def keras_tuples(stream, inputs=None, outputs=None): """Reformat data objects as keras-compatible tuples. For more detail: https://keras.io/models/model/#fit Parameters ---------- stream : iterable Stream of data objects. inputs : string or iterable of strings, None Keys to use for ordered input data. If not specified, returns `None` in its place. outputs : string or iterable of strings, default=None Keys to use for ordered output data. If not specified, returns `None` in its place. Yields ------ x : np.ndarray, list of np.ndarray, or None If `inputs` is a string, `x` is a single np.ndarray. If `inputs` is an iterable of strings, `x` is a list of np.ndarrays. If `inputs` is a null type, `x` is None. y : np.ndarray, list of np.ndarray, or None If `outputs` is a string, `y` is a single np.ndarray. If `outputs` is an iterable of strings, `y` is a list of np.ndarrays. If `outputs` is a null type, `y` is None. Raises ------ DataError If the stream contains items that are not data-like. """ flatten_inputs, flatten_outputs = False, False if inputs and isinstance(inputs, six.string_types): inputs = [inputs] flatten_inputs = True if outputs and isinstance(outputs, six.string_types): outputs = [outputs] flatten_outputs = True inputs, outputs = (inputs or []), (outputs or []) if not inputs + outputs: raise PescadorError('At least one key must be given for ' '`inputs` or `outputs`') for data in stream: try: x = list(data[key] for key in inputs) or None if len(inputs) == 1 and flatten_inputs: x = x[0] y = list(data[key] for key in outputs) or None if len(outputs) == 1 and flatten_outputs: y = y[0] yield (x, y) except TypeError: raise DataError("Malformed data stream: {}".format(data))
[ "def", "keras_tuples", "(", "stream", ",", "inputs", "=", "None", ",", "outputs", "=", "None", ")", ":", "flatten_inputs", ",", "flatten_outputs", "=", "False", ",", "False", "if", "inputs", "and", "isinstance", "(", "inputs", ",", "six", ".", "string_types", ")", ":", "inputs", "=", "[", "inputs", "]", "flatten_inputs", "=", "True", "if", "outputs", "and", "isinstance", "(", "outputs", ",", "six", ".", "string_types", ")", ":", "outputs", "=", "[", "outputs", "]", "flatten_outputs", "=", "True", "inputs", ",", "outputs", "=", "(", "inputs", "or", "[", "]", ")", ",", "(", "outputs", "or", "[", "]", ")", "if", "not", "inputs", "+", "outputs", ":", "raise", "PescadorError", "(", "'At least one key must be given for '", "'`inputs` or `outputs`'", ")", "for", "data", "in", "stream", ":", "try", ":", "x", "=", "list", "(", "data", "[", "key", "]", "for", "key", "in", "inputs", ")", "or", "None", "if", "len", "(", "inputs", ")", "==", "1", "and", "flatten_inputs", ":", "x", "=", "x", "[", "0", "]", "y", "=", "list", "(", "data", "[", "key", "]", "for", "key", "in", "outputs", ")", "or", "None", "if", "len", "(", "outputs", ")", "==", "1", "and", "flatten_outputs", ":", "y", "=", "y", "[", "0", "]", "yield", "(", "x", ",", "y", ")", "except", "TypeError", ":", "raise", "DataError", "(", "\"Malformed data stream: {}\"", ".", "format", "(", "data", ")", ")" ]
Reformat data objects as keras-compatible tuples. For more detail: https://keras.io/models/model/#fit Parameters ---------- stream : iterable Stream of data objects. inputs : string or iterable of strings, None Keys to use for ordered input data. If not specified, returns `None` in its place. outputs : string or iterable of strings, default=None Keys to use for ordered output data. If not specified, returns `None` in its place. Yields ------ x : np.ndarray, list of np.ndarray, or None If `inputs` is a string, `x` is a single np.ndarray. If `inputs` is an iterable of strings, `x` is a list of np.ndarrays. If `inputs` is a null type, `x` is None. y : np.ndarray, list of np.ndarray, or None If `outputs` is a string, `y` is a single np.ndarray. If `outputs` is an iterable of strings, `y` is a list of np.ndarrays. If `outputs` is a null type, `y` is None. Raises ------ DataError If the stream contains items that are not data-like.
[ "Reformat", "data", "objects", "as", "keras", "-", "compatible", "tuples", "." ]
786e2b5f882d13ea563769fbc7ad0a0a10c3553d
https://github.com/pescadores/pescador/blob/786e2b5f882d13ea563769fbc7ad0a0a10c3553d/pescador/maps.py#L120-L179
train
vanheeringen-lab/gimmemotifs
gimmemotifs/commands/location.py
location
def location(args): """ Creates histrogram of motif location. Parameters ---------- args : argparse object Command line arguments. """ fastafile = args.fastafile pwmfile = args.pwmfile lwidth = args.width if not lwidth: f = Fasta(fastafile) lwidth = len(f.items()[0][1]) f = None jobs = [] motifs = pwmfile_to_motifs(pwmfile) ids = [motif.id for motif in motifs] if args.ids: ids = args.ids.split(",") n_cpus = int(MotifConfig().get_default_params()["ncpus"]) pool = Pool(processes=n_cpus, maxtasksperchild=1000) for motif in motifs: if motif.id in ids: outfile = os.path.join("%s_histogram" % motif.id) jobs.append( pool.apply_async( motif_localization, (fastafile,motif,lwidth,outfile, args.cutoff) )) for job in jobs: job.get()
python
def location(args): """ Creates histrogram of motif location. Parameters ---------- args : argparse object Command line arguments. """ fastafile = args.fastafile pwmfile = args.pwmfile lwidth = args.width if not lwidth: f = Fasta(fastafile) lwidth = len(f.items()[0][1]) f = None jobs = [] motifs = pwmfile_to_motifs(pwmfile) ids = [motif.id for motif in motifs] if args.ids: ids = args.ids.split(",") n_cpus = int(MotifConfig().get_default_params()["ncpus"]) pool = Pool(processes=n_cpus, maxtasksperchild=1000) for motif in motifs: if motif.id in ids: outfile = os.path.join("%s_histogram" % motif.id) jobs.append( pool.apply_async( motif_localization, (fastafile,motif,lwidth,outfile, args.cutoff) )) for job in jobs: job.get()
[ "def", "location", "(", "args", ")", ":", "fastafile", "=", "args", ".", "fastafile", "pwmfile", "=", "args", ".", "pwmfile", "lwidth", "=", "args", ".", "width", "if", "not", "lwidth", ":", "f", "=", "Fasta", "(", "fastafile", ")", "lwidth", "=", "len", "(", "f", ".", "items", "(", ")", "[", "0", "]", "[", "1", "]", ")", "f", "=", "None", "jobs", "=", "[", "]", "motifs", "=", "pwmfile_to_motifs", "(", "pwmfile", ")", "ids", "=", "[", "motif", ".", "id", "for", "motif", "in", "motifs", "]", "if", "args", ".", "ids", ":", "ids", "=", "args", ".", "ids", ".", "split", "(", "\",\"", ")", "n_cpus", "=", "int", "(", "MotifConfig", "(", ")", ".", "get_default_params", "(", ")", "[", "\"ncpus\"", "]", ")", "pool", "=", "Pool", "(", "processes", "=", "n_cpus", ",", "maxtasksperchild", "=", "1000", ")", "for", "motif", "in", "motifs", ":", "if", "motif", ".", "id", "in", "ids", ":", "outfile", "=", "os", ".", "path", ".", "join", "(", "\"%s_histogram\"", "%", "motif", ".", "id", ")", "jobs", ".", "append", "(", "pool", ".", "apply_async", "(", "motif_localization", ",", "(", "fastafile", ",", "motif", ",", "lwidth", ",", "outfile", ",", "args", ".", "cutoff", ")", ")", ")", "for", "job", "in", "jobs", ":", "job", ".", "get", "(", ")" ]
Creates histrogram of motif location. Parameters ---------- args : argparse object Command line arguments.
[ "Creates", "histrogram", "of", "motif", "location", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/commands/location.py#L18-L54
train
vanheeringen-lab/gimmemotifs
gimmemotifs/shutils.py
which
def which(fname): """Find location of executable.""" if "PATH" not in os.environ or not os.environ["PATH"]: path = os.defpath else: path = os.environ["PATH"] for p in [fname] + [os.path.join(x, fname) for x in path.split(os.pathsep)]: p = os.path.abspath(p) if os.access(p, os.X_OK) and not os.path.isdir(p): return p p = sp.Popen("locate %s" % fname, shell=True, stdout=sp.PIPE, stderr=sp.PIPE) (stdout, stderr) = p.communicate() if not stderr: for p in stdout.decode().split("\n"): if (os.path.basename(p) == fname) and ( os.access(p, os.X_OK)) and ( not os.path.isdir(p)): return p
python
def which(fname): """Find location of executable.""" if "PATH" not in os.environ or not os.environ["PATH"]: path = os.defpath else: path = os.environ["PATH"] for p in [fname] + [os.path.join(x, fname) for x in path.split(os.pathsep)]: p = os.path.abspath(p) if os.access(p, os.X_OK) and not os.path.isdir(p): return p p = sp.Popen("locate %s" % fname, shell=True, stdout=sp.PIPE, stderr=sp.PIPE) (stdout, stderr) = p.communicate() if not stderr: for p in stdout.decode().split("\n"): if (os.path.basename(p) == fname) and ( os.access(p, os.X_OK)) and ( not os.path.isdir(p)): return p
[ "def", "which", "(", "fname", ")", ":", "if", "\"PATH\"", "not", "in", "os", ".", "environ", "or", "not", "os", ".", "environ", "[", "\"PATH\"", "]", ":", "path", "=", "os", ".", "defpath", "else", ":", "path", "=", "os", ".", "environ", "[", "\"PATH\"", "]", "for", "p", "in", "[", "fname", "]", "+", "[", "os", ".", "path", ".", "join", "(", "x", ",", "fname", ")", "for", "x", "in", "path", ".", "split", "(", "os", ".", "pathsep", ")", "]", ":", "p", "=", "os", ".", "path", ".", "abspath", "(", "p", ")", "if", "os", ".", "access", "(", "p", ",", "os", ".", "X_OK", ")", "and", "not", "os", ".", "path", ".", "isdir", "(", "p", ")", ":", "return", "p", "p", "=", "sp", ".", "Popen", "(", "\"locate %s\"", "%", "fname", ",", "shell", "=", "True", ",", "stdout", "=", "sp", ".", "PIPE", ",", "stderr", "=", "sp", ".", "PIPE", ")", "(", "stdout", ",", "stderr", ")", "=", "p", ".", "communicate", "(", ")", "if", "not", "stderr", ":", "for", "p", "in", "stdout", ".", "decode", "(", ")", ".", "split", "(", "\"\\n\"", ")", ":", "if", "(", "os", ".", "path", ".", "basename", "(", "p", ")", "==", "fname", ")", "and", "(", "os", ".", "access", "(", "p", ",", "os", ".", "X_OK", ")", ")", "and", "(", "not", "os", ".", "path", ".", "isdir", "(", "p", ")", ")", ":", "return", "p" ]
Find location of executable.
[ "Find", "location", "of", "executable", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/shutils.py#L11-L30
train
vanheeringen-lab/gimmemotifs
gimmemotifs/shutils.py
find_by_ext
def find_by_ext(dirname, ext): """Find all files in a directory by extension.""" # Get all fasta-files try: files = os.listdir(dirname) except OSError: if os.path.exists(dirname): cmd = "find {0} -maxdepth 1 -name \"*\"".format(dirname) p = sp.Popen(cmd, shell=True, stdout=sp.PIPE, stderr=sp.PIPE) stdout, _stderr = p.communicate() files = [os.path.basename(fname) for fname in stdout.decode().splitlines()] else: raise retfiles = [os.path.join(dirname, fname) for fname in files if os.path.splitext(fname)[-1] in ext] return retfiles
python
def find_by_ext(dirname, ext): """Find all files in a directory by extension.""" # Get all fasta-files try: files = os.listdir(dirname) except OSError: if os.path.exists(dirname): cmd = "find {0} -maxdepth 1 -name \"*\"".format(dirname) p = sp.Popen(cmd, shell=True, stdout=sp.PIPE, stderr=sp.PIPE) stdout, _stderr = p.communicate() files = [os.path.basename(fname) for fname in stdout.decode().splitlines()] else: raise retfiles = [os.path.join(dirname, fname) for fname in files if os.path.splitext(fname)[-1] in ext] return retfiles
[ "def", "find_by_ext", "(", "dirname", ",", "ext", ")", ":", "# Get all fasta-files", "try", ":", "files", "=", "os", ".", "listdir", "(", "dirname", ")", "except", "OSError", ":", "if", "os", ".", "path", ".", "exists", "(", "dirname", ")", ":", "cmd", "=", "\"find {0} -maxdepth 1 -name \\\"*\\\"\"", ".", "format", "(", "dirname", ")", "p", "=", "sp", ".", "Popen", "(", "cmd", ",", "shell", "=", "True", ",", "stdout", "=", "sp", ".", "PIPE", ",", "stderr", "=", "sp", ".", "PIPE", ")", "stdout", ",", "_stderr", "=", "p", ".", "communicate", "(", ")", "files", "=", "[", "os", ".", "path", ".", "basename", "(", "fname", ")", "for", "fname", "in", "stdout", ".", "decode", "(", ")", ".", "splitlines", "(", ")", "]", "else", ":", "raise", "retfiles", "=", "[", "os", ".", "path", ".", "join", "(", "dirname", ",", "fname", ")", "for", "fname", "in", "files", "if", "os", ".", "path", ".", "splitext", "(", "fname", ")", "[", "-", "1", "]", "in", "ext", "]", "return", "retfiles" ]
Find all files in a directory by extension.
[ "Find", "all", "files", "in", "a", "directory", "by", "extension", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/shutils.py#L32-L49
train
vanheeringen-lab/gimmemotifs
gimmemotifs/motif.py
default_motifs
def default_motifs(): """Return list of Motif instances from default motif database.""" config = MotifConfig() d = config.get_motif_dir() m = config.get_default_params()['motif_db'] if not d or not m: raise ValueError("default motif database not configured") fname = os.path.join(d, m) with open(fname) as f: motifs = read_motifs(f) return motifs
python
def default_motifs(): """Return list of Motif instances from default motif database.""" config = MotifConfig() d = config.get_motif_dir() m = config.get_default_params()['motif_db'] if not d or not m: raise ValueError("default motif database not configured") fname = os.path.join(d, m) with open(fname) as f: motifs = read_motifs(f) return motifs
[ "def", "default_motifs", "(", ")", ":", "config", "=", "MotifConfig", "(", ")", "d", "=", "config", ".", "get_motif_dir", "(", ")", "m", "=", "config", ".", "get_default_params", "(", ")", "[", "'motif_db'", "]", "if", "not", "d", "or", "not", "m", ":", "raise", "ValueError", "(", "\"default motif database not configured\"", ")", "fname", "=", "os", ".", "path", ".", "join", "(", "d", ",", "m", ")", "with", "open", "(", "fname", ")", "as", "f", ":", "motifs", "=", "read_motifs", "(", "f", ")", "return", "motifs" ]
Return list of Motif instances from default motif database.
[ "Return", "list", "of", "Motif", "instances", "from", "default", "motif", "database", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/motif.py#L1079-L1092
train
vanheeringen-lab/gimmemotifs
gimmemotifs/motif.py
motif_from_align
def motif_from_align(align): """Convert alignment to motif. Converts a list with sequences to a motif. Sequences should be the same length. Parameters ---------- align : list List with sequences (A,C,G,T). Returns ------- m : Motif instance Motif created from the aligned sequences. """ width = len(align[0]) nucs = {"A":0,"C":1,"G":2,"T":3} pfm = [[0 for _ in range(4)] for _ in range(width)] for row in align: for i in range(len(row)): pfm[i][nucs[row[i]]] += 1 m = Motif(pfm) m.align = align[:] return m
python
def motif_from_align(align): """Convert alignment to motif. Converts a list with sequences to a motif. Sequences should be the same length. Parameters ---------- align : list List with sequences (A,C,G,T). Returns ------- m : Motif instance Motif created from the aligned sequences. """ width = len(align[0]) nucs = {"A":0,"C":1,"G":2,"T":3} pfm = [[0 for _ in range(4)] for _ in range(width)] for row in align: for i in range(len(row)): pfm[i][nucs[row[i]]] += 1 m = Motif(pfm) m.align = align[:] return m
[ "def", "motif_from_align", "(", "align", ")", ":", "width", "=", "len", "(", "align", "[", "0", "]", ")", "nucs", "=", "{", "\"A\"", ":", "0", ",", "\"C\"", ":", "1", ",", "\"G\"", ":", "2", ",", "\"T\"", ":", "3", "}", "pfm", "=", "[", "[", "0", "for", "_", "in", "range", "(", "4", ")", "]", "for", "_", "in", "range", "(", "width", ")", "]", "for", "row", "in", "align", ":", "for", "i", "in", "range", "(", "len", "(", "row", ")", ")", ":", "pfm", "[", "i", "]", "[", "nucs", "[", "row", "[", "i", "]", "]", "]", "+=", "1", "m", "=", "Motif", "(", "pfm", ")", "m", ".", "align", "=", "align", "[", ":", "]", "return", "m" ]
Convert alignment to motif. Converts a list with sequences to a motif. Sequences should be the same length. Parameters ---------- align : list List with sequences (A,C,G,T). Returns ------- m : Motif instance Motif created from the aligned sequences.
[ "Convert", "alignment", "to", "motif", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/motif.py#L1094-L1118
train
vanheeringen-lab/gimmemotifs
gimmemotifs/motif.py
motif_from_consensus
def motif_from_consensus(cons, n=12): """Convert consensus sequence to motif. Converts a consensus sequences using the nucleotide IUPAC alphabet to a motif. Parameters ---------- cons : str Consensus sequence using the IUPAC alphabet. n : int , optional Count used to convert the sequence to a PFM. Returns ------- m : Motif instance Motif created from the consensus. """ width = len(cons) nucs = {"A":0,"C":1,"G":2,"T":3} pfm = [[0 for _ in range(4)] for _ in range(width)] m = Motif() for i,char in enumerate(cons): for nuc in m.iupac[char.upper()]: pfm[i][nucs[nuc]] = n / len(m.iupac[char.upper()]) m = Motif(pfm) m.id = cons return m
python
def motif_from_consensus(cons, n=12): """Convert consensus sequence to motif. Converts a consensus sequences using the nucleotide IUPAC alphabet to a motif. Parameters ---------- cons : str Consensus sequence using the IUPAC alphabet. n : int , optional Count used to convert the sequence to a PFM. Returns ------- m : Motif instance Motif created from the consensus. """ width = len(cons) nucs = {"A":0,"C":1,"G":2,"T":3} pfm = [[0 for _ in range(4)] for _ in range(width)] m = Motif() for i,char in enumerate(cons): for nuc in m.iupac[char.upper()]: pfm[i][nucs[nuc]] = n / len(m.iupac[char.upper()]) m = Motif(pfm) m.id = cons return m
[ "def", "motif_from_consensus", "(", "cons", ",", "n", "=", "12", ")", ":", "width", "=", "len", "(", "cons", ")", "nucs", "=", "{", "\"A\"", ":", "0", ",", "\"C\"", ":", "1", ",", "\"G\"", ":", "2", ",", "\"T\"", ":", "3", "}", "pfm", "=", "[", "[", "0", "for", "_", "in", "range", "(", "4", ")", "]", "for", "_", "in", "range", "(", "width", ")", "]", "m", "=", "Motif", "(", ")", "for", "i", ",", "char", "in", "enumerate", "(", "cons", ")", ":", "for", "nuc", "in", "m", ".", "iupac", "[", "char", ".", "upper", "(", ")", "]", ":", "pfm", "[", "i", "]", "[", "nucs", "[", "nuc", "]", "]", "=", "n", "/", "len", "(", "m", ".", "iupac", "[", "char", ".", "upper", "(", ")", "]", ")", "m", "=", "Motif", "(", "pfm", ")", "m", ".", "id", "=", "cons", "return", "m" ]
Convert consensus sequence to motif. Converts a consensus sequences using the nucleotide IUPAC alphabet to a motif. Parameters ---------- cons : str Consensus sequence using the IUPAC alphabet. n : int , optional Count used to convert the sequence to a PFM. Returns ------- m : Motif instance Motif created from the consensus.
[ "Convert", "consensus", "sequence", "to", "motif", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/motif.py#L1120-L1147
train
vanheeringen-lab/gimmemotifs
gimmemotifs/motif.py
parse_motifs
def parse_motifs(motifs): """Parse motifs in a variety of formats to return a list of motifs. Parameters ---------- motifs : list or str Filename of motif, list of motifs or single Motif instance. Returns ------- motifs : list List of Motif instances. """ if isinstance(motifs, six.string_types): with open(motifs) as f: if motifs.endswith("pwm") or motifs.endswith("pfm"): motifs = read_motifs(f, fmt="pwm") elif motifs.endswith("transfac"): motifs = read_motifs(f, fmt="transfac") else: motifs = read_motifs(f) elif isinstance(motifs, Motif): motifs = [motifs] else: if not isinstance(list(motifs)[0], Motif): raise ValueError("Not a list of motifs") return list(motifs)
python
def parse_motifs(motifs): """Parse motifs in a variety of formats to return a list of motifs. Parameters ---------- motifs : list or str Filename of motif, list of motifs or single Motif instance. Returns ------- motifs : list List of Motif instances. """ if isinstance(motifs, six.string_types): with open(motifs) as f: if motifs.endswith("pwm") or motifs.endswith("pfm"): motifs = read_motifs(f, fmt="pwm") elif motifs.endswith("transfac"): motifs = read_motifs(f, fmt="transfac") else: motifs = read_motifs(f) elif isinstance(motifs, Motif): motifs = [motifs] else: if not isinstance(list(motifs)[0], Motif): raise ValueError("Not a list of motifs") return list(motifs)
[ "def", "parse_motifs", "(", "motifs", ")", ":", "if", "isinstance", "(", "motifs", ",", "six", ".", "string_types", ")", ":", "with", "open", "(", "motifs", ")", "as", "f", ":", "if", "motifs", ".", "endswith", "(", "\"pwm\"", ")", "or", "motifs", ".", "endswith", "(", "\"pfm\"", ")", ":", "motifs", "=", "read_motifs", "(", "f", ",", "fmt", "=", "\"pwm\"", ")", "elif", "motifs", ".", "endswith", "(", "\"transfac\"", ")", ":", "motifs", "=", "read_motifs", "(", "f", ",", "fmt", "=", "\"transfac\"", ")", "else", ":", "motifs", "=", "read_motifs", "(", "f", ")", "elif", "isinstance", "(", "motifs", ",", "Motif", ")", ":", "motifs", "=", "[", "motifs", "]", "else", ":", "if", "not", "isinstance", "(", "list", "(", "motifs", ")", "[", "0", "]", ",", "Motif", ")", ":", "raise", "ValueError", "(", "\"Not a list of motifs\"", ")", "return", "list", "(", "motifs", ")" ]
Parse motifs in a variety of formats to return a list of motifs. Parameters ---------- motifs : list or str Filename of motif, list of motifs or single Motif instance. Returns ------- motifs : list List of Motif instances.
[ "Parse", "motifs", "in", "a", "variety", "of", "formats", "to", "return", "a", "list", "of", "motifs", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/motif.py#L1149-L1178
train
vanheeringen-lab/gimmemotifs
gimmemotifs/motif.py
_read_motifs_from_filehandle
def _read_motifs_from_filehandle(handle, fmt): """ Read motifs from a file-like object. Parameters ---------- handle : file-like object Motifs. fmt : string, optional Motif format, can be 'pwm', 'transfac', 'xxmotif', 'jaspar' or 'align'. Returns ------- motifs : list List of Motif instances. """ if fmt.lower() == "pwm": motifs = _read_motifs_pwm(handle) if fmt.lower() == "transfac": motifs = _read_motifs_transfac(handle) if fmt.lower() == "xxmotif": motifs = _read_motifs_xxmotif(handle) if fmt.lower() == "align": motifs = _read_motifs_align(handle) if fmt.lower() == "jaspar": motifs = _read_motifs_jaspar(handle) if handle.name: base = os.path.splitext(handle.name)[0] map_file = base + ".motif2factors.txt" if os.path.exists(map_file): m2f_direct = {} m2f_indirect = {} for line in open(map_file): try: motif,*factor_info = line.strip().split("\t") if len(factor_info) == 1: m2f_direct[motif] = factor_info[0].split(",") elif len(factor_info) == 3: if factor_info[2] == "Y": m2f_direct[motif] = m2f_direct.get(motif, []) + [factor_info[0]] else: m2f_indirect[motif] = m2f_indirect.get(motif, []) + [factor_info[0]] except: pass for motif in motifs: if motif.id in m2f_direct: motif.factors[DIRECT_NAME] = m2f_direct[motif.id] if motif.id in m2f_indirect: motif.factors[INDIRECT_NAME] = m2f_indirect[motif.id] for motif in motifs: for n in [DIRECT_NAME, INDIRECT_NAME]: motif.factors[n] = list(set(motif.factors[n])) return motifs
python
def _read_motifs_from_filehandle(handle, fmt): """ Read motifs from a file-like object. Parameters ---------- handle : file-like object Motifs. fmt : string, optional Motif format, can be 'pwm', 'transfac', 'xxmotif', 'jaspar' or 'align'. Returns ------- motifs : list List of Motif instances. """ if fmt.lower() == "pwm": motifs = _read_motifs_pwm(handle) if fmt.lower() == "transfac": motifs = _read_motifs_transfac(handle) if fmt.lower() == "xxmotif": motifs = _read_motifs_xxmotif(handle) if fmt.lower() == "align": motifs = _read_motifs_align(handle) if fmt.lower() == "jaspar": motifs = _read_motifs_jaspar(handle) if handle.name: base = os.path.splitext(handle.name)[0] map_file = base + ".motif2factors.txt" if os.path.exists(map_file): m2f_direct = {} m2f_indirect = {} for line in open(map_file): try: motif,*factor_info = line.strip().split("\t") if len(factor_info) == 1: m2f_direct[motif] = factor_info[0].split(",") elif len(factor_info) == 3: if factor_info[2] == "Y": m2f_direct[motif] = m2f_direct.get(motif, []) + [factor_info[0]] else: m2f_indirect[motif] = m2f_indirect.get(motif, []) + [factor_info[0]] except: pass for motif in motifs: if motif.id in m2f_direct: motif.factors[DIRECT_NAME] = m2f_direct[motif.id] if motif.id in m2f_indirect: motif.factors[INDIRECT_NAME] = m2f_indirect[motif.id] for motif in motifs: for n in [DIRECT_NAME, INDIRECT_NAME]: motif.factors[n] = list(set(motif.factors[n])) return motifs
[ "def", "_read_motifs_from_filehandle", "(", "handle", ",", "fmt", ")", ":", "if", "fmt", ".", "lower", "(", ")", "==", "\"pwm\"", ":", "motifs", "=", "_read_motifs_pwm", "(", "handle", ")", "if", "fmt", ".", "lower", "(", ")", "==", "\"transfac\"", ":", "motifs", "=", "_read_motifs_transfac", "(", "handle", ")", "if", "fmt", ".", "lower", "(", ")", "==", "\"xxmotif\"", ":", "motifs", "=", "_read_motifs_xxmotif", "(", "handle", ")", "if", "fmt", ".", "lower", "(", ")", "==", "\"align\"", ":", "motifs", "=", "_read_motifs_align", "(", "handle", ")", "if", "fmt", ".", "lower", "(", ")", "==", "\"jaspar\"", ":", "motifs", "=", "_read_motifs_jaspar", "(", "handle", ")", "if", "handle", ".", "name", ":", "base", "=", "os", ".", "path", ".", "splitext", "(", "handle", ".", "name", ")", "[", "0", "]", "map_file", "=", "base", "+", "\".motif2factors.txt\"", "if", "os", ".", "path", ".", "exists", "(", "map_file", ")", ":", "m2f_direct", "=", "{", "}", "m2f_indirect", "=", "{", "}", "for", "line", "in", "open", "(", "map_file", ")", ":", "try", ":", "motif", ",", "", "*", "factor_info", "=", "line", ".", "strip", "(", ")", ".", "split", "(", "\"\\t\"", ")", "if", "len", "(", "factor_info", ")", "==", "1", ":", "m2f_direct", "[", "motif", "]", "=", "factor_info", "[", "0", "]", ".", "split", "(", "\",\"", ")", "elif", "len", "(", "factor_info", ")", "==", "3", ":", "if", "factor_info", "[", "2", "]", "==", "\"Y\"", ":", "m2f_direct", "[", "motif", "]", "=", "m2f_direct", ".", "get", "(", "motif", ",", "[", "]", ")", "+", "[", "factor_info", "[", "0", "]", "]", "else", ":", "m2f_indirect", "[", "motif", "]", "=", "m2f_indirect", ".", "get", "(", "motif", ",", "[", "]", ")", "+", "[", "factor_info", "[", "0", "]", "]", "except", ":", "pass", "for", "motif", "in", "motifs", ":", "if", "motif", ".", "id", "in", "m2f_direct", ":", "motif", ".", "factors", "[", "DIRECT_NAME", "]", "=", "m2f_direct", "[", "motif", ".", "id", "]", "if", "motif", ".", "id", "in", "m2f_indirect", 
":", "motif", ".", "factors", "[", "INDIRECT_NAME", "]", "=", "m2f_indirect", "[", "motif", ".", "id", "]", "for", "motif", "in", "motifs", ":", "for", "n", "in", "[", "DIRECT_NAME", ",", "INDIRECT_NAME", "]", ":", "motif", ".", "factors", "[", "n", "]", "=", "list", "(", "set", "(", "motif", ".", "factors", "[", "n", "]", ")", ")", "return", "motifs" ]
Read motifs from a file-like object. Parameters ---------- handle : file-like object Motifs. fmt : string, optional Motif format, can be 'pwm', 'transfac', 'xxmotif', 'jaspar' or 'align'. Returns ------- motifs : list List of Motif instances.
[ "Read", "motifs", "from", "a", "file", "-", "like", "object", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/motif.py#L1180-L1233
train
vanheeringen-lab/gimmemotifs
gimmemotifs/motif.py
read_motifs
def read_motifs(infile=None, fmt="pwm", as_dict=False): """ Read motifs from a file or stream or file-like object. Parameters ---------- infile : string or file-like object, optional Motif database, filename of motif file or file-like object. If infile is not specified the default motifs as specified in the config file will be returned. fmt : string, optional Motif format, can be 'pwm', 'transfac', 'xxmotif', 'jaspar' or 'align'. as_dict : boolean, optional Return motifs as a dictionary with motif_id, motif pairs. Returns ------- motifs : list List of Motif instances. If as_dict is set to True, motifs is a dictionary. """ if infile is None or isinstance(infile, six.string_types): infile = pwmfile_location(infile) with open(infile) as f: motifs = _read_motifs_from_filehandle(f, fmt) else: motifs = _read_motifs_from_filehandle(infile, fmt) if as_dict: motifs = {m.id:m for m in motifs} return motifs
python
def read_motifs(infile=None, fmt="pwm", as_dict=False): """ Read motifs from a file or stream or file-like object. Parameters ---------- infile : string or file-like object, optional Motif database, filename of motif file or file-like object. If infile is not specified the default motifs as specified in the config file will be returned. fmt : string, optional Motif format, can be 'pwm', 'transfac', 'xxmotif', 'jaspar' or 'align'. as_dict : boolean, optional Return motifs as a dictionary with motif_id, motif pairs. Returns ------- motifs : list List of Motif instances. If as_dict is set to True, motifs is a dictionary. """ if infile is None or isinstance(infile, six.string_types): infile = pwmfile_location(infile) with open(infile) as f: motifs = _read_motifs_from_filehandle(f, fmt) else: motifs = _read_motifs_from_filehandle(infile, fmt) if as_dict: motifs = {m.id:m for m in motifs} return motifs
[ "def", "read_motifs", "(", "infile", "=", "None", ",", "fmt", "=", "\"pwm\"", ",", "as_dict", "=", "False", ")", ":", "if", "infile", "is", "None", "or", "isinstance", "(", "infile", ",", "six", ".", "string_types", ")", ":", "infile", "=", "pwmfile_location", "(", "infile", ")", "with", "open", "(", "infile", ")", "as", "f", ":", "motifs", "=", "_read_motifs_from_filehandle", "(", "f", ",", "fmt", ")", "else", ":", "motifs", "=", "_read_motifs_from_filehandle", "(", "infile", ",", "fmt", ")", "if", "as_dict", ":", "motifs", "=", "{", "m", ".", "id", ":", "m", "for", "m", "in", "motifs", "}", "return", "motifs" ]
Read motifs from a file or stream or file-like object. Parameters ---------- infile : string or file-like object, optional Motif database, filename of motif file or file-like object. If infile is not specified the default motifs as specified in the config file will be returned. fmt : string, optional Motif format, can be 'pwm', 'transfac', 'xxmotif', 'jaspar' or 'align'. as_dict : boolean, optional Return motifs as a dictionary with motif_id, motif pairs. Returns ------- motifs : list List of Motif instances. If as_dict is set to True, motifs is a dictionary.
[ "Read", "motifs", "from", "a", "file", "or", "stream", "or", "file", "-", "like", "object", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/motif.py#L1236-L1269
train
vanheeringen-lab/gimmemotifs
gimmemotifs/motif.py
Motif.information_content
def information_content(self): """Return the total information content of the motif. Return ------ ic : float Motif information content. """ ic = 0 for row in self.pwm: ic += 2.0 + np.sum([row[x] * log(row[x])/log(2) for x in range(4) if row[x] > 0]) return ic
python
def information_content(self): """Return the total information content of the motif. Return ------ ic : float Motif information content. """ ic = 0 for row in self.pwm: ic += 2.0 + np.sum([row[x] * log(row[x])/log(2) for x in range(4) if row[x] > 0]) return ic
[ "def", "information_content", "(", "self", ")", ":", "ic", "=", "0", "for", "row", "in", "self", ".", "pwm", ":", "ic", "+=", "2.0", "+", "np", ".", "sum", "(", "[", "row", "[", "x", "]", "*", "log", "(", "row", "[", "x", "]", ")", "/", "log", "(", "2", ")", "for", "x", "in", "range", "(", "4", ")", "if", "row", "[", "x", "]", ">", "0", "]", ")", "return", "ic" ]
Return the total information content of the motif. Return ------ ic : float Motif information content.
[ "Return", "the", "total", "information", "content", "of", "the", "motif", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/motif.py#L167-L178
train
vanheeringen-lab/gimmemotifs
gimmemotifs/motif.py
Motif.pwm_min_score
def pwm_min_score(self): """Return the minimum PWM score. Returns ------- score : float Minimum PWM score. """ if self.min_score is None: score = 0 for row in self.pwm: score += log(min(row) / 0.25 + 0.01) self.min_score = score return self.min_score
python
def pwm_min_score(self): """Return the minimum PWM score. Returns ------- score : float Minimum PWM score. """ if self.min_score is None: score = 0 for row in self.pwm: score += log(min(row) / 0.25 + 0.01) self.min_score = score return self.min_score
[ "def", "pwm_min_score", "(", "self", ")", ":", "if", "self", ".", "min_score", "is", "None", ":", "score", "=", "0", "for", "row", "in", "self", ".", "pwm", ":", "score", "+=", "log", "(", "min", "(", "row", ")", "/", "0.25", "+", "0.01", ")", "self", ".", "min_score", "=", "score", "return", "self", ".", "min_score" ]
Return the minimum PWM score. Returns ------- score : float Minimum PWM score.
[ "Return", "the", "minimum", "PWM", "score", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/motif.py#L180-L194
train
vanheeringen-lab/gimmemotifs
gimmemotifs/motif.py
Motif.pwm_max_score
def pwm_max_score(self): """Return the maximum PWM score. Returns ------- score : float Maximum PWM score. """ if self.max_score is None: score = 0 for row in self.pwm: score += log(max(row) / 0.25 + 0.01) self.max_score = score return self.max_score
python
def pwm_max_score(self): """Return the maximum PWM score. Returns ------- score : float Maximum PWM score. """ if self.max_score is None: score = 0 for row in self.pwm: score += log(max(row) / 0.25 + 0.01) self.max_score = score return self.max_score
[ "def", "pwm_max_score", "(", "self", ")", ":", "if", "self", ".", "max_score", "is", "None", ":", "score", "=", "0", "for", "row", "in", "self", ".", "pwm", ":", "score", "+=", "log", "(", "max", "(", "row", ")", "/", "0.25", "+", "0.01", ")", "self", ".", "max_score", "=", "score", "return", "self", ".", "max_score" ]
Return the maximum PWM score. Returns ------- score : float Maximum PWM score.
[ "Return", "the", "maximum", "PWM", "score", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/motif.py#L196-L210
train
vanheeringen-lab/gimmemotifs
gimmemotifs/motif.py
Motif.score_kmer
def score_kmer(self, kmer): """Calculate the log-odds score for a specific k-mer. Parameters ---------- kmer : str String representing a kmer. Should be the same length as the motif. Returns ------- score : float Log-odd score. """ if len(kmer) != len(self.pwm): raise Exception("incorrect k-mer length") score = 0.0 d = {"A":0, "C":1, "G":2, "T":3} for nuc, row in zip(kmer.upper(), self.pwm): score += log(row[d[nuc]] / 0.25 + 0.01) return score
python
def score_kmer(self, kmer): """Calculate the log-odds score for a specific k-mer. Parameters ---------- kmer : str String representing a kmer. Should be the same length as the motif. Returns ------- score : float Log-odd score. """ if len(kmer) != len(self.pwm): raise Exception("incorrect k-mer length") score = 0.0 d = {"A":0, "C":1, "G":2, "T":3} for nuc, row in zip(kmer.upper(), self.pwm): score += log(row[d[nuc]] / 0.25 + 0.01) return score
[ "def", "score_kmer", "(", "self", ",", "kmer", ")", ":", "if", "len", "(", "kmer", ")", "!=", "len", "(", "self", ".", "pwm", ")", ":", "raise", "Exception", "(", "\"incorrect k-mer length\"", ")", "score", "=", "0.0", "d", "=", "{", "\"A\"", ":", "0", ",", "\"C\"", ":", "1", ",", "\"G\"", ":", "2", ",", "\"T\"", ":", "3", "}", "for", "nuc", ",", "row", "in", "zip", "(", "kmer", ".", "upper", "(", ")", ",", "self", ".", "pwm", ")", ":", "score", "+=", "log", "(", "row", "[", "d", "[", "nuc", "]", "]", "/", "0.25", "+", "0.01", ")", "return", "score" ]
Calculate the log-odds score for a specific k-mer. Parameters ---------- kmer : str String representing a kmer. Should be the same length as the motif. Returns ------- score : float Log-odd score.
[ "Calculate", "the", "log", "-", "odds", "score", "for", "a", "specific", "k", "-", "mer", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/motif.py#L212-L233
train
vanheeringen-lab/gimmemotifs
gimmemotifs/motif.py
Motif.pfm_to_pwm
def pfm_to_pwm(self, pfm, pseudo=0.001): """Convert PFM with counts to a PFM with fractions. Parameters ---------- pfm : list 2-dimensional list with counts. pseudo : float Pseudocount used in conversion. Returns ------- pwm : list 2-dimensional list with fractions. """ return [[(x + pseudo)/(float(np.sum(row)) + pseudo * 4) for x in row] for row in pfm]
python
def pfm_to_pwm(self, pfm, pseudo=0.001): """Convert PFM with counts to a PFM with fractions. Parameters ---------- pfm : list 2-dimensional list with counts. pseudo : float Pseudocount used in conversion. Returns ------- pwm : list 2-dimensional list with fractions. """ return [[(x + pseudo)/(float(np.sum(row)) + pseudo * 4) for x in row] for row in pfm]
[ "def", "pfm_to_pwm", "(", "self", ",", "pfm", ",", "pseudo", "=", "0.001", ")", ":", "return", "[", "[", "(", "x", "+", "pseudo", ")", "/", "(", "float", "(", "np", ".", "sum", "(", "row", ")", ")", "+", "pseudo", "*", "4", ")", "for", "x", "in", "row", "]", "for", "row", "in", "pfm", "]" ]
Convert PFM with counts to a PFM with fractions. Parameters ---------- pfm : list 2-dimensional list with counts. pseudo : float Pseudocount used in conversion. Returns ------- pwm : list 2-dimensional list with fractions.
[ "Convert", "PFM", "with", "counts", "to", "a", "PFM", "with", "fractions", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/motif.py#L235-L250
train
vanheeringen-lab/gimmemotifs
gimmemotifs/motif.py
Motif.to_motevo
def to_motevo(self): """Return motif formatted in MotEvo (TRANSFAC-like) format Returns ------- m : str String of motif in MotEvo format. """ m = "//\n" m += "NA {}\n".format(self.id) m += "P0\tA\tC\tG\tT\n" for i, row in enumerate(self.pfm): m += "{}\t{}\n".format(i, "\t".join([str(int(x)) for x in row])) m += "//" return m
python
def to_motevo(self): """Return motif formatted in MotEvo (TRANSFAC-like) format Returns ------- m : str String of motif in MotEvo format. """ m = "//\n" m += "NA {}\n".format(self.id) m += "P0\tA\tC\tG\tT\n" for i, row in enumerate(self.pfm): m += "{}\t{}\n".format(i, "\t".join([str(int(x)) for x in row])) m += "//" return m
[ "def", "to_motevo", "(", "self", ")", ":", "m", "=", "\"//\\n\"", "m", "+=", "\"NA {}\\n\"", ".", "format", "(", "self", ".", "id", ")", "m", "+=", "\"P0\\tA\\tC\\tG\\tT\\n\"", "for", "i", ",", "row", "in", "enumerate", "(", "self", ".", "pfm", ")", ":", "m", "+=", "\"{}\\t{}\\n\"", ".", "format", "(", "i", ",", "\"\\t\"", ".", "join", "(", "[", "str", "(", "int", "(", "x", ")", ")", "for", "x", "in", "row", "]", ")", ")", "m", "+=", "\"//\"", "return", "m" ]
Return motif formatted in MotEvo (TRANSFAC-like) format Returns ------- m : str String of motif in MotEvo format.
[ "Return", "motif", "formatted", "in", "MotEvo", "(", "TRANSFAC", "-", "like", ")", "format", "Returns", "-------", "m", ":", "str", "String", "of", "motif", "in", "MotEvo", "format", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/motif.py#L252-L266
train
vanheeringen-lab/gimmemotifs
gimmemotifs/motif.py
Motif.to_transfac
def to_transfac(self): """Return motif formatted in TRANSFAC format Returns ------- m : str String of motif in TRANSFAC format. """ m = "%s\t%s\t%s\n" % ("DE", self.id, "unknown") for i, (row, cons) in enumerate(zip(self.pfm, self.to_consensus())): m += "%i\t%s\t%s\n" % (i, "\t".join([str(int(x)) for x in row]), cons) m += "XX" return m
python
def to_transfac(self): """Return motif formatted in TRANSFAC format Returns ------- m : str String of motif in TRANSFAC format. """ m = "%s\t%s\t%s\n" % ("DE", self.id, "unknown") for i, (row, cons) in enumerate(zip(self.pfm, self.to_consensus())): m += "%i\t%s\t%s\n" % (i, "\t".join([str(int(x)) for x in row]), cons) m += "XX" return m
[ "def", "to_transfac", "(", "self", ")", ":", "m", "=", "\"%s\\t%s\\t%s\\n\"", "%", "(", "\"DE\"", ",", "self", ".", "id", ",", "\"unknown\"", ")", "for", "i", ",", "(", "row", ",", "cons", ")", "in", "enumerate", "(", "zip", "(", "self", ".", "pfm", ",", "self", ".", "to_consensus", "(", ")", ")", ")", ":", "m", "+=", "\"%i\\t%s\\t%s\\n\"", "%", "(", "i", ",", "\"\\t\"", ".", "join", "(", "[", "str", "(", "int", "(", "x", ")", ")", "for", "x", "in", "row", "]", ")", ",", "cons", ")", "m", "+=", "\"XX\"", "return", "m" ]
Return motif formatted in TRANSFAC format Returns ------- m : str String of motif in TRANSFAC format.
[ "Return", "motif", "formatted", "in", "TRANSFAC", "format", "Returns", "-------", "m", ":", "str", "String", "of", "motif", "in", "TRANSFAC", "format", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/motif.py#L268-L280
train
vanheeringen-lab/gimmemotifs
gimmemotifs/motif.py
Motif.to_meme
def to_meme(self): """Return motif formatted in MEME format Returns ------- m : str String of motif in MEME format. """ motif_id = self.id.replace(" ", "_") m = "MOTIF %s\n" % motif_id m += "BL MOTIF %s width=0 seqs=0\n"% motif_id m += "letter-probability matrix: alength= 4 w= %s nsites= %s E= 0\n" % (len(self), np.sum(self.pfm[0])) m +="\n".join(["\t".join(["%s" % x for x in row]) for row in self.pwm]) return m
python
def to_meme(self): """Return motif formatted in MEME format Returns ------- m : str String of motif in MEME format. """ motif_id = self.id.replace(" ", "_") m = "MOTIF %s\n" % motif_id m += "BL MOTIF %s width=0 seqs=0\n"% motif_id m += "letter-probability matrix: alength= 4 w= %s nsites= %s E= 0\n" % (len(self), np.sum(self.pfm[0])) m +="\n".join(["\t".join(["%s" % x for x in row]) for row in self.pwm]) return m
[ "def", "to_meme", "(", "self", ")", ":", "motif_id", "=", "self", ".", "id", ".", "replace", "(", "\" \"", ",", "\"_\"", ")", "m", "=", "\"MOTIF %s\\n\"", "%", "motif_id", "m", "+=", "\"BL MOTIF %s width=0 seqs=0\\n\"", "%", "motif_id", "m", "+=", "\"letter-probability matrix: alength= 4 w= %s nsites= %s E= 0\\n\"", "%", "(", "len", "(", "self", ")", ",", "np", ".", "sum", "(", "self", ".", "pfm", "[", "0", "]", ")", ")", "m", "+=", "\"\\n\"", ".", "join", "(", "[", "\"\\t\"", ".", "join", "(", "[", "\"%s\"", "%", "x", "for", "x", "in", "row", "]", ")", "for", "row", "in", "self", ".", "pwm", "]", ")", "return", "m" ]
Return motif formatted in MEME format Returns ------- m : str String of motif in MEME format.
[ "Return", "motif", "formatted", "in", "MEME", "format", "Returns", "-------", "m", ":", "str", "String", "of", "motif", "in", "MEME", "format", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/motif.py#L282-L295
train
vanheeringen-lab/gimmemotifs
gimmemotifs/motif.py
Motif.ic_pos
def ic_pos(self, row1, row2=None): """Calculate the information content of one position. Returns ------- score : float Information content. """ if row2 is None: row2 = [0.25,0.25,0.25,0.25] score = 0 for a,b in zip(row1, row2): if a > 0: score += a * log(a / b) / log(2) return score
python
def ic_pos(self, row1, row2=None): """Calculate the information content of one position. Returns ------- score : float Information content. """ if row2 is None: row2 = [0.25,0.25,0.25,0.25] score = 0 for a,b in zip(row1, row2): if a > 0: score += a * log(a / b) / log(2) return score
[ "def", "ic_pos", "(", "self", ",", "row1", ",", "row2", "=", "None", ")", ":", "if", "row2", "is", "None", ":", "row2", "=", "[", "0.25", ",", "0.25", ",", "0.25", ",", "0.25", "]", "score", "=", "0", "for", "a", ",", "b", "in", "zip", "(", "row1", ",", "row2", ")", ":", "if", "a", ">", "0", ":", "score", "+=", "a", "*", "log", "(", "a", "/", "b", ")", "/", "log", "(", "2", ")", "return", "score" ]
Calculate the information content of one position. Returns ------- score : float Information content.
[ "Calculate", "the", "information", "content", "of", "one", "position", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/motif.py#L297-L312
train
vanheeringen-lab/gimmemotifs
gimmemotifs/motif.py
Motif.pcc_pos
def pcc_pos(self, row1, row2): """Calculate the Pearson correlation coefficient of one position compared to another position. Returns ------- score : float Pearson correlation coefficient. """ mean1 = np.mean(row1) mean2 = np.mean(row2) a = 0 x = 0 y = 0 for n1, n2 in zip(row1, row2): a += (n1 - mean1) * (n2 - mean2) x += (n1 - mean1) ** 2 y += (n2 - mean2) ** 2 if a == 0: return 0 else: return a / sqrt(x * y)
python
def pcc_pos(self, row1, row2): """Calculate the Pearson correlation coefficient of one position compared to another position. Returns ------- score : float Pearson correlation coefficient. """ mean1 = np.mean(row1) mean2 = np.mean(row2) a = 0 x = 0 y = 0 for n1, n2 in zip(row1, row2): a += (n1 - mean1) * (n2 - mean2) x += (n1 - mean1) ** 2 y += (n2 - mean2) ** 2 if a == 0: return 0 else: return a / sqrt(x * y)
[ "def", "pcc_pos", "(", "self", ",", "row1", ",", "row2", ")", ":", "mean1", "=", "np", ".", "mean", "(", "row1", ")", "mean2", "=", "np", ".", "mean", "(", "row2", ")", "a", "=", "0", "x", "=", "0", "y", "=", "0", "for", "n1", ",", "n2", "in", "zip", "(", "row1", ",", "row2", ")", ":", "a", "+=", "(", "n1", "-", "mean1", ")", "*", "(", "n2", "-", "mean2", ")", "x", "+=", "(", "n1", "-", "mean1", ")", "**", "2", "y", "+=", "(", "n2", "-", "mean2", ")", "**", "2", "if", "a", "==", "0", ":", "return", "0", "else", ":", "return", "a", "/", "sqrt", "(", "x", "*", "y", ")" ]
Calculate the Pearson correlation coefficient of one position compared to another position. Returns ------- score : float Pearson correlation coefficient.
[ "Calculate", "the", "Pearson", "correlation", "coefficient", "of", "one", "position", "compared", "to", "another", "position", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/motif.py#L314-L337
train
vanheeringen-lab/gimmemotifs
gimmemotifs/motif.py
Motif.rc
def rc(self): """Return the reverse complemented motif. Returns ------- m : Motif instance New Motif instance with the reverse complement of the input motif. """ m = Motif() m.pfm = [row[::-1] for row in self.pfm[::-1]] m.pwm = [row[::-1] for row in self.pwm[::-1]] m.id = self.id + "_revcomp" return m
python
def rc(self): """Return the reverse complemented motif. Returns ------- m : Motif instance New Motif instance with the reverse complement of the input motif. """ m = Motif() m.pfm = [row[::-1] for row in self.pfm[::-1]] m.pwm = [row[::-1] for row in self.pwm[::-1]] m.id = self.id + "_revcomp" return m
[ "def", "rc", "(", "self", ")", ":", "m", "=", "Motif", "(", ")", "m", ".", "pfm", "=", "[", "row", "[", ":", ":", "-", "1", "]", "for", "row", "in", "self", ".", "pfm", "[", ":", ":", "-", "1", "]", "]", "m", ".", "pwm", "=", "[", "row", "[", ":", ":", "-", "1", "]", "for", "row", "in", "self", ".", "pwm", "[", ":", ":", "-", "1", "]", "]", "m", ".", "id", "=", "self", ".", "id", "+", "\"_revcomp\"", "return", "m" ]
Return the reverse complemented motif. Returns ------- m : Motif instance New Motif instance with the reverse complement of the input motif.
[ "Return", "the", "reverse", "complemented", "motif", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/motif.py#L339-L351
train
vanheeringen-lab/gimmemotifs
gimmemotifs/motif.py
Motif.trim
def trim(self, edge_ic_cutoff=0.4): """Trim positions with an information content lower than the threshold. The default threshold is set to 0.4. The Motif will be changed in-place. Parameters ---------- edge_ic_cutoff : float, optional Information content threshold. All motif positions at the flanks with an information content lower thab this will be removed. Returns ------- m : Motif instance """ pwm = self.pwm[:] while len(pwm) > 0 and self.ic_pos(pwm[0]) < edge_ic_cutoff: pwm = pwm[1:] self.pwm = self.pwm[1:] self.pfm = self.pfm[1:] while len(pwm) > 0 and self.ic_pos(pwm[-1]) < edge_ic_cutoff: pwm = pwm[:-1] self.pwm = self.pwm[:-1] self.pfm = self.pfm[:-1] self.consensus = None self.min_score = None self.max_score = None self.wiggled_pwm = None return self
python
def trim(self, edge_ic_cutoff=0.4): """Trim positions with an information content lower than the threshold. The default threshold is set to 0.4. The Motif will be changed in-place. Parameters ---------- edge_ic_cutoff : float, optional Information content threshold. All motif positions at the flanks with an information content lower thab this will be removed. Returns ------- m : Motif instance """ pwm = self.pwm[:] while len(pwm) > 0 and self.ic_pos(pwm[0]) < edge_ic_cutoff: pwm = pwm[1:] self.pwm = self.pwm[1:] self.pfm = self.pfm[1:] while len(pwm) > 0 and self.ic_pos(pwm[-1]) < edge_ic_cutoff: pwm = pwm[:-1] self.pwm = self.pwm[:-1] self.pfm = self.pfm[:-1] self.consensus = None self.min_score = None self.max_score = None self.wiggled_pwm = None return self
[ "def", "trim", "(", "self", ",", "edge_ic_cutoff", "=", "0.4", ")", ":", "pwm", "=", "self", ".", "pwm", "[", ":", "]", "while", "len", "(", "pwm", ")", ">", "0", "and", "self", ".", "ic_pos", "(", "pwm", "[", "0", "]", ")", "<", "edge_ic_cutoff", ":", "pwm", "=", "pwm", "[", "1", ":", "]", "self", ".", "pwm", "=", "self", ".", "pwm", "[", "1", ":", "]", "self", ".", "pfm", "=", "self", ".", "pfm", "[", "1", ":", "]", "while", "len", "(", "pwm", ")", ">", "0", "and", "self", ".", "ic_pos", "(", "pwm", "[", "-", "1", "]", ")", "<", "edge_ic_cutoff", ":", "pwm", "=", "pwm", "[", ":", "-", "1", "]", "self", ".", "pwm", "=", "self", ".", "pwm", "[", ":", "-", "1", "]", "self", ".", "pfm", "=", "self", ".", "pfm", "[", ":", "-", "1", "]", "self", ".", "consensus", "=", "None", "self", ".", "min_score", "=", "None", "self", ".", "max_score", "=", "None", "self", ".", "wiggled_pwm", "=", "None", "return", "self" ]
Trim positions with an information content lower than the threshold. The default threshold is set to 0.4. The Motif will be changed in-place. Parameters ---------- edge_ic_cutoff : float, optional Information content threshold. All motif positions at the flanks with an information content lower thab this will be removed. Returns ------- m : Motif instance
[ "Trim", "positions", "with", "an", "information", "content", "lower", "than", "the", "threshold", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/motif.py#L353-L383
train
vanheeringen-lab/gimmemotifs
gimmemotifs/motif.py
Motif.consensus_scan
def consensus_scan(self, fa): """Scan FASTA with the motif as a consensus sequence. Parameters ---------- fa : Fasta object Fasta object to scan Returns ------- matches : dict Dictionaru with matches. """ regexp = "".join(["[" + "".join(self.iupac[x.upper()]) + "]" for x in self.to_consensusv2()]) p = re.compile(regexp) matches = {} for name,seq in fa.items(): matches[name] = [] for match in p.finditer(seq): middle = (match.span()[1] + match.span()[0]) / 2 matches[name].append(middle) return matches
python
def consensus_scan(self, fa): """Scan FASTA with the motif as a consensus sequence. Parameters ---------- fa : Fasta object Fasta object to scan Returns ------- matches : dict Dictionaru with matches. """ regexp = "".join(["[" + "".join(self.iupac[x.upper()]) + "]" for x in self.to_consensusv2()]) p = re.compile(regexp) matches = {} for name,seq in fa.items(): matches[name] = [] for match in p.finditer(seq): middle = (match.span()[1] + match.span()[0]) / 2 matches[name].append(middle) return matches
[ "def", "consensus_scan", "(", "self", ",", "fa", ")", ":", "regexp", "=", "\"\"", ".", "join", "(", "[", "\"[\"", "+", "\"\"", ".", "join", "(", "self", ".", "iupac", "[", "x", ".", "upper", "(", ")", "]", ")", "+", "\"]\"", "for", "x", "in", "self", ".", "to_consensusv2", "(", ")", "]", ")", "p", "=", "re", ".", "compile", "(", "regexp", ")", "matches", "=", "{", "}", "for", "name", ",", "seq", "in", "fa", ".", "items", "(", ")", ":", "matches", "[", "name", "]", "=", "[", "]", "for", "match", "in", "p", ".", "finditer", "(", "seq", ")", ":", "middle", "=", "(", "match", ".", "span", "(", ")", "[", "1", "]", "+", "match", ".", "span", "(", ")", "[", "0", "]", ")", "/", "2", "matches", "[", "name", "]", ".", "append", "(", "middle", ")", "return", "matches" ]
Scan FASTA with the motif as a consensus sequence. Parameters ---------- fa : Fasta object Fasta object to scan Returns ------- matches : dict Dictionaru with matches.
[ "Scan", "FASTA", "with", "the", "motif", "as", "a", "consensus", "sequence", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/motif.py#L385-L406
train
vanheeringen-lab/gimmemotifs
gimmemotifs/motif.py
Motif.pwm_scan
def pwm_scan(self, fa, cutoff=0.9, nreport=50, scan_rc=True): """Scan sequences with this motif. Scan sequences from a FASTA object with this motif. Less efficient than using a Scanner object. By setting the cutoff to 0.0 and nreport to 1, the best match for every sequence will be returned. Only the position of the matches is returned. Parameters ---------- fa : Fasta object Fasta object to scan. cutoff : float , optional Cutoff to use for motif scanning. This cutoff is not specifically optimized and the strictness will vary a lot with motif lengh. nreport : int , optional Maximum number of matches to report. scan_rc : bool , optional Scan the reverse complement. True by default. Returns ------- matches : dict Dictionary with motif matches. Only the position of the matches is returned. """ c = self.pwm_min_score() + ( self.pwm_max_score() - self.pwm_min_score()) * cutoff pwm = self.pwm matches = {} for name, seq in fa.items(): matches[name] = [] result = pfmscan(seq.upper(), pwm, c, nreport, scan_rc) for _,pos,_ in result: matches[name].append(pos) return matches
python
def pwm_scan(self, fa, cutoff=0.9, nreport=50, scan_rc=True): """Scan sequences with this motif. Scan sequences from a FASTA object with this motif. Less efficient than using a Scanner object. By setting the cutoff to 0.0 and nreport to 1, the best match for every sequence will be returned. Only the position of the matches is returned. Parameters ---------- fa : Fasta object Fasta object to scan. cutoff : float , optional Cutoff to use for motif scanning. This cutoff is not specifically optimized and the strictness will vary a lot with motif lengh. nreport : int , optional Maximum number of matches to report. scan_rc : bool , optional Scan the reverse complement. True by default. Returns ------- matches : dict Dictionary with motif matches. Only the position of the matches is returned. """ c = self.pwm_min_score() + ( self.pwm_max_score() - self.pwm_min_score()) * cutoff pwm = self.pwm matches = {} for name, seq in fa.items(): matches[name] = [] result = pfmscan(seq.upper(), pwm, c, nreport, scan_rc) for _,pos,_ in result: matches[name].append(pos) return matches
[ "def", "pwm_scan", "(", "self", ",", "fa", ",", "cutoff", "=", "0.9", ",", "nreport", "=", "50", ",", "scan_rc", "=", "True", ")", ":", "c", "=", "self", ".", "pwm_min_score", "(", ")", "+", "(", "self", ".", "pwm_max_score", "(", ")", "-", "self", ".", "pwm_min_score", "(", ")", ")", "*", "cutoff", "pwm", "=", "self", ".", "pwm", "matches", "=", "{", "}", "for", "name", ",", "seq", "in", "fa", ".", "items", "(", ")", ":", "matches", "[", "name", "]", "=", "[", "]", "result", "=", "pfmscan", "(", "seq", ".", "upper", "(", ")", ",", "pwm", ",", "c", ",", "nreport", ",", "scan_rc", ")", "for", "_", ",", "pos", ",", "_", "in", "result", ":", "matches", "[", "name", "]", ".", "append", "(", "pos", ")", "return", "matches" ]
Scan sequences with this motif. Scan sequences from a FASTA object with this motif. Less efficient than using a Scanner object. By setting the cutoff to 0.0 and nreport to 1, the best match for every sequence will be returned. Only the position of the matches is returned. Parameters ---------- fa : Fasta object Fasta object to scan. cutoff : float , optional Cutoff to use for motif scanning. This cutoff is not specifically optimized and the strictness will vary a lot with motif lengh. nreport : int , optional Maximum number of matches to report. scan_rc : bool , optional Scan the reverse complement. True by default. Returns ------- matches : dict Dictionary with motif matches. Only the position of the matches is returned.
[ "Scan", "sequences", "with", "this", "motif", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/motif.py#L408-L443
train
vanheeringen-lab/gimmemotifs
gimmemotifs/motif.py
Motif.pwm_scan_all
def pwm_scan_all(self, fa, cutoff=0.9, nreport=50, scan_rc=True): """Scan sequences with this motif. Scan sequences from a FASTA object with this motif. Less efficient than using a Scanner object. By setting the cutoff to 0.0 and nreport to 1, the best match for every sequence will be returned. The score, position and strand for every match is returned. Parameters ---------- fa : Fasta object Fasta object to scan. cutoff : float , optional Cutoff to use for motif scanning. This cutoff is not specifically optimized and the strictness will vary a lot with motif lengh. nreport : int , optional Maximum number of matches to report. scan_rc : bool , optional Scan the reverse complement. True by default. Returns ------- matches : dict Dictionary with motif matches. The score, position and strand for every match is returned. """ c = self.pwm_min_score() + (self.pwm_max_score() - self.pwm_min_score()) * cutoff pwm = self.pwm matches = {} for name, seq in fa.items(): matches[name] = [] result = pfmscan(seq.upper(), pwm, c, nreport, scan_rc) for score,pos,strand in result: matches[name].append((pos,score,strand)) return matches
python
def pwm_scan_all(self, fa, cutoff=0.9, nreport=50, scan_rc=True): """Scan sequences with this motif. Scan sequences from a FASTA object with this motif. Less efficient than using a Scanner object. By setting the cutoff to 0.0 and nreport to 1, the best match for every sequence will be returned. The score, position and strand for every match is returned. Parameters ---------- fa : Fasta object Fasta object to scan. cutoff : float , optional Cutoff to use for motif scanning. This cutoff is not specifically optimized and the strictness will vary a lot with motif lengh. nreport : int , optional Maximum number of matches to report. scan_rc : bool , optional Scan the reverse complement. True by default. Returns ------- matches : dict Dictionary with motif matches. The score, position and strand for every match is returned. """ c = self.pwm_min_score() + (self.pwm_max_score() - self.pwm_min_score()) * cutoff pwm = self.pwm matches = {} for name, seq in fa.items(): matches[name] = [] result = pfmscan(seq.upper(), pwm, c, nreport, scan_rc) for score,pos,strand in result: matches[name].append((pos,score,strand)) return matches
[ "def", "pwm_scan_all", "(", "self", ",", "fa", ",", "cutoff", "=", "0.9", ",", "nreport", "=", "50", ",", "scan_rc", "=", "True", ")", ":", "c", "=", "self", ".", "pwm_min_score", "(", ")", "+", "(", "self", ".", "pwm_max_score", "(", ")", "-", "self", ".", "pwm_min_score", "(", ")", ")", "*", "cutoff", "pwm", "=", "self", ".", "pwm", "matches", "=", "{", "}", "for", "name", ",", "seq", "in", "fa", ".", "items", "(", ")", ":", "matches", "[", "name", "]", "=", "[", "]", "result", "=", "pfmscan", "(", "seq", ".", "upper", "(", ")", ",", "pwm", ",", "c", ",", "nreport", ",", "scan_rc", ")", "for", "score", ",", "pos", ",", "strand", "in", "result", ":", "matches", "[", "name", "]", ".", "append", "(", "(", "pos", ",", "score", ",", "strand", ")", ")", "return", "matches" ]
Scan sequences with this motif. Scan sequences from a FASTA object with this motif. Less efficient than using a Scanner object. By setting the cutoff to 0.0 and nreport to 1, the best match for every sequence will be returned. The score, position and strand for every match is returned. Parameters ---------- fa : Fasta object Fasta object to scan. cutoff : float , optional Cutoff to use for motif scanning. This cutoff is not specifically optimized and the strictness will vary a lot with motif lengh. nreport : int , optional Maximum number of matches to report. scan_rc : bool , optional Scan the reverse complement. True by default. Returns ------- matches : dict Dictionary with motif matches. The score, position and strand for every match is returned.
[ "Scan", "sequences", "with", "this", "motif", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/motif.py#L445-L479
train
vanheeringen-lab/gimmemotifs
gimmemotifs/motif.py
Motif.pwm_scan_to_gff
def pwm_scan_to_gff(self, fa, gfffile, cutoff=0.9, nreport=50, scan_rc=True, append=False): """Scan sequences with this motif and save to a GFF file. Scan sequences from a FASTA object with this motif. Less efficient than using a Scanner object. By setting the cutoff to 0.0 and nreport to 1, the best match for every sequence will be returned. The output is save to a file in GFF format. Parameters ---------- fa : Fasta object Fasta object to scan. gfffile : str Filename of GFF output file. cutoff : float , optional Cutoff to use for motif scanning. This cutoff is not specifically optimized and the strictness will vary a lot with motif lengh. nreport : int , optional Maximum number of matches to report. scan_rc : bool , optional Scan the reverse complement. True by default. append : bool , optional Append to GFF file instead of overwriting it. False by default. """ if append: out = open(gfffile, "a") else: out = open(gfffile, "w") c = self.pwm_min_score() + (self.pwm_max_score() - self.pwm_min_score()) * cutoff pwm = self.pwm strandmap = {-1:"-","-1":"-","-":"-","1":"+",1:"+","+":"+"} gff_line = ("{}\tpfmscan\tmisc_feature\t{}\t{}\t{:.3f}\t{}\t.\t" "motif_name \"{}\" ; motif_instance \"{}\"\n") for name, seq in fa.items(): result = pfmscan(seq.upper(), pwm, c, nreport, scan_rc) for score, pos, strand in result: out.write(gff_line.format( name, pos, pos + len(pwm), score, strandmap[strand], self.id, seq[pos:pos + len(pwm)] )) out.close()
python
def pwm_scan_to_gff(self, fa, gfffile, cutoff=0.9, nreport=50, scan_rc=True, append=False): """Scan sequences with this motif and save to a GFF file. Scan sequences from a FASTA object with this motif. Less efficient than using a Scanner object. By setting the cutoff to 0.0 and nreport to 1, the best match for every sequence will be returned. The output is save to a file in GFF format. Parameters ---------- fa : Fasta object Fasta object to scan. gfffile : str Filename of GFF output file. cutoff : float , optional Cutoff to use for motif scanning. This cutoff is not specifically optimized and the strictness will vary a lot with motif lengh. nreport : int , optional Maximum number of matches to report. scan_rc : bool , optional Scan the reverse complement. True by default. append : bool , optional Append to GFF file instead of overwriting it. False by default. """ if append: out = open(gfffile, "a") else: out = open(gfffile, "w") c = self.pwm_min_score() + (self.pwm_max_score() - self.pwm_min_score()) * cutoff pwm = self.pwm strandmap = {-1:"-","-1":"-","-":"-","1":"+",1:"+","+":"+"} gff_line = ("{}\tpfmscan\tmisc_feature\t{}\t{}\t{:.3f}\t{}\t.\t" "motif_name \"{}\" ; motif_instance \"{}\"\n") for name, seq in fa.items(): result = pfmscan(seq.upper(), pwm, c, nreport, scan_rc) for score, pos, strand in result: out.write(gff_line.format( name, pos, pos + len(pwm), score, strandmap[strand], self.id, seq[pos:pos + len(pwm)] )) out.close()
[ "def", "pwm_scan_to_gff", "(", "self", ",", "fa", ",", "gfffile", ",", "cutoff", "=", "0.9", ",", "nreport", "=", "50", ",", "scan_rc", "=", "True", ",", "append", "=", "False", ")", ":", "if", "append", ":", "out", "=", "open", "(", "gfffile", ",", "\"a\"", ")", "else", ":", "out", "=", "open", "(", "gfffile", ",", "\"w\"", ")", "c", "=", "self", ".", "pwm_min_score", "(", ")", "+", "(", "self", ".", "pwm_max_score", "(", ")", "-", "self", ".", "pwm_min_score", "(", ")", ")", "*", "cutoff", "pwm", "=", "self", ".", "pwm", "strandmap", "=", "{", "-", "1", ":", "\"-\"", ",", "\"-1\"", ":", "\"-\"", ",", "\"-\"", ":", "\"-\"", ",", "\"1\"", ":", "\"+\"", ",", "1", ":", "\"+\"", ",", "\"+\"", ":", "\"+\"", "}", "gff_line", "=", "(", "\"{}\\tpfmscan\\tmisc_feature\\t{}\\t{}\\t{:.3f}\\t{}\\t.\\t\"", "\"motif_name \\\"{}\\\" ; motif_instance \\\"{}\\\"\\n\"", ")", "for", "name", ",", "seq", "in", "fa", ".", "items", "(", ")", ":", "result", "=", "pfmscan", "(", "seq", ".", "upper", "(", ")", ",", "pwm", ",", "c", ",", "nreport", ",", "scan_rc", ")", "for", "score", ",", "pos", ",", "strand", "in", "result", ":", "out", ".", "write", "(", "gff_line", ".", "format", "(", "name", ",", "pos", ",", "pos", "+", "len", "(", "pwm", ")", ",", "score", ",", "strandmap", "[", "strand", "]", ",", "self", ".", "id", ",", "seq", "[", "pos", ":", "pos", "+", "len", "(", "pwm", ")", "]", ")", ")", "out", ".", "close", "(", ")" ]
Scan sequences with this motif and save to a GFF file. Scan sequences from a FASTA object with this motif. Less efficient than using a Scanner object. By setting the cutoff to 0.0 and nreport to 1, the best match for every sequence will be returned. The output is save to a file in GFF format. Parameters ---------- fa : Fasta object Fasta object to scan. gfffile : str Filename of GFF output file. cutoff : float , optional Cutoff to use for motif scanning. This cutoff is not specifically optimized and the strictness will vary a lot with motif lengh. nreport : int , optional Maximum number of matches to report. scan_rc : bool , optional Scan the reverse complement. True by default. append : bool , optional Append to GFF file instead of overwriting it. False by default.
[ "Scan", "sequences", "with", "this", "motif", "and", "save", "to", "a", "GFF", "file", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/motif.py#L517-L564
train
vanheeringen-lab/gimmemotifs
gimmemotifs/motif.py
Motif.average_motifs
def average_motifs(self, other, pos, orientation, include_bg=False): """Return the average of two motifs. Combine this motif with another motif and return the average as a new Motif object. The position and orientatien need to be supplied. The pos parameter is the position of the second motif relative to this motif. For example, take the following two motifs: Motif 1: CATGYT Motif 2: GGCTTGY With position -2, the motifs are averaged as follows: xxCATGYT GGCTTGYx Parameters ---------- other : Motif object Other Motif object. pos : int Position of the second motif relative to this motif. orientation : int Orientation, should be 1 or -1. If the orientation is -1 then the reverse complement of the other motif is used for averaging. include_bg : bool , optional Extend both motifs with background frequencies (0.25) before averaging. False by default. Returns ------- motif : motif object New Motif object containing average motif. """ # xxCATGYT # GGCTTGYx # pos = -2 pfm1 = self.pfm[:] pfm2 = other.pfm[:] if orientation < 0: pfm2 = [row[::-1] for row in pfm2[::-1]] pfm1_count = float(np.sum(pfm1[0])) pfm2_count = float(np.sum(pfm2[0])) if include_bg: if len(pfm1) > len(pfm2) + pos: pfm2 += [[pfm2_count / 4.0 for x in range(4)] for i in range(-(len(pfm1) - len(pfm2) - pos), 0)] elif len(pfm2) + pos > len(pfm1): pfm1 += [[pfm1_count / 4.0 for x in range(4)] for i in range(-(len(pfm2) - len(pfm1) + pos), 0)] if pos < 0: pfm1 = [[pfm1_count / 4.0 for x in range(4)] for i in range(-pos)] + pfm1 elif pos > 0: pfm2 = [[pfm2_count / 4.0 for x in range(4)] for i in range(pos)] + pfm2 else: if len(pfm1) > len(pfm2) + pos: pfm2 += [[pfm1[i][x] / pfm1_count * (pfm2_count) for x in range(4)] for i in range(-(len(pfm1) - len(pfm2) - pos), 0)] elif len(pfm2) + pos > len(pfm1): pfm1 += [[pfm2[i][x] / pfm2_count * (pfm1_count) for x in range(4)] for i in range(-(len(pfm2) - len(pfm1) + pos), 0)] if pos < 0: pfm1 = [[pfm2[i][x] / pfm2_count * (pfm1_count) for x in range(4)] for i in 
range(-pos)] + pfm1 elif pos > 0: pfm2 = [[pfm1[i][x] / pfm1_count * (pfm2_count) for x in range(4)] for i in range(pos)] + pfm2 pfm = [[a + b for a,b in zip(x,y)] for x,y in zip(pfm1, pfm2)] m = Motif(pfm) m.id = m.to_consensus() return m
python
def average_motifs(self, other, pos, orientation, include_bg=False): """Return the average of two motifs. Combine this motif with another motif and return the average as a new Motif object. The position and orientatien need to be supplied. The pos parameter is the position of the second motif relative to this motif. For example, take the following two motifs: Motif 1: CATGYT Motif 2: GGCTTGY With position -2, the motifs are averaged as follows: xxCATGYT GGCTTGYx Parameters ---------- other : Motif object Other Motif object. pos : int Position of the second motif relative to this motif. orientation : int Orientation, should be 1 or -1. If the orientation is -1 then the reverse complement of the other motif is used for averaging. include_bg : bool , optional Extend both motifs with background frequencies (0.25) before averaging. False by default. Returns ------- motif : motif object New Motif object containing average motif. """ # xxCATGYT # GGCTTGYx # pos = -2 pfm1 = self.pfm[:] pfm2 = other.pfm[:] if orientation < 0: pfm2 = [row[::-1] for row in pfm2[::-1]] pfm1_count = float(np.sum(pfm1[0])) pfm2_count = float(np.sum(pfm2[0])) if include_bg: if len(pfm1) > len(pfm2) + pos: pfm2 += [[pfm2_count / 4.0 for x in range(4)] for i in range(-(len(pfm1) - len(pfm2) - pos), 0)] elif len(pfm2) + pos > len(pfm1): pfm1 += [[pfm1_count / 4.0 for x in range(4)] for i in range(-(len(pfm2) - len(pfm1) + pos), 0)] if pos < 0: pfm1 = [[pfm1_count / 4.0 for x in range(4)] for i in range(-pos)] + pfm1 elif pos > 0: pfm2 = [[pfm2_count / 4.0 for x in range(4)] for i in range(pos)] + pfm2 else: if len(pfm1) > len(pfm2) + pos: pfm2 += [[pfm1[i][x] / pfm1_count * (pfm2_count) for x in range(4)] for i in range(-(len(pfm1) - len(pfm2) - pos), 0)] elif len(pfm2) + pos > len(pfm1): pfm1 += [[pfm2[i][x] / pfm2_count * (pfm1_count) for x in range(4)] for i in range(-(len(pfm2) - len(pfm1) + pos), 0)] if pos < 0: pfm1 = [[pfm2[i][x] / pfm2_count * (pfm1_count) for x in range(4)] for i in 
range(-pos)] + pfm1 elif pos > 0: pfm2 = [[pfm1[i][x] / pfm1_count * (pfm2_count) for x in range(4)] for i in range(pos)] + pfm2 pfm = [[a + b for a,b in zip(x,y)] for x,y in zip(pfm1, pfm2)] m = Motif(pfm) m.id = m.to_consensus() return m
[ "def", "average_motifs", "(", "self", ",", "other", ",", "pos", ",", "orientation", ",", "include_bg", "=", "False", ")", ":", "# xxCATGYT", "# GGCTTGYx", "# pos = -2", "pfm1", "=", "self", ".", "pfm", "[", ":", "]", "pfm2", "=", "other", ".", "pfm", "[", ":", "]", "if", "orientation", "<", "0", ":", "pfm2", "=", "[", "row", "[", ":", ":", "-", "1", "]", "for", "row", "in", "pfm2", "[", ":", ":", "-", "1", "]", "]", "pfm1_count", "=", "float", "(", "np", ".", "sum", "(", "pfm1", "[", "0", "]", ")", ")", "pfm2_count", "=", "float", "(", "np", ".", "sum", "(", "pfm2", "[", "0", "]", ")", ")", "if", "include_bg", ":", "if", "len", "(", "pfm1", ")", ">", "len", "(", "pfm2", ")", "+", "pos", ":", "pfm2", "+=", "[", "[", "pfm2_count", "/", "4.0", "for", "x", "in", "range", "(", "4", ")", "]", "for", "i", "in", "range", "(", "-", "(", "len", "(", "pfm1", ")", "-", "len", "(", "pfm2", ")", "-", "pos", ")", ",", "0", ")", "]", "elif", "len", "(", "pfm2", ")", "+", "pos", ">", "len", "(", "pfm1", ")", ":", "pfm1", "+=", "[", "[", "pfm1_count", "/", "4.0", "for", "x", "in", "range", "(", "4", ")", "]", "for", "i", "in", "range", "(", "-", "(", "len", "(", "pfm2", ")", "-", "len", "(", "pfm1", ")", "+", "pos", ")", ",", "0", ")", "]", "if", "pos", "<", "0", ":", "pfm1", "=", "[", "[", "pfm1_count", "/", "4.0", "for", "x", "in", "range", "(", "4", ")", "]", "for", "i", "in", "range", "(", "-", "pos", ")", "]", "+", "pfm1", "elif", "pos", ">", "0", ":", "pfm2", "=", "[", "[", "pfm2_count", "/", "4.0", "for", "x", "in", "range", "(", "4", ")", "]", "for", "i", "in", "range", "(", "pos", ")", "]", "+", "pfm2", "else", ":", "if", "len", "(", "pfm1", ")", ">", "len", "(", "pfm2", ")", "+", "pos", ":", "pfm2", "+=", "[", "[", "pfm1", "[", "i", "]", "[", "x", "]", "/", "pfm1_count", "*", "(", "pfm2_count", ")", "for", "x", "in", "range", "(", "4", ")", "]", "for", "i", "in", "range", "(", "-", "(", "len", "(", "pfm1", ")", "-", "len", "(", "pfm2", ")", "-", "pos", ")", ",", "0", 
")", "]", "elif", "len", "(", "pfm2", ")", "+", "pos", ">", "len", "(", "pfm1", ")", ":", "pfm1", "+=", "[", "[", "pfm2", "[", "i", "]", "[", "x", "]", "/", "pfm2_count", "*", "(", "pfm1_count", ")", "for", "x", "in", "range", "(", "4", ")", "]", "for", "i", "in", "range", "(", "-", "(", "len", "(", "pfm2", ")", "-", "len", "(", "pfm1", ")", "+", "pos", ")", ",", "0", ")", "]", "if", "pos", "<", "0", ":", "pfm1", "=", "[", "[", "pfm2", "[", "i", "]", "[", "x", "]", "/", "pfm2_count", "*", "(", "pfm1_count", ")", "for", "x", "in", "range", "(", "4", ")", "]", "for", "i", "in", "range", "(", "-", "pos", ")", "]", "+", "pfm1", "elif", "pos", ">", "0", ":", "pfm2", "=", "[", "[", "pfm1", "[", "i", "]", "[", "x", "]", "/", "pfm1_count", "*", "(", "pfm2_count", ")", "for", "x", "in", "range", "(", "4", ")", "]", "for", "i", "in", "range", "(", "pos", ")", "]", "+", "pfm2", "pfm", "=", "[", "[", "a", "+", "b", "for", "a", ",", "b", "in", "zip", "(", "x", ",", "y", ")", "]", "for", "x", ",", "y", "in", "zip", "(", "pfm1", ",", "pfm2", ")", "]", "m", "=", "Motif", "(", "pfm", ")", "m", ".", "id", "=", "m", ".", "to_consensus", "(", ")", "return", "m" ]
Return the average of two motifs. Combine this motif with another motif and return the average as a new Motif object. The position and orientatien need to be supplied. The pos parameter is the position of the second motif relative to this motif. For example, take the following two motifs: Motif 1: CATGYT Motif 2: GGCTTGY With position -2, the motifs are averaged as follows: xxCATGYT GGCTTGYx Parameters ---------- other : Motif object Other Motif object. pos : int Position of the second motif relative to this motif. orientation : int Orientation, should be 1 or -1. If the orientation is -1 then the reverse complement of the other motif is used for averaging. include_bg : bool , optional Extend both motifs with background frequencies (0.25) before averaging. False by default. Returns ------- motif : motif object New Motif object containing average motif.
[ "Return", "the", "average", "of", "two", "motifs", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/motif.py#L566-L637
train
vanheeringen-lab/gimmemotifs
gimmemotifs/motif.py
Motif._pwm_to_str
def _pwm_to_str(self, precision=4): """Return string representation of pwm. Parameters ---------- precision : int, optional, default 4 Floating-point precision. Returns ------- pwm_string : str """ if not self.pwm: return "" fmt = "{{:.{:d}f}}".format(precision) return "\n".join( ["\t".join([fmt.format(p) for p in row]) for row in self.pwm] )
python
def _pwm_to_str(self, precision=4): """Return string representation of pwm. Parameters ---------- precision : int, optional, default 4 Floating-point precision. Returns ------- pwm_string : str """ if not self.pwm: return "" fmt = "{{:.{:d}f}}".format(precision) return "\n".join( ["\t".join([fmt.format(p) for p in row]) for row in self.pwm] )
[ "def", "_pwm_to_str", "(", "self", ",", "precision", "=", "4", ")", ":", "if", "not", "self", ".", "pwm", ":", "return", "\"\"", "fmt", "=", "\"{{:.{:d}f}}\"", ".", "format", "(", "precision", ")", "return", "\"\\n\"", ".", "join", "(", "[", "\"\\t\"", ".", "join", "(", "[", "fmt", ".", "format", "(", "p", ")", "for", "p", "in", "row", "]", ")", "for", "row", "in", "self", ".", "pwm", "]", ")" ]
Return string representation of pwm. Parameters ---------- precision : int, optional, default 4 Floating-point precision. Returns ------- pwm_string : str
[ "Return", "string", "representation", "of", "pwm", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/motif.py#L906-L925
train
vanheeringen-lab/gimmemotifs
gimmemotifs/motif.py
Motif.to_pwm
def to_pwm(self, precision=4, extra_str=""): """Return pwm as string. Parameters ---------- precision : int, optional, default 4 Floating-point precision. extra_str |: str, optional Extra text to include with motif id line. Returns ------- motif_str : str Motif formatted in PWM format. """ motif_id = self.id if extra_str: motif_id += "_%s" % extra_str if not self.pwm: self.pwm = [self.iupac_pwm[char]for char in self.consensus.upper()] return ">%s\n%s" % ( motif_id, self._pwm_to_str(precision) )
python
def to_pwm(self, precision=4, extra_str=""): """Return pwm as string. Parameters ---------- precision : int, optional, default 4 Floating-point precision. extra_str |: str, optional Extra text to include with motif id line. Returns ------- motif_str : str Motif formatted in PWM format. """ motif_id = self.id if extra_str: motif_id += "_%s" % extra_str if not self.pwm: self.pwm = [self.iupac_pwm[char]for char in self.consensus.upper()] return ">%s\n%s" % ( motif_id, self._pwm_to_str(precision) )
[ "def", "to_pwm", "(", "self", ",", "precision", "=", "4", ",", "extra_str", "=", "\"\"", ")", ":", "motif_id", "=", "self", ".", "id", "if", "extra_str", ":", "motif_id", "+=", "\"_%s\"", "%", "extra_str", "if", "not", "self", ".", "pwm", ":", "self", ".", "pwm", "=", "[", "self", ".", "iupac_pwm", "[", "char", "]", "for", "char", "in", "self", ".", "consensus", ".", "upper", "(", ")", "]", "return", "\">%s\\n%s\"", "%", "(", "motif_id", ",", "self", ".", "_pwm_to_str", "(", "precision", ")", ")" ]
Return pwm as string. Parameters ---------- precision : int, optional, default 4 Floating-point precision. extra_str |: str, optional Extra text to include with motif id line. Returns ------- motif_str : str Motif formatted in PWM format.
[ "Return", "pwm", "as", "string", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/motif.py#L937-L964
train
vanheeringen-lab/gimmemotifs
gimmemotifs/motif.py
Motif.to_img
def to_img(self, fname, fmt="PNG", add_left=0, seqlogo=None, height=6): """Create a sequence logo using seqlogo. Create a sequence logo and save it to a file. Valid formats are: PNG, EPS, GIF and PDF. Parameters ---------- fname : str Output filename. fmt : str , optional Output format (case-insensitive). Valid formats are PNG, EPS, GIF and PDF. add_left : int , optional Pad motif with empty positions on the left side. seqlogo : str Location of the seqlogo executable. By default the seqlogo version that is included with GimmeMotifs is used. height : float Height of the image """ if not seqlogo: seqlogo = self.seqlogo if not seqlogo: raise ValueError("seqlogo not specified or configured") #TODO: split to_align function VALID_FORMATS = ["EPS", "GIF", "PDF", "PNG"] N = 1000 fmt = fmt.upper() if not fmt in VALID_FORMATS: sys.stderr.write("Invalid motif format\n") return if fname[-4:].upper() == (".%s" % fmt): fname = fname[:-4] seqs = [] if add_left == 0: seqs = ["" for i in range(N)] else: for nuc in ["A", "C", "T", "G"]: seqs += [nuc * add_left for i in range(N // 4)] for pos in range(len(self.pwm)): vals = [self.pwm[pos][0] * N] for i in range(1,4): vals.append(vals[i-1] + self.pwm[pos][i] * N) if vals[3] - N != 0: #print "Motif weights don't add up to 1! Error of %s%%" % ((vals[3] - n)/ n * 100) vals[3] = N for i in range(N): if i <= vals[0]: seqs[i] += "A" elif i <= vals[1]: seqs[i] += "C" elif i <= vals[2]: seqs[i] += "G" elif i <= vals[3]: seqs[i] += "T" f = NamedTemporaryFile(mode="w", dir=mytmpdir()) for seq in seqs: f.write("%s\n" % seq) f.flush() makelogo = "{0} -f {1} -F {2} -c -a -h {3} -w {4} -o {5} -b -n -Y" cmd = makelogo.format( seqlogo, f.name, fmt, height, len(self) + add_left, fname) sp.call(cmd, shell=True)
python
def to_img(self, fname, fmt="PNG", add_left=0, seqlogo=None, height=6): """Create a sequence logo using seqlogo. Create a sequence logo and save it to a file. Valid formats are: PNG, EPS, GIF and PDF. Parameters ---------- fname : str Output filename. fmt : str , optional Output format (case-insensitive). Valid formats are PNG, EPS, GIF and PDF. add_left : int , optional Pad motif with empty positions on the left side. seqlogo : str Location of the seqlogo executable. By default the seqlogo version that is included with GimmeMotifs is used. height : float Height of the image """ if not seqlogo: seqlogo = self.seqlogo if not seqlogo: raise ValueError("seqlogo not specified or configured") #TODO: split to_align function VALID_FORMATS = ["EPS", "GIF", "PDF", "PNG"] N = 1000 fmt = fmt.upper() if not fmt in VALID_FORMATS: sys.stderr.write("Invalid motif format\n") return if fname[-4:].upper() == (".%s" % fmt): fname = fname[:-4] seqs = [] if add_left == 0: seqs = ["" for i in range(N)] else: for nuc in ["A", "C", "T", "G"]: seqs += [nuc * add_left for i in range(N // 4)] for pos in range(len(self.pwm)): vals = [self.pwm[pos][0] * N] for i in range(1,4): vals.append(vals[i-1] + self.pwm[pos][i] * N) if vals[3] - N != 0: #print "Motif weights don't add up to 1! Error of %s%%" % ((vals[3] - n)/ n * 100) vals[3] = N for i in range(N): if i <= vals[0]: seqs[i] += "A" elif i <= vals[1]: seqs[i] += "C" elif i <= vals[2]: seqs[i] += "G" elif i <= vals[3]: seqs[i] += "T" f = NamedTemporaryFile(mode="w", dir=mytmpdir()) for seq in seqs: f.write("%s\n" % seq) f.flush() makelogo = "{0} -f {1} -F {2} -c -a -h {3} -w {4} -o {5} -b -n -Y" cmd = makelogo.format( seqlogo, f.name, fmt, height, len(self) + add_left, fname) sp.call(cmd, shell=True)
[ "def", "to_img", "(", "self", ",", "fname", ",", "fmt", "=", "\"PNG\"", ",", "add_left", "=", "0", ",", "seqlogo", "=", "None", ",", "height", "=", "6", ")", ":", "if", "not", "seqlogo", ":", "seqlogo", "=", "self", ".", "seqlogo", "if", "not", "seqlogo", ":", "raise", "ValueError", "(", "\"seqlogo not specified or configured\"", ")", "#TODO: split to_align function", "VALID_FORMATS", "=", "[", "\"EPS\"", ",", "\"GIF\"", ",", "\"PDF\"", ",", "\"PNG\"", "]", "N", "=", "1000", "fmt", "=", "fmt", ".", "upper", "(", ")", "if", "not", "fmt", "in", "VALID_FORMATS", ":", "sys", ".", "stderr", ".", "write", "(", "\"Invalid motif format\\n\"", ")", "return", "if", "fname", "[", "-", "4", ":", "]", ".", "upper", "(", ")", "==", "(", "\".%s\"", "%", "fmt", ")", ":", "fname", "=", "fname", "[", ":", "-", "4", "]", "seqs", "=", "[", "]", "if", "add_left", "==", "0", ":", "seqs", "=", "[", "\"\"", "for", "i", "in", "range", "(", "N", ")", "]", "else", ":", "for", "nuc", "in", "[", "\"A\"", ",", "\"C\"", ",", "\"T\"", ",", "\"G\"", "]", ":", "seqs", "+=", "[", "nuc", "*", "add_left", "for", "i", "in", "range", "(", "N", "//", "4", ")", "]", "for", "pos", "in", "range", "(", "len", "(", "self", ".", "pwm", ")", ")", ":", "vals", "=", "[", "self", ".", "pwm", "[", "pos", "]", "[", "0", "]", "*", "N", "]", "for", "i", "in", "range", "(", "1", ",", "4", ")", ":", "vals", ".", "append", "(", "vals", "[", "i", "-", "1", "]", "+", "self", ".", "pwm", "[", "pos", "]", "[", "i", "]", "*", "N", ")", "if", "vals", "[", "3", "]", "-", "N", "!=", "0", ":", "#print \"Motif weights don't add up to 1! 
Error of %s%%\" % ((vals[3] - n)/ n * 100)", "vals", "[", "3", "]", "=", "N", "for", "i", "in", "range", "(", "N", ")", ":", "if", "i", "<=", "vals", "[", "0", "]", ":", "seqs", "[", "i", "]", "+=", "\"A\"", "elif", "i", "<=", "vals", "[", "1", "]", ":", "seqs", "[", "i", "]", "+=", "\"C\"", "elif", "i", "<=", "vals", "[", "2", "]", ":", "seqs", "[", "i", "]", "+=", "\"G\"", "elif", "i", "<=", "vals", "[", "3", "]", ":", "seqs", "[", "i", "]", "+=", "\"T\"", "f", "=", "NamedTemporaryFile", "(", "mode", "=", "\"w\"", ",", "dir", "=", "mytmpdir", "(", ")", ")", "for", "seq", "in", "seqs", ":", "f", ".", "write", "(", "\"%s\\n\"", "%", "seq", ")", "f", ".", "flush", "(", ")", "makelogo", "=", "\"{0} -f {1} -F {2} -c -a -h {3} -w {4} -o {5} -b -n -Y\"", "cmd", "=", "makelogo", ".", "format", "(", "seqlogo", ",", "f", ".", "name", ",", "fmt", ",", "height", ",", "len", "(", "self", ")", "+", "add_left", ",", "fname", ")", "sp", ".", "call", "(", "cmd", ",", "shell", "=", "True", ")" ]
Create a sequence logo using seqlogo. Create a sequence logo and save it to a file. Valid formats are: PNG, EPS, GIF and PDF. Parameters ---------- fname : str Output filename. fmt : str , optional Output format (case-insensitive). Valid formats are PNG, EPS, GIF and PDF. add_left : int , optional Pad motif with empty positions on the left side. seqlogo : str Location of the seqlogo executable. By default the seqlogo version that is included with GimmeMotifs is used. height : float Height of the image
[ "Create", "a", "sequence", "logo", "using", "seqlogo", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/motif.py#L966-L1039
train
vanheeringen-lab/gimmemotifs
gimmemotifs/motif.py
Motif.randomize
def randomize(self): """Create a new motif with shuffled positions. Shuffle the positions of this motif and return a new Motif instance. Returns ------- m : Motif instance Motif instance with shuffled positions. """ random_pfm = [[c for c in row] for row in self.pfm] random.shuffle(random_pfm) m = Motif(pfm=random_pfm) m.id = "random" return m
python
def randomize(self): """Create a new motif with shuffled positions. Shuffle the positions of this motif and return a new Motif instance. Returns ------- m : Motif instance Motif instance with shuffled positions. """ random_pfm = [[c for c in row] for row in self.pfm] random.shuffle(random_pfm) m = Motif(pfm=random_pfm) m.id = "random" return m
[ "def", "randomize", "(", "self", ")", ":", "random_pfm", "=", "[", "[", "c", "for", "c", "in", "row", "]", "for", "row", "in", "self", ".", "pfm", "]", "random", ".", "shuffle", "(", "random_pfm", ")", "m", "=", "Motif", "(", "pfm", "=", "random_pfm", ")", "m", ".", "id", "=", "\"random\"", "return", "m" ]
Create a new motif with shuffled positions. Shuffle the positions of this motif and return a new Motif instance. Returns ------- m : Motif instance Motif instance with shuffled positions.
[ "Create", "a", "new", "motif", "with", "shuffled", "positions", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/motif.py#L1045-L1059
train
vanheeringen-lab/gimmemotifs
gimmemotifs/commands/maelstrom.py
maelstrom
def maelstrom(args): """Run the maelstrom method.""" infile = args.inputfile genome = args.genome outdir = args.outdir pwmfile = args.pwmfile methods = args.methods ncpus = args.ncpus if not os.path.exists(infile): raise ValueError("file {} does not exist".format(infile)) if methods: methods = [x.strip() for x in methods.split(",")] run_maelstrom(infile, genome, outdir, pwmfile, methods=methods, ncpus=ncpus)
python
def maelstrom(args): """Run the maelstrom method.""" infile = args.inputfile genome = args.genome outdir = args.outdir pwmfile = args.pwmfile methods = args.methods ncpus = args.ncpus if not os.path.exists(infile): raise ValueError("file {} does not exist".format(infile)) if methods: methods = [x.strip() for x in methods.split(",")] run_maelstrom(infile, genome, outdir, pwmfile, methods=methods, ncpus=ncpus)
[ "def", "maelstrom", "(", "args", ")", ":", "infile", "=", "args", ".", "inputfile", "genome", "=", "args", ".", "genome", "outdir", "=", "args", ".", "outdir", "pwmfile", "=", "args", ".", "pwmfile", "methods", "=", "args", ".", "methods", "ncpus", "=", "args", ".", "ncpus", "if", "not", "os", ".", "path", ".", "exists", "(", "infile", ")", ":", "raise", "ValueError", "(", "\"file {} does not exist\"", ".", "format", "(", "infile", ")", ")", "if", "methods", ":", "methods", "=", "[", "x", ".", "strip", "(", ")", "for", "x", "in", "methods", ".", "split", "(", "\",\"", ")", "]", "run_maelstrom", "(", "infile", ",", "genome", ",", "outdir", ",", "pwmfile", ",", "methods", "=", "methods", ",", "ncpus", "=", "ncpus", ")" ]
Run the maelstrom method.
[ "Run", "the", "maelstrom", "method", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/commands/maelstrom.py#L12-L27
train
pescadores/pescador
pescador/zmq_stream.py
zmq_send_data
def zmq_send_data(socket, data, flags=0, copy=True, track=False): """Send data, e.g. {key: np.ndarray}, with metadata""" header, payload = [], [] for key in sorted(data.keys()): arr = data[key] if not isinstance(arr, np.ndarray): raise DataError('Only ndarray types can be serialized') header.append(dict(dtype=str(arr.dtype), shape=arr.shape, key=key, aligned=arr.flags['ALIGNED'])) # Force contiguity payload.append(arr) # Send the header msg = [json.dumps(header).encode('ascii')] msg.extend(payload) return socket.send_multipart(msg, flags, copy=copy, track=track)
python
def zmq_send_data(socket, data, flags=0, copy=True, track=False): """Send data, e.g. {key: np.ndarray}, with metadata""" header, payload = [], [] for key in sorted(data.keys()): arr = data[key] if not isinstance(arr, np.ndarray): raise DataError('Only ndarray types can be serialized') header.append(dict(dtype=str(arr.dtype), shape=arr.shape, key=key, aligned=arr.flags['ALIGNED'])) # Force contiguity payload.append(arr) # Send the header msg = [json.dumps(header).encode('ascii')] msg.extend(payload) return socket.send_multipart(msg, flags, copy=copy, track=track)
[ "def", "zmq_send_data", "(", "socket", ",", "data", ",", "flags", "=", "0", ",", "copy", "=", "True", ",", "track", "=", "False", ")", ":", "header", ",", "payload", "=", "[", "]", ",", "[", "]", "for", "key", "in", "sorted", "(", "data", ".", "keys", "(", ")", ")", ":", "arr", "=", "data", "[", "key", "]", "if", "not", "isinstance", "(", "arr", ",", "np", ".", "ndarray", ")", ":", "raise", "DataError", "(", "'Only ndarray types can be serialized'", ")", "header", ".", "append", "(", "dict", "(", "dtype", "=", "str", "(", "arr", ".", "dtype", ")", ",", "shape", "=", "arr", ".", "shape", ",", "key", "=", "key", ",", "aligned", "=", "arr", ".", "flags", "[", "'ALIGNED'", "]", ")", ")", "# Force contiguity", "payload", ".", "append", "(", "arr", ")", "# Send the header", "msg", "=", "[", "json", ".", "dumps", "(", "header", ")", ".", "encode", "(", "'ascii'", ")", "]", "msg", ".", "extend", "(", "payload", ")", "return", "socket", ".", "send_multipart", "(", "msg", ",", "flags", ",", "copy", "=", "copy", ",", "track", "=", "track", ")" ]
Send data, e.g. {key: np.ndarray}, with metadata
[ "Send", "data", "e", ".", "g", ".", "{", "key", ":", "np", ".", "ndarray", "}", "with", "metadata" ]
786e2b5f882d13ea563769fbc7ad0a0a10c3553d
https://github.com/pescadores/pescador/blob/786e2b5f882d13ea563769fbc7ad0a0a10c3553d/pescador/zmq_stream.py#L47-L69
train
pescadores/pescador
pescador/zmq_stream.py
zmq_recv_data
def zmq_recv_data(socket, flags=0, copy=True, track=False): """Receive data over a socket.""" data = dict() msg = socket.recv_multipart(flags=flags, copy=copy, track=track) headers = json.loads(msg[0].decode('ascii')) if len(headers) == 0: raise StopIteration for header, payload in zip(headers, msg[1:]): data[header['key']] = np.frombuffer(buffer(payload), dtype=header['dtype']) data[header['key']].shape = header['shape'] if six.PY2: # Legacy python won't let us preserve alignment, skip this step continue data[header['key']].flags['ALIGNED'] = header['aligned'] return data
python
def zmq_recv_data(socket, flags=0, copy=True, track=False): """Receive data over a socket.""" data = dict() msg = socket.recv_multipart(flags=flags, copy=copy, track=track) headers = json.loads(msg[0].decode('ascii')) if len(headers) == 0: raise StopIteration for header, payload in zip(headers, msg[1:]): data[header['key']] = np.frombuffer(buffer(payload), dtype=header['dtype']) data[header['key']].shape = header['shape'] if six.PY2: # Legacy python won't let us preserve alignment, skip this step continue data[header['key']].flags['ALIGNED'] = header['aligned'] return data
[ "def", "zmq_recv_data", "(", "socket", ",", "flags", "=", "0", ",", "copy", "=", "True", ",", "track", "=", "False", ")", ":", "data", "=", "dict", "(", ")", "msg", "=", "socket", ".", "recv_multipart", "(", "flags", "=", "flags", ",", "copy", "=", "copy", ",", "track", "=", "track", ")", "headers", "=", "json", ".", "loads", "(", "msg", "[", "0", "]", ".", "decode", "(", "'ascii'", ")", ")", "if", "len", "(", "headers", ")", "==", "0", ":", "raise", "StopIteration", "for", "header", ",", "payload", "in", "zip", "(", "headers", ",", "msg", "[", "1", ":", "]", ")", ":", "data", "[", "header", "[", "'key'", "]", "]", "=", "np", ".", "frombuffer", "(", "buffer", "(", "payload", ")", ",", "dtype", "=", "header", "[", "'dtype'", "]", ")", "data", "[", "header", "[", "'key'", "]", "]", ".", "shape", "=", "header", "[", "'shape'", "]", "if", "six", ".", "PY2", ":", "# Legacy python won't let us preserve alignment, skip this step", "continue", "data", "[", "header", "[", "'key'", "]", "]", ".", "flags", "[", "'ALIGNED'", "]", "=", "header", "[", "'aligned'", "]", "return", "data" ]
Receive data over a socket.
[ "Receive", "data", "over", "a", "socket", "." ]
786e2b5f882d13ea563769fbc7ad0a0a10c3553d
https://github.com/pescadores/pescador/blob/786e2b5f882d13ea563769fbc7ad0a0a10c3553d/pescador/zmq_stream.py#L72-L93
train
pescadores/pescador
pescador/zmq_stream.py
ZMQStreamer.iterate
def iterate(self, max_iter=None): """ Note: A ZMQStreamer does not activate its stream, but allows the zmq_worker to do that. Yields ------ data : dict Data drawn from `streamer(max_iter)`. """ context = zmq.Context() if six.PY2: warnings.warn('zmq_stream cannot preserve numpy array alignment ' 'in Python 2', RuntimeWarning) try: socket = context.socket(zmq.PAIR) port = socket.bind_to_random_port('tcp://*', min_port=self.min_port, max_port=self.max_port, max_tries=self.max_tries) terminate = mp.Event() worker = mp.Process(target=SafeFunction(zmq_worker), args=[port, self.streamer, terminate], kwargs=dict(copy=self.copy, max_iter=max_iter)) worker.daemon = True worker.start() # Yield from the queue as long as it's open while True: yield zmq_recv_data(socket) except StopIteration: pass except: # pylint: disable-msg=W0702 six.reraise(*sys.exc_info()) finally: terminate.set() worker.join(self.timeout) if worker.is_alive(): worker.terminate() context.destroy()
python
def iterate(self, max_iter=None): """ Note: A ZMQStreamer does not activate its stream, but allows the zmq_worker to do that. Yields ------ data : dict Data drawn from `streamer(max_iter)`. """ context = zmq.Context() if six.PY2: warnings.warn('zmq_stream cannot preserve numpy array alignment ' 'in Python 2', RuntimeWarning) try: socket = context.socket(zmq.PAIR) port = socket.bind_to_random_port('tcp://*', min_port=self.min_port, max_port=self.max_port, max_tries=self.max_tries) terminate = mp.Event() worker = mp.Process(target=SafeFunction(zmq_worker), args=[port, self.streamer, terminate], kwargs=dict(copy=self.copy, max_iter=max_iter)) worker.daemon = True worker.start() # Yield from the queue as long as it's open while True: yield zmq_recv_data(socket) except StopIteration: pass except: # pylint: disable-msg=W0702 six.reraise(*sys.exc_info()) finally: terminate.set() worker.join(self.timeout) if worker.is_alive(): worker.terminate() context.destroy()
[ "def", "iterate", "(", "self", ",", "max_iter", "=", "None", ")", ":", "context", "=", "zmq", ".", "Context", "(", ")", "if", "six", ".", "PY2", ":", "warnings", ".", "warn", "(", "'zmq_stream cannot preserve numpy array alignment '", "'in Python 2'", ",", "RuntimeWarning", ")", "try", ":", "socket", "=", "context", ".", "socket", "(", "zmq", ".", "PAIR", ")", "port", "=", "socket", ".", "bind_to_random_port", "(", "'tcp://*'", ",", "min_port", "=", "self", ".", "min_port", ",", "max_port", "=", "self", ".", "max_port", ",", "max_tries", "=", "self", ".", "max_tries", ")", "terminate", "=", "mp", ".", "Event", "(", ")", "worker", "=", "mp", ".", "Process", "(", "target", "=", "SafeFunction", "(", "zmq_worker", ")", ",", "args", "=", "[", "port", ",", "self", ".", "streamer", ",", "terminate", "]", ",", "kwargs", "=", "dict", "(", "copy", "=", "self", ".", "copy", ",", "max_iter", "=", "max_iter", ")", ")", "worker", ".", "daemon", "=", "True", "worker", ".", "start", "(", ")", "# Yield from the queue as long as it's open", "while", "True", ":", "yield", "zmq_recv_data", "(", "socket", ")", "except", "StopIteration", ":", "pass", "except", ":", "# pylint: disable-msg=W0702", "six", ".", "reraise", "(", "*", "sys", ".", "exc_info", "(", ")", ")", "finally", ":", "terminate", ".", "set", "(", ")", "worker", ".", "join", "(", "self", ".", "timeout", ")", "if", "worker", ".", "is_alive", "(", ")", ":", "worker", ".", "terminate", "(", ")", "context", ".", "destroy", "(", ")" ]
Note: A ZMQStreamer does not activate its stream, but allows the zmq_worker to do that. Yields ------ data : dict Data drawn from `streamer(max_iter)`.
[ "Note", ":", "A", "ZMQStreamer", "does", "not", "activate", "its", "stream", "but", "allows", "the", "zmq_worker", "to", "do", "that", "." ]
786e2b5f882d13ea563769fbc7ad0a0a10c3553d
https://github.com/pescadores/pescador/blob/786e2b5f882d13ea563769fbc7ad0a0a10c3553d/pescador/zmq_stream.py#L169-L218
train
vanheeringen-lab/gimmemotifs
gimmemotifs/fasta.py
Fasta.hardmask
def hardmask(self): """ Mask all lowercase nucleotides with N's """ p = re.compile("a|c|g|t|n") for seq_id in self.fasta_dict.keys(): self.fasta_dict[seq_id] = p.sub("N", self.fasta_dict[seq_id]) return self
python
def hardmask(self): """ Mask all lowercase nucleotides with N's """ p = re.compile("a|c|g|t|n") for seq_id in self.fasta_dict.keys(): self.fasta_dict[seq_id] = p.sub("N", self.fasta_dict[seq_id]) return self
[ "def", "hardmask", "(", "self", ")", ":", "p", "=", "re", ".", "compile", "(", "\"a|c|g|t|n\"", ")", "for", "seq_id", "in", "self", ".", "fasta_dict", ".", "keys", "(", ")", ":", "self", ".", "fasta_dict", "[", "seq_id", "]", "=", "p", ".", "sub", "(", "\"N\"", ",", "self", ".", "fasta_dict", "[", "seq_id", "]", ")", "return", "self" ]
Mask all lowercase nucleotides with N's
[ "Mask", "all", "lowercase", "nucleotides", "with", "N", "s" ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/fasta.py#L39-L44
train
vanheeringen-lab/gimmemotifs
gimmemotifs/fasta.py
Fasta.get_random
def get_random(self, n, l=None): """ Return n random sequences from this Fasta object """ random_f = Fasta() if l: ids = self.ids[:] random.shuffle(ids) i = 0 while (i < n) and (len(ids) > 0): seq_id = ids.pop() if (len(self[seq_id]) >= l): start = random.randint(0, len(self[seq_id]) - l) random_f["random%s" % (i + 1)] = self[seq_id][start:start+l] i += 1 if len(random_f) != n: sys.stderr.write("Not enough sequences of required length") return else: return random_f else: choice = random.sample(self.ids, n) for i in range(n): random_f[choice[i]] = self[choice[i]] return random_f
python
def get_random(self, n, l=None): """ Return n random sequences from this Fasta object """ random_f = Fasta() if l: ids = self.ids[:] random.shuffle(ids) i = 0 while (i < n) and (len(ids) > 0): seq_id = ids.pop() if (len(self[seq_id]) >= l): start = random.randint(0, len(self[seq_id]) - l) random_f["random%s" % (i + 1)] = self[seq_id][start:start+l] i += 1 if len(random_f) != n: sys.stderr.write("Not enough sequences of required length") return else: return random_f else: choice = random.sample(self.ids, n) for i in range(n): random_f[choice[i]] = self[choice[i]] return random_f
[ "def", "get_random", "(", "self", ",", "n", ",", "l", "=", "None", ")", ":", "random_f", "=", "Fasta", "(", ")", "if", "l", ":", "ids", "=", "self", ".", "ids", "[", ":", "]", "random", ".", "shuffle", "(", "ids", ")", "i", "=", "0", "while", "(", "i", "<", "n", ")", "and", "(", "len", "(", "ids", ")", ">", "0", ")", ":", "seq_id", "=", "ids", ".", "pop", "(", ")", "if", "(", "len", "(", "self", "[", "seq_id", "]", ")", ">=", "l", ")", ":", "start", "=", "random", ".", "randint", "(", "0", ",", "len", "(", "self", "[", "seq_id", "]", ")", "-", "l", ")", "random_f", "[", "\"random%s\"", "%", "(", "i", "+", "1", ")", "]", "=", "self", "[", "seq_id", "]", "[", "start", ":", "start", "+", "l", "]", "i", "+=", "1", "if", "len", "(", "random_f", ")", "!=", "n", ":", "sys", ".", "stderr", ".", "write", "(", "\"Not enough sequences of required length\"", ")", "return", "else", ":", "return", "random_f", "else", ":", "choice", "=", "random", ".", "sample", "(", "self", ".", "ids", ",", "n", ")", "for", "i", "in", "range", "(", "n", ")", ":", "random_f", "[", "choice", "[", "i", "]", "]", "=", "self", "[", "choice", "[", "i", "]", "]", "return", "random_f" ]
Return n random sequences from this Fasta object
[ "Return", "n", "random", "sequences", "from", "this", "Fasta", "object" ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/fasta.py#L46-L69
train
vanheeringen-lab/gimmemotifs
gimmemotifs/fasta.py
Fasta.writefasta
def writefasta(self, fname): """ Write sequences to FASTA formatted file""" f = open(fname, "w") fa_str = "\n".join([">%s\n%s" % (id, self._format_seq(seq)) for id, seq in self.items()]) f.write(fa_str) f.close()
python
def writefasta(self, fname): """ Write sequences to FASTA formatted file""" f = open(fname, "w") fa_str = "\n".join([">%s\n%s" % (id, self._format_seq(seq)) for id, seq in self.items()]) f.write(fa_str) f.close()
[ "def", "writefasta", "(", "self", ",", "fname", ")", ":", "f", "=", "open", "(", "fname", ",", "\"w\"", ")", "fa_str", "=", "\"\\n\"", ".", "join", "(", "[", "\">%s\\n%s\"", "%", "(", "id", ",", "self", ".", "_format_seq", "(", "seq", ")", ")", "for", "id", ",", "seq", "in", "self", ".", "items", "(", ")", "]", ")", "f", ".", "write", "(", "fa_str", ")", "f", ".", "close", "(", ")" ]
Write sequences to FASTA formatted file
[ "Write", "sequences", "to", "FASTA", "formatted", "file" ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/fasta.py#L117-L122
train
vanheeringen-lab/gimmemotifs
gimmemotifs/cluster.py
cluster_motifs
def cluster_motifs(motifs, match="total", metric="wic", combine="mean", pval=True, threshold=0.95, trim_edges=False, edge_ic_cutoff=0.2, include_bg=True, progress=True, ncpus=None): """ Clusters a set of sequence motifs. Required arg 'motifs' is a file containing positional frequency matrices or an array with motifs. Optional args: 'match', 'metric' and 'combine' specify the method used to compare and score the motifs. By default the WIC score is used (metric='wic'), using the the score over the whole alignment (match='total'), with the total motif score calculated as the mean score of all positions (combine='mean'). 'match' can be either 'total' for the total alignment or 'subtotal' for the maximum scoring subsequence of the alignment. 'metric' can be any metric defined in MotifComparer, currently: 'pcc', 'ed', 'distance', 'wic' or 'chisq' 'combine' determines how the total score is calculated from the score of individual positions and can be either 'sum' or 'mean' 'pval' can be True or False and determines if the score should be converted to an empirical p-value 'threshold' determines the score (or p-value) cutoff If 'trim_edges' is set to True, all motif edges with an IC below 'edge_ic_cutoff' will be removed before clustering When computing the average of two motifs 'include_bg' determines if, at a position only present in one motif, the information in that motif should be kept, or if it should be averaged with background frequencies. Should probably be left set to True. 
""" # First read pfm or pfm formatted motiffile if type([]) != type(motifs): motifs = read_motifs(motifs, fmt="pwm") mc = MotifComparer() # Trim edges with low information content if trim_edges: for motif in motifs: motif.trim(edge_ic_cutoff) # Make a MotifTree node for every motif nodes = [MotifTree(m) for m in motifs] # Determine all pairwise scores and maxscore per motif scores = {} motif_nodes = dict([(n.motif.id,n) for n in nodes]) motifs = [n.motif for n in nodes] if progress: sys.stderr.write("Calculating initial scores\n") result = mc.get_all_scores(motifs, motifs, match, metric, combine, pval, parallel=True, ncpus=ncpus) for m1, other_motifs in result.items(): for m2, score in other_motifs.items(): if m1 == m2: if pval: motif_nodes[m1].maxscore = 1 - score[0] else: motif_nodes[m1].maxscore = score[0] else: if pval: score = [1 - score[0]] + score[1:] scores[(motif_nodes[m1],motif_nodes[m2])] = score cluster_nodes = [node for node in nodes] ave_count = 1 total = len(cluster_nodes) while len(cluster_nodes) > 1: l = sorted(scores.keys(), key=lambda x: scores[x][0]) i = -1 (n1, n2) = l[i] while n1 not in cluster_nodes or n2 not in cluster_nodes: i -= 1 (n1,n2) = l[i] if len(n1.motif) > 0 and len(n2.motif) > 0: (score, pos, orientation) = scores[(n1,n2)] ave_motif = n1.motif.average_motifs(n2.motif, pos, orientation, include_bg=include_bg) ave_motif.trim(edge_ic_cutoff) # Check if the motif is not empty if len(ave_motif) == 0: ave_motif = Motif([[0.25,0.25,0.25,0.25]]) ave_motif.id = "Average_%s" % ave_count ave_count += 1 new_node = MotifTree(ave_motif) if pval: new_node.maxscore = 1 - mc.compare_motifs(new_node.motif, new_node.motif, match, metric, combine, pval)[0] else: new_node.maxscore = mc.compare_motifs(new_node.motif, new_node.motif, match, metric, combine, pval)[0] new_node.mergescore = score #print "%s + %s = %s with score %s" % (n1.motif.id, n2.motif.id, ave_motif.id, score) n1.parent = new_node n2.parent = new_node new_node.left = n1 new_node.right 
= n2 cmp_nodes = dict([(node.motif, node) for node in nodes if not node.parent]) if progress: progress = (1 - len(cmp_nodes) / float(total)) * 100 sys.stderr.write('\rClustering [{0}{1}] {2}%'.format( '#' * (int(progress) // 10), " " * (10 - int(progress) // 10), int(progress))) result = mc.get_all_scores( [new_node.motif], list(cmp_nodes.keys()), match, metric, combine, pval, parallel=True) for motif, n in cmp_nodes.items(): x = result[new_node.motif.id][motif.id] if pval: x = [1 - x[0]] + x[1:] scores[(new_node, n)] = x nodes.append(new_node) cluster_nodes = [node for node in nodes if not node.parent] if progress: sys.stderr.write("\n") root = nodes[-1] for node in [node for node in nodes if not node.left]: node.parent.checkMerge(root, threshold) return root
python
def cluster_motifs(motifs, match="total", metric="wic", combine="mean", pval=True, threshold=0.95, trim_edges=False, edge_ic_cutoff=0.2, include_bg=True, progress=True, ncpus=None): """ Clusters a set of sequence motifs. Required arg 'motifs' is a file containing positional frequency matrices or an array with motifs. Optional args: 'match', 'metric' and 'combine' specify the method used to compare and score the motifs. By default the WIC score is used (metric='wic'), using the the score over the whole alignment (match='total'), with the total motif score calculated as the mean score of all positions (combine='mean'). 'match' can be either 'total' for the total alignment or 'subtotal' for the maximum scoring subsequence of the alignment. 'metric' can be any metric defined in MotifComparer, currently: 'pcc', 'ed', 'distance', 'wic' or 'chisq' 'combine' determines how the total score is calculated from the score of individual positions and can be either 'sum' or 'mean' 'pval' can be True or False and determines if the score should be converted to an empirical p-value 'threshold' determines the score (or p-value) cutoff If 'trim_edges' is set to True, all motif edges with an IC below 'edge_ic_cutoff' will be removed before clustering When computing the average of two motifs 'include_bg' determines if, at a position only present in one motif, the information in that motif should be kept, or if it should be averaged with background frequencies. Should probably be left set to True. 
""" # First read pfm or pfm formatted motiffile if type([]) != type(motifs): motifs = read_motifs(motifs, fmt="pwm") mc = MotifComparer() # Trim edges with low information content if trim_edges: for motif in motifs: motif.trim(edge_ic_cutoff) # Make a MotifTree node for every motif nodes = [MotifTree(m) for m in motifs] # Determine all pairwise scores and maxscore per motif scores = {} motif_nodes = dict([(n.motif.id,n) for n in nodes]) motifs = [n.motif for n in nodes] if progress: sys.stderr.write("Calculating initial scores\n") result = mc.get_all_scores(motifs, motifs, match, metric, combine, pval, parallel=True, ncpus=ncpus) for m1, other_motifs in result.items(): for m2, score in other_motifs.items(): if m1 == m2: if pval: motif_nodes[m1].maxscore = 1 - score[0] else: motif_nodes[m1].maxscore = score[0] else: if pval: score = [1 - score[0]] + score[1:] scores[(motif_nodes[m1],motif_nodes[m2])] = score cluster_nodes = [node for node in nodes] ave_count = 1 total = len(cluster_nodes) while len(cluster_nodes) > 1: l = sorted(scores.keys(), key=lambda x: scores[x][0]) i = -1 (n1, n2) = l[i] while n1 not in cluster_nodes or n2 not in cluster_nodes: i -= 1 (n1,n2) = l[i] if len(n1.motif) > 0 and len(n2.motif) > 0: (score, pos, orientation) = scores[(n1,n2)] ave_motif = n1.motif.average_motifs(n2.motif, pos, orientation, include_bg=include_bg) ave_motif.trim(edge_ic_cutoff) # Check if the motif is not empty if len(ave_motif) == 0: ave_motif = Motif([[0.25,0.25,0.25,0.25]]) ave_motif.id = "Average_%s" % ave_count ave_count += 1 new_node = MotifTree(ave_motif) if pval: new_node.maxscore = 1 - mc.compare_motifs(new_node.motif, new_node.motif, match, metric, combine, pval)[0] else: new_node.maxscore = mc.compare_motifs(new_node.motif, new_node.motif, match, metric, combine, pval)[0] new_node.mergescore = score #print "%s + %s = %s with score %s" % (n1.motif.id, n2.motif.id, ave_motif.id, score) n1.parent = new_node n2.parent = new_node new_node.left = n1 new_node.right 
= n2 cmp_nodes = dict([(node.motif, node) for node in nodes if not node.parent]) if progress: progress = (1 - len(cmp_nodes) / float(total)) * 100 sys.stderr.write('\rClustering [{0}{1}] {2}%'.format( '#' * (int(progress) // 10), " " * (10 - int(progress) // 10), int(progress))) result = mc.get_all_scores( [new_node.motif], list(cmp_nodes.keys()), match, metric, combine, pval, parallel=True) for motif, n in cmp_nodes.items(): x = result[new_node.motif.id][motif.id] if pval: x = [1 - x[0]] + x[1:] scores[(new_node, n)] = x nodes.append(new_node) cluster_nodes = [node for node in nodes if not node.parent] if progress: sys.stderr.write("\n") root = nodes[-1] for node in [node for node in nodes if not node.left]: node.parent.checkMerge(root, threshold) return root
[ "def", "cluster_motifs", "(", "motifs", ",", "match", "=", "\"total\"", ",", "metric", "=", "\"wic\"", ",", "combine", "=", "\"mean\"", ",", "pval", "=", "True", ",", "threshold", "=", "0.95", ",", "trim_edges", "=", "False", ",", "edge_ic_cutoff", "=", "0.2", ",", "include_bg", "=", "True", ",", "progress", "=", "True", ",", "ncpus", "=", "None", ")", ":", "# First read pfm or pfm formatted motiffile", "if", "type", "(", "[", "]", ")", "!=", "type", "(", "motifs", ")", ":", "motifs", "=", "read_motifs", "(", "motifs", ",", "fmt", "=", "\"pwm\"", ")", "mc", "=", "MotifComparer", "(", ")", "# Trim edges with low information content", "if", "trim_edges", ":", "for", "motif", "in", "motifs", ":", "motif", ".", "trim", "(", "edge_ic_cutoff", ")", "# Make a MotifTree node for every motif", "nodes", "=", "[", "MotifTree", "(", "m", ")", "for", "m", "in", "motifs", "]", "# Determine all pairwise scores and maxscore per motif", "scores", "=", "{", "}", "motif_nodes", "=", "dict", "(", "[", "(", "n", ".", "motif", ".", "id", ",", "n", ")", "for", "n", "in", "nodes", "]", ")", "motifs", "=", "[", "n", ".", "motif", "for", "n", "in", "nodes", "]", "if", "progress", ":", "sys", ".", "stderr", ".", "write", "(", "\"Calculating initial scores\\n\"", ")", "result", "=", "mc", ".", "get_all_scores", "(", "motifs", ",", "motifs", ",", "match", ",", "metric", ",", "combine", ",", "pval", ",", "parallel", "=", "True", ",", "ncpus", "=", "ncpus", ")", "for", "m1", ",", "other_motifs", "in", "result", ".", "items", "(", ")", ":", "for", "m2", ",", "score", "in", "other_motifs", ".", "items", "(", ")", ":", "if", "m1", "==", "m2", ":", "if", "pval", ":", "motif_nodes", "[", "m1", "]", ".", "maxscore", "=", "1", "-", "score", "[", "0", "]", "else", ":", "motif_nodes", "[", "m1", "]", ".", "maxscore", "=", "score", "[", "0", "]", "else", ":", "if", "pval", ":", "score", "=", "[", "1", "-", "score", "[", "0", "]", "]", "+", "score", "[", "1", ":", "]", "scores", "[", "(", "motif_nodes", 
"[", "m1", "]", ",", "motif_nodes", "[", "m2", "]", ")", "]", "=", "score", "cluster_nodes", "=", "[", "node", "for", "node", "in", "nodes", "]", "ave_count", "=", "1", "total", "=", "len", "(", "cluster_nodes", ")", "while", "len", "(", "cluster_nodes", ")", ">", "1", ":", "l", "=", "sorted", "(", "scores", ".", "keys", "(", ")", ",", "key", "=", "lambda", "x", ":", "scores", "[", "x", "]", "[", "0", "]", ")", "i", "=", "-", "1", "(", "n1", ",", "n2", ")", "=", "l", "[", "i", "]", "while", "n1", "not", "in", "cluster_nodes", "or", "n2", "not", "in", "cluster_nodes", ":", "i", "-=", "1", "(", "n1", ",", "n2", ")", "=", "l", "[", "i", "]", "if", "len", "(", "n1", ".", "motif", ")", ">", "0", "and", "len", "(", "n2", ".", "motif", ")", ">", "0", ":", "(", "score", ",", "pos", ",", "orientation", ")", "=", "scores", "[", "(", "n1", ",", "n2", ")", "]", "ave_motif", "=", "n1", ".", "motif", ".", "average_motifs", "(", "n2", ".", "motif", ",", "pos", ",", "orientation", ",", "include_bg", "=", "include_bg", ")", "ave_motif", ".", "trim", "(", "edge_ic_cutoff", ")", "# Check if the motif is not empty", "if", "len", "(", "ave_motif", ")", "==", "0", ":", "ave_motif", "=", "Motif", "(", "[", "[", "0.25", ",", "0.25", ",", "0.25", ",", "0.25", "]", "]", ")", "ave_motif", ".", "id", "=", "\"Average_%s\"", "%", "ave_count", "ave_count", "+=", "1", "new_node", "=", "MotifTree", "(", "ave_motif", ")", "if", "pval", ":", "new_node", ".", "maxscore", "=", "1", "-", "mc", ".", "compare_motifs", "(", "new_node", ".", "motif", ",", "new_node", ".", "motif", ",", "match", ",", "metric", ",", "combine", ",", "pval", ")", "[", "0", "]", "else", ":", "new_node", ".", "maxscore", "=", "mc", ".", "compare_motifs", "(", "new_node", ".", "motif", ",", "new_node", ".", "motif", ",", "match", ",", "metric", ",", "combine", ",", "pval", ")", "[", "0", "]", "new_node", ".", "mergescore", "=", "score", "#print \"%s + %s = %s with score %s\" % (n1.motif.id, n2.motif.id, ave_motif.id, score)", 
"n1", ".", "parent", "=", "new_node", "n2", ".", "parent", "=", "new_node", "new_node", ".", "left", "=", "n1", "new_node", ".", "right", "=", "n2", "cmp_nodes", "=", "dict", "(", "[", "(", "node", ".", "motif", ",", "node", ")", "for", "node", "in", "nodes", "if", "not", "node", ".", "parent", "]", ")", "if", "progress", ":", "progress", "=", "(", "1", "-", "len", "(", "cmp_nodes", ")", "/", "float", "(", "total", ")", ")", "*", "100", "sys", ".", "stderr", ".", "write", "(", "'\\rClustering [{0}{1}] {2}%'", ".", "format", "(", "'#'", "*", "(", "int", "(", "progress", ")", "//", "10", ")", ",", "\" \"", "*", "(", "10", "-", "int", "(", "progress", ")", "//", "10", ")", ",", "int", "(", "progress", ")", ")", ")", "result", "=", "mc", ".", "get_all_scores", "(", "[", "new_node", ".", "motif", "]", ",", "list", "(", "cmp_nodes", ".", "keys", "(", ")", ")", ",", "match", ",", "metric", ",", "combine", ",", "pval", ",", "parallel", "=", "True", ")", "for", "motif", ",", "n", "in", "cmp_nodes", ".", "items", "(", ")", ":", "x", "=", "result", "[", "new_node", ".", "motif", ".", "id", "]", "[", "motif", ".", "id", "]", "if", "pval", ":", "x", "=", "[", "1", "-", "x", "[", "0", "]", "]", "+", "x", "[", "1", ":", "]", "scores", "[", "(", "new_node", ",", "n", ")", "]", "=", "x", "nodes", ".", "append", "(", "new_node", ")", "cluster_nodes", "=", "[", "node", "for", "node", "in", "nodes", "if", "not", "node", ".", "parent", "]", "if", "progress", ":", "sys", ".", "stderr", ".", "write", "(", "\"\\n\"", ")", "root", "=", "nodes", "[", "-", "1", "]", "for", "node", "in", "[", "node", "for", "node", "in", "nodes", "if", "not", "node", ".", "left", "]", ":", "node", ".", "parent", ".", "checkMerge", "(", "root", ",", "threshold", ")", "return", "root" ]
Clusters a set of sequence motifs. Required arg 'motifs' is a file containing positional frequency matrices or an array with motifs. Optional args: 'match', 'metric' and 'combine' specify the method used to compare and score the motifs. By default the WIC score is used (metric='wic'), using the the score over the whole alignment (match='total'), with the total motif score calculated as the mean score of all positions (combine='mean'). 'match' can be either 'total' for the total alignment or 'subtotal' for the maximum scoring subsequence of the alignment. 'metric' can be any metric defined in MotifComparer, currently: 'pcc', 'ed', 'distance', 'wic' or 'chisq' 'combine' determines how the total score is calculated from the score of individual positions and can be either 'sum' or 'mean' 'pval' can be True or False and determines if the score should be converted to an empirical p-value 'threshold' determines the score (or p-value) cutoff If 'trim_edges' is set to True, all motif edges with an IC below 'edge_ic_cutoff' will be removed before clustering When computing the average of two motifs 'include_bg' determines if, at a position only present in one motif, the information in that motif should be kept, or if it should be averaged with background frequencies. Should probably be left set to True.
[ "Clusters", "a", "set", "of", "sequence", "motifs", ".", "Required", "arg", "motifs", "is", "a", "file", "containing", "positional", "frequency", "matrices", "or", "an", "array", "with", "motifs", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/cluster.py#L83-L225
train
pescadores/pescador
pescador/util.py
batch_length
def batch_length(batch): '''Determine the number of samples in a batch. Parameters ---------- batch : dict A batch dictionary. Each value must implement `len`. All values must have the same `len`. Returns ------- n : int >= 0 or None The number of samples in this batch. If the batch has no fields, n is None. Raises ------ PescadorError If some two values have unequal length ''' n = None for value in six.itervalues(batch): if n is None: n = len(value) elif len(value) != n: raise PescadorError('Unequal field lengths') return n
python
def batch_length(batch): '''Determine the number of samples in a batch. Parameters ---------- batch : dict A batch dictionary. Each value must implement `len`. All values must have the same `len`. Returns ------- n : int >= 0 or None The number of samples in this batch. If the batch has no fields, n is None. Raises ------ PescadorError If some two values have unequal length ''' n = None for value in six.itervalues(batch): if n is None: n = len(value) elif len(value) != n: raise PescadorError('Unequal field lengths') return n
[ "def", "batch_length", "(", "batch", ")", ":", "n", "=", "None", "for", "value", "in", "six", ".", "itervalues", "(", "batch", ")", ":", "if", "n", "is", "None", ":", "n", "=", "len", "(", "value", ")", "elif", "len", "(", "value", ")", "!=", "n", ":", "raise", "PescadorError", "(", "'Unequal field lengths'", ")", "return", "n" ]
Determine the number of samples in a batch. Parameters ---------- batch : dict A batch dictionary. Each value must implement `len`. All values must have the same `len`. Returns ------- n : int >= 0 or None The number of samples in this batch. If the batch has no fields, n is None. Raises ------ PescadorError If some two values have unequal length
[ "Determine", "the", "number", "of", "samples", "in", "a", "batch", "." ]
786e2b5f882d13ea563769fbc7ad0a0a10c3553d
https://github.com/pescadores/pescador/blob/786e2b5f882d13ea563769fbc7ad0a0a10c3553d/pescador/util.py#L121-L150
train
pescadores/pescador
pescador/mux.py
Mux._activate
def _activate(self): """Activates a number of streams""" self.distribution_ = 1. / self.n_streams * np.ones(self.n_streams) self.valid_streams_ = np.ones(self.n_streams, dtype=bool) self.streams_ = [None] * self.k self.stream_weights_ = np.zeros(self.k) self.stream_counts_ = np.zeros(self.k, dtype=int) # Array of pointers into `self.streamers` self.stream_idxs_ = np.zeros(self.k, dtype=int) for idx in range(self.k): if not (self.distribution_ > 0).any(): break self.stream_idxs_[idx] = self.rng.choice( self.n_streams, p=self.distribution_) self.streams_[idx], self.stream_weights_[idx] = ( self._new_stream(self.stream_idxs_[idx])) self.weight_norm_ = np.sum(self.stream_weights_)
python
def _activate(self): """Activates a number of streams""" self.distribution_ = 1. / self.n_streams * np.ones(self.n_streams) self.valid_streams_ = np.ones(self.n_streams, dtype=bool) self.streams_ = [None] * self.k self.stream_weights_ = np.zeros(self.k) self.stream_counts_ = np.zeros(self.k, dtype=int) # Array of pointers into `self.streamers` self.stream_idxs_ = np.zeros(self.k, dtype=int) for idx in range(self.k): if not (self.distribution_ > 0).any(): break self.stream_idxs_[idx] = self.rng.choice( self.n_streams, p=self.distribution_) self.streams_[idx], self.stream_weights_[idx] = ( self._new_stream(self.stream_idxs_[idx])) self.weight_norm_ = np.sum(self.stream_weights_)
[ "def", "_activate", "(", "self", ")", ":", "self", ".", "distribution_", "=", "1.", "/", "self", ".", "n_streams", "*", "np", ".", "ones", "(", "self", ".", "n_streams", ")", "self", ".", "valid_streams_", "=", "np", ".", "ones", "(", "self", ".", "n_streams", ",", "dtype", "=", "bool", ")", "self", ".", "streams_", "=", "[", "None", "]", "*", "self", ".", "k", "self", ".", "stream_weights_", "=", "np", ".", "zeros", "(", "self", ".", "k", ")", "self", ".", "stream_counts_", "=", "np", ".", "zeros", "(", "self", ".", "k", ",", "dtype", "=", "int", ")", "# Array of pointers into `self.streamers`", "self", ".", "stream_idxs_", "=", "np", ".", "zeros", "(", "self", ".", "k", ",", "dtype", "=", "int", ")", "for", "idx", "in", "range", "(", "self", ".", "k", ")", ":", "if", "not", "(", "self", ".", "distribution_", ">", "0", ")", ".", "any", "(", ")", ":", "break", "self", ".", "stream_idxs_", "[", "idx", "]", "=", "self", ".", "rng", ".", "choice", "(", "self", ".", "n_streams", ",", "p", "=", "self", ".", "distribution_", ")", "self", ".", "streams_", "[", "idx", "]", ",", "self", ".", "stream_weights_", "[", "idx", "]", "=", "(", "self", ".", "_new_stream", "(", "self", ".", "stream_idxs_", "[", "idx", "]", ")", ")", "self", ".", "weight_norm_", "=", "np", ".", "sum", "(", "self", ".", "stream_weights_", ")" ]
Activates a number of streams
[ "Activates", "a", "number", "of", "streams" ]
786e2b5f882d13ea563769fbc7ad0a0a10c3553d
https://github.com/pescadores/pescador/blob/786e2b5f882d13ea563769fbc7ad0a0a10c3553d/pescador/mux.py#L237-L259
train
pescadores/pescador
pescador/mux.py
Mux._new_stream
def _new_stream(self, idx): '''Randomly select and create a stream. Parameters ---------- idx : int, [0:n_streams - 1] The stream index to replace ''' # instantiate if self.rate is not None: n_stream = 1 + self.rng.poisson(lam=self.rate) else: n_stream = None # If we're sampling without replacement, zero this one out # This effectively disables this stream as soon as it is chosen, # preventing it from being chosen again (unless it is revived) if not self.with_replacement: self.distribution_[idx] = 0.0 # Correct the distribution if (self.distribution_ > 0).any(): self.distribution_[:] /= np.sum(self.distribution_) return (self.streamers[idx].iterate(max_iter=n_stream), self.weights[idx])
python
def _new_stream(self, idx): '''Randomly select and create a stream. Parameters ---------- idx : int, [0:n_streams - 1] The stream index to replace ''' # instantiate if self.rate is not None: n_stream = 1 + self.rng.poisson(lam=self.rate) else: n_stream = None # If we're sampling without replacement, zero this one out # This effectively disables this stream as soon as it is chosen, # preventing it from being chosen again (unless it is revived) if not self.with_replacement: self.distribution_[idx] = 0.0 # Correct the distribution if (self.distribution_ > 0).any(): self.distribution_[:] /= np.sum(self.distribution_) return (self.streamers[idx].iterate(max_iter=n_stream), self.weights[idx])
[ "def", "_new_stream", "(", "self", ",", "idx", ")", ":", "# instantiate", "if", "self", ".", "rate", "is", "not", "None", ":", "n_stream", "=", "1", "+", "self", ".", "rng", ".", "poisson", "(", "lam", "=", "self", ".", "rate", ")", "else", ":", "n_stream", "=", "None", "# If we're sampling without replacement, zero this one out", "# This effectively disables this stream as soon as it is chosen,", "# preventing it from being chosen again (unless it is revived)", "if", "not", "self", ".", "with_replacement", ":", "self", ".", "distribution_", "[", "idx", "]", "=", "0.0", "# Correct the distribution", "if", "(", "self", ".", "distribution_", ">", "0", ")", ".", "any", "(", ")", ":", "self", ".", "distribution_", "[", ":", "]", "/=", "np", ".", "sum", "(", "self", ".", "distribution_", ")", "return", "(", "self", ".", "streamers", "[", "idx", "]", ".", "iterate", "(", "max_iter", "=", "n_stream", ")", ",", "self", ".", "weights", "[", "idx", "]", ")" ]
Randomly select and create a stream. Parameters ---------- idx : int, [0:n_streams - 1] The stream index to replace
[ "Randomly", "select", "and", "create", "a", "stream", "." ]
786e2b5f882d13ea563769fbc7ad0a0a10c3553d
https://github.com/pescadores/pescador/blob/786e2b5f882d13ea563769fbc7ad0a0a10c3553d/pescador/mux.py#L341-L366
train
pescadores/pescador
pescador/mux.py
BaseMux.iterate
def iterate(self, max_iter=None): """Yields items from the mux, and handles stream exhaustion and replacement. """ if max_iter is None: max_iter = np.inf # Calls Streamer's __enter__, which calls activate() with self as active_mux: # Main sampling loop n = 0 while n < max_iter and active_mux._streamers_available(): # Pick a stream from the active set idx = active_mux._next_sample_index() # Can we sample from it? try: # Then yield the sample yield six.advance_iterator(active_mux.streams_[idx]) # Increment the sample counter n += 1 active_mux.stream_counts_[idx] += 1 except StopIteration: # Oops, this stream is exhausted. # Call child-class exhausted-stream behavior active_mux._on_stream_exhausted(idx) # Setup a new stream for this index active_mux._replace_stream(idx)
python
def iterate(self, max_iter=None): """Yields items from the mux, and handles stream exhaustion and replacement. """ if max_iter is None: max_iter = np.inf # Calls Streamer's __enter__, which calls activate() with self as active_mux: # Main sampling loop n = 0 while n < max_iter and active_mux._streamers_available(): # Pick a stream from the active set idx = active_mux._next_sample_index() # Can we sample from it? try: # Then yield the sample yield six.advance_iterator(active_mux.streams_[idx]) # Increment the sample counter n += 1 active_mux.stream_counts_[idx] += 1 except StopIteration: # Oops, this stream is exhausted. # Call child-class exhausted-stream behavior active_mux._on_stream_exhausted(idx) # Setup a new stream for this index active_mux._replace_stream(idx)
[ "def", "iterate", "(", "self", ",", "max_iter", "=", "None", ")", ":", "if", "max_iter", "is", "None", ":", "max_iter", "=", "np", ".", "inf", "# Calls Streamer's __enter__, which calls activate()", "with", "self", "as", "active_mux", ":", "# Main sampling loop", "n", "=", "0", "while", "n", "<", "max_iter", "and", "active_mux", ".", "_streamers_available", "(", ")", ":", "# Pick a stream from the active set", "idx", "=", "active_mux", ".", "_next_sample_index", "(", ")", "# Can we sample from it?", "try", ":", "# Then yield the sample", "yield", "six", ".", "advance_iterator", "(", "active_mux", ".", "streams_", "[", "idx", "]", ")", "# Increment the sample counter", "n", "+=", "1", "active_mux", ".", "stream_counts_", "[", "idx", "]", "+=", "1", "except", "StopIteration", ":", "# Oops, this stream is exhausted.", "# Call child-class exhausted-stream behavior", "active_mux", ".", "_on_stream_exhausted", "(", "idx", ")", "# Setup a new stream for this index", "active_mux", ".", "_replace_stream", "(", "idx", ")" ]
Yields items from the mux, and handles stream exhaustion and replacement.
[ "Yields", "items", "from", "the", "mux", "and", "handles", "stream", "exhaustion", "and", "replacement", "." ]
786e2b5f882d13ea563769fbc7ad0a0a10c3553d
https://github.com/pescadores/pescador/blob/786e2b5f882d13ea563769fbc7ad0a0a10c3553d/pescador/mux.py#L479-L511
train
pescadores/pescador
pescador/mux.py
StochasticMux._next_sample_index
def _next_sample_index(self): """StochasticMux chooses its next sample stream randomly""" return self.rng.choice(self.n_active, p=(self.stream_weights_ / self.weight_norm_))
python
def _next_sample_index(self): """StochasticMux chooses its next sample stream randomly""" return self.rng.choice(self.n_active, p=(self.stream_weights_ / self.weight_norm_))
[ "def", "_next_sample_index", "(", "self", ")", ":", "return", "self", ".", "rng", ".", "choice", "(", "self", ".", "n_active", ",", "p", "=", "(", "self", ".", "stream_weights_", "/", "self", ".", "weight_norm_", ")", ")" ]
StochasticMux chooses its next sample stream randomly
[ "StochasticMux", "chooses", "its", "next", "sample", "stream", "randomly" ]
786e2b5f882d13ea563769fbc7ad0a0a10c3553d
https://github.com/pescadores/pescador/blob/786e2b5f882d13ea563769fbc7ad0a0a10c3553d/pescador/mux.py#L713-L717
train
pescadores/pescador
pescador/mux.py
StochasticMux._activate_stream
def _activate_stream(self, idx): '''Randomly select and create a stream. StochasticMux adds mode handling to _activate_stream, making it so that if we're not sampling "with_replacement", the distribution for this chosen streamer is set to 0, causing the streamer not to be available until it is exhausted. Parameters ---------- idx : int, [0:n_streams - 1] The stream index to replace ''' # Get the number of samples for this streamer. n_samples_to_stream = None if self.rate is not None: n_samples_to_stream = 1 + self.rng.poisson(lam=self.rate) # instantiate a new streamer streamer = self.streamers[idx].iterate(max_iter=n_samples_to_stream) weight = self.weights[idx] # If we're sampling without replacement, zero this one out # This effectively disables this stream as soon as it is chosen, # preventing it from being chosen again (unless it is revived) # if not self.with_replacement: if self.mode != "with_replacement": self.distribution_[idx] = 0.0 # Correct the distribution if (self.distribution_ > 0).any(): self.distribution_[:] /= np.sum(self.distribution_) return streamer, weight
python
def _activate_stream(self, idx): '''Randomly select and create a stream. StochasticMux adds mode handling to _activate_stream, making it so that if we're not sampling "with_replacement", the distribution for this chosen streamer is set to 0, causing the streamer not to be available until it is exhausted. Parameters ---------- idx : int, [0:n_streams - 1] The stream index to replace ''' # Get the number of samples for this streamer. n_samples_to_stream = None if self.rate is not None: n_samples_to_stream = 1 + self.rng.poisson(lam=self.rate) # instantiate a new streamer streamer = self.streamers[idx].iterate(max_iter=n_samples_to_stream) weight = self.weights[idx] # If we're sampling without replacement, zero this one out # This effectively disables this stream as soon as it is chosen, # preventing it from being chosen again (unless it is revived) # if not self.with_replacement: if self.mode != "with_replacement": self.distribution_[idx] = 0.0 # Correct the distribution if (self.distribution_ > 0).any(): self.distribution_[:] /= np.sum(self.distribution_) return streamer, weight
[ "def", "_activate_stream", "(", "self", ",", "idx", ")", ":", "# Get the number of samples for this streamer.", "n_samples_to_stream", "=", "None", "if", "self", ".", "rate", "is", "not", "None", ":", "n_samples_to_stream", "=", "1", "+", "self", ".", "rng", ".", "poisson", "(", "lam", "=", "self", ".", "rate", ")", "# instantiate a new streamer", "streamer", "=", "self", ".", "streamers", "[", "idx", "]", ".", "iterate", "(", "max_iter", "=", "n_samples_to_stream", ")", "weight", "=", "self", ".", "weights", "[", "idx", "]", "# If we're sampling without replacement, zero this one out", "# This effectively disables this stream as soon as it is chosen,", "# preventing it from being chosen again (unless it is revived)", "# if not self.with_replacement:", "if", "self", ".", "mode", "!=", "\"with_replacement\"", ":", "self", ".", "distribution_", "[", "idx", "]", "=", "0.0", "# Correct the distribution", "if", "(", "self", ".", "distribution_", ">", "0", ")", ".", "any", "(", ")", ":", "self", ".", "distribution_", "[", ":", "]", "/=", "np", ".", "sum", "(", "self", ".", "distribution_", ")", "return", "streamer", ",", "weight" ]
Randomly select and create a stream. StochasticMux adds mode handling to _activate_stream, making it so that if we're not sampling "with_replacement", the distribution for this chosen streamer is set to 0, causing the streamer not to be available until it is exhausted. Parameters ---------- idx : int, [0:n_streams - 1] The stream index to replace
[ "Randomly", "select", "and", "create", "a", "stream", "." ]
786e2b5f882d13ea563769fbc7ad0a0a10c3553d
https://github.com/pescadores/pescador/blob/786e2b5f882d13ea563769fbc7ad0a0a10c3553d/pescador/mux.py#L738-L771
train
pescadores/pescador
pescador/mux.py
StochasticMux._new_stream
def _new_stream(self, idx): '''Randomly select and create a new stream. Parameters ---------- idx : int, [0:n_streams - 1] The stream index to replace ''' # Choose the stream index from the candidate pool self.stream_idxs_[idx] = self.rng.choice( self.n_streams, p=self.distribution_) # Activate the Streamer, and get the weights self.streams_[idx], self.stream_weights_[idx] = self._activate_stream( self.stream_idxs_[idx]) # Reset the sample count to zero self.stream_counts_[idx] = 0
python
def _new_stream(self, idx): '''Randomly select and create a new stream. Parameters ---------- idx : int, [0:n_streams - 1] The stream index to replace ''' # Choose the stream index from the candidate pool self.stream_idxs_[idx] = self.rng.choice( self.n_streams, p=self.distribution_) # Activate the Streamer, and get the weights self.streams_[idx], self.stream_weights_[idx] = self._activate_stream( self.stream_idxs_[idx]) # Reset the sample count to zero self.stream_counts_[idx] = 0
[ "def", "_new_stream", "(", "self", ",", "idx", ")", ":", "# Choose the stream index from the candidate pool", "self", ".", "stream_idxs_", "[", "idx", "]", "=", "self", ".", "rng", ".", "choice", "(", "self", ".", "n_streams", ",", "p", "=", "self", ".", "distribution_", ")", "# Activate the Streamer, and get the weights", "self", ".", "streams_", "[", "idx", "]", ",", "self", ".", "stream_weights_", "[", "idx", "]", "=", "self", ".", "_activate_stream", "(", "self", ".", "stream_idxs_", "[", "idx", "]", ")", "# Reset the sample count to zero", "self", ".", "stream_counts_", "[", "idx", "]", "=", "0" ]
Randomly select and create a new stream. Parameters ---------- idx : int, [0:n_streams - 1] The stream index to replace
[ "Randomly", "select", "and", "create", "a", "new", "stream", "." ]
786e2b5f882d13ea563769fbc7ad0a0a10c3553d
https://github.com/pescadores/pescador/blob/786e2b5f882d13ea563769fbc7ad0a0a10c3553d/pescador/mux.py#L773-L790
train
pescadores/pescador
pescador/mux.py
ShuffledMux._activate
def _activate(self): """ShuffledMux's activate is similar to StochasticMux, but there is no 'n_active', since all the streams are always available. """ self.streams_ = [None] * self.n_streams # Weights of the active streams. # Once a stream is exhausted, it is set to 0. # Upon activation, this is just a copy of self.weights. self.stream_weights_ = np.array(self.weights, dtype=float) # How many samples have been drawn from each (active) stream. self.stream_counts_ = np.zeros(self.n_streams, dtype=int) # Initialize each active stream. for idx in range(self.n_streams): # Setup a new streamer at this index. self._new_stream(idx) self.weight_norm_ = np.sum(self.stream_weights_)
python
def _activate(self): """ShuffledMux's activate is similar to StochasticMux, but there is no 'n_active', since all the streams are always available. """ self.streams_ = [None] * self.n_streams # Weights of the active streams. # Once a stream is exhausted, it is set to 0. # Upon activation, this is just a copy of self.weights. self.stream_weights_ = np.array(self.weights, dtype=float) # How many samples have been drawn from each (active) stream. self.stream_counts_ = np.zeros(self.n_streams, dtype=int) # Initialize each active stream. for idx in range(self.n_streams): # Setup a new streamer at this index. self._new_stream(idx) self.weight_norm_ = np.sum(self.stream_weights_)
[ "def", "_activate", "(", "self", ")", ":", "self", ".", "streams_", "=", "[", "None", "]", "*", "self", ".", "n_streams", "# Weights of the active streams.", "# Once a stream is exhausted, it is set to 0.", "# Upon activation, this is just a copy of self.weights.", "self", ".", "stream_weights_", "=", "np", ".", "array", "(", "self", ".", "weights", ",", "dtype", "=", "float", ")", "# How many samples have been drawn from each (active) stream.", "self", ".", "stream_counts_", "=", "np", ".", "zeros", "(", "self", ".", "n_streams", ",", "dtype", "=", "int", ")", "# Initialize each active stream.", "for", "idx", "in", "range", "(", "self", ".", "n_streams", ")", ":", "# Setup a new streamer at this index.", "self", ".", "_new_stream", "(", "idx", ")", "self", ".", "weight_norm_", "=", "np", ".", "sum", "(", "self", ".", "stream_weights_", ")" ]
ShuffledMux's activate is similar to StochasticMux, but there is no 'n_active', since all the streams are always available.
[ "ShuffledMux", "s", "activate", "is", "similar", "to", "StochasticMux", "but", "there", "is", "no", "n_active", "since", "all", "the", "streams", "are", "always", "available", "." ]
786e2b5f882d13ea563769fbc7ad0a0a10c3553d
https://github.com/pescadores/pescador/blob/786e2b5f882d13ea563769fbc7ad0a0a10c3553d/pescador/mux.py#L888-L906
train
pescadores/pescador
pescador/mux.py
ShuffledMux._next_sample_index
def _next_sample_index(self): """ShuffledMux chooses its next sample stream randomly, conditioned on the stream weights. """ return self.rng.choice(self.n_streams, p=(self.stream_weights_ / self.weight_norm_))
python
def _next_sample_index(self): """ShuffledMux chooses its next sample stream randomly, conditioned on the stream weights. """ return self.rng.choice(self.n_streams, p=(self.stream_weights_ / self.weight_norm_))
[ "def", "_next_sample_index", "(", "self", ")", ":", "return", "self", ".", "rng", ".", "choice", "(", "self", ".", "n_streams", ",", "p", "=", "(", "self", ".", "stream_weights_", "/", "self", ".", "weight_norm_", ")", ")" ]
ShuffledMux chooses its next sample stream randomly, conditioned on the stream weights.
[ "ShuffledMux", "chooses", "its", "next", "sample", "stream", "randomly", "conditioned", "on", "the", "stream", "weights", "." ]
786e2b5f882d13ea563769fbc7ad0a0a10c3553d
https://github.com/pescadores/pescador/blob/786e2b5f882d13ea563769fbc7ad0a0a10c3553d/pescador/mux.py#L917-L923
train
pescadores/pescador
pescador/mux.py
ShuffledMux._new_stream
def _new_stream(self, idx): '''Randomly select and create a new stream. Parameters ---------- idx : int, [0:n_streams - 1] The stream index to replace ''' # Don't activate the stream if the weight is 0 or None if self.stream_weights_[idx]: self.streams_[idx] = self.streamers[idx].iterate() else: self.streams_[idx] = None # Reset the sample count to zero self.stream_counts_[idx] = 0
python
def _new_stream(self, idx): '''Randomly select and create a new stream. Parameters ---------- idx : int, [0:n_streams - 1] The stream index to replace ''' # Don't activate the stream if the weight is 0 or None if self.stream_weights_[idx]: self.streams_[idx] = self.streamers[idx].iterate() else: self.streams_[idx] = None # Reset the sample count to zero self.stream_counts_[idx] = 0
[ "def", "_new_stream", "(", "self", ",", "idx", ")", ":", "# Don't activate the stream if the weight is 0 or None", "if", "self", ".", "stream_weights_", "[", "idx", "]", ":", "self", ".", "streams_", "[", "idx", "]", "=", "self", ".", "streamers", "[", "idx", "]", ".", "iterate", "(", ")", "else", ":", "self", ".", "streams_", "[", "idx", "]", "=", "None", "# Reset the sample count to zero", "self", ".", "stream_counts_", "[", "idx", "]", "=", "0" ]
Randomly select and create a new stream. Parameters ---------- idx : int, [0:n_streams - 1] The stream index to replace
[ "Randomly", "select", "and", "create", "a", "new", "stream", "." ]
786e2b5f882d13ea563769fbc7ad0a0a10c3553d
https://github.com/pescadores/pescador/blob/786e2b5f882d13ea563769fbc7ad0a0a10c3553d/pescador/mux.py#L932-L947
train
pescadores/pescador
pescador/mux.py
RoundRobinMux._next_sample_index
def _next_sample_index(self): """Rotates through each active sampler by incrementing the index""" # Return the next streamer index where the streamer is not None, # wrapping around. idx = self.active_index_ self.active_index_ += 1 if self.active_index_ >= len(self.streams_): self.active_index_ = 0 # Continue to increment if this streamer is exhausted (None) # This should never be infinite looping; # the `_streamers_available` check happens immediately # before this, so there should always be at least one not-None # streamer. while self.streams_[idx] is None: idx = self.active_index_ self.active_index_ += 1 if self.active_index_ >= len(self.streams_): self.active_index_ = 0 return idx
python
def _next_sample_index(self): """Rotates through each active sampler by incrementing the index""" # Return the next streamer index where the streamer is not None, # wrapping around. idx = self.active_index_ self.active_index_ += 1 if self.active_index_ >= len(self.streams_): self.active_index_ = 0 # Continue to increment if this streamer is exhausted (None) # This should never be infinite looping; # the `_streamers_available` check happens immediately # before this, so there should always be at least one not-None # streamer. while self.streams_[idx] is None: idx = self.active_index_ self.active_index_ += 1 if self.active_index_ >= len(self.streams_): self.active_index_ = 0 return idx
[ "def", "_next_sample_index", "(", "self", ")", ":", "# Return the next streamer index where the streamer is not None,", "# wrapping around.", "idx", "=", "self", ".", "active_index_", "self", ".", "active_index_", "+=", "1", "if", "self", ".", "active_index_", ">=", "len", "(", "self", ".", "streams_", ")", ":", "self", ".", "active_index_", "=", "0", "# Continue to increment if this streamer is exhausted (None)", "# This should never be infinite looping;", "# the `_streamers_available` check happens immediately", "# before this, so there should always be at least one not-None", "# streamer.", "while", "self", ".", "streams_", "[", "idx", "]", "is", "None", ":", "idx", "=", "self", ".", "active_index_", "self", ".", "active_index_", "+=", "1", "if", "self", ".", "active_index_", ">=", "len", "(", "self", ".", "streams_", ")", ":", "self", ".", "active_index_", "=", "0", "return", "idx" ]
Rotates through each active sampler by incrementing the index
[ "Rotates", "through", "each", "active", "sampler", "by", "incrementing", "the", "index" ]
786e2b5f882d13ea563769fbc7ad0a0a10c3553d
https://github.com/pescadores/pescador/blob/786e2b5f882d13ea563769fbc7ad0a0a10c3553d/pescador/mux.py#L1058-L1080
train
pescadores/pescador
pescador/mux.py
RoundRobinMux._new_stream
def _new_stream(self, idx): """Activate a new stream, given the index into the stream pool. BaseMux's _new_stream simply chooses a new stream and activates it. For special behavior (ie Weighted streams), you must override this in a child class. Parameters ---------- idx : int, [0:n_streams - 1] The stream index to replace """ # Get the stream index from the candidate pool stream_index = self.stream_idxs_[idx] # Activate the Streamer, and get the weights self.streams_[idx] = self.streamers[stream_index].iterate() # Reset the sample count to zero self.stream_counts_[idx] = 0
python
def _new_stream(self, idx): """Activate a new stream, given the index into the stream pool. BaseMux's _new_stream simply chooses a new stream and activates it. For special behavior (ie Weighted streams), you must override this in a child class. Parameters ---------- idx : int, [0:n_streams - 1] The stream index to replace """ # Get the stream index from the candidate pool stream_index = self.stream_idxs_[idx] # Activate the Streamer, and get the weights self.streams_[idx] = self.streamers[stream_index].iterate() # Reset the sample count to zero self.stream_counts_[idx] = 0
[ "def", "_new_stream", "(", "self", ",", "idx", ")", ":", "# Get the stream index from the candidate pool", "stream_index", "=", "self", ".", "stream_idxs_", "[", "idx", "]", "# Activate the Streamer, and get the weights", "self", ".", "streams_", "[", "idx", "]", "=", "self", ".", "streamers", "[", "stream_index", "]", ".", "iterate", "(", ")", "# Reset the sample count to zero", "self", ".", "stream_counts_", "[", "idx", "]", "=", "0" ]
Activate a new stream, given the index into the stream pool. BaseMux's _new_stream simply chooses a new stream and activates it. For special behavior (ie Weighted streams), you must override this in a child class. Parameters ---------- idx : int, [0:n_streams - 1] The stream index to replace
[ "Activate", "a", "new", "stream", "given", "the", "index", "into", "the", "stream", "pool", "." ]
786e2b5f882d13ea563769fbc7ad0a0a10c3553d
https://github.com/pescadores/pescador/blob/786e2b5f882d13ea563769fbc7ad0a0a10c3553d/pescador/mux.py#L1082-L1101
train
pescadores/pescador
pescador/mux.py
RoundRobinMux._replace_stream
def _replace_stream(self, idx=None): """Called by `BaseMux`'s iterate() when a stream is exhausted. Set the stream to None so it is ignored once exhausted. Parameters ---------- idx : int or None Raises ------ StopIteration If all streams are consumed, and `mode`=="exahustive" """ self.streams_[idx] = None # Check if we've now exhausted all the streams. if not self._streamers_available(): if self.mode == 'exhaustive': pass elif self.mode == "cycle": self._setup_streams(permute=False) elif self.mode == "permuted_cycle": self._setup_streams(permute=True)
python
def _replace_stream(self, idx=None): """Called by `BaseMux`'s iterate() when a stream is exhausted. Set the stream to None so it is ignored once exhausted. Parameters ---------- idx : int or None Raises ------ StopIteration If all streams are consumed, and `mode`=="exahustive" """ self.streams_[idx] = None # Check if we've now exhausted all the streams. if not self._streamers_available(): if self.mode == 'exhaustive': pass elif self.mode == "cycle": self._setup_streams(permute=False) elif self.mode == "permuted_cycle": self._setup_streams(permute=True)
[ "def", "_replace_stream", "(", "self", ",", "idx", "=", "None", ")", ":", "self", ".", "streams_", "[", "idx", "]", "=", "None", "# Check if we've now exhausted all the streams.", "if", "not", "self", ".", "_streamers_available", "(", ")", ":", "if", "self", ".", "mode", "==", "'exhaustive'", ":", "pass", "elif", "self", ".", "mode", "==", "\"cycle\"", ":", "self", ".", "_setup_streams", "(", "permute", "=", "False", ")", "elif", "self", ".", "mode", "==", "\"permuted_cycle\"", ":", "self", ".", "_setup_streams", "(", "permute", "=", "True", ")" ]
Called by `BaseMux`'s iterate() when a stream is exhausted. Set the stream to None so it is ignored once exhausted. Parameters ---------- idx : int or None Raises ------ StopIteration If all streams are consumed, and `mode`=="exahustive"
[ "Called", "by", "BaseMux", "s", "iterate", "()", "when", "a", "stream", "is", "exhausted", ".", "Set", "the", "stream", "to", "None", "so", "it", "is", "ignored", "once", "exhausted", "." ]
786e2b5f882d13ea563769fbc7ad0a0a10c3553d
https://github.com/pescadores/pescador/blob/786e2b5f882d13ea563769fbc7ad0a0a10c3553d/pescador/mux.py#L1103-L1127
train
pescadores/pescador
pescador/mux.py
ChainMux._new_stream
def _new_stream(self): '''Grab the next stream from the input streamers, and start it. Raises ------ StopIteration When the input list or generator of streamers is complete, will raise a StopIteration. If `mode == cycle`, it will instead restart iterating from the beginning of the sequence. ''' try: # Advance the stream_generator_ to get the next available stream. # If successful, this will make self.chain_streamer_.active True next_stream = six.advance_iterator(self.stream_generator_) except StopIteration: # If running with cycle, restart the chain_streamer_ after # exhaustion. if self.mode == "cycle": self.stream_generator_ = self.chain_streamer_.iterate() # Try again to get the next stream; # if it fails this time, just let it raise the StopIteration; # this means the streams are probably dead or empty. next_stream = six.advance_iterator(self.stream_generator_) # If running in exhaustive mode else: # self.chain_streamer_ should no longer be active, so # the outer loop should fall out without running. next_stream = None if next_stream is not None: # Start that stream, and return it. streamer = next_stream.iterate() # Activate the Streamer self.streams_[0] = streamer # Reset the sample count to zero self.stream_counts_[0] = 0
python
def _new_stream(self): '''Grab the next stream from the input streamers, and start it. Raises ------ StopIteration When the input list or generator of streamers is complete, will raise a StopIteration. If `mode == cycle`, it will instead restart iterating from the beginning of the sequence. ''' try: # Advance the stream_generator_ to get the next available stream. # If successful, this will make self.chain_streamer_.active True next_stream = six.advance_iterator(self.stream_generator_) except StopIteration: # If running with cycle, restart the chain_streamer_ after # exhaustion. if self.mode == "cycle": self.stream_generator_ = self.chain_streamer_.iterate() # Try again to get the next stream; # if it fails this time, just let it raise the StopIteration; # this means the streams are probably dead or empty. next_stream = six.advance_iterator(self.stream_generator_) # If running in exhaustive mode else: # self.chain_streamer_ should no longer be active, so # the outer loop should fall out without running. next_stream = None if next_stream is not None: # Start that stream, and return it. streamer = next_stream.iterate() # Activate the Streamer self.streams_[0] = streamer # Reset the sample count to zero self.stream_counts_[0] = 0
[ "def", "_new_stream", "(", "self", ")", ":", "try", ":", "# Advance the stream_generator_ to get the next available stream.", "# If successful, this will make self.chain_streamer_.active True", "next_stream", "=", "six", ".", "advance_iterator", "(", "self", ".", "stream_generator_", ")", "except", "StopIteration", ":", "# If running with cycle, restart the chain_streamer_ after", "# exhaustion.", "if", "self", ".", "mode", "==", "\"cycle\"", ":", "self", ".", "stream_generator_", "=", "self", ".", "chain_streamer_", ".", "iterate", "(", ")", "# Try again to get the next stream;", "# if it fails this time, just let it raise the StopIteration;", "# this means the streams are probably dead or empty.", "next_stream", "=", "six", ".", "advance_iterator", "(", "self", ".", "stream_generator_", ")", "# If running in exhaustive mode", "else", ":", "# self.chain_streamer_ should no longer be active, so", "# the outer loop should fall out without running.", "next_stream", "=", "None", "if", "next_stream", "is", "not", "None", ":", "# Start that stream, and return it.", "streamer", "=", "next_stream", ".", "iterate", "(", ")", "# Activate the Streamer", "self", ".", "streams_", "[", "0", "]", "=", "streamer", "# Reset the sample count to zero", "self", ".", "stream_counts_", "[", "0", "]", "=", "0" ]
Grab the next stream from the input streamers, and start it. Raises ------ StopIteration When the input list or generator of streamers is complete, will raise a StopIteration. If `mode == cycle`, it will instead restart iterating from the beginning of the sequence.
[ "Grab", "the", "next", "stream", "from", "the", "input", "streamers", "and", "start", "it", "." ]
786e2b5f882d13ea563769fbc7ad0a0a10c3553d
https://github.com/pescadores/pescador/blob/786e2b5f882d13ea563769fbc7ad0a0a10c3553d/pescador/mux.py#L1247-L1287
train
pescadores/pescador
examples/mux/mux_files_example.py
split_and_save_datasets
def split_and_save_datasets(X, Y, paths): """Shuffle X and Y into n / len(paths) datasets, and save them to disk at the locations provided in paths. """ shuffled_idxs = np.random.permutation(np.arange(len(X))) for i in range(len(paths)): # Take every len(paths) item, starting at i. # len(paths) is 3, so this would be [0::3], [1::3], [2::3] X_i = X[shuffled_idxs[i::len(paths)]] Y_i = Y[shuffled_idxs[i::len(paths)]] np.savez(paths[i], X=X_i, Y=Y_i)
python
def split_and_save_datasets(X, Y, paths): """Shuffle X and Y into n / len(paths) datasets, and save them to disk at the locations provided in paths. """ shuffled_idxs = np.random.permutation(np.arange(len(X))) for i in range(len(paths)): # Take every len(paths) item, starting at i. # len(paths) is 3, so this would be [0::3], [1::3], [2::3] X_i = X[shuffled_idxs[i::len(paths)]] Y_i = Y[shuffled_idxs[i::len(paths)]] np.savez(paths[i], X=X_i, Y=Y_i)
[ "def", "split_and_save_datasets", "(", "X", ",", "Y", ",", "paths", ")", ":", "shuffled_idxs", "=", "np", ".", "random", ".", "permutation", "(", "np", ".", "arange", "(", "len", "(", "X", ")", ")", ")", "for", "i", "in", "range", "(", "len", "(", "paths", ")", ")", ":", "# Take every len(paths) item, starting at i.", "# len(paths) is 3, so this would be [0::3], [1::3], [2::3]", "X_i", "=", "X", "[", "shuffled_idxs", "[", "i", ":", ":", "len", "(", "paths", ")", "]", "]", "Y_i", "=", "Y", "[", "shuffled_idxs", "[", "i", ":", ":", "len", "(", "paths", ")", "]", "]", "np", ".", "savez", "(", "paths", "[", "i", "]", ",", "X", "=", "X_i", ",", "Y", "=", "Y_i", ")" ]
Shuffle X and Y into n / len(paths) datasets, and save them to disk at the locations provided in paths.
[ "Shuffle", "X", "and", "Y", "into", "n", "/", "len", "(", "paths", ")", "datasets", "and", "save", "them", "to", "disk", "at", "the", "locations", "provided", "in", "paths", "." ]
786e2b5f882d13ea563769fbc7ad0a0a10c3553d
https://github.com/pescadores/pescador/blob/786e2b5f882d13ea563769fbc7ad0a0a10c3553d/examples/mux/mux_files_example.py#L38-L49
train
pescadores/pescador
examples/mux/mux_files_example.py
npz_generator
def npz_generator(npz_path): """Generate data from an npz file.""" npz_data = np.load(npz_path) X = npz_data['X'] # Y is a binary maxtrix with shape=(n, k), each y will have shape=(k,) y = npz_data['Y'] n = X.shape[0] while True: i = np.random.randint(0, n) yield {'X': X[i], 'Y': y[i]}
python
def npz_generator(npz_path): """Generate data from an npz file.""" npz_data = np.load(npz_path) X = npz_data['X'] # Y is a binary maxtrix with shape=(n, k), each y will have shape=(k,) y = npz_data['Y'] n = X.shape[0] while True: i = np.random.randint(0, n) yield {'X': X[i], 'Y': y[i]}
[ "def", "npz_generator", "(", "npz_path", ")", ":", "npz_data", "=", "np", ".", "load", "(", "npz_path", ")", "X", "=", "npz_data", "[", "'X'", "]", "# Y is a binary maxtrix with shape=(n, k), each y will have shape=(k,)", "y", "=", "npz_data", "[", "'Y'", "]", "n", "=", "X", ".", "shape", "[", "0", "]", "while", "True", ":", "i", "=", "np", ".", "random", ".", "randint", "(", "0", ",", "n", ")", "yield", "{", "'X'", ":", "X", "[", "i", "]", ",", "'Y'", ":", "y", "[", "i", "]", "}" ]
Generate data from an npz file.
[ "Generate", "data", "from", "an", "npz", "file", "." ]
786e2b5f882d13ea563769fbc7ad0a0a10c3553d
https://github.com/pescadores/pescador/blob/786e2b5f882d13ea563769fbc7ad0a0a10c3553d/examples/mux/mux_files_example.py#L60-L71
train
vanheeringen-lab/gimmemotifs
gimmemotifs/utils.py
phyper
def phyper(k, good, bad, N): """ Current hypergeometric implementation in scipy is broken, so here's the correct version """ pvalues = [phyper_single(x, good, bad, N) for x in range(k + 1, N + 1)] return np.sum(pvalues)
python
def phyper(k, good, bad, N): """ Current hypergeometric implementation in scipy is broken, so here's the correct version """ pvalues = [phyper_single(x, good, bad, N) for x in range(k + 1, N + 1)] return np.sum(pvalues)
[ "def", "phyper", "(", "k", ",", "good", ",", "bad", ",", "N", ")", ":", "pvalues", "=", "[", "phyper_single", "(", "x", ",", "good", ",", "bad", ",", "N", ")", "for", "x", "in", "range", "(", "k", "+", "1", ",", "N", "+", "1", ")", "]", "return", "np", ".", "sum", "(", "pvalues", ")" ]
Current hypergeometric implementation in scipy is broken, so here's the correct version
[ "Current", "hypergeometric", "implementation", "in", "scipy", "is", "broken", "so", "here", "s", "the", "correct", "version" ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/utils.py#L77-L80
train
vanheeringen-lab/gimmemotifs
gimmemotifs/utils.py
write_equalwidth_bedfile
def write_equalwidth_bedfile(bedfile, width, outfile): """Read input from <bedfile>, set the width of all entries to <width> and write the result to <outfile>. Input file needs to be in BED or WIG format.""" BUFSIZE = 10000 f = open(bedfile) out = open(outfile, "w") lines = f.readlines(BUFSIZE) line_count = 0 while lines: for line in lines: line_count += 1 if not line.startswith("#") and not line.startswith("track") and not line.startswith("browser"): vals = line.strip().split("\t") try: start, end = int(vals[1]), int(vals[2]) except ValueError: print("Error on line %s while reading %s. Is the file in BED or WIG format?" % (line_count, bedfile)) sys.exit(1) start = (start + end) // 2 - (width // 2) # This shifts the center, but ensures the width is identical... maybe not ideal if start < 0: start = 0 end = start + width # Keep all the other information in the bedfile if it's there if len(vals) > 3: out.write("%s\t%s\t%s\t%s\n" % (vals[0], start, end, "\t".join(vals[3:]))) else: out.write("%s\t%s\t%s\n" % (vals[0], start, end)) lines = f.readlines(BUFSIZE) out.close() f.close()
python
def write_equalwidth_bedfile(bedfile, width, outfile): """Read input from <bedfile>, set the width of all entries to <width> and write the result to <outfile>. Input file needs to be in BED or WIG format.""" BUFSIZE = 10000 f = open(bedfile) out = open(outfile, "w") lines = f.readlines(BUFSIZE) line_count = 0 while lines: for line in lines: line_count += 1 if not line.startswith("#") and not line.startswith("track") and not line.startswith("browser"): vals = line.strip().split("\t") try: start, end = int(vals[1]), int(vals[2]) except ValueError: print("Error on line %s while reading %s. Is the file in BED or WIG format?" % (line_count, bedfile)) sys.exit(1) start = (start + end) // 2 - (width // 2) # This shifts the center, but ensures the width is identical... maybe not ideal if start < 0: start = 0 end = start + width # Keep all the other information in the bedfile if it's there if len(vals) > 3: out.write("%s\t%s\t%s\t%s\n" % (vals[0], start, end, "\t".join(vals[3:]))) else: out.write("%s\t%s\t%s\n" % (vals[0], start, end)) lines = f.readlines(BUFSIZE) out.close() f.close()
[ "def", "write_equalwidth_bedfile", "(", "bedfile", ",", "width", ",", "outfile", ")", ":", "BUFSIZE", "=", "10000", "f", "=", "open", "(", "bedfile", ")", "out", "=", "open", "(", "outfile", ",", "\"w\"", ")", "lines", "=", "f", ".", "readlines", "(", "BUFSIZE", ")", "line_count", "=", "0", "while", "lines", ":", "for", "line", "in", "lines", ":", "line_count", "+=", "1", "if", "not", "line", ".", "startswith", "(", "\"#\"", ")", "and", "not", "line", ".", "startswith", "(", "\"track\"", ")", "and", "not", "line", ".", "startswith", "(", "\"browser\"", ")", ":", "vals", "=", "line", ".", "strip", "(", ")", ".", "split", "(", "\"\\t\"", ")", "try", ":", "start", ",", "end", "=", "int", "(", "vals", "[", "1", "]", ")", ",", "int", "(", "vals", "[", "2", "]", ")", "except", "ValueError", ":", "print", "(", "\"Error on line %s while reading %s. Is the file in BED or WIG format?\"", "%", "(", "line_count", ",", "bedfile", ")", ")", "sys", ".", "exit", "(", "1", ")", "start", "=", "(", "start", "+", "end", ")", "//", "2", "-", "(", "width", "//", "2", ")", "# This shifts the center, but ensures the width is identical... maybe not ideal", "if", "start", "<", "0", ":", "start", "=", "0", "end", "=", "start", "+", "width", "# Keep all the other information in the bedfile if it's there", "if", "len", "(", "vals", ")", ">", "3", ":", "out", ".", "write", "(", "\"%s\\t%s\\t%s\\t%s\\n\"", "%", "(", "vals", "[", "0", "]", ",", "start", ",", "end", ",", "\"\\t\"", ".", "join", "(", "vals", "[", "3", ":", "]", ")", ")", ")", "else", ":", "out", ".", "write", "(", "\"%s\\t%s\\t%s\\n\"", "%", "(", "vals", "[", "0", "]", ",", "start", ",", "end", ")", ")", "lines", "=", "f", ".", "readlines", "(", "BUFSIZE", ")", "out", ".", "close", "(", ")", "f", ".", "close", "(", ")" ]
Read input from <bedfile>, set the width of all entries to <width> and write the result to <outfile>. Input file needs to be in BED or WIG format.
[ "Read", "input", "from", "<bedfile", ">", "set", "the", "width", "of", "all", "entries", "to", "<width", ">", "and", "write", "the", "result", "to", "<outfile", ">", ".", "Input", "file", "needs", "to", "be", "in", "BED", "or", "WIG", "format", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/utils.py#L143-L177
train
vanheeringen-lab/gimmemotifs
gimmemotifs/utils.py
calc_motif_enrichment
def calc_motif_enrichment(sample, background, mtc=None, len_sample=None, len_back=None): """Calculate enrichment based on hypergeometric distribution""" INF = "Inf" if mtc not in [None, "Bonferroni", "Benjamini-Hochberg", "None"]: raise RuntimeError("Unknown correction: %s" % mtc) sig = {} p_value = {} n_sample = {} n_back = {} if not(len_sample): len_sample = sample.seqn() if not(len_back): len_back = background.seqn() for motif in sample.motifs.keys(): p = "NA" s = "NA" q = len(sample.motifs[motif]) m = 0 if(background.motifs.get(motif)): m = len(background.motifs[motif]) n = len_back - m k = len_sample p = phyper(q - 1, m, n, k) if p != 0: s = -(log(p)/log(10)) else: s = INF else: s = INF p = 0.0 sig[motif] = s p_value[motif] = p n_sample[motif] = q n_back[motif] = m if mtc == "Bonferroni": for motif in p_value.keys(): if p_value[motif] != "NA": p_value[motif] = p_value[motif] * len(p_value.keys()) if p_value[motif] > 1: p_value[motif] = 1 elif mtc == "Benjamini-Hochberg": motifs = sorted(p_value.keys(), key=lambda x: -p_value[x]) l = len(p_value) c = l for m in motifs: if p_value[m] != "NA": p_value[m] = p_value[m] * l / c c -= 1 return (sig, p_value, n_sample, n_back)
python
def calc_motif_enrichment(sample, background, mtc=None, len_sample=None, len_back=None): """Calculate enrichment based on hypergeometric distribution""" INF = "Inf" if mtc not in [None, "Bonferroni", "Benjamini-Hochberg", "None"]: raise RuntimeError("Unknown correction: %s" % mtc) sig = {} p_value = {} n_sample = {} n_back = {} if not(len_sample): len_sample = sample.seqn() if not(len_back): len_back = background.seqn() for motif in sample.motifs.keys(): p = "NA" s = "NA" q = len(sample.motifs[motif]) m = 0 if(background.motifs.get(motif)): m = len(background.motifs[motif]) n = len_back - m k = len_sample p = phyper(q - 1, m, n, k) if p != 0: s = -(log(p)/log(10)) else: s = INF else: s = INF p = 0.0 sig[motif] = s p_value[motif] = p n_sample[motif] = q n_back[motif] = m if mtc == "Bonferroni": for motif in p_value.keys(): if p_value[motif] != "NA": p_value[motif] = p_value[motif] * len(p_value.keys()) if p_value[motif] > 1: p_value[motif] = 1 elif mtc == "Benjamini-Hochberg": motifs = sorted(p_value.keys(), key=lambda x: -p_value[x]) l = len(p_value) c = l for m in motifs: if p_value[m] != "NA": p_value[m] = p_value[m] * l / c c -= 1 return (sig, p_value, n_sample, n_back)
[ "def", "calc_motif_enrichment", "(", "sample", ",", "background", ",", "mtc", "=", "None", ",", "len_sample", "=", "None", ",", "len_back", "=", "None", ")", ":", "INF", "=", "\"Inf\"", "if", "mtc", "not", "in", "[", "None", ",", "\"Bonferroni\"", ",", "\"Benjamini-Hochberg\"", ",", "\"None\"", "]", ":", "raise", "RuntimeError", "(", "\"Unknown correction: %s\"", "%", "mtc", ")", "sig", "=", "{", "}", "p_value", "=", "{", "}", "n_sample", "=", "{", "}", "n_back", "=", "{", "}", "if", "not", "(", "len_sample", ")", ":", "len_sample", "=", "sample", ".", "seqn", "(", ")", "if", "not", "(", "len_back", ")", ":", "len_back", "=", "background", ".", "seqn", "(", ")", "for", "motif", "in", "sample", ".", "motifs", ".", "keys", "(", ")", ":", "p", "=", "\"NA\"", "s", "=", "\"NA\"", "q", "=", "len", "(", "sample", ".", "motifs", "[", "motif", "]", ")", "m", "=", "0", "if", "(", "background", ".", "motifs", ".", "get", "(", "motif", ")", ")", ":", "m", "=", "len", "(", "background", ".", "motifs", "[", "motif", "]", ")", "n", "=", "len_back", "-", "m", "k", "=", "len_sample", "p", "=", "phyper", "(", "q", "-", "1", ",", "m", ",", "n", ",", "k", ")", "if", "p", "!=", "0", ":", "s", "=", "-", "(", "log", "(", "p", ")", "/", "log", "(", "10", ")", ")", "else", ":", "s", "=", "INF", "else", ":", "s", "=", "INF", "p", "=", "0.0", "sig", "[", "motif", "]", "=", "s", "p_value", "[", "motif", "]", "=", "p", "n_sample", "[", "motif", "]", "=", "q", "n_back", "[", "motif", "]", "=", "m", "if", "mtc", "==", "\"Bonferroni\"", ":", "for", "motif", "in", "p_value", ".", "keys", "(", ")", ":", "if", "p_value", "[", "motif", "]", "!=", "\"NA\"", ":", "p_value", "[", "motif", "]", "=", "p_value", "[", "motif", "]", "*", "len", "(", "p_value", ".", "keys", "(", ")", ")", "if", "p_value", "[", "motif", "]", ">", "1", ":", "p_value", "[", "motif", "]", "=", "1", "elif", "mtc", "==", "\"Benjamini-Hochberg\"", ":", "motifs", "=", "sorted", "(", "p_value", ".", "keys", "(", ")", ",", "key", 
"=", "lambda", "x", ":", "-", "p_value", "[", "x", "]", ")", "l", "=", "len", "(", "p_value", ")", "c", "=", "l", "for", "m", "in", "motifs", ":", "if", "p_value", "[", "m", "]", "!=", "\"NA\"", ":", "p_value", "[", "m", "]", "=", "p_value", "[", "m", "]", "*", "l", "/", "c", "c", "-=", "1", "return", "(", "sig", ",", "p_value", ",", "n_sample", ",", "n_back", ")" ]
Calculate enrichment based on hypergeometric distribution
[ "Calculate", "enrichment", "based", "on", "hypergeometric", "distribution" ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/utils.py#L264-L321
train
vanheeringen-lab/gimmemotifs
gimmemotifs/utils.py
parse_cutoff
def parse_cutoff(motifs, cutoff, default=0.9): """ Provide either a file with one cutoff per motif or a single cutoff returns a hash with motif id as key and cutoff as value """ cutoffs = {} if os.path.isfile(str(cutoff)): for i,line in enumerate(open(cutoff)): if line != "Motif\tScore\tCutoff\n": try: motif,_,c = line.strip().split("\t") c = float(c) cutoffs[motif] = c except Exception as e: sys.stderr.write("Error parsing cutoff file, line {0}: {1}\n".format(e, i + 1)) sys.exit(1) else: for motif in motifs: cutoffs[motif.id] = float(cutoff) for motif in motifs: if not motif.id in cutoffs: sys.stderr.write("No cutoff found for {0}, using default {1}\n".format(motif.id, default)) cutoffs[motif.id] = default return cutoffs
python
def parse_cutoff(motifs, cutoff, default=0.9): """ Provide either a file with one cutoff per motif or a single cutoff returns a hash with motif id as key and cutoff as value """ cutoffs = {} if os.path.isfile(str(cutoff)): for i,line in enumerate(open(cutoff)): if line != "Motif\tScore\tCutoff\n": try: motif,_,c = line.strip().split("\t") c = float(c) cutoffs[motif] = c except Exception as e: sys.stderr.write("Error parsing cutoff file, line {0}: {1}\n".format(e, i + 1)) sys.exit(1) else: for motif in motifs: cutoffs[motif.id] = float(cutoff) for motif in motifs: if not motif.id in cutoffs: sys.stderr.write("No cutoff found for {0}, using default {1}\n".format(motif.id, default)) cutoffs[motif.id] = default return cutoffs
[ "def", "parse_cutoff", "(", "motifs", ",", "cutoff", ",", "default", "=", "0.9", ")", ":", "cutoffs", "=", "{", "}", "if", "os", ".", "path", ".", "isfile", "(", "str", "(", "cutoff", ")", ")", ":", "for", "i", ",", "line", "in", "enumerate", "(", "open", "(", "cutoff", ")", ")", ":", "if", "line", "!=", "\"Motif\\tScore\\tCutoff\\n\"", ":", "try", ":", "motif", ",", "_", ",", "c", "=", "line", ".", "strip", "(", ")", ".", "split", "(", "\"\\t\"", ")", "c", "=", "float", "(", "c", ")", "cutoffs", "[", "motif", "]", "=", "c", "except", "Exception", "as", "e", ":", "sys", ".", "stderr", ".", "write", "(", "\"Error parsing cutoff file, line {0}: {1}\\n\"", ".", "format", "(", "e", ",", "i", "+", "1", ")", ")", "sys", ".", "exit", "(", "1", ")", "else", ":", "for", "motif", "in", "motifs", ":", "cutoffs", "[", "motif", ".", "id", "]", "=", "float", "(", "cutoff", ")", "for", "motif", "in", "motifs", ":", "if", "not", "motif", ".", "id", "in", "cutoffs", ":", "sys", ".", "stderr", ".", "write", "(", "\"No cutoff found for {0}, using default {1}\\n\"", ".", "format", "(", "motif", ".", "id", ",", "default", ")", ")", "cutoffs", "[", "motif", ".", "id", "]", "=", "default", "return", "cutoffs" ]
Provide either a file with one cutoff per motif or a single cutoff returns a hash with motif id as key and cutoff as value
[ "Provide", "either", "a", "file", "with", "one", "cutoff", "per", "motif", "or", "a", "single", "cutoff", "returns", "a", "hash", "with", "motif", "id", "as", "key", "and", "cutoff", "as", "value" ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/utils.py#L400-L424
train
vanheeringen-lab/gimmemotifs
gimmemotifs/utils.py
determine_file_type
def determine_file_type(fname): """ Detect file type. The following file types are supported: BED, narrowPeak, FASTA, list of chr:start-end regions If the extension is bed, fa, fasta or narrowPeak, we will believe this without checking! Parameters ---------- fname : str File name. Returns ------- filetype : str Filename in lower-case. """ if not (isinstance(fname, str) or isinstance(fname, unicode)): raise ValueError("{} is not a file name!", fname) if not os.path.isfile(fname): raise ValueError("{} is not a file!", fname) ext = os.path.splitext(fname)[1].lower() if ext in ["bed"]: return "bed" elif ext in ["fa", "fasta"]: return "fasta" elif ext in ["narrowpeak"]: return "narrowpeak" try: Fasta(fname) return "fasta" except: pass # Read first line that is not a comment or an UCSC-specific line p = re.compile(r'^(#|track|browser)') with open(fname) as f: for line in f.readlines(): line = line.strip() if not p.search(line): break region_p = re.compile(r'^(.+):(\d+)-(\d+)$') if region_p.search(line): return "region" else: vals = line.split("\t") if len(vals) >= 3: try: _, _ = int(vals[1]), int(vals[2]) except ValueError: return "unknown" if len(vals) == 10: try: _, _ = int(vals[4]), int(vals[9]) return "narrowpeak" except ValueError: # As far as I know there is no 10-column BED format return "unknown" pass return "bed" # Catch-all return "unknown"
python
def determine_file_type(fname): """ Detect file type. The following file types are supported: BED, narrowPeak, FASTA, list of chr:start-end regions If the extension is bed, fa, fasta or narrowPeak, we will believe this without checking! Parameters ---------- fname : str File name. Returns ------- filetype : str Filename in lower-case. """ if not (isinstance(fname, str) or isinstance(fname, unicode)): raise ValueError("{} is not a file name!", fname) if not os.path.isfile(fname): raise ValueError("{} is not a file!", fname) ext = os.path.splitext(fname)[1].lower() if ext in ["bed"]: return "bed" elif ext in ["fa", "fasta"]: return "fasta" elif ext in ["narrowpeak"]: return "narrowpeak" try: Fasta(fname) return "fasta" except: pass # Read first line that is not a comment or an UCSC-specific line p = re.compile(r'^(#|track|browser)') with open(fname) as f: for line in f.readlines(): line = line.strip() if not p.search(line): break region_p = re.compile(r'^(.+):(\d+)-(\d+)$') if region_p.search(line): return "region" else: vals = line.split("\t") if len(vals) >= 3: try: _, _ = int(vals[1]), int(vals[2]) except ValueError: return "unknown" if len(vals) == 10: try: _, _ = int(vals[4]), int(vals[9]) return "narrowpeak" except ValueError: # As far as I know there is no 10-column BED format return "unknown" pass return "bed" # Catch-all return "unknown"
[ "def", "determine_file_type", "(", "fname", ")", ":", "if", "not", "(", "isinstance", "(", "fname", ",", "str", ")", "or", "isinstance", "(", "fname", ",", "unicode", ")", ")", ":", "raise", "ValueError", "(", "\"{} is not a file name!\"", ",", "fname", ")", "if", "not", "os", ".", "path", ".", "isfile", "(", "fname", ")", ":", "raise", "ValueError", "(", "\"{} is not a file!\"", ",", "fname", ")", "ext", "=", "os", ".", "path", ".", "splitext", "(", "fname", ")", "[", "1", "]", ".", "lower", "(", ")", "if", "ext", "in", "[", "\"bed\"", "]", ":", "return", "\"bed\"", "elif", "ext", "in", "[", "\"fa\"", ",", "\"fasta\"", "]", ":", "return", "\"fasta\"", "elif", "ext", "in", "[", "\"narrowpeak\"", "]", ":", "return", "\"narrowpeak\"", "try", ":", "Fasta", "(", "fname", ")", "return", "\"fasta\"", "except", ":", "pass", "# Read first line that is not a comment or an UCSC-specific line", "p", "=", "re", ".", "compile", "(", "r'^(#|track|browser)'", ")", "with", "open", "(", "fname", ")", "as", "f", ":", "for", "line", "in", "f", ".", "readlines", "(", ")", ":", "line", "=", "line", ".", "strip", "(", ")", "if", "not", "p", ".", "search", "(", "line", ")", ":", "break", "region_p", "=", "re", ".", "compile", "(", "r'^(.+):(\\d+)-(\\d+)$'", ")", "if", "region_p", ".", "search", "(", "line", ")", ":", "return", "\"region\"", "else", ":", "vals", "=", "line", ".", "split", "(", "\"\\t\"", ")", "if", "len", "(", "vals", ")", ">=", "3", ":", "try", ":", "_", ",", "_", "=", "int", "(", "vals", "[", "1", "]", ")", ",", "int", "(", "vals", "[", "2", "]", ")", "except", "ValueError", ":", "return", "\"unknown\"", "if", "len", "(", "vals", ")", "==", "10", ":", "try", ":", "_", ",", "_", "=", "int", "(", "vals", "[", "4", "]", ")", ",", "int", "(", "vals", "[", "9", "]", ")", "return", "\"narrowpeak\"", "except", "ValueError", ":", "# As far as I know there is no 10-column BED format", "return", "\"unknown\"", "pass", "return", "\"bed\"", "# Catch-all", "return", "\"unknown\"" ]
Detect file type. The following file types are supported: BED, narrowPeak, FASTA, list of chr:start-end regions If the extension is bed, fa, fasta or narrowPeak, we will believe this without checking! Parameters ---------- fname : str File name. Returns ------- filetype : str Filename in lower-case.
[ "Detect", "file", "type", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/utils.py#L495-L562
train
vanheeringen-lab/gimmemotifs
gimmemotifs/utils.py
get_seqs_type
def get_seqs_type(seqs): """ automagically determine input type the following types are detected: - Fasta object - FASTA file - list of regions - region file - BED file """ region_p = re.compile(r'^(.+):(\d+)-(\d+)$') if isinstance(seqs, Fasta): return "fasta" elif isinstance(seqs, list): if len(seqs) == 0: raise ValueError("empty list of sequences to scan") else: if region_p.search(seqs[0]): return "regions" else: raise ValueError("unknown region type") elif isinstance(seqs, str) or isinstance(seqs, unicode): if os.path.isfile(seqs): ftype = determine_file_type(seqs) if ftype == "unknown": raise ValueError("unknown type") elif ftype == "narrowpeak": raise ValueError("narrowPeak not yet supported in this function") else: return ftype + "file" else: raise ValueError("no file found with name {}".format(seqs)) else: raise ValueError("unknown type {}".format(type(seqs).__name__))
python
def get_seqs_type(seqs): """ automagically determine input type the following types are detected: - Fasta object - FASTA file - list of regions - region file - BED file """ region_p = re.compile(r'^(.+):(\d+)-(\d+)$') if isinstance(seqs, Fasta): return "fasta" elif isinstance(seqs, list): if len(seqs) == 0: raise ValueError("empty list of sequences to scan") else: if region_p.search(seqs[0]): return "regions" else: raise ValueError("unknown region type") elif isinstance(seqs, str) or isinstance(seqs, unicode): if os.path.isfile(seqs): ftype = determine_file_type(seqs) if ftype == "unknown": raise ValueError("unknown type") elif ftype == "narrowpeak": raise ValueError("narrowPeak not yet supported in this function") else: return ftype + "file" else: raise ValueError("no file found with name {}".format(seqs)) else: raise ValueError("unknown type {}".format(type(seqs).__name__))
[ "def", "get_seqs_type", "(", "seqs", ")", ":", "region_p", "=", "re", ".", "compile", "(", "r'^(.+):(\\d+)-(\\d+)$'", ")", "if", "isinstance", "(", "seqs", ",", "Fasta", ")", ":", "return", "\"fasta\"", "elif", "isinstance", "(", "seqs", ",", "list", ")", ":", "if", "len", "(", "seqs", ")", "==", "0", ":", "raise", "ValueError", "(", "\"empty list of sequences to scan\"", ")", "else", ":", "if", "region_p", ".", "search", "(", "seqs", "[", "0", "]", ")", ":", "return", "\"regions\"", "else", ":", "raise", "ValueError", "(", "\"unknown region type\"", ")", "elif", "isinstance", "(", "seqs", ",", "str", ")", "or", "isinstance", "(", "seqs", ",", "unicode", ")", ":", "if", "os", ".", "path", ".", "isfile", "(", "seqs", ")", ":", "ftype", "=", "determine_file_type", "(", "seqs", ")", "if", "ftype", "==", "\"unknown\"", ":", "raise", "ValueError", "(", "\"unknown type\"", ")", "elif", "ftype", "==", "\"narrowpeak\"", ":", "raise", "ValueError", "(", "\"narrowPeak not yet supported in this function\"", ")", "else", ":", "return", "ftype", "+", "\"file\"", "else", ":", "raise", "ValueError", "(", "\"no file found with name {}\"", ".", "format", "(", "seqs", ")", ")", "else", ":", "raise", "ValueError", "(", "\"unknown type {}\"", ".", "format", "(", "type", "(", "seqs", ")", ".", "__name__", ")", ")" ]
automagically determine input type the following types are detected: - Fasta object - FASTA file - list of regions - region file - BED file
[ "automagically", "determine", "input", "type", "the", "following", "types", "are", "detected", ":", "-", "Fasta", "object", "-", "FASTA", "file", "-", "list", "of", "regions", "-", "region", "file", "-", "BED", "file" ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/utils.py#L565-L598
train
vanheeringen-lab/gimmemotifs
gimmemotifs/utils.py
file_checksum
def file_checksum(fname): """Return md5 checksum of file. Note: only works for files < 4GB. Parameters ---------- filename : str File used to calculate checksum. Returns ------- checkum : str """ size = os.path.getsize(fname) with open(fname, "r+") as f: checksum = hashlib.md5(mmap.mmap(f.fileno(), size)).hexdigest() return checksum
python
def file_checksum(fname): """Return md5 checksum of file. Note: only works for files < 4GB. Parameters ---------- filename : str File used to calculate checksum. Returns ------- checkum : str """ size = os.path.getsize(fname) with open(fname, "r+") as f: checksum = hashlib.md5(mmap.mmap(f.fileno(), size)).hexdigest() return checksum
[ "def", "file_checksum", "(", "fname", ")", ":", "size", "=", "os", ".", "path", ".", "getsize", "(", "fname", ")", "with", "open", "(", "fname", ",", "\"r+\"", ")", "as", "f", ":", "checksum", "=", "hashlib", ".", "md5", "(", "mmap", ".", "mmap", "(", "f", ".", "fileno", "(", ")", ",", "size", ")", ")", ".", "hexdigest", "(", ")", "return", "checksum" ]
Return md5 checksum of file. Note: only works for files < 4GB. Parameters ---------- filename : str File used to calculate checksum. Returns ------- checkum : str
[ "Return", "md5", "checksum", "of", "file", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/utils.py#L616-L633
train
vanheeringen-lab/gimmemotifs
gimmemotifs/genome_index.py
download_annotation
def download_annotation(genomebuild, gene_file): """ Download gene annotation from UCSC based on genomebuild. Will check UCSC, Ensembl and RefSeq annotation. Parameters ---------- genomebuild : str UCSC genome name. gene_file : str Output file name. """ pred_bin = "genePredToBed" pred = find_executable(pred_bin) if not pred: sys.stderr.write("{} not found in path!\n".format(pred_bin)) sys.exit(1) tmp = NamedTemporaryFile(delete=False, suffix=".gz") anno = [] f = urlopen(UCSC_GENE_URL.format(genomebuild)) p = re.compile(r'\w+.Gene.txt.gz') for line in f.readlines(): m = p.search(line.decode()) if m: anno.append(m.group(0)) sys.stderr.write("Retrieving gene annotation for {}\n".format(genomebuild)) url = "" for a in ANNOS: if a in anno: url = UCSC_GENE_URL.format(genomebuild) + a break if url: sys.stderr.write("Using {}\n".format(url)) urlretrieve( url, tmp.name ) with gzip.open(tmp.name) as f: cols = f.readline().decode(errors='ignore').split("\t") start_col = 1 for i,col in enumerate(cols): if col == "+" or col == "-": start_col = i - 1 break end_col = start_col + 10 cmd = "zcat {} | cut -f{}-{} | {} /dev/stdin {}" print(cmd.format(tmp.name, start_col, end_col, pred, gene_file)) sp.call(cmd.format( tmp.name, start_col, end_col, pred, gene_file), shell=True) else: sys.stderr.write("No annotation found!")
python
def download_annotation(genomebuild, gene_file): """ Download gene annotation from UCSC based on genomebuild. Will check UCSC, Ensembl and RefSeq annotation. Parameters ---------- genomebuild : str UCSC genome name. gene_file : str Output file name. """ pred_bin = "genePredToBed" pred = find_executable(pred_bin) if not pred: sys.stderr.write("{} not found in path!\n".format(pred_bin)) sys.exit(1) tmp = NamedTemporaryFile(delete=False, suffix=".gz") anno = [] f = urlopen(UCSC_GENE_URL.format(genomebuild)) p = re.compile(r'\w+.Gene.txt.gz') for line in f.readlines(): m = p.search(line.decode()) if m: anno.append(m.group(0)) sys.stderr.write("Retrieving gene annotation for {}\n".format(genomebuild)) url = "" for a in ANNOS: if a in anno: url = UCSC_GENE_URL.format(genomebuild) + a break if url: sys.stderr.write("Using {}\n".format(url)) urlretrieve( url, tmp.name ) with gzip.open(tmp.name) as f: cols = f.readline().decode(errors='ignore').split("\t") start_col = 1 for i,col in enumerate(cols): if col == "+" or col == "-": start_col = i - 1 break end_col = start_col + 10 cmd = "zcat {} | cut -f{}-{} | {} /dev/stdin {}" print(cmd.format(tmp.name, start_col, end_col, pred, gene_file)) sp.call(cmd.format( tmp.name, start_col, end_col, pred, gene_file), shell=True) else: sys.stderr.write("No annotation found!")
[ "def", "download_annotation", "(", "genomebuild", ",", "gene_file", ")", ":", "pred_bin", "=", "\"genePredToBed\"", "pred", "=", "find_executable", "(", "pred_bin", ")", "if", "not", "pred", ":", "sys", ".", "stderr", ".", "write", "(", "\"{} not found in path!\\n\"", ".", "format", "(", "pred_bin", ")", ")", "sys", ".", "exit", "(", "1", ")", "tmp", "=", "NamedTemporaryFile", "(", "delete", "=", "False", ",", "suffix", "=", "\".gz\"", ")", "anno", "=", "[", "]", "f", "=", "urlopen", "(", "UCSC_GENE_URL", ".", "format", "(", "genomebuild", ")", ")", "p", "=", "re", ".", "compile", "(", "r'\\w+.Gene.txt.gz'", ")", "for", "line", "in", "f", ".", "readlines", "(", ")", ":", "m", "=", "p", ".", "search", "(", "line", ".", "decode", "(", ")", ")", "if", "m", ":", "anno", ".", "append", "(", "m", ".", "group", "(", "0", ")", ")", "sys", ".", "stderr", ".", "write", "(", "\"Retrieving gene annotation for {}\\n\"", ".", "format", "(", "genomebuild", ")", ")", "url", "=", "\"\"", "for", "a", "in", "ANNOS", ":", "if", "a", "in", "anno", ":", "url", "=", "UCSC_GENE_URL", ".", "format", "(", "genomebuild", ")", "+", "a", "break", "if", "url", ":", "sys", ".", "stderr", ".", "write", "(", "\"Using {}\\n\"", ".", "format", "(", "url", ")", ")", "urlretrieve", "(", "url", ",", "tmp", ".", "name", ")", "with", "gzip", ".", "open", "(", "tmp", ".", "name", ")", "as", "f", ":", "cols", "=", "f", ".", "readline", "(", ")", ".", "decode", "(", "errors", "=", "'ignore'", ")", ".", "split", "(", "\"\\t\"", ")", "start_col", "=", "1", "for", "i", ",", "col", "in", "enumerate", "(", "cols", ")", ":", "if", "col", "==", "\"+\"", "or", "col", "==", "\"-\"", ":", "start_col", "=", "i", "-", "1", "break", "end_col", "=", "start_col", "+", "10", "cmd", "=", "\"zcat {} | cut -f{}-{} | {} /dev/stdin {}\"", "print", "(", "cmd", ".", "format", "(", "tmp", ".", "name", ",", "start_col", ",", "end_col", ",", "pred", ",", "gene_file", ")", ")", "sp", ".", "call", "(", "cmd", ".", "format", "(", 
"tmp", ".", "name", ",", "start_col", ",", "end_col", ",", "pred", ",", "gene_file", ")", ",", "shell", "=", "True", ")", "else", ":", "sys", ".", "stderr", ".", "write", "(", "\"No annotation found!\"", ")" ]
Download gene annotation from UCSC based on genomebuild. Will check UCSC, Ensembl and RefSeq annotation. Parameters ---------- genomebuild : str UCSC genome name. gene_file : str Output file name.
[ "Download", "gene", "annotation", "from", "UCSC", "based", "on", "genomebuild", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/genome_index.py#L98-L157
train
vanheeringen-lab/gimmemotifs
gimmemotifs/genome_index.py
GenomeIndex._check_dir
def _check_dir(self, dirname): """ Check if dir exists, if not: give warning and die""" if not os.path.exists(dirname): print("Directory %s does not exist!" % dirname) sys.exit(1)
python
def _check_dir(self, dirname): """ Check if dir exists, if not: give warning and die""" if not os.path.exists(dirname): print("Directory %s does not exist!" % dirname) sys.exit(1)
[ "def", "_check_dir", "(", "self", ",", "dirname", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "dirname", ")", ":", "print", "(", "\"Directory %s does not exist!\"", "%", "dirname", ")", "sys", ".", "exit", "(", "1", ")" ]
Check if dir exists, if not: give warning and die
[ "Check", "if", "dir", "exists", "if", "not", ":", "give", "warning", "and", "die" ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/genome_index.py#L278-L282
train
vanheeringen-lab/gimmemotifs
gimmemotifs/genome_index.py
GenomeIndex._make_index
def _make_index(self, fasta, index): """ Index a single, one-sequence fasta-file""" out = open(index, "wb") f = open(fasta) # Skip first line of fasta-file line = f.readline() offset = f.tell() line = f.readline() while line: out.write(pack(self.pack_char, offset)) offset = f.tell() line = f.readline() f.close() out.close()
python
def _make_index(self, fasta, index): """ Index a single, one-sequence fasta-file""" out = open(index, "wb") f = open(fasta) # Skip first line of fasta-file line = f.readline() offset = f.tell() line = f.readline() while line: out.write(pack(self.pack_char, offset)) offset = f.tell() line = f.readline() f.close() out.close()
[ "def", "_make_index", "(", "self", ",", "fasta", ",", "index", ")", ":", "out", "=", "open", "(", "index", ",", "\"wb\"", ")", "f", "=", "open", "(", "fasta", ")", "# Skip first line of fasta-file", "line", "=", "f", ".", "readline", "(", ")", "offset", "=", "f", ".", "tell", "(", ")", "line", "=", "f", ".", "readline", "(", ")", "while", "line", ":", "out", ".", "write", "(", "pack", "(", "self", ".", "pack_char", ",", "offset", ")", ")", "offset", "=", "f", ".", "tell", "(", ")", "line", "=", "f", ".", "readline", "(", ")", "f", ".", "close", "(", ")", "out", ".", "close", "(", ")" ]
Index a single, one-sequence fasta-file
[ "Index", "a", "single", "one", "-", "sequence", "fasta", "-", "file" ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/genome_index.py#L284-L297
train
vanheeringen-lab/gimmemotifs
gimmemotifs/genome_index.py
GenomeIndex.create_index
def create_index(self,fasta_dir=None, index_dir=None): """Index all fasta-files in fasta_dir (one sequence per file!) and store the results in index_dir""" # Use default directories if they are not supplied if not fasta_dir: fasta_dir = self.fasta_dir if not index_dir: index_dir = self.index_dir # Can't continue if we still don't have an index_dir or fasta_dir if not fasta_dir: print("fasta_dir not defined!") sys.exit(1) if not index_dir: print("index_dir not defined!") sys.exit(1) index_dir = os.path.abspath(index_dir) fasta_dir = os.path.abspath(fasta_dir) self.index_dir = index_dir # Prepare index directory if not os.path.exists(index_dir): try: os.mkdir(index_dir) except OSError as e: if e.args[0] == 13: sys.stderr.write("No permission to create index directory. Superuser access needed?\n") sys.exit() else: sys.stderr.write(e) # Directories need to exist self._check_dir(fasta_dir) self._check_dir(index_dir) # Get all fasta-files fastafiles = find_by_ext(fasta_dir, FASTA_EXT) if not(fastafiles): msg = "No fastafiles found in {} with extension in {}".format( fasta_dir, ",".join(FASTA_EXT)) raise IOError(msg) # param_file will hold all the information about the location of the fasta-files, indeces and # length of the sequences param_file = os.path.join(index_dir, self.param_file) size_file = os.path.join(index_dir, self.size_file) try: out = open(param_file, "w") except IOError as e: if e.args[0] == 13: sys.stderr.write("No permission to create files in index directory. 
Superuser access needed?\n") sys.exit() else: sys.stderr.write(e) s_out = open(size_file, "w") for fasta_file in fastafiles: #sys.stderr.write("Indexing %s\n" % fasta_file) f = open(fasta_file) line = f.readline() if not line.startswith(">"): sys.stderr.write("%s is not a valid FASTA file, expected > at first line\n" % fasta_file) sys.exit() seqname = line.strip().replace(">", "") line = f.readline() line_size = len(line.strip()) total_size = 0 while line: line = line.strip() if line.startswith(">"): sys.stderr.write("Sorry, can only index genomes with " "one sequence per FASTA file\n%s contains multiple " "sequences\n" % fasta_file) sys.exit() total_size += len(line) line = f.readline() index_file = os.path.join(index_dir, "%s.index" % seqname) out.write("{}\t{}\t{}\t{}\t{}\n".format( seqname, fasta_file, index_file, line_size, total_size)) s_out.write("{}\t{}\n".format(seqname, total_size)) self._make_index(fasta_file, index_file) f.close() out.close() s_out.close() # Read the index we just made so we can immediately use it self._read_index_file()
python
def create_index(self,fasta_dir=None, index_dir=None): """Index all fasta-files in fasta_dir (one sequence per file!) and store the results in index_dir""" # Use default directories if they are not supplied if not fasta_dir: fasta_dir = self.fasta_dir if not index_dir: index_dir = self.index_dir # Can't continue if we still don't have an index_dir or fasta_dir if not fasta_dir: print("fasta_dir not defined!") sys.exit(1) if not index_dir: print("index_dir not defined!") sys.exit(1) index_dir = os.path.abspath(index_dir) fasta_dir = os.path.abspath(fasta_dir) self.index_dir = index_dir # Prepare index directory if not os.path.exists(index_dir): try: os.mkdir(index_dir) except OSError as e: if e.args[0] == 13: sys.stderr.write("No permission to create index directory. Superuser access needed?\n") sys.exit() else: sys.stderr.write(e) # Directories need to exist self._check_dir(fasta_dir) self._check_dir(index_dir) # Get all fasta-files fastafiles = find_by_ext(fasta_dir, FASTA_EXT) if not(fastafiles): msg = "No fastafiles found in {} with extension in {}".format( fasta_dir, ",".join(FASTA_EXT)) raise IOError(msg) # param_file will hold all the information about the location of the fasta-files, indeces and # length of the sequences param_file = os.path.join(index_dir, self.param_file) size_file = os.path.join(index_dir, self.size_file) try: out = open(param_file, "w") except IOError as e: if e.args[0] == 13: sys.stderr.write("No permission to create files in index directory. 
Superuser access needed?\n") sys.exit() else: sys.stderr.write(e) s_out = open(size_file, "w") for fasta_file in fastafiles: #sys.stderr.write("Indexing %s\n" % fasta_file) f = open(fasta_file) line = f.readline() if not line.startswith(">"): sys.stderr.write("%s is not a valid FASTA file, expected > at first line\n" % fasta_file) sys.exit() seqname = line.strip().replace(">", "") line = f.readline() line_size = len(line.strip()) total_size = 0 while line: line = line.strip() if line.startswith(">"): sys.stderr.write("Sorry, can only index genomes with " "one sequence per FASTA file\n%s contains multiple " "sequences\n" % fasta_file) sys.exit() total_size += len(line) line = f.readline() index_file = os.path.join(index_dir, "%s.index" % seqname) out.write("{}\t{}\t{}\t{}\t{}\n".format( seqname, fasta_file, index_file, line_size, total_size)) s_out.write("{}\t{}\n".format(seqname, total_size)) self._make_index(fasta_file, index_file) f.close() out.close() s_out.close() # Read the index we just made so we can immediately use it self._read_index_file()
[ "def", "create_index", "(", "self", ",", "fasta_dir", "=", "None", ",", "index_dir", "=", "None", ")", ":", "# Use default directories if they are not supplied", "if", "not", "fasta_dir", ":", "fasta_dir", "=", "self", ".", "fasta_dir", "if", "not", "index_dir", ":", "index_dir", "=", "self", ".", "index_dir", "# Can't continue if we still don't have an index_dir or fasta_dir", "if", "not", "fasta_dir", ":", "print", "(", "\"fasta_dir not defined!\"", ")", "sys", ".", "exit", "(", "1", ")", "if", "not", "index_dir", ":", "print", "(", "\"index_dir not defined!\"", ")", "sys", ".", "exit", "(", "1", ")", "index_dir", "=", "os", ".", "path", ".", "abspath", "(", "index_dir", ")", "fasta_dir", "=", "os", ".", "path", ".", "abspath", "(", "fasta_dir", ")", "self", ".", "index_dir", "=", "index_dir", "# Prepare index directory", "if", "not", "os", ".", "path", ".", "exists", "(", "index_dir", ")", ":", "try", ":", "os", ".", "mkdir", "(", "index_dir", ")", "except", "OSError", "as", "e", ":", "if", "e", ".", "args", "[", "0", "]", "==", "13", ":", "sys", ".", "stderr", ".", "write", "(", "\"No permission to create index directory. 
Superuser access needed?\\n\"", ")", "sys", ".", "exit", "(", ")", "else", ":", "sys", ".", "stderr", ".", "write", "(", "e", ")", "# Directories need to exist", "self", ".", "_check_dir", "(", "fasta_dir", ")", "self", ".", "_check_dir", "(", "index_dir", ")", "# Get all fasta-files ", "fastafiles", "=", "find_by_ext", "(", "fasta_dir", ",", "FASTA_EXT", ")", "if", "not", "(", "fastafiles", ")", ":", "msg", "=", "\"No fastafiles found in {} with extension in {}\"", ".", "format", "(", "fasta_dir", ",", "\",\"", ".", "join", "(", "FASTA_EXT", ")", ")", "raise", "IOError", "(", "msg", ")", "# param_file will hold all the information about the location of the fasta-files, indeces and ", "# length of the sequences", "param_file", "=", "os", ".", "path", ".", "join", "(", "index_dir", ",", "self", ".", "param_file", ")", "size_file", "=", "os", ".", "path", ".", "join", "(", "index_dir", ",", "self", ".", "size_file", ")", "try", ":", "out", "=", "open", "(", "param_file", ",", "\"w\"", ")", "except", "IOError", "as", "e", ":", "if", "e", ".", "args", "[", "0", "]", "==", "13", ":", "sys", ".", "stderr", ".", "write", "(", "\"No permission to create files in index directory. 
Superuser access needed?\\n\"", ")", "sys", ".", "exit", "(", ")", "else", ":", "sys", ".", "stderr", ".", "write", "(", "e", ")", "s_out", "=", "open", "(", "size_file", ",", "\"w\"", ")", "for", "fasta_file", "in", "fastafiles", ":", "#sys.stderr.write(\"Indexing %s\\n\" % fasta_file)", "f", "=", "open", "(", "fasta_file", ")", "line", "=", "f", ".", "readline", "(", ")", "if", "not", "line", ".", "startswith", "(", "\">\"", ")", ":", "sys", ".", "stderr", ".", "write", "(", "\"%s is not a valid FASTA file, expected > at first line\\n\"", "%", "fasta_file", ")", "sys", ".", "exit", "(", ")", "seqname", "=", "line", ".", "strip", "(", ")", ".", "replace", "(", "\">\"", ",", "\"\"", ")", "line", "=", "f", ".", "readline", "(", ")", "line_size", "=", "len", "(", "line", ".", "strip", "(", ")", ")", "total_size", "=", "0", "while", "line", ":", "line", "=", "line", ".", "strip", "(", ")", "if", "line", ".", "startswith", "(", "\">\"", ")", ":", "sys", ".", "stderr", ".", "write", "(", "\"Sorry, can only index genomes with \"", "\"one sequence per FASTA file\\n%s contains multiple \"", "\"sequences\\n\"", "%", "fasta_file", ")", "sys", ".", "exit", "(", ")", "total_size", "+=", "len", "(", "line", ")", "line", "=", "f", ".", "readline", "(", ")", "index_file", "=", "os", ".", "path", ".", "join", "(", "index_dir", ",", "\"%s.index\"", "%", "seqname", ")", "out", ".", "write", "(", "\"{}\\t{}\\t{}\\t{}\\t{}\\n\"", ".", "format", "(", "seqname", ",", "fasta_file", ",", "index_file", ",", "line_size", ",", "total_size", ")", ")", "s_out", ".", "write", "(", "\"{}\\t{}\\n\"", ".", "format", "(", "seqname", ",", "total_size", ")", ")", "self", ".", "_make_index", "(", "fasta_file", ",", "index_file", ")", "f", ".", "close", "(", ")", "out", ".", "close", "(", ")", "s_out", ".", "close", "(", ")", "# Read the index we just made so we can immediately use it", "self", ".", "_read_index_file", "(", ")" ]
Index all fasta-files in fasta_dir (one sequence per file!) and store the results in index_dir
[ "Index", "all", "fasta", "-", "files", "in", "fasta_dir", "(", "one", "sequence", "per", "file!", ")", "and", "store", "the", "results", "in", "index_dir" ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/genome_index.py#L299-L398
train
vanheeringen-lab/gimmemotifs
gimmemotifs/genome_index.py
GenomeIndex._read_index_file
def _read_index_file(self): """read the param_file, index_dir should already be set """ param_file = os.path.join(self.index_dir, self.param_file) with open(param_file) as f: for line in f.readlines(): (name, fasta_file, index_file, line_size, total_size) = line.strip().split("\t") self.size[name] = int(total_size) self.fasta_file[name] = fasta_file self.index_file[name] = index_file self.line_size[name] = int(line_size)
python
def _read_index_file(self): """read the param_file, index_dir should already be set """ param_file = os.path.join(self.index_dir, self.param_file) with open(param_file) as f: for line in f.readlines(): (name, fasta_file, index_file, line_size, total_size) = line.strip().split("\t") self.size[name] = int(total_size) self.fasta_file[name] = fasta_file self.index_file[name] = index_file self.line_size[name] = int(line_size)
[ "def", "_read_index_file", "(", "self", ")", ":", "param_file", "=", "os", ".", "path", ".", "join", "(", "self", ".", "index_dir", ",", "self", ".", "param_file", ")", "with", "open", "(", "param_file", ")", "as", "f", ":", "for", "line", "in", "f", ".", "readlines", "(", ")", ":", "(", "name", ",", "fasta_file", ",", "index_file", ",", "line_size", ",", "total_size", ")", "=", "line", ".", "strip", "(", ")", ".", "split", "(", "\"\\t\"", ")", "self", ".", "size", "[", "name", "]", "=", "int", "(", "total_size", ")", "self", ".", "fasta_file", "[", "name", "]", "=", "fasta_file", "self", ".", "index_file", "[", "name", "]", "=", "index_file", "self", ".", "line_size", "[", "name", "]", "=", "int", "(", "line_size", ")" ]
read the param_file, index_dir should already be set
[ "read", "the", "param_file", "index_dir", "should", "already", "be", "set" ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/genome_index.py#L400-L409
train
vanheeringen-lab/gimmemotifs
gimmemotifs/genome_index.py
GenomeIndex._read_seq_from_fasta
def _read_seq_from_fasta(self, fasta, offset, nr_lines): """ retrieve a number of lines from a fasta file-object, starting at offset""" fasta.seek(offset) lines = [fasta.readline().strip() for _ in range(nr_lines)] return "".join(lines)
python
def _read_seq_from_fasta(self, fasta, offset, nr_lines): """ retrieve a number of lines from a fasta file-object, starting at offset""" fasta.seek(offset) lines = [fasta.readline().strip() for _ in range(nr_lines)] return "".join(lines)
[ "def", "_read_seq_from_fasta", "(", "self", ",", "fasta", ",", "offset", ",", "nr_lines", ")", ":", "fasta", ".", "seek", "(", "offset", ")", "lines", "=", "[", "fasta", ".", "readline", "(", ")", ".", "strip", "(", ")", "for", "_", "in", "range", "(", "nr_lines", ")", "]", "return", "\"\"", ".", "join", "(", "lines", ")" ]
retrieve a number of lines from a fasta file-object, starting at offset
[ "retrieve", "a", "number", "of", "lines", "from", "a", "fasta", "file", "-", "object", "starting", "at", "offset" ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/genome_index.py#L411-L415
train
vanheeringen-lab/gimmemotifs
gimmemotifs/genome_index.py
GenomeIndex.get_sequences
def get_sequences(self, chr, coords): """ Retrieve multiple sequences from same chr (RC not possible yet)""" # Check if we have an index_dir if not self.index_dir: print("Index dir is not defined!") sys.exit() # retrieve all information for this specific sequence fasta_file = self.fasta_file[chr] index_file = self.index_file[chr] line_size = self.line_size[chr] total_size = self.size[chr] index = open(index_file, "rb") fasta = open(fasta_file) seqs = [] for coordset in coords: seq = "" for (start,end) in coordset: if start > total_size: raise ValueError("%s: %s, invalid start, greater than sequence length!" % (chr,start)) if start < 0: raise ValueError("Invalid start, < 0!") if end > total_size: raise ValueError("Invalid end, greater than sequence length!") seq += self._read(index, fasta, start, end, line_size) seqs.append(seq) index.close() fasta.close() return seqs
python
def get_sequences(self, chr, coords): """ Retrieve multiple sequences from same chr (RC not possible yet)""" # Check if we have an index_dir if not self.index_dir: print("Index dir is not defined!") sys.exit() # retrieve all information for this specific sequence fasta_file = self.fasta_file[chr] index_file = self.index_file[chr] line_size = self.line_size[chr] total_size = self.size[chr] index = open(index_file, "rb") fasta = open(fasta_file) seqs = [] for coordset in coords: seq = "" for (start,end) in coordset: if start > total_size: raise ValueError("%s: %s, invalid start, greater than sequence length!" % (chr,start)) if start < 0: raise ValueError("Invalid start, < 0!") if end > total_size: raise ValueError("Invalid end, greater than sequence length!") seq += self._read(index, fasta, start, end, line_size) seqs.append(seq) index.close() fasta.close() return seqs
[ "def", "get_sequences", "(", "self", ",", "chr", ",", "coords", ")", ":", "# Check if we have an index_dir", "if", "not", "self", ".", "index_dir", ":", "print", "(", "\"Index dir is not defined!\"", ")", "sys", ".", "exit", "(", ")", "# retrieve all information for this specific sequence", "fasta_file", "=", "self", ".", "fasta_file", "[", "chr", "]", "index_file", "=", "self", ".", "index_file", "[", "chr", "]", "line_size", "=", "self", ".", "line_size", "[", "chr", "]", "total_size", "=", "self", ".", "size", "[", "chr", "]", "index", "=", "open", "(", "index_file", ",", "\"rb\"", ")", "fasta", "=", "open", "(", "fasta_file", ")", "seqs", "=", "[", "]", "for", "coordset", "in", "coords", ":", "seq", "=", "\"\"", "for", "(", "start", ",", "end", ")", "in", "coordset", ":", "if", "start", ">", "total_size", ":", "raise", "ValueError", "(", "\"%s: %s, invalid start, greater than sequence length!\"", "%", "(", "chr", ",", "start", ")", ")", "if", "start", "<", "0", ":", "raise", "ValueError", "(", "\"Invalid start, < 0!\"", ")", "if", "end", ">", "total_size", ":", "raise", "ValueError", "(", "\"Invalid end, greater than sequence length!\"", ")", "seq", "+=", "self", ".", "_read", "(", "index", ",", "fasta", ",", "start", ",", "end", ",", "line_size", ")", "seqs", ".", "append", "(", "seq", ")", "index", ".", "close", "(", ")", "fasta", ".", "close", "(", ")", "return", "seqs" ]
Retrieve multiple sequences from same chr (RC not possible yet)
[ "Retrieve", "multiple", "sequences", "from", "same", "chr", "(", "RC", "not", "possible", "yet", ")" ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/genome_index.py#L461-L495
train
vanheeringen-lab/gimmemotifs
gimmemotifs/genome_index.py
GenomeIndex.get_sequence
def get_sequence(self, chrom, start, end, strand=None): """ Retrieve a sequence """ # Check if we have an index_dir if not self.index_dir: print("Index dir is not defined!") sys.exit() # retrieve all information for this specific sequence fasta_file = self.fasta_file[chrom] index_file = self.index_file[chrom] line_size = self.line_size[chrom] total_size = self.size[chrom] #print fasta_file, index_file, line_size, total_size if start > total_size: raise ValueError( "Invalid start {0}, greater than sequence length {1} of {2}!".format(start, total_size, chrom)) if start < 0: raise ValueError("Invalid start, < 0!") if end > total_size: raise ValueError( "Invalid end {0}, greater than sequence length {1} of {2}!".format(end, total_size, chrom)) index = open(index_file, "rb") fasta = open(fasta_file) seq = self._read(index, fasta, start, end, line_size) index.close() fasta.close() if strand and strand == "-": seq = rc(seq) return seq
python
def get_sequence(self, chrom, start, end, strand=None): """ Retrieve a sequence """ # Check if we have an index_dir if not self.index_dir: print("Index dir is not defined!") sys.exit() # retrieve all information for this specific sequence fasta_file = self.fasta_file[chrom] index_file = self.index_file[chrom] line_size = self.line_size[chrom] total_size = self.size[chrom] #print fasta_file, index_file, line_size, total_size if start > total_size: raise ValueError( "Invalid start {0}, greater than sequence length {1} of {2}!".format(start, total_size, chrom)) if start < 0: raise ValueError("Invalid start, < 0!") if end > total_size: raise ValueError( "Invalid end {0}, greater than sequence length {1} of {2}!".format(end, total_size, chrom)) index = open(index_file, "rb") fasta = open(fasta_file) seq = self._read(index, fasta, start, end, line_size) index.close() fasta.close() if strand and strand == "-": seq = rc(seq) return seq
[ "def", "get_sequence", "(", "self", ",", "chrom", ",", "start", ",", "end", ",", "strand", "=", "None", ")", ":", "# Check if we have an index_dir", "if", "not", "self", ".", "index_dir", ":", "print", "(", "\"Index dir is not defined!\"", ")", "sys", ".", "exit", "(", ")", "# retrieve all information for this specific sequence", "fasta_file", "=", "self", ".", "fasta_file", "[", "chrom", "]", "index_file", "=", "self", ".", "index_file", "[", "chrom", "]", "line_size", "=", "self", ".", "line_size", "[", "chrom", "]", "total_size", "=", "self", ".", "size", "[", "chrom", "]", "#print fasta_file, index_file, line_size, total_size", "if", "start", ">", "total_size", ":", "raise", "ValueError", "(", "\"Invalid start {0}, greater than sequence length {1} of {2}!\"", ".", "format", "(", "start", ",", "total_size", ",", "chrom", ")", ")", "if", "start", "<", "0", ":", "raise", "ValueError", "(", "\"Invalid start, < 0!\"", ")", "if", "end", ">", "total_size", ":", "raise", "ValueError", "(", "\"Invalid end {0}, greater than sequence length {1} of {2}!\"", ".", "format", "(", "end", ",", "total_size", ",", "chrom", ")", ")", "index", "=", "open", "(", "index_file", ",", "\"rb\"", ")", "fasta", "=", "open", "(", "fasta_file", ")", "seq", "=", "self", ".", "_read", "(", "index", ",", "fasta", ",", "start", ",", "end", ",", "line_size", ")", "index", ".", "close", "(", ")", "fasta", ".", "close", "(", ")", "if", "strand", "and", "strand", "==", "\"-\"", ":", "seq", "=", "rc", "(", "seq", ")", "return", "seq" ]
Retrieve a sequence
[ "Retrieve", "a", "sequence" ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/genome_index.py#L498-L532
train
vanheeringen-lab/gimmemotifs
gimmemotifs/genome_index.py
GenomeIndex.get_size
def get_size(self, chrom=None): """ Return the sizes of all sequences in the index, or the size of chrom if specified as an optional argument """ if len(self.size) == 0: raise LookupError("no chromosomes in index, is the index correct?") if chrom: if chrom in self.size: return self.size[chrom] else: raise KeyError("chromosome {} not in index".format(chrom)) total = 0 for size in self.size.values(): total += size return total
python
def get_size(self, chrom=None): """ Return the sizes of all sequences in the index, or the size of chrom if specified as an optional argument """ if len(self.size) == 0: raise LookupError("no chromosomes in index, is the index correct?") if chrom: if chrom in self.size: return self.size[chrom] else: raise KeyError("chromosome {} not in index".format(chrom)) total = 0 for size in self.size.values(): total += size return total
[ "def", "get_size", "(", "self", ",", "chrom", "=", "None", ")", ":", "if", "len", "(", "self", ".", "size", ")", "==", "0", ":", "raise", "LookupError", "(", "\"no chromosomes in index, is the index correct?\"", ")", "if", "chrom", ":", "if", "chrom", "in", "self", ".", "size", ":", "return", "self", ".", "size", "[", "chrom", "]", "else", ":", "raise", "KeyError", "(", "\"chromosome {} not in index\"", ".", "format", "(", "chrom", ")", ")", "total", "=", "0", "for", "size", "in", "self", ".", "size", ".", "values", "(", ")", ":", "total", "+=", "size", "return", "total" ]
Return the sizes of all sequences in the index, or the size of chrom if specified as an optional argument
[ "Return", "the", "sizes", "of", "all", "sequences", "in", "the", "index", "or", "the", "size", "of", "chrom", "if", "specified", "as", "an", "optional", "argument" ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/genome_index.py#L538-L553
train
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
get_tool
def get_tool(name): """ Returns an instance of a specific tool. Parameters ---------- name : str Name of the tool (case-insensitive). Returns ------- tool : MotifProgram instance """ tool = name.lower() if tool not in __tools__: raise ValueError("Tool {0} not found!\n".format(name)) t = __tools__[tool]() if not t.is_installed(): sys.stderr.write("Tool {0} not installed!\n".format(tool)) if not t.is_configured(): sys.stderr.write("Tool {0} not configured!\n".format(tool)) return t
python
def get_tool(name): """ Returns an instance of a specific tool. Parameters ---------- name : str Name of the tool (case-insensitive). Returns ------- tool : MotifProgram instance """ tool = name.lower() if tool not in __tools__: raise ValueError("Tool {0} not found!\n".format(name)) t = __tools__[tool]() if not t.is_installed(): sys.stderr.write("Tool {0} not installed!\n".format(tool)) if not t.is_configured(): sys.stderr.write("Tool {0} not configured!\n".format(tool)) return t
[ "def", "get_tool", "(", "name", ")", ":", "tool", "=", "name", ".", "lower", "(", ")", "if", "tool", "not", "in", "__tools__", ":", "raise", "ValueError", "(", "\"Tool {0} not found!\\n\"", ".", "format", "(", "name", ")", ")", "t", "=", "__tools__", "[", "tool", "]", "(", ")", "if", "not", "t", ".", "is_installed", "(", ")", ":", "sys", ".", "stderr", ".", "write", "(", "\"Tool {0} not installed!\\n\"", ".", "format", "(", "tool", ")", ")", "if", "not", "t", ".", "is_configured", "(", ")", ":", "sys", ".", "stderr", ".", "write", "(", "\"Tool {0} not configured!\\n\"", ".", "format", "(", "tool", ")", ")", "return", "t" ]
Returns an instance of a specific tool. Parameters ---------- name : str Name of the tool (case-insensitive). Returns ------- tool : MotifProgram instance
[ "Returns", "an", "instance", "of", "a", "specific", "tool", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L33-L58
train
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
locate_tool
def locate_tool(name, verbose=True): """ Returns the binary of a tool. Parameters ---------- name : str Name of the tool (case-insensitive). Returns ------- tool_bin : str Binary of tool. """ m = get_tool(name) tool_bin = which(m.cmd) if tool_bin: if verbose: print("Found {} in {}".format(m.name, tool_bin)) return tool_bin else: print("Couldn't find {}".format(m.name))
python
def locate_tool(name, verbose=True): """ Returns the binary of a tool. Parameters ---------- name : str Name of the tool (case-insensitive). Returns ------- tool_bin : str Binary of tool. """ m = get_tool(name) tool_bin = which(m.cmd) if tool_bin: if verbose: print("Found {} in {}".format(m.name, tool_bin)) return tool_bin else: print("Couldn't find {}".format(m.name))
[ "def", "locate_tool", "(", "name", ",", "verbose", "=", "True", ")", ":", "m", "=", "get_tool", "(", "name", ")", "tool_bin", "=", "which", "(", "m", ".", "cmd", ")", "if", "tool_bin", ":", "if", "verbose", ":", "print", "(", "\"Found {} in {}\"", ".", "format", "(", "m", ".", "name", ",", "tool_bin", ")", ")", "return", "tool_bin", "else", ":", "print", "(", "\"Couldn't find {}\"", ".", "format", "(", "m", ".", "name", ")", ")" ]
Returns the binary of a tool. Parameters ---------- name : str Name of the tool (case-insensitive). Returns ------- tool_bin : str Binary of tool.
[ "Returns", "the", "binary", "of", "a", "tool", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L60-L81
train
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
MotifProgram.bin
def bin(self): """ Get the command used to run the tool. Returns ------- command : str The tool system command. """ if self.local_bin: return self.local_bin else: return self.config.bin(self.name)
python
def bin(self): """ Get the command used to run the tool. Returns ------- command : str The tool system command. """ if self.local_bin: return self.local_bin else: return self.config.bin(self.name)
[ "def", "bin", "(", "self", ")", ":", "if", "self", ".", "local_bin", ":", "return", "self", ".", "local_bin", "else", ":", "return", "self", ".", "config", ".", "bin", "(", "self", ".", "name", ")" ]
Get the command used to run the tool. Returns ------- command : str The tool system command.
[ "Get", "the", "command", "used", "to", "run", "the", "tool", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L93-L105
train
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
MotifProgram.is_installed
def is_installed(self): """ Check if the tool is installed. Returns ------- is_installed : bool True if the tool is installed. """ return self.is_configured() and os.access(self.bin(), os.X_OK)
python
def is_installed(self): """ Check if the tool is installed. Returns ------- is_installed : bool True if the tool is installed. """ return self.is_configured() and os.access(self.bin(), os.X_OK)
[ "def", "is_installed", "(", "self", ")", ":", "return", "self", ".", "is_configured", "(", ")", "and", "os", ".", "access", "(", "self", ".", "bin", "(", ")", ",", "os", ".", "X_OK", ")" ]
Check if the tool is installed. Returns ------- is_installed : bool True if the tool is installed.
[ "Check", "if", "the", "tool", "is", "installed", ".", "Returns", "-------", "is_installed", ":", "bool", "True", "if", "the", "tool", "is", "installed", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L129-L138
train
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
MotifProgram.run
def run(self, fastafile, params=None, tmp=None): """ Run the tool and predict motifs from a FASTA file. Parameters ---------- fastafile : str Name of the FASTA input file. params : dict, optional Optional parameters. For some of the tools required parameters are passed using this dictionary. tmp : str, optional Directory to use for creation of temporary files. Returns ------- motifs : list of Motif instances The predicted motifs. stdout : str Standard out of the tool. stderr : str Standard error of the tool. """ if not self.is_configured(): raise ValueError("%s is not configured" % self.name) if not self.is_installed(): raise ValueError("%s is not installed or not correctly configured" % self.name) self.tmpdir = mkdtemp(prefix="{0}.".format(self.name), dir=tmp) fastafile = os.path.abspath(fastafile) try: return self._run_program(self.bin(), fastafile, params) except KeyboardInterrupt: return ([], "Killed", "Killed")
python
def run(self, fastafile, params=None, tmp=None): """ Run the tool and predict motifs from a FASTA file. Parameters ---------- fastafile : str Name of the FASTA input file. params : dict, optional Optional parameters. For some of the tools required parameters are passed using this dictionary. tmp : str, optional Directory to use for creation of temporary files. Returns ------- motifs : list of Motif instances The predicted motifs. stdout : str Standard out of the tool. stderr : str Standard error of the tool. """ if not self.is_configured(): raise ValueError("%s is not configured" % self.name) if not self.is_installed(): raise ValueError("%s is not installed or not correctly configured" % self.name) self.tmpdir = mkdtemp(prefix="{0}.".format(self.name), dir=tmp) fastafile = os.path.abspath(fastafile) try: return self._run_program(self.bin(), fastafile, params) except KeyboardInterrupt: return ([], "Killed", "Killed")
[ "def", "run", "(", "self", ",", "fastafile", ",", "params", "=", "None", ",", "tmp", "=", "None", ")", ":", "if", "not", "self", ".", "is_configured", "(", ")", ":", "raise", "ValueError", "(", "\"%s is not configured\"", "%", "self", ".", "name", ")", "if", "not", "self", ".", "is_installed", "(", ")", ":", "raise", "ValueError", "(", "\"%s is not installed or not correctly configured\"", "%", "self", ".", "name", ")", "self", ".", "tmpdir", "=", "mkdtemp", "(", "prefix", "=", "\"{0}.\"", ".", "format", "(", "self", ".", "name", ")", ",", "dir", "=", "tmp", ")", "fastafile", "=", "os", ".", "path", ".", "abspath", "(", "fastafile", ")", "try", ":", "return", "self", ".", "_run_program", "(", "self", ".", "bin", "(", ")", ",", "fastafile", ",", "params", ")", "except", "KeyboardInterrupt", ":", "return", "(", "[", "]", ",", "\"Killed\"", ",", "\"Killed\"", ")" ]
Run the tool and predict motifs from a FASTA file. Parameters ---------- fastafile : str Name of the FASTA input file. params : dict, optional Optional parameters. For some of the tools required parameters are passed using this dictionary. tmp : str, optional Directory to use for creation of temporary files. Returns ------- motifs : list of Motif instances The predicted motifs. stdout : str Standard out of the tool. stderr : str Standard error of the tool.
[ "Run", "the", "tool", "and", "predict", "motifs", "from", "a", "FASTA", "file", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L140-L179
train