Dataset schema, one record per function:
repo, path, func_name, code, language, sha, url, partition
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
XXmotif._parse_params
def _parse_params(self, params=None):
    """
    Parse parameters.

    Combine default and user-defined parameters.
    """
    prm = self.default_params.copy()
    if params is not None:
        prm.update(params)

    if prm["background"]:
        # Absolute path, just to be sure
        prm["background"] = os.path.abspath(prm["background"])
        prm["background"] = " --negSet {0} ".format(prm["background"])

    prm["strand"] = ""
    if not prm["single"]:
        prm["strand"] = " --revcomp "

    return prm
python
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L203-L223
train
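The copy-then-update idiom above is the entire parameter model shared by these wrappers. A standalone sketch with illustrative keys (not the tool's real defaults):

    # Minimal sketch of the _parse_params merge; keys are illustrative.
    default_params = {"background": None, "single": False}
    user_params = {"background": "bg.fa"}

    prm = default_params.copy()   # never mutate the shared defaults
    if user_params is not None:
        prm.update(user_params)   # user values win on key collisions

    assert prm == {"background": "bg.fa", "single": False}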
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
XXmotif._run_program
def _run_program(self, bin, fastafile, params=None):
    """
    Run XXmotif and predict motifs from a FASTA file.

    Parameters
    ----------
    bin : str
        Command used to run the tool.
    fastafile : str
        Name of the FASTA input file.
    params : dict, optional
        Optional parameters. For some of the tools required parameters
        are passed using this dictionary.

    Returns
    -------
    motifs : list of Motif instances
        The predicted motifs.
    stdout : str
        Standard out of the tool.
    stderr : str
        Standard error of the tool.
    """
    params = self._parse_params(params)

    outfile = os.path.join(
        self.tmpdir,
        os.path.basename(fastafile.replace(".fa", ".pwm")))

    stdout = ""
    stderr = ""

    cmd = "%s %s %s --localization --batch %s %s" % (
        bin,
        self.tmpdir,
        fastafile,
        params["background"],
        params["strand"],
    )

    p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
    out, err = p.communicate()
    stdout += out.decode()
    stderr += err.decode()

    motifs = []
    if os.path.exists(outfile):
        motifs = read_motifs(outfile, fmt="xxmotif")
        for m in motifs:
            m.id = "{0}_{1}".format(self.name, m.id)
    else:
        stdout += "\nMotif file {0} not found!\n".format(outfile)
        stderr += "\nMotif file {0} not found!\n".format(outfile)

    return motifs, stdout, stderr
python
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L225-L284
train
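Each _run_program in this module repeats the same Popen/communicate/decode capture sequence. A minimal standalone sketch of that pattern, using echo as a stand-in for the real binary:

    from subprocess import PIPE, Popen

    # Stand-in command; the real wrappers interpolate the tool binary
    # and its arguments into the string the same way.
    cmd = "echo hello"
    p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
    out, err = p.communicate()           # bytes until decoded
    stdout, stderr = out.decode(), err.decode()
    print(repr(stdout))                  # 'hello\n'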
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
Homer._parse_params
def _parse_params(self, params=None):
    """
    Parse parameters.

    Combine default and user-defined parameters.
    """
    prm = self.default_params.copy()
    if params is not None:
        prm.update(params)

    # Background file is essential!
    if not prm["background"]:
        print("Background file needed!")
        sys.exit()

    prm["background"] = os.path.abspath(prm["background"])

    prm["strand"] = ""
    if prm["single"]:
        prm["strand"] = " -strand + "

    return prm
python
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L305-L326
train
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
Homer._run_program
def _run_program(self, bin, fastafile, params=None):
    """
    Run Homer and predict motifs from a FASTA file.

    Parameters
    ----------
    bin : str
        Command used to run the tool.
    fastafile : str
        Name of the FASTA input file.
    params : dict, optional
        Optional parameters. For some of the tools required parameters
        are passed using this dictionary.

    Returns
    -------
    motifs : list of Motif instances
        The predicted motifs.
    stdout : str
        Standard out of the tool.
    stderr : str
        Standard error of the tool.
    """
    params = self._parse_params(params)

    # NamedTemporaryFile is used here only to generate a unique output
    # path inside tmpdir; the handle is discarded and Homer itself
    # writes the actual file.
    outfile = NamedTemporaryFile(
        mode="w",
        dir=self.tmpdir,
        prefix="homer_w{}.".format(params["width"])
    ).name

    cmd = "%s denovo -i %s -b %s -len %s -S %s %s -o %s -p 8" % (
        bin,
        fastafile,
        params["background"],
        params["width"],
        params["number"],
        params["strand"],
        outfile)

    stderr = ""
    stdout = "Running command:\n{}\n".format(cmd)

    p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE, cwd=self.tmpdir)
    out, err = p.communicate()
    stdout += out.decode()
    stderr += err.decode()

    motifs = []
    if os.path.exists(outfile):
        motifs = read_motifs(outfile, fmt="pwm")
        for i, m in enumerate(motifs):
            m.id = "{}_{}_{}".format(self.name, params["width"], i + 1)

    return motifs, stdout, stderr
python
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L328-L387
train
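For orientation, a hypothetical driver for the wrapper above. It assumes the surrounding MotifProgram base class exposes a run() entry point that locates the binary and forwards to _run_program; that entry point and the file names are assumptions, not part of these records:

    # Hypothetical usage; run() and the file names are assumed.
    homer = Homer()
    motifs, stdout, stderr = homer.run(
        "peaks.fa",
        params={"background": "bg.fa", "width": 10, "number": 5},
    )
    for m in motifs:
        print(m.id)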
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
BioProspector.parse
def parse(self, fo):
    """
    Convert BioProspector output to motifs

    Parameters
    ----------
    fo : file-like
        File object containing BioProspector output.

    Returns
    -------
    motifs : list
        List of Motif instances.
    """
    motifs = []
    p = re.compile(r'^\d+\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)')
    pwm = []
    motif_id = ""
    for line in fo.readlines():
        if line.startswith("Motif #"):
            if pwm:
                m = Motif(pwm)
                m.id = "BioProspector_w%s_%s" % (len(m), motif_id)
                motifs.append(m)
            motif_id = line.split("#")[1].split(":")[0]
            pwm = []
        else:
            m = p.search(line)
            if m:
                pwm.append([float(m.group(x)) / 100.0 for x in range(1, 5)])

    if pwm:
        m = Motif(pwm)
        m.id = "BioProspector_w%s_%s" % (len(m), motif_id)
        motifs.append(m)

    return motifs
python
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L488-L524
train
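The regex-driven accumulation in parse can be exercised on a synthetic snippet shaped like the expected output (a "Motif #n:" header followed by percentage rows; real BioProspector output carries more fields):

    import io
    import re

    # Synthetic input in the shape the regex expects.
    text = """Motif #1:
    1   25.0   25.0   25.0   25.0
    2   10.0   40.0   40.0   10.0
    """
    p = re.compile(r'\d+\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)')
    pwm = []
    for line in io.StringIO(text):
        m = p.search(line)
        if m:
            pwm.append([float(m.group(x)) / 100.0 for x in range(1, 5)])
    print(pwm)  # [[0.25, 0.25, 0.25, 0.25], [0.1, 0.4, 0.4, 0.1]]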
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
Hms._run_program
def _run_program(self, bin, fastafile, params=None):
    """
    Run HMS and predict motifs from a FASTA file.

    Parameters
    ----------
    bin : str
        Command used to run the tool.
    fastafile : str
        Name of the FASTA input file.
    params : dict, optional
        Optional parameters. For some of the tools required parameters
        are passed using this dictionary.

    Returns
    -------
    motifs : list of Motif instances
        The predicted motifs.
    stdout : str
        Standard out of the tool.
    stderr : str
        Standard error of the tool.
    """
    # _parse_params already merges the defaults (including "width")
    # with any user-supplied parameters.
    params = self._parse_params(params)

    fgfile, summitfile, outfile = self._prepare_files(fastafile)

    current_path = os.getcwd()
    os.chdir(self.tmpdir)

    cmd = ("{} -i {} -w {} -dna 4 -iteration 50 -chain 20 -seqprop -0.1 "
           "-strand 2 -peaklocation {} -t_dof 3 -dep 2").format(
        bin, fgfile, params['width'], summitfile)
    p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
    out, err = p.communicate()
    # Decode to str so the return types match the docstring.
    stdout, stderr = out.decode(), err.decode()

    os.chdir(current_path)

    motifs = []
    if os.path.exists(outfile):
        with open(outfile) as f:
            motifs = self.parse(f)

    for i, m in enumerate(motifs):
        m.id = "HMS_w{}_{}".format(params['width'], i + 1)

    return motifs, stdout, stderr
python
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L575-L631
train
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
Hms.parse
def parse(self, fo):
    """
    Convert HMS output to motifs

    Parameters
    ----------
    fo : file-like
        File object containing HMS output.

    Returns
    -------
    motifs : list
        List of Motif instances.
    """
    # The output file contains four whitespace-separated rows
    # (A, C, G, T); transpose them into one row per position.
    m = [[float(x) for x in fo.readline().strip().split(" ")]
         for i in range(4)]
    matrix = [[m[0][i], m[1][i], m[2][i], m[3][i]]
              for i in range(len(m[0]))]
    motifs = [Motif(matrix)]
    motifs[-1].id = self.name
    return motifs
python
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L633-L653
train
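A runnable sketch of the transpose step in Hms.parse, fed synthetic numbers through io.StringIO:

    import io

    # Four rows (A, C, G, T), three positions; synthetic numbers.
    text = "0.7 0.1 0.1\n0.1 0.7 0.1\n0.1 0.1 0.7\n0.1 0.1 0.1\n"
    fo = io.StringIO(text)
    m = [[float(x) for x in fo.readline().strip().split(" ")]
         for i in range(4)]
    # One [A, C, G, T] row per motif position.
    matrix = [[m[0][i], m[1][i], m[2][i], m[3][i]] for i in range(len(m[0]))]
    print(matrix[0])  # [0.7, 0.1, 0.1, 0.1]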
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
Amd._run_program
def _run_program(self, bin, fastafile, params=None):
    """
    Run AMD and predict motifs from a FASTA file.

    Parameters
    ----------
    bin : str
        Command used to run the tool.
    fastafile : str
        Name of the FASTA input file.
    params : dict, optional
        Optional parameters. For some of the tools required parameters
        are passed using this dictionary.

    Returns
    -------
    motifs : list of Motif instances
        The predicted motifs.
    stdout : str
        Standard out of the tool.
    stderr : str
        Standard error of the tool.
    """
    params = self._parse_params(params)

    fgfile = os.path.join(self.tmpdir, "AMD.in.fa")
    outfile = fgfile + ".Matrix"
    shutil.copy(fastafile, fgfile)

    current_path = os.getcwd()
    os.chdir(self.tmpdir)

    stdout = ""
    stderr = ""

    cmd = "%s -F %s -B %s" % (
        bin,
        fgfile,
        params["background"],
    )
    p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
    out, err = p.communicate()
    stdout += out.decode()
    stderr += err.decode()

    os.chdir(current_path)

    motifs = []
    if os.path.exists(outfile):
        with open(outfile) as f:
            motifs = self.parse(f)

    return motifs, stdout, stderr
python
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L688-L744
train
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
Amd.parse
def parse(self, fo):
    """
    Convert AMD output to motifs

    Parameters
    ----------
    fo : file-like
        File object containing AMD output.

    Returns
    -------
    motifs : list
        List of Motif instances.
    """
    motifs = []
    # 160: 112 CACGTGC 7.25 chr14:32308489-32308689
    p = re.compile(r'\d+\s+([\d.]+)\s+([\d.]+)\s+([\d.]+)\s+([\d.]+)')
    wm = []
    name = ""
    for line in fo.readlines():
        if line.startswith("Motif") and line.strip().endswith(":"):
            if name:
                motifs.append(Motif(wm))
                motifs[-1].id = name
            name = "%s_%s" % (self.name, line.split(":")[0])
            wm = []
        else:
            m = p.search(line)
            if m:
                wm.append([float(m.group(x)) for x in range(1, 5)])

    # Don't forget the last motif in the file (if any).
    if name:
        motifs.append(Motif(wm))
        motifs[-1].id = name

    return motifs
python
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L746-L781
train
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
Improbizer.parse
def parse(self, fo):
    """
    Convert Improbizer output to motifs

    Parameters
    ----------
    fo : file-like
        File object containing Improbizer output.

    Returns
    -------
    motifs : list
        List of Motif instances.
    """
    motifs = []
    p = re.compile(r'\d+\s+@\s+\d+\.\d+\s+sd\s+\d+\.\d+\s+(\w+)$')

    line = fo.readline()
    while line and line.find("Color") == -1:
        m = p.search(line)
        if m:
            pwm_data = {}
            for i in range(4):
                vals = [x.strip() for x in
                        fo.readline().strip().split(" ") if x]
                pwm_data[vals[0].upper()] = vals[1:]
            pwm = []
            for i in range(len(pwm_data["A"])):
                pwm.append([float(pwm_data[x][i])
                            for x in ["A", "C", "G", "T"]])
            motifs.append(Motif(pwm))
            motifs[-1].id = "%s_%s" % (self.name, m.group(1))
        line = fo.readline()

    return motifs
python
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L873-L905
train
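A runnable sketch of the Improbizer parsing steps on a synthetic block (header line plus one row per base, shaped like what the parser expects; not verbatim Improbizer output):

    import io
    import re

    p = re.compile(r'\d+\s+@\s+\d+\.\d+\s+sd\s+\d+\.\d+\s+(\w+)$')
    fo = io.StringIO(
        "17 @ 12.5 sd 3.2 motif1\n"
        "a 0.7 0.1\n"
        "c 0.1 0.7\n"
        "g 0.1 0.1\n"
        "t 0.1 0.1\n"
    )
    m = p.search(fo.readline().rstrip("\n"))
    pwm_data = {}
    for i in range(4):
        vals = [x.strip() for x in fo.readline().strip().split(" ") if x]
        pwm_data[vals[0].upper()] = vals[1:]
    # Transpose the per-base rows into one [A, C, G, T] row per position.
    pwm = [[float(pwm_data[b][i]) for b in ["A", "C", "G", "T"]]
           for i in range(len(pwm_data["A"]))]
    print(m.group(1), pwm)  # motif1 [[0.7, 0.1, 0.1, 0.1], [0.1, 0.7, 0.1, 0.1]]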
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
Trawler._run_program
def _run_program(self, bin, fastafile, params=None):
    """
    Run Trawler and predict motifs from a FASTA file.

    Parameters
    ----------
    bin : str
        Command used to run the tool.
    fastafile : str
        Name of the FASTA input file.
    params : dict, optional
        Optional parameters. For some of the tools required parameters
        are passed using this dictionary.

    Returns
    -------
    motifs : list of Motif instances
        The predicted motifs.
    stdout : str
        Standard out of the tool.
    stderr : str
        Standard error of the tool.
    """
    params = self._parse_params(params)

    tmp = NamedTemporaryFile(mode="w", dir=self.tmpdir, delete=False)
    shutil.copy(fastafile, tmp.name)
    fastafile = tmp.name

    current_path = os.getcwd()
    os.chdir(self.dir())

    motifs = []
    stdout = ""
    stderr = ""
    for wildcard in [0, 1, 2]:
        cmd = "%s -sample %s -background %s -directory %s -strand %s -wildcard %s" % (
            bin,
            fastafile,
            params["background"],
            self.tmpdir,
            params["strand"],
            wildcard,
        )
        p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
        out, err = p.communicate()
        stdout += out.decode()
        stderr += err.decode()

        os.chdir(current_path)
        pwmfiles = glob.glob("{}/tmp*/result/*pwm".format(self.tmpdir))
        if len(pwmfiles) > 0:
            out_file = pwmfiles[0]
            stdout += "\nOutfile: {}".format(out_file)
            my_motifs = []
            if os.path.exists(out_file):
                my_motifs = read_motifs(out_file, fmt="pwm")
                stdout += "\nTrawler: {} motifs".format(len(my_motifs))
            # remove temporary files
            if os.path.exists(tmp.name):
                os.unlink(tmp.name)
            # Prefix the ids once, including the wildcard setting.
            for motif in my_motifs:
                motif.id = "{}_{}_{}".format(self.name, wildcard, motif.id)
            motifs += my_motifs
        else:
            stderr += "\nNo outfile found"

    return motifs, stdout, stderr
python
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L945-L1023
train
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
Weeder._run_program
def _run_program(self, bin, fastafile, params=None):
    """
    Run Weeder and predict motifs from a FASTA file.

    Parameters
    ----------
    bin : str
        Command used to run the tool.
    fastafile : str
        Name of the FASTA input file.
    params : dict, optional
        Optional parameters. For some of the tools required parameters
        are passed using this dictionary.

    Returns
    -------
    motifs : list of Motif instances
        The predicted motifs.
    stdout : str
        Standard out of the tool.
    stderr : str
        Standard error of the tool.
    """
    params = self._parse_params(params)
    organism = params["organism"]
    weeder_organisms = {
        "hg18": "HS",
        "hg19": "HS",
        "hg38": "HS",
        "mm9": "MM",
        "mm10": "MM",
        "dm3": "DM",
        "dm5": "DM",
        "dm6": "DM",
        "yeast": "SC",
        "sacCer2": "SC",
        "sacCer3": "SC",
        "TAIR10": "AT",
        "TAIR11": "AT",
    }
    weeder_organism = weeder_organisms.get(organism, "HS")

    tmp = NamedTemporaryFile(dir=self.tmpdir)
    name = tmp.name
    tmp.close()
    shutil.copy(fastafile, name)
    fastafile = name

    # The organism code needs its own placeholder; without it the
    # third format() argument is silently dropped.
    cmd = "{} -f {} -O {}".format(
        self.cmd,
        fastafile,
        weeder_organism,
    )
    if params["single"]:
        cmd += " -ss"

    stdout, stderr = "", ""
    p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE, cwd=self.tmpdir)
    out, err = p.communicate()
    stdout += out.decode()
    stderr += err.decode()

    motifs = []
    if os.path.exists(fastafile + ".matrix.w2"):
        with open(fastafile + ".matrix.w2") as f:
            motifs = self.parse(f)

    for m in motifs:
        m.id = "{}_{}".format(self.name, m.id.split("\t")[0])

    for ext in [".w2", ".matrix.w2"]:
        if os.path.exists(fastafile + ext):
            os.unlink(fastafile + ext)

    return motifs, stdout, stderr
python
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L1055-L1137
train
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
MotifSampler._parse_params
def _parse_params(self, params=None):
    """
    Parse parameters.

    Combine default and user-defined parameters.
    """
    prm = self.default_params.copy()
    if params is not None:
        prm.update(params)

    if prm["background_model"]:
        # Absolute path, just to be sure
        prm["background_model"] = os.path.abspath(prm["background_model"])
    elif prm.get("organism", None):
        prm["background_model"] = os.path.join(
            self.config.get_bg_dir(),
            "{}.{}.bg".format(prm["organism"], "MotifSampler"))
    else:
        raise Exception("No background specified for {}".format(self.name))

    prm["strand"] = 1
    if prm["single"]:
        prm["strand"] = 0

    # NamedTemporaryFile is used only to reserve unique file names in
    # tmpdir for the MotifSampler output.
    tmp = NamedTemporaryFile(dir=self.tmpdir)
    prm["pwmfile"] = tmp.name

    tmp2 = NamedTemporaryFile(dir=self.tmpdir)
    prm["outfile"] = tmp2.name

    return prm
python
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L1173-L1206
train
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
MotifSampler._run_program
def _run_program(self, bin, fastafile, params=None):
    """
    Run MotifSampler and predict motifs from a FASTA file.

    Parameters
    ----------
    bin : str
        Command used to run the tool.
    fastafile : str
        Name of the FASTA input file.
    params : dict, optional
        Optional parameters. For some of the tools required parameters
        are passed using this dictionary.

    Returns
    -------
    motifs : list of Motif instances
        The predicted motifs.
    stdout : str
        Standard out of the tool.
    stderr : str
        Standard error of the tool.
    """
    params = self._parse_params(params)

    # TODO: test organism
    cmd = "%s -f %s -b %s -m %s -w %s -n %s -o %s -s %s" % (
        bin,
        fastafile,
        params["background_model"],
        params["pwmfile"],
        params["width"],
        params["number"],
        params["outfile"],
        params["strand"],
    )

    p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
    out, err = p.communicate()
    # Decode to str so the return types match the docstring.
    stdout, stderr = out.decode(), err.decode()

    motifs = []
    if os.path.exists(params["outfile"]):
        with open(params["outfile"]) as f:
            motifs = self.parse_out(f)

    for motif in motifs:
        motif.id = "%s_%s" % (self.name, motif.id)

    return motifs, stdout, stderr
python
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L1208-L1264
train
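All wrappers here build a single command string and run it with shell=True; since the arguments are tool-generated paths that is workable, but the list-argument form sidesteps shell quoting entirely. A hedged alternative sketch with placeholder values (not validated defaults):

    import subprocess

    # Same invocation shape as the command above, as a list of
    # arguments; file names and numbers are placeholders.
    cmd = [
        "MotifSampler",
        "-f", "input.fa",
        "-b", "background.bg",
        "-m", "out.pwm",
        "-w", "10",
        "-n", "5",
        "-o", "out.txt",
        "-s", "1",
    ]
    result = subprocess.run(cmd, capture_output=True, text=True)
    stdout, stderr = result.stdout, result.stderr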
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
MotifSampler.parse
def parse(self, fo):
    """
    Convert MotifSampler output to motifs

    Parameters
    ----------
    fo : file-like
        File object containing MotifSampler output.

    Returns
    -------
    motifs : list
        List of Motif instances.
    """
    motifs = []
    pwm = []
    info = {}
    for line in fo.readlines():
        if line.startswith("#"):
            vals = line.strip()[1:].split(" = ")
            if len(vals) > 1:
                info[vals[0]] = vals[1]
        elif len(line) > 1:
            pwm.append([float(x) for x in line.strip().split("\t")])
        else:
            motifs.append(Motif())
            motifs[-1].consensus = info["Consensus"]
            motifs[-1].width = info["W"]
            motifs[-1].id = info["ID"]
            motifs[-1].pwm = pwm[:]
            pwm = []
    return motifs
python
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L1266-L1299
train
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
MotifSampler.parse_out
def parse_out(self, fo):
    """
    Convert MotifSampler output to motifs

    Parameters
    ----------
    fo : file-like
        File object containing MotifSampler output.

    Returns
    -------
    motifs : list
        List of Motif instances.
    """
    motifs = []
    nucs = {"A": 0, "C": 1, "G": 2, "T": 3}
    pseudo = 0.0  # Should be 1/sqrt(# of seqs)
    aligns = {}
    for line in fo.readlines():
        if line.startswith("#"):
            pass
        elif len(line) > 1:
            vals = line.strip().split("\t")
            m_id, site = [x.strip().split(" ")[1].replace('"', "")
                          for x in vals[8].split(";") if x]
            # Skip sites containing ambiguous bases.
            if site.upper().find("N") == -1:
                aligns.setdefault(m_id, []).append(site)

    for m_id, align in aligns.items():
        width = len(align[0])
        pfm = [[0 for x in range(4)] for x in range(width)]
        for row in align:
            for i in range(len(row)):
                pfm[i][nucs[row[i]]] += 1
        total = float(len(align))
        pwm = [[(x + pseudo / 4) / total + pseudo for x in row]
               for row in pfm]
        m = Motif()
        m.align = align[:]
        m.pwm = pwm[:]
        m.pfm = pfm[:]
        m.id = m_id
        motifs.append(m)
    return motifs
python
def parse_out(self, fo): """ Convert MotifSampler output to motifs Parameters ---------- fo : file-like File object containing MotifSampler output. Returns ------- motifs : list List of Motif instances. """ motifs = [] nucs = {"A":0,"C":1,"G":2,"T":3} pseudo = 0.0 # Should be 1/sqrt(# of seqs) aligns = {} for line in fo.readlines(): if line.startswith("#"): pass elif len(line) > 1: vals = line.strip().split("\t") m_id, site = [x.strip().split(" ")[1].replace('"',"") for x in vals[8].split(";") if x] #if vals[6] == "+": if site.upper().find("N") == -1: aligns.setdefault(m_id, []).append(site) #else: # print site, rc(site) # aligns.setdefault(id, []).append(rc(site)) for m_id, align in aligns.items(): #print id, len(align) width = len(align[0]) pfm = [[0 for x in range(4)] for x in range(width)] for row in align: for i in range(len(row)): pfm[i][nucs[row[i]]] += 1 total = float(len(align)) pwm = [[(x + pseudo/4)/total+(pseudo) for x in row] for row in pfm] m = Motif() m.align = align[:] m.pwm = pwm[:] m.pfm = pfm[:] m.id = m_id motifs.append(m) return motifs
[ "def", "parse_out", "(", "self", ",", "fo", ")", ":", "motifs", "=", "[", "]", "nucs", "=", "{", "\"A\"", ":", "0", ",", "\"C\"", ":", "1", ",", "\"G\"", ":", "2", ",", "\"T\"", ":", "3", "}", "pseudo", "=", "0.0", "# Should be 1/sqrt(# of seqs)", "aligns", "=", "{", "}", "for", "line", "in", "fo", ".", "readlines", "(", ")", ":", "if", "line", ".", "startswith", "(", "\"#\"", ")", ":", "pass", "elif", "len", "(", "line", ")", ">", "1", ":", "vals", "=", "line", ".", "strip", "(", ")", ".", "split", "(", "\"\\t\"", ")", "m_id", ",", "site", "=", "[", "x", ".", "strip", "(", ")", ".", "split", "(", "\" \"", ")", "[", "1", "]", ".", "replace", "(", "'\"'", ",", "\"\"", ")", "for", "x", "in", "vals", "[", "8", "]", ".", "split", "(", "\";\"", ")", "if", "x", "]", "#if vals[6] == \"+\":", "if", "site", ".", "upper", "(", ")", ".", "find", "(", "\"N\"", ")", "==", "-", "1", ":", "aligns", ".", "setdefault", "(", "m_id", ",", "[", "]", ")", ".", "append", "(", "site", ")", "#else:", "# print site, rc(site)", "# aligns.setdefault(id, []).append(rc(site))", "for", "m_id", ",", "align", "in", "aligns", ".", "items", "(", ")", ":", "#print id, len(align)", "width", "=", "len", "(", "align", "[", "0", "]", ")", "pfm", "=", "[", "[", "0", "for", "x", "in", "range", "(", "4", ")", "]", "for", "x", "in", "range", "(", "width", ")", "]", "for", "row", "in", "align", ":", "for", "i", "in", "range", "(", "len", "(", "row", ")", ")", ":", "pfm", "[", "i", "]", "[", "nucs", "[", "row", "[", "i", "]", "]", "]", "+=", "1", "total", "=", "float", "(", "len", "(", "align", ")", ")", "pwm", "=", "[", "[", "(", "x", "+", "pseudo", "/", "4", ")", "/", "total", "+", "(", "pseudo", ")", "for", "x", "in", "row", "]", "for", "row", "in", "pfm", "]", "m", "=", "Motif", "(", ")", "m", ".", "align", "=", "align", "[", ":", "]", "m", ".", "pwm", "=", "pwm", "[", ":", "]", "m", ".", "pfm", "=", "pfm", "[", ":", "]", "m", ".", "id", "=", "m_id", "motifs", ".", "append", "(", "m", ")", "return", "motifs" ]
Convert MotifSampler output to motifs Parameters ---------- fo : file-like File object containing MotifSampler output. Returns ------- motifs : list List of Motif instances.
[ "Convert", "MotifSampler", "output", "to", "motifs", "Parameters", "----------", "fo", ":", "file", "-", "like", "File", "object", "containing", "MotifSampler", "output", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L1301-L1348
train
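The counting step at the heart of parse_out, pulled out as a standalone sketch with a toy alignment (pseudo is 0.0, as in the code above):

# Build a position frequency matrix from aligned sites, then
# normalize each row by the number of sites.
nucs = {"A": 0, "C": 1, "G": 2, "T": 3}
align = ["TGCA", "TGCA", "TACA"]
pfm = [[0] * 4 for _ in range(len(align[0]))]
for site in align:
    for i, base in enumerate(site):
        pfm[i][nucs[base]] += 1
total = float(len(align))
pwm = [[count / total for count in row] for row in pfm]
print(pwm[1])  # [0.333..., 0.0, 0.666..., 0.0]: one A and two Gs at position 2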
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
MDmodule._run_program
def _run_program(self, bin, fastafile, params=None): """ Run MDmodule and predict motifs from a FASTA file. Parameters ---------- bin : str Command used to run the tool. fastafile : str Name of the FASTA input file. params : dict, optional Optional parameters. For some of the tools required parameters are passed using this dictionary. Returns ------- motifs : list of Motif instances The predicted motifs. stdout : str Standard out of the tool. stderr : str Standard error of the tool. """ default_params = {"width":10, "number":10} if params is not None: default_params.update(params) new_file = os.path.join(self.tmpdir, "mdmodule_in.fa") shutil.copy(fastafile, new_file) fastafile = new_file pwmfile = fastafile + ".out" width = default_params['width'] number = default_params['number'] current_path = os.getcwd() os.chdir(self.tmpdir) cmd = "%s -i %s -a 1 -o %s -w %s -t 100 -r %s" % (bin, fastafile, pwmfile, width, number) p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE) stdout,stderr = p.communicate() stdout = "cmd: {}\n".format(cmd) + stdout.decode() motifs = [] if os.path.exists(pwmfile): with open(pwmfile) as f: motifs = self.parse(f) os.chdir(current_path) for motif in motifs: motif.id = "%s_%s" % (self.name, motif.id) return motifs, stdout, stderr
python
def _run_program(self, bin, fastafile, params=None): """ Run MDmodule and predict motifs from a FASTA file. Parameters ---------- bin : str Command used to run the tool. fastafile : str Name of the FASTA input file. params : dict, optional Optional parameters. For some of the tools required parameters are passed using this dictionary. Returns ------- motifs : list of Motif instances The predicted motifs. stdout : str Standard out of the tool. stderr : str Standard error of the tool. """ default_params = {"width":10, "number":10} if params is not None: default_params.update(params) new_file = os.path.join(self.tmpdir, "mdmodule_in.fa") shutil.copy(fastafile, new_file) fastafile = new_file pwmfile = fastafile + ".out" width = default_params['width'] number = default_params['number'] current_path = os.getcwd() os.chdir(self.tmpdir) cmd = "%s -i %s -a 1 -o %s -w %s -t 100 -r %s" % (bin, fastafile, pwmfile, width, number) p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE) stdout,stderr = p.communicate() stdout = "cmd: {}\n".format(cmd) + stdout.decode() motifs = [] if os.path.exists(pwmfile): with open(pwmfile) as f: motifs = self.parse(f) os.chdir(current_path) for motif in motifs: motif.id = "%s_%s" % (self.name, motif.id) return motifs, stdout, stderr
[ "def", "_run_program", "(", "self", ",", "bin", ",", "fastafile", ",", "params", "=", "None", ")", ":", "default_params", "=", "{", "\"width\"", ":", "10", ",", "\"number\"", ":", "10", "}", "if", "params", "is", "not", "None", ":", "default_params", ".", "update", "(", "params", ")", "new_file", "=", "os", ".", "path", ".", "join", "(", "self", ".", "tmpdir", ",", "\"mdmodule_in.fa\"", ")", "shutil", ".", "copy", "(", "fastafile", ",", "new_file", ")", "fastafile", "=", "new_file", "pwmfile", "=", "fastafile", "+", "\".out\"", "width", "=", "default_params", "[", "'width'", "]", "number", "=", "default_params", "[", "'number'", "]", "current_path", "=", "os", ".", "getcwd", "(", ")", "os", ".", "chdir", "(", "self", ".", "tmpdir", ")", "cmd", "=", "\"%s -i %s -a 1 -o %s -w %s -t 100 -r %s\"", "%", "(", "bin", ",", "fastafile", ",", "pwmfile", ",", "width", ",", "number", ")", "p", "=", "Popen", "(", "cmd", ",", "shell", "=", "True", ",", "stdout", "=", "PIPE", ",", "stderr", "=", "PIPE", ")", "stdout", ",", "stderr", "=", "p", ".", "communicate", "(", ")", "stdout", "=", "\"cmd: {}\\n\"", ".", "format", "(", "cmd", ")", "+", "stdout", ".", "decode", "(", ")", "motifs", "=", "[", "]", "if", "os", ".", "path", ".", "exists", "(", "pwmfile", ")", ":", "with", "open", "(", "pwmfile", ")", "as", "f", ":", "motifs", "=", "self", ".", "parse", "(", "f", ")", "os", ".", "chdir", "(", "current_path", ")", "for", "motif", "in", "motifs", ":", "motif", ".", "id", "=", "\"%s_%s\"", "%", "(", "self", ".", "name", ",", "motif", ".", "id", ")", "return", "motifs", ",", "stdout", ",", "stderr" ]
Run MDmodule and predict motifs from a FASTA file. Parameters ---------- bin : str Command used to run the tool. fastafile : str Name of the FASTA input file. params : dict, optional Optional parameters. For some of the tools required parameters are passed using this dictionary. Returns ------- motifs : list of Motif instances The predicted motifs. stdout : str Standard out of the tool. stderr : str Standard error of the tool.
[ "Run", "MDmodule", "and", "predict", "motifs", "from", "a", "FASTA", "file", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L1378-L1436
train
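The subprocess pattern the _run_program implementations share, reduced to a self-contained sketch:

from subprocess import PIPE, Popen

# Run a shell command, wait for it to finish, and decode the captured
# byte streams -- the same Popen/communicate/decode sequence used above.
p = Popen("echo hello", shell=True, stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
stdout, stderr = out.decode(), err.decode()
print(stdout.strip())  # hello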
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
MDmodule.parse
def parse(self, fo): """ Convert MDmodule output to motifs Parameters ---------- fo : file-like File object containing MDmodule output. Returns ------- motifs : list List of Motif instances. """ motifs = [] nucs = {"A":0,"C":1,"G":2,"T":3} p = re.compile(r'(\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)') pf = re.compile(r'>.+\s+[bf]\d+\s+(\w+)') pwm = [] pfm = [] align = [] m_id = "" for line in fo.readlines(): if line.startswith("Motif"): if m_id: motifs.append(Motif()) motifs[-1].id = m_id motifs[-1].pwm = pwm motifs[-1].pfm = pfm motifs[-1].align = align pwm = [] pfm = [] align = [] m_id = line.split("\t")[0] else: m = p.search(line) if m: pwm.append([float(m.group(x))/100 for x in [2,3,4,5]]) m = pf.search(line) if m: if not pfm: pfm = [[0 for x in range(4)] for x in range(len(m.group(1)))] for i in range(len(m.group(1))): pfm[i][nucs[m.group(1)[i]]] += 1 align.append(m.group(1)) if pwm: motifs.append(Motif()) motifs[-1].id = m_id motifs[-1].pwm = pwm motifs[-1].pfm = pfm motifs[-1].align = align return motifs
python
def parse(self, fo): """ Convert MDmodule output to motifs Parameters ---------- fo : file-like File object containing MDmodule output. Returns ------- motifs : list List of Motif instances. """ motifs = [] nucs = {"A":0,"C":1,"G":2,"T":3} p = re.compile(r'(\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)') pf = re.compile(r'>.+\s+[bf]\d+\s+(\w+)') pwm = [] pfm = [] align = [] m_id = "" for line in fo.readlines(): if line.startswith("Motif"): if m_id: motifs.append(Motif()) motifs[-1].id = m_id motifs[-1].pwm = pwm motifs[-1].pfm = pfm motifs[-1].align = align pwm = [] pfm = [] align = [] m_id = line.split("\t")[0] else: m = p.search(line) if m: pwm.append([float(m.group(x))/100 for x in [2,3,4,5]]) m = pf.search(line) if m: if not pfm: pfm = [[0 for x in range(4)] for x in range(len(m.group(1)))] for i in range(len(m.group(1))): pfm[i][nucs[m.group(1)[i]]] += 1 align.append(m.group(1)) if pwm: motifs.append(Motif()) motifs[-1].id = m_id motifs[-1].pwm = pwm motifs[-1].pfm = pfm motifs[-1].align = align return motifs
[ "def", "parse", "(", "self", ",", "fo", ")", ":", "motifs", "=", "[", "]", "nucs", "=", "{", "\"A\"", ":", "0", ",", "\"C\"", ":", "1", ",", "\"G\"", ":", "2", ",", "\"T\"", ":", "3", "}", "p", "=", "re", ".", "compile", "(", "r'(\\d+)\\s+(\\d+\\.\\d+)\\s+(\\d+\\.\\d+)\\s+(\\d+\\.\\d+)\\s+(\\d+\\.\\d+)'", ")", "pf", "=", "re", ".", "compile", "(", "r'>.+\\s+[bf]\\d+\\s+(\\w+)'", ")", "pwm", "=", "[", "]", "pfm", "=", "[", "]", "align", "=", "[", "]", "m_id", "=", "\"\"", "for", "line", "in", "fo", ".", "readlines", "(", ")", ":", "if", "line", ".", "startswith", "(", "\"Motif\"", ")", ":", "if", "m_id", ":", "motifs", ".", "append", "(", "Motif", "(", ")", ")", "motifs", "[", "-", "1", "]", ".", "id", "=", "m_id", "motifs", "[", "-", "1", "]", ".", "pwm", "=", "pwm", "motifs", "[", "-", "1", "]", ".", "pfm", "=", "pfm", "motifs", "[", "-", "1", "]", ".", "align", "=", "align", "pwm", "=", "[", "]", "pfm", "=", "[", "]", "align", "=", "[", "]", "m_id", "=", "line", ".", "split", "(", "\"\\t\"", ")", "[", "0", "]", "else", ":", "m", "=", "p", ".", "search", "(", "line", ")", "if", "m", ":", "pwm", ".", "append", "(", "[", "float", "(", "m", ".", "group", "(", "x", ")", ")", "/", "100", "for", "x", "in", "[", "2", ",", "3", ",", "4", ",", "5", "]", "]", ")", "m", "=", "pf", ".", "search", "(", "line", ")", "if", "m", ":", "if", "not", "pfm", ":", "pfm", "=", "[", "[", "0", "for", "x", "in", "range", "(", "4", ")", "]", "for", "x", "in", "range", "(", "len", "(", "m", ".", "group", "(", "1", ")", ")", ")", "]", "for", "i", "in", "range", "(", "len", "(", "m", ".", "group", "(", "1", ")", ")", ")", ":", "pfm", "[", "i", "]", "[", "nucs", "[", "m", ".", "group", "(", "1", ")", "[", "i", "]", "]", "]", "+=", "1", "align", ".", "append", "(", "m", ".", "group", "(", "1", ")", ")", "if", "pwm", ":", "motifs", ".", "append", "(", "Motif", "(", ")", ")", "motifs", "[", "-", "1", "]", ".", "id", "=", "m_id", "motifs", "[", "-", "1", "]", ".", "pwm", "=", "pwm", "motifs", "[", "-", "1", "]", ".", "pfm", "=", "pfm", "motifs", "[", "-", "1", "]", ".", "align", "=", "align", "return", "motifs" ]
Convert MDmodule output to motifs Parameters ---------- fo : file-like File object containing MDmodule output. Returns ------- motifs : list List of Motif instances.
[ "Convert", "MDmodule", "output", "to", "motifs", "Parameters", "----------", "fo", ":", "file", "-", "like", "File", "object", "containing", "MDmodule", "output", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L1438-L1493
train
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
ChIPMunk._parse_params
def _parse_params(self, params=None): """ Parse parameters. Combine default and user-defined parameters. """ prm = self.default_params.copy() if params is not None: prm.update(params) return prm
python
def _parse_params(self, params=None): """ Parse parameters. Combine default and user-defined parameters. """ prm = self.default_params.copy() if params is not None: prm.update(params) return prm
[ "def", "_parse_params", "(", "self", ",", "params", "=", "None", ")", ":", "prm", "=", "self", ".", "default_params", ".", "copy", "(", ")", "if", "params", "is", "not", "None", ":", "prm", ".", "update", "(", "params", ")", "return", "prm" ]
Parse parameters. Combine default and user-defined parameters.
[ "Parse", "parameters", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L1509-L1519
train
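The merge in _parse_params is a plain copy-and-update; a worked example with made-up values:

default_params = {"width": 10, "number": 10}
user_params = {"width": 15}

prm = default_params.copy()  # never mutate the shared defaults
if user_params is not None:
    prm.update(user_params)  # user-defined values win on key collisions
print(prm)  # {'width': 15, 'number': 10}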
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
ChIPMunk._run_program
def _run_program(self, bin, fastafile, params=None): """ Run ChIPMunk and predict motifs from a FASTA file. Parameters ---------- bin : str Command used to run the tool. fastafile : str Name of the FASTA input file. params : dict, optional Optional parameters. For some of the tools required parameters are passed using this dictionary. Returns ------- motifs : list of Motif instances The predicted motifs. stdout : str Standard out of the tool. stderr : str Standard error of the tool. """ params = self._parse_params(params) basename = "munk_in.fa" new_file = os.path.join(self.tmpdir, basename) out = open(new_file, "w") f = Fasta(fastafile) for seq in f.seqs: header = len(seq) // 2 out.write(">%s\n" % header) out.write("%s\n" % seq) out.close() fastafile = new_file outfile = fastafile + ".out" current_path = os.getcwd() os.chdir(self.dir()) motifs = [] # Max recommended by ChIPMunk userguide ncpus = 4 stdout = "" stderr = "" for zoops_factor in ["oops", 0.0, 0.5, 1.0]: cmd = "{} {} {} y {} m:{} 100 10 1 {} 1>{}".format( bin, params.get("width", 8), params.get("width", 20), zoops_factor, fastafile, ncpus, outfile ) #print("command: ", cmd) p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE) std = p.communicate() stdout = stdout + std[0].decode() stderr = stderr + std[1].decode() if "RuntimeException" in stderr: return [], stdout, stderr if os.path.exists(outfile): with open(outfile) as f: motifs += self.parse(f) os.chdir(current_path) return motifs, stdout, stderr
python
def _run_program(self, bin, fastafile, params=None): """ Run ChIPMunk and predict motifs from a FASTA file. Parameters ---------- bin : str Command used to run the tool. fastafile : str Name of the FASTA input file. params : dict, optional Optional parameters. For some of the tools required parameters are passed using this dictionary. Returns ------- motifs : list of Motif instances The predicted motifs. stdout : str Standard out of the tool. stderr : str Standard error of the tool. """ params = self._parse_params(params) basename = "munk_in.fa" new_file = os.path.join(self.tmpdir, basename) out = open(new_file, "w") f = Fasta(fastafile) for seq in f.seqs: header = len(seq) // 2 out.write(">%s\n" % header) out.write("%s\n" % seq) out.close() fastafile = new_file outfile = fastafile + ".out" current_path = os.getcwd() os.chdir(self.dir()) motifs = [] # Max recommended by ChIPMunk userguide ncpus = 4 stdout = "" stderr = "" for zoops_factor in ["oops", 0.0, 0.5, 1.0]: cmd = "{} {} {} y {} m:{} 100 10 1 {} 1>{}".format( bin, params.get("width", 8), params.get("width", 20), zoops_factor, fastafile, ncpus, outfile ) #print("command: ", cmd) p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE) std = p.communicate() stdout = stdout + std[0].decode() stderr = stderr + std[1].decode() if "RuntimeException" in stderr: return [], stdout, stderr if os.path.exists(outfile): with open(outfile) as f: motifs += self.parse(f) os.chdir(current_path) return motifs, stdout, stderr
[ "def", "_run_program", "(", "self", ",", "bin", ",", "fastafile", ",", "params", "=", "None", ")", ":", "params", "=", "self", ".", "_parse_params", "(", "params", ")", "basename", "=", "\"munk_in.fa\"", "new_file", "=", "os", ".", "path", ".", "join", "(", "self", ".", "tmpdir", ",", "basename", ")", "out", "=", "open", "(", "new_file", ",", "\"w\"", ")", "f", "=", "Fasta", "(", "fastafile", ")", "for", "seq", "in", "f", ".", "seqs", ":", "header", "=", "len", "(", "seq", ")", "//", "2", "out", ".", "write", "(", "\">%s\\n\"", "%", "header", ")", "out", ".", "write", "(", "\"%s\\n\"", "%", "seq", ")", "out", ".", "close", "(", ")", "fastafile", "=", "new_file", "outfile", "=", "fastafile", "+", "\".out\"", "current_path", "=", "os", ".", "getcwd", "(", ")", "os", ".", "chdir", "(", "self", ".", "dir", "(", ")", ")", "motifs", "=", "[", "]", "# Max recommended by ChIPMunk userguide", "ncpus", "=", "4", "stdout", "=", "\"\"", "stderr", "=", "\"\"", "for", "zoops_factor", "in", "[", "\"oops\"", ",", "0.0", ",", "0.5", ",", "1.0", "]", ":", "cmd", "=", "\"{} {} {} y {} m:{} 100 10 1 {} 1>{}\"", ".", "format", "(", "bin", ",", "params", ".", "get", "(", "\"width\"", ",", "8", ")", ",", "params", ".", "get", "(", "\"width\"", ",", "20", ")", ",", "zoops_factor", ",", "fastafile", ",", "ncpus", ",", "outfile", ")", "#print(\"command: \", cmd)", "p", "=", "Popen", "(", "cmd", ",", "shell", "=", "True", ",", "stdout", "=", "PIPE", ",", "stderr", "=", "PIPE", ")", "std", "=", "p", ".", "communicate", "(", ")", "stdout", "=", "stdout", "+", "std", "[", "0", "]", ".", "decode", "(", ")", "stderr", "=", "stderr", "+", "std", "[", "1", "]", ".", "decode", "(", ")", "if", "\"RuntimeException\"", "in", "stderr", ":", "return", "[", "]", ",", "stdout", ",", "stderr", "if", "os", ".", "path", ".", "exists", "(", "outfile", ")", ":", "with", "open", "(", "outfile", ")", "as", "f", ":", "motifs", "+=", "self", ".", "parse", "(", "f", ")", "os", ".", "chdir", "(", "current_path", ")", "return", "motifs", ",", "stdout", ",", "stderr" ]
Run ChIPMunk and predict motifs from a FASTA file. Parameters ---------- bin : str Command used to run the tool. fastafile : str Name of the FASTA input file. params : dict, optional Optional parameters. For some of the tools required parameters are passed using this dictionary. Returns ------- motifs : list of Motif instances The predicted motifs. stdout : str Standard out of the tool. stderr : str Standard error of the tool.
[ "Run", "ChIPMunk", "and", "predict", "motifs", "from", "a", "FASTA", "file", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L1521-L1596
train
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
ChIPMunk.parse
def parse(self, fo): """ Convert ChIPMunk output to motifs Parameters ---------- fo : file-like File object containing ChIPMunk output. Returns ------- motifs : list List of Motif instances. """ #KDIC|6.124756232026243 #A|517.9999999999999 42.99999999999999 345.99999999999994 25.999999999999996 602.9999999999999 155.99999999999997 2.9999999999999996 91.99999999999999 #C|5.999999999999999 4.999999999999999 2.9999999999999996 956.9999999999999 91.99999999999999 17.999999999999996 22.999999999999996 275.99999999999994 #G|340.99999999999994 943.9999999999999 630.9999999999999 6.999999999999999 16.999999999999996 48.99999999999999 960.9999999999999 14.999999999999998 #T|134.99999999999997 7.999999999999999 19.999999999999996 9.999999999999998 287.99999999999994 776.9999999999999 12.999999999999998 616.9999999999999 #N|999.9999999999998 line = fo.readline() if not line: return [] while not line.startswith("A|"): line = fo.readline() matrix = [] for _ in range(4): matrix.append([float(x) for x in line.strip().split("|")[1].split(" ")]) line = fo.readline() #print matrix matrix = [[matrix[x][y] for x in range(4)] for y in range(len(matrix[0]))] #print matrix m = Motif(matrix) m.id = "ChIPMunk_w%s" % len(m) return [m]
python
def parse(self, fo): """ Convert ChIPMunk output to motifs Parameters ---------- fo : file-like File object containing ChIPMunk output. Returns ------- motifs : list List of Motif instances. """ #KDIC|6.124756232026243 #A|517.9999999999999 42.99999999999999 345.99999999999994 25.999999999999996 602.9999999999999 155.99999999999997 2.9999999999999996 91.99999999999999 #C|5.999999999999999 4.999999999999999 2.9999999999999996 956.9999999999999 91.99999999999999 17.999999999999996 22.999999999999996 275.99999999999994 #G|340.99999999999994 943.9999999999999 630.9999999999999 6.999999999999999 16.999999999999996 48.99999999999999 960.9999999999999 14.999999999999998 #T|134.99999999999997 7.999999999999999 19.999999999999996 9.999999999999998 287.99999999999994 776.9999999999999 12.999999999999998 616.9999999999999 #N|999.9999999999998 line = fo.readline() if not line: return [] while not line.startswith("A|"): line = fo.readline() matrix = [] for _ in range(4): matrix.append([float(x) for x in line.strip().split("|")[1].split(" ")]) line = fo.readline() #print matrix matrix = [[matrix[x][y] for x in range(4)] for y in range(len(matrix[0]))] #print matrix m = Motif(matrix) m.id = "ChIPMunk_w%s" % len(m) return [m]
[ "def", "parse", "(", "self", ",", "fo", ")", ":", "#KDIC|6.124756232026243", "#A|517.9999999999999 42.99999999999999 345.99999999999994 25.999999999999996 602.9999999999999 155.99999999999997 2.9999999999999996 91.99999999999999", "#C|5.999999999999999 4.999999999999999 2.9999999999999996 956.9999999999999 91.99999999999999 17.999999999999996 22.999999999999996 275.99999999999994", "#G|340.99999999999994 943.9999999999999 630.9999999999999 6.999999999999999 16.999999999999996 48.99999999999999 960.9999999999999 14.999999999999998", "#T|134.99999999999997 7.999999999999999 19.999999999999996 9.999999999999998 287.99999999999994 776.9999999999999 12.999999999999998 616.9999999999999", "#N|999.9999999999998", "line", "=", "fo", ".", "readline", "(", ")", "if", "not", "line", ":", "return", "[", "]", "while", "not", "line", ".", "startswith", "(", "\"A|\"", ")", ":", "line", "=", "fo", ".", "readline", "(", ")", "matrix", "=", "[", "]", "for", "_", "in", "range", "(", "4", ")", ":", "matrix", ".", "append", "(", "[", "float", "(", "x", ")", "for", "x", "in", "line", ".", "strip", "(", ")", ".", "split", "(", "\"|\"", ")", "[", "1", "]", ".", "split", "(", "\" \"", ")", "]", ")", "line", "=", "fo", ".", "readline", "(", ")", "#print matrix", "matrix", "=", "[", "[", "matrix", "[", "x", "]", "[", "y", "]", "for", "x", "in", "range", "(", "4", ")", "]", "for", "y", "in", "range", "(", "len", "(", "matrix", "[", "0", "]", ")", ")", "]", "#print matrix", "m", "=", "Motif", "(", "matrix", ")", "m", ".", "id", "=", "\"ChIPMunk_w%s\"", "%", "len", "(", "m", ")", "return", "[", "m", "]" ]
Convert ChIPMunk output to motifs Parameters ---------- fo : file-like File object containing ChIPMunk output. Returns ------- motifs : list List of Motif instances.
[ "Convert", "ChIPMunk", "output", "to", "motifs", "Parameters", "----------", "fo", ":", "file", "-", "like", "File", "object", "containing", "ChIPMunk", "output", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L1598-L1633
train
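ChIPMunk reports one 'A|...', 'C|...', 'G|...', 'T|...' row per nucleotide; the list comprehension in parse transposes this into one row per motif position, which is what Motif expects. A two-position toy example with made-up counts:

# Rows indexed by nucleotide (A, C, G, T), columns by position.
by_nuc = [
    [10.0, 1.0],   # A
    [2.0, 3.0],    # C
    [5.0, 20.0],   # G
    [1.0, 4.0],    # T
]
# The same transpose expression as in parse().
by_pos = [[by_nuc[x][y] for x in range(4)] for y in range(len(by_nuc[0]))]
print(by_pos)  # [[10.0, 2.0, 5.0, 1.0], [1.0, 3.0, 20.0, 4.0]]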
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
Posmo._run_program
def _run_program(self, bin, fastafile, params=None): """ Run Posmo and predict motifs from a FASTA file. Parameters ---------- bin : str Command used to run the tool. fastafile : str Name of the FASTA input file. params : dict, optional Optional parameters. For some of the tools required parameters are passed using this dictionary. Returns ------- motifs : list of Motif instances The predicted motifs. stdout : str Standard out of the tool. stderr : str Standard error of the tool. """ default_params = {} if params is not None: default_params.update(params) width = params.get("width", 8) basename = "posmo_in.fa" new_file = os.path.join(self.tmpdir, basename) shutil.copy(fastafile, new_file) fastafile = new_file #pwmfile = fastafile + ".pwm" motifs = [] current_path = os.getcwd() os.chdir(self.tmpdir) for n_ones in range(4, min(width, 11), 2): x = "1" * n_ones outfile = "%s.%s.out" % (fastafile, x) cmd = "%s 5000 %s %s 1.6 2.5 %s 200" % (bin, x, fastafile, width) p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE) stdout, stderr = p.communicate() stdout = stdout.decode() stderr = stderr.decode() context_file = fastafile.replace(basename, "context.%s.%s.txt" % (basename, x)) cmd = "%s %s %s simi.txt 0.88 10 2 10" % (bin.replace("posmo","clusterwd"), context_file, outfile) p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE) out, err = p.communicate() stdout += out.decode() stderr += err.decode() if os.path.exists(outfile): with open(outfile) as f: motifs += self.parse(f, width, n_ones) os.chdir(current_path) return motifs, stdout, stderr
python
def _run_program(self, bin, fastafile, params=None): """ Run Posmo and predict motifs from a FASTA file. Parameters ---------- bin : str Command used to run the tool. fastafile : str Name of the FASTA input file. params : dict, optional Optional parameters. For some of the tools required parameters are passed using this dictionary. Returns ------- motifs : list of Motif instances The predicted motifs. stdout : str Standard out of the tool. stderr : str Standard error of the tool. """ default_params = {} if params is not None: default_params.update(params) width = params.get("width", 8) basename = "posmo_in.fa" new_file = os.path.join(self.tmpdir, basename) shutil.copy(fastafile, new_file) fastafile = new_file #pwmfile = fastafile + ".pwm" motifs = [] current_path = os.getcwd() os.chdir(self.tmpdir) for n_ones in range(4, min(width, 11), 2): x = "1" * n_ones outfile = "%s.%s.out" % (fastafile, x) cmd = "%s 5000 %s %s 1.6 2.5 %s 200" % (bin, x, fastafile, width) p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE) stdout, stderr = p.communicate() stdout = stdout.decode() stderr = stderr.decode() context_file = fastafile.replace(basename, "context.%s.%s.txt" % (basename, x)) cmd = "%s %s %s simi.txt 0.88 10 2 10" % (bin.replace("posmo","clusterwd"), context_file, outfile) p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE) out, err = p.communicate() stdout += out.decode() stderr += err.decode() if os.path.exists(outfile): with open(outfile) as f: motifs += self.parse(f, width, n_ones) os.chdir(current_path) return motifs, stdout, stderr
[ "def", "_run_program", "(", "self", ",", "bin", ",", "fastafile", ",", "params", "=", "None", ")", ":", "default_params", "=", "{", "}", "if", "params", "is", "not", "None", ":", "default_params", ".", "update", "(", "params", ")", "width", "=", "params", ".", "get", "(", "\"width\"", ",", "8", ")", "basename", "=", "\"posmo_in.fa\"", "new_file", "=", "os", ".", "path", ".", "join", "(", "self", ".", "tmpdir", ",", "basename", ")", "shutil", ".", "copy", "(", "fastafile", ",", "new_file", ")", "fastafile", "=", "new_file", "#pwmfile = fastafile + \".pwm\"", "motifs", "=", "[", "]", "current_path", "=", "os", ".", "getcwd", "(", ")", "os", ".", "chdir", "(", "self", ".", "tmpdir", ")", "for", "n_ones", "in", "range", "(", "4", ",", "min", "(", "width", ",", "11", ")", ",", "2", ")", ":", "x", "=", "\"1\"", "*", "n_ones", "outfile", "=", "\"%s.%s.out\"", "%", "(", "fastafile", ",", "x", ")", "cmd", "=", "\"%s 5000 %s %s 1.6 2.5 %s 200\"", "%", "(", "bin", ",", "x", ",", "fastafile", ",", "width", ")", "p", "=", "Popen", "(", "cmd", ",", "shell", "=", "True", ",", "stdout", "=", "PIPE", ",", "stderr", "=", "PIPE", ")", "stdout", ",", "stderr", "=", "p", ".", "communicate", "(", ")", "stdout", "=", "stdout", ".", "decode", "(", ")", "stderr", "=", "stderr", ".", "decode", "(", ")", "context_file", "=", "fastafile", ".", "replace", "(", "basename", ",", "\"context.%s.%s.txt\"", "%", "(", "basename", ",", "x", ")", ")", "cmd", "=", "\"%s %s %s simi.txt 0.88 10 2 10\"", "%", "(", "bin", ".", "replace", "(", "\"posmo\"", ",", "\"clusterwd\"", ")", ",", "context_file", ",", "outfile", ")", "p", "=", "Popen", "(", "cmd", ",", "shell", "=", "True", ",", "stdout", "=", "PIPE", ",", "stderr", "=", "PIPE", ")", "out", ",", "err", "=", "p", ".", "communicate", "(", ")", "stdout", "+=", "out", ".", "decode", "(", ")", "stderr", "+=", "err", ".", "decode", "(", ")", "if", "os", ".", "path", ".", "exists", "(", "outfile", ")", ":", "with", "open", "(", "outfile", ")", "as", "f", ":", "motifs", "+=", "self", ".", "parse", "(", "f", ",", "width", ",", "n_ones", ")", "os", ".", "chdir", "(", "current_path", ")", "return", "motifs", ",", "stdout", ",", "stderr" ]
Run Posmo and predict motifs from a FASTA file. Parameters ---------- bin : str Command used to run the tool. fastafile : str Name of the FASTA input file. params : dict, optional Optional parameters. For some of the tools required parameters are passed using this dictionary. Returns ------- motifs : list of Motif instances The predicted motifs. stdout : str Standard out of the tool. stderr : str Standard error of the tool.
[ "Run", "Posmo", "and", "predict", "motifs", "from", "a", "FASTA", "file", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L1664-L1729
train
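Posmo is run once per seed pattern; the seeds generated for the default width of 8 are easy to enumerate (note the wrapper indexes params directly, so it expects a dict):

width = 8
seeds = ["1" * n_ones for n_ones in range(4, min(width, 11), 2)]
print(seeds)  # ['1111', '111111']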
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
Posmo.parse
def parse(self, fo, width, seed=None): """ Convert Posmo output to motifs Parameters ---------- fo : file-like File object containing Posmo output. Returns ------- motifs : list List of Motif instances. """ motifs = [] lines = [fo.readline() for x in range(6)] while lines[0]: matrix = [[float(x) for x in line.strip().split("\t")] for line in lines[2:]] matrix = [[matrix[x][y] for x in range(4)] for y in range(len(matrix[0]))] m = Motif(matrix) m.trim(0.1) m.id = lines[0].strip().split(" ")[-1] motifs.append(m) lines = [fo.readline() for x in range(6)] for i,motif in enumerate(motifs): if seed: motif.id = "%s_w%s.%s_%s" % (self.name, width, seed, i + 1) else: motif.id = "%s_w%s_%s" % (self.name, width, i + 1) motif.trim(0.25) return motifs
python
def parse(self, fo, width, seed=None): """ Convert Posmo output to motifs Parameters ---------- fo : file-like File object containing Posmo output. Returns ------- motifs : list List of Motif instances. """ motifs = [] lines = [fo.readline() for x in range(6)] while lines[0]: matrix = [[float(x) for x in line.strip().split("\t")] for line in lines[2:]] matrix = [[matrix[x][y] for x in range(4)] for y in range(len(matrix[0]))] m = Motif(matrix) m.trim(0.1) m.id = lines[0].strip().split(" ")[-1] motifs.append(m) lines = [fo.readline() for x in range(6)] for i,motif in enumerate(motifs): if seed: motif.id = "%s_w%s.%s_%s" % (self.name, width, seed, i + 1) else: motif.id = "%s_w%s_%s" % (self.name, width, i + 1) motif.trim(0.25) return motifs
[ "def", "parse", "(", "self", ",", "fo", ",", "width", ",", "seed", "=", "None", ")", ":", "motifs", "=", "[", "]", "lines", "=", "[", "fo", ".", "readline", "(", ")", "for", "x", "in", "range", "(", "6", ")", "]", "while", "lines", "[", "0", "]", ":", "matrix", "=", "[", "[", "float", "(", "x", ")", "for", "x", "in", "line", ".", "strip", "(", ")", ".", "split", "(", "\"\\t\"", ")", "]", "for", "line", "in", "lines", "[", "2", ":", "]", "]", "matrix", "=", "[", "[", "matrix", "[", "x", "]", "[", "y", "]", "for", "x", "in", "range", "(", "4", ")", "]", "for", "y", "in", "range", "(", "len", "(", "matrix", "[", "0", "]", ")", ")", "]", "m", "=", "Motif", "(", "matrix", ")", "m", ".", "trim", "(", "0.1", ")", "m", ".", "id", "=", "lines", "[", "0", "]", ".", "strip", "(", ")", ".", "split", "(", "\" \"", ")", "[", "-", "1", "]", "motifs", ".", "append", "(", "m", ")", "lines", "=", "[", "fo", ".", "readline", "(", ")", "for", "x", "in", "range", "(", "6", ")", "]", "for", "i", ",", "motif", "in", "enumerate", "(", "motifs", ")", ":", "if", "seed", ":", "motif", ".", "id", "=", "\"%s_w%s.%s_%s\"", "%", "(", "self", ".", "name", ",", "width", ",", "seed", ",", "i", "+", "1", ")", "else", ":", "motif", ".", "id", "=", "\"%s_w%s_%s\"", "%", "(", "self", ".", "name", ",", "width", ",", "i", "+", "1", ")", "motif", ".", "trim", "(", "0.25", ")", "return", "motifs" ]
Convert Posmo output to motifs Parameters ---------- fo : file-like File object containing Posmo output. Returns ------- motifs : list List of Motif instances.
[ "Convert", "Posmo", "output", "to", "motifs", "Parameters", "----------", "fo", ":", "file", "-", "like", "File", "object", "containing", "Posmo", "output", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L1731-L1764
train
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
Gadem.parse
def parse(self, fo): """ Convert GADEM output to motifs Parameters ---------- fo : file-like File object containing GADEM output. Returns ------- motifs : list List of Motif instances. """ motifs = [] nucs = {"A":0,"C":1,"G":2,"T":3} lines = fo.readlines() for i in range(0, len(lines), 5): align = [] pwm = [] pfm = [] m_id = "" line = lines[i].strip() m_id = line[1:] number = m_id.split("_")[0][1:] if os.path.exists("%s.seq" % number): with open("%s.seq" % number) as f: for l in f: if "x" not in l and "n" not in l: l = l.strip().upper() align.append(l) if not pfm: pfm = [[0 for x in range(4)] for x in range(len(l))] for p in range(len(l)): pfm[p][nucs[l[p]]] += 1 m = [l.strip().split(" ")[1].split("\t") for l in lines[i + 1: i + 5]] pwm = [[float(m[x][y]) for x in range(4)] for y in range(len(m[0]))] motifs.append(Motif(pwm)) motifs[-1].id = "{}_{}".format(self.name, m_id) #motifs[-1].pwm = pwm if align: motifs[-1].pfm = pfm motifs[-1].align = align return motifs
python
def parse(self, fo): """ Convert GADEM output to motifs Parameters ---------- fo : file-like File object containing GADEM output. Returns ------- motifs : list List of Motif instances. """ motifs = [] nucs = {"A":0,"C":1,"G":2,"T":3} lines = fo.readlines() for i in range(0, len(lines), 5): align = [] pwm = [] pfm = [] m_id = "" line = lines[i].strip() m_id = line[1:] number = m_id.split("_")[0][1:] if os.path.exists("%s.seq" % number): with open("%s.seq" % number) as f: for l in f: if "x" not in l and "n" not in l: l = l.strip().upper() align.append(l) if not pfm: pfm = [[0 for x in range(4)] for x in range(len(l))] for p in range(len(l)): pfm[p][nucs[l[p]]] += 1 m = [l.strip().split(" ")[1].split("\t") for l in lines[i + 1: i + 5]] pwm = [[float(m[x][y]) for x in range(4)] for y in range(len(m[0]))] motifs.append(Motif(pwm)) motifs[-1].id = "{}_{}".format(self.name, m_id) #motifs[-1].pwm = pwm if align: motifs[-1].pfm = pfm motifs[-1].align = align return motifs
[ "def", "parse", "(", "self", ",", "fo", ")", ":", "motifs", "=", "[", "]", "nucs", "=", "{", "\"A\"", ":", "0", ",", "\"C\"", ":", "1", ",", "\"G\"", ":", "2", ",", "\"T\"", ":", "3", "}", "lines", "=", "fo", ".", "readlines", "(", ")", "for", "i", "in", "range", "(", "0", ",", "len", "(", "lines", ")", ",", "5", ")", ":", "align", "=", "[", "]", "pwm", "=", "[", "]", "pfm", "=", "[", "]", "m_id", "=", "\"\"", "line", "=", "lines", "[", "i", "]", ".", "strip", "(", ")", "m_id", "=", "line", "[", "1", ":", "]", "number", "=", "m_id", ".", "split", "(", "\"_\"", ")", "[", "0", "]", "[", "1", ":", "]", "if", "os", ".", "path", ".", "exists", "(", "\"%s.seq\"", "%", "number", ")", ":", "with", "open", "(", "\"%s.seq\"", "%", "number", ")", "as", "f", ":", "for", "l", "in", "f", ":", "if", "\"x\"", "not", "in", "l", "and", "\"n\"", "not", "in", "l", ":", "l", "=", "l", ".", "strip", "(", ")", ".", "upper", "(", ")", "align", ".", "append", "(", "l", ")", "if", "not", "pfm", ":", "pfm", "=", "[", "[", "0", "for", "x", "in", "range", "(", "4", ")", "]", "for", "x", "in", "range", "(", "len", "(", "l", ")", ")", "]", "for", "p", "in", "range", "(", "len", "(", "l", ")", ")", ":", "pfm", "[", "p", "]", "[", "nucs", "[", "l", "[", "p", "]", "]", "]", "+=", "1", "m", "=", "[", "l", ".", "strip", "(", ")", ".", "split", "(", "\" \"", ")", "[", "1", "]", ".", "split", "(", "\"\\t\"", ")", "for", "l", "in", "lines", "[", "i", "+", "1", ":", "i", "+", "5", "]", "]", "pwm", "=", "[", "[", "float", "(", "m", "[", "x", "]", "[", "y", "]", ")", "for", "x", "in", "range", "(", "4", ")", "]", "for", "y", "in", "range", "(", "len", "(", "m", "[", "0", "]", ")", ")", "]", "motifs", ".", "append", "(", "Motif", "(", "pwm", ")", ")", "motifs", "[", "-", "1", "]", ".", "id", "=", "\"{}_{}\"", ".", "format", "(", "self", ".", "name", ",", "m_id", ")", "#motifs[-1].pwm = pwm", "if", "align", ":", "motifs", "[", "-", "1", "]", ".", "pfm", "=", "pfm", "motifs", "[", "-", "1", "]", ".", "align", "=", "align", "return", "motifs" ]
Convert GADEM output to motifs Parameters ---------- fo : file-like File object containing GADEM output. Returns ------- motifs : list List of Motif instances.
[ "Convert", "GADEM", "output", "to", "motifs", "Parameters", "----------", "fo", ":", "file", "-", "like", "File", "object", "containing", "GADEM", "output", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L1847-L1896
train
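The 5-line block layout Gadem.parse consumes, inferred from its slicing logic: a '>' header line followed by one labeled row per nucleotide (the probabilities here are made up):

lines = [
    ">m1_w4\n",
    "A 0.7\t0.1\t0.1\t0.1\n",
    "C 0.1\t0.7\t0.1\t0.1\n",
    "G 0.1\t0.1\t0.7\t0.1\n",
    "T 0.1\t0.1\t0.1\t0.7\n",
]
# Strip the nucleotide label, then transpose to one row per position.
m = [l.strip().split(" ")[1].split("\t") for l in lines[1:5]]
pwm = [[float(m[x][y]) for x in range(4)] for y in range(len(m[0]))]
print(pwm[0])  # [0.7, 0.1, 0.1, 0.1]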
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
Jaspar._run_program
def _run_program(self, bin, fastafile, params=None): """ Get enriched JASPAR motifs in a FASTA file. Parameters ---------- bin : str Command used to run the tool. fastafile : str Name of the FASTA input file. params : dict, optional Optional parameters. For some of the tools required parameters are passed using this dictionary. Returns ------- motifs : list of Motif instances The predicted motifs. stdout : str Standard out of the tool. stderr : str Standard error of the tool. """ fname = os.path.join(self.config.get_motif_dir(), "JASPAR2010_vertebrate.pwm") motifs = read_motifs(fname, fmt="pwm") for motif in motifs: motif.id = "JASPAR_%s" % motif.id return motifs, "", ""
python
def _run_program(self, bin, fastafile, params=None): """ Get enriched JASPAR motifs in a FASTA file. Parameters ---------- bin : str Command used to run the tool. fastafile : str Name of the FASTA input file. params : dict, optional Optional parameters. For some of the tools required parameters are passed using this dictionary. Returns ------- motifs : list of Motif instances The predicted motifs. stdout : str Standard out of the tool. stderr : str Standard error of the tool. """ fname = os.path.join(self.config.get_motif_dir(), "JASPAR2010_vertebrate.pwm") motifs = read_motifs(fname, fmt="pwm") for motif in motifs: motif.id = "JASPAR_%s" % motif.id return motifs, "", ""
[ "def", "_run_program", "(", "self", ",", "bin", ",", "fastafile", ",", "params", "=", "None", ")", ":", "fname", "=", "os", ".", "path", ".", "join", "(", "self", ".", "config", ".", "get_motif_dir", "(", ")", ",", "\"JASPAR2010_vertebrate.pwm\"", ")", "motifs", "=", "read_motifs", "(", "fname", ",", "fmt", "=", "\"pwm\"", ")", "for", "motif", "in", "motifs", ":", "motif", ".", "id", "=", "\"JASPAR_%s\"", "%", "motif", ".", "id", "return", "motifs", ",", "\"\"", ",", "\"\"" ]
Get enriched JASPAR motifs in a FASTA file. Parameters ---------- bin : str Command used to run the tool. fastafile : str Name of the FASTA input file. params : dict, optional Optional parameters. For some of the tools required parameters are passed using this dictionary. Returns ------- motifs : list of Motif instances The predicted motifs. stdout : str Standard out of the tool. stderr : str Standard error of the tool.
[ "Get", "enriched", "JASPAR", "motifs", "in", "a", "FASTA", "file", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L1919-L1951
train
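Jaspar's "prediction" amounts to loading a bundled PWM file and prefixing the motif ids; the same read_motifs call works on any PWM file (the path here is hypothetical):

from gimmemotifs.motif import read_motifs

motifs = read_motifs("JASPAR2010_vertebrate.pwm", fmt="pwm")
for motif in motifs:
    motif.id = "JASPAR_%s" % motif.id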
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
Meme._run_program
def _run_program(self, bin, fastafile, params=None): """ Run MEME and predict motifs from a FASTA file. Parameters ---------- bin : str Command used to run the tool. fastafile : str Name of the FASTA input file. params : dict, optional Optional parameters. For some of the tools required parameters are passed using this dictionary. Returns ------- motifs : list of Motif instances The predicted motifs. stdout : str Standard out of the tool. stderr : str Standard error of the tool. """ default_params = {"width":10, "single":False, "number":10} if params is not None: default_params.update(params) tmp = NamedTemporaryFile(dir=self.tmpdir) tmpname = tmp.name strand = "-revcomp" width = default_params["width"] number = default_params["number"] cmd = [bin, fastafile, "-text","-dna","-nostatus","-mod", "zoops","-nmotifs", "%s" % number, "-w","%s" % width, "-maxsize", "10000000"] if not default_params["single"]: cmd.append(strand) #sys.stderr.write(" ".join(cmd) + "\n") p = Popen(cmd, bufsize=1, stderr=PIPE, stdout=PIPE) stdout,stderr = p.communicate() motifs = [] motifs = self.parse(io.StringIO(stdout.decode())) # Delete temporary files tmp.close() return motifs, stdout, stderr
python
def _run_program(self, bin, fastafile, params=None): """ Run MEME and predict motifs from a FASTA file. Parameters ---------- bin : str Command used to run the tool. fastafile : str Name of the FASTA input file. params : dict, optional Optional parameters. For some of the tools required parameters are passed using this dictionary. Returns ------- motifs : list of Motif instances The predicted motifs. stdout : str Standard out of the tool. stderr : str Standard error of the tool. """ default_params = {"width":10, "single":False, "number":10} if params is not None: default_params.update(params) tmp = NamedTemporaryFile(dir=self.tmpdir) tmpname = tmp.name strand = "-revcomp" width = default_params["width"] number = default_params["number"] cmd = [bin, fastafile, "-text","-dna","-nostatus","-mod", "zoops","-nmotifs", "%s" % number, "-w","%s" % width, "-maxsize", "10000000"] if not default_params["single"]: cmd.append(strand) #sys.stderr.write(" ".join(cmd) + "\n") p = Popen(cmd, bufsize=1, stderr=PIPE, stdout=PIPE) stdout,stderr = p.communicate() motifs = [] motifs = self.parse(io.StringIO(stdout.decode())) # Delete temporary files tmp.close() return motifs, stdout, stderr
[ "def", "_run_program", "(", "self", ",", "bin", ",", "fastafile", ",", "params", "=", "None", ")", ":", "default_params", "=", "{", "\"width\"", ":", "10", ",", "\"single\"", ":", "False", ",", "\"number\"", ":", "10", "}", "if", "params", "is", "not", "None", ":", "default_params", ".", "update", "(", "params", ")", "tmp", "=", "NamedTemporaryFile", "(", "dir", "=", "self", ".", "tmpdir", ")", "tmpname", "=", "tmp", ".", "name", "strand", "=", "\"-revcomp\"", "width", "=", "default_params", "[", "\"width\"", "]", "number", "=", "default_params", "[", "\"number\"", "]", "cmd", "=", "[", "bin", ",", "fastafile", ",", "\"-text\"", ",", "\"-dna\"", ",", "\"-nostatus\"", ",", "\"-mod\"", ",", "\"zoops\"", ",", "\"-nmotifs\"", ",", "\"%s\"", "%", "number", ",", "\"-w\"", ",", "\"%s\"", "%", "width", ",", "\"-maxsize\"", ",", "\"10000000\"", "]", "if", "not", "default_params", "[", "\"single\"", "]", ":", "cmd", ".", "append", "(", "strand", ")", "#sys.stderr.write(\" \".join(cmd) + \"\\n\")", "p", "=", "Popen", "(", "cmd", ",", "bufsize", "=", "1", ",", "stderr", "=", "PIPE", ",", "stdout", "=", "PIPE", ")", "stdout", ",", "stderr", "=", "p", ".", "communicate", "(", ")", "motifs", "=", "[", "]", "motifs", "=", "self", ".", "parse", "(", "io", ".", "StringIO", "(", "stdout", ".", "decode", "(", ")", ")", ")", "# Delete temporary files", "tmp", ".", "close", "(", ")", "return", "motifs", ",", "stdout", ",", "stderr" ]
Run MEME and predict motifs from a FASTA file. Parameters ---------- bin : str Command used to run the tool. fastafile : str Name of the FASTA input file. params : dict, optional Optional parameters. For some of the tools required parameters are passed using this dictionary. Returns ------- motifs : list of Motif instances The predicted motifs. stdout : str Standard out of the tool. stderr : str Standard error of the tool.
[ "Run", "MEME", "and", "predict", "motifs", "from", "a", "FASTA", "file", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L1981-L2033
train
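The MEME command line assembled for the default parameters (width 10, 10 motifs, scanning both strands), with hypothetical file names:

bin, fastafile = "meme", "input.fa"
cmd = [bin, fastafile, "-text", "-dna", "-nostatus", "-mod", "zoops",
       "-nmotifs", "10", "-w", "10", "-maxsize", "10000000", "-revcomp"]
print(" ".join(cmd))
# meme input.fa -text -dna -nostatus -mod zoops -nmotifs 10 -w 10 -maxsize 10000000 -revcomp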
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
Meme.parse
def parse(self, fo): """ Convert MEME output to motifs Parameters ---------- fo : file-like File object containing MEME output. Returns ------- motifs : list List of Motif instances. """ motifs = [] nucs = {"A":0,"C":1,"G":2,"T":3} p = re.compile('MOTIF.+MEME-(\d+)\s*width\s*=\s*(\d+)\s+sites\s*=\s*(\d+)') pa = re.compile('\)\s+([A-Z]+)') line = fo.readline() while line: m = p.search(line) align = [] pfm = None if m: #print(m.group(0)) id = "%s_%s_w%s" % (self.name, m.group(1), m.group(2)) while not line.startswith("//"): ma = pa.search(line) if ma: #print(ma.group(0)) l = ma.group(1) align.append(l) if not pfm: pfm = [[0 for x in range(4)] for x in range(len(l))] for pos in range(len(l)): if l[pos] in nucs: pfm[pos][nucs[l[pos]]] += 1 else: for i in range(4): pfm[pos][i] += 0.25 line = fo.readline() motifs.append(Motif(pfm[:])) motifs[-1].id = id motifs[-1].align = align[:] line = fo.readline() return motifs
python
def parse(self, fo): """ Convert MEME output to motifs Parameters ---------- fo : file-like File object containing MEME output. Returns ------- motifs : list List of Motif instances. """ motifs = [] nucs = {"A":0,"C":1,"G":2,"T":3} p = re.compile('MOTIF.+MEME-(\d+)\s*width\s*=\s*(\d+)\s+sites\s*=\s*(\d+)') pa = re.compile('\)\s+([A-Z]+)') line = fo.readline() while line: m = p.search(line) align = [] pfm = None if m: #print(m.group(0)) id = "%s_%s_w%s" % (self.name, m.group(1), m.group(2)) while not line.startswith("//"): ma = pa.search(line) if ma: #print(ma.group(0)) l = ma.group(1) align.append(l) if not pfm: pfm = [[0 for x in range(4)] for x in range(len(l))] for pos in range(len(l)): if l[pos] in nucs: pfm[pos][nucs[l[pos]]] += 1 else: for i in range(4): pfm[pos][i] += 0.25 line = fo.readline() motifs.append(Motif(pfm[:])) motifs[-1].id = id motifs[-1].align = align[:] line = fo.readline() return motifs
[ "def", "parse", "(", "self", ",", "fo", ")", ":", "motifs", "=", "[", "]", "nucs", "=", "{", "\"A\"", ":", "0", ",", "\"C\"", ":", "1", ",", "\"G\"", ":", "2", ",", "\"T\"", ":", "3", "}", "p", "=", "re", ".", "compile", "(", "'MOTIF.+MEME-(\\d+)\\s*width\\s*=\\s*(\\d+)\\s+sites\\s*=\\s*(\\d+)'", ")", "pa", "=", "re", ".", "compile", "(", "'\\)\\s+([A-Z]+)'", ")", "line", "=", "fo", ".", "readline", "(", ")", "while", "line", ":", "m", "=", "p", ".", "search", "(", "line", ")", "align", "=", "[", "]", "pfm", "=", "None", "if", "m", ":", "#print(m.group(0))", "id", "=", "\"%s_%s_w%s\"", "%", "(", "self", ".", "name", ",", "m", ".", "group", "(", "1", ")", ",", "m", ".", "group", "(", "2", ")", ")", "while", "not", "line", ".", "startswith", "(", "\"//\"", ")", ":", "ma", "=", "pa", ".", "search", "(", "line", ")", "if", "ma", ":", "#print(ma.group(0))", "l", "=", "ma", ".", "group", "(", "1", ")", "align", ".", "append", "(", "l", ")", "if", "not", "pfm", ":", "pfm", "=", "[", "[", "0", "for", "x", "in", "range", "(", "4", ")", "]", "for", "x", "in", "range", "(", "len", "(", "l", ")", ")", "]", "for", "pos", "in", "range", "(", "len", "(", "l", ")", ")", ":", "if", "l", "[", "pos", "]", "in", "nucs", ":", "pfm", "[", "pos", "]", "[", "nucs", "[", "l", "[", "pos", "]", "]", "]", "+=", "1", "else", ":", "for", "i", "in", "range", "(", "4", ")", ":", "pfm", "[", "pos", "]", "[", "i", "]", "+=", "0.25", "line", "=", "fo", ".", "readline", "(", ")", "motifs", ".", "append", "(", "Motif", "(", "pfm", "[", ":", "]", ")", ")", "motifs", "[", "-", "1", "]", ".", "id", "=", "id", "motifs", "[", "-", "1", "]", ".", "align", "=", "align", "[", ":", "]", "line", "=", "fo", ".", "readline", "(", ")", "return", "motifs" ]
Convert MEME output to motifs Parameters ---------- fo : file-like File object containing MEME output. Returns ------- motifs : list List of Motif instances.
[ "Convert", "MEME", "output", "to", "motifs", "Parameters", "----------", "fo", ":", "file", "-", "like", "File", "object", "containing", "MEME", "output", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L2035-L2084
train
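A worked example of the two regexes in Meme.parse, applied to lines in the style of MEME's text output (values made up):

import re

p = re.compile(r'MOTIF.+MEME-(\d+)\s*width\s*=\s*(\d+)\s+sites\s*=\s*(\d+)')
pa = re.compile(r'\)\s+([A-Z]+)')

print(p.search("MOTIF CCGGAAGT MEME-1 width = 8 sites = 21").groups())
# ('1', '8', '21') -> a motif id like 'MEME_1_w8'
print(pa.search("chr1:100-200 ( 55) CCGGAAGT 1").group(1))
# CCGGAAGT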
vanheeringen-lab/gimmemotifs
gimmemotifs/maelstrom.py
scan_to_table
def scan_to_table(input_table, genome, scoring, pwmfile=None, ncpus=None): """Scan regions in input table with motifs. Parameters ---------- input_table : str Filename of input table. Can be either a tab-separated text file or a feather file. genome : str Genome name. Can be either the name of a FASTA-formatted file or a genomepy genome name. scoring : str "count" or "score" pwmfile : str, optional Specify a PFM file for scanning. ncpus : int, optional If defined, this specifies the number of cores to use. Returns ------- table : pandas.DataFrame DataFrame with motif ids as column names and regions as index. Values are either counts or scores depending on the 'scoring' parameter. """ config = MotifConfig() if pwmfile is None: pwmfile = config.get_default_params().get("motif_db", None) if pwmfile is not None: pwmfile = os.path.join(config.get_motif_dir(), pwmfile) if pwmfile is None: raise ValueError("no pwmfile given and no default database specified") logger.info("reading table") if input_table.endswith("feather"): df = pd.read_feather(input_table) idx = df.iloc[:,0].values else: df = pd.read_table(input_table, index_col=0, comment="#") idx = df.index regions = list(idx) s = Scanner(ncpus=ncpus) s.set_motifs(pwmfile) s.set_genome(genome) s.set_background(genome=genome) nregions = len(regions) scores = [] if scoring == "count": logger.info("setting threshold") s.set_threshold(fpr=FPR) logger.info("creating count table") for row in s.count(regions): scores.append(row) logger.info("done") else: s.set_threshold(threshold=0.0) logger.info("creating score table") for row in s.best_score(regions, normalize=True): scores.append(row) logger.info("done") motif_names = [m.id for m in read_motifs(pwmfile)] logger.info("creating dataframe") return pd.DataFrame(scores, index=idx, columns=motif_names)
python
def scan_to_table(input_table, genome, scoring, pwmfile=None, ncpus=None): """Scan regions in input table with motifs. Parameters ---------- input_table : str Filename of input table. Can be either a tab-separated text file or a feather file. genome : str Genome name. Can be either the name of a FASTA-formatted file or a genomepy genome name. scoring : str "count" or "score" pwmfile : str, optional Specify a PFM file for scanning. ncpus : int, optional If defined, this specifies the number of cores to use. Returns ------- table : pandas.DataFrame DataFrame with motif ids as column names and regions as index. Values are either counts or scores depending on the 'scoring' parameter. """ config = MotifConfig() if pwmfile is None: pwmfile = config.get_default_params().get("motif_db", None) if pwmfile is not None: pwmfile = os.path.join(config.get_motif_dir(), pwmfile) if pwmfile is None: raise ValueError("no pwmfile given and no default database specified") logger.info("reading table") if input_table.endswith("feather"): df = pd.read_feather(input_table) idx = df.iloc[:,0].values else: df = pd.read_table(input_table, index_col=0, comment="#") idx = df.index regions = list(idx) s = Scanner(ncpus=ncpus) s.set_motifs(pwmfile) s.set_genome(genome) s.set_background(genome=genome) nregions = len(regions) scores = [] if scoring == "count": logger.info("setting threshold") s.set_threshold(fpr=FPR) logger.info("creating count table") for row in s.count(regions): scores.append(row) logger.info("done") else: s.set_threshold(threshold=0.0) logger.info("creating score table") for row in s.best_score(regions, normalize=True): scores.append(row) logger.info("done") motif_names = [m.id for m in read_motifs(pwmfile)] logger.info("creating dataframe") return pd.DataFrame(scores, index=idx, columns=motif_names)
[ "def", "scan_to_table", "(", "input_table", ",", "genome", ",", "scoring", ",", "pwmfile", "=", "None", ",", "ncpus", "=", "None", ")", ":", "config", "=", "MotifConfig", "(", ")", "if", "pwmfile", "is", "None", ":", "pwmfile", "=", "config", ".", "get_default_params", "(", ")", ".", "get", "(", "\"motif_db\"", ",", "None", ")", "if", "pwmfile", "is", "not", "None", ":", "pwmfile", "=", "os", ".", "path", ".", "join", "(", "config", ".", "get_motif_dir", "(", ")", ",", "pwmfile", ")", "if", "pwmfile", "is", "None", ":", "raise", "ValueError", "(", "\"no pwmfile given and no default database specified\"", ")", "logger", ".", "info", "(", "\"reading table\"", ")", "if", "input_table", ".", "endswith", "(", "\"feather\"", ")", ":", "df", "=", "pd", ".", "read_feather", "(", "input_table", ")", "idx", "=", "df", ".", "iloc", "[", ":", ",", "0", "]", ".", "values", "else", ":", "df", "=", "pd", ".", "read_table", "(", "input_table", ",", "index_col", "=", "0", ",", "comment", "=", "\"#\"", ")", "idx", "=", "df", ".", "index", "regions", "=", "list", "(", "idx", ")", "s", "=", "Scanner", "(", "ncpus", "=", "ncpus", ")", "s", ".", "set_motifs", "(", "pwmfile", ")", "s", ".", "set_genome", "(", "genome", ")", "s", ".", "set_background", "(", "genome", "=", "genome", ")", "nregions", "=", "len", "(", "regions", ")", "scores", "=", "[", "]", "if", "scoring", "==", "\"count\"", ":", "logger", ".", "info", "(", "\"setting threshold\"", ")", "s", ".", "set_threshold", "(", "fpr", "=", "FPR", ")", "logger", ".", "info", "(", "\"creating count table\"", ")", "for", "row", "in", "s", ".", "count", "(", "regions", ")", ":", "scores", ".", "append", "(", "row", ")", "logger", ".", "info", "(", "\"done\"", ")", "else", ":", "s", ".", "set_threshold", "(", "threshold", "=", "0.0", ")", "logger", ".", "info", "(", "\"creating score table\"", ")", "for", "row", "in", "s", ".", "best_score", "(", "regions", ",", "normalize", "=", "True", ")", ":", "scores", ".", "append", "(", "row", ")", "logger", ".", "info", "(", "\"done\"", ")", "motif_names", "=", "[", "m", ".", "id", "for", "m", "in", "read_motifs", "(", "pwmfile", ")", "]", "logger", ".", "info", "(", "\"creating dataframe\"", ")", "return", "pd", ".", "DataFrame", "(", "scores", ",", "index", "=", "idx", ",", "columns", "=", "motif_names", ")" ]
Scan regions in input table with motifs. Parameters ---------- input_table : str Filename of input table. Can be either a tab-separated text file or a feather file. genome : str Genome name. Can be either the name of a FASTA-formatted file or a genomepy genome name. scoring : str "count" or "score" pwmfile : str, optional Specify a PFM file for scanning. ncpus : int, optional If defined, this specifies the number of cores to use. Returns ------- table : pandas.DataFrame DataFrame with motif ids as column names and regions as index. Values are either counts or scores depending on the 'scoring' parameter.
[ "Scan", "regions", "in", "input", "table", "with", "motifs", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/maelstrom.py#L52-L123
train
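A minimal usage sketch for scan_to_table (not part of the dataset record): the table filename and genome name below are hypothetical, and the import path follows the gimmemotifs/maelstrom.py path given above.

# Hypothetical example: count motif matches for the regions listed in the
# first column of peaks.txt, using the hg38 genome.
from gimmemotifs.maelstrom import scan_to_table

counts = scan_to_table("peaks.txt", "hg38", "count", ncpus=4)
counts.to_csv("motif.count.txt", sep="\t")

# Same regions, but the normalized score of the best match per motif:
scores = scan_to_table("peaks.txt", "hg38", "score")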
vanheeringen-lab/gimmemotifs
gimmemotifs/maelstrom.py
run_maelstrom
def run_maelstrom(infile, genome, outdir, pwmfile=None, plot=True, cluster=False,
                  score_table=None, count_table=None, methods=None,
                  ncpus=None):
    """Run maelstrom on an input table.

    Parameters
    ----------
    infile : str
        Filename of input table. Can be either a tab-separated text file or
        a feather file.

    genome : str
        Genome name. Can be either the name of a FASTA-formatted file or a
        genomepy genome name.

    outdir : str
        Output directory for all results.

    pwmfile : str, optional
        Specify a PFM file for scanning.

    plot : bool, optional
        Create heatmaps.

    cluster : bool, optional
        If True and if the input table has more than one column, the data is
        clustered and the cluster activity methods are also run. Not
        well-tested.

    score_table : str, optional
        Filename of pre-calculated table with motif scores.

    count_table : str, optional
        Filename of pre-calculated table with motif counts.

    methods : list, optional
        Activity methods to use. By default, all are used.

    ncpus : int, optional
        If defined this specifies the number of cores to use.
    """
    logger.info("Starting maelstrom")
    if infile.endswith("feather"):
        df = pd.read_feather(infile)
        df = df.set_index(df.columns[0])
    else:
        df = pd.read_table(infile, index_col=0, comment="#")

    # Check for duplicates
    if df.index.duplicated(keep=False).any():
        raise ValueError("Input file contains duplicate regions! "
                         "Please remove them.")

    if not os.path.exists(outdir):
        os.mkdir(outdir)

    if methods is None:
        methods = Moap.list_predictors()
    methods = [m.lower() for m in methods]

    shutil.copyfile(infile, os.path.join(outdir, "input.table.txt"))

    # Copy the motif information
    pwmfile = pwmfile_location(pwmfile)
    if pwmfile:
        shutil.copy2(pwmfile, outdir)
        mapfile = re.sub(".p[fw]m$", ".motif2factors.txt", pwmfile)
        if os.path.exists(mapfile):
            shutil.copy2(mapfile, outdir)

    # Create a file with the number of motif matches
    if not count_table:
        count_table = os.path.join(outdir, "motif.count.txt.gz")
        if not os.path.exists(count_table):
            logger.info("Motif scanning (counts)")
            counts = scan_to_table(infile, genome, "count",
                                   pwmfile=pwmfile, ncpus=ncpus)
            counts.to_csv(count_table, sep="\t", compression="gzip")
    else:
        logger.info("Counts, using: %s", count_table)

    # Create a file with the score of the best motif match
    if not score_table:
        score_table = os.path.join(outdir, "motif.score.txt.gz")
        if not os.path.exists(score_table):
            logger.info("Motif scanning (scores)")
            scores = scan_to_table(infile, genome, "score",
                                   pwmfile=pwmfile, ncpus=ncpus)
            scores.to_csv(score_table, sep="\t", float_format="%.3f",
                          compression="gzip")
    else:
        logger.info("Scores, using: %s", score_table)

    if cluster:
        cluster = False
        for method in methods:
            m = Moap.create(method, ncpus=ncpus)
            if m.ptype == "classification":
                cluster = True
                break
        if not cluster:
            logger.info("Skipping clustering, no classification methods")

    exps = []
    clusterfile = infile
    if df.shape[1] != 1:
        # More than one column
        for method in Moap.list_regression_predictors():
            if method in methods:
                m = Moap.create(method, ncpus=ncpus)
                exps.append([method, m.pref_table, infile])
                logger.debug("Adding %s", method)

        if cluster:
            clusterfile = os.path.join(outdir,
                                       os.path.basename(infile) + ".cluster.txt")
            df[:] = scale(df, axis=0)
            names = df.columns
            df_changed = pd.DataFrame(index=df.index)
            df_changed["cluster"] = np.nan
            for name in names:
                df_changed.loc[(df[name] - df.loc[:, df.columns != name].max(1)) > 0.5, "cluster"] = name
            df_changed.dropna().to_csv(clusterfile, sep="\t")

    if df.shape[1] == 1 or cluster:
        for method in Moap.list_classification_predictors():
            if method in methods:
                m = Moap.create(method, ncpus=ncpus)
                exps.append([method, m.pref_table, clusterfile])

    if len(exps) == 0:
        logger.error("No method to run.")
        sys.exit(1)

    for method, scoring, fname in exps:
        try:
            if scoring == "count" and count_table:
                moap_with_table(fname, count_table, outdir, method, scoring,
                                ncpus=ncpus)
            elif scoring == "score" and score_table:
                moap_with_table(fname, score_table, outdir, method, scoring,
                                ncpus=ncpus)
            else:
                moap_with_bg(fname, genome, outdir, method, scoring,
                             pwmfile=pwmfile, ncpus=ncpus)
        except Exception as e:
            logger.warning("Method %s with scoring %s failed", method, scoring)
            logger.warning(e)
            logger.warning("Skipping")

    dfs = {}
    for method, scoring, fname in exps:
        t = "{}.{}".format(method, scoring)
        fname = os.path.join(outdir, "activity.{}.{}.out.txt".format(
            method, scoring))
        try:
            dfs[t] = pd.read_table(fname, index_col=0, comment="#")
        except Exception:
            logger.warning("Activity file for {} not found!\n".format(t))

    if len(methods) > 1:
        logger.info("Rank aggregation")
        df_p = df_rank_aggregation(df, dfs, exps)
        df_p.to_csv(os.path.join(outdir, "final.out.csv"), sep="\t")

    # Write motif frequency table
    if df.shape[1] == 1:
        mcount = df.join(pd.read_table(count_table, index_col=0, comment="#"))
        m_group = mcount.groupby(df.columns[0])
        freq = m_group.sum() / m_group.count()
        freq.to_csv(os.path.join(outdir, "motif.freq.txt"), sep="\t")

    if plot and len(methods) > 1:
        logger.info("html report")
        maelstrom_html_report(
            outdir,
            os.path.join(outdir, "final.out.csv"),
            pwmfile
        )
        logger.info(os.path.join(outdir, "gimme.maelstrom.report.html"))
python
def run_maelstrom(infile, genome, outdir, pwmfile=None, plot=True, cluster=False,
                  score_table=None, count_table=None, methods=None,
                  ncpus=None):
    """Run maelstrom on an input table.

    Parameters
    ----------
    infile : str
        Filename of input table. Can be either a tab-separated text file or
        a feather file.

    genome : str
        Genome name. Can be either the name of a FASTA-formatted file or a
        genomepy genome name.

    outdir : str
        Output directory for all results.

    pwmfile : str, optional
        Specify a PFM file for scanning.

    plot : bool, optional
        Create heatmaps.

    cluster : bool, optional
        If True and if the input table has more than one column, the data is
        clustered and the cluster activity methods are also run. Not
        well-tested.

    score_table : str, optional
        Filename of pre-calculated table with motif scores.

    count_table : str, optional
        Filename of pre-calculated table with motif counts.

    methods : list, optional
        Activity methods to use. By default, all are used.

    ncpus : int, optional
        If defined this specifies the number of cores to use.
    """
    logger.info("Starting maelstrom")
    if infile.endswith("feather"):
        df = pd.read_feather(infile)
        df = df.set_index(df.columns[0])
    else:
        df = pd.read_table(infile, index_col=0, comment="#")

    # Check for duplicates
    if df.index.duplicated(keep=False).any():
        raise ValueError("Input file contains duplicate regions! "
                         "Please remove them.")

    if not os.path.exists(outdir):
        os.mkdir(outdir)

    if methods is None:
        methods = Moap.list_predictors()
    methods = [m.lower() for m in methods]

    shutil.copyfile(infile, os.path.join(outdir, "input.table.txt"))

    # Copy the motif information
    pwmfile = pwmfile_location(pwmfile)
    if pwmfile:
        shutil.copy2(pwmfile, outdir)
        mapfile = re.sub(".p[fw]m$", ".motif2factors.txt", pwmfile)
        if os.path.exists(mapfile):
            shutil.copy2(mapfile, outdir)

    # Create a file with the number of motif matches
    if not count_table:
        count_table = os.path.join(outdir, "motif.count.txt.gz")
        if not os.path.exists(count_table):
            logger.info("Motif scanning (counts)")
            counts = scan_to_table(infile, genome, "count",
                                   pwmfile=pwmfile, ncpus=ncpus)
            counts.to_csv(count_table, sep="\t", compression="gzip")
    else:
        logger.info("Counts, using: %s", count_table)

    # Create a file with the score of the best motif match
    if not score_table:
        score_table = os.path.join(outdir, "motif.score.txt.gz")
        if not os.path.exists(score_table):
            logger.info("Motif scanning (scores)")
            scores = scan_to_table(infile, genome, "score",
                                   pwmfile=pwmfile, ncpus=ncpus)
            scores.to_csv(score_table, sep="\t", float_format="%.3f",
                          compression="gzip")
    else:
        logger.info("Scores, using: %s", score_table)

    if cluster:
        cluster = False
        for method in methods:
            m = Moap.create(method, ncpus=ncpus)
            if m.ptype == "classification":
                cluster = True
                break
        if not cluster:
            logger.info("Skipping clustering, no classification methods")

    exps = []
    clusterfile = infile
    if df.shape[1] != 1:
        # More than one column
        for method in Moap.list_regression_predictors():
            if method in methods:
                m = Moap.create(method, ncpus=ncpus)
                exps.append([method, m.pref_table, infile])
                logger.debug("Adding %s", method)

        if cluster:
            clusterfile = os.path.join(outdir,
                                       os.path.basename(infile) + ".cluster.txt")
            df[:] = scale(df, axis=0)
            names = df.columns
            df_changed = pd.DataFrame(index=df.index)
            df_changed["cluster"] = np.nan
            for name in names:
                df_changed.loc[(df[name] - df.loc[:, df.columns != name].max(1)) > 0.5, "cluster"] = name
            df_changed.dropna().to_csv(clusterfile, sep="\t")

    if df.shape[1] == 1 or cluster:
        for method in Moap.list_classification_predictors():
            if method in methods:
                m = Moap.create(method, ncpus=ncpus)
                exps.append([method, m.pref_table, clusterfile])

    if len(exps) == 0:
        logger.error("No method to run.")
        sys.exit(1)

    for method, scoring, fname in exps:
        try:
            if scoring == "count" and count_table:
                moap_with_table(fname, count_table, outdir, method, scoring,
                                ncpus=ncpus)
            elif scoring == "score" and score_table:
                moap_with_table(fname, score_table, outdir, method, scoring,
                                ncpus=ncpus)
            else:
                moap_with_bg(fname, genome, outdir, method, scoring,
                             pwmfile=pwmfile, ncpus=ncpus)
        except Exception as e:
            logger.warning("Method %s with scoring %s failed", method, scoring)
            logger.warning(e)
            logger.warning("Skipping")

    dfs = {}
    for method, scoring, fname in exps:
        t = "{}.{}".format(method, scoring)
        fname = os.path.join(outdir, "activity.{}.{}.out.txt".format(
            method, scoring))
        try:
            dfs[t] = pd.read_table(fname, index_col=0, comment="#")
        except Exception:
            logger.warning("Activity file for {} not found!\n".format(t))

    if len(methods) > 1:
        logger.info("Rank aggregation")
        df_p = df_rank_aggregation(df, dfs, exps)
        df_p.to_csv(os.path.join(outdir, "final.out.csv"), sep="\t")

    # Write motif frequency table
    if df.shape[1] == 1:
        mcount = df.join(pd.read_table(count_table, index_col=0, comment="#"))
        m_group = mcount.groupby(df.columns[0])
        freq = m_group.sum() / m_group.count()
        freq.to_csv(os.path.join(outdir, "motif.freq.txt"), sep="\t")

    if plot and len(methods) > 1:
        logger.info("html report")
        maelstrom_html_report(
            outdir,
            os.path.join(outdir, "final.out.csv"),
            pwmfile
        )
        logger.info(os.path.join(outdir, "gimme.maelstrom.report.html"))
[ "def", "run_maelstrom", "(", "infile", ",", "genome", ",", "outdir", ",", "pwmfile", "=", "None", ",", "plot", "=", "True", ",", "cluster", "=", "False", ",", "score_table", "=", "None", ",", "count_table", "=", "None", ",", "methods", "=", "None", ",", "ncpus", "=", "None", ")", ":", "logger", ".", "info", "(", "\"Starting maelstrom\"", ")", "if", "infile", ".", "endswith", "(", "\"feather\"", ")", ":", "df", "=", "pd", ".", "read_feather", "(", "infile", ")", "df", "=", "df", ".", "set_index", "(", "df", ".", "columns", "[", "0", "]", ")", "else", ":", "df", "=", "pd", ".", "read_table", "(", "infile", ",", "index_col", "=", "0", ",", "comment", "=", "\"#\"", ")", "# Check for duplicates", "if", "df", ".", "index", ".", "duplicated", "(", "keep", "=", "False", ")", ".", "any", "(", ")", ":", "raise", "ValueError", "(", "\"Input file contains duplicate regions! \"", "\"Please remove them.\"", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "outdir", ")", ":", "os", ".", "mkdir", "(", "outdir", ")", "if", "methods", "is", "None", ":", "methods", "=", "Moap", ".", "list_predictors", "(", ")", "methods", "=", "[", "m", ".", "lower", "(", ")", "for", "m", "in", "methods", "]", "shutil", ".", "copyfile", "(", "infile", ",", "os", ".", "path", ".", "join", "(", "outdir", ",", "\"input.table.txt\"", ")", ")", "# Copy the motif informatuon", "pwmfile", "=", "pwmfile_location", "(", "pwmfile", ")", "if", "pwmfile", ":", "shutil", ".", "copy2", "(", "pwmfile", ",", "outdir", ")", "mapfile", "=", "re", ".", "sub", "(", "\".p[fw]m$\"", ",", "\".motif2factors.txt\"", ",", "pwmfile", ")", "if", "os", ".", "path", ".", "exists", "(", "mapfile", ")", ":", "shutil", ".", "copy2", "(", "mapfile", ",", "outdir", ")", "# Create a file with the number of motif matches", "if", "not", "count_table", ":", "count_table", "=", "os", ".", "path", ".", "join", "(", "outdir", ",", "\"motif.count.txt.gz\"", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "count_table", ")", ":", "logger", ".", "info", "(", "\"Motif scanning (counts)\"", ")", "counts", "=", "scan_to_table", "(", "infile", ",", "genome", ",", "\"count\"", ",", "pwmfile", "=", "pwmfile", ",", "ncpus", "=", "ncpus", ")", "counts", ".", "to_csv", "(", "count_table", ",", "sep", "=", "\"\\t\"", ",", "compression", "=", "\"gzip\"", ")", "else", ":", "logger", ".", "info", "(", "\"Counts, using: %s\"", ",", "count_table", ")", "# Create a file with the score of the best motif match", "if", "not", "score_table", ":", "score_table", "=", "os", ".", "path", ".", "join", "(", "outdir", ",", "\"motif.score.txt.gz\"", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "score_table", ")", ":", "logger", ".", "info", "(", "\"Motif scanning (scores)\"", ")", "scores", "=", "scan_to_table", "(", "infile", ",", "genome", ",", "\"score\"", ",", "pwmfile", "=", "pwmfile", ",", "ncpus", "=", "ncpus", ")", "scores", ".", "to_csv", "(", "score_table", ",", "sep", "=", "\"\\t\"", ",", "float_format", "=", "\"%.3f\"", ",", "compression", "=", "\"gzip\"", ")", "else", ":", "logger", ".", "info", "(", "\"Scores, using: %s\"", ",", "score_table", ")", "if", "cluster", ":", "cluster", "=", "False", "for", "method", "in", "methods", ":", "m", "=", "Moap", ".", "create", "(", "method", ",", "ncpus", "=", "ncpus", ")", "if", "m", ".", "ptype", "==", "\"classification\"", ":", "cluster", "=", "True", "break", "if", "not", "cluster", ":", "logger", ".", "info", "(", "\"Skipping clustering, no classification methods\"", ")", "exps", "=", "[", "]", 
"clusterfile", "=", "infile", "if", "df", ".", "shape", "[", "1", "]", "!=", "1", ":", "# More than one column", "for", "method", "in", "Moap", ".", "list_regression_predictors", "(", ")", ":", "if", "method", "in", "methods", ":", "m", "=", "Moap", ".", "create", "(", "method", ",", "ncpus", "=", "ncpus", ")", "exps", ".", "append", "(", "[", "method", ",", "m", ".", "pref_table", ",", "infile", "]", ")", "logger", ".", "debug", "(", "\"Adding %s\"", ",", "method", ")", "if", "cluster", ":", "clusterfile", "=", "os", ".", "path", ".", "join", "(", "outdir", ",", "os", ".", "path", ".", "basename", "(", "infile", ")", "+", "\".cluster.txt\"", ")", "df", "[", ":", "]", "=", "scale", "(", "df", ",", "axis", "=", "0", ")", "names", "=", "df", ".", "columns", "df_changed", "=", "pd", ".", "DataFrame", "(", "index", "=", "df", ".", "index", ")", "df_changed", "[", "\"cluster\"", "]", "=", "np", ".", "nan", "for", "name", "in", "names", ":", "df_changed", ".", "loc", "[", "(", "df", "[", "name", "]", "-", "df", ".", "loc", "[", ":", ",", "df", ".", "columns", "!=", "name", "]", ".", "max", "(", "1", ")", ")", ">", "0.5", ",", "\"cluster\"", "]", "=", "name", "df_changed", ".", "dropna", "(", ")", ".", "to_csv", "(", "clusterfile", ",", "sep", "=", "\"\\t\"", ")", "if", "df", ".", "shape", "[", "1", "]", "==", "1", "or", "cluster", ":", "for", "method", "in", "Moap", ".", "list_classification_predictors", "(", ")", ":", "if", "method", "in", "methods", ":", "m", "=", "Moap", ".", "create", "(", "method", ",", "ncpus", "=", "ncpus", ")", "exps", ".", "append", "(", "[", "method", ",", "m", ".", "pref_table", ",", "clusterfile", "]", ")", "if", "len", "(", "exps", ")", "==", "0", ":", "logger", ".", "error", "(", "\"No method to run.\"", ")", "sys", ".", "exit", "(", "1", ")", "for", "method", ",", "scoring", ",", "fname", "in", "exps", ":", "try", ":", "if", "scoring", "==", "\"count\"", "and", "count_table", ":", "moap_with_table", "(", "fname", ",", "count_table", ",", "outdir", ",", "method", ",", "scoring", ",", "ncpus", "=", "ncpus", ")", "elif", "scoring", "==", "\"score\"", "and", "score_table", ":", "moap_with_table", "(", "fname", ",", "score_table", ",", "outdir", ",", "method", ",", "scoring", ",", "ncpus", "=", "ncpus", ")", "else", ":", "moap_with_bg", "(", "fname", ",", "genome", ",", "outdir", ",", "method", ",", "scoring", ",", "pwmfile", "=", "pwmfile", ",", "ncpus", "=", "ncpus", ")", "except", "Exception", "as", "e", ":", "logger", ".", "warn", "(", "\"Method %s with scoring %s failed\"", ",", "method", ",", "scoring", ")", "logger", ".", "warn", "(", "e", ")", "logger", ".", "warn", "(", "\"Skipping\"", ")", "raise", "dfs", "=", "{", "}", "for", "method", ",", "scoring", ",", "fname", "in", "exps", ":", "t", "=", "\"{}.{}\"", ".", "format", "(", "method", ",", "scoring", ")", "fname", "=", "os", ".", "path", ".", "join", "(", "outdir", ",", "\"activity.{}.{}.out.txt\"", ".", "format", "(", "method", ",", "scoring", ")", ")", "try", ":", "dfs", "[", "t", "]", "=", "pd", ".", "read_table", "(", "fname", ",", "index_col", "=", "0", ",", "comment", "=", "\"#\"", ")", "except", ":", "logging", ".", "warn", "(", "\"Activity file for {} not found!\\n\"", ".", "format", "(", "t", ")", ")", "if", "len", "(", "methods", ")", ">", "1", ":", "logger", ".", "info", "(", "\"Rank aggregation\"", ")", "df_p", "=", "df_rank_aggregation", "(", "df", ",", "dfs", ",", "exps", ")", "df_p", ".", "to_csv", "(", "os", ".", "path", ".", "join", "(", "outdir", ",", "\"final.out.csv\"", ")", ",", "sep", 
"=", "\"\\t\"", ")", "#df_p = df_p.join(m2f)", "# Write motif frequency table", "if", "df", ".", "shape", "[", "1", "]", "==", "1", ":", "mcount", "=", "df", ".", "join", "(", "pd", ".", "read_table", "(", "count_table", ",", "index_col", "=", "0", ",", "comment", "=", "\"#\"", ")", ")", "m_group", "=", "mcount", ".", "groupby", "(", "df", ".", "columns", "[", "0", "]", ")", "freq", "=", "m_group", ".", "sum", "(", ")", "/", "m_group", ".", "count", "(", ")", "freq", ".", "to_csv", "(", "os", ".", "path", ".", "join", "(", "outdir", ",", "\"motif.freq.txt\"", ")", ",", "sep", "=", "\"\\t\"", ")", "if", "plot", "and", "len", "(", "methods", ")", ">", "1", ":", "logger", ".", "info", "(", "\"html report\"", ")", "maelstrom_html_report", "(", "outdir", ",", "os", ".", "path", ".", "join", "(", "outdir", ",", "\"final.out.csv\"", ")", ",", "pwmfile", ")", "logger", ".", "info", "(", "os", ".", "path", ".", "join", "(", "outdir", ",", "\"gimme.maelstrom.report.html\"", ")", ")" ]
Run maelstrom on an input table.

    Parameters
    ----------
    infile : str
        Filename of input table. Can be either a tab-separated text file or
        a feather file.

    genome : str
        Genome name. Can be either the name of a FASTA-formatted file or a
        genomepy genome name.

    outdir : str
        Output directory for all results.

    pwmfile : str, optional
        Specify a PFM file for scanning.

    plot : bool, optional
        Create heatmaps.

    cluster : bool, optional
        If True and if the input table has more than one column, the data is
        clustered and the cluster activity methods are also run. Not
        well-tested.

    score_table : str, optional
        Filename of pre-calculated table with motif scores.

    count_table : str, optional
        Filename of pre-calculated table with motif counts.

    methods : list, optional
        Activity methods to use. By default, all are used.

    ncpus : int, optional
        If defined this specifies the number of cores to use.
[ "Run", "maelstrom", "on", "an", "input", "table", ".", "Parameters", "----------", "infile", ":", "str", "Filename", "of", "input", "table", ".", "Can", "be", "either", "a", "text", "-", "separated", "tab", "file", "or", "a", "feather", "file", ".", "genome", ":", "str", "Genome", "name", ".", "Can", "be", "either", "the", "name", "of", "a", "FASTA", "-", "formatted", "file", "or", "a", "genomepy", "genome", "name", ".", "outdir", ":", "str", "Output", "directory", "for", "all", "results", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/maelstrom.py#L250-L428
train
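A minimal usage sketch for run_maelstrom, assuming an existing tab-separated activity table and a genomepy-installed hg38 genome; both names are hypothetical.

# Hypothetical example: full maelstrom run with the default set of methods.
from gimmemotifs.maelstrom import run_maelstrom

run_maelstrom("activity.table.txt", "hg38", "maelstrom.out", ncpus=4)
# Results (final.out.csv, gimme.maelstrom.report.html) end up in maelstrom.out/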
vanheeringen-lab/gimmemotifs
gimmemotifs/maelstrom.py
MaelstromResult.plot_heatmap
def plot_heatmap(self, kind="final", min_freq=0.01, threshold=2, name=True,
                 max_len=50, aspect=1, **kwargs):
    """Plot clustered heatmap of predicted motif activity.

    Parameters
    ----------
    kind : str, optional
        Which data type to use for plotting. Default is 'final', which will
        plot the result of the rank aggregation. Other options are 'freq'
        for the motif frequencies, or any of the individual activities such
        as 'rf.score'.

    min_freq : float, optional
        Minimum frequency of motif occurrence.

    threshold : float, optional
        Minimum activity (absolute) of the rank aggregation result.

    name : bool, optional
        Use factor names instead of motif names for plotting.

    max_len : int, optional
        Truncate the list of factors to this maximum length.

    aspect : int, optional
        Aspect ratio for tweaking the plot.

    kwargs : other keyword arguments
        All other keyword arguments are passed to sns.clustermap.

    Returns
    -------
    cg : ClusterGrid
        A seaborn ClusterGrid instance.
    """
    filt = np.any(np.abs(self.result) >= threshold, 1) & np.any(np.abs(self.freq.T) >= min_freq, 1)

    idx = self.result[filt].index

    cmap = "RdBu_r"
    if kind == "final":
        data = self.result
    elif kind == "freq":
        data = self.freq.T
        cmap = "Reds"
    elif kind in self.activity:
        data = self.activity[kind]
        if kind in ["hypergeom.count", "mwu.score"]:
            cmap = "Reds"
    else:
        raise ValueError("Unknown kind")

    m = data.loc[idx]

    if name:
        m["factors"] = [join_max(self.motifs[n].factors, max_len, ",",
                                 suffix=",(...)") for n in m.index]
        m = m.set_index("factors")

    h, w = m.shape
    cg = sns.clustermap(m, cmap=cmap, col_cluster=False,
                        figsize=(2 + w * 0.5 * aspect, 0.5 * h),
                        linewidths=1, **kwargs)
    cg.ax_col_dendrogram.set_visible(False)
    plt.setp(cg.ax_heatmap.yaxis.get_majorticklabels(), rotation=0)
    return cg
python
def plot_heatmap(self, kind="final", min_freq=0.01, threshold=2, name=True,
                 max_len=50, aspect=1, **kwargs):
    """Plot clustered heatmap of predicted motif activity.

    Parameters
    ----------
    kind : str, optional
        Which data type to use for plotting. Default is 'final', which will
        plot the result of the rank aggregation. Other options are 'freq'
        for the motif frequencies, or any of the individual activities such
        as 'rf.score'.

    min_freq : float, optional
        Minimum frequency of motif occurrence.

    threshold : float, optional
        Minimum activity (absolute) of the rank aggregation result.

    name : bool, optional
        Use factor names instead of motif names for plotting.

    max_len : int, optional
        Truncate the list of factors to this maximum length.

    aspect : int, optional
        Aspect ratio for tweaking the plot.

    kwargs : other keyword arguments
        All other keyword arguments are passed to sns.clustermap.

    Returns
    -------
    cg : ClusterGrid
        A seaborn ClusterGrid instance.
    """
    filt = np.any(np.abs(self.result) >= threshold, 1) & np.any(np.abs(self.freq.T) >= min_freq, 1)

    idx = self.result[filt].index

    cmap = "RdBu_r"
    if kind == "final":
        data = self.result
    elif kind == "freq":
        data = self.freq.T
        cmap = "Reds"
    elif kind in self.activity:
        data = self.activity[kind]
        if kind in ["hypergeom.count", "mwu.score"]:
            cmap = "Reds"
    else:
        raise ValueError("Unknown kind")

    m = data.loc[idx]

    if name:
        m["factors"] = [join_max(self.motifs[n].factors, max_len, ",",
                                 suffix=",(...)") for n in m.index]
        m = m.set_index("factors")

    h, w = m.shape
    cg = sns.clustermap(m, cmap=cmap, col_cluster=False,
                        figsize=(2 + w * 0.5 * aspect, 0.5 * h),
                        linewidths=1, **kwargs)
    cg.ax_col_dendrogram.set_visible(False)
    plt.setp(cg.ax_heatmap.yaxis.get_majorticklabels(), rotation=0)
    return cg
[ "def", "plot_heatmap", "(", "self", ",", "kind", "=", "\"final\"", ",", "min_freq", "=", "0.01", ",", "threshold", "=", "2", ",", "name", "=", "True", ",", "max_len", "=", "50", ",", "aspect", "=", "1", ",", "*", "*", "kwargs", ")", ":", "filt", "=", "np", ".", "any", "(", "np", ".", "abs", "(", "self", ".", "result", ")", ">=", "threshold", ",", "1", ")", "&", "np", ".", "any", "(", "np", ".", "abs", "(", "self", ".", "freq", ".", "T", ")", ">=", "min_freq", ",", "1", ")", "idx", "=", "self", ".", "result", "[", "filt", "]", ".", "index", "cmap", "=", "\"RdBu_r\"", "if", "kind", "==", "\"final\"", ":", "data", "=", "self", ".", "result", "elif", "kind", "==", "\"freq\"", ":", "data", "=", "self", ".", "freq", ".", "T", "cmap", "=", "\"Reds\"", "elif", "kind", "in", "self", ".", "activity", ":", "data", "=", "self", ".", "activity", "[", "dtype", "]", "if", "kind", "in", "[", "\"hypergeom.count\"", ",", "\"mwu.score\"", "]", ":", "cmap", "=", "\"Reds\"", "else", ":", "raise", "ValueError", "(", "\"Unknown dtype\"", ")", "#print(data.head())", "#plt.figure(", "m", "=", "data", ".", "loc", "[", "idx", "]", "if", "name", ":", "m", "[", "\"factors\"", "]", "=", "[", "join_max", "(", "self", ".", "motifs", "[", "n", "]", ".", "factors", ",", "max_len", ",", "\",\"", ",", "suffix", "=", "\",(...)\"", ")", "for", "n", "in", "m", ".", "index", "]", "m", "=", "m", ".", "set_index", "(", "\"factors\"", ")", "h", ",", "w", "=", "m", ".", "shape", "cg", "=", "sns", ".", "clustermap", "(", "m", ",", "cmap", "=", "cmap", ",", "col_cluster", "=", "False", ",", "figsize", "=", "(", "2", "+", "w", "*", "0.5", "*", "aspect", ",", "0.5", "*", "h", ")", ",", "linewidths", "=", "1", ",", "*", "*", "kwargs", ")", "cg", ".", "ax_col_dendrogram", ".", "set_visible", "(", "False", ")", "plt", ".", "setp", "(", "cg", ".", "ax_heatmap", ".", "yaxis", ".", "get_majorticklabels", "(", ")", ",", "rotation", "=", "0", ")", "return", "cg" ]
Plot clustered heatmap of predicted motif activity.

    Parameters
    ----------
    kind : str, optional
        Which data type to use for plotting. Default is 'final', which will
        plot the result of the rank aggregation. Other options are 'freq'
        for the motif frequencies, or any of the individual activities such
        as 'rf.score'.

    min_freq : float, optional
        Minimum frequency of motif occurrence.

    threshold : float, optional
        Minimum activity (absolute) of the rank aggregation result.

    name : bool, optional
        Use factor names instead of motif names for plotting.

    max_len : int, optional
        Truncate the list of factors to this maximum length.

    aspect : int, optional
        Aspect ratio for tweaking the plot.

    kwargs : other keyword arguments
        All other keyword arguments are passed to sns.clustermap.

    Returns
    -------
    cg : ClusterGrid
        A seaborn ClusterGrid instance.
[ "Plot", "clustered", "heatmap", "of", "predicted", "motif", "activity", ".", "Parameters", "----------", "kind", ":", "str", "optional", "Which", "data", "type", "to", "use", "for", "plotting", ".", "Default", "is", "final", "which", "will", "plot", "the", "result", "of", "the", "rang", "aggregation", ".", "Other", "options", "are", "freq", "for", "the", "motif", "frequencies", "or", "any", "of", "the", "individual", "activities", "such", "as", "rf", ".", "score", ".", "min_freq", ":", "float", "optional", "Minimum", "frequency", "of", "motif", "occurrence", ".", "threshold", ":", "float", "optional", "Minimum", "activity", "(", "absolute", ")", "of", "the", "rank", "aggregation", "result", ".", "name", ":", "bool", "optional", "Use", "factor", "names", "instead", "of", "motif", "names", "for", "plotting", ".", "max_len", ":", "int", "optional", "Truncate", "the", "list", "of", "factors", "to", "this", "maximum", "length", ".", "aspect", ":", "int", "optional", "Aspect", "ratio", "for", "tweaking", "the", "plot", ".", "kwargs", ":", "other", "keyword", "arguments", "All", "other", "keyword", "arguments", "are", "passed", "to", "sns", ".", "clustermap" ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/maelstrom.py#L495-L558
train
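A usage sketch for plot_heatmap; it assumes a MaelstromResult can be constructed from a maelstrom output directory (the constructor itself is not shown in this record), and the directory name is hypothetical.

# Hypothetical example: plot the rank-aggregated activities.
from gimmemotifs.maelstrom import MaelstromResult

mr = MaelstromResult("maelstrom.out")
cg = mr.plot_heatmap(kind="final", threshold=2, name=True)
cg.savefig("activity.heatmap.png")  # seaborn ClusterGrid supports savefig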
vanheeringen-lab/gimmemotifs
gimmemotifs/maelstrom.py
MaelstromResult.plot_scores
def plot_scores(self, motifs, name=True, max_len=50):
    """Create motif scores boxplot of different clusters.

    Motifs can be specified as either motif or factor names. The motif
    scores will be scaled and plotted as z-scores.

    Parameters
    ----------
    motifs : iterable or str
        List of motif or factor names.

    name : bool, optional
        Use factor names instead of motif names for plotting.

    max_len : int, optional
        Truncate the list of factors to this maximum length.

    Returns
    -------
    g : FacetGrid
        Returns the seaborn FacetGrid object with the plot.
    """
    if self.input.shape[1] != 1:
        raise ValueError("Can't make a categorical plot with real-valued data")

    if isinstance(motifs, str):
        motifs = [motifs]

    plot_motifs = []
    for motif in motifs:
        if motif in self.motifs:
            plot_motifs.append(motif)
        else:
            for m in self.motifs.values():
                if motif in m.factors:
                    plot_motifs.append(m.id)

    data = self.scores[plot_motifs]
    data[:] = scale(data, axis=0)
    if name:
        data = data.T
        data["factors"] = [join_max(self.motifs[n].factors, max_len, ",",
                                    suffix=",(...)") for n in plot_motifs]
        data = data.set_index("factors").T

    data = pd.melt(self.input.join(data), id_vars=["cluster"])
    data.columns = ["cluster", "motif", "z-score"]
    g = sns.factorplot(data=data, y="motif", x="z-score", hue="cluster",
                       kind="box", aspect=2)
    return g
python
def plot_scores(self, motifs, name=True, max_len=50):
    """Create motif scores boxplot of different clusters.

    Motifs can be specified as either motif or factor names. The motif
    scores will be scaled and plotted as z-scores.

    Parameters
    ----------
    motifs : iterable or str
        List of motif or factor names.

    name : bool, optional
        Use factor names instead of motif names for plotting.

    max_len : int, optional
        Truncate the list of factors to this maximum length.

    Returns
    -------
    g : FacetGrid
        Returns the seaborn FacetGrid object with the plot.
    """
    if self.input.shape[1] != 1:
        raise ValueError("Can't make a categorical plot with real-valued data")

    if isinstance(motifs, str):
        motifs = [motifs]

    plot_motifs = []
    for motif in motifs:
        if motif in self.motifs:
            plot_motifs.append(motif)
        else:
            for m in self.motifs.values():
                if motif in m.factors:
                    plot_motifs.append(m.id)

    data = self.scores[plot_motifs]
    data[:] = scale(data, axis=0)
    if name:
        data = data.T
        data["factors"] = [join_max(self.motifs[n].factors, max_len, ",",
                                    suffix=",(...)") for n in plot_motifs]
        data = data.set_index("factors").T

    data = pd.melt(self.input.join(data), id_vars=["cluster"])
    data.columns = ["cluster", "motif", "z-score"]
    g = sns.factorplot(data=data, y="motif", x="z-score", hue="cluster",
                       kind="box", aspect=2)
    return g
[ "def", "plot_scores", "(", "self", ",", "motifs", ",", "name", "=", "True", ",", "max_len", "=", "50", ")", ":", "if", "self", ".", "input", ".", "shape", "[", "1", "]", "!=", "1", ":", "raise", "ValueError", "(", "\"Can't make a categorical plot with real-valued data\"", ")", "if", "type", "(", "\"\"", ")", "==", "type", "(", "motifs", ")", ":", "motifs", "=", "[", "motifs", "]", "plot_motifs", "=", "[", "]", "for", "motif", "in", "motifs", ":", "if", "motif", "in", "self", ".", "motifs", ":", "plot_motifs", ".", "append", "(", "motif", ")", "else", ":", "for", "m", "in", "self", ".", "motifs", ".", "values", "(", ")", ":", "if", "motif", "in", "m", ".", "factors", ":", "plot_motifs", ".", "append", "(", "m", ".", "id", ")", "data", "=", "self", ".", "scores", "[", "plot_motifs", "]", "data", "[", ":", "]", "=", "data", ".", "scale", "(", "data", ",", "axix", "=", "0", ")", "if", "name", ":", "data", "=", "data", ".", "T", "data", "[", "\"factors\"", "]", "=", "[", "join_max", "(", "self", ".", "motifs", "[", "n", "]", ".", "factors", ",", "max_len", ",", "\",\"", ",", "suffix", "=", "\",(...)\"", ")", "for", "n", "in", "plot_motifs", "]", "data", "=", "data", ".", "set_index", "(", "\"factors\"", ")", ".", "T", "data", "=", "pd", ".", "melt", "(", "self", ".", "input", ".", "join", "(", "data", ")", ",", "id_vars", "=", "[", "\"cluster\"", "]", ")", "data", ".", "columns", "=", "[", "\"cluster\"", ",", "\"motif\"", ",", "\"z-score\"", "]", "g", "=", "sns", ".", "factorplot", "(", "data", "=", "data", ",", "y", "=", "\"motif\"", ",", "x", "=", "\"z-score\"", ",", "hue", "=", "\"cluster\"", ",", "kind", "=", "\"box\"", ",", "aspect", "=", "2", ")", "return", "g" ]
Create motif scores boxplot of different clusters. Motifs can be specified as either motif or factor names. The motif scores will be scaled and plotted as z-scores. Parameters ---------- motifs : iterable or str List of motif or factor names. name : bool, optional Use factor names instead of motif names for plotting. max_len : int, optional Truncate the list of factors to this maximum length. Returns ------- g : FacetGrid Returns the seaborn FacetGrid object with the plot.
[ "Create", "motif", "scores", "boxplot", "of", "different", "clusters", ".", "Motifs", "can", "be", "specified", "as", "either", "motif", "or", "factor", "names", ".", "The", "motif", "scores", "will", "be", "scaled", "and", "plotted", "as", "z", "-", "scores", ".", "Parameters", "----------", "motifs", ":", "iterable", "or", "str", "List", "of", "motif", "or", "factor", "names", ".", "name", ":", "bool", "optional", "Use", "factor", "names", "instead", "of", "motif", "names", "for", "plotting", ".", "max_len", ":", "int", "optional", "Truncate", "the", "list", "of", "factors", "to", "this", "maximum", "length", ".", "Returns", "-------", "g", ":", "FacetGrid", "Returns", "the", "seaborn", "FacetGrid", "object", "with", "the", "plot", "." ]
1dc0572179e5d0c8f96958060133c1f8d92c6675
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/maelstrom.py#L561-L608
train
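A usage sketch for plot_scores, continuing from the MaelstromResult above; it requires a single-column (cluster label) input table, and the factor name is hypothetical.

# Hypothetical example: compare z-scored motif scores between clusters.
g = mr.plot_scores("GATA1")  # accepts a motif id or a factor name
g.savefig("gata1.scores.png")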
biosustain/swiglpk
scripts/find_swiglpk_version.py
get_version
def get_version(package, url_pattern=URL_PATTERN):
    """Return version of package on pypi.python.org using json.
    Adapted from https://stackoverflow.com/a/34366589"""
    req = requests.get(url_pattern.format(package=package))
    version = parse('0')
    if req.status_code == requests.codes.ok:
        j = req.json()
        releases = j.get('releases', [])
        for release in releases:
            ver = parse(release)
            if not ver.is_prerelease:
                version = max(version, ver)
    return version
python
def get_version(package, url_pattern=URL_PATTERN):
    """Return version of package on pypi.python.org using json.
    Adapted from https://stackoverflow.com/a/34366589"""
    req = requests.get(url_pattern.format(package=package))
    version = parse('0')
    if req.status_code == requests.codes.ok:
        j = req.json()
        releases = j.get('releases', [])
        for release in releases:
            ver = parse(release)
            if not ver.is_prerelease:
                version = max(version, ver)
    return version
[ "def", "get_version", "(", "package", ",", "url_pattern", "=", "URL_PATTERN", ")", ":", "req", "=", "requests", ".", "get", "(", "url_pattern", ".", "format", "(", "package", "=", "package", ")", ")", "version", "=", "parse", "(", "'0'", ")", "if", "req", ".", "status_code", "==", "requests", ".", "codes", ".", "ok", ":", "# j = json.loads(req.text.encode(req.encoding))", "j", "=", "req", ".", "json", "(", ")", "releases", "=", "j", ".", "get", "(", "'releases'", ",", "[", "]", ")", "for", "release", "in", "releases", ":", "ver", "=", "parse", "(", "release", ")", "if", "not", "ver", ".", "is_prerelease", ":", "version", "=", "max", "(", "version", ",", "ver", ")", "return", "version" ]
Return version of package on pypi.python.org using json. Adapted from https://stackoverflow.com/a/34366589
[ "Return", "version", "of", "package", "on", "pypi", ".", "python", ".", "org", "using", "json", ".", "Adapted", "from", "https", ":", "//", "stackoverflow", ".", "com", "/", "a", "/", "34366589" ]
a83bc5d0bb4bf7795756ba11a33fb5cb2c501bef
https://github.com/biosustain/swiglpk/blob/a83bc5d0bb4bf7795756ba11a33fb5cb2c501bef/scripts/find_swiglpk_version.py#L13-L25
train
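A usage sketch for get_version; URL_PATTERN is not shown in this record, so the value below assumes the conventional PyPI JSON endpoint, and parse is assumed to be packaging.version.parse (consistent with the is_prerelease check in the code).

import requests
from packaging.version import parse

URL_PATTERN = 'https://pypi.python.org/pypi/{package}/json'  # assumed value

print(get_version('swiglpk'))  # latest non-prerelease version on PyPI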
osantana/prettyconf
prettyconf/loaders.py
get_args
def get_args(parser): """ Converts arguments extracted from a parser to a dict, and will dismiss arguments which default to NOT_SET. :param parser: an ``argparse.ArgumentParser`` instance. :type parser: argparse.ArgumentParser :return: Dictionary with the configs found in the parsed CLI arguments. :rtype: dict """ args = vars(parser.parse_args()).items() return {key: val for key, val in args if not isinstance(val, NotSet)}
python
def get_args(parser): """ Converts arguments extracted from a parser to a dict, and will dismiss arguments which default to NOT_SET. :param parser: an ``argparse.ArgumentParser`` instance. :type parser: argparse.ArgumentParser :return: Dictionary with the configs found in the parsed CLI arguments. :rtype: dict """ args = vars(parser.parse_args()).items() return {key: val for key, val in args if not isinstance(val, NotSet)}
[ "def", "get_args", "(", "parser", ")", ":", "args", "=", "vars", "(", "parser", ".", "parse_args", "(", ")", ")", ".", "items", "(", ")", "return", "{", "key", ":", "val", "for", "key", ",", "val", "in", "args", "if", "not", "isinstance", "(", "val", ",", "NotSet", ")", "}" ]
Converts arguments extracted from a parser to a dict, and will dismiss arguments which default to NOT_SET. :param parser: an ``argparse.ArgumentParser`` instance. :type parser: argparse.ArgumentParser :return: Dictionary with the configs found in the parsed CLI arguments. :rtype: dict
[ "Converts", "arguments", "extracted", "from", "a", "parser", "to", "a", "dict", "and", "will", "dismiss", "arguments", "which", "default", "to", "NOT_SET", "." ]
ddbbc8a592ebd7d80d9c3f87c8671523e3692a0d
https://github.com/osantana/prettyconf/blob/ddbbc8a592ebd7d80d9c3f87c8671523e3692a0d/prettyconf/loaders.py#L22-L33
train
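A usage sketch for get_args; NOT_SET is assumed to be the module's NotSet sentinel instance (the docstring above refers to it, but its definition is not part of this record).

import argparse

from prettyconf.loaders import NOT_SET  # assumed location of the sentinel

parser = argparse.ArgumentParser()
parser.add_argument('--workers', type=int, default=NOT_SET)
parser.add_argument('--debug', action='store_true', default=NOT_SET)

# With `--workers 4` on the command line, only the set option survives:
# get_args(parser) -> {'workers': 4}
config = get_args(parser)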
aouyar/PyMunin
pysysinfo/util.py
parse_value
def parse_value(val, parsebool=False):
    """Parse input string and return int, float or str depending on format.

    @param val:       Input string.
    @param parsebool: If True parse yes / no, on / off as boolean.
    @return:          Value of type int, float or str.

    """
    try:
        return int(val)
    except (ValueError, TypeError):
        pass
    try:
        return float(val)
    except (ValueError, TypeError):
        pass
    if parsebool:
        if re.match('yes|on', str(val), re.IGNORECASE):
            return True
        elif re.match('no|off', str(val), re.IGNORECASE):
            return False
    return val
python
def parse_value(val, parsebool=False):
    """Parse input string and return int, float or str depending on format.

    @param val:       Input string.
    @param parsebool: If True parse yes / no, on / off as boolean.
    @return:          Value of type int, float or str.

    """
    try:
        return int(val)
    except (ValueError, TypeError):
        pass
    try:
        return float(val)
    except (ValueError, TypeError):
        pass
    if parsebool:
        if re.match('yes|on', str(val), re.IGNORECASE):
            return True
        elif re.match('no|off', str(val), re.IGNORECASE):
            return False
    return val
[ "def", "parse_value", "(", "val", ",", "parsebool", "=", "False", ")", ":", "try", ":", "return", "int", "(", "val", ")", "except", "ValueError", ":", "pass", "try", ":", "return", "float", "(", "val", ")", "except", ":", "pass", "if", "parsebool", ":", "if", "re", ".", "match", "(", "'yes|on'", ",", "str", "(", "val", ")", ",", "re", ".", "IGNORECASE", ")", ":", "return", "True", "elif", "re", ".", "match", "(", "'no|off'", ",", "str", "(", "val", ")", ",", "re", ".", "IGNORECASE", ")", ":", "return", "False", "return", "val" ]
Parse input string and return int, float or str depending on format. @param val: Input string. @param parsebool: If True parse yes / no, on / off as boolean. @return: Value of type int, float or str.
[ "Parse", "input", "string", "and", "return", "int", "float", "or", "str", "depending", "on", "format", "." ]
4f58a64b6b37c85a84cc7e1e07aafaa0321b249d
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/util.py#L27-L48
train
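A few illustrative calls for parse_value, following directly from the code above.

parse_value("42")                      # -> 42 (int)
parse_value("3.14")                    # -> 3.14 (float)
parse_value("On", parsebool=True)      # -> True (case insensitive)
parse_value("off", parsebool=True)     # -> False
parse_value("maybe", parsebool=True)   # -> 'maybe' (returned unchanged)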
aouyar/PyMunin
pysysinfo/util.py
socket_read
def socket_read(fp):
    """Buffered read from socket. Reads all data available from socket.

    @param fp: File pointer for socket.
    @return:   String of characters read from buffer.

    """
    response = ''
    oldlen = 0
    newlen = 0
    while True:
        response += fp.read(buffSize)
        newlen = len(response)
        if newlen - oldlen == 0:
            break
        else:
            oldlen = newlen
    return response
python
def socket_read(fp):
    """Buffered read from socket. Reads all data available from socket.

    @param fp: File pointer for socket.
    @return:   String of characters read from buffer.

    """
    response = ''
    oldlen = 0
    newlen = 0
    while True:
        response += fp.read(buffSize)
        newlen = len(response)
        if newlen - oldlen == 0:
            break
        else:
            oldlen = newlen
    return response
[ "def", "socket_read", "(", "fp", ")", ":", "response", "=", "''", "oldlen", "=", "0", "newlen", "=", "0", "while", "True", ":", "response", "+=", "fp", ".", "read", "(", "buffSize", ")", "newlen", "=", "len", "(", "response", ")", "if", "newlen", "-", "oldlen", "==", "0", ":", "break", "else", ":", "oldlen", "=", "newlen", "return", "response" ]
Buffered read from socket. Reads all data available from socket.

    @param fp: File pointer for socket.
    @return:   String of characters read from buffer.
[ "Buffered", "read", "from", "socket", ".", "Reads", "all", "data", "available", "from", "socket", "." ]
4f58a64b6b37c85a84cc7e1e07aafaa0321b249d
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/util.py#L64-L81
train
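A usage sketch for socket_read; the host and port are hypothetical (4949 is the conventional munin-node port), and the example assumes the server closes the connection after answering, so the read loop terminates.

import socket

sock = socket.create_connection(("localhost", 4949))
fp = sock.makefile()
sock.sendall("list\nquit\n")      # munin-node closes the session after 'quit'
response = socket_read(fp)        # drains everything the server sent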
aouyar/PyMunin
pysysinfo/util.py
exec_command
def exec_command(args, env=None):
    """Convenience function that executes command and returns result.

    @param args: Tuple of command and arguments.
    @param env:  Dictionary of environment variables.
                 (Environment is not modified if None.)
    @return:     Command output.

    """
    try:
        cmd = subprocess.Popen(args,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               bufsize=buffSize, env=env)
    except OSError, e:
        raise Exception("Execution of command failed.\n"
                        "  Command: %s\n  Error: %s"
                        % (' '.join(args), str(e)))
    out, err = cmd.communicate(None)
    if cmd.returncode != 0:
        raise Exception("Execution of command failed with error code: %s\n%s\n"
                        % (cmd.returncode, err))
    return out
python
def exec_command(args, env=None):
    """Convenience function that executes command and returns result.

    @param args: Tuple of command and arguments.
    @param env:  Dictionary of environment variables.
                 (Environment is not modified if None.)
    @return:     Command output.

    """
    try:
        cmd = subprocess.Popen(args,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               bufsize=buffSize, env=env)
    except OSError, e:
        raise Exception("Execution of command failed.\n"
                        "  Command: %s\n  Error: %s"
                        % (' '.join(args), str(e)))
    out, err = cmd.communicate(None)
    if cmd.returncode != 0:
        raise Exception("Execution of command failed with error code: %s\n%s\n"
                        % (cmd.returncode, err))
    return out
[ "def", "exec_command", "(", "args", ",", "env", "=", "None", ")", ":", "try", ":", "cmd", "=", "subprocess", ".", "Popen", "(", "args", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ",", "bufsize", "=", "buffSize", ",", "env", "=", "env", ")", "except", "OSError", ",", "e", ":", "raise", "Exception", "(", "\"Execution of command failed.\\n\"", ",", "\" Command: %s\\n Error: %s\"", "%", "(", "' '", ".", "join", "(", "args", ")", ",", "str", "(", "e", ")", ")", ")", "out", ",", "err", "=", "cmd", ".", "communicate", "(", "None", ")", "if", "cmd", ".", "returncode", "!=", "0", ":", "raise", "Exception", "(", "\"Execution of command failed with error code: %s\\n%s\\n\"", "%", "(", "cmd", ".", "returncode", ",", "err", ")", ")", "return", "out" ]
Convenience function that executes command and returns result. @param args: Tuple of command and arguments. @param env: Dictionary of environment variables. (Environment is not modified if None.) @return: Command output.
[ "Convenience", "function", "that", "executes", "command", "and", "returns", "result", "." ]
4f58a64b6b37c85a84cc7e1e07aafaa0321b249d
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/util.py#L84-L106
train
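Two illustrative calls for exec_command; the commands are ordinary system utilities, and the function raises if the exit code is non-zero.

out = exec_command(["uptime"])
print(out)

# The environment can be pinned explicitly, e.g. for stable output parsing:
out = exec_command(["df", "-k"], env={"LANG": "C"})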
aouyar/PyMunin
pysysinfo/util.py
NestedDict.set_nested
def set_nested(self, klist, value): """D.set_nested((k1, k2,k3, ...), v) -> D[k1][k2][k3] ... = v""" keys = list(klist) if len(keys) > 0: curr_dict = self last_key = keys.pop() for key in keys: if not curr_dict.has_key(key) or not isinstance(curr_dict[key], NestedDict): curr_dict[key] = type(self)() curr_dict = curr_dict[key] curr_dict[last_key] = value
python
def set_nested(self, klist, value): """D.set_nested((k1, k2,k3, ...), v) -> D[k1][k2][k3] ... = v""" keys = list(klist) if len(keys) > 0: curr_dict = self last_key = keys.pop() for key in keys: if not curr_dict.has_key(key) or not isinstance(curr_dict[key], NestedDict): curr_dict[key] = type(self)() curr_dict = curr_dict[key] curr_dict[last_key] = value
[ "def", "set_nested", "(", "self", ",", "klist", ",", "value", ")", ":", "keys", "=", "list", "(", "klist", ")", "if", "len", "(", "keys", ")", ">", "0", ":", "curr_dict", "=", "self", "last_key", "=", "keys", ".", "pop", "(", ")", "for", "key", "in", "keys", ":", "if", "not", "curr_dict", ".", "has_key", "(", "key", ")", "or", "not", "isinstance", "(", "curr_dict", "[", "key", "]", ",", "NestedDict", ")", ":", "curr_dict", "[", "key", "]", "=", "type", "(", "self", ")", "(", ")", "curr_dict", "=", "curr_dict", "[", "key", "]", "curr_dict", "[", "last_key", "]", "=", "value" ]
D.set_nested((k1, k2,k3, ...), v) -> D[k1][k2][k3] ... = v
[ "D", ".", "set_nested", "((", "k1", "k2", "k3", "...", ")", "v", ")", "-", ">", "D", "[", "k1", "]", "[", "k2", "]", "[", "k3", "]", "...", "=", "v" ]
4f58a64b6b37c85a84cc7e1e07aafaa0321b249d
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/util.py#L156-L167
train
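A short example of set_nested; NestedDict behaves like a dict, so intermediate levels are created on demand.

d = NestedDict()
d.set_nested(("vhost", "example.com", "requests"), 1024)
# d == {'vhost': {'example.com': {'requests': 1024}}}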
aouyar/PyMunin
pysysinfo/util.py
TableFilter.registerFilter
def registerFilter(self, column, patterns, is_regex=False, ignore_case=False):
    """Register filter on a column of table.

    @param column:      The column name.
    @param patterns:    A single pattern or a list of patterns used for
                        matching column values.
    @param is_regex:    The patterns will be treated as regex if True, the
                        column values will be tested for equality with the
                        patterns otherwise.
    @param ignore_case: Case insensitive matching will be used if True.

    """
    if isinstance(patterns, basestring):
        patt_list = (patterns,)
    elif isinstance(patterns, (tuple, list)):
        patt_list = list(patterns)
    else:
        raise ValueError("The patterns parameter must either be a string "
                         "or a tuple / list of strings.")
    if is_regex:
        if ignore_case:
            flags = re.IGNORECASE
        else:
            flags = 0
        patt_exprs = [re.compile(pattern, flags) for pattern in patt_list]
    else:
        if ignore_case:
            patt_exprs = [pattern.lower() for pattern in patt_list]
        else:
            patt_exprs = patt_list
    self._filters[column] = (patt_exprs, is_regex, ignore_case)
python
def registerFilter(self, column, patterns, is_regex=False, ignore_case=False):
    """Register filter on a column of table.

    @param column:      The column name.
    @param patterns:    A single pattern or a list of patterns used for
                        matching column values.
    @param is_regex:    The patterns will be treated as regex if True, the
                        column values will be tested for equality with the
                        patterns otherwise.
    @param ignore_case: Case insensitive matching will be used if True.

    """
    if isinstance(patterns, basestring):
        patt_list = (patterns,)
    elif isinstance(patterns, (tuple, list)):
        patt_list = list(patterns)
    else:
        raise ValueError("The patterns parameter must either be a string "
                         "or a tuple / list of strings.")
    if is_regex:
        if ignore_case:
            flags = re.IGNORECASE
        else:
            flags = 0
        patt_exprs = [re.compile(pattern, flags) for pattern in patt_list]
    else:
        if ignore_case:
            patt_exprs = [pattern.lower() for pattern in patt_list]
        else:
            patt_exprs = patt_list
    self._filters[column] = (patt_exprs, is_regex, ignore_case)
[ "def", "registerFilter", "(", "self", ",", "column", ",", "patterns", ",", "is_regex", "=", "False", ",", "ignore_case", "=", "False", ")", ":", "if", "isinstance", "(", "patterns", ",", "basestring", ")", ":", "patt_list", "=", "(", "patterns", ",", ")", "elif", "isinstance", "(", "patterns", ",", "(", "tuple", ",", "list", ")", ")", ":", "patt_list", "=", "list", "(", "patterns", ")", "else", ":", "raise", "ValueError", "(", "\"The patterns parameter must either be as string \"", "\"or a tuple / list of strings.\"", ")", "if", "is_regex", ":", "if", "ignore_case", ":", "flags", "=", "re", ".", "IGNORECASE", "else", ":", "flags", "=", "0", "patt_exprs", "=", "[", "re", ".", "compile", "(", "pattern", ",", "flags", ")", "for", "pattern", "in", "patt_list", "]", "else", ":", "if", "ignore_case", ":", "patt_exprs", "=", "[", "pattern", ".", "lower", "(", ")", "for", "pattern", "in", "patt_list", "]", "else", ":", "patt_exprs", "=", "patt_list", "self", ".", "_filters", "[", "column", "]", "=", "(", "patt_exprs", ",", "is_regex", ",", "ignore_case", ")" ]
Register filter on a column of table. @param column: The column name. @param patterns: A single pattern or a list of patterns used for matching column values. @param is_regex: The patterns will be treated as regex if True, the column values will be tested for equality with the patterns otherwise. @param ignore_case: Case insensitive matching will be used if True.
[ "Register", "filter", "on", "a", "column", "of", "table", "." ]
4f58a64b6b37c85a84cc7e1e07aafaa0321b249d
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/util.py#L233-L264
train
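A usage sketch for registerFilter; the column names and patterns below are hypothetical.

f = TableFilter()
f.registerFilter("user", ["apache", "nginx"], ignore_case=True)
f.registerFilter("cmd", "httpd.*", is_regex=True)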
aouyar/PyMunin
pysysinfo/util.py
TableFilter.unregisterFilter
def unregisterFilter(self, column): """Unregister filter on a column of the table. @param column: The column header. """ if self._filters.has_key(column): del self._filters[column]
python
def unregisterFilter(self, column): """Unregister filter on a column of the table. @param column: The column header. """ if self._filters.has_key(column): del self._filters[column]
[ "def", "unregisterFilter", "(", "self", ",", "column", ")", ":", "if", "self", ".", "_filters", ".", "has_key", "(", "column", ")", ":", "del", "self", ".", "_filters", "[", "column", "]" ]
Unregister filter on a column of the table. @param column: The column header.
[ "Unregister", "filter", "on", "a", "column", "of", "the", "table", "." ]
4f58a64b6b37c85a84cc7e1e07aafaa0321b249d
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/util.py#L266-L273
train
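Continuing the sketch above: removing one of the registered filters.

f.unregisterFilter("cmd")   # rows are no longer filtered on the 'cmd' column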
aouyar/PyMunin
pysysinfo/util.py
TableFilter.registerFilters
def registerFilters(self, **kwargs): """Register multiple filters at once. @param **kwargs: Multiple filters are registered using keyword variables. Each keyword must correspond to a field name with an optional suffix: field: Field equal to value or in list of values. field_ic: Field equal to value or in list of values, using case insensitive comparison. field_regex: Field matches regex value or matches with any regex in list of values. field_ic_regex: Field matches regex value or matches with any regex in list of values using case insensitive match. """ for (key, patterns) in kwargs.items(): if key.endswith('_regex'): col = key[:-len('_regex')] is_regex = True else: col = key is_regex = False if col.endswith('_ic'): col = col[:-len('_ic')] ignore_case = True else: ignore_case = False self.registerFilter(col, patterns, is_regex, ignore_case)
python
def registerFilters(self, **kwargs): """Register multiple filters at once. @param **kwargs: Multiple filters are registered using keyword variables. Each keyword must correspond to a field name with an optional suffix: field: Field equal to value or in list of values. field_ic: Field equal to value or in list of values, using case insensitive comparison. field_regex: Field matches regex value or matches with any regex in list of values. field_ic_regex: Field matches regex value or matches with any regex in list of values using case insensitive match. """ for (key, patterns) in kwargs.items(): if key.endswith('_regex'): col = key[:-len('_regex')] is_regex = True else: col = key is_regex = False if col.endswith('_ic'): col = col[:-len('_ic')] ignore_case = True else: ignore_case = False self.registerFilter(col, patterns, is_regex, ignore_case)
[ "def", "registerFilters", "(", "self", ",", "*", "*", "kwargs", ")", ":", "for", "(", "key", ",", "patterns", ")", "in", "kwargs", ".", "items", "(", ")", ":", "if", "key", ".", "endswith", "(", "'_regex'", ")", ":", "col", "=", "key", "[", ":", "-", "len", "(", "'_regex'", ")", "]", "is_regex", "=", "True", "else", ":", "col", "=", "key", "is_regex", "=", "False", "if", "col", ".", "endswith", "(", "'_ic'", ")", ":", "col", "=", "col", "[", ":", "-", "len", "(", "'_ic'", ")", "]", "ignore_case", "=", "True", "else", ":", "ignore_case", "=", "False", "self", ".", "registerFilter", "(", "col", ",", "patterns", ",", "is_regex", ",", "ignore_case", ")" ]
Register multiple filters at once. @param **kwargs: Multiple filters are registered using keyword variables. Each keyword must correspond to a field name with an optional suffix: field: Field equal to value or in list of values. field_ic: Field equal to value or in list of values, using case insensitive comparison. field_regex: Field matches regex value or matches with any regex in list of values. field_ic_regex: Field matches regex value or matches with any regex in list of values using case insensitive match.
[ "Register", "multiple", "filters", "at", "once", "." ]
4f58a64b6b37c85a84cc7e1e07aafaa0321b249d
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/util.py#L275-L304
train
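The same two filters as in the registerFilter sketch, expressed through the keyword suffix convention documented above (_ic for case-insensitive matching, _regex for regex matching).

f = TableFilter()
f.registerFilters(user_ic=["apache", "nginx"], cmd_regex="httpd.*")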
aouyar/PyMunin
pysysinfo/util.py
TableFilter.applyFilters
def applyFilters(self, headers, table):
    """Apply registered filters on a table.

    @param headers: List of column headers.
    @param table:   Nested list of rows and columns.
    @return:        Nested list of rows and columns filtered using
                    registered filters.

    """
    result = []
    column_idxs = {}
    for column in self._filters.keys():
        try:
            column_idxs[column] = headers.index(column)
        except ValueError:
            raise ValueError('Invalid column name %s in filter.' % column)
    for row in table:
        for (column, (patterns, is_regex, ignore_case)) in self._filters.items():
            col_idx = column_idxs[column]
            col_val = row[col_idx]
            if is_regex:
                for pattern in patterns:
                    if pattern.search(col_val):
                        break
                else:
                    break
            else:
                if ignore_case:
                    col_val = col_val.lower()
                if col_val not in patterns:
                    break
        else:
            result.append(row)
    return result
python
def applyFilters(self, headers, table):
    """Apply registered filters on a table.

    @param headers: List of column headers.
    @param table:   Nested list of rows and columns.
    @return:        Nested list of rows and columns filtered using
                    registered filters.

    """
    result = []
    column_idxs = {}
    for column in self._filters.keys():
        try:
            column_idxs[column] = headers.index(column)
        except ValueError:
            raise ValueError('Invalid column name %s in filter.' % column)
    for row in table:
        for (column, (patterns, is_regex, ignore_case)) in self._filters.items():
            col_idx = column_idxs[column]
            col_val = row[col_idx]
            if is_regex:
                for pattern in patterns:
                    if pattern.search(col_val):
                        break
                else:
                    break
            else:
                if ignore_case:
                    col_val = col_val.lower()
                if col_val not in patterns:
                    break
        else:
            result.append(row)
    return result
[ "def", "applyFilters", "(", "self", ",", "headers", ",", "table", ")", ":", "result", "=", "[", "]", "column_idxs", "=", "{", "}", "for", "column", "in", "self", ".", "_filters", ".", "keys", "(", ")", ":", "try", ":", "column_idxs", "[", "column", "]", "=", "headers", ".", "index", "(", "column", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'Invalid column name %s in filter.'", "%", "column", ")", "for", "row", "in", "table", ":", "for", "(", "column", ",", "(", "patterns", ",", "is_regex", ",", "ignore_case", ")", ")", "in", "self", ".", "_filters", ".", "items", "(", ")", ":", "col_idx", "=", "column_idxs", "[", "column", "]", "col_val", "=", "row", "[", "col_idx", "]", "if", "is_regex", ":", "for", "pattern", "in", "patterns", ":", "if", "pattern", ".", "search", "(", "col_val", ")", ":", "break", "else", ":", "break", "else", ":", "if", "ignore_case", ":", "col_val", "=", "col_val", ".", "lower", "(", ")", "if", "col_val", "in", "patterns", ":", "pass", "else", ":", "break", "else", ":", "result", ".", "append", "(", "row", ")", "return", "result" ]
Apply registered filters on a table.

    @param headers: List of column headers.
    @param table:   Nested list of rows and columns.
    @return:        Nested list of rows and columns filtered using
                    registered filters.
[ "Apply", "filter", "on", "ps", "command", "result", "." ]
4f58a64b6b37c85a84cc7e1e07aafaa0321b249d
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/util.py#L306-L343
train
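Applying the filters from the sketches above to a small hypothetical table; only the row matching both the 'user' and 'cmd' filters is kept.

headers = ["user", "cmd"]
table = [["apache", "httpd -k start"],
         ["root", "sshd"]]
f.applyFilters(headers, table)   # -> [['apache', 'httpd -k start']]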
aouyar/PyMunin
pysysinfo/util.py
Telnet.open
def open(self, host=None, port=0, socket_file=None, timeout=socket.getdefaulttimeout()): """Connect to a host. With a host argument, it connects the instance using TCP; port number and timeout are optional, socket_file must be None. The port number defaults to the standard telnet port (23). With a socket_file argument, it connects the instance using named socket; timeout is optional and host must be None. Don't try to reopen an already connected instance. """ self.socket_file = socket_file if host is not None: if sys.version_info[:2] >= (2,6): telnetlib.Telnet.open(self, host, port, timeout) else: telnetlib.Telnet.open(self, host, port) elif socket_file is not None: self.eof = 0 self.host = host self.port = port self.timeout = timeout self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) self.sock.settimeout(timeout) self.sock.connect(socket_file) else: raise TypeError("Either host or socket_file argument is required.")
python
def open(self, host=None, port=0, socket_file=None, timeout=socket.getdefaulttimeout()): """Connect to a host. With a host argument, it connects the instance using TCP; port number and timeout are optional, socket_file must be None. The port number defaults to the standard telnet port (23). With a socket_file argument, it connects the instance using named socket; timeout is optional and host must be None. Don't try to reopen an already connected instance. """ self.socket_file = socket_file if host is not None: if sys.version_info[:2] >= (2,6): telnetlib.Telnet.open(self, host, port, timeout) else: telnetlib.Telnet.open(self, host, port) elif socket_file is not None: self.eof = 0 self.host = host self.port = port self.timeout = timeout self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) self.sock.settimeout(timeout) self.sock.connect(socket_file) else: raise TypeError("Either host or socket_file argument is required.")
[ "def", "open", "(", "self", ",", "host", "=", "None", ",", "port", "=", "0", ",", "socket_file", "=", "None", ",", "timeout", "=", "socket", ".", "getdefaulttimeout", "(", ")", ")", ":", "self", ".", "socket_file", "=", "socket_file", "if", "host", "is", "not", "None", ":", "if", "sys", ".", "version_info", "[", ":", "2", "]", ">=", "(", "2", ",", "6", ")", ":", "telnetlib", ".", "Telnet", ".", "open", "(", "self", ",", "host", ",", "port", ",", "timeout", ")", "else", ":", "telnetlib", ".", "Telnet", ".", "open", "(", "self", ",", "host", ",", "port", ")", "elif", "socket_file", "is", "not", "None", ":", "self", ".", "eof", "=", "0", "self", ".", "host", "=", "host", "self", ".", "port", "=", "port", "self", ".", "timeout", "=", "timeout", "self", ".", "sock", "=", "socket", ".", "socket", "(", "socket", ".", "AF_UNIX", ",", "socket", ".", "SOCK_STREAM", ")", "self", ".", "sock", ".", "settimeout", "(", "timeout", ")", "self", ".", "sock", ".", "connect", "(", "socket_file", ")", "else", ":", "raise", "TypeError", "(", "\"Either host or socket_file argument is required.\"", ")" ]
Connect to a host. With a host argument, it connects the instance using TCP; port number and timeout are optional, socket_file must be None. The port number defaults to the standard telnet port (23). With a socket_file argument, it connects the instance using named socket; timeout is optional and host must be None. Don't try to reopen an already connected instance.
[ "Connect", "to", "a", "host", "." ]
4f58a64b6b37c85a84cc7e1e07aafaa0321b249d
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/util.py#L366-L395
train
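A hedged usage sketch for the Telnet subclass recorded above, assuming the class is in scope; the host and socket path are invented and no live endpoints are implied. The point is the either/or contract: pass host for TCP (delegating to telnetlib), or socket_file for an AF_UNIX connection, never both.

t = Telnet()
t.open(host='127.0.0.1', port=23, timeout=5)   # TCP, as in stock telnetlib
t.close()

u = Telnet()
u.open(socket_file='/var/run/example.sock')    # named AF_UNIX socket instead
u.close()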
ContextLab/quail
quail/analysis/analysis.py
analyze
def analyze(egg, subjgroup=None, listgroup=None, subjname='Subject', listname='List', analysis=None, position=0, permute=False, n_perms=1000, parallel=False, match='exact', distance='euclidean', features=None, ts=None): """ General analysis function that groups data by subject/list number and performs analysis. Parameters ---------- egg : Egg data object The data to be analyzed subjgroup : list of strings or ints String/int variables indicating how to group over subjects. Must be the length of the number of subjects subjname : string Name of the subject grouping variable listgroup : list of strings or ints String/int variables indicating how to group over list. Must be the length of the number of lists listname : string Name of the list grouping variable analysis : string This is the analysis you want to run. Can be accuracy, spc, pfr, temporal or fingerprint position : int Optional argument for pnr analysis. Defines encoding position of item to run pnr. Default is 0, and it is zero indexed permute : bool Optional argument for fingerprint/temporal cluster analyses. Determines whether to correct clustering scores by shuffling recall order for each list to create a distribution of clustering scores (for each feature). The "corrected" clustering score is the proportion of clustering scores in that random distribution that were lower than the clustering score for the observed recall sequence. Default is False. n_perms : int Optional argument for fingerprint/temporal cluster analyses. Number of permutations to run for "corrected" clustering scores. Default is 1000 ( per recall list). parallel : bool Option to use multiprocessing (this can help speed up the permutations tests in the clustering calculations) match : str (exact, best or smooth) Matching approach to compute recall matrix. If exact, the presented and recalled items must be identical (default). If best, the recalled item that is most similar to the presented items will be selected. If smooth, a weighted average of all presented items will be used, where the weights are derived from the similarity between the recalled item and each presented item. distance : str The distance function used to compare presented and recalled items. Applies only to 'best' and 'smooth' matching approaches. Can be any distance function supported by numpy.spatial.distance.cdist. Returns ---------- result : quail.FriedEgg Class instance containing the analysis results """ if analysis is None: raise ValueError('You must pass an analysis type.') if analysis not in analyses.keys(): raise ValueError('Analysis not recognized. Choose one of the following: ' 'accuracy, spc, pfr, lag-crp, fingerprint, temporal') from ..egg import FriedEgg if hasattr(egg, 'subjgroup'): if egg.subjgroup is not None: subjgroup = egg.subjgroup if hasattr(egg, 'subjname'): if egg.subjname is not None: subjname = egg.subjname if hasattr(egg, 'listgroup'): if egg.listgroup is not None: listgroup = egg.listgroup if hasattr(egg, 'listname'): if egg.listname is not None: listname = egg.listname if features is None: features = egg.feature_names opts = { 'subjgroup' : subjgroup, 'listgroup' : listgroup, 'subjname' : subjname, 'parallel' : parallel, 'match' : match, 'distance' : distance, 'features' : features, 'analysis_type' : analysis, 'analysis' : analyses[analysis] } if analysis is 'pfr': opts.update({'position' : 0}) elif analysis is 'pnr': opts.update({'position' : position}) if analysis is 'temporal': opts.update({'features' : ['temporal']}) if analysis in ['temporal', 'fingerprint']: opts.update({'permute' : permute, 'n_perms' : n_perms}) if analysis is 'lagcrp': opts.update({'ts' : ts}) return FriedEgg(data=_analyze_chunk(egg, **opts), analysis=analysis, list_length=egg.list_length, n_lists=egg.n_lists, n_subjects=egg.n_subjects, position=position)
python
def analyze(egg, subjgroup=None, listgroup=None, subjname='Subject', listname='List', analysis=None, position=0, permute=False, n_perms=1000, parallel=False, match='exact', distance='euclidean', features=None, ts=None): """ General analysis function that groups data by subject/list number and performs analysis. Parameters ---------- egg : Egg data object The data to be analyzed subjgroup : list of strings or ints String/int variables indicating how to group over subjects. Must be the length of the number of subjects subjname : string Name of the subject grouping variable listgroup : list of strings or ints String/int variables indicating how to group over list. Must be the length of the number of lists listname : string Name of the list grouping variable analysis : string This is the analysis you want to run. Can be accuracy, spc, pfr, temporal or fingerprint position : int Optional argument for pnr analysis. Defines encoding position of item to run pnr. Default is 0, and it is zero indexed permute : bool Optional argument for fingerprint/temporal cluster analyses. Determines whether to correct clustering scores by shuffling recall order for each list to create a distribution of clustering scores (for each feature). The "corrected" clustering score is the proportion of clustering scores in that random distribution that were lower than the clustering score for the observed recall sequence. Default is False. n_perms : int Optional argument for fingerprint/temporal cluster analyses. Number of permutations to run for "corrected" clustering scores. Default is 1000 ( per recall list). parallel : bool Option to use multiprocessing (this can help speed up the permutations tests in the clustering calculations) match : str (exact, best or smooth) Matching approach to compute recall matrix. If exact, the presented and recalled items must be identical (default). If best, the recalled item that is most similar to the presented items will be selected. If smooth, a weighted average of all presented items will be used, where the weights are derived from the similarity between the recalled item and each presented item. distance : str The distance function used to compare presented and recalled items. Applies only to 'best' and 'smooth' matching approaches. Can be any distance function supported by numpy.spatial.distance.cdist. Returns ---------- result : quail.FriedEgg Class instance containing the analysis results """ if analysis is None: raise ValueError('You must pass an analysis type.') if analysis not in analyses.keys(): raise ValueError('Analysis not recognized. Choose one of the following: ' 'accuracy, spc, pfr, lag-crp, fingerprint, temporal') from ..egg import FriedEgg if hasattr(egg, 'subjgroup'): if egg.subjgroup is not None: subjgroup = egg.subjgroup if hasattr(egg, 'subjname'): if egg.subjname is not None: subjname = egg.subjname if hasattr(egg, 'listgroup'): if egg.listgroup is not None: listgroup = egg.listgroup if hasattr(egg, 'listname'): if egg.listname is not None: listname = egg.listname if features is None: features = egg.feature_names opts = { 'subjgroup' : subjgroup, 'listgroup' : listgroup, 'subjname' : subjname, 'parallel' : parallel, 'match' : match, 'distance' : distance, 'features' : features, 'analysis_type' : analysis, 'analysis' : analyses[analysis] } if analysis is 'pfr': opts.update({'position' : 0}) elif analysis is 'pnr': opts.update({'position' : position}) if analysis is 'temporal': opts.update({'features' : ['temporal']}) if analysis in ['temporal', 'fingerprint']: opts.update({'permute' : permute, 'n_perms' : n_perms}) if analysis is 'lagcrp': opts.update({'ts' : ts}) return FriedEgg(data=_analyze_chunk(egg, **opts), analysis=analysis, list_length=egg.list_length, n_lists=egg.n_lists, n_subjects=egg.n_subjects, position=position)
[ "def", "analyze", "(", "egg", ",", "subjgroup", "=", "None", ",", "listgroup", "=", "None", ",", "subjname", "=", "'Subject'", ",", "listname", "=", "'List'", ",", "analysis", "=", "None", ",", "position", "=", "0", ",", "permute", "=", "False", ",", "n_perms", "=", "1000", ",", "parallel", "=", "False", ",", "match", "=", "'exact'", ",", "distance", "=", "'euclidean'", ",", "features", "=", "None", ",", "ts", "=", "None", ")", ":", "if", "analysis", "is", "None", ":", "raise", "ValueError", "(", "'You must pass an analysis type.'", ")", "if", "analysis", "not", "in", "analyses", ".", "keys", "(", ")", ":", "raise", "ValueError", "(", "'Analysis not recognized. Choose one of the following: '", "'accuracy, spc, pfr, lag-crp, fingerprint, temporal'", ")", "from", ".", ".", "egg", "import", "FriedEgg", "if", "hasattr", "(", "egg", ",", "'subjgroup'", ")", ":", "if", "egg", ".", "subjgroup", "is", "not", "None", ":", "subjgroup", "=", "egg", ".", "subjgroup", "if", "hasattr", "(", "egg", ",", "'subjname'", ")", ":", "if", "egg", ".", "subjname", "is", "not", "None", ":", "subjname", "=", "egg", ".", "subjname", "if", "hasattr", "(", "egg", ",", "'listgroup'", ")", ":", "if", "egg", ".", "listgroup", "is", "not", "None", ":", "listgroup", "=", "egg", ".", "listgroup", "if", "hasattr", "(", "egg", ",", "'listname'", ")", ":", "if", "egg", ".", "listname", "is", "not", "None", ":", "listname", "=", "egg", ".", "listname", "if", "features", "is", "None", ":", "features", "=", "egg", ".", "feature_names", "opts", "=", "{", "'subjgroup'", ":", "subjgroup", ",", "'listgroup'", ":", "listgroup", ",", "'subjname'", ":", "subjname", ",", "'parallel'", ":", "parallel", ",", "'match'", ":", "match", ",", "'distance'", ":", "distance", ",", "'features'", ":", "features", ",", "'analysis_type'", ":", "analysis", ",", "'analysis'", ":", "analyses", "[", "analysis", "]", "}", "if", "analysis", "is", "'pfr'", ":", "opts", ".", "update", "(", "{", "'position'", ":", "0", "}", ")", "elif", "analysis", "is", "'pnr'", ":", "opts", ".", "update", "(", "{", "'position'", ":", "position", "}", ")", "if", "analysis", "is", "'temporal'", ":", "opts", ".", "update", "(", "{", "'features'", ":", "[", "'temporal'", "]", "}", ")", "if", "analysis", "in", "[", "'temporal'", ",", "'fingerprint'", "]", ":", "opts", ".", "update", "(", "{", "'permute'", ":", "permute", ",", "'n_perms'", ":", "n_perms", "}", ")", "if", "analysis", "is", "'lagcrp'", ":", "opts", ".", "update", "(", "{", "'ts'", ":", "ts", "}", ")", "return", "FriedEgg", "(", "data", "=", "_analyze_chunk", "(", "egg", ",", "*", "*", "opts", ")", ",", "analysis", "=", "analysis", ",", "list_length", "=", "egg", ".", "list_length", ",", "n_lists", "=", "egg", ".", "n_lists", ",", "n_subjects", "=", "egg", ".", "n_subjects", ",", "position", "=", "position", ")" ]
General analysis function that groups data by subject/list number and performs analysis. Parameters ---------- egg : Egg data object The data to be analyzed subjgroup : list of strings or ints String/int variables indicating how to group over subjects. Must be the length of the number of subjects subjname : string Name of the subject grouping variable listgroup : list of strings or ints String/int variables indicating how to group over list. Must be the length of the number of lists listname : string Name of the list grouping variable analysis : string This is the analysis you want to run. Can be accuracy, spc, pfr, temporal or fingerprint position : int Optional argument for pnr analysis. Defines encoding position of item to run pnr. Default is 0, and it is zero indexed permute : bool Optional argument for fingerprint/temporal cluster analyses. Determines whether to correct clustering scores by shuffling recall order for each list to create a distribution of clustering scores (for each feature). The "corrected" clustering score is the proportion of clustering scores in that random distribution that were lower than the clustering score for the observed recall sequence. Default is False. n_perms : int Optional argument for fingerprint/temporal cluster analyses. Number of permutations to run for "corrected" clustering scores. Default is 1000 ( per recall list). parallel : bool Option to use multiprocessing (this can help speed up the permutations tests in the clustering calculations) match : str (exact, best or smooth) Matching approach to compute recall matrix. If exact, the presented and recalled items must be identical (default). If best, the recalled item that is most similar to the presented items will be selected. If smooth, a weighted average of all presented items will be used, where the weights are derived from the similarity between the recalled item and each presented item. distance : str The distance function used to compare presented and recalled items. Applies only to 'best' and 'smooth' matching approaches. Can be any distance function supported by numpy.spatial.distance.cdist. Returns ---------- result : quail.FriedEgg Class instance containing the analysis results
[ "General", "analysis", "function", "that", "groups", "data", "by", "subject", "/", "list", "number", "and", "performs", "analysis", "." ]
71dd53c792dd915dc84879d8237e3582dd68b7a4
https://github.com/ContextLab/quail/blob/71dd53c792dd915dc84879d8237e3582dd68b7a4/quail/analysis/analysis.py#L31-L155
train
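A hedged usage sketch for the analyze record above. It assumes the quail package exposes Egg, analyze, and FriedEgg.get_data at the top level as its documentation suggests; the word lists are invented. One subject recalls two of three presented words, so accuracy should come out near 0.667.

import quail

egg = quail.Egg(pres=[[['cat', 'bat', 'hat']]],   # one subject, one presented list
                rec=[[['bat', 'hat']]])           # two items recalled
fried = quail.analyze(egg, analysis='accuracy')   # returns a quail.FriedEgg
print(fried.get_data())                           # proportion recalled, ~0.667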
ContextLab/quail
quail/analysis/analysis.py
_analyze_chunk
def _analyze_chunk(data, subjgroup=None, subjname='Subject', listgroup=None, listname='List', analysis=None, analysis_type=None, pass_features=False, features=None, parallel=False, **kwargs): """ Private function that groups data by subject/list number and performs analysis for a chunk of data. Parameters ---------- data : Egg data object The data to be analyzed subjgroup : list of strings or ints String/int variables indicating how to group over subjects. Must be the length of the number of subjects subjname : string Name of the subject grouping variable listgroup : list of strings or ints String/int variables indicating how to group over list. Must be the length of the number of lists listname : string Name of the list grouping variable analysis : function This function analyzes data and returns it. pass_features : bool Logical indicating whether the analyses uses the features field of the Egg Returns ---------- analyzed_data : Pandas DataFrame DataFrame containing the analysis results """ # perform the analysis def _analysis(c): subj, lst = c subjects = [s for s in subjdict[subj]] lists = [l for l in listdict[subj][lst]] s = data.crack(lists=lists, subjects=subjects) index = pd.MultiIndex.from_arrays([[subj],[lst]], names=[subjname, listname]) opts = dict() if analysis_type is 'fingerprint': opts.update({'columns' : features}) elif analysis_type is 'lagcrp': if kwargs['ts']: opts.update({'columns' : range(-kwargs['ts'],kwargs['ts']+1)}) else: opts.update({'columns' : range(-data.list_length,data.list_length+1)}) return pd.DataFrame([analysis(s, features=features, **kwargs)], index=index, **opts) subjgroup = subjgroup if subjgroup else data.pres.index.levels[0].values listgroup = listgroup if listgroup else data.pres.index.levels[1].values subjdict = {subj : data.pres.index.levels[0].values[subj==np.array(subjgroup)] for subj in set(subjgroup)} if all(isinstance(el, list) for el in listgroup): listdict = [{lst : data.pres.index.levels[1].values[lst==np.array(listgrpsub)] for lst in set(listgrpsub)} for listgrpsub in listgroup] else: listdict = [{lst : data.pres.index.levels[1].values[lst==np.array(listgroup)] for lst in set(listgroup)} for subj in subjdict] chunks = [(subj, lst) for subj in subjdict for lst in listdict[0]] if parallel: import multiprocessing from pathos.multiprocessing import ProcessingPool as Pool p = Pool(multiprocessing.cpu_count()) res = p.map(_analysis, chunks) else: res = [_analysis(c) for c in chunks] return pd.concat(res)
python
def _analyze_chunk(data, subjgroup=None, subjname='Subject', listgroup=None, listname='List', analysis=None, analysis_type=None, pass_features=False, features=None, parallel=False, **kwargs): """ Private function that groups data by subject/list number and performs analysis for a chunk of data. Parameters ---------- data : Egg data object The data to be analyzed subjgroup : list of strings or ints String/int variables indicating how to group over subjects. Must be the length of the number of subjects subjname : string Name of the subject grouping variable listgroup : list of strings or ints String/int variables indicating how to group over list. Must be the length of the number of lists listname : string Name of the list grouping variable analysis : function This function analyzes data and returns it. pass_features : bool Logical indicating whether the analyses uses the features field of the Egg Returns ---------- analyzed_data : Pandas DataFrame DataFrame containing the analysis results """ # perform the analysis def _analysis(c): subj, lst = c subjects = [s for s in subjdict[subj]] lists = [l for l in listdict[subj][lst]] s = data.crack(lists=lists, subjects=subjects) index = pd.MultiIndex.from_arrays([[subj],[lst]], names=[subjname, listname]) opts = dict() if analysis_type is 'fingerprint': opts.update({'columns' : features}) elif analysis_type is 'lagcrp': if kwargs['ts']: opts.update({'columns' : range(-kwargs['ts'],kwargs['ts']+1)}) else: opts.update({'columns' : range(-data.list_length,data.list_length+1)}) return pd.DataFrame([analysis(s, features=features, **kwargs)], index=index, **opts) subjgroup = subjgroup if subjgroup else data.pres.index.levels[0].values listgroup = listgroup if listgroup else data.pres.index.levels[1].values subjdict = {subj : data.pres.index.levels[0].values[subj==np.array(subjgroup)] for subj in set(subjgroup)} if all(isinstance(el, list) for el in listgroup): listdict = [{lst : data.pres.index.levels[1].values[lst==np.array(listgrpsub)] for lst in set(listgrpsub)} for listgrpsub in listgroup] else: listdict = [{lst : data.pres.index.levels[1].values[lst==np.array(listgroup)] for lst in set(listgroup)} for subj in subjdict] chunks = [(subj, lst) for subj in subjdict for lst in listdict[0]] if parallel: import multiprocessing from pathos.multiprocessing import ProcessingPool as Pool p = Pool(multiprocessing.cpu_count()) res = p.map(_analysis, chunks) else: res = [_analysis(c) for c in chunks] return pd.concat(res)
[ "def", "_analyze_chunk", "(", "data", ",", "subjgroup", "=", "None", ",", "subjname", "=", "'Subject'", ",", "listgroup", "=", "None", ",", "listname", "=", "'List'", ",", "analysis", "=", "None", ",", "analysis_type", "=", "None", ",", "pass_features", "=", "False", ",", "features", "=", "None", ",", "parallel", "=", "False", ",", "*", "*", "kwargs", ")", ":", "# perform the analysis", "def", "_analysis", "(", "c", ")", ":", "subj", ",", "lst", "=", "c", "subjects", "=", "[", "s", "for", "s", "in", "subjdict", "[", "subj", "]", "]", "lists", "=", "[", "l", "for", "l", "in", "listdict", "[", "subj", "]", "[", "lst", "]", "]", "s", "=", "data", ".", "crack", "(", "lists", "=", "lists", ",", "subjects", "=", "subjects", ")", "index", "=", "pd", ".", "MultiIndex", ".", "from_arrays", "(", "[", "[", "subj", "]", ",", "[", "lst", "]", "]", ",", "names", "=", "[", "subjname", ",", "listname", "]", ")", "opts", "=", "dict", "(", ")", "if", "analysis_type", "is", "'fingerprint'", ":", "opts", ".", "update", "(", "{", "'columns'", ":", "features", "}", ")", "elif", "analysis_type", "is", "'lagcrp'", ":", "if", "kwargs", "[", "'ts'", "]", ":", "opts", ".", "update", "(", "{", "'columns'", ":", "range", "(", "-", "kwargs", "[", "'ts'", "]", ",", "kwargs", "[", "'ts'", "]", "+", "1", ")", "}", ")", "else", ":", "opts", ".", "update", "(", "{", "'columns'", ":", "range", "(", "-", "data", ".", "list_length", ",", "data", ".", "list_length", "+", "1", ")", "}", ")", "return", "pd", ".", "DataFrame", "(", "[", "analysis", "(", "s", ",", "features", "=", "features", ",", "*", "*", "kwargs", ")", "]", ",", "index", "=", "index", ",", "*", "*", "opts", ")", "subjgroup", "=", "subjgroup", "if", "subjgroup", "else", "data", ".", "pres", ".", "index", ".", "levels", "[", "0", "]", ".", "values", "listgroup", "=", "listgroup", "if", "listgroup", "else", "data", ".", "pres", ".", "index", ".", "levels", "[", "1", "]", ".", "values", "subjdict", "=", "{", "subj", ":", "data", ".", "pres", ".", "index", ".", "levels", "[", "0", "]", ".", "values", "[", "subj", "==", "np", ".", "array", "(", "subjgroup", ")", "]", "for", "subj", "in", "set", "(", "subjgroup", ")", "}", "if", "all", "(", "isinstance", "(", "el", ",", "list", ")", "for", "el", "in", "listgroup", ")", ":", "listdict", "=", "[", "{", "lst", ":", "data", ".", "pres", ".", "index", ".", "levels", "[", "1", "]", ".", "values", "[", "lst", "==", "np", ".", "array", "(", "listgrpsub", ")", "]", "for", "lst", "in", "set", "(", "listgrpsub", ")", "}", "for", "listgrpsub", "in", "listgroup", "]", "else", ":", "listdict", "=", "[", "{", "lst", ":", "data", ".", "pres", ".", "index", ".", "levels", "[", "1", "]", ".", "values", "[", "lst", "==", "np", ".", "array", "(", "listgroup", ")", "]", "for", "lst", "in", "set", "(", "listgroup", ")", "}", "for", "subj", "in", "subjdict", "]", "chunks", "=", "[", "(", "subj", ",", "lst", ")", "for", "subj", "in", "subjdict", "for", "lst", "in", "listdict", "[", "0", "]", "]", "if", "parallel", ":", "import", "multiprocessing", "from", "pathos", ".", "multiprocessing", "import", "ProcessingPool", "as", "Pool", "p", "=", "Pool", "(", "multiprocessing", ".", "cpu_count", "(", ")", ")", "res", "=", "p", ".", "map", "(", "_analysis", ",", "chunks", ")", "else", ":", "res", "=", "[", "_analysis", "(", "c", ")", "for", "c", "in", "chunks", "]", "return", "pd", ".", "concat", "(", "res", ")" ]
Private function that groups data by subject/list number and performs analysis for a chunk of data. Parameters ---------- data : Egg data object The data to be analyzed subjgroup : list of strings or ints String/int variables indicating how to group over subjects. Must be the length of the number of subjects subjname : string Name of the subject grouping variable listgroup : list of strings or ints String/int variables indicating how to group over list. Must be the length of the number of lists listname : string Name of the list grouping variable analysis : function This function analyzes data and returns it. pass_features : bool Logical indicating whether the analyses uses the features field of the Egg Returns ---------- analyzed_data : Pandas DataFrame DataFrame containing the analysis results
[ "Private", "function", "that", "groups", "data", "by", "subject", "/", "list", "number", "and", "performs", "analysis", "for", "a", "chunk", "of", "data", "." ]
71dd53c792dd915dc84879d8237e3582dd68b7a4
https://github.com/ContextLab/quail/blob/71dd53c792dd915dc84879d8237e3582dd68b7a4/quail/analysis/analysis.py#L157-L236
train
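The core of the _analyze_chunk record above is the label-to-index grouping step. Isolated below with invented subject ids and labels: each group label is mapped to the positional subject indices carrying that label, exactly as the subjdict comprehension does (dict ordering follows set iteration, so it may vary).

import numpy as np

subject_ids = np.array([0, 1, 2, 3])          # stand-in for data.pres.index.levels[0].values
subjgroup = ['young', 'young', 'old', 'old']  # one group label per subject
subjdict = {g: subject_ids[np.array(subjgroup) == g] for g in set(subjgroup)}
print(subjdict)  # e.g. {'old': array([2, 3]), 'young': array([0, 1])}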
aouyar/PyMunin
pymunin/plugins/tomcatstats.py
MuninTomcatPlugin.retrieveVals
def retrieveVals(self): """Retrieve values for graphs.""" if self.hasGraph('tomcat_memory'): stats = self._tomcatInfo.getMemoryStats() self.setGraphVal('tomcat_memory', 'used', stats['total'] - stats['free']) self.setGraphVal('tomcat_memory', 'free', stats['free']) self.setGraphVal('tomcat_memory', 'max', stats['max']) for (port, stats) in self._tomcatInfo.getConnectorStats().iteritems(): thrstats = stats['threadInfo'] reqstats = stats['requestInfo'] if self.portIncluded(port): name = "tomcat_threads_%d" % port if self.hasGraph(name): self.setGraphVal(name, 'busy', thrstats['currentThreadsBusy']) self.setGraphVal(name, 'idle', thrstats['currentThreadCount'] - thrstats['currentThreadsBusy']) self.setGraphVal(name, 'max', thrstats['maxThreads']) name = "tomcat_access_%d" % port if self.hasGraph(name): self.setGraphVal(name, 'reqs', reqstats['requestCount']) name = "tomcat_error_%d" % port if self.hasGraph(name): self.setGraphVal(name, 'errors', reqstats['errorCount']) name = "tomcat_traffic_%d" % port if self.hasGraph(name): self.setGraphVal(name, 'rx', reqstats['bytesReceived']) self.setGraphVal(name, 'tx', reqstats['bytesSent'])
python
def retrieveVals(self): """Retrieve values for graphs.""" if self.hasGraph('tomcat_memory'): stats = self._tomcatInfo.getMemoryStats() self.setGraphVal('tomcat_memory', 'used', stats['total'] - stats['free']) self.setGraphVal('tomcat_memory', 'free', stats['free']) self.setGraphVal('tomcat_memory', 'max', stats['max']) for (port, stats) in self._tomcatInfo.getConnectorStats().iteritems(): thrstats = stats['threadInfo'] reqstats = stats['requestInfo'] if self.portIncluded(port): name = "tomcat_threads_%d" % port if self.hasGraph(name): self.setGraphVal(name, 'busy', thrstats['currentThreadsBusy']) self.setGraphVal(name, 'idle', thrstats['currentThreadCount'] - thrstats['currentThreadsBusy']) self.setGraphVal(name, 'max', thrstats['maxThreads']) name = "tomcat_access_%d" % port if self.hasGraph(name): self.setGraphVal(name, 'reqs', reqstats['requestCount']) name = "tomcat_error_%d" % port if self.hasGraph(name): self.setGraphVal(name, 'errors', reqstats['errorCount']) name = "tomcat_traffic_%d" % port if self.hasGraph(name): self.setGraphVal(name, 'rx', reqstats['bytesReceived']) self.setGraphVal(name, 'tx', reqstats['bytesSent'])
[ "def", "retrieveVals", "(", "self", ")", ":", "if", "self", ".", "hasGraph", "(", "'tomcat_memory'", ")", ":", "stats", "=", "self", ".", "_tomcatInfo", ".", "getMemoryStats", "(", ")", "self", ".", "setGraphVal", "(", "'tomcat_memory'", ",", "'used'", ",", "stats", "[", "'total'", "]", "-", "stats", "[", "'free'", "]", ")", "self", ".", "setGraphVal", "(", "'tomcat_memory'", ",", "'free'", ",", "stats", "[", "'free'", "]", ")", "self", ".", "setGraphVal", "(", "'tomcat_memory'", ",", "'max'", ",", "stats", "[", "'max'", "]", ")", "for", "(", "port", ",", "stats", ")", "in", "self", ".", "_tomcatInfo", ".", "getConnectorStats", "(", ")", ".", "iteritems", "(", ")", ":", "thrstats", "=", "stats", "[", "'threadInfo'", "]", "reqstats", "=", "stats", "[", "'requestInfo'", "]", "if", "self", ".", "portIncluded", "(", "port", ")", ":", "name", "=", "\"tomcat_threads_%d\"", "%", "port", "if", "self", ".", "hasGraph", "(", "name", ")", ":", "self", ".", "setGraphVal", "(", "name", ",", "'busy'", ",", "thrstats", "[", "'currentThreadsBusy'", "]", ")", "self", ".", "setGraphVal", "(", "name", ",", "'idle'", ",", "thrstats", "[", "'currentThreadCount'", "]", "-", "thrstats", "[", "'currentThreadsBusy'", "]", ")", "self", ".", "setGraphVal", "(", "name", ",", "'max'", ",", "thrstats", "[", "'maxThreads'", "]", ")", "name", "=", "\"tomcat_access_%d\"", "%", "port", "if", "self", ".", "hasGraph", "(", "name", ")", ":", "self", ".", "setGraphVal", "(", "name", ",", "'reqs'", ",", "reqstats", "[", "'requestCount'", "]", ")", "name", "=", "\"tomcat_error_%d\"", "%", "port", "if", "self", ".", "hasGraph", "(", "name", ")", ":", "self", ".", "setGraphVal", "(", "name", ",", "'errors'", ",", "reqstats", "[", "'errorCount'", "]", ")", "name", "=", "\"tomcat_traffic_%d\"", "%", "port", "if", "self", ".", "hasGraph", "(", "name", ")", ":", "self", ".", "setGraphVal", "(", "name", ",", "'rx'", ",", "reqstats", "[", "'bytesReceived'", "]", ")", "self", ".", "setGraphVal", "(", "name", ",", "'tx'", ",", "reqstats", "[", "'bytesSent'", "]", ")" ]
Retrieve values for graphs.
[ "Retrieve", "values", "for", "graphs", "." ]
4f58a64b6b37c85a84cc7e1e07aafaa0321b249d
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/plugins/tomcatstats.py#L196-L225
train
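Two derived series in the Tomcat record above are worth spelling out: used heap is total minus free, and idle threads are the current pool size minus the busy count. (Note that stats.iteritems() ties this plugin to Python 2; items() is the Python 3 spelling.) With invented numbers:

mem = {'total': 512, 'free': 128, 'max': 1024}                        # MB, invented
used = mem['total'] - mem['free']                                      # 384 in use
thr = {'currentThreadCount': 20, 'currentThreadsBusy': 6, 'maxThreads': 200}
idle = thr['currentThreadCount'] - thr['currentThreadsBusy']           # 14 idle threads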
dfm/acor
acor/acor.py
function
def function(data, maxt=None): """ Calculate the autocorrelation function for a 1D time series. Parameters ---------- data : numpy.ndarray (N,) The time series. Returns ------- rho : numpy.ndarray (N,) An autocorrelation function. """ data = np.atleast_1d(data) assert len(np.shape(data)) == 1, \ "The autocorrelation function can only be computed " \ + "on a 1D time series." if maxt is None: maxt = len(data) result = np.zeros(maxt, dtype=float) _acor.function(np.array(data, dtype=float), result) return result / result[0]
python
def function(data, maxt=None): """ Calculate the autocorrelation function for a 1D time series. Parameters ---------- data : numpy.ndarray (N,) The time series. Returns ------- rho : numpy.ndarray (N,) An autocorrelation function. """ data = np.atleast_1d(data) assert len(np.shape(data)) == 1, \ "The autocorrelation function can only be computed " \ + "on a 1D time series." if maxt is None: maxt = len(data) result = np.zeros(maxt, dtype=float) _acor.function(np.array(data, dtype=float), result) return result / result[0]
[ "def", "function", "(", "data", ",", "maxt", "=", "None", ")", ":", "data", "=", "np", ".", "atleast_1d", "(", "data", ")", "assert", "len", "(", "np", ".", "shape", "(", "data", ")", ")", "==", "1", ",", "\"The autocorrelation function can only be computed \"", "+", "\"on a 1D time series.\"", "if", "maxt", "is", "None", ":", "maxt", "=", "len", "(", "data", ")", "result", "=", "np", ".", "zeros", "(", "maxt", ",", "dtype", "=", "float", ")", "_acor", ".", "function", "(", "np", ".", "array", "(", "data", ",", "dtype", "=", "float", ")", ",", "result", ")", "return", "result", "/", "result", "[", "0", "]" ]
Calculate the autocorrelation function for a 1D time series. Parameters ---------- data : numpy.ndarray (N,) The time series. Returns ------- rho : numpy.ndarray (N,) An autocorrelation function.
[ "Calculate", "the", "autocorrelation", "function", "for", "a", "1D", "time", "series", "." ]
b55eb8efa7df6c73b6f3f0c9b64fa1c801e8f821
https://github.com/dfm/acor/blob/b55eb8efa7df6c73b6f3f0c9b64fa1c801e8f821/acor/acor.py#L36-L59
train
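The acor record above delegates the actual computation to a C extension (_acor.function), so the algorithm is hidden. Below is a pure-NumPy stand-in for a normalized autocorrelation function with rho[0] == 1. Whether the C code demeans the series is not visible from the record, so the demeaning step is an assumption of this sketch.

import numpy as np

def acf(data, maxt=None):
    x = np.atleast_1d(np.asarray(data, dtype=float))
    x = x - x.mean()                      # demeaning: an assumption of this sketch
    n = len(x)
    maxt = n if maxt is None else maxt
    rho = np.array([np.dot(x[:n - t], x[t:]) for t in range(maxt)])
    return rho / rho[0]                   # normalized so acf[0] == 1.0

rng = np.random.default_rng(0)
print(acf(rng.normal(size=1000), maxt=5))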
aouyar/PyMunin
pymunin/plugins/nginxstats.py
MuninNginxPlugin.retrieveVals
def retrieveVals(self): """Retrieve values for graphs.""" nginxInfo = NginxInfo(self._host, self._port, self._user, self._password, self._statuspath, self._ssl) stats = nginxInfo.getServerStats() if stats: if self.hasGraph('nginx_activeconn'): self.setGraphVal('nginx_activeconn', 'proc', stats['writing']) self.setGraphVal('nginx_activeconn', 'read', stats['reading']) self.setGraphVal('nginx_activeconn', 'wait', stats['waiting']) self.setGraphVal('nginx_activeconn', 'total', stats['connections']) if self.hasGraph('nginx_connections'): self.setGraphVal('nginx_connections', 'handled', stats['handled']) self.setGraphVal('nginx_connections', 'nothandled', stats['accepts'] - stats['handled']) if self.hasGraph('nginx_requests'): self.setGraphVal('nginx_requests', 'requests', stats['requests']) if self.hasGraph('nginx_requestsperconn'): curr_stats = (stats['handled'], stats['requests']) hist_stats = self.restoreState() if hist_stats: prev_stats = hist_stats[0] else: hist_stats = [] prev_stats = (0,0) conns = max(curr_stats[0] - prev_stats[0], 0) reqs = max(curr_stats[1] - prev_stats[1], 0) if conns > 0: self.setGraphVal('nginx_requestsperconn', 'requests', float(reqs) / float(conns)) else: self.setGraphVal('nginx_requestsperconn', 'requests', 0) hist_stats.append(curr_stats) self.saveState(hist_stats[-self._numSamples:])
python
def retrieveVals(self): """Retrieve values for graphs.""" nginxInfo = NginxInfo(self._host, self._port, self._user, self._password, self._statuspath, self._ssl) stats = nginxInfo.getServerStats() if stats: if self.hasGraph('nginx_activeconn'): self.setGraphVal('nginx_activeconn', 'proc', stats['writing']) self.setGraphVal('nginx_activeconn', 'read', stats['reading']) self.setGraphVal('nginx_activeconn', 'wait', stats['waiting']) self.setGraphVal('nginx_activeconn', 'total', stats['connections']) if self.hasGraph('nginx_connections'): self.setGraphVal('nginx_connections', 'handled', stats['handled']) self.setGraphVal('nginx_connections', 'nothandled', stats['accepts'] - stats['handled']) if self.hasGraph('nginx_requests'): self.setGraphVal('nginx_requests', 'requests', stats['requests']) if self.hasGraph('nginx_requestsperconn'): curr_stats = (stats['handled'], stats['requests']) hist_stats = self.restoreState() if hist_stats: prev_stats = hist_stats[0] else: hist_stats = [] prev_stats = (0,0) conns = max(curr_stats[0] - prev_stats[0], 0) reqs = max(curr_stats[1] - prev_stats[1], 0) if conns > 0: self.setGraphVal('nginx_requestsperconn', 'requests', float(reqs) / float(conns)) else: self.setGraphVal('nginx_requestsperconn', 'requests', 0) hist_stats.append(curr_stats) self.saveState(hist_stats[-self._numSamples:])
[ "def", "retrieveVals", "(", "self", ")", ":", "nginxInfo", "=", "NginxInfo", "(", "self", ".", "_host", ",", "self", ".", "_port", ",", "self", ".", "_user", ",", "self", ".", "_password", ",", "self", ".", "_statuspath", ",", "self", ".", "_ssl", ")", "stats", "=", "nginxInfo", ".", "getServerStats", "(", ")", "if", "stats", ":", "if", "self", ".", "hasGraph", "(", "'nginx_activeconn'", ")", ":", "self", ".", "setGraphVal", "(", "'nginx_activeconn'", ",", "'proc'", ",", "stats", "[", "'writing'", "]", ")", "self", ".", "setGraphVal", "(", "'nginx_activeconn'", ",", "'read'", ",", "stats", "[", "'reading'", "]", ")", "self", ".", "setGraphVal", "(", "'nginx_activeconn'", ",", "'wait'", ",", "stats", "[", "'waiting'", "]", ")", "self", ".", "setGraphVal", "(", "'nginx_activeconn'", ",", "'total'", ",", "stats", "[", "'connections'", "]", ")", "if", "self", ".", "hasGraph", "(", "'nginx_connections'", ")", ":", "self", ".", "setGraphVal", "(", "'nginx_connections'", ",", "'handled'", ",", "stats", "[", "'handled'", "]", ")", "self", ".", "setGraphVal", "(", "'nginx_connections'", ",", "'nothandled'", ",", "stats", "[", "'accepts'", "]", "-", "stats", "[", "'handled'", "]", ")", "if", "self", ".", "hasGraph", "(", "'nginx_requests'", ")", ":", "self", ".", "setGraphVal", "(", "'nginx_requests'", ",", "'requests'", ",", "stats", "[", "'requests'", "]", ")", "if", "self", ".", "hasGraph", "(", "'nginx_requestsperconn'", ")", ":", "curr_stats", "=", "(", "stats", "[", "'handled'", "]", ",", "stats", "[", "'requests'", "]", ")", "hist_stats", "=", "self", ".", "restoreState", "(", ")", "if", "hist_stats", ":", "prev_stats", "=", "hist_stats", "[", "0", "]", "else", ":", "hist_stats", "=", "[", "]", "prev_stats", "=", "(", "0", ",", "0", ")", "conns", "=", "max", "(", "curr_stats", "[", "0", "]", "-", "prev_stats", "[", "0", "]", ",", "0", ")", "reqs", "=", "max", "(", "curr_stats", "[", "1", "]", "-", "prev_stats", "[", "1", "]", ",", "0", ")", "if", "conns", ">", "0", ":", "self", ".", "setGraphVal", "(", "'nginx_requestsperconn'", ",", "'requests'", ",", "float", "(", "reqs", ")", "/", "float", "(", "conns", ")", ")", "else", ":", "self", ".", "setGraphVal", "(", "'nginx_requestsperconn'", ",", "'requests'", ",", "0", ")", "hist_stats", ".", "append", "(", "curr_stats", ")", "self", ".", "saveState", "(", "hist_stats", "[", "-", "self", ".", "_numSamples", ":", "]", ")" ]
Retrieve values for graphs.
[ "Retrieve", "values", "for", "graphs", "." ]
4f58a64b6b37c85a84cc7e1e07aafaa0321b249d
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/plugins/nginxstats.py#L151-L186
train
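The requests-per-connection figure is the only non-trivial arithmetic in the Nginx record above: the status page exposes cumulative counters, so the plugin diffs the current sample against the oldest saved one and clamps at zero. In isolation, with invented counter values:

prev_handled, prev_requests = 1000, 2500          # restored state (hypothetical)
curr_handled, curr_requests = 1040, 2620          # current status-page counters
conns = max(curr_handled - prev_handled, 0)       # 40 new connections
reqs = max(curr_requests - prev_requests, 0)      # 120 new requests
rate = float(reqs) / conns if conns > 0 else 0.0  # 3.0 requests per connection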
aouyar/PyMunin
pymunin/plugins/nginxstats.py
MuninNginxPlugin.autoconf
def autoconf(self): """Implements Munin Plugin Auto-Configuration Option. @return: True if plugin can be auto-configured, False otherwise. """ nginxInfo = NginxInfo(self._host, self._port, self._user, self._password, self._statuspath, self._ssl) return nginxInfo is not None
python
def autoconf(self): """Implements Munin Plugin Auto-Configuration Option. @return: True if plugin can be auto-configured, False otherwise. """ nginxInfo = NginxInfo(self._host, self._port, self._user, self._password, self._statuspath, self._ssl) return nginxInfo is not None
[ "def", "autoconf", "(", "self", ")", ":", "nginxInfo", "=", "NginxInfo", "(", "self", ".", "_host", ",", "self", ".", "_port", ",", "self", ".", "_user", ",", "self", ".", "_password", ",", "self", ".", "_statuspath", ",", "self", ".", "_ssl", ")", "return", "nginxInfo", "is", "not", "None" ]
Implements Munin Plugin Auto-Configuration Option. @return: True if plugin can be auto-configured, False otherwise.
[ "Implements", "Munin", "Plugin", "Auto", "-", "Configuration", "Option", "." ]
4f58a64b6b37c85a84cc7e1e07aafaa0321b249d
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/plugins/nginxstats.py#L188-L197
train
aouyar/PyMunin
pysysinfo/phpfpm.py
PHPfpmInfo.getStats
def getStats(self): """Query and parse Web Server Status Page. """ url = "%s://%s:%d/%s" % (self._proto, self._host, self._port, self._monpath) response = util.get_url(url, self._user, self._password) stats = {} for line in response.splitlines(): mobj = re.match('([\w\s]+):\s+(\w+)$', line) if mobj: stats[mobj.group(1)] = util.parse_value(mobj.group(2)) return stats
python
def getStats(self): """Query and parse Web Server Status Page. """ url = "%s://%s:%d/%s" % (self._proto, self._host, self._port, self._monpath) response = util.get_url(url, self._user, self._password) stats = {} for line in response.splitlines(): mobj = re.match('([\w\s]+):\s+(\w+)$', line) if mobj: stats[mobj.group(1)] = util.parse_value(mobj.group(2)) return stats
[ "def", "getStats", "(", "self", ")", ":", "url", "=", "\"%s://%s:%d/%s\"", "%", "(", "self", ".", "_proto", ",", "self", ".", "_host", ",", "self", ".", "_port", ",", "self", ".", "_monpath", ")", "response", "=", "util", ".", "get_url", "(", "url", ",", "self", ".", "_user", ",", "self", ".", "_password", ")", "stats", "=", "{", "}", "for", "line", "in", "response", ".", "splitlines", "(", ")", ":", "mobj", "=", "re", ".", "match", "(", "'([\\w\\s]+):\\s+(\\w+)$'", ",", "line", ")", "if", "mobj", ":", "stats", "[", "mobj", ".", "group", "(", "1", ")", "]", "=", "util", ".", "parse_value", "(", "mobj", ".", "group", "(", "2", ")", ")", "return", "stats" ]
Query and parse Web Server Status Page.
[ "Query", "and", "parse", "Web", "Server", "Status", "Page", "." ]
4f58a64b6b37c85a84cc7e1e07aafaa0321b249d
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/phpfpm.py#L65-L77
train
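The parsing loop in the php-fpm record above is easy to exercise in isolation. The pool lines below imitate the plain-text php-fpm status format, int() stands in for util.parse_value, and a raw-string prefix is added here to keep the \w escapes valid on modern Python (the recorded code omits it).

import re

page = "accepted conn:     5678\nlisten queue:      0\nactive processes:  3\n"
stats = {}
for line in page.splitlines():
    mobj = re.match(r'([\w\s]+):\s+(\w+)$', line)
    if mobj:
        stats[mobj.group(1)] = int(mobj.group(2))  # int() stands in for util.parse_value
print(stats)  # {'accepted conn': 5678, 'listen queue': 0, 'active processes': 3}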
aouyar/PyMunin
pymunin/plugins/phpapcstats.py
MuninPHPapcPlugin.retrieveVals
def retrieveVals(self): """Retrieve values for graphs.""" apcinfo = APCinfo(self._host, self._port, self._user, self._password, self._monpath, self._ssl, self._extras) stats = apcinfo.getAllStats() if self.hasGraph('php_apc_memory') and stats: filecache = stats['cache_sys']['mem_size'] usercache = stats['cache_user']['mem_size'] total = stats['memory']['seg_size'] * stats['memory']['num_seg'] free = stats['memory']['avail_mem'] other = total - free - filecache - usercache self.setGraphVal('php_apc_memory', 'filecache', filecache) self.setGraphVal('php_apc_memory', 'usercache', usercache) self.setGraphVal('php_apc_memory', 'other', other) self.setGraphVal('php_apc_memory', 'free', free) if self.hasGraph('php_apc_items') and stats: self.setGraphVal('php_apc_items', 'filecache', stats['cache_sys']['num_entries']) self.setGraphVal('php_apc_items', 'usercache', stats['cache_user']['num_entries']) if self.hasGraph('php_apc_reqs_filecache') and stats: self.setGraphVal('php_apc_reqs_filecache', 'hits', stats['cache_sys']['num_hits']) self.setGraphVal('php_apc_reqs_filecache', 'misses', stats['cache_sys']['num_misses']) self.setGraphVal('php_apc_reqs_filecache', 'inserts', stats['cache_sys']['num_inserts']) if self.hasGraph('php_apc_reqs_usercache') and stats: self.setGraphVal('php_apc_reqs_usercache', 'hits', stats['cache_user']['num_hits']) self.setGraphVal('php_apc_reqs_usercache', 'misses', stats['cache_user']['num_misses']) self.setGraphVal('php_apc_reqs_usercache', 'inserts', stats['cache_user']['num_inserts']) if self.hasGraph('php_apc_expunge') and stats: self.setGraphVal('php_apc_expunge', 'filecache', stats['cache_sys']['expunges']) self.setGraphVal('php_apc_expunge', 'usercache', stats['cache_user']['expunges']) if self.hasGraph('php_apc_mem_util_frag'): self.setGraphVal('php_apc_mem_util_frag', 'util', stats['memory']['utilization_ratio'] * 100) self.setGraphVal('php_apc_mem_util_frag', 'frag', stats['memory']['fragmentation_ratio'] * 100) if self.hasGraph('php_apc_mem_frag_count'): self.setGraphVal('php_apc_mem_frag_count', 'num', stats['memory']['fragment_count']) if self.hasGraph('php_apc_mem_frag_avgsize'): self.setGraphVal('php_apc_mem_frag_avgsize', 'size', stats['memory']['fragment_avg_size'])
python
def retrieveVals(self): """Retrieve values for graphs.""" apcinfo = APCinfo(self._host, self._port, self._user, self._password, self._monpath, self._ssl, self._extras) stats = apcinfo.getAllStats() if self.hasGraph('php_apc_memory') and stats: filecache = stats['cache_sys']['mem_size'] usercache = stats['cache_user']['mem_size'] total = stats['memory']['seg_size'] * stats['memory']['num_seg'] free = stats['memory']['avail_mem'] other = total - free - filecache - usercache self.setGraphVal('php_apc_memory', 'filecache', filecache) self.setGraphVal('php_apc_memory', 'usercache', usercache) self.setGraphVal('php_apc_memory', 'other', other) self.setGraphVal('php_apc_memory', 'free', free) if self.hasGraph('php_apc_items') and stats: self.setGraphVal('php_apc_items', 'filecache', stats['cache_sys']['num_entries']) self.setGraphVal('php_apc_items', 'usercache', stats['cache_user']['num_entries']) if self.hasGraph('php_apc_reqs_filecache') and stats: self.setGraphVal('php_apc_reqs_filecache', 'hits', stats['cache_sys']['num_hits']) self.setGraphVal('php_apc_reqs_filecache', 'misses', stats['cache_sys']['num_misses']) self.setGraphVal('php_apc_reqs_filecache', 'inserts', stats['cache_sys']['num_inserts']) if self.hasGraph('php_apc_reqs_usercache') and stats: self.setGraphVal('php_apc_reqs_usercache', 'hits', stats['cache_user']['num_hits']) self.setGraphVal('php_apc_reqs_usercache', 'misses', stats['cache_user']['num_misses']) self.setGraphVal('php_apc_reqs_usercache', 'inserts', stats['cache_user']['num_inserts']) if self.hasGraph('php_apc_expunge') and stats: self.setGraphVal('php_apc_expunge', 'filecache', stats['cache_sys']['expunges']) self.setGraphVal('php_apc_expunge', 'usercache', stats['cache_user']['expunges']) if self.hasGraph('php_apc_mem_util_frag'): self.setGraphVal('php_apc_mem_util_frag', 'util', stats['memory']['utilization_ratio'] * 100) self.setGraphVal('php_apc_mem_util_frag', 'frag', stats['memory']['fragmentation_ratio'] * 100) if self.hasGraph('php_apc_mem_frag_count'): self.setGraphVal('php_apc_mem_frag_count', 'num', stats['memory']['fragment_count']) if self.hasGraph('php_apc_mem_frag_avgsize'): self.setGraphVal('php_apc_mem_frag_avgsize', 'size', stats['memory']['fragment_avg_size'])
[ "def", "retrieveVals", "(", "self", ")", ":", "apcinfo", "=", "APCinfo", "(", "self", ".", "_host", ",", "self", ".", "_port", ",", "self", ".", "_user", ",", "self", ".", "_password", ",", "self", ".", "_monpath", ",", "self", ".", "_ssl", ",", "self", ".", "_extras", ")", "stats", "=", "apcinfo", ".", "getAllStats", "(", ")", "if", "self", ".", "hasGraph", "(", "'php_apc_memory'", ")", "and", "stats", ":", "filecache", "=", "stats", "[", "'cache_sys'", "]", "[", "'mem_size'", "]", "usercache", "=", "stats", "[", "'cache_user'", "]", "[", "'mem_size'", "]", "total", "=", "stats", "[", "'memory'", "]", "[", "'seg_size'", "]", "*", "stats", "[", "'memory'", "]", "[", "'num_seg'", "]", "free", "=", "stats", "[", "'memory'", "]", "[", "'avail_mem'", "]", "other", "=", "total", "-", "free", "-", "filecache", "-", "usercache", "self", ".", "setGraphVal", "(", "'php_apc_memory'", ",", "'filecache'", ",", "filecache", ")", "self", ".", "setGraphVal", "(", "'php_apc_memory'", ",", "'usercache'", ",", "usercache", ")", "self", ".", "setGraphVal", "(", "'php_apc_memory'", ",", "'other'", ",", "other", ")", "self", ".", "setGraphVal", "(", "'php_apc_memory'", ",", "'free'", ",", "free", ")", "if", "self", ".", "hasGraph", "(", "'php_apc_items'", ")", "and", "stats", ":", "self", ".", "setGraphVal", "(", "'php_apc_items'", ",", "'filecache'", ",", "stats", "[", "'cache_sys'", "]", "[", "'num_entries'", "]", ")", "self", ".", "setGraphVal", "(", "'php_apc_items'", ",", "'usercache'", ",", "stats", "[", "'cache_user'", "]", "[", "'num_entries'", "]", ")", "if", "self", ".", "hasGraph", "(", "'php_apc_reqs_filecache'", ")", "and", "stats", ":", "self", ".", "setGraphVal", "(", "'php_apc_reqs_filecache'", ",", "'hits'", ",", "stats", "[", "'cache_sys'", "]", "[", "'num_hits'", "]", ")", "self", ".", "setGraphVal", "(", "'php_apc_reqs_filecache'", ",", "'misses'", ",", "stats", "[", "'cache_sys'", "]", "[", "'num_misses'", "]", ")", "self", ".", "setGraphVal", "(", "'php_apc_reqs_filecache'", ",", "'inserts'", ",", "stats", "[", "'cache_sys'", "]", "[", "'num_inserts'", "]", ")", "if", "self", ".", "hasGraph", "(", "'php_apc_reqs_usercache'", ")", "and", "stats", ":", "self", ".", "setGraphVal", "(", "'php_apc_reqs_usercache'", ",", "'hits'", ",", "stats", "[", "'cache_user'", "]", "[", "'num_hits'", "]", ")", "self", ".", "setGraphVal", "(", "'php_apc_reqs_usercache'", ",", "'misses'", ",", "stats", "[", "'cache_user'", "]", "[", "'num_misses'", "]", ")", "self", ".", "setGraphVal", "(", "'php_apc_reqs_usercache'", ",", "'inserts'", ",", "stats", "[", "'cache_user'", "]", "[", "'num_inserts'", "]", ")", "if", "self", ".", "hasGraph", "(", "'php_apc_expunge'", ")", "and", "stats", ":", "self", ".", "setGraphVal", "(", "'php_apc_expunge'", ",", "'filecache'", ",", "stats", "[", "'cache_sys'", "]", "[", "'expunges'", "]", ")", "self", ".", "setGraphVal", "(", "'php_apc_expunge'", ",", "'usercache'", ",", "stats", "[", "'cache_user'", "]", "[", "'expunges'", "]", ")", "if", "self", ".", "hasGraph", "(", "'php_apc_mem_util_frag'", ")", ":", "self", ".", "setGraphVal", "(", "'php_apc_mem_util_frag'", ",", "'util'", ",", "stats", "[", "'memory'", "]", "[", "'utilization_ratio'", "]", "*", "100", ")", "self", ".", "setGraphVal", "(", "'php_apc_mem_util_frag'", ",", "'frag'", ",", "stats", "[", "'memory'", "]", "[", "'fragmentation_ratio'", "]", "*", "100", ")", "if", "self", ".", "hasGraph", "(", "'php_apc_mem_frag_count'", ")", ":", "self", ".", "setGraphVal", "(", "'php_apc_mem_frag_count'", ",", "'num'", 
",", "stats", "[", "'memory'", "]", "[", "'fragment_count'", "]", ")", "if", "self", ".", "hasGraph", "(", "'php_apc_mem_frag_avgsize'", ")", ":", "self", ".", "setGraphVal", "(", "'php_apc_mem_frag_avgsize'", ",", "'size'", ",", "stats", "[", "'memory'", "]", "[", "'fragment_avg_size'", "]", ")" ]
Retrieve values for graphs.
[ "Retrieve", "values", "for", "graphs", "." ]
4f58a64b6b37c85a84cc7e1e07aafaa0321b249d
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/plugins/phpapcstats.py#L196-L246
train
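The memory graph in the APC record above partitions one number four ways. The byte counts below are invented, but the identity total == filecache + usercache + other + free is the point of the arithmetic:

memory = {'seg_size': 33554432, 'num_seg': 1, 'avail_mem': 20971520}   # invented
cache_sys = {'mem_size': 8388608}
cache_user = {'mem_size': 2097152}
total = memory['seg_size'] * memory['num_seg']                         # one 32 MiB segment
free = memory['avail_mem']
other = total - free - cache_sys['mem_size'] - cache_user['mem_size']  # 2097152 bytes unaccounted
assert total == cache_sys['mem_size'] + cache_user['mem_size'] + other + free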
aouyar/PyMunin
pymunin/plugins/phpapcstats.py
MuninPHPapcPlugin.autoconf
def autoconf(self): """Implements Munin Plugin Auto-Configuration Option. @return: True if plugin can be auto-configured, False otherwise. """ apcinfo = APCinfo(self._host, self._port, self._user, self._password, self._monpath, self._ssl) return apcinfo is not None
python
def autoconf(self): """Implements Munin Plugin Auto-Configuration Option. @return: True if plugin can be auto-configured, False otherwise. """ apcinfo = APCinfo(self._host, self._port, self._user, self._password, self._monpath, self._ssl) return apcinfo is not None
[ "def", "autoconf", "(", "self", ")", ":", "apcinfo", "=", "APCinfo", "(", "self", ".", "_host", ",", "self", ".", "_port", ",", "self", ".", "_user", ",", "self", ".", "_password", ",", "self", ".", "_monpath", ",", "self", ".", "_ssl", ")", "return", "apcinfo", "is", "not", "None" ]
Implements Munin Plugin Auto-Configuration Option. @return: True if plugin can be auto-configured, False otherwise.
[ "Implements", "Munin", "Plugin", "Auto", "-", "Configuration", "Option", "." ]
4f58a64b6b37c85a84cc7e1e07aafaa0321b249d
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/plugins/phpapcstats.py#L248-L256
train
aouyar/PyMunin
pysysinfo/varnish.py
VarnishInfo.getStats
def getStats(self): """Runs varnishstats command to get stats from Varnish Cache. @return: Dictionary of stats. """ info_dict = {} args = [varnishstatCmd, '-1'] if self._instance is not None: args.extend(['-n', self._instance]) output = util.exec_command(args) if self._descDict is None: self._descDict = {} for line in output.splitlines(): mobj = re.match('(\S+)\s+(\d+)\s+(\d+\.\d+|\.)\s+(\S.*\S)\s*$', line) if mobj: fname = mobj.group(1).replace('.', '_') info_dict[fname] = util.parse_value(mobj.group(2)) self._descDict[fname] = mobj.group(4) return info_dict
python
def getStats(self): """Runs varnishstats command to get stats from Varnish Cache. @return: Dictionary of stats. """ info_dict = {} args = [varnishstatCmd, '-1'] if self._instance is not None: args.extend(['-n', self._instance]) output = util.exec_command(args) if self._descDict is None: self._descDict = {} for line in output.splitlines(): mobj = re.match('(\S+)\s+(\d+)\s+(\d+\.\d+|\.)\s+(\S.*\S)\s*$', line) if mobj: fname = mobj.group(1).replace('.', '_') info_dict[fname] = util.parse_value(mobj.group(2)) self._descDict[fname] = mobj.group(4) return info_dict
[ "def", "getStats", "(", "self", ")", ":", "info_dict", "=", "{", "}", "args", "=", "[", "varnishstatCmd", ",", "'-1'", "]", "if", "self", ".", "_instance", "is", "not", "None", ":", "args", ".", "extend", "(", "[", "'-n'", ",", "self", ".", "_instance", "]", ")", "output", "=", "util", ".", "exec_command", "(", "args", ")", "if", "self", ".", "_descDict", "is", "None", ":", "self", ".", "_descDict", "=", "{", "}", "for", "line", "in", "output", ".", "splitlines", "(", ")", ":", "mobj", "=", "re", ".", "match", "(", "'(\\S+)\\s+(\\d+)\\s+(\\d+\\.\\d+|\\.)\\s+(\\S.*\\S)\\s*$'", ",", "line", ")", "if", "mobj", ":", "fname", "=", "mobj", ".", "group", "(", "1", ")", ".", "replace", "(", "'.'", ",", "'_'", ")", "info_dict", "[", "fname", "]", "=", "util", ".", "parse_value", "(", "mobj", ".", "group", "(", "2", ")", ")", "self", ".", "_descDict", "[", "fname", "]", "=", "mobj", ".", "group", "(", "4", ")", "return", "info_dict" ]
Runs varnishstats command to get stats from Varnish Cache. @return: Dictionary of stats.
[ "Runs", "varnishstats", "command", "to", "get", "stats", "from", "Varnish", "Cache", ".", "@return", ":", "Dictionary", "of", "stats", "." ]
4f58a64b6b37c85a84cc7e1e07aafaa0321b249d
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/varnish.py#L39-L59
train
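The varnishstat -1 parser in the record above can be exercised standalone. The two sample lines are hypothetical but follow the name/value/rate/description column layout the regex expects; note how the description cache is filled as a side effect of parsing, and int() stands in for util.parse_value.

import re

sample = ("client_conn          1234    1.23 Client connections accepted\n"
          "cache_hit            5678    5.67 Cache hits\n")
info, desc = {}, {}
for line in sample.splitlines():
    mobj = re.match(r'(\S+)\s+(\d+)\s+(\d+\.\d+|\.)\s+(\S.*\S)\s*$', line)
    if mobj:
        fname = mobj.group(1).replace('.', '_')   # dots in stat names become underscores
        info[fname] = int(mobj.group(2))
        desc[fname] = mobj.group(4)
print(info)   # {'client_conn': 1234, 'cache_hit': 5678}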
aouyar/PyMunin
pysysinfo/varnish.py
VarnishInfo.getDesc
def getDesc(self, entry): """Returns description for stat entry. @param entry: Entry name. @return: Description for entry. """ if len(self._descDict) == 0: self.getStats() return self._descDict.get(entry)
python
def getDesc(self, entry): """Returns description for stat entry. @param entry: Entry name. @return: Description for entry. """ if len(self._descDict) == 0: self.getStats() return self._descDict.get(entry)
[ "def", "getDesc", "(", "self", ",", "entry", ")", ":", "if", "len", "(", "self", ".", "_descDict", ")", "==", "0", ":", "self", ".", "getStats", "(", ")", "return", "self", ".", "_descDict", ".", "get", "(", "entry", ")" ]
Returns description for stat entry. @param entry: Entry name. @return: Description for entry.
[ "Returns", "description", "for", "stat", "entry", "." ]
4f58a64b6b37c85a84cc7e1e07aafaa0321b249d
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/varnish.py#L71-L80
train
aouyar/PyMunin
pymunin/plugins/phpopcstats.py
MuninPHPOPCPlugin.retrieveVals
def retrieveVals(self): """Retrieve values for graphs.""" opcinfo = OPCinfo(self._host, self._port, self._user, self._password, self._monpath, self._ssl) stats = opcinfo.getAllStats() if self.hasGraph('php_opc_memory') and stats: mem = stats['memory_usage'] keys = ('used_memory', 'wasted_memory', 'free_memory') map(lambda k:self.setGraphVal('php_opc_memory',k,mem[k]), keys) if self.hasGraph('php_opc_opcache_statistics') and stats: st = stats['opcache_statistics'] self.setGraphVal('php_opc_opcache_statistics', 'hits', st['hits']) self.setGraphVal('php_opc_opcache_statistics', 'misses', st['misses']) if self.hasGraph('php_opc_opcache_hitrate') and stats: st = stats['opcache_statistics'] self.setGraphVal('php_opc_opcache_hitrate', 'opcache_hit_rate', st['opcache_hit_rate']) if self.hasGraph('php_opc_key_status') and stats: st = stats['opcache_statistics'] wasted = st['num_cached_keys'] - st['num_cached_scripts'] free = st['max_cached_keys'] - st['num_cached_keys'] self.setGraphVal('php_opc_key_status', 'num_cached_scripts', st['num_cached_scripts']) self.setGraphVal('php_opc_key_status', 'num_wasted_keys', wasted) self.setGraphVal('php_opc_key_status', 'num_free_keys', free)
python
def retrieveVals(self): """Retrieve values for graphs.""" opcinfo = OPCinfo(self._host, self._port, self._user, self._password, self._monpath, self._ssl) stats = opcinfo.getAllStats() if self.hasGraph('php_opc_memory') and stats: mem = stats['memory_usage'] keys = ('used_memory', 'wasted_memory', 'free_memory') map(lambda k:self.setGraphVal('php_opc_memory',k,mem[k]), keys) if self.hasGraph('php_opc_opcache_statistics') and stats: st = stats['opcache_statistics'] self.setGraphVal('php_opc_opcache_statistics', 'hits', st['hits']) self.setGraphVal('php_opc_opcache_statistics', 'misses', st['misses']) if self.hasGraph('php_opc_opcache_hitrate') and stats: st = stats['opcache_statistics'] self.setGraphVal('php_opc_opcache_hitrate', 'opcache_hit_rate', st['opcache_hit_rate']) if self.hasGraph('php_opc_key_status') and stats: st = stats['opcache_statistics'] wasted = st['num_cached_keys'] - st['num_cached_scripts'] free = st['max_cached_keys'] - st['num_cached_keys'] self.setGraphVal('php_opc_key_status', 'num_cached_scripts', st['num_cached_scripts']) self.setGraphVal('php_opc_key_status', 'num_wasted_keys', wasted) self.setGraphVal('php_opc_key_status', 'num_free_keys', free)
[ "def", "retrieveVals", "(", "self", ")", ":", "opcinfo", "=", "OPCinfo", "(", "self", ".", "_host", ",", "self", ".", "_port", ",", "self", ".", "_user", ",", "self", ".", "_password", ",", "self", ".", "_monpath", ",", "self", ".", "_ssl", ")", "stats", "=", "opcinfo", ".", "getAllStats", "(", ")", "if", "self", ".", "hasGraph", "(", "'php_opc_memory'", ")", "and", "stats", ":", "mem", "=", "stats", "[", "'memory_usage'", "]", "keys", "=", "(", "'used_memory'", ",", "'wasted_memory'", ",", "'free_memory'", ")", "map", "(", "lambda", "k", ":", "self", ".", "setGraphVal", "(", "'php_opc_memory'", ",", "k", ",", "mem", "[", "k", "]", ")", ",", "keys", ")", "if", "self", ".", "hasGraph", "(", "'php_opc_opcache_statistics'", ")", "and", "stats", ":", "st", "=", "stats", "[", "'opcache_statistics'", "]", "self", ".", "setGraphVal", "(", "'php_opc_opcache_statistics'", ",", "'hits'", ",", "st", "[", "'hits'", "]", ")", "self", ".", "setGraphVal", "(", "'php_opc_opcache_statistics'", ",", "'misses'", ",", "st", "[", "'misses'", "]", ")", "if", "self", ".", "hasGraph", "(", "'php_opc_opcache_hitrate'", ")", "and", "stats", ":", "st", "=", "stats", "[", "'opcache_statistics'", "]", "self", ".", "setGraphVal", "(", "'php_opc_opcache_hitrate'", ",", "'opcache_hit_rate'", ",", "st", "[", "'opcache_hit_rate'", "]", ")", "if", "self", ".", "hasGraph", "(", "'php_opc_key_status'", ")", "and", "stats", ":", "st", "=", "stats", "[", "'opcache_statistics'", "]", "wasted", "=", "st", "[", "'num_cached_keys'", "]", "-", "st", "[", "'num_cached_scripts'", "]", "free", "=", "st", "[", "'max_cached_keys'", "]", "-", "st", "[", "'num_cached_keys'", "]", "self", ".", "setGraphVal", "(", "'php_opc_key_status'", ",", "'num_cached_scripts'", ",", "st", "[", "'num_cached_scripts'", "]", ")", "self", ".", "setGraphVal", "(", "'php_opc_key_status'", ",", "'num_wasted_keys'", ",", "wasted", ")", "self", ".", "setGraphVal", "(", "'php_opc_key_status'", ",", "'num_free_keys'", ",", "free", ")" ]
Retrieve values for graphs.
[ "Retrieve", "values", "for", "graphs", "." ]
4f58a64b6b37c85a84cc7e1e07aafaa0321b249d
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/plugins/phpopcstats.py#L148-L177
train
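Note: the `map(lambda k: ...)` call in the record above relies on Python 2, where map is eager; on Python 3 map returns a lazy iterator, so setGraphVal would never actually run. A minimal portable sketch, using the same names as the record:

    for k in ('used_memory', 'wasted_memory', 'free_memory'):
        self.setGraphVal('php_opc_memory', k, mem[k])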
aouyar/PyMunin
pymunin/plugins/phpopcstats.py
MuninPHPOPCPlugin.autoconf
def autoconf(self): """Implements Munin Plugin Auto-Configuration Option. @return: True if plugin can be auto-configured, False otherwise. """ opcinfo = OPCinfo(self._host, self._port, self._user, self._password, self._monpath, self._ssl) return opcinfo is not None
python
def autoconf(self): """Implements Munin Plugin Auto-Configuration Option. @return: True if plugin can be auto-configured, False otherwise. """ opcinfo = OPCinfo(self._host, self._port, self._user, self._password, self._monpath, self._ssl) return opcinfo is not None
[ "def", "autoconf", "(", "self", ")", ":", "opcinfo", "=", "OPCinfo", "(", "self", ".", "_host", ",", "self", ".", "_port", ",", "self", ".", "_user", ",", "self", ".", "_password", ",", "self", ".", "_monpath", ",", "self", ".", "_ssl", ")", "return", "opcinfo", "is", "not", "None" ]
Implements Munin Plugin Auto-Configuration Option. @return: True if plugin can be auto-configured, False otherwise.
[ "Implements", "Munin", "Plugin", "Auto", "-", "Configuration", "Option", "." ]
4f58a64b6b37c85a84cc7e1e07aafaa0321b249d
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/plugins/phpopcstats.py#L179-L187
train
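Note: OPCinfo(...) either returns an instance or raises, so the `opcinfo is not None` test above is effectively always True once the constructor succeeds. A stricter autoconf would report failure when the probe raises; a sketch under that assumption, keeping the record's names:

    def autoconf(self):
        try:
            OPCinfo(self._host, self._port, self._user,
                    self._password, self._monpath, self._ssl)
            return True
        except Exception:
            return False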
ContextLab/quail
quail/analysis/clustering.py
fingerprint_helper
def fingerprint_helper(egg, permute=False, n_perms=1000, match='exact', distance='euclidean', features=None): """ Computes clustering along a set of feature dimensions Parameters ---------- egg : quail.Egg Data to analyze dist_funcs : dict Dictionary of distance functions for feature clustering analyses Returns ---------- probabilities : Numpy array Each number represents clustering along a different feature dimension """ if features is None: features = egg.dist_funcs.keys() inds = egg.pres.index.tolist() slices = [egg.crack(subjects=[i], lists=[j]) for i, j in inds] weights = _get_weights(slices, features, distdict, permute, n_perms, match, distance) return np.nanmean(weights, axis=0)
python
def fingerprint_helper(egg, permute=False, n_perms=1000, match='exact', distance='euclidean', features=None): """ Computes clustering along a set of feature dimensions Parameters ---------- egg : quail.Egg Data to analyze dist_funcs : dict Dictionary of distance functions for feature clustering analyses Returns ---------- probabilities : Numpy array Each number represents clustering along a different feature dimension """ if features is None: features = egg.dist_funcs.keys() inds = egg.pres.index.tolist() slices = [egg.crack(subjects=[i], lists=[j]) for i, j in inds] weights = _get_weights(slices, features, distdict, permute, n_perms, match, distance) return np.nanmean(weights, axis=0)
[ "def", "fingerprint_helper", "(", "egg", ",", "permute", "=", "False", ",", "n_perms", "=", "1000", ",", "match", "=", "'exact'", ",", "distance", "=", "'euclidean'", ",", "features", "=", "None", ")", ":", "if", "features", "is", "None", ":", "features", "=", "egg", ".", "dist_funcs", ".", "keys", "(", ")", "inds", "=", "egg", ".", "pres", ".", "index", ".", "tolist", "(", ")", "slices", "=", "[", "egg", ".", "crack", "(", "subjects", "=", "[", "i", "]", ",", "lists", "=", "[", "j", "]", ")", "for", "i", ",", "j", "in", "inds", "]", "weights", "=", "_get_weights", "(", "slices", ",", "features", ",", "distdict", ",", "permute", ",", "n_perms", ",", "match", ",", "distance", ")", "return", "np", ".", "nanmean", "(", "weights", ",", "axis", "=", "0", ")" ]
Computes clustering along a set of feature dimensions Parameters ---------- egg : quail.Egg Data to analyze features : list, optional Feature dimensions along which clustering is computed; defaults to the keys of egg.dist_funcs, the dictionary of distance functions for feature clustering analyses Returns ---------- probabilities : Numpy array Each number represents clustering along a different feature dimension
[ "Computes", "clustering", "along", "a", "set", "of", "feature", "dimensions" ]
71dd53c792dd915dc84879d8237e3582dd68b7a4
https://github.com/ContextLab/quail/blob/71dd53c792dd915dc84879d8237e3582dd68b7a4/quail/analysis/clustering.py#L9-L37
train
ContextLab/quail
quail/analysis/clustering.py
compute_feature_weights
def compute_feature_weights(pres_list, rec_list, feature_list, distances): """ Compute clustering scores along a set of feature dimensions Parameters ---------- pres_list : list list of presented words rec_list : list list of recalled words feature_list : list list of feature dicts for presented words distances : dict dict of distance matrices for each feature Returns ---------- weights : list list of clustering scores for each feature dimension """ # initialize the weights object for just this list weights = {} for feature in feature_list[0]: weights[feature] = [] # return default list if there is not enough data to compute the fingerprint if len(rec_list) <= 2: print('Not enough recalls to compute fingerprint, returning default' 'fingerprint.. (everything is .5)') for feature in feature_list[0]: weights[feature] = .5 return [weights[key] for key in weights] # initialize past word list past_words = [] past_idxs = [] # loop over words for i in range(len(rec_list)-1): # grab current word c = rec_list[i] # grab the next word n = rec_list[i + 1] # if both recalled words are in the encoding list and haven't been recalled before if (c in pres_list and n in pres_list) and (c not in past_words and n not in past_words): # for each feature for feature in feature_list[0]: # get the distance vector for the current word dists = distances[feature][pres_list.index(c),:] # distance between current and next word cdist = dists[pres_list.index(n)] # filter dists removing the words that have already been recalled dists_filt = np.array([dist for idx, dist in enumerate(dists) if idx not in past_idxs]) # get indices avg_rank = np.mean(np.where(np.sort(dists_filt)[::-1] == cdist)[0]+1) # compute the weight weights[feature].append(avg_rank / len(dists_filt)) # keep track of what has been recalled already past_idxs.append(pres_list.index(c)) past_words.append(c) # average over the cluster scores for a particular dimension for feature in weights: with warnings.catch_warnings(): warnings.simplefilter("ignore", category=RuntimeWarning) weights[feature] = np.nanmean(weights[feature]) return [weights[key] for key in weights]
python
def compute_feature_weights(pres_list, rec_list, feature_list, distances): """ Compute clustering scores along a set of feature dimensions Parameters ---------- pres_list : list list of presented words rec_list : list list of recalled words feature_list : list list of feature dicts for presented words distances : dict dict of distance matrices for each feature Returns ---------- weights : list list of clustering scores for each feature dimension """ # initialize the weights object for just this list weights = {} for feature in feature_list[0]: weights[feature] = [] # return default list if there is not enough data to compute the fingerprint if len(rec_list) <= 2: print('Not enough recalls to compute fingerprint, returning default' 'fingerprint.. (everything is .5)') for feature in feature_list[0]: weights[feature] = .5 return [weights[key] for key in weights] # initialize past word list past_words = [] past_idxs = [] # loop over words for i in range(len(rec_list)-1): # grab current word c = rec_list[i] # grab the next word n = rec_list[i + 1] # if both recalled words are in the encoding list and haven't been recalled before if (c in pres_list and n in pres_list) and (c not in past_words and n not in past_words): # for each feature for feature in feature_list[0]: # get the distance vector for the current word dists = distances[feature][pres_list.index(c),:] # distance between current and next word cdist = dists[pres_list.index(n)] # filter dists removing the words that have already been recalled dists_filt = np.array([dist for idx, dist in enumerate(dists) if idx not in past_idxs]) # get indices avg_rank = np.mean(np.where(np.sort(dists_filt)[::-1] == cdist)[0]+1) # compute the weight weights[feature].append(avg_rank / len(dists_filt)) # keep track of what has been recalled already past_idxs.append(pres_list.index(c)) past_words.append(c) # average over the cluster scores for a particular dimension for feature in weights: with warnings.catch_warnings(): warnings.simplefilter("ignore", category=RuntimeWarning) weights[feature] = np.nanmean(weights[feature]) return [weights[key] for key in weights]
[ "def", "compute_feature_weights", "(", "pres_list", ",", "rec_list", ",", "feature_list", ",", "distances", ")", ":", "# initialize the weights object for just this list", "weights", "=", "{", "}", "for", "feature", "in", "feature_list", "[", "0", "]", ":", "weights", "[", "feature", "]", "=", "[", "]", "# return default list if there is not enough data to compute the fingerprint", "if", "len", "(", "rec_list", ")", "<=", "2", ":", "print", "(", "'Not enough recalls to compute fingerprint, returning default'", "'fingerprint.. (everything is .5)'", ")", "for", "feature", "in", "feature_list", "[", "0", "]", ":", "weights", "[", "feature", "]", "=", ".5", "return", "[", "weights", "[", "key", "]", "for", "key", "in", "weights", "]", "# initialize past word list", "past_words", "=", "[", "]", "past_idxs", "=", "[", "]", "# loop over words", "for", "i", "in", "range", "(", "len", "(", "rec_list", ")", "-", "1", ")", ":", "# grab current word", "c", "=", "rec_list", "[", "i", "]", "# grab the next word", "n", "=", "rec_list", "[", "i", "+", "1", "]", "# if both recalled words are in the encoding list and haven't been recalled before", "if", "(", "c", "in", "pres_list", "and", "n", "in", "pres_list", ")", "and", "(", "c", "not", "in", "past_words", "and", "n", "not", "in", "past_words", ")", ":", "# for each feature", "for", "feature", "in", "feature_list", "[", "0", "]", ":", "# get the distance vector for the current word", "dists", "=", "distances", "[", "feature", "]", "[", "pres_list", ".", "index", "(", "c", ")", ",", ":", "]", "# distance between current and next word", "cdist", "=", "dists", "[", "pres_list", ".", "index", "(", "n", ")", "]", "# filter dists removing the words that have already been recalled", "dists_filt", "=", "np", ".", "array", "(", "[", "dist", "for", "idx", ",", "dist", "in", "enumerate", "(", "dists", ")", "if", "idx", "not", "in", "past_idxs", "]", ")", "# get indices", "avg_rank", "=", "np", ".", "mean", "(", "np", ".", "where", "(", "np", ".", "sort", "(", "dists_filt", ")", "[", ":", ":", "-", "1", "]", "==", "cdist", ")", "[", "0", "]", "+", "1", ")", "# compute the weight", "weights", "[", "feature", "]", ".", "append", "(", "avg_rank", "/", "len", "(", "dists_filt", ")", ")", "# keep track of what has been recalled already", "past_idxs", ".", "append", "(", "pres_list", ".", "index", "(", "c", ")", ")", "past_words", ".", "append", "(", "c", ")", "# average over the cluster scores for a particular dimension", "for", "feature", "in", "weights", ":", "with", "warnings", ".", "catch_warnings", "(", ")", ":", "warnings", ".", "simplefilter", "(", "\"ignore\"", ",", "category", "=", "RuntimeWarning", ")", "weights", "[", "feature", "]", "=", "np", ".", "nanmean", "(", "weights", "[", "feature", "]", ")", "return", "[", "weights", "[", "key", "]", "for", "key", "in", "weights", "]" ]
Compute clustering scores along a set of feature dimensions Parameters ---------- pres_list : list list of presented words rec_list : list list of recalled words feature_list : list list of feature dicts for presented words distances : dict dict of distance matrices for each feature Returns ---------- weights : list list of clustering scores for each feature dimension
[ "Compute", "clustering", "scores", "along", "a", "set", "of", "feature", "dimensions" ]
71dd53c792dd915dc84879d8237e3582dd68b7a4
https://github.com/ContextLab/quail/blob/71dd53c792dd915dc84879d8237e3582dd68b7a4/quail/analysis/clustering.py#L137-L218
train
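Note: the per-transition weight above is the average rank of the observed distance within the candidate distances sorted in descending order, normalized by the number of candidates, so it falls in (0, 1] with 0.5 as chance-level clustering. (The warning message in the record also concatenates 'default' and 'fingerprint..' without a space.) A toy check of the arithmetic, reusing the record's expressions with made-up numbers:

    import numpy as np
    dists_filt = np.array([3.0, 1.0, 4.0, 2.0])  # distances to not-yet-recalled candidates
    cdist = 1.0                                  # distance to the item actually recalled next
    avg_rank = np.mean(np.where(np.sort(dists_filt)[::-1] == cdist)[0] + 1)  # rank 4 of 4
    print(avg_rank / len(dists_filt))            # 1.0: the closest candidate was recalled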
ContextLab/quail
quail/analysis/lagcrp.py
lagcrp_helper
def lagcrp_helper(egg, match='exact', distance='euclidean', ts=None, features=None): """ Computes probabilities for each transition distance (probability that a word recalled will be a given distance--in presentation order--from the previous recalled word). Parameters ---------- egg : quail.Egg Data to analyze match : str (exact, best or smooth) Matching approach to compute recall matrix. If exact, the presented and recalled items must be identical (default). If best, the recalled item that is most similar to the presented items will be selected. If smooth, a weighted average of all presented items will be used, where the weights are derived from the similarity between the recalled item and each presented item. distance : str The distance function used to compare presented and recalled items. Applies only to 'best' and 'smooth' matching approaches. Can be any distance function supported by numpy.spatial.distance.cdist. Returns ---------- prec : numpy array each float is the probability of transition distance (distnaces indexed by position, from -(n-1) to (n-1), excluding zero """ def lagcrp(rec, lstlen): """Computes lag-crp for a given recall list""" def check_pair(a, b): if (a>0 and b>0) and (a!=b): return True else: return False def compute_actual(rec, lstlen): arr=pd.Series(data=np.zeros((lstlen)*2), index=list(range(-lstlen,0))+list(range(1,lstlen+1))) recalled=[] for trial in range(0,len(rec)-1): a=rec[trial] b=rec[trial+1] if check_pair(a, b) and (a not in recalled) and (b not in recalled): arr[b-a]+=1 recalled.append(a) return arr def compute_possible(rec, lstlen): arr=pd.Series(data=np.zeros((lstlen)*2), index=list(range(-lstlen,0))+list(range(1,lstlen+1))) recalled=[] for trial in rec: if np.isnan(trial): pass else: lbound=int(1-trial) ubound=int(lstlen-trial) chances=list(range(lbound,0))+list(range(1,ubound+1)) for each in recalled: if each-trial in chances: chances.remove(each-trial) arr[chances]+=1 recalled.append(trial) return arr actual = compute_actual(rec, lstlen) possible = compute_possible(rec, lstlen) crp = [0.0 if j == 0 else i / j for i, j in zip(actual, possible)] crp.insert(int(len(crp) / 2), np.nan) return crp def nlagcrp(distmat, ts=None): def lagcrp_model(s): idx = list(range(0, -s, -1)) return np.array([list(range(i, i+s)) for i in idx]) # remove nan columns distmat = distmat[:,~np.all(np.isnan(distmat), axis=0)].T model = lagcrp_model(distmat.shape[1]) lagcrp = np.zeros(ts * 2) for rdx in range(len(distmat)-1): item = distmat[rdx, :] next_item = distmat[rdx+1, :] if not np.isnan(item).any() and not np.isnan(next_item).any(): outer = np.outer(item, next_item) lagcrp += np.array(list(map(lambda lag: np.mean(outer[model==lag]), range(-ts, ts)))) lagcrp /= ts lagcrp = list(lagcrp) lagcrp.insert(int(len(lagcrp) / 2), np.nan) return np.array(lagcrp) def _format(p, r): p = np.matrix([np.array(i) for i in p]) if p.shape[0]==1: p=p.T r = map(lambda x: [np.nan]*p.shape[1] if check_nan(x) else x, r) r = np.matrix([np.array(i) for i in r]) if r.shape[0]==1: r=r.T return p, r opts = dict(match=match, distance=distance, features=features) if match is 'exact': opts.update({'features' : 'item'}) recmat = recall_matrix(egg, **opts) if not ts: ts = egg.pres.shape[1] if match in ['exact', 'best']: lagcrp = [lagcrp(lst, egg.list_length) for lst in recmat] elif match is 'smooth': lagcrp = np.atleast_2d(np.mean([nlagcrp(r, ts=ts) for r in recmat], 0)) else: raise ValueError('Match must be set to exact, best or smooth.') return np.nanmean(lagcrp, axis=0)
python
def lagcrp_helper(egg, match='exact', distance='euclidean', ts=None, features=None): """ Computes probabilities for each transition distance (probability that a word recalled will be a given distance--in presentation order--from the previous recalled word). Parameters ---------- egg : quail.Egg Data to analyze match : str (exact, best or smooth) Matching approach to compute recall matrix. If exact, the presented and recalled items must be identical (default). If best, the recalled item that is most similar to the presented items will be selected. If smooth, a weighted average of all presented items will be used, where the weights are derived from the similarity between the recalled item and each presented item. distance : str The distance function used to compare presented and recalled items. Applies only to 'best' and 'smooth' matching approaches. Can be any distance function supported by numpy.spatial.distance.cdist. Returns ---------- prec : numpy array each float is the probability of transition distance (distnaces indexed by position, from -(n-1) to (n-1), excluding zero """ def lagcrp(rec, lstlen): """Computes lag-crp for a given recall list""" def check_pair(a, b): if (a>0 and b>0) and (a!=b): return True else: return False def compute_actual(rec, lstlen): arr=pd.Series(data=np.zeros((lstlen)*2), index=list(range(-lstlen,0))+list(range(1,lstlen+1))) recalled=[] for trial in range(0,len(rec)-1): a=rec[trial] b=rec[trial+1] if check_pair(a, b) and (a not in recalled) and (b not in recalled): arr[b-a]+=1 recalled.append(a) return arr def compute_possible(rec, lstlen): arr=pd.Series(data=np.zeros((lstlen)*2), index=list(range(-lstlen,0))+list(range(1,lstlen+1))) recalled=[] for trial in rec: if np.isnan(trial): pass else: lbound=int(1-trial) ubound=int(lstlen-trial) chances=list(range(lbound,0))+list(range(1,ubound+1)) for each in recalled: if each-trial in chances: chances.remove(each-trial) arr[chances]+=1 recalled.append(trial) return arr actual = compute_actual(rec, lstlen) possible = compute_possible(rec, lstlen) crp = [0.0 if j == 0 else i / j for i, j in zip(actual, possible)] crp.insert(int(len(crp) / 2), np.nan) return crp def nlagcrp(distmat, ts=None): def lagcrp_model(s): idx = list(range(0, -s, -1)) return np.array([list(range(i, i+s)) for i in idx]) # remove nan columns distmat = distmat[:,~np.all(np.isnan(distmat), axis=0)].T model = lagcrp_model(distmat.shape[1]) lagcrp = np.zeros(ts * 2) for rdx in range(len(distmat)-1): item = distmat[rdx, :] next_item = distmat[rdx+1, :] if not np.isnan(item).any() and not np.isnan(next_item).any(): outer = np.outer(item, next_item) lagcrp += np.array(list(map(lambda lag: np.mean(outer[model==lag]), range(-ts, ts)))) lagcrp /= ts lagcrp = list(lagcrp) lagcrp.insert(int(len(lagcrp) / 2), np.nan) return np.array(lagcrp) def _format(p, r): p = np.matrix([np.array(i) for i in p]) if p.shape[0]==1: p=p.T r = map(lambda x: [np.nan]*p.shape[1] if check_nan(x) else x, r) r = np.matrix([np.array(i) for i in r]) if r.shape[0]==1: r=r.T return p, r opts = dict(match=match, distance=distance, features=features) if match is 'exact': opts.update({'features' : 'item'}) recmat = recall_matrix(egg, **opts) if not ts: ts = egg.pres.shape[1] if match in ['exact', 'best']: lagcrp = [lagcrp(lst, egg.list_length) for lst in recmat] elif match is 'smooth': lagcrp = np.atleast_2d(np.mean([nlagcrp(r, ts=ts) for r in recmat], 0)) else: raise ValueError('Match must be set to exact, best or smooth.') return np.nanmean(lagcrp, axis=0)
[ "def", "lagcrp_helper", "(", "egg", ",", "match", "=", "'exact'", ",", "distance", "=", "'euclidean'", ",", "ts", "=", "None", ",", "features", "=", "None", ")", ":", "def", "lagcrp", "(", "rec", ",", "lstlen", ")", ":", "\"\"\"Computes lag-crp for a given recall list\"\"\"", "def", "check_pair", "(", "a", ",", "b", ")", ":", "if", "(", "a", ">", "0", "and", "b", ">", "0", ")", "and", "(", "a", "!=", "b", ")", ":", "return", "True", "else", ":", "return", "False", "def", "compute_actual", "(", "rec", ",", "lstlen", ")", ":", "arr", "=", "pd", ".", "Series", "(", "data", "=", "np", ".", "zeros", "(", "(", "lstlen", ")", "*", "2", ")", ",", "index", "=", "list", "(", "range", "(", "-", "lstlen", ",", "0", ")", ")", "+", "list", "(", "range", "(", "1", ",", "lstlen", "+", "1", ")", ")", ")", "recalled", "=", "[", "]", "for", "trial", "in", "range", "(", "0", ",", "len", "(", "rec", ")", "-", "1", ")", ":", "a", "=", "rec", "[", "trial", "]", "b", "=", "rec", "[", "trial", "+", "1", "]", "if", "check_pair", "(", "a", ",", "b", ")", "and", "(", "a", "not", "in", "recalled", ")", "and", "(", "b", "not", "in", "recalled", ")", ":", "arr", "[", "b", "-", "a", "]", "+=", "1", "recalled", ".", "append", "(", "a", ")", "return", "arr", "def", "compute_possible", "(", "rec", ",", "lstlen", ")", ":", "arr", "=", "pd", ".", "Series", "(", "data", "=", "np", ".", "zeros", "(", "(", "lstlen", ")", "*", "2", ")", ",", "index", "=", "list", "(", "range", "(", "-", "lstlen", ",", "0", ")", ")", "+", "list", "(", "range", "(", "1", ",", "lstlen", "+", "1", ")", ")", ")", "recalled", "=", "[", "]", "for", "trial", "in", "rec", ":", "if", "np", ".", "isnan", "(", "trial", ")", ":", "pass", "else", ":", "lbound", "=", "int", "(", "1", "-", "trial", ")", "ubound", "=", "int", "(", "lstlen", "-", "trial", ")", "chances", "=", "list", "(", "range", "(", "lbound", ",", "0", ")", ")", "+", "list", "(", "range", "(", "1", ",", "ubound", "+", "1", ")", ")", "for", "each", "in", "recalled", ":", "if", "each", "-", "trial", "in", "chances", ":", "chances", ".", "remove", "(", "each", "-", "trial", ")", "arr", "[", "chances", "]", "+=", "1", "recalled", ".", "append", "(", "trial", ")", "return", "arr", "actual", "=", "compute_actual", "(", "rec", ",", "lstlen", ")", "possible", "=", "compute_possible", "(", "rec", ",", "lstlen", ")", "crp", "=", "[", "0.0", "if", "j", "==", "0", "else", "i", "/", "j", "for", "i", ",", "j", "in", "zip", "(", "actual", ",", "possible", ")", "]", "crp", ".", "insert", "(", "int", "(", "len", "(", "crp", ")", "/", "2", ")", ",", "np", ".", "nan", ")", "return", "crp", "def", "nlagcrp", "(", "distmat", ",", "ts", "=", "None", ")", ":", "def", "lagcrp_model", "(", "s", ")", ":", "idx", "=", "list", "(", "range", "(", "0", ",", "-", "s", ",", "-", "1", ")", ")", "return", "np", ".", "array", "(", "[", "list", "(", "range", "(", "i", ",", "i", "+", "s", ")", ")", "for", "i", "in", "idx", "]", ")", "# remove nan columns", "distmat", "=", "distmat", "[", ":", ",", "~", "np", ".", "all", "(", "np", ".", "isnan", "(", "distmat", ")", ",", "axis", "=", "0", ")", "]", ".", "T", "model", "=", "lagcrp_model", "(", "distmat", ".", "shape", "[", "1", "]", ")", "lagcrp", "=", "np", ".", "zeros", "(", "ts", "*", "2", ")", "for", "rdx", "in", "range", "(", "len", "(", "distmat", ")", "-", "1", ")", ":", "item", "=", "distmat", "[", "rdx", ",", ":", "]", "next_item", "=", "distmat", "[", "rdx", "+", "1", ",", ":", "]", "if", "not", "np", ".", "isnan", "(", "item", ")", ".", "any", "(", ")", "and", "not", 
"np", ".", "isnan", "(", "next_item", ")", ".", "any", "(", ")", ":", "outer", "=", "np", ".", "outer", "(", "item", ",", "next_item", ")", "lagcrp", "+=", "np", ".", "array", "(", "list", "(", "map", "(", "lambda", "lag", ":", "np", ".", "mean", "(", "outer", "[", "model", "==", "lag", "]", ")", ",", "range", "(", "-", "ts", ",", "ts", ")", ")", ")", ")", "lagcrp", "/=", "ts", "lagcrp", "=", "list", "(", "lagcrp", ")", "lagcrp", ".", "insert", "(", "int", "(", "len", "(", "lagcrp", ")", "/", "2", ")", ",", "np", ".", "nan", ")", "return", "np", ".", "array", "(", "lagcrp", ")", "def", "_format", "(", "p", ",", "r", ")", ":", "p", "=", "np", ".", "matrix", "(", "[", "np", ".", "array", "(", "i", ")", "for", "i", "in", "p", "]", ")", "if", "p", ".", "shape", "[", "0", "]", "==", "1", ":", "p", "=", "p", ".", "T", "r", "=", "map", "(", "lambda", "x", ":", "[", "np", ".", "nan", "]", "*", "p", ".", "shape", "[", "1", "]", "if", "check_nan", "(", "x", ")", "else", "x", ",", "r", ")", "r", "=", "np", ".", "matrix", "(", "[", "np", ".", "array", "(", "i", ")", "for", "i", "in", "r", "]", ")", "if", "r", ".", "shape", "[", "0", "]", "==", "1", ":", "r", "=", "r", ".", "T", "return", "p", ",", "r", "opts", "=", "dict", "(", "match", "=", "match", ",", "distance", "=", "distance", ",", "features", "=", "features", ")", "if", "match", "is", "'exact'", ":", "opts", ".", "update", "(", "{", "'features'", ":", "'item'", "}", ")", "recmat", "=", "recall_matrix", "(", "egg", ",", "*", "*", "opts", ")", "if", "not", "ts", ":", "ts", "=", "egg", ".", "pres", ".", "shape", "[", "1", "]", "if", "match", "in", "[", "'exact'", ",", "'best'", "]", ":", "lagcrp", "=", "[", "lagcrp", "(", "lst", ",", "egg", ".", "list_length", ")", "for", "lst", "in", "recmat", "]", "elif", "match", "is", "'smooth'", ":", "lagcrp", "=", "np", ".", "atleast_2d", "(", "np", ".", "mean", "(", "[", "nlagcrp", "(", "r", ",", "ts", "=", "ts", ")", "for", "r", "in", "recmat", "]", ",", "0", ")", ")", "else", ":", "raise", "ValueError", "(", "'Match must be set to exact, best or smooth.'", ")", "return", "np", ".", "nanmean", "(", "lagcrp", ",", "axis", "=", "0", ")" ]
Computes probabilities for each transition distance (probability that a word recalled will be a given distance--in presentation order--from the previous recalled word). Parameters ---------- egg : quail.Egg Data to analyze match : str (exact, best or smooth) Matching approach to compute recall matrix. If exact, the presented and recalled items must be identical (default). If best, the recalled item that is most similar to the presented items will be selected. If smooth, a weighted average of all presented items will be used, where the weights are derived from the similarity between the recalled item and each presented item. distance : str The distance function used to compare presented and recalled items. Applies only to 'best' and 'smooth' matching approaches. Can be any distance function supported by scipy.spatial.distance.cdist. Returns ---------- prec : numpy array each float is the probability of transition distance (distances indexed by position, from -(n-1) to (n-1), excluding zero)
[ "Computes", "probabilities", "for", "each", "transition", "distance", "(", "probability", "that", "a", "word", "recalled", "will", "be", "a", "given", "distance", "--", "in", "presentation", "order", "--", "from", "the", "previous", "recalled", "word", ")", "." ]
71dd53c792dd915dc84879d8237e3582dd68b7a4
https://github.com/ContextLab/quail/blob/71dd53c792dd915dc84879d8237e3582dd68b7a4/quail/analysis/lagcrp.py#L7-L129
train
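Note: `match is 'exact'` in the record above compares object identity rather than equality; it happens to work under CPython string interning but triggers a SyntaxWarning on Python 3.8+, and `match == 'exact'` is the robust spelling. The lag being tallied is just the difference in study positions between consecutive recalls; a toy illustration with hypothetical data (positions 1-indexed):

    rec = [1, 2, 5]                               # recall order, by study position
    lags = [b - a for a, b in zip(rec, rec[1:])]  # [1, 3]: one adjacent transition, one lag-3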
aouyar/PyMunin
pymunin/plugins/diskiostats.py
MuninDiskIOplugin.retrieveVals
def retrieveVals(self): """Retrieve values for graphs.""" if self._diskList: self._fetchDevAll('disk', self._diskList, self._info.getDiskStats) if self._mdList: self._fetchDevAll('md', self._mdList, self._info.getMDstats) if self._partList: self._fetchDevAll('part', self._partList, self._info.getPartitionStats) if self._lvList: self._fetchDevAll('lv', self._lvList, self._info.getLVstats) self._fetchDevAll('fs', self._fsList, self._info.getFilesystemStats)
python
def retrieveVals(self): """Retrieve values for graphs.""" if self._diskList: self._fetchDevAll('disk', self._diskList, self._info.getDiskStats) if self._mdList: self._fetchDevAll('md', self._mdList, self._info.getMDstats) if self._partList: self._fetchDevAll('part', self._partList, self._info.getPartitionStats) if self._lvList: self._fetchDevAll('lv', self._lvList, self._info.getLVstats) self._fetchDevAll('fs', self._fsList, self._info.getFilesystemStats)
[ "def", "retrieveVals", "(", "self", ")", ":", "if", "self", ".", "_diskList", ":", "self", ".", "_fetchDevAll", "(", "'disk'", ",", "self", ".", "_diskList", ",", "self", ".", "_info", ".", "getDiskStats", ")", "if", "self", ".", "_mdList", ":", "self", ".", "_fetchDevAll", "(", "'md'", ",", "self", ".", "_mdList", ",", "self", ".", "_info", ".", "getMDstats", ")", "if", "self", ".", "_partList", ":", "self", ".", "_fetchDevAll", "(", "'part'", ",", "self", ".", "_partList", ",", "self", ".", "_info", ".", "getPartitionStats", ")", "if", "self", ".", "_lvList", ":", "self", ".", "_fetchDevAll", "(", "'lv'", ",", "self", ".", "_lvList", ",", "self", ".", "_info", ".", "getLVstats", ")", "self", ".", "_fetchDevAll", "(", "'fs'", ",", "self", ".", "_fsList", ",", "self", ".", "_info", ".", "getFilesystemStats", ")" ]
Retrieve values for graphs.
[ "Retrieve", "values", "for", "graphs", "." ]
4f58a64b6b37c85a84cc7e1e07aafaa0321b249d
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/plugins/diskiostats.py#L125-L140
train
aouyar/PyMunin
pymunin/plugins/diskiostats.py
MuninDiskIOplugin._configDevRequests
def _configDevRequests(self, namestr, titlestr, devlist): """Generate configuration for I/O Request stats. @param namestr: Field name component indicating device type. @param titlestr: Title component indicating device type. @param devlist: List of devices. """ name = 'diskio_%s_requests' % namestr if self.graphEnabled(name): graph = MuninGraph('Disk I/O - %s - Requests' % titlestr, self._category, info='Disk I/O - %s Throughput, Read / write requests per second.' % titlestr, args='--base 1000 --lower-limit 0', vlabel='reqs/sec read (-) / write (+)', printf='%6.1lf', autoFixNames = True) for dev in devlist: graph.addField(dev + '_read', fixLabel(dev, maxLabelLenGraphDual, repl = '..', truncend=False, delim = self._labelDelim.get(namestr)), draw='LINE2', type='DERIVE', min=0, graph=False) graph.addField(dev + '_write', fixLabel(dev, maxLabelLenGraphDual, repl = '..', truncend=False, delim = self._labelDelim.get(namestr)), draw='LINE2', type='DERIVE', min=0, negative=(dev + '_read'),info=dev) self.appendGraph(name, graph)
python
def _configDevRequests(self, namestr, titlestr, devlist): """Generate configuration for I/O Request stats. @param namestr: Field name component indicating device type. @param titlestr: Title component indicating device type. @param devlist: List of devices. """ name = 'diskio_%s_requests' % namestr if self.graphEnabled(name): graph = MuninGraph('Disk I/O - %s - Requests' % titlestr, self._category, info='Disk I/O - %s Throughput, Read / write requests per second.' % titlestr, args='--base 1000 --lower-limit 0', vlabel='reqs/sec read (-) / write (+)', printf='%6.1lf', autoFixNames = True) for dev in devlist: graph.addField(dev + '_read', fixLabel(dev, maxLabelLenGraphDual, repl = '..', truncend=False, delim = self._labelDelim.get(namestr)), draw='LINE2', type='DERIVE', min=0, graph=False) graph.addField(dev + '_write', fixLabel(dev, maxLabelLenGraphDual, repl = '..', truncend=False, delim = self._labelDelim.get(namestr)), draw='LINE2', type='DERIVE', min=0, negative=(dev + '_read'),info=dev) self.appendGraph(name, graph)
[ "def", "_configDevRequests", "(", "self", ",", "namestr", ",", "titlestr", ",", "devlist", ")", ":", "name", "=", "'diskio_%s_requests'", "%", "namestr", "if", "self", ".", "graphEnabled", "(", "name", ")", ":", "graph", "=", "MuninGraph", "(", "'Disk I/O - %s - Requests'", "%", "titlestr", ",", "self", ".", "_category", ",", "info", "=", "'Disk I/O - %s Throughput, Read / write requests per second.'", "%", "titlestr", ",", "args", "=", "'--base 1000 --lower-limit 0'", ",", "vlabel", "=", "'reqs/sec read (-) / write (+)'", ",", "printf", "=", "'%6.1lf'", ",", "autoFixNames", "=", "True", ")", "for", "dev", "in", "devlist", ":", "graph", ".", "addField", "(", "dev", "+", "'_read'", ",", "fixLabel", "(", "dev", ",", "maxLabelLenGraphDual", ",", "repl", "=", "'..'", ",", "truncend", "=", "False", ",", "delim", "=", "self", ".", "_labelDelim", ".", "get", "(", "namestr", ")", ")", ",", "draw", "=", "'LINE2'", ",", "type", "=", "'DERIVE'", ",", "min", "=", "0", ",", "graph", "=", "False", ")", "graph", ".", "addField", "(", "dev", "+", "'_write'", ",", "fixLabel", "(", "dev", ",", "maxLabelLenGraphDual", ",", "repl", "=", "'..'", ",", "truncend", "=", "False", ",", "delim", "=", "self", ".", "_labelDelim", ".", "get", "(", "namestr", ")", ")", ",", "draw", "=", "'LINE2'", ",", "type", "=", "'DERIVE'", ",", "min", "=", "0", ",", "negative", "=", "(", "dev", "+", "'_read'", ")", ",", "info", "=", "dev", ")", "self", ".", "appendGraph", "(", "name", ",", "graph", ")" ]
Generate configuration for I/O Request stats. @param namestr: Field name component indicating device type. @param titlestr: Title component indicating device type. @param devlist: List of devices.
[ "Generate", "configuration", "for", "I", "/", "O", "Request", "stats", "." ]
4f58a64b6b37c85a84cc7e1e07aafaa0321b249d
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/plugins/diskiostats.py#L142-L170
train
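Note: the read/write pairing above is the standard Munin dual-axis idiom: the read field is registered with graph=False and the write field references it through negative=, so reads plot below the axis and writes above; type='DERIVE' with min=0 converts the kernel's monotonically increasing counters into per-second rates while suppressing negative spikes on counter resets. A stripped-down sketch with a hypothetical device name:

    graph.addField('sda_read', 'sda', draw='LINE2',
                   type='DERIVE', min=0, graph=False)
    graph.addField('sda_write', 'sda', draw='LINE2',
                   type='DERIVE', min=0, negative='sda_read')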
aouyar/PyMunin
pymunin/plugins/diskiostats.py
MuninDiskIOplugin._configDevActive
def _configDevActive(self, namestr, titlestr, devlist): """Generate configuration for I/O Queue Length. @param namestr: Field name component indicating device type. @param titlestr: Title component indicating device type. @param devlist: List of devices. """ name = 'diskio_%s_active' % namestr if self.graphEnabled(name): graph = MuninGraph('Disk I/O - %s - Queue Length' % titlestr, self._category, info='Disk I/O - Number of I/O Operations in Progress for every %s.' % titlestr, args='--base 1000 --lower-limit 0', printf='%6.1lf', autoFixNames = True) for dev in devlist: graph.addField(dev, fixLabel(dev, maxLabelLenGraphSimple, repl = '..', truncend=False, delim = self._labelDelim.get(namestr)), draw='AREASTACK', type='GAUGE', info=dev) self.appendGraph(name, graph)
python
def _configDevActive(self, namestr, titlestr, devlist): """Generate configuration for I/O Queue Length. @param namestr: Field name component indicating device type. @param titlestr: Title component indicating device type. @param devlist: List of devices. """ name = 'diskio_%s_active' % namestr if self.graphEnabled(name): graph = MuninGraph('Disk I/O - %s - Queue Length' % titlestr, self._category, info='Disk I/O - Number of I/O Operations in Progress for every %s.' % titlestr, args='--base 1000 --lower-limit 0', printf='%6.1lf', autoFixNames = True) for dev in devlist: graph.addField(dev, fixLabel(dev, maxLabelLenGraphSimple, repl = '..', truncend=False, delim = self._labelDelim.get(namestr)), draw='AREASTACK', type='GAUGE', info=dev) self.appendGraph(name, graph)
[ "def", "_configDevActive", "(", "self", ",", "namestr", ",", "titlestr", ",", "devlist", ")", ":", "name", "=", "'diskio_%s_active'", "%", "namestr", "if", "self", ".", "graphEnabled", "(", "name", ")", ":", "graph", "=", "MuninGraph", "(", "'Disk I/O - %s - Queue Length'", "%", "titlestr", ",", "self", ".", "_category", ",", "info", "=", "'Disk I/O - Number of I/O Operations in Progress for every %s.'", "%", "titlestr", ",", "args", "=", "'--base 1000 --lower-limit 0'", ",", "printf", "=", "'%6.1lf'", ",", "autoFixNames", "=", "True", ")", "for", "dev", "in", "devlist", ":", "graph", ".", "addField", "(", "dev", ",", "fixLabel", "(", "dev", ",", "maxLabelLenGraphSimple", ",", "repl", "=", "'..'", ",", "truncend", "=", "False", ",", "delim", "=", "self", ".", "_labelDelim", ".", "get", "(", "namestr", ")", ")", ",", "draw", "=", "'AREASTACK'", ",", "type", "=", "'GAUGE'", ",", "info", "=", "dev", ")", "self", ".", "appendGraph", "(", "name", ",", "graph", ")" ]
Generate configuration for I/O Queue Length. @param namestr: Field name component indicating device type. @param titlestr: Title component indicating device type. @param devlist: List of devices.
[ "Generate", "configuration", "for", "I", "/", "O", "Queue", "Length", "." ]
4f58a64b6b37c85a84cc7e1e07aafaa0321b249d
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/plugins/diskiostats.py#L202-L224
train
aouyar/PyMunin
pymunin/plugins/diskiostats.py
MuninDiskIOplugin._fetchDevAll
def _fetchDevAll(self, namestr, devlist, statsfunc): """Initialize I/O stats for devices. @param namestr: Field name component indicating device type. @param devlist: List of devices. @param statsfunc: Function for retrieving stats for device. """ for dev in devlist: stats = statsfunc(dev) name = 'diskio_%s_requests' % namestr if self.hasGraph(name): self.setGraphVal(name, dev + '_read', stats['rios']) self.setGraphVal(name, dev + '_write', stats['wios']) name = 'diskio_%s_bytes' % namestr if self.hasGraph(name): self.setGraphVal(name, dev + '_read', stats['rbytes']) self.setGraphVal(name, dev + '_write', stats['wbytes']) name = 'diskio_%s_active' % namestr if self.hasGraph(name): self.setGraphVal(name, dev, stats['ios_active'])
python
def _fetchDevAll(self, namestr, devlist, statsfunc): """Initialize I/O stats for devices. @param namestr: Field name component indicating device type. @param devlist: List of devices. @param statsfunc: Function for retrieving stats for device. """ for dev in devlist: stats = statsfunc(dev) name = 'diskio_%s_requests' % namestr if self.hasGraph(name): self.setGraphVal(name, dev + '_read', stats['rios']) self.setGraphVal(name, dev + '_write', stats['wios']) name = 'diskio_%s_bytes' % namestr if self.hasGraph(name): self.setGraphVal(name, dev + '_read', stats['rbytes']) self.setGraphVal(name, dev + '_write', stats['wbytes']) name = 'diskio_%s_active' % namestr if self.hasGraph(name): self.setGraphVal(name, dev, stats['ios_active'])
[ "def", "_fetchDevAll", "(", "self", ",", "namestr", ",", "devlist", ",", "statsfunc", ")", ":", "for", "dev", "in", "devlist", ":", "stats", "=", "statsfunc", "(", "dev", ")", "name", "=", "'diskio_%s_requests'", "%", "namestr", "if", "self", ".", "hasGraph", "(", "name", ")", ":", "self", ".", "setGraphVal", "(", "name", ",", "dev", "+", "'_read'", ",", "stats", "[", "'rios'", "]", ")", "self", ".", "setGraphVal", "(", "name", ",", "dev", "+", "'_write'", ",", "stats", "[", "'wios'", "]", ")", "name", "=", "'diskio_%s_bytes'", "%", "namestr", "if", "self", ".", "hasGraph", "(", "name", ")", ":", "self", ".", "setGraphVal", "(", "name", ",", "dev", "+", "'_read'", ",", "stats", "[", "'rbytes'", "]", ")", "self", ".", "setGraphVal", "(", "name", ",", "dev", "+", "'_write'", ",", "stats", "[", "'wbytes'", "]", ")", "name", "=", "'diskio_%s_active'", "%", "namestr", "if", "self", ".", "hasGraph", "(", "name", ")", ":", "self", ".", "setGraphVal", "(", "name", ",", "dev", ",", "stats", "[", "'ios_active'", "]", ")" ]
Initialize I/O stats for devices. @param namestr: Field name component indicating device type. @param devlist: List of devices. @param statsfunc: Function for retrieving stats for device.
[ "Initialize", "I", "/", "O", "stats", "for", "devices", "." ]
4f58a64b6b37c85a84cc7e1e07aafaa0321b249d
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/plugins/diskiostats.py#L226-L246
train
aouyar/PyMunin
pymunin/plugins/redisstats.py
RedisPlugin.retrieveVals
def retrieveVals(self): """Retrieve values for graphs.""" for graph_name in self.getGraphList(): for field_name in self.getGraphFieldList(graph_name): self.setGraphVal(graph_name, field_name, self._stats.get(field_name))
python
def retrieveVals(self): """Retrieve values for graphs.""" for graph_name in self.getGraphList(): for field_name in self.getGraphFieldList(graph_name): self.setGraphVal(graph_name, field_name, self._stats.get(field_name))
[ "def", "retrieveVals", "(", "self", ")", ":", "for", "graph_name", "in", "self", ".", "getGraphList", "(", ")", ":", "for", "field_name", "in", "self", ".", "getGraphFieldList", "(", "graph_name", ")", ":", "self", ".", "setGraphVal", "(", "graph_name", ",", "field_name", ",", "self", ".", "_stats", ".", "get", "(", "field_name", ")", ")" ]
Retrieve values for graphs.
[ "Retrieve", "values", "for", "graphs", "." ]
4f58a64b6b37c85a84cc7e1e07aafaa0321b249d
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/plugins/redisstats.py#L296-L300
train
aouyar/PyMunin
pymunin/plugins/sysstats.py
MuninSysStatsPlugin.retrieveVals
def retrieveVals(self): """Retrieve values for graphs.""" if self.hasGraph('sys_loadavg'): self._loadstats = self._sysinfo.getLoadAvg() if self._loadstats: self.setGraphVal('sys_loadavg', 'load15min', self._loadstats[2]) self.setGraphVal('sys_loadavg', 'load5min', self._loadstats[1]) self.setGraphVal('sys_loadavg', 'load1min', self._loadstats[0]) if self._cpustats and self.hasGraph('sys_cpu_util'): for field in self.getGraphFieldList('sys_cpu_util'): self.setGraphVal('sys_cpu_util', field, int(self._cpustats[field] * 1000)) if self._memstats: if self.hasGraph('sys_mem_util'): for field in self.getGraphFieldList('sys_mem_util'): self.setGraphVal('sys_mem_util', field, self._memstats[field]) if self.hasGraph('sys_mem_avail'): for field in self.getGraphFieldList('sys_mem_avail'): self.setGraphVal('sys_mem_avail', field, self._memstats[field]) if self.hasGraph('sys_mem_huge'): for field in ['Rsvd', 'Surp', 'Free']: fkey = 'HugePages_' + field if self._memstats.has_key(fkey): self.setGraphVal('sys_mem_huge', field, self._memstats[fkey] * self._memstats['Hugepagesize']) if self.hasGraph('sys_processes'): if self._procstats is None: self._procstats = self._sysinfo.getProcessStats() if self._procstats: self.setGraphVal('sys_processes', 'running', self._procstats['procs_running']) self.setGraphVal('sys_processes', 'blocked', self._procstats['procs_blocked']) if self.hasGraph('sys_forks'): if self._procstats is None: self._procstats = self._sysinfo.getProcessStats() if self._procstats: self.setGraphVal('sys_forks', 'forks', self._procstats['processes']) if self.hasGraph('sys_intr_ctxt'): if self._procstats is None: self._procstats = self._sysinfo.getProcessStats() if self._procstats: for field in self.getGraphFieldList('sys_intr_ctxt'): self.setGraphVal('sys_intr_ctxt', field, self._procstats[field]) if self.hasGraph('sys_vm_paging'): if self._vmstats is None: self._vmstats = self._sysinfo.getVMstats() if self._vmstats: self.setGraphVal('sys_vm_paging', 'in', self._vmstats['pgpgin']) self.setGraphVal('sys_vm_paging', 'out', self._vmstats['pgpgout']) if self.hasGraph('sys_vm_swapping'): if self._vmstats is None: self._vmstats = self._sysinfo.getVMstats() if self._vmstats: self.setGraphVal('sys_vm_swapping', 'in', self._vmstats['pswpin']) self.setGraphVal('sys_vm_swapping', 'out', self._vmstats['pswpout'])
python
def retrieveVals(self): """Retrieve values for graphs.""" if self.hasGraph('sys_loadavg'): self._loadstats = self._sysinfo.getLoadAvg() if self._loadstats: self.setGraphVal('sys_loadavg', 'load15min', self._loadstats[2]) self.setGraphVal('sys_loadavg', 'load5min', self._loadstats[1]) self.setGraphVal('sys_loadavg', 'load1min', self._loadstats[0]) if self._cpustats and self.hasGraph('sys_cpu_util'): for field in self.getGraphFieldList('sys_cpu_util'): self.setGraphVal('sys_cpu_util', field, int(self._cpustats[field] * 1000)) if self._memstats: if self.hasGraph('sys_mem_util'): for field in self.getGraphFieldList('sys_mem_util'): self.setGraphVal('sys_mem_util', field, self._memstats[field]) if self.hasGraph('sys_mem_avail'): for field in self.getGraphFieldList('sys_mem_avail'): self.setGraphVal('sys_mem_avail', field, self._memstats[field]) if self.hasGraph('sys_mem_huge'): for field in ['Rsvd', 'Surp', 'Free']: fkey = 'HugePages_' + field if self._memstats.has_key(fkey): self.setGraphVal('sys_mem_huge', field, self._memstats[fkey] * self._memstats['Hugepagesize']) if self.hasGraph('sys_processes'): if self._procstats is None: self._procstats = self._sysinfo.getProcessStats() if self._procstats: self.setGraphVal('sys_processes', 'running', self._procstats['procs_running']) self.setGraphVal('sys_processes', 'blocked', self._procstats['procs_blocked']) if self.hasGraph('sys_forks'): if self._procstats is None: self._procstats = self._sysinfo.getProcessStats() if self._procstats: self.setGraphVal('sys_forks', 'forks', self._procstats['processes']) if self.hasGraph('sys_intr_ctxt'): if self._procstats is None: self._procstats = self._sysinfo.getProcessStats() if self._procstats: for field in self.getGraphFieldList('sys_intr_ctxt'): self.setGraphVal('sys_intr_ctxt', field, self._procstats[field]) if self.hasGraph('sys_vm_paging'): if self._vmstats is None: self._vmstats = self._sysinfo.getVMstats() if self._vmstats: self.setGraphVal('sys_vm_paging', 'in', self._vmstats['pgpgin']) self.setGraphVal('sys_vm_paging', 'out', self._vmstats['pgpgout']) if self.hasGraph('sys_vm_swapping'): if self._vmstats is None: self._vmstats = self._sysinfo.getVMstats() if self._vmstats: self.setGraphVal('sys_vm_swapping', 'in', self._vmstats['pswpin']) self.setGraphVal('sys_vm_swapping', 'out', self._vmstats['pswpout'])
[ "def", "retrieveVals", "(", "self", ")", ":", "if", "self", ".", "hasGraph", "(", "'sys_loadavg'", ")", ":", "self", ".", "_loadstats", "=", "self", ".", "_sysinfo", ".", "getLoadAvg", "(", ")", "if", "self", ".", "_loadstats", ":", "self", ".", "setGraphVal", "(", "'sys_loadavg'", ",", "'load15min'", ",", "self", ".", "_loadstats", "[", "2", "]", ")", "self", ".", "setGraphVal", "(", "'sys_loadavg'", ",", "'load5min'", ",", "self", ".", "_loadstats", "[", "1", "]", ")", "self", ".", "setGraphVal", "(", "'sys_loadavg'", ",", "'load1min'", ",", "self", ".", "_loadstats", "[", "0", "]", ")", "if", "self", ".", "_cpustats", "and", "self", ".", "hasGraph", "(", "'sys_cpu_util'", ")", ":", "for", "field", "in", "self", ".", "getGraphFieldList", "(", "'sys_cpu_util'", ")", ":", "self", ".", "setGraphVal", "(", "'sys_cpu_util'", ",", "field", ",", "int", "(", "self", ".", "_cpustats", "[", "field", "]", "*", "1000", ")", ")", "if", "self", ".", "_memstats", ":", "if", "self", ".", "hasGraph", "(", "'sys_mem_util'", ")", ":", "for", "field", "in", "self", ".", "getGraphFieldList", "(", "'sys_mem_util'", ")", ":", "self", ".", "setGraphVal", "(", "'sys_mem_util'", ",", "field", ",", "self", ".", "_memstats", "[", "field", "]", ")", "if", "self", ".", "hasGraph", "(", "'sys_mem_avail'", ")", ":", "for", "field", "in", "self", ".", "getGraphFieldList", "(", "'sys_mem_avail'", ")", ":", "self", ".", "setGraphVal", "(", "'sys_mem_avail'", ",", "field", ",", "self", ".", "_memstats", "[", "field", "]", ")", "if", "self", ".", "hasGraph", "(", "'sys_mem_huge'", ")", ":", "for", "field", "in", "[", "'Rsvd'", ",", "'Surp'", ",", "'Free'", "]", ":", "fkey", "=", "'HugePages_'", "+", "field", "if", "self", ".", "_memstats", ".", "has_key", "(", "fkey", ")", ":", "self", ".", "setGraphVal", "(", "'sys_mem_huge'", ",", "field", ",", "self", ".", "_memstats", "[", "fkey", "]", "*", "self", ".", "_memstats", "[", "'Hugepagesize'", "]", ")", "if", "self", ".", "hasGraph", "(", "'sys_processes'", ")", ":", "if", "self", ".", "_procstats", "is", "None", ":", "self", ".", "_procstats", "=", "self", ".", "_sysinfo", ".", "getProcessStats", "(", ")", "if", "self", ".", "_procstats", ":", "self", ".", "setGraphVal", "(", "'sys_processes'", ",", "'running'", ",", "self", ".", "_procstats", "[", "'procs_running'", "]", ")", "self", ".", "setGraphVal", "(", "'sys_processes'", ",", "'blocked'", ",", "self", ".", "_procstats", "[", "'procs_blocked'", "]", ")", "if", "self", ".", "hasGraph", "(", "'sys_forks'", ")", ":", "if", "self", ".", "_procstats", "is", "None", ":", "self", ".", "_procstats", "=", "self", ".", "_sysinfo", ".", "getProcessStats", "(", ")", "if", "self", ".", "_procstats", ":", "self", ".", "setGraphVal", "(", "'sys_forks'", ",", "'forks'", ",", "self", ".", "_procstats", "[", "'processes'", "]", ")", "if", "self", ".", "hasGraph", "(", "'sys_intr_ctxt'", ")", ":", "if", "self", ".", "_procstats", "is", "None", ":", "self", ".", "_procstats", "=", "self", ".", "_sysinfo", ".", "getProcessStats", "(", ")", "if", "self", ".", "_procstats", ":", "for", "field", "in", "self", ".", "getGraphFieldList", "(", "'sys_intr_ctxt'", ")", ":", "self", ".", "setGraphVal", "(", "'sys_intr_ctxt'", ",", "field", ",", "self", ".", "_procstats", "[", "field", "]", ")", "if", "self", ".", "hasGraph", "(", "'sys_vm_paging'", ")", ":", "if", "self", ".", "_vmstats", "is", "None", ":", "self", ".", "_vmstats", "=", "self", ".", "_sysinfo", ".", "getVMstats", "(", ")", "if", "self", ".", "_vmstats", ":", "self", ".", 
"setGraphVal", "(", "'sys_vm_paging'", ",", "'in'", ",", "self", ".", "_vmstats", "[", "'pgpgin'", "]", ")", "self", ".", "setGraphVal", "(", "'sys_vm_paging'", ",", "'out'", ",", "self", ".", "_vmstats", "[", "'pgpgout'", "]", ")", "if", "self", ".", "hasGraph", "(", "'sys_vm_swapping'", ")", ":", "if", "self", ".", "_vmstats", "is", "None", ":", "self", ".", "_vmstats", "=", "self", ".", "_sysinfo", ".", "getVMstats", "(", ")", "if", "self", ".", "_vmstats", ":", "self", ".", "setGraphVal", "(", "'sys_vm_swapping'", ",", "'in'", ",", "self", ".", "_vmstats", "[", "'pswpin'", "]", ")", "self", ".", "setGraphVal", "(", "'sys_vm_swapping'", ",", "'out'", ",", "self", ".", "_vmstats", "[", "'pswpout'", "]", ")" ]
Retrieve values for graphs.
[ "Retrieve", "values", "for", "graphs", "." ]
4f58a64b6b37c85a84cc7e1e07aafaa0321b249d
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/plugins/sysstats.py#L211-L274
train
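Note: `self._memstats.has_key(fkey)` in the record above is Python 2 only; dict.has_key() was removed in Python 3. The portable membership test, keeping the record's names:

    if fkey in self._memstats:
        self.setGraphVal('sys_mem_huge', field,
                         self._memstats[fkey] * self._memstats['Hugepagesize'])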
mattseymour/python-env
dotenv/__init__.py
get
def get(key, default=None): """ Searches os.environ. If a key is found try evaluating its type else; return the string. returns: k->value (type as defined by ast.literal_eval) """ try: # Attempt to evaluate into python literal return ast.literal_eval(os.environ.get(key.upper(), default)) except (ValueError, SyntaxError): return os.environ.get(key.upper(), default)
python
def get(key, default=None): """ Searches os.environ. If a key is found try evaluating its type else; return the string. returns: k->value (type as defined by ast.literal_eval) """ try: # Attempt to evaluate into python literal return ast.literal_eval(os.environ.get(key.upper(), default)) except (ValueError, SyntaxError): return os.environ.get(key.upper(), default)
[ "def", "get", "(", "key", ",", "default", "=", "None", ")", ":", "try", ":", "# Attempt to evaluate into python literal", "return", "ast", ".", "literal_eval", "(", "os", ".", "environ", ".", "get", "(", "key", ".", "upper", "(", ")", ",", "default", ")", ")", "except", "(", "ValueError", ",", "SyntaxError", ")", ":", "return", "os", ".", "environ", ".", "get", "(", "key", ".", "upper", "(", ")", ",", "default", ")" ]
Searches os.environ. If a key is found, try evaluating its type; otherwise, return the string. returns: k->value (type as defined by ast.literal_eval)
[ "Searches", "os", ".", "environ", ".", "If", "a", "key", "is", "found", "try", "evaluating", "its", "type", "else", ";", "return", "the", "string", "." ]
5ac09b1685fbba75c174c79cb40287aa49d0f208
https://github.com/mattseymour/python-env/blob/5ac09b1685fbba75c174c79cb40287aa49d0f208/dotenv/__init__.py#L17-L28
train
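Note: a short usage sketch of get() above, showing the ast.literal_eval type coercion; the variable names are hypothetical:

    import os
    os.environ['DEBUG'] = 'True'
    os.environ['APP_NAME'] = 'widget'
    get('debug')     # -> True (parsed as a Python literal)
    get('app_name')  # -> 'widget' (literal_eval raises ValueError; falls back to the raw string)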
mattseymour/python-env
dotenv/__init__.py
save
def save(filepath=None, **kwargs): """ Saves a list of keyword arguments as environment variables to a file. If no filepath given will default to the default `.env` file. """ if filepath is None: filepath = os.path.join('.env') with open(filepath, 'wb') as file_handle: file_handle.writelines( '{0}={1}\n'.format(key.upper(), val) for key, val in kwargs.items() )
python
def save(filepath=None, **kwargs): """ Saves a list of keyword arguments as environment variables to a file. If no filepath given will default to the default `.env` file. """ if filepath is None: filepath = os.path.join('.env') with open(filepath, 'wb') as file_handle: file_handle.writelines( '{0}={1}\n'.format(key.upper(), val) for key, val in kwargs.items() )
[ "def", "save", "(", "filepath", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "filepath", "is", "None", ":", "filepath", "=", "os", ".", "path", ".", "join", "(", "'.env'", ")", "with", "open", "(", "filepath", ",", "'wb'", ")", "as", "file_handle", ":", "file_handle", ".", "writelines", "(", "'{0}={1}\\n'", ".", "format", "(", "key", ".", "upper", "(", ")", ",", "val", ")", "for", "key", ",", "val", "in", "kwargs", ".", "items", "(", ")", ")" ]
Saves a set of keyword arguments as environment variables to a file. If no filepath is given, defaults to the `.env` file.
[ "Saves", "a", "list", "of", "keyword", "arguments", "as", "environment", "variables", "to", "a", "file", ".", "If", "no", "filepath", "given", "will", "default", "to", "the", "default", ".", "env", "file", "." ]
5ac09b1685fbba75c174c79cb40287aa49d0f208
https://github.com/mattseymour/python-env/blob/5ac09b1685fbba75c174c79cb40287aa49d0f208/dotenv/__init__.py#L31-L43
train
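Note: save() above opens the file in binary mode ('wb') but writes str lines; on Python 3 that raises TypeError (it only works on Python 2, where str is bytes). A minimal text-mode fix:

    with open(filepath, 'w') as file_handle:
        file_handle.writelines(
            '{0}={1}\n'.format(key.upper(), val)
            for key, val in kwargs.items()
        )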
mattseymour/python-env
dotenv/__init__.py
load
def load(filepath=None): """ Reads a .env file into os.environ. For a set filepath, open the file and read contents into os.environ. If filepath is not set then look in current dir for a .env file. """ if filepath and os.path.exists(filepath): pass else: if not os.path.exists('.env'): return False filepath = os.path.join('.env') for key, value in _get_line_(filepath): # set the key, value in the python environment vars dictionary # does not make modifications system wide. os.environ.setdefault(key, str(value)) return True
python
def load(filepath=None): """ Reads a .env file into os.environ. For a set filepath, open the file and read contents into os.environ. If filepath is not set then look in current dir for a .env file. """ if filepath and os.path.exists(filepath): pass else: if not os.path.exists('.env'): return False filepath = os.path.join('.env') for key, value in _get_line_(filepath): # set the key, value in the python environment vars dictionary # does not make modifications system wide. os.environ.setdefault(key, str(value)) return True
[ "def", "load", "(", "filepath", "=", "None", ")", ":", "if", "filepath", "and", "os", ".", "path", ".", "exists", "(", "filepath", ")", ":", "pass", "else", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "'.env'", ")", ":", "return", "False", "filepath", "=", "os", ".", "path", ".", "join", "(", "'.env'", ")", "for", "key", ",", "value", "in", "_get_line_", "(", "filepath", ")", ":", "# set the key, value in the python environment vars dictionary", "# does not make modifications system wide.", "os", ".", "environ", ".", "setdefault", "(", "key", ",", "str", "(", "value", ")", ")", "return", "True" ]
Reads a .env file into os.environ. If a filepath is given, open that file and read its contents into os.environ. If no filepath is set, look for a .env file in the current directory.
[ "Reads", "a", ".", "env", "file", "into", "os", ".", "environ", "." ]
5ac09b1685fbba75c174c79cb40287aa49d0f208
https://github.com/mattseymour/python-env/blob/5ac09b1685fbba75c174c79cb40287aa49d0f208/dotenv/__init__.py#L46-L64
train
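Note: a round-trip sketch combining the save(), load() and get() records (hypothetical keys, and assuming the text-mode fix noted after save()):

    save('.env', debug=True, db_port=5432)  # writes DEBUG=True and DB_PORT=5432
    load('.env')                            # -> True; fills os.environ via setdefault
    get('db_port')                          # -> 5432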
mattseymour/python-env
dotenv/__init__.py
_get_line_
def _get_line_(filepath): """ Gets each line from the file and parse the data. Attempt to translate the value into a python type is possible (falls back to string). """ for line in open(filepath): line = line.strip() # allows for comments in the file if line.startswith('#') or '=' not in line: continue # split on the first =, allows for subsiquent `=` in strings key, value = line.split('=', 1) key = key.strip().upper() value = value.strip() if not (key and value): continue try: # evaluate the string before adding into environment # resolves any hanging (') characters value = ast.literal_eval(value) except (ValueError, SyntaxError): pass #return line yield (key, value)
python
def _get_line_(filepath): """ Gets each line from the file and parse the data. Attempt to translate the value into a python type is possible (falls back to string). """ for line in open(filepath): line = line.strip() # allows for comments in the file if line.startswith('#') or '=' not in line: continue # split on the first =, allows for subsiquent `=` in strings key, value = line.split('=', 1) key = key.strip().upper() value = value.strip() if not (key and value): continue try: # evaluate the string before adding into environment # resolves any hanging (') characters value = ast.literal_eval(value) except (ValueError, SyntaxError): pass #return line yield (key, value)
[ "def", "_get_line_", "(", "filepath", ")", ":", "for", "line", "in", "open", "(", "filepath", ")", ":", "line", "=", "line", ".", "strip", "(", ")", "# allows for comments in the file", "if", "line", ".", "startswith", "(", "'#'", ")", "or", "'='", "not", "in", "line", ":", "continue", "# split on the first =, allows for subsiquent `=` in strings", "key", ",", "value", "=", "line", ".", "split", "(", "'='", ",", "1", ")", "key", "=", "key", ".", "strip", "(", ")", ".", "upper", "(", ")", "value", "=", "value", ".", "strip", "(", ")", "if", "not", "(", "key", "and", "value", ")", ":", "continue", "try", ":", "# evaluate the string before adding into environment", "# resolves any hanging (') characters", "value", "=", "ast", ".", "literal_eval", "(", "value", ")", "except", "(", "ValueError", ",", "SyntaxError", ")", ":", "pass", "#return line", "yield", "(", "key", ",", "value", ")" ]
Gets each line from the file and parses the data. Attempts to translate the value into a Python type if possible (falls back to string).
[ "Gets", "each", "line", "from", "the", "file", "and", "parse", "the", "data", ".", "Attempt", "to", "translate", "the", "value", "into", "a", "python", "type", "is", "possible", "(", "falls", "back", "to", "string", ")", "." ]
5ac09b1685fbba75c174c79cb40287aa49d0f208
https://github.com/mattseymour/python-env/blob/5ac09b1685fbba75c174c79cb40287aa49d0f208/dotenv/__init__.py#L67-L94
train
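To make the parsing rules above concrete, here is a small self-contained sketch. The input file is fabricated, and importing the private helper directly is an assumption (it lives in dotenv/__init__.py per this record's path field).

import dotenv  # assumes this repo's package is importable

with open('example.env', 'w') as fh:  # fabricated input file
    fh.write("# a comment\nport=8000\nname='api'\nbad line\n")

# comments and lines without '=' are skipped; keys are upper-cased;
# values go through ast.literal_eval when possible
print(list(dotenv._get_line_('example.env')))  # [('PORT', 8000), ('NAME', 'api')]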
aouyar/PyMunin
pysysinfo/apache.py
ApacheInfo.initStats
def initStats(self): """Query and parse Apache Web Server Status Page.""" url = "%s://%s:%d/%s?auto" % (self._proto, self._host, self._port, self._statuspath) response = util.get_url(url, self._user, self._password) self._statusDict = {} for line in response.splitlines(): mobj = re.match('(\S.*\S)\s*:\s*(\S+)\s*$', line) if mobj: self._statusDict[mobj.group(1)] = util.parse_value(mobj.group(2)) if self._statusDict.has_key('Scoreboard'): self._statusDict['MaxWorkers'] = len(self._statusDict['Scoreboard'])
python
def initStats(self): """Query and parse Apache Web Server Status Page.""" url = "%s://%s:%d/%s?auto" % (self._proto, self._host, self._port, self._statuspath) response = util.get_url(url, self._user, self._password) self._statusDict = {} for line in response.splitlines(): mobj = re.match('(\S.*\S)\s*:\s*(\S+)\s*$', line) if mobj: self._statusDict[mobj.group(1)] = util.parse_value(mobj.group(2)) if self._statusDict.has_key('Scoreboard'): self._statusDict['MaxWorkers'] = len(self._statusDict['Scoreboard'])
[ "def", "initStats", "(", "self", ")", ":", "url", "=", "\"%s://%s:%d/%s?auto\"", "%", "(", "self", ".", "_proto", ",", "self", ".", "_host", ",", "self", ".", "_port", ",", "self", ".", "_statuspath", ")", "response", "=", "util", ".", "get_url", "(", "url", ",", "self", ".", "_user", ",", "self", ".", "_password", ")", "self", ".", "_statusDict", "=", "{", "}", "for", "line", "in", "response", ".", "splitlines", "(", ")", ":", "mobj", "=", "re", ".", "match", "(", "'(\\S.*\\S)\\s*:\\s*(\\S+)\\s*$'", ",", "line", ")", "if", "mobj", ":", "self", ".", "_statusDict", "[", "mobj", ".", "group", "(", "1", ")", "]", "=", "util", ".", "parse_value", "(", "mobj", ".", "group", "(", "2", ")", ")", "if", "self", ".", "_statusDict", ".", "has_key", "(", "'Scoreboard'", ")", ":", "self", ".", "_statusDict", "[", "'MaxWorkers'", "]", "=", "len", "(", "self", ".", "_statusDict", "[", "'Scoreboard'", "]", ")" ]
Query and parse Apache Web Server Status Page.
[ "Query", "and", "parse", "Apache", "Web", "Server", "Status", "Page", "." ]
4f58a64b6b37c85a84cc7e1e07aafaa0321b249d
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/apache.py#L68-L79
train
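The heart of initStats is one regular expression over mod_status '?auto' output. The sketch below runs that step on a canned response string instead of a live server; util.parse_value is replaced by a comment, since its definition is not part of this record.

import re

# canned Apache mod_status '?auto' output
response = "Total Accesses: 1234\nBusyWorkers: 3\nIdleWorkers: 7\nScoreboard: _W__K_"
statusDict = {}
for line in response.splitlines():
    mobj = re.match(r'(\S.*\S)\s*:\s*(\S+)\s*$', line)
    if mobj:
        # the real code coerces types via util.parse_value; kept as strings here
        statusDict[mobj.group(1)] = mobj.group(2)
statusDict['MaxWorkers'] = len(statusDict['Scoreboard'])  # one slot per worker
print(statusDict)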
ContextLab/quail
quail/egg.py
Egg.get_pres_features
def get_pres_features(self, features=None): """ Returns a df of features for presented items """ if features is None: features = self.dist_funcs.keys() elif not isinstance(features, list): features = [features] return self.pres.applymap(lambda x: {k:v for k,v in x.items() if k in features} if x is not None else None)
python
def get_pres_features(self, features=None): """ Returns a df of features for presented items """ if features is None: features = self.dist_funcs.keys() elif not isinstance(features, list): features = [features] return self.pres.applymap(lambda x: {k:v for k,v in x.items() if k in features} if x is not None else None)
[ "def", "get_pres_features", "(", "self", ",", "features", "=", "None", ")", ":", "if", "features", "is", "None", ":", "features", "=", "self", ".", "dist_funcs", ".", "keys", "(", ")", "elif", "not", "isinstance", "(", "features", ",", "list", ")", ":", "features", "=", "[", "features", "]", "return", "self", ".", "pres", ".", "applymap", "(", "lambda", "x", ":", "{", "k", ":", "v", "for", "k", ",", "v", "in", "x", ".", "items", "(", ")", "if", "k", "in", "features", "}", "if", "x", "is", "not", "None", "else", "None", ")" ]
Returns a df of features for presented items
[ "Returns", "a", "df", "of", "features", "for", "presented", "items" ]
71dd53c792dd915dc84879d8237e3582dd68b7a4
https://github.com/ContextLab/quail/blob/71dd53c792dd915dc84879d8237e3582dd68b7a4/quail/egg.py#L220-L228
train
ContextLab/quail
quail/egg.py
Egg.get_rec_features
def get_rec_features(self, features=None): """ Returns a df of features for recalled items """ if features is None: features = self.dist_funcs.keys() elif not isinstance(features, list): features = [features] return self.rec.applymap(lambda x: {k:v for k,v in x.items() if k != 'item'} if x is not None else None)
python
def get_rec_features(self, features=None): """ Returns a df of features for recalled items """ if features is None: features = self.dist_funcs.keys() elif not isinstance(features, list): features = [features] return self.rec.applymap(lambda x: {k:v for k,v in x.items() if k != 'item'} if x is not None else None)
[ "def", "get_rec_features", "(", "self", ",", "features", "=", "None", ")", ":", "if", "features", "is", "None", ":", "features", "=", "self", ".", "dist_funcs", ".", "keys", "(", ")", "elif", "not", "isinstance", "(", "features", ",", "list", ")", ":", "features", "=", "[", "features", "]", "return", "self", ".", "rec", ".", "applymap", "(", "lambda", "x", ":", "{", "k", ":", "v", "for", "k", ",", "v", "in", "x", ".", "items", "(", ")", "if", "k", "!=", "'item'", "}", "if", "x", "is", "not", "None", "else", "None", ")" ]
Returns a df of features for recalled items
[ "Returns", "a", "df", "of", "features", "for", "recalled", "items" ]
71dd53c792dd915dc84879d8237e3582dd68b7a4
https://github.com/ContextLab/quail/blob/71dd53c792dd915dc84879d8237e3582dd68b7a4/quail/egg.py#L236-L244
train
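Both feature getters above reduce to the same elementwise dict filter. The toy DataFrame below shows that filter outside of quail; the cell values are fabricated, and note that DataFrame.applymap is deprecated in newer pandas in favour of DataFrame.map.

import pandas as pd

pres = pd.DataFrame([[{'item': 'cat', 'size': 'small', 'color': 'black'}, None]])
features = ['size']
# keep only the requested feature keys, passing None cells through untouched
print(pres.applymap(
    lambda x: {k: v for k, v in x.items() if k in features} if x is not None else None))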
ContextLab/quail
quail/egg.py
Egg.info
def info(self): """ Print info about the data egg """ print('Number of subjects: ' + str(self.n_subjects)) print('Number of lists per subject: ' + str(self.n_lists)) print('Number of words per list: ' + str(self.list_length)) print('Date created: ' + str(self.date_created)) print('Meta data: ' + str(self.meta))
python
def info(self): """ Print info about the data egg """ print('Number of subjects: ' + str(self.n_subjects)) print('Number of lists per subject: ' + str(self.n_lists)) print('Number of words per list: ' + str(self.list_length)) print('Date created: ' + str(self.date_created)) print('Meta data: ' + str(self.meta))
[ "def", "info", "(", "self", ")", ":", "print", "(", "'Number of subjects: '", "+", "str", "(", "self", ".", "n_subjects", ")", ")", "print", "(", "'Number of lists per subject: '", "+", "str", "(", "self", ".", "n_lists", ")", ")", "print", "(", "'Number of words per list: '", "+", "str", "(", "self", ".", "list_length", ")", ")", "print", "(", "'Date created: '", "+", "str", "(", "self", ".", "date_created", ")", ")", "print", "(", "'Meta data: '", "+", "str", "(", "self", ".", "meta", ")", ")" ]
Print info about the data egg
[ "Print", "info", "about", "the", "data", "egg" ]
71dd53c792dd915dc84879d8237e3582dd68b7a4
https://github.com/ContextLab/quail/blob/71dd53c792dd915dc84879d8237e3582dd68b7a4/quail/egg.py#L247-L255
train
ContextLab/quail
quail/egg.py
Egg.save
def save(self, fname, compression='blosc'): """ Save method for the Egg object The data will be saved as an 'egg' file, which is a dictionary containing the elements of an Egg saved in the HDF5 format using `deepdish`. Parameters ---------- fname : str A name for the file. If the file extension (.egg) is not specified, it will be appended. compression : str The kind of compression to use. See the deepdish documentation for options: http://deepdish.readthedocs.io/en/latest/api_io.html#deepdish.io.save """ # put egg vars into a dict egg = { 'pres' : df2list(self.pres), 'rec' : df2list(self.rec), 'dist_funcs' : self.dist_funcs, 'subjgroup' : self.subjgroup, 'subjname' : self.subjname, 'listgroup' : self.listgroup, 'listname' : self.listname, 'date_created' : self.date_created, 'meta' : self.meta } # if extension wasn't included, add it if fname[-4:]!='.egg': fname+='.egg' # save with warnings.catch_warnings(): warnings.simplefilter("ignore") dd.io.save(fname, egg, compression=compression)
python
def save(self, fname, compression='blosc'): """ Save method for the Egg object The data will be saved as an 'egg' file, which is a dictionary containing the elements of an Egg saved in the HDF5 format using `deepdish`. Parameters ---------- fname : str A name for the file. If the file extension (.egg) is not specified, it will be appended. compression : str The kind of compression to use. See the deepdish documentation for options: http://deepdish.readthedocs.io/en/latest/api_io.html#deepdish.io.save """ # put egg vars into a dict egg = { 'pres' : df2list(self.pres), 'rec' : df2list(self.rec), 'dist_funcs' : self.dist_funcs, 'subjgroup' : self.subjgroup, 'subjname' : self.subjname, 'listgroup' : self.listgroup, 'listname' : self.listname, 'date_created' : self.date_created, 'meta' : self.meta } # if extension wasn't included, add it if fname[-4:]!='.egg': fname+='.egg' # save with warnings.catch_warnings(): warnings.simplefilter("ignore") dd.io.save(fname, egg, compression=compression)
[ "def", "save", "(", "self", ",", "fname", ",", "compression", "=", "'blosc'", ")", ":", "# put egg vars into a dict", "egg", "=", "{", "'pres'", ":", "df2list", "(", "self", ".", "pres", ")", ",", "'rec'", ":", "df2list", "(", "self", ".", "rec", ")", ",", "'dist_funcs'", ":", "self", ".", "dist_funcs", ",", "'subjgroup'", ":", "self", ".", "subjgroup", ",", "'subjname'", ":", "self", ".", "subjname", ",", "'listgroup'", ":", "self", ".", "listgroup", ",", "'listname'", ":", "self", ".", "listname", ",", "'date_created'", ":", "self", ".", "date_created", ",", "'meta'", ":", "self", ".", "meta", "}", "# if extension wasn't included, add it", "if", "fname", "[", "-", "4", ":", "]", "!=", "'.egg'", ":", "fname", "+=", "'.egg'", "# save", "with", "warnings", ".", "catch_warnings", "(", ")", ":", "warnings", ".", "simplefilter", "(", "\"ignore\"", ")", "dd", ".", "io", ".", "save", "(", "fname", ",", "egg", ",", "compression", "=", "compression", ")" ]
Save method for the Egg object The data will be saved as an 'egg' file, which is a dictionary containing the elements of an Egg saved in the HDF5 format using `deepdish`. Parameters ---------- fname : str A name for the file. If the file extension (.egg) is not specified, it will be appended. compression : str The kind of compression to use. See the deepdish documentation for options: http://deepdish.readthedocs.io/en/latest/api_io.html#deepdish.io.save
[ "Save", "method", "for", "the", "Egg", "object" ]
71dd53c792dd915dc84879d8237e3582dd68b7a4
https://github.com/ContextLab/quail/blob/71dd53c792dd915dc84879d8237e3582dd68b7a4/quail/egg.py#L257-L298
train
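Egg.save ultimately defers to deepdish, so a round-trip with dd.io alone looks like this. The payload keys mirror the dict above, but the values are fabricated, and quail's own loader is not shown in this record.

import deepdish as dd

payload = {'pres': [['cat', 'dog']], 'rec': [['dog']], 'meta': {}}
dd.io.save('demo.egg', payload, compression='blosc')  # requires the blosc codec
print(dd.io.load('demo.egg')['pres'])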
ContextLab/quail
quail/egg.py
FriedEgg.save
def save(self, fname, compression='blosc'): """ Save method for the FriedEgg object The data will be saved as a 'fegg' file, which is a dictionary containing the elements of a FriedEgg saved in the HDF5 format using `deepdish`. Parameters ---------- fname : str A name for the file. If the file extension (.fegg) is not specified, it will be appended. compression : str The kind of compression to use. See the deepdish documentation for options: http://deepdish.readthedocs.io/en/latest/api_io.html#deepdish.io.save """ egg = { 'data' : self.data, 'analysis' : self.analysis, 'list_length' : self.list_length, 'n_lists' : self.n_lists, 'n_subjects' : self.n_subjects, 'position' : self.position, 'date_created' : self.date_created, 'meta' : self.meta } if fname[-5:]!='.fegg': fname+='.fegg' with warnings.catch_warnings(): warnings.simplefilter("ignore") dd.io.save(fname, egg, compression=compression)
python
def save(self, fname, compression='blosc'): """ Save method for the FriedEgg object The data will be saved as a 'fegg' file, which is a dictionary containing the elements of a FriedEgg saved in the HDF5 format using `deepdish`. Parameters ---------- fname : str A name for the file. If the file extension (.fegg) is not specified, it will be appended. compression : str The kind of compression to use. See the deepdish documentation for options: http://deepdish.readthedocs.io/en/latest/api_io.html#deepdish.io.save """ egg = { 'data' : self.data, 'analysis' : self.analysis, 'list_length' : self.list_length, 'n_lists' : self.n_lists, 'n_subjects' : self.n_subjects, 'position' : self.position, 'date_created' : self.date_created, 'meta' : self.meta } if fname[-5:]!='.fegg': fname+='.fegg' with warnings.catch_warnings(): warnings.simplefilter("ignore") dd.io.save(fname, egg, compression=compression)
[ "def", "save", "(", "self", ",", "fname", ",", "compression", "=", "'blosc'", ")", ":", "egg", "=", "{", "'data'", ":", "self", ".", "data", ",", "'analysis'", ":", "self", ".", "analysis", ",", "'list_length'", ":", "self", ".", "list_length", ",", "'n_lists'", ":", "self", ".", "n_lists", ",", "'n_subjects'", ":", "self", ".", "n_subjects", ",", "'position'", ":", "self", ".", "position", ",", "'date_created'", ":", "self", ".", "date_created", ",", "'meta'", ":", "self", ".", "meta", "}", "if", "fname", "[", "-", "5", ":", "]", "!=", "'.fegg'", ":", "fname", "+=", "'.fegg'", "with", "warnings", ".", "catch_warnings", "(", ")", ":", "warnings", ".", "simplefilter", "(", "\"ignore\"", ")", "dd", ".", "io", ".", "save", "(", "fname", ",", "egg", ",", "compression", "=", "compression", ")" ]
Save method for the FriedEgg object The data will be saved as a 'fegg' file, which is a dictionary containing the elements of a FriedEgg saved in the HDF5 format using `deepdish`. Parameters ---------- fname : str A name for the file. If the file extension (.fegg) is not specified, it will be appended. compression : str The kind of compression to use. See the deepdish documentation for options: http://deepdish.readthedocs.io/en/latest/api_io.html#deepdish.io.save
[ "Save", "method", "for", "the", "FriedEgg", "object" ]
71dd53c792dd915dc84879d8237e3582dd68b7a4
https://github.com/ContextLab/quail/blob/71dd53c792dd915dc84879d8237e3582dd68b7a4/quail/egg.py#L381-L418
train
ContextLab/quail
quail/analysis/pnr.py
pnr_helper
def pnr_helper(egg, position, match='exact', distance='euclidean', features=None): """ Computes probability of a word being recalled nth (in the appropriate recall list), given its presentation position. Note: zero indexed Parameters ---------- egg : quail.Egg Data to analyze position : int Position of item to be analyzed match : str (exact, best or smooth) Matching approach to compute recall matrix. If exact, the presented and recalled items must be identical (default). If best, the recalled item that is most similar to the presented items will be selected. If smooth, a weighted average of all presented items will be used, where the weights are derived from the similarity between the recalled item and each presented item. distance : str The distance function used to compare presented and recalled items. Applies only to 'best' and 'smooth' matching approaches. Can be any distance function supported by scipy.spatial.distance.cdist. Returns ---------- prob_recalled : numpy array each number represents the probability of nth recall for a word presented in given position/index """ def pnr(lst, position): return [1 if pos==lst[position] else 0 for pos in range(1,egg.list_length+1)] opts = dict(match=match, distance=distance, features=features) if match is 'exact': opts.update({'features' : 'item'}) recmat = recall_matrix(egg, **opts) if match in ['exact', 'best']: result = [pnr(lst, position) for lst in recmat] elif match is 'smooth': result = np.atleast_2d(recmat[:, :, 0]) else: raise ValueError('Match must be set to exact, best or smooth.') return np.nanmean(result, axis=0)
python
def pnr_helper(egg, position, match='exact', distance='euclidean', features=None): """ Computes probability of a word being recalled nth (in the appropriate recall list), given its presentation position. Note: zero indexed Parameters ---------- egg : quail.Egg Data to analyze position : int Position of item to be analyzed match : str (exact, best or smooth) Matching approach to compute recall matrix. If exact, the presented and recalled items must be identical (default). If best, the recalled item that is most similar to the presented items will be selected. If smooth, a weighted average of all presented items will be used, where the weights are derived from the similarity between the recalled item and each presented item. distance : str The distance function used to compare presented and recalled items. Applies only to 'best' and 'smooth' matching approaches. Can be any distance function supported by scipy.spatial.distance.cdist. Returns ---------- prob_recalled : numpy array each number represents the probability of nth recall for a word presented in given position/index """ def pnr(lst, position): return [1 if pos==lst[position] else 0 for pos in range(1,egg.list_length+1)] opts = dict(match=match, distance=distance, features=features) if match is 'exact': opts.update({'features' : 'item'}) recmat = recall_matrix(egg, **opts) if match in ['exact', 'best']: result = [pnr(lst, position) for lst in recmat] elif match is 'smooth': result = np.atleast_2d(recmat[:, :, 0]) else: raise ValueError('Match must be set to exact, best or smooth.') return np.nanmean(result, axis=0)
[ "def", "pnr_helper", "(", "egg", ",", "position", ",", "match", "=", "'exact'", ",", "distance", "=", "'euclidean'", ",", "features", "=", "None", ")", ":", "def", "pnr", "(", "lst", ",", "position", ")", ":", "return", "[", "1", "if", "pos", "==", "lst", "[", "position", "]", "else", "0", "for", "pos", "in", "range", "(", "1", ",", "egg", ".", "list_length", "+", "1", ")", "]", "opts", "=", "dict", "(", "match", "=", "match", ",", "distance", "=", "distance", ",", "features", "=", "features", ")", "if", "match", "is", "'exact'", ":", "opts", ".", "update", "(", "{", "'features'", ":", "'item'", "}", ")", "recmat", "=", "recall_matrix", "(", "egg", ",", "*", "*", "opts", ")", "if", "match", "in", "[", "'exact'", ",", "'best'", "]", ":", "result", "=", "[", "pnr", "(", "lst", ",", "position", ")", "for", "lst", "in", "recmat", "]", "elif", "match", "is", "'smooth'", ":", "result", "=", "np", ".", "atleast_2d", "(", "recmat", "[", ":", ",", ":", ",", "0", "]", ")", "else", ":", "raise", "ValueError", "(", "'Match must be set to exact, best or smooth.'", ")", "return", "np", ".", "nanmean", "(", "result", ",", "axis", "=", "0", ")" ]
Computes probability of a word being recalled nth (in the appropriate recall list), given its presentation position. Note: zero indexed Parameters ---------- egg : quail.Egg Data to analyze position : int Position of item to be analyzed match : str (exact, best or smooth) Matching approach to compute recall matrix. If exact, the presented and recalled items must be identical (default). If best, the recalled item that is most similar to the presented items will be selected. If smooth, a weighted average of all presented items will be used, where the weights are derived from the similarity between the recalled item and each presented item. distance : str The distance function used to compare presented and recalled items. Applies only to 'best' and 'smooth' matching approaches. Can be any distance function supported by scipy.spatial.distance.cdist. Returns ---------- prob_recalled : numpy array each number represents the probability of nth recall for a word presented in given position/index
[ "Computes", "probability", "of", "a", "word", "being", "recalled", "nth", "(", "in", "the", "appropriate", "recall", "list", ")", "given", "its", "presentation", "position", ".", "Note", ":", "zero", "indexed" ]
71dd53c792dd915dc84879d8237e3582dd68b7a4
https://github.com/ContextLab/quail/blob/71dd53c792dd915dc84879d8237e3582dd68b7a4/quail/analysis/pnr.py#L4-L52
train
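To make the per-position probabilities concrete, here is a toy re-run of the inner pnr() indicator logic on a hand-made recall matrix; no quail objects are involved, and the matrix values are fabricated.

import numpy as np

list_length = 4
recmat = [[2, 1, 3, 4], [1, 2, 3, 4]]  # recalled presentation positions, 1-indexed

def pnr(lst, position):
    # indicator over presentation positions: was position pos recalled at this rank?
    return [1 if pos == lst[position] else 0 for pos in range(1, list_length + 1)]

# probability that each presented position was recalled first (position=0)
print(np.nanmean([pnr(lst, 0) for lst in recmat], axis=0))  # [0.5 0.5 0.  0. ]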
aouyar/PyMunin
pymunin/plugins/apachestats.py
MuninApachePlugin.retrieveVals
def retrieveVals(self): """Retrieve values for graphs.""" apacheInfo = ApacheInfo(self._host, self._port, self._user, self._password, self._statuspath, self._ssl) stats = apacheInfo.getServerStats() if self.hasGraph('apache_access'): self.setGraphVal('apache_access', 'reqs', stats['Total Accesses']) if self.hasGraph('apache_bytes'): self.setGraphVal('apache_bytes', 'bytes', stats['Total kBytes'] * 1000) if self.hasGraph('apache_workers'): self.setGraphVal('apache_workers', 'busy', stats['BusyWorkers']) self.setGraphVal('apache_workers', 'idle', stats['IdleWorkers']) self.setGraphVal('apache_workers', 'max', stats['MaxWorkers'])
python
def retrieveVals(self): """Retrieve values for graphs.""" apacheInfo = ApacheInfo(self._host, self._port, self._user, self._password, self._statuspath, self._ssl) stats = apacheInfo.getServerStats() if self.hasGraph('apache_access'): self.setGraphVal('apache_access', 'reqs', stats['Total Accesses']) if self.hasGraph('apache_bytes'): self.setGraphVal('apache_bytes', 'bytes', stats['Total kBytes'] * 1000) if self.hasGraph('apache_workers'): self.setGraphVal('apache_workers', 'busy', stats['BusyWorkers']) self.setGraphVal('apache_workers', 'idle', stats['IdleWorkers']) self.setGraphVal('apache_workers', 'max', stats['MaxWorkers'])
[ "def", "retrieveVals", "(", "self", ")", ":", "apacheInfo", "=", "ApacheInfo", "(", "self", ".", "_host", ",", "self", ".", "_port", ",", "self", ".", "_user", ",", "self", ".", "_password", ",", "self", ".", "_statuspath", ",", "self", ".", "_ssl", ")", "stats", "=", "apacheInfo", ".", "getServerStats", "(", ")", "if", "self", ".", "hasGraph", "(", "'apache_access'", ")", ":", "self", ".", "setGraphVal", "(", "'apache_access'", ",", "'reqs'", ",", "stats", "[", "'Total Accesses'", "]", ")", "if", "self", ".", "hasGraph", "(", "'apache_bytes'", ")", ":", "self", ".", "setGraphVal", "(", "'apache_bytes'", ",", "'bytes'", ",", "stats", "[", "'Total kBytes'", "]", "*", "1000", ")", "if", "self", ".", "hasGraph", "(", "'apache_workers'", ")", ":", "self", ".", "setGraphVal", "(", "'apache_workers'", ",", "'busy'", ",", "stats", "[", "'BusyWorkers'", "]", ")", "self", ".", "setGraphVal", "(", "'apache_workers'", ",", "'idle'", ",", "stats", "[", "'IdleWorkers'", "]", ")", "self", ".", "setGraphVal", "(", "'apache_workers'", ",", "'max'", ",", "stats", "[", "'MaxWorkers'", "]", ")" ]
Retrieve values for graphs.
[ "Retrieve", "values", "for", "graphs", "." ]
4f58a64b6b37c85a84cc7e1e07aafaa0321b249d
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/plugins/apachestats.py#L124-L138
train
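retrieveVals only maps parsed stats onto graph fields. The stand-ins below mimic hasGraph/setGraphVal with the same names; their real definitions live in the MuninPlugin base class, which this record does not include, so everything here is an illustrative assumption.

stats = {'Total Accesses': 1234, 'Total kBytes': 56}
graphs = {}

def hasGraph(name):  # stand-in: the real method checks the configured graph set
    return True

def setGraphVal(graph, field, value):  # stand-in: the real method feeds Munin output
    graphs.setdefault(graph, {})[field] = value

if hasGraph('apache_access'):
    setGraphVal('apache_access', 'reqs', stats['Total Accesses'])
if hasGraph('apache_bytes'):
    setGraphVal('apache_bytes', 'bytes', stats['Total kBytes'] * 1000)  # kBytes -> bytes
print(graphs)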
aouyar/PyMunin
pymunin/plugins/apachestats.py
MuninApachePlugin.autoconf
def autoconf(self): """Implements Munin Plugin Auto-Configuration Option. @return: True if plugin can be auto-configured, False otherwise. """ apacheInfo = ApacheInfo(self._host, self._port, self._user, self._password, self._statuspath, self._ssl) return apacheInfo is not None
python
def autoconf(self): """Implements Munin Plugin Auto-Configuration Option. @return: True if plugin can be auto-configured, False otherwise. """ apacheInfo = ApacheInfo(self._host, self._port, self._user, self._password, self._statuspath, self._ssl) return apacheInfo is not None
[ "def", "autoconf", "(", "self", ")", ":", "apacheInfo", "=", "ApacheInfo", "(", "self", ".", "_host", ",", "self", ".", "_port", ",", "self", ".", "_user", ",", "self", ".", "_password", ",", "self", ".", "_statuspath", ",", "self", ".", "_ssl", ")", "return", "apacheInfo", "is", "not", "None" ]
Implements Munin Plugin Auto-Configuration Option. @return: True if plugin can be auto-configured, False otherwise.
[ "Implements", "Munin", "Plugin", "Auto", "-", "Configuration", "Option", "." ]
4f58a64b6b37c85a84cc7e1e07aafaa0321b249d
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/plugins/apachestats.py#L140-L149
train
aouyar/PyMunin
pymunin/plugins/ntphostoffsets.py
MuninNTPhostOffsetsPlugin.retrieveVals
def retrieveVals(self): """Retrieve values for graphs.""" ntpinfo = NTPinfo() ntpstats = ntpinfo.getHostOffsets(self._remoteHosts) if ntpstats: for host in self._remoteHosts: hostkey = re.sub('\.', '_', host) hoststats = ntpstats.get(host) if hoststats: if self.hasGraph('ntp_host_stratums'): self.setGraphVal('ntp_host_stratums', hostkey, hoststats.get('stratum')) if self.hasGraph('ntp_host_offsets'): self.setGraphVal('ntp_host_offsets', hostkey, hoststats.get('offset')) if self.hasGraph('ntp_host_delays'): self.setGraphVal('ntp_host_delays', hostkey, hoststats.get('delay'))
python
def retrieveVals(self): """Retrieve values for graphs.""" ntpinfo = NTPinfo() ntpstats = ntpinfo.getHostOffsets(self._remoteHosts) if ntpstats: for host in self._remoteHosts: hostkey = re.sub('\.', '_', host) hoststats = ntpstats.get(host) if hoststats: if self.hasGraph('ntp_host_stratums'): self.setGraphVal('ntp_host_stratums', hostkey, hoststats.get('stratum')) if self.hasGraph('ntp_host_offsets'): self.setGraphVal('ntp_host_offsets', hostkey, hoststats.get('offset')) if self.hasGraph('ntp_host_delays'): self.setGraphVal('ntp_host_delays', hostkey, hoststats.get('delay'))
[ "def", "retrieveVals", "(", "self", ")", ":", "ntpinfo", "=", "NTPinfo", "(", ")", "ntpstats", "=", "ntpinfo", ".", "getHostOffsets", "(", "self", ".", "_remoteHosts", ")", "if", "ntpstats", ":", "for", "host", "in", "self", ".", "_remoteHosts", ":", "hostkey", "=", "re", ".", "sub", "(", "'\\.'", ",", "'_'", ",", "host", ")", "hoststats", "=", "ntpstats", ".", "get", "(", "host", ")", "if", "hoststats", ":", "if", "self", ".", "hasGraph", "(", "'ntp_host_stratums'", ")", ":", "self", ".", "setGraphVal", "(", "'ntp_host_stratums'", ",", "hostkey", ",", "hoststats", ".", "get", "(", "'stratum'", ")", ")", "if", "self", ".", "hasGraph", "(", "'ntp_host_offsets'", ")", ":", "self", ".", "setGraphVal", "(", "'ntp_host_offsets'", ",", "hostkey", ",", "hoststats", ".", "get", "(", "'offset'", ")", ")", "if", "self", ".", "hasGraph", "(", "'ntp_host_delays'", ")", ":", "self", ".", "setGraphVal", "(", "'ntp_host_delays'", ",", "hostkey", ",", "hoststats", ".", "get", "(", "'delay'", ")", ")" ]
Retrieve values for graphs.
[ "Retrieve", "values", "for", "graphs", "." ]
4f58a64b6b37c85a84cc7e1e07aafaa0321b249d
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/plugins/ntphostoffsets.py#L119-L136
train
aouyar/PyMunin
pymunin/plugins/ntphostoffsets.py
MuninNTPhostOffsetsPlugin.autoconf
def autoconf(self): """Implements Munin Plugin Auto-Configuration Option. @return: True if plugin can be auto-configured, False otherwise. """ ntpinfo = NTPinfo() ntpstats = ntpinfo.getHostOffsets(self._remoteHosts) return len(ntpstats) > 0
python
def autoconf(self): """Implements Munin Plugin Auto-Configuration Option. @return: True if plugin can be auto-configured, False otherwise. """ ntpinfo = NTPinfo() ntpstats = ntpinfo.getHostOffsets(self._remoteHosts) return len(ntpstats) > 0
[ "def", "autoconf", "(", "self", ")", ":", "ntpinfo", "=", "NTPinfo", "(", ")", "ntpstats", "=", "ntpinfo", ".", "getHostOffsets", "(", "self", ".", "_remoteHosts", ")", "return", "len", "(", "ntpstats", ")", ">", "0" ]
Implements Munin Plugin Auto-Configuration Option. @return: True if plugin can be auto-configured, False otherwise.
[ "Implements", "Munin", "Plugin", "Auto", "-", "Configuration", "Option", "." ]
4f58a64b6b37c85a84cc7e1e07aafaa0321b249d
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/plugins/ntphostoffsets.py#L138-L146
train
aouyar/PyMunin
pymunin/plugins/lighttpdstats.py
MuninLighttpdPlugin.retrieveVals
def retrieveVals(self): """Retrieve values for graphs.""" lighttpdInfo = LighttpdInfo(self._host, self._port, self._user, self._password, self._statuspath, self._ssl) stats = lighttpdInfo.getServerStats() if self.hasGraph('lighttpd_access'): self.setGraphVal('lighttpd_access', 'reqs', stats['Total Accesses']) if self.hasGraph('lighttpd_bytes'): self.setGraphVal('lighttpd_bytes', 'bytes', stats['Total kBytes'] * 1000) if self.hasGraph('lighttpd_servers'): self.setGraphVal('lighttpd_servers', 'busy', stats['BusyServers']) self.setGraphVal('lighttpd_servers', 'idle', stats['IdleServers']) self.setGraphVal('lighttpd_servers', 'max', stats['MaxServers'])
python
def retrieveVals(self): """Retrieve values for graphs.""" lighttpdInfo = LighttpdInfo(self._host, self._port, self._user, self._password, self._statuspath, self._ssl) stats = lighttpdInfo.getServerStats() if self.hasGraph('lighttpd_access'): self.setGraphVal('lighttpd_access', 'reqs', stats['Total Accesses']) if self.hasGraph('lighttpd_bytes'): self.setGraphVal('lighttpd_bytes', 'bytes', stats['Total kBytes'] * 1000) if self.hasGraph('lighttpd_servers'): self.setGraphVal('lighttpd_servers', 'busy', stats['BusyServers']) self.setGraphVal('lighttpd_servers', 'idle', stats['IdleServers']) self.setGraphVal('lighttpd_servers', 'max', stats['MaxServers'])
[ "def", "retrieveVals", "(", "self", ")", ":", "lighttpdInfo", "=", "LighttpdInfo", "(", "self", ".", "_host", ",", "self", ".", "_port", ",", "self", ".", "_user", ",", "self", ".", "_password", ",", "self", ".", "_statuspath", ",", "self", ".", "_ssl", ")", "stats", "=", "lighttpdInfo", ".", "getServerStats", "(", ")", "if", "self", ".", "hasGraph", "(", "'lighttpd_access'", ")", ":", "self", ".", "setGraphVal", "(", "'lighttpd_access'", ",", "'reqs'", ",", "stats", "[", "'Total Accesses'", "]", ")", "if", "self", ".", "hasGraph", "(", "'lighttpd_bytes'", ")", ":", "self", ".", "setGraphVal", "(", "'lighttpd_bytes'", ",", "'bytes'", ",", "stats", "[", "'Total kBytes'", "]", "*", "1000", ")", "if", "self", ".", "hasGraph", "(", "'lighttpd_servers'", ")", ":", "self", ".", "setGraphVal", "(", "'lighttpd_servers'", ",", "'busy'", ",", "stats", "[", "'BusyServers'", "]", ")", "self", ".", "setGraphVal", "(", "'lighttpd_servers'", ",", "'idle'", ",", "stats", "[", "'IdleServers'", "]", ")", "self", ".", "setGraphVal", "(", "'lighttpd_servers'", ",", "'max'", ",", "stats", "[", "'MaxServers'", "]", ")" ]
Retrieve values for graphs.
[ "Retrieve", "values", "for", "graphs", "." ]
4f58a64b6b37c85a84cc7e1e07aafaa0321b249d
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/plugins/lighttpdstats.py#L124-L138
train
aouyar/PyMunin
pymunin/plugins/lighttpdstats.py
MuninLighttpdPlugin.autoconf
def autoconf(self): """Implements Munin Plugin Auto-Configuration Option. @return: True if plugin can be auto-configured, False otherwise. """ lighttpdInfo = LighttpdInfo(self._host, self._port, self._user, self._password, self._statuspath, self._ssl) return lighttpdInfo is not None
python
def autoconf(self): """Implements Munin Plugin Auto-Configuration Option. @return: True if plugin can be auto-configured, False otherwise. """ lighttpdInfo = LighttpdInfo(self._host, self._port, self._user, self._password, self._statuspath, self._ssl) return lighttpdInfo is not None
[ "def", "autoconf", "(", "self", ")", ":", "lighttpdInfo", "=", "LighttpdInfo", "(", "self", ".", "_host", ",", "self", ".", "_port", ",", "self", ".", "_user", ",", "self", ".", "_password", ",", "self", ".", "_statuspath", ",", "self", ".", "_ssl", ")", "return", "lighttpdInfo", "is", "not", "None" ]
Implements Munin Plugin Auto-Configuration Option. @return: True if plugin can be auto-configured, False otherwise.
[ "Implements", "Munin", "Plugin", "Auto", "-", "Configuration", "Option", "." ]
4f58a64b6b37c85a84cc7e1e07aafaa0321b249d
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/plugins/lighttpdstats.py#L140-L149
train
aouyar/PyMunin
pymunin/plugins/diskusagestats.py
MuninDiskUsagePlugin.retrieveVals
def retrieveVals(self): """Retrieve values for graphs.""" name = 'diskspace' if self.hasGraph(name): for fspath in self._fslist: if self._statsSpace.has_key(fspath): self.setGraphVal(name, fspath, self._statsSpace[fspath]['inuse_pcent']) name = 'diskinode' if self.hasGraph(name): for fspath in self._fslist: if self._statsInode.has_key(fspath): self.setGraphVal(name, fspath, self._statsInode[fspath]['inuse_pcent'])
python
def retrieveVals(self): """Retrieve values for graphs.""" name = 'diskspace' if self.hasGraph(name): for fspath in self._fslist: if self._statsSpace.has_key(fspath): self.setGraphVal(name, fspath, self._statsSpace[fspath]['inuse_pcent']) name = 'diskinode' if self.hasGraph(name): for fspath in self._fslist: if self._statsInode.has_key(fspath): self.setGraphVal(name, fspath, self._statsInode[fspath]['inuse_pcent'])
[ "def", "retrieveVals", "(", "self", ")", ":", "name", "=", "'diskspace'", "if", "self", ".", "hasGraph", "(", "name", ")", ":", "for", "fspath", "in", "self", ".", "_fslist", ":", "if", "self", ".", "_statsSpace", ".", "has_key", "(", "fspath", ")", ":", "self", ".", "setGraphVal", "(", "name", ",", "fspath", ",", "self", ".", "_statsSpace", "[", "fspath", "]", "[", "'inuse_pcent'", "]", ")", "name", "=", "'diskinode'", "if", "self", ".", "hasGraph", "(", "name", ")", ":", "for", "fspath", "in", "self", ".", "_fslist", ":", "if", "self", ".", "_statsInode", ".", "has_key", "(", "fspath", ")", ":", "self", ".", "setGraphVal", "(", "name", ",", "fspath", ",", "self", ".", "_statsInode", "[", "fspath", "]", "[", "'inuse_pcent'", "]", ")" ]
Retrieve values for graphs.
[ "Retrieve", "values", "for", "graphs", "." ]
4f58a64b6b37c85a84cc7e1e07aafaa0321b249d
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/plugins/diskusagestats.py#L121-L134
train
scikit-umfpack/scikit-umfpack
scikits/umfpack/umfpack.py
UmfpackContext.symbolic
def symbolic(self, mtx): """ Perform symbolic object (symbolic LU decomposition) computation for a given sparsity pattern. """ self.free_symbolic() indx = self._getIndx(mtx) if not assumeSortedIndices: # row/column indices cannot be assumed to be sorted mtx.sort_indices() if self.isReal: status, self._symbolic\ = self.funs.symbolic(mtx.shape[0], mtx.shape[1], mtx.indptr, indx, mtx.data, self.control, self.info) else: real, imag = mtx.data.real.copy(), mtx.data.imag.copy() status, self._symbolic\ = self.funs.symbolic(mtx.shape[0], mtx.shape[1], mtx.indptr, indx, real, imag, self.control, self.info) if status != UMFPACK_OK: raise RuntimeError('%s failed with %s' % (self.funs.symbolic, umfStatus[status])) self.mtx = mtx
python
def symbolic(self, mtx): """ Perform symbolic object (symbolic LU decomposition) computation for a given sparsity pattern. """ self.free_symbolic() indx = self._getIndx(mtx) if not assumeSortedIndices: # row/column indices cannot be assumed to be sorted mtx.sort_indices() if self.isReal: status, self._symbolic\ = self.funs.symbolic(mtx.shape[0], mtx.shape[1], mtx.indptr, indx, mtx.data, self.control, self.info) else: real, imag = mtx.data.real.copy(), mtx.data.imag.copy() status, self._symbolic\ = self.funs.symbolic(mtx.shape[0], mtx.shape[1], mtx.indptr, indx, real, imag, self.control, self.info) if status != UMFPACK_OK: raise RuntimeError('%s failed with %s' % (self.funs.symbolic, umfStatus[status])) self.mtx = mtx
[ "def", "symbolic", "(", "self", ",", "mtx", ")", ":", "self", ".", "free_symbolic", "(", ")", "indx", "=", "self", ".", "_getIndx", "(", "mtx", ")", "if", "not", "assumeSortedIndices", ":", "# row/column indices cannot be assumed to be sorted", "mtx", ".", "sort_indices", "(", ")", "if", "self", ".", "isReal", ":", "status", ",", "self", ".", "_symbolic", "=", "self", ".", "funs", ".", "symbolic", "(", "mtx", ".", "shape", "[", "0", "]", ",", "mtx", ".", "shape", "[", "1", "]", ",", "mtx", ".", "indptr", ",", "indx", ",", "mtx", ".", "data", ",", "self", ".", "control", ",", "self", ".", "info", ")", "else", ":", "real", ",", "imag", "=", "mtx", ".", "data", ".", "real", ".", "copy", "(", ")", ",", "mtx", ".", "data", ".", "imag", ".", "copy", "(", ")", "status", ",", "self", ".", "_symbolic", "=", "self", ".", "funs", ".", "symbolic", "(", "mtx", ".", "shape", "[", "0", "]", ",", "mtx", ".", "shape", "[", "1", "]", ",", "mtx", ".", "indptr", ",", "indx", ",", "real", ",", "imag", ",", "self", ".", "control", ",", "self", ".", "info", ")", "if", "status", "!=", "UMFPACK_OK", ":", "raise", "RuntimeError", "(", "'%s failed with %s'", "%", "(", "self", ".", "funs", ".", "symbolic", ",", "umfStatus", "[", "status", "]", ")", ")", "self", ".", "mtx", "=", "mtx" ]
Perform symbolic object (symbolic LU decomposition) computation for a given sparsity pattern.
[ "Perform", "symbolic", "object", "(", "symbolic", "LU", "decomposition", ")", "computation", "for", "a", "given", "sparsity", "pattern", "." ]
a2102ef92f4dd060138e72bb5d7c444f8ec49cbc
https://github.com/scikit-umfpack/scikit-umfpack/blob/a2102ef92f4dd060138e72bb5d7c444f8ec49cbc/scikits/umfpack/umfpack.py#L492-L525
train
scikit-umfpack/scikit-umfpack
scikits/umfpack/umfpack.py
UmfpackContext.numeric
def numeric(self, mtx): """ Perform numeric object (LU decomposition) computation using the symbolic decomposition. The symbolic decomposition is (re)computed if necessary. """ self.free_numeric() if self._symbolic is None: self.symbolic(mtx) indx = self._getIndx(mtx) failCount = 0 while 1: if self.isReal: status, self._numeric\ = self.funs.numeric(mtx.indptr, indx, mtx.data, self._symbolic, self.control, self.info) else: real, imag = mtx.data.real.copy(), mtx.data.imag.copy() status, self._numeric\ = self.funs.numeric(mtx.indptr, indx, real, imag, self._symbolic, self.control, self.info) if status != UMFPACK_OK: if status == UMFPACK_WARNING_singular_matrix: warnings.warn('Singular matrix', UmfpackWarning) break elif status in (UMFPACK_ERROR_different_pattern, UMFPACK_ERROR_invalid_Symbolic_object): # Try again. warnings.warn('Recomputing symbolic', UmfpackWarning) self.symbolic(mtx) failCount += 1 else: failCount += 100 else: break if failCount >= 2: raise RuntimeError('%s failed with %s' % (self.funs.numeric, umfStatus[status]))
python
def numeric(self, mtx): """ Perform numeric object (LU decomposition) computation using the symbolic decomposition. The symbolic decomposition is (re)computed if necessary. """ self.free_numeric() if self._symbolic is None: self.symbolic(mtx) indx = self._getIndx(mtx) failCount = 0 while 1: if self.isReal: status, self._numeric\ = self.funs.numeric(mtx.indptr, indx, mtx.data, self._symbolic, self.control, self.info) else: real, imag = mtx.data.real.copy(), mtx.data.imag.copy() status, self._numeric\ = self.funs.numeric(mtx.indptr, indx, real, imag, self._symbolic, self.control, self.info) if status != UMFPACK_OK: if status == UMFPACK_WARNING_singular_matrix: warnings.warn('Singular matrix', UmfpackWarning) break elif status in (UMFPACK_ERROR_different_pattern, UMFPACK_ERROR_invalid_Symbolic_object): # Try again. warnings.warn('Recomputing symbolic', UmfpackWarning) self.symbolic(mtx) failCount += 1 else: failCount += 100 else: break if failCount >= 2: raise RuntimeError('%s failed with %s' % (self.funs.numeric, umfStatus[status]))
[ "def", "numeric", "(", "self", ",", "mtx", ")", ":", "self", ".", "free_numeric", "(", ")", "if", "self", ".", "_symbolic", "is", "None", ":", "self", ".", "symbolic", "(", "mtx", ")", "indx", "=", "self", ".", "_getIndx", "(", "mtx", ")", "failCount", "=", "0", "while", "1", ":", "if", "self", ".", "isReal", ":", "status", ",", "self", ".", "_numeric", "=", "self", ".", "funs", ".", "numeric", "(", "mtx", ".", "indptr", ",", "indx", ",", "mtx", ".", "data", ",", "self", ".", "_symbolic", ",", "self", ".", "control", ",", "self", ".", "info", ")", "else", ":", "real", ",", "imag", "=", "mtx", ".", "data", ".", "real", ".", "copy", "(", ")", ",", "mtx", ".", "data", ".", "imag", ".", "copy", "(", ")", "status", ",", "self", ".", "_numeric", "=", "self", ".", "funs", ".", "numeric", "(", "mtx", ".", "indptr", ",", "indx", ",", "real", ",", "imag", ",", "self", ".", "_symbolic", ",", "self", ".", "control", ",", "self", ".", "info", ")", "if", "status", "!=", "UMFPACK_OK", ":", "if", "status", "==", "UMFPACK_WARNING_singular_matrix", ":", "warnings", ".", "warn", "(", "'Singular matrix'", ",", "UmfpackWarning", ")", "break", "elif", "status", "in", "(", "UMFPACK_ERROR_different_pattern", ",", "UMFPACK_ERROR_invalid_Symbolic_object", ")", ":", "# Try again.", "warnings", ".", "warn", "(", "'Recomputing symbolic'", ",", "UmfpackWarning", ")", "self", ".", "symbolic", "(", "mtx", ")", "failCount", "+=", "1", "else", ":", "failCount", "+=", "100", "else", ":", "break", "if", "failCount", ">=", "2", ":", "raise", "RuntimeError", "(", "'%s failed with %s'", "%", "(", "self", ".", "funs", ".", "numeric", ",", "umfStatus", "[", "status", "]", ")", ")" ]
Perform numeric object (LU decomposition) computation using the symbolic decomposition. The symbolic decomposition is (re)computed if necessary.
[ "Perform", "numeric", "object", "(", "LU", "decomposition", ")", "computation", "using", "the", "symbolic", "decomposition", ".", "The", "symbolic", "decomposition", "is", "(", "re", ")", "computed", "if", "necessary", "." ]
a2102ef92f4dd060138e72bb5d7c444f8ec49cbc
https://github.com/scikit-umfpack/scikit-umfpack/blob/a2102ef92f4dd060138e72bb5d7c444f8ec49cbc/scikits/umfpack/umfpack.py#L532-L577
train
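A call-order sketch for the context above: symbolic() analyses the sparsity pattern, and numeric() factorizes, computing the symbolic object itself on demand. This assumes the package imports as scikits.umfpack with the default real double context; it is untested here.

import numpy as np
import scipy.sparse as sp
import scikits.umfpack as um  # assumed import name for this package

A = sp.csc_matrix(np.array([[2.0, 1.0], [1.0, 3.0]]))
ctx = um.UmfpackContext()
ctx.symbolic(A)     # optional: numeric() would do this on demand
ctx.numeric(A)      # LU factorization, held in the context
ctx.free_numeric()  # releases both numeric and symbolic data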
scikit-umfpack/scikit-umfpack
scikits/umfpack/umfpack.py
UmfpackContext.free_symbolic
def free_symbolic(self): """Free symbolic data""" if self._symbolic is not None: self.funs.free_symbolic(self._symbolic) self._symbolic = None self.mtx = None
python
def free_symbolic(self): """Free symbolic data""" if self._symbolic is not None: self.funs.free_symbolic(self._symbolic) self._symbolic = None self.mtx = None
[ "def", "free_symbolic", "(", "self", ")", ":", "if", "self", ".", "_symbolic", "is", "not", "None", ":", "self", ".", "funs", ".", "free_symbolic", "(", "self", ".", "_symbolic", ")", "self", ".", "_symbolic", "=", "None", "self", ".", "mtx", "=", "None" ]
Free symbolic data
[ "Free", "symbolic", "data" ]
a2102ef92f4dd060138e72bb5d7c444f8ec49cbc
https://github.com/scikit-umfpack/scikit-umfpack/blob/a2102ef92f4dd060138e72bb5d7c444f8ec49cbc/scikits/umfpack/umfpack.py#L615-L620
train
scikit-umfpack/scikit-umfpack
scikits/umfpack/umfpack.py
UmfpackContext.free_numeric
def free_numeric(self): """Free numeric data""" if self._numeric is not None: self.funs.free_numeric(self._numeric) self._numeric = None self.free_symbolic()
python
def free_numeric(self): """Free numeric data""" if self._numeric is not None: self.funs.free_numeric(self._numeric) self._numeric = None self.free_symbolic()
[ "def", "free_numeric", "(", "self", ")", ":", "if", "self", ".", "_numeric", "is", "not", "None", ":", "self", ".", "funs", ".", "free_numeric", "(", "self", ".", "_numeric", ")", "self", ".", "_numeric", "=", "None", "self", ".", "free_symbolic", "(", ")" ]
Free numeric data
[ "Free", "numeric", "data" ]
a2102ef92f4dd060138e72bb5d7c444f8ec49cbc
https://github.com/scikit-umfpack/scikit-umfpack/blob/a2102ef92f4dd060138e72bb5d7c444f8ec49cbc/scikits/umfpack/umfpack.py#L625-L630
train
scikit-umfpack/scikit-umfpack
scikits/umfpack/umfpack.py
UmfpackContext.solve
def solve(self, sys, mtx, rhs, autoTranspose=False): """ Solution of a system of linear equations using the Numeric object. Parameters ---------- sys : constant one of UMFPACK system description constants, like UMFPACK_A, UMFPACK_At, see umfSys list and UMFPACK docs mtx : scipy.sparse.csc_matrix or scipy.sparse.csr_matrix Input. rhs : ndarray Right Hand Side autoTranspose : bool Automatically changes `sys` to the transposed type, if `mtx` is in CSR, since UMFPACK assumes CSC internally Returns ------- sol : ndarray Solution to the equation system. """ if sys not in umfSys: raise ValueError('sys must be in %s' % umfSys) if autoTranspose and self.isCSR: ## # UMFPACK uses CSC internally... if self.family in umfRealTypes: ii = 0 else: ii = 1 if sys in umfSys_transposeMap[ii]: sys = umfSys_transposeMap[ii][sys] else: raise RuntimeError('autoTranspose ambiguous, switch it off') if self._numeric is not None: if self.mtx is not mtx: raise ValueError('must be called with same matrix as numeric()') else: raise RuntimeError('numeric() not called') indx = self._getIndx(mtx) if self.isReal: rhs = rhs.astype(np.float64) sol = np.zeros((mtx.shape[1],), dtype=np.float64) status = self.funs.solve(sys, mtx.indptr, indx, mtx.data, sol, rhs, self._numeric, self.control, self.info) else: rhs = rhs.astype(np.complex128) sol = np.zeros((mtx.shape[1],), dtype=np.complex128) mreal, mimag = mtx.data.real.copy(), mtx.data.imag.copy() sreal, simag = sol.real.copy(), sol.imag.copy() rreal, rimag = rhs.real.copy(), rhs.imag.copy() status = self.funs.solve(sys, mtx.indptr, indx, mreal, mimag, sreal, simag, rreal, rimag, self._numeric, self.control, self.info) sol.real, sol.imag = sreal, simag # self.funs.report_info( self.control, self.info ) # pause() if status != UMFPACK_OK: if status == UMFPACK_WARNING_singular_matrix: ## Change inf, nan to zeros. warnings.warn('Zeroing nan and inf entries...', UmfpackWarning) sol[~np.isfinite(sol)] = 0.0 else: raise RuntimeError('%s failed with %s' % (self.funs.solve, umfStatus[status])) econd = 1.0 / self.info[UMFPACK_RCOND] if econd > self.maxCond: msg = '(almost) singular matrix! '\ + '(estimated cond. number: %.2e)' % econd warnings.warn(msg, UmfpackWarning) return sol
python
def solve(self, sys, mtx, rhs, autoTranspose=False): """ Solution of a system of linear equations using the Numeric object. Parameters ---------- sys : constant one of UMFPACK system description constants, like UMFPACK_A, UMFPACK_At, see umfSys list and UMFPACK docs mtx : scipy.sparse.csc_matrix or scipy.sparse.csr_matrix Input. rhs : ndarray Right Hand Side autoTranspose : bool Automatically changes `sys` to the transposed type, if `mtx` is in CSR, since UMFPACK assumes CSC internally Returns ------- sol : ndarray Solution to the equation system. """ if sys not in umfSys: raise ValueError('sys must be in %s' % umfSys) if autoTranspose and self.isCSR: ## # UMFPACK uses CSC internally... if self.family in umfRealTypes: ii = 0 else: ii = 1 if sys in umfSys_transposeMap[ii]: sys = umfSys_transposeMap[ii][sys] else: raise RuntimeError('autoTranspose ambiguous, switch it off') if self._numeric is not None: if self.mtx is not mtx: raise ValueError('must be called with same matrix as numeric()') else: raise RuntimeError('numeric() not called') indx = self._getIndx(mtx) if self.isReal: rhs = rhs.astype(np.float64) sol = np.zeros((mtx.shape[1],), dtype=np.float64) status = self.funs.solve(sys, mtx.indptr, indx, mtx.data, sol, rhs, self._numeric, self.control, self.info) else: rhs = rhs.astype(np.complex128) sol = np.zeros((mtx.shape[1],), dtype=np.complex128) mreal, mimag = mtx.data.real.copy(), mtx.data.imag.copy() sreal, simag = sol.real.copy(), sol.imag.copy() rreal, rimag = rhs.real.copy(), rhs.imag.copy() status = self.funs.solve(sys, mtx.indptr, indx, mreal, mimag, sreal, simag, rreal, rimag, self._numeric, self.control, self.info) sol.real, sol.imag = sreal, simag # self.funs.report_info( self.control, self.info ) # pause() if status != UMFPACK_OK: if status == UMFPACK_WARNING_singular_matrix: ## Change inf, nan to zeros. warnings.warn('Zeroing nan and inf entries...', UmfpackWarning) sol[~np.isfinite(sol)] = 0.0 else: raise RuntimeError('%s failed with %s' % (self.funs.solve, umfStatus[status])) econd = 1.0 / self.info[UMFPACK_RCOND] if econd > self.maxCond: msg = '(almost) singular matrix! '\ + '(estimated cond. number: %.2e)' % econd warnings.warn(msg, UmfpackWarning) return sol
[ "def", "solve", "(", "self", ",", "sys", ",", "mtx", ",", "rhs", ",", "autoTranspose", "=", "False", ")", ":", "if", "sys", "not", "in", "umfSys", ":", "raise", "ValueError", "(", "'sys must be in'", "%", "umfSys", ")", "if", "autoTranspose", "and", "self", ".", "isCSR", ":", "##", "# UMFPACK uses CSC internally...", "if", "self", ".", "family", "in", "umfRealTypes", ":", "ii", "=", "0", "else", ":", "ii", "=", "1", "if", "sys", "in", "umfSys_transposeMap", "[", "ii", "]", ":", "sys", "=", "umfSys_transposeMap", "[", "ii", "]", "[", "sys", "]", "else", ":", "raise", "RuntimeError", "(", "'autoTranspose ambiguous, switch it off'", ")", "if", "self", ".", "_numeric", "is", "not", "None", ":", "if", "self", ".", "mtx", "is", "not", "mtx", ":", "raise", "ValueError", "(", "'must be called with same matrix as numeric()'", ")", "else", ":", "raise", "RuntimeError", "(", "'numeric() not called'", ")", "indx", "=", "self", ".", "_getIndx", "(", "mtx", ")", "if", "self", ".", "isReal", ":", "rhs", "=", "rhs", ".", "astype", "(", "np", ".", "float64", ")", "sol", "=", "np", ".", "zeros", "(", "(", "mtx", ".", "shape", "[", "1", "]", ",", ")", ",", "dtype", "=", "np", ".", "float64", ")", "status", "=", "self", ".", "funs", ".", "solve", "(", "sys", ",", "mtx", ".", "indptr", ",", "indx", ",", "mtx", ".", "data", ",", "sol", ",", "rhs", ",", "self", ".", "_numeric", ",", "self", ".", "control", ",", "self", ".", "info", ")", "else", ":", "rhs", "=", "rhs", ".", "astype", "(", "np", ".", "complex128", ")", "sol", "=", "np", ".", "zeros", "(", "(", "mtx", ".", "shape", "[", "1", "]", ",", ")", ",", "dtype", "=", "np", ".", "complex128", ")", "mreal", ",", "mimag", "=", "mtx", ".", "data", ".", "real", ".", "copy", "(", ")", ",", "mtx", ".", "data", ".", "imag", ".", "copy", "(", ")", "sreal", ",", "simag", "=", "sol", ".", "real", ".", "copy", "(", ")", ",", "sol", ".", "imag", ".", "copy", "(", ")", "rreal", ",", "rimag", "=", "rhs", ".", "real", ".", "copy", "(", ")", ",", "rhs", ".", "imag", ".", "copy", "(", ")", "status", "=", "self", ".", "funs", ".", "solve", "(", "sys", ",", "mtx", ".", "indptr", ",", "indx", ",", "mreal", ",", "mimag", ",", "sreal", ",", "simag", ",", "rreal", ",", "rimag", ",", "self", ".", "_numeric", ",", "self", ".", "control", ",", "self", ".", "info", ")", "sol", ".", "real", ",", "sol", ".", "imag", "=", "sreal", ",", "simag", "# self.funs.report_info( self.control, self.info )", "# pause()", "if", "status", "!=", "UMFPACK_OK", ":", "if", "status", "==", "UMFPACK_WARNING_singular_matrix", ":", "## Change inf, nan to zeros.", "warnings", ".", "warn", "(", "'Zeroing nan and inf entries...'", ",", "UmfpackWarning", ")", "sol", "[", "~", "np", ".", "isfinite", "(", "sol", ")", "]", "=", "0.0", "else", ":", "raise", "RuntimeError", "(", "'%s failed with %s'", "%", "(", "self", ".", "funs", ".", "solve", ",", "umfStatus", "[", "status", "]", ")", ")", "econd", "=", "1.0", "/", "self", ".", "info", "[", "UMFPACK_RCOND", "]", "if", "econd", ">", "self", ".", "maxCond", ":", "msg", "=", "'(almost) singular matrix! '", "+", "'(estimated cond. number: %.2e)'", "%", "econd", "warnings", ".", "warn", "(", "msg", ",", "UmfpackWarning", ")", "return", "sol" ]
Solution of a system of linear equations using the Numeric object. Parameters ---------- sys : constant one of UMFPACK system description constants, like UMFPACK_A, UMFPACK_At, see umfSys list and UMFPACK docs mtx : scipy.sparse.csc_matrix or scipy.sparse.csr_matrix Input. rhs : ndarray Right Hand Side autoTranspose : bool Automatically changes `sys` to the transposed type, if `mtx` is in CSR, since UMFPACK assumes CSC internally Returns ------- sol : ndarray Solution to the equation system.
[ "Solution", "of", "a", "system", "of", "linear", "equations", "using", "the", "Numeric", "object", "." ]
a2102ef92f4dd060138e72bb5d7c444f8ec49cbc
https://github.com/scikit-umfpack/scikit-umfpack/blob/a2102ef92f4dd060138e72bb5d7c444f8ec49cbc/scikits/umfpack/umfpack.py#L645-L723
train
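A possible end-to-end use of solve(), assuming the UMFPACK_A system constant is exported at module level as the docstring's umfSys constants suggest; a sketch, not verified against an installed build.

import numpy as np
import scipy.sparse as sp
import scikits.umfpack as um  # assumed import name

A = sp.csc_matrix(np.array([[4.0, 1.0], [1.0, 3.0]]))
b = np.array([1.0, 2.0])
ctx = um.UmfpackContext()
ctx.numeric(A)  # solve() requires a prior numeric() on the same matrix object
x = ctx.solve(um.UMFPACK_A, A, b)  # solve A x = b
print(np.allclose(A @ x, b))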
scikit-umfpack/scikit-umfpack
scikits/umfpack/umfpack.py
UmfpackContext.linsolve
def linsolve(self, sys, mtx, rhs, autoTranspose=False): """ One-shot solution of a system of linear equations. Reuses Numeric object if possible. Parameters ---------- sys : constant one of UMFPACK system description constants, like UMFPACK_A, UMFPACK_At, see umfSys list and UMFPACK docs mtx : scipy.sparse.csc_matrix or scipy.sparse.csr_matrix Input. rhs : ndarray Right Hand Side autoTranspose : bool Automatically changes `sys` to the transposed type, if `mtx` is in CSR, since UMFPACK assumes CSC internally Returns ------- sol : ndarray Solution to the equation system. """ if sys not in umfSys: raise ValueError('sys must be in %s' % umfSys) if self._numeric is None: self.numeric(mtx) else: if self.mtx is not mtx: self.numeric(mtx) sol = self.solve(sys, mtx, rhs, autoTranspose) self.free_numeric() return sol
python
def linsolve(self, sys, mtx, rhs, autoTranspose=False): """ One-shot solution of a system of linear equations. Reuses Numeric object if possible. Parameters ---------- sys : constant one of UMFPACK system description constants, like UMFPACK_A, UMFPACK_At, see umfSys list and UMFPACK docs mtx : scipy.sparse.csc_matrix or scipy.sparse.csr_matrix Input. rhs : ndarray Right Hand Side autoTranspose : bool Automatically changes `sys` to the transposed type, if `mtx` is in CSR, since UMFPACK assumes CSC internally Returns ------- sol : ndarray Solution to the equation system. """ if sys not in umfSys: raise ValueError('sys must be in %s' % umfSys) if self._numeric is None: self.numeric(mtx) else: if self.mtx is not mtx: self.numeric(mtx) sol = self.solve(sys, mtx, rhs, autoTranspose) self.free_numeric() return sol
[ "def", "linsolve", "(", "self", ",", "sys", ",", "mtx", ",", "rhs", ",", "autoTranspose", "=", "False", ")", ":", "if", "sys", "not", "in", "umfSys", ":", "raise", "ValueError", "(", "'sys must be in %s'", "%", "umfSys", ")", "if", "self", ".", "_numeric", "is", "None", ":", "self", ".", "numeric", "(", "mtx", ")", "else", ":", "if", "self", ".", "mtx", "is", "not", "mtx", ":", "self", ".", "numeric", "(", "mtx", ")", "sol", "=", "self", ".", "solve", "(", "sys", ",", "mtx", ",", "rhs", ",", "autoTranspose", ")", "self", ".", "free_numeric", "(", ")", "return", "sol" ]
One-shot solution of a system of linear equations. Reuses Numeric object if possible. Parameters ---------- sys : constant one of UMFPACK system description constants, like UMFPACK_A, UMFPACK_At, see umfSys list and UMFPACK docs mtx : scipy.sparse.csc_matrix or scipy.sparse.csr_matrix Input. rhs : ndarray Right Hand Side autoTranspose : bool Automatically changes `sys` to the transposed type, if `mtx` is in CSR, since UMFPACK assumes CSC internally Returns ------- sol : ndarray Solution to the equation system.
[ "One", "-", "shot", "solution", "of", "a", "system", "of", "linear", "equations", ".", "Reuses", "Numeric", "object", "if", "possible", "." ]
a2102ef92f4dd060138e72bb5d7c444f8ec49cbc
https://github.com/scikit-umfpack/scikit-umfpack/blob/a2102ef92f4dd060138e72bb5d7c444f8ec49cbc/scikits/umfpack/umfpack.py#L728-L764
train
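And the one-shot equivalent via linsolve, which factorizes, solves, and frees in a single call; same assumed import name and module-level constant as above, untested here.

import numpy as np
import scipy.sparse as sp
import scikits.umfpack as um  # assumed import name

A = sp.csc_matrix(np.array([[10.0, 2.0], [3.0, 7.0]]))
b = np.array([12.0, 10.0])
x = um.UmfpackContext().linsolve(um.UMFPACK_A, A, b)
print(x)  # roughly [1. 1.]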
scikit-umfpack/scikit-umfpack
scikits/umfpack/umfpack.py
UmfpackContext.lu
def lu(self, mtx): """ Perform LU decomposition. For a given matrix A, the decomposition satisfies:: LU = PRAQ when do_recip is true LU = P(R^-1)AQ when do_recip is false Parameters ---------- mtx : scipy.sparse.csc_matrix or scipy.sparse.csr_matrix Input. Returns ------- L : csr_matrix Lower triangular m-by-min(m,n) CSR matrix U : csc_matrix Upper triangular min(m,n)-by-n CSC matrix P : ndarray Vector of row permutations Q : ndarray Vector of column permutations R : ndarray Vector of diagonal row scalings do_recip : bool Whether R is R^-1 or R """ # this should probably be changed mtx = mtx.tocsc() self.numeric(mtx) # first find out how much space to reserve (status, lnz, unz, n_row, n_col, nz_udiag)\ = self.funs.get_lunz(self._numeric) if status != UMFPACK_OK: raise RuntimeError('%s failed with %s' % (self.funs.get_lunz, umfStatus[status])) # allocate storage for decomposition data i_type = mtx.indptr.dtype Lp = np.zeros((n_row+1,), dtype=i_type) Lj = np.zeros((lnz,), dtype=i_type) Lx = np.zeros((lnz,), dtype=np.double) Up = np.zeros((n_col+1,), dtype=i_type) Ui = np.zeros((unz,), dtype=i_type) Ux = np.zeros((unz,), dtype=np.double) P = np.zeros((n_row,), dtype=i_type) Q = np.zeros((n_col,), dtype=i_type) Dx = np.zeros((min(n_row,n_col),), dtype=np.double) Rs = np.zeros((n_row,), dtype=np.double) if self.isReal: (status,do_recip) = self.funs.get_numeric(Lp,Lj,Lx,Up,Ui,Ux, P,Q,Dx,Rs, self._numeric) if status != UMFPACK_OK: raise RuntimeError('%s failed with %s' % (self.funs.get_numeric, umfStatus[status])) L = sp.csr_matrix((Lx,Lj,Lp),(n_row,min(n_row,n_col))) U = sp.csc_matrix((Ux,Ui,Up),(min(n_row,n_col),n_col)) R = Rs return (L,U,P,Q,R,bool(do_recip)) else: # allocate additional storage for imaginary parts Lz = np.zeros((lnz,), dtype=np.double) Uz = np.zeros((unz,), dtype=np.double) Dz = np.zeros((min(n_row,n_col),), dtype=np.double) (status,do_recip) = self.funs.get_numeric(Lp,Lj,Lx,Lz,Up,Ui,Ux,Uz, P,Q,Dx,Dz,Rs, self._numeric) if status != UMFPACK_OK: raise RuntimeError('%s failed with %s' % (self.funs.get_numeric, umfStatus[status])) Lxz = np.zeros((lnz,), dtype=np.complex128) Uxz = np.zeros((unz,), dtype=np.complex128) Dxz = np.zeros((min(n_row,n_col),), dtype=np.complex128) Lxz.real,Lxz.imag = Lx,Lz Uxz.real,Uxz.imag = Ux,Uz Dxz.real,Dxz.imag = Dx,Dz L = sp.csr_matrix((Lxz,Lj,Lp),(n_row,min(n_row,n_col))) U = sp.csc_matrix((Uxz,Ui,Up),(min(n_row,n_col),n_col)) R = Rs return (L,U,P,Q,R,bool(do_recip))
python
def lu(self, mtx): """ Perform LU decomposition. For a given matrix A, the decomposition satisfies:: LU = PRAQ when do_recip is true LU = P(R^-1)AQ when do_recip is false Parameters ---------- mtx : scipy.sparse.csc_matrix or scipy.sparse.csr_matrix Input. Returns ------- L : csr_matrix Lower triangular m-by-min(m,n) CSR matrix U : csc_matrix Upper triangular min(m,n)-by-n CSC matrix P : ndarray Vector of row permutations Q : ndarray Vector of column permutations R : ndarray Vector of diagonal row scalings do_recip : bool Whether R is R^-1 or R """ # this should probably be changed mtx = mtx.tocsc() self.numeric(mtx) # first find out how much space to reserve (status, lnz, unz, n_row, n_col, nz_udiag)\ = self.funs.get_lunz(self._numeric) if status != UMFPACK_OK: raise RuntimeError('%s failed with %s' % (self.funs.get_lunz, umfStatus[status])) # allocate storage for decomposition data i_type = mtx.indptr.dtype Lp = np.zeros((n_row+1,), dtype=i_type) Lj = np.zeros((lnz,), dtype=i_type) Lx = np.zeros((lnz,), dtype=np.double) Up = np.zeros((n_col+1,), dtype=i_type) Ui = np.zeros((unz,), dtype=i_type) Ux = np.zeros((unz,), dtype=np.double) P = np.zeros((n_row,), dtype=i_type) Q = np.zeros((n_col,), dtype=i_type) Dx = np.zeros((min(n_row,n_col),), dtype=np.double) Rs = np.zeros((n_row,), dtype=np.double) if self.isReal: (status,do_recip) = self.funs.get_numeric(Lp,Lj,Lx,Up,Ui,Ux, P,Q,Dx,Rs, self._numeric) if status != UMFPACK_OK: raise RuntimeError('%s failed with %s' % (self.funs.get_numeric, umfStatus[status])) L = sp.csr_matrix((Lx,Lj,Lp),(n_row,min(n_row,n_col))) U = sp.csc_matrix((Ux,Ui,Up),(min(n_row,n_col),n_col)) R = Rs return (L,U,P,Q,R,bool(do_recip)) else: # allocate additional storage for imaginary parts Lz = np.zeros((lnz,), dtype=np.double) Uz = np.zeros((unz,), dtype=np.double) Dz = np.zeros((min(n_row,n_col),), dtype=np.double) (status,do_recip) = self.funs.get_numeric(Lp,Lj,Lx,Lz,Up,Ui,Ux,Uz, P,Q,Dx,Dz,Rs, self._numeric) if status != UMFPACK_OK: raise RuntimeError('%s failed with %s' % (self.funs.get_numeric, umfStatus[status])) Lxz = np.zeros((lnz,), dtype=np.complex128) Uxz = np.zeros((unz,), dtype=np.complex128) Dxz = np.zeros((min(n_row,n_col),), dtype=np.complex128) Lxz.real,Lxz.imag = Lx,Lz Uxz.real,Uxz.imag = Ux,Uz Dxz.real,Dxz.imag = Dx,Dz L = sp.csr_matrix((Lxz,Lj,Lp),(n_row,min(n_row,n_col))) U = sp.csc_matrix((Uxz,Ui,Up),(min(n_row,n_col),n_col)) R = Rs return (L,U,P,Q,R,bool(do_recip))
[ "def", "lu", "(", "self", ",", "mtx", ")", ":", "# this should probably be changed", "mtx", "=", "mtx", ".", "tocsc", "(", ")", "self", ".", "numeric", "(", "mtx", ")", "# first find out how much space to reserve", "(", "status", ",", "lnz", ",", "unz", ",", "n_row", ",", "n_col", ",", "nz_udiag", ")", "=", "self", ".", "funs", ".", "get_lunz", "(", "self", ".", "_numeric", ")", "if", "status", "!=", "UMFPACK_OK", ":", "raise", "RuntimeError", "(", "'%s failed with %s'", "%", "(", "self", ".", "funs", ".", "get_lunz", ",", "umfStatus", "[", "status", "]", ")", ")", "# allocate storage for decomposition data", "i_type", "=", "mtx", ".", "indptr", ".", "dtype", "Lp", "=", "np", ".", "zeros", "(", "(", "n_row", "+", "1", ",", ")", ",", "dtype", "=", "i_type", ")", "Lj", "=", "np", ".", "zeros", "(", "(", "lnz", ",", ")", ",", "dtype", "=", "i_type", ")", "Lx", "=", "np", ".", "zeros", "(", "(", "lnz", ",", ")", ",", "dtype", "=", "np", ".", "double", ")", "Up", "=", "np", ".", "zeros", "(", "(", "n_col", "+", "1", ",", ")", ",", "dtype", "=", "i_type", ")", "Ui", "=", "np", ".", "zeros", "(", "(", "unz", ",", ")", ",", "dtype", "=", "i_type", ")", "Ux", "=", "np", ".", "zeros", "(", "(", "unz", ",", ")", ",", "dtype", "=", "np", ".", "double", ")", "P", "=", "np", ".", "zeros", "(", "(", "n_row", ",", ")", ",", "dtype", "=", "i_type", ")", "Q", "=", "np", ".", "zeros", "(", "(", "n_col", ",", ")", ",", "dtype", "=", "i_type", ")", "Dx", "=", "np", ".", "zeros", "(", "(", "min", "(", "n_row", ",", "n_col", ")", ",", ")", ",", "dtype", "=", "np", ".", "double", ")", "Rs", "=", "np", ".", "zeros", "(", "(", "n_row", ",", ")", ",", "dtype", "=", "np", ".", "double", ")", "if", "self", ".", "isReal", ":", "(", "status", ",", "do_recip", ")", "=", "self", ".", "funs", ".", "get_numeric", "(", "Lp", ",", "Lj", ",", "Lx", ",", "Up", ",", "Ui", ",", "Ux", ",", "P", ",", "Q", ",", "Dx", ",", "Rs", ",", "self", ".", "_numeric", ")", "if", "status", "!=", "UMFPACK_OK", ":", "raise", "RuntimeError", "(", "'%s failed with %s'", "%", "(", "self", ".", "funs", ".", "get_numeric", ",", "umfStatus", "[", "status", "]", ")", ")", "L", "=", "sp", ".", "csr_matrix", "(", "(", "Lx", ",", "Lj", ",", "Lp", ")", ",", "(", "n_row", ",", "min", "(", "n_row", ",", "n_col", ")", ")", ")", "U", "=", "sp", ".", "csc_matrix", "(", "(", "Ux", ",", "Ui", ",", "Up", ")", ",", "(", "min", "(", "n_row", ",", "n_col", ")", ",", "n_col", ")", ")", "R", "=", "Rs", "return", "(", "L", ",", "U", ",", "P", ",", "Q", ",", "R", ",", "bool", "(", "do_recip", ")", ")", "else", ":", "# allocate additional storage for imaginary parts", "Lz", "=", "np", ".", "zeros", "(", "(", "lnz", ",", ")", ",", "dtype", "=", "np", ".", "double", ")", "Uz", "=", "np", ".", "zeros", "(", "(", "unz", ",", ")", ",", "dtype", "=", "np", ".", "double", ")", "Dz", "=", "np", ".", "zeros", "(", "(", "min", "(", "n_row", ",", "n_col", ")", ",", ")", ",", "dtype", "=", "np", ".", "double", ")", "(", "status", ",", "do_recip", ")", "=", "self", ".", "funs", ".", "get_numeric", "(", "Lp", ",", "Lj", ",", "Lx", ",", "Lz", ",", "Up", ",", "Ui", ",", "Ux", ",", "Uz", ",", "P", ",", "Q", ",", "Dx", ",", "Dz", ",", "Rs", ",", "self", ".", "_numeric", ")", "if", "status", "!=", "UMFPACK_OK", ":", "raise", "RuntimeError", "(", "'%s failed with %s'", "%", "(", "self", ".", "funs", ".", "get_numeric", ",", "umfStatus", "[", "status", "]", ")", ")", "Lxz", "=", "np", ".", "zeros", "(", "(", "lnz", ",", ")", ",", "dtype", "=", "np", ".", "complex128", ")", "Uxz", "=", "np", 
".", "zeros", "(", "(", "unz", ",", ")", ",", "dtype", "=", "np", ".", "complex128", ")", "Dxz", "=", "np", ".", "zeros", "(", "(", "min", "(", "n_row", ",", "n_col", ")", ",", ")", ",", "dtype", "=", "np", ".", "complex128", ")", "Lxz", ".", "real", ",", "Lxz", ".", "imag", "=", "Lx", ",", "Lz", "Uxz", ".", "real", ",", "Uxz", ".", "imag", "=", "Ux", ",", "Uz", "Dxz", ".", "real", ",", "Dxz", ".", "imag", "=", "Dx", ",", "Dz", "L", "=", "sp", ".", "csr_matrix", "(", "(", "Lxz", ",", "Lj", ",", "Lp", ")", ",", "(", "n_row", ",", "min", "(", "n_row", ",", "n_col", ")", ")", ")", "U", "=", "sp", ".", "csc_matrix", "(", "(", "Uxz", ",", "Ui", ",", "Up", ")", ",", "(", "min", "(", "n_row", ",", "n_col", ")", ",", "n_col", ")", ")", "R", "=", "Rs", "return", "(", "L", ",", "U", ",", "P", ",", "Q", ",", "R", ",", "bool", "(", "do_recip", ")", ")" ]
Perform LU decomposition. For a given matrix A, the decomposition satisfies:: LU = PRAQ when do_recip is true LU = P(R^-1)AQ when do_recip is false Parameters ---------- mtx : scipy.sparse.csc_matrix or scipy.sparse.csr_matrix Input. Returns ------- L : csr_matrix Lower triangular m-by-min(m,n) CSR matrix U : csc_matrix Upper triangular min(m,n)-by-n CSC matrix P : ndarray Vector of row permutations Q : ndarray Vector of column permutations R : ndarray Vector of diagonal row scalings do_recip : bool Whether R is R^-1 or R
[ "Perform", "LU", "decomposition", "." ]
a2102ef92f4dd060138e72bb5d7c444f8ec49cbc
https://github.com/scikit-umfpack/scikit-umfpack/blob/a2102ef92f4dd060138e72bb5d7c444f8ec49cbc/scikits/umfpack/umfpack.py#L801-L903
train
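A sketch checking the relation stated in the lu docstring, LU = PRAQ (or P(R^-1)AQ when do_recip is false); treating P and Q as index vectors into the rows and columns of A is an assumption about UMFPACK's permutation convention, not something the record spells out.

    import numpy as np
    import scipy.sparse as sp
    import scikits.umfpack as um

    A = sp.csc_matrix(np.array([[4.0, 1.0], [1.0, 3.0]]))
    L, U, P, Q, R, do_recip = um.UmfpackContext().lu(A)
    Rmat = sp.diags(R) if do_recip else sp.diags(1.0 / R)  # R vs R^-1, per do_recip
    lhs = (L @ U).toarray()
    rhs = (Rmat @ A).toarray()[P][:, Q]  # P R A Q with P, Q as permutation index vectors
    print(np.allclose(lhs, rhs))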
ContextLab/quail
quail/fingerprint.py
order_stick
def order_stick(presenter, egg, dist_dict, strategy, fingerprint): """ Reorders a list according to strategy """ def compute_feature_stick(features, weights, alpha): '''create a 'stick' of feature weights''' feature_stick = [] for f, w in zip(features, weights): feature_stick+=[f]*int(np.power(w,alpha)*100) return feature_stick def reorder_list(egg, feature_stick, dist_dict, tau): def compute_stimulus_stick(s, tau): '''create a 'stick' of feature weights''' feature_stick = [[weights[feature]]*round(weights[feature]**alpha)*100 for feature in w] return [item for sublist in feature_stick for item in sublist] # parse egg pres, rec, features, dist_funcs = parse_egg(egg) # turn pres and features into np arrays pres_arr = np.array(pres) features_arr = np.array(features) # starting with a random word reordered_list = [] reordered_features = [] # start with a random choice idx = np.random.choice(len(pres), 1)[0] # original inds inds = list(range(len(pres))) # keep track of the indices inds_used = [idx] # get the word current_word = pres[idx] # get the features dict current_features = features[idx] # append that word to the reordered list reordered_list.append(current_word) # append the features to the reordered list reordered_features.append(current_features) # loop over the word list for i in range(len(pres)-1): # sample from the stick feature_sample = feature_stick[np.random.choice(len(feature_stick), 1)[0]] # indices left inds_left = [ind for ind in inds if ind not in inds_used] # make a copy of the words filtering out the already used ones words_left = pres[inds_left] # get word distances for the word dists_left = np.array([dist_dict[current_word][word][feature_sample] for word in words_left]) # features left features_left = features[inds_left] # normalize distances dists_left_max = np.max(dists_left) if dists_left_max>0: dists_left_norm = dists_left/np.max(dists_left) else: dists_left_norm = dists_left # get the min dists_left_min = np.min(-dists_left_norm) # invert the word distances to turn distance->similarity dists_left_inv = - dists_left_norm - dists_left_min + .01 # create a word stick words_stick = [] for word, dist in zip(words_left, dists_left_inv): words_stick+=[word]*int(np.power(dist,tau)*100) next_word = np.random.choice(words_stick) next_word_idx = np.where(pres==next_word)[0] inds_used.append(next_word_idx) reordered_list.append(next_word) reordered_features.append(features[next_word_idx][0]) return Egg(pres=[reordered_list], rec=[reordered_list], features=[[reordered_features]], dist_funcs=dist_funcs) # parse egg pres, rec, features, dist_funcs = parse_egg(egg) # get params needed for list reordering features = presenter.get_params('fingerprint').get_features() alpha = presenter.get_params('alpha') tau = presenter.get_params('tau') weights = fingerprint # invert the weights if strategy is destabilize if strategy=='destabilize': weights = 1 - weights # compute feature stick feature_stick = compute_feature_stick(features, weights, alpha) # reorder list return reorder_list(egg, feature_stick, dist_dict, tau)
python
def order_stick(presenter, egg, dist_dict, strategy, fingerprint): """ Reorders a list according to strategy """ def compute_feature_stick(features, weights, alpha): '''create a 'stick' of feature weights''' feature_stick = [] for f, w in zip(features, weights): feature_stick+=[f]*int(np.power(w,alpha)*100) return feature_stick def reorder_list(egg, feature_stick, dist_dict, tau): def compute_stimulus_stick(s, tau): '''create a 'stick' of feature weights''' feature_stick = [[weights[feature]]*round(weights[feature]**alpha)*100 for feature in w] return [item for sublist in feature_stick for item in sublist] # parse egg pres, rec, features, dist_funcs = parse_egg(egg) # turn pres and features into np arrays pres_arr = np.array(pres) features_arr = np.array(features) # starting with a random word reordered_list = [] reordered_features = [] # start with a random choice idx = np.random.choice(len(pres), 1)[0] # original inds inds = list(range(len(pres))) # keep track of the indices inds_used = [idx] # get the word current_word = pres[idx] # get the features dict current_features = features[idx] # append that word to the reordered list reordered_list.append(current_word) # append the features to the reordered list reordered_features.append(current_features) # loop over the word list for i in range(len(pres)-1): # sample from the stick feature_sample = feature_stick[np.random.choice(len(feature_stick), 1)[0]] # indices left inds_left = [ind for ind in inds if ind not in inds_used] # make a copy of the words filtering out the already used ones words_left = pres[inds_left] # get word distances for the word dists_left = np.array([dist_dict[current_word][word][feature_sample] for word in words_left]) # features left features_left = features[inds_left] # normalize distances dists_left_max = np.max(dists_left) if dists_left_max>0: dists_left_norm = dists_left/np.max(dists_left) else: dists_left_norm = dists_left # get the min dists_left_min = np.min(-dists_left_norm) # invert the word distances to turn distance->similarity dists_left_inv = - dists_left_norm - dists_left_min + .01 # create a word stick words_stick = [] for word, dist in zip(words_left, dists_left_inv): words_stick+=[word]*int(np.power(dist,tau)*100) next_word = np.random.choice(words_stick) next_word_idx = np.where(pres==next_word)[0] inds_used.append(next_word_idx) reordered_list.append(next_word) reordered_features.append(features[next_word_idx][0]) return Egg(pres=[reordered_list], rec=[reordered_list], features=[[reordered_features]], dist_funcs=dist_funcs) # parse egg pres, rec, features, dist_funcs = parse_egg(egg) # get params needed for list reordering features = presenter.get_params('fingerprint').get_features() alpha = presenter.get_params('alpha') tau = presenter.get_params('tau') weights = fingerprint # invert the weights if strategy is destabilize if strategy=='destabilize': weights = 1 - weights # compute feature stick feature_stick = compute_feature_stick(features, weights, alpha) # reorder list return reorder_list(egg, feature_stick, dist_dict, tau)
[ "def", "order_stick", "(", "presenter", ",", "egg", ",", "dist_dict", ",", "strategy", ",", "fingerprint", ")", ":", "def", "compute_feature_stick", "(", "features", ",", "weights", ",", "alpha", ")", ":", "'''create a 'stick' of feature weights'''", "feature_stick", "=", "[", "]", "for", "f", ",", "w", "in", "zip", "(", "features", ",", "weights", ")", ":", "feature_stick", "+=", "[", "f", "]", "*", "int", "(", "np", ".", "power", "(", "w", ",", "alpha", ")", "*", "100", ")", "return", "feature_stick", "def", "reorder_list", "(", "egg", ",", "feature_stick", ",", "dist_dict", ",", "tau", ")", ":", "def", "compute_stimulus_stick", "(", "s", ",", "tau", ")", ":", "'''create a 'stick' of feature weights'''", "feature_stick", "=", "[", "[", "weights", "[", "feature", "]", "]", "*", "round", "(", "weights", "[", "feature", "]", "**", "alpha", ")", "*", "100", "for", "feature", "in", "w", "]", "return", "[", "item", "for", "sublist", "in", "feature_stick", "for", "item", "in", "sublist", "]", "# parse egg", "pres", ",", "rec", ",", "features", ",", "dist_funcs", "=", "parse_egg", "(", "egg", ")", "# turn pres and features into np arrays", "pres_arr", "=", "np", ".", "array", "(", "pres", ")", "features_arr", "=", "np", ".", "array", "(", "features", ")", "# starting with a random word", "reordered_list", "=", "[", "]", "reordered_features", "=", "[", "]", "# start with a random choice", "idx", "=", "np", ".", "random", ".", "choice", "(", "len", "(", "pres", ")", ",", "1", ")", "[", "0", "]", "# original inds", "inds", "=", "list", "(", "range", "(", "len", "(", "pres", ")", ")", ")", "# keep track of the indices", "inds_used", "=", "[", "idx", "]", "# get the word", "current_word", "=", "pres", "[", "idx", "]", "# get the features dict", "current_features", "=", "features", "[", "idx", "]", "# append that word to the reordered list", "reordered_list", ".", "append", "(", "current_word", ")", "# append the features to the reordered list", "reordered_features", ".", "append", "(", "current_features", ")", "# loop over the word list", "for", "i", "in", "range", "(", "len", "(", "pres", ")", "-", "1", ")", ":", "# sample from the stick", "feature_sample", "=", "feature_stick", "[", "np", ".", "random", ".", "choice", "(", "len", "(", "feature_stick", ")", ",", "1", ")", "[", "0", "]", "]", "# indices left", "inds_left", "=", "[", "ind", "for", "ind", "in", "inds", "if", "ind", "not", "in", "inds_used", "]", "# make a copy of the words filtering out the already used ones", "words_left", "=", "pres", "[", "inds_left", "]", "# get word distances for the word", "dists_left", "=", "np", ".", "array", "(", "[", "dist_dict", "[", "current_word", "]", "[", "word", "]", "[", "feature_sample", "]", "for", "word", "in", "words_left", "]", ")", "# features left", "features_left", "=", "features", "[", "inds_left", "]", "# normalize distances", "dists_left_max", "=", "np", ".", "max", "(", "dists_left", ")", "if", "dists_left_max", ">", "0", ":", "dists_left_norm", "=", "dists_left", "/", "np", ".", "max", "(", "dists_left", ")", "else", ":", "dists_left_norm", "=", "dists_left", "# get the min", "dists_left_min", "=", "np", ".", "min", "(", "-", "dists_left_norm", ")", "# invert the word distances to turn distance->similarity", "dists_left_inv", "=", "-", "dists_left_norm", "-", "dists_left_min", "+", ".01", "# create a word stick", "words_stick", "=", "[", "]", "for", "word", ",", "dist", "in", "zip", "(", "words_left", ",", "dists_left_inv", ")", ":", "words_stick", "+=", "[", "word", "]", "*", "int", 
"(", "np", ".", "power", "(", "dist", ",", "tau", ")", "*", "100", ")", "next_word", "=", "np", ".", "random", ".", "choice", "(", "words_stick", ")", "next_word_idx", "=", "np", ".", "where", "(", "pres", "==", "next_word", ")", "[", "0", "]", "inds_used", ".", "append", "(", "next_word_idx", ")", "reordered_list", ".", "append", "(", "next_word", ")", "reordered_features", ".", "append", "(", "features", "[", "next_word_idx", "]", "[", "0", "]", ")", "return", "Egg", "(", "pres", "=", "[", "reordered_list", "]", ",", "rec", "=", "[", "reordered_list", "]", ",", "features", "=", "[", "[", "reordered_features", "]", "]", ",", "dist_funcs", "=", "dist_funcs", ")", "# parse egg", "pres", ",", "rec", ",", "features", ",", "dist_funcs", "=", "parse_egg", "(", "egg", ")", "# get params needed for list reordering", "features", "=", "presenter", ".", "get_params", "(", "'fingerprint'", ")", ".", "get_features", "(", ")", "alpha", "=", "presenter", ".", "get_params", "(", "'alpha'", ")", "tau", "=", "presenter", ".", "get_params", "(", "'tau'", ")", "weights", "=", "fingerprint", "# invert the weights if strategy is destabilize", "if", "strategy", "==", "'destabilize'", ":", "weights", "=", "1", "-", "weights", "# compute feature stick", "feature_stick", "=", "compute_feature_stick", "(", "features", ",", "weights", ",", "alpha", ")", "# reorder list", "return", "reorder_list", "(", "egg", ",", "feature_stick", ",", "dist_dict", ",", "tau", ")" ]
Reorders a list according to strategy
[ "Reorders", "a", "list", "according", "to", "strategy" ]
71dd53c792dd915dc84879d8237e3582dd68b7a4
https://github.com/ContextLab/quail/blob/71dd53c792dd915dc84879d8237e3582dd68b7a4/quail/fingerprint.py#L346-L464
train
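A self-contained sketch of the "stick" idea that order_stick implements: feature weights raised to alpha set how many copies of each feature land on the stick, so uniform draws favor high-weight features. All names and values below are invented for illustration.

    import numpy as np

    features = ['category', 'size', 'first_letter']
    weights = np.array([0.8, 0.5, 0.2])
    alpha = 2
    stick = []
    for f, w in zip(features, weights):
        stick += [f] * int(np.power(w, alpha) * 100)  # w**alpha copies, scaled by 100
    draws = np.random.choice(stick, size=1000)
    print({f: float(np.mean(draws == f)) for f in features})  # sampling proportions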
ContextLab/quail
quail/fingerprint.py
stick_perm
def stick_perm(presenter, egg, dist_dict, strategy): """Computes weights for one reordering using stick-breaking method""" # seed RNG np.random.seed() # unpack egg egg_pres, egg_rec, egg_features, egg_dist_funcs = parse_egg(egg) # reorder regg = order_stick(presenter, egg, dist_dict, strategy) # unpack regg regg_pres, regg_rec, regg_features, regg_dist_funcs = parse_egg(regg) # # get the order regg_pres = list(regg_pres) egg_pres = list(egg_pres) idx = [egg_pres.index(r) for r in regg_pres] # compute weights weights = compute_feature_weights_dict(list(regg_pres), list(regg_pres), list(regg_features), dist_dict) # save out the order orders = idx return weights, orders
python
def stick_perm(presenter, egg, dist_dict, strategy): """Computes weights for one reordering using stick-breaking method""" # seed RNG np.random.seed() # unpack egg egg_pres, egg_rec, egg_features, egg_dist_funcs = parse_egg(egg) # reorder regg = order_stick(presenter, egg, dist_dict, strategy) # unpack regg regg_pres, regg_rec, regg_features, regg_dist_funcs = parse_egg(regg) # # get the order regg_pres = list(regg_pres) egg_pres = list(egg_pres) idx = [egg_pres.index(r) for r in regg_pres] # compute weights weights = compute_feature_weights_dict(list(regg_pres), list(regg_pres), list(regg_features), dist_dict) # save out the order orders = idx return weights, orders
[ "def", "stick_perm", "(", "presenter", ",", "egg", ",", "dist_dict", ",", "strategy", ")", ":", "# seed RNG", "np", ".", "random", ".", "seed", "(", ")", "# unpack egg", "egg_pres", ",", "egg_rec", ",", "egg_features", ",", "egg_dist_funcs", "=", "parse_egg", "(", "egg", ")", "# reorder", "regg", "=", "order_stick", "(", "presenter", ",", "egg", ",", "dist_dict", ",", "strategy", ")", "# unpack regg", "regg_pres", ",", "regg_rec", ",", "regg_features", ",", "regg_dist_funcs", "=", "parse_egg", "(", "regg", ")", "# # get the order", "regg_pres", "=", "list", "(", "regg_pres", ")", "egg_pres", "=", "list", "(", "egg_pres", ")", "idx", "=", "[", "egg_pres", ".", "index", "(", "r", ")", "for", "r", "in", "regg_pres", "]", "# compute weights", "weights", "=", "compute_feature_weights_dict", "(", "list", "(", "regg_pres", ")", ",", "list", "(", "regg_pres", ")", ",", "list", "(", "regg_features", ")", ",", "dist_dict", ")", "# save out the order", "orders", "=", "idx", "return", "weights", ",", "orders" ]
Computes weights for one reordering using stick-breaking method
[ "Computes", "weights", "for", "one", "reordering", "using", "stick", "-", "breaking", "method" ]
71dd53c792dd915dc84879d8237e3582dd68b7a4
https://github.com/ContextLab/quail/blob/71dd53c792dd915dc84879d8237e3582dd68b7a4/quail/fingerprint.py#L577-L603
train
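A small sketch of the index-recovery step inside stick_perm (idx = [egg_pres.index(r) for r in regg_pres]), mapping a reordered list back to positions in the original presentation order; the words are toy values.

    orig = ['cat', 'dog', 'fish', 'bird']       # original presentation order
    reordered = ['fish', 'cat', 'bird', 'dog']  # order produced by one stick draw
    idx = [orig.index(r) for r in reordered]
    print(idx)                                  # [2, 0, 3, 1]
    assert [orig[i] for i in idx] == reordered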
ContextLab/quail
quail/fingerprint.py
compute_distances_dict
def compute_distances_dict(egg): """ Creates a nested dict of distances """ pres, rec, features, dist_funcs = parse_egg(egg) pres_list = list(pres) features_list = list(features) # initialize dist dict distances = {} # for each word in the list for idx1, item1 in enumerate(pres_list): distances[item1]={} # for each word in the list for idx2, item2 in enumerate(pres_list): distances[item1][item2]={} # for each feature in dist_funcs for feature in dist_funcs: distances[item1][item2][feature] = builtin_dist_funcs[dist_funcs[feature]](features_list[idx1][feature],features_list[idx2][feature]) return distances
python
def compute_distances_dict(egg): """ Creates a nested dict of distances """ pres, rec, features, dist_funcs = parse_egg(egg) pres_list = list(pres) features_list = list(features) # initialize dist dict distances = {} # for each word in the list for idx1, item1 in enumerate(pres_list): distances[item1]={} # for each word in the list for idx2, item2 in enumerate(pres_list): distances[item1][item2]={} # for each feature in dist_funcs for feature in dist_funcs: distances[item1][item2][feature] = builtin_dist_funcs[dist_funcs[feature]](features_list[idx1][feature],features_list[idx2][feature]) return distances
[ "def", "compute_distances_dict", "(", "egg", ")", ":", "pres", ",", "rec", ",", "features", ",", "dist_funcs", "=", "parse_egg", "(", "egg", ")", "pres_list", "=", "list", "(", "pres", ")", "features_list", "=", "list", "(", "features", ")", "# initialize dist dict", "distances", "=", "{", "}", "# for each word in the list", "for", "idx1", ",", "item1", "in", "enumerate", "(", "pres_list", ")", ":", "distances", "[", "item1", "]", "=", "{", "}", "# for each word in the list", "for", "idx2", ",", "item2", "in", "enumerate", "(", "pres_list", ")", ":", "distances", "[", "item1", "]", "[", "item2", "]", "=", "{", "}", "# for each feature in dist_funcs", "for", "feature", "in", "dist_funcs", ":", "distances", "[", "item1", "]", "[", "item2", "]", "[", "feature", "]", "=", "builtin_dist_funcs", "[", "dist_funcs", "[", "feature", "]", "]", "(", "features_list", "[", "idx1", "]", "[", "feature", "]", ",", "features_list", "[", "idx2", "]", "[", "feature", "]", ")", "return", "distances" ]
Creates a nested dict of distances
[ "Creates", "a", "nested", "dict", "of", "distances" ]
71dd53c792dd915dc84879d8237e3582dd68b7a4
https://github.com/ContextLab/quail/blob/71dd53c792dd915dc84879d8237e3582dd68b7a4/quail/fingerprint.py#L637-L660
train
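A sketch of the nested structure compute_distances_dict builds, distances[item1][item2][feature]; the record looks distance functions up by name in builtin_dist_funcs, whereas this standalone version substitutes a plain callable for illustration.

    items = ['cat', 'dog']
    feats = {'cat': {'length': 3}, 'dog': {'length': 3}}
    dist_funcs = {'length': lambda a, b: abs(a - b)}  # stand-in for a named builtin
    distances = {i1: {i2: {f: fn(feats[i1][f], feats[i2][f])
                           for f, fn in dist_funcs.items()}
                      for i2 in items}
                 for i1 in items}
    print(distances['cat']['dog']['length'])  # 0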
ContextLab/quail
quail/fingerprint.py
compute_feature_weights_dict
def compute_feature_weights_dict(pres_list, rec_list, feature_list, dist_dict): """ Compute clustering scores along a set of feature dimensions Parameters ---------- pres_list : list list of presented words rec_list : list list of recalled words feature_list : list list of feature dicts for presented words dist_dict : dict nested dict of pairwise distances for each feature Returns ---------- weights : list list of clustering scores for each feature dimension """ # initialize the weights object for just this list weights = {} for feature in feature_list[0]: weights[feature] = [] # return default list if there is not enough data to compute the fingerprint if len(rec_list) < 2: print('Not enough recalls to compute fingerprint, returning default fingerprint.. (everything is .5)') for feature in feature_list[0]: weights[feature] = .5 return [weights[key] for key in weights] # initialize past word list past_words = [] past_idxs = [] # loop over words for i in range(len(rec_list)-1): # grab current word c = rec_list[i] # grab the next word n = rec_list[i + 1] # if both recalled words are in the encoding list and haven't been recalled before if (c in pres_list and n in pres_list) and (c not in past_words and n not in past_words): # for each feature for feature in feature_list[0]: # get the distance vector for the current word # dists = [dist_dict[c][j][feature] for j in dist_dict[c]] # distance between current and next word c_dist = dist_dict[c][n][feature] # filter dists removing the words that have already been recalled # dists_filt = np.array([dist for idx, dist in enumerate(dists) if idx not in past_idxs]) dists_filt = [dist_dict[c][j][feature] for j in dist_dict[c] if j not in past_words] # get indices avg_rank = np.mean(np.where(np.sort(dists_filt)[::-1] == c_dist)[0]+1) # compute the weight weights[feature].append(avg_rank / len(dists_filt)) # keep track of what has been recalled already past_idxs.append(pres_list.index(c)) past_words.append(c) # average over the cluster scores for a particular dimension for feature in weights: with warnings.catch_warnings(): warnings.simplefilter("ignore", category=RuntimeWarning) weights[feature] = np.nanmean(weights[feature]) return [weights[key] for key in weights]
python
def compute_feature_weights_dict(pres_list, rec_list, feature_list, dist_dict): """ Compute clustering scores along a set of feature dimensions Parameters ---------- pres_list : list list of presented words rec_list : list list of recalled words feature_list : list list of feature dicts for presented words dist_dict : dict nested dict of pairwise distances for each feature Returns ---------- weights : list list of clustering scores for each feature dimension """ # initialize the weights object for just this list weights = {} for feature in feature_list[0]: weights[feature] = [] # return default list if there is not enough data to compute the fingerprint if len(rec_list) < 2: print('Not enough recalls to compute fingerprint, returning default fingerprint.. (everything is .5)') for feature in feature_list[0]: weights[feature] = .5 return [weights[key] for key in weights] # initialize past word list past_words = [] past_idxs = [] # loop over words for i in range(len(rec_list)-1): # grab current word c = rec_list[i] # grab the next word n = rec_list[i + 1] # if both recalled words are in the encoding list and haven't been recalled before if (c in pres_list and n in pres_list) and (c not in past_words and n not in past_words): # for each feature for feature in feature_list[0]: # get the distance vector for the current word # dists = [dist_dict[c][j][feature] for j in dist_dict[c]] # distance between current and next word c_dist = dist_dict[c][n][feature] # filter dists removing the words that have already been recalled # dists_filt = np.array([dist for idx, dist in enumerate(dists) if idx not in past_idxs]) dists_filt = [dist_dict[c][j][feature] for j in dist_dict[c] if j not in past_words] # get indices avg_rank = np.mean(np.where(np.sort(dists_filt)[::-1] == c_dist)[0]+1) # compute the weight weights[feature].append(avg_rank / len(dists_filt)) # keep track of what has been recalled already past_idxs.append(pres_list.index(c)) past_words.append(c) # average over the cluster scores for a particular dimension for feature in weights: with warnings.catch_warnings(): warnings.simplefilter("ignore", category=RuntimeWarning) weights[feature] = np.nanmean(weights[feature]) return [weights[key] for key in weights]
[ "def", "compute_feature_weights_dict", "(", "pres_list", ",", "rec_list", ",", "feature_list", ",", "dist_dict", ")", ":", "# initialize the weights object for just this list", "weights", "=", "{", "}", "for", "feature", "in", "feature_list", "[", "0", "]", ":", "weights", "[", "feature", "]", "=", "[", "]", "# return default list if there is not enough data to compute the fingerprint", "if", "len", "(", "rec_list", ")", "<", "2", ":", "print", "(", "'Not enough recalls to compute fingerprint, returning default fingerprint.. (everything is .5)'", ")", "for", "feature", "in", "feature_list", "[", "0", "]", ":", "weights", "[", "feature", "]", "=", ".5", "return", "[", "weights", "[", "key", "]", "for", "key", "in", "weights", "]", "# initialize past word list", "past_words", "=", "[", "]", "past_idxs", "=", "[", "]", "# loop over words", "for", "i", "in", "range", "(", "len", "(", "rec_list", ")", "-", "1", ")", ":", "# grab current word", "c", "=", "rec_list", "[", "i", "]", "# grab the next word", "n", "=", "rec_list", "[", "i", "+", "1", "]", "# if both recalled words are in the encoding list and haven't been recalled before", "if", "(", "c", "in", "pres_list", "and", "n", "in", "pres_list", ")", "and", "(", "c", "not", "in", "past_words", "and", "n", "not", "in", "past_words", ")", ":", "# for each feature", "for", "feature", "in", "feature_list", "[", "0", "]", ":", "# get the distance vector for the current word", "# dists = [dist_dict[c][j][feature] for j in dist_dict[c]]", "# distance between current and next word", "c_dist", "=", "dist_dict", "[", "c", "]", "[", "n", "]", "[", "feature", "]", "# filter dists removing the words that have already been recalled", "# dists_filt = np.array([dist for idx, dist in enumerate(dists) if idx not in past_idxs])", "dists_filt", "=", "[", "dist_dict", "[", "c", "]", "[", "j", "]", "[", "feature", "]", "for", "j", "in", "dist_dict", "[", "c", "]", "if", "j", "not", "in", "past_words", "]", "# get indices", "avg_rank", "=", "np", ".", "mean", "(", "np", ".", "where", "(", "np", ".", "sort", "(", "dists_filt", ")", "[", ":", ":", "-", "1", "]", "==", "c_dist", ")", "[", "0", "]", "+", "1", ")", "# compute the weight", "weights", "[", "feature", "]", ".", "append", "(", "avg_rank", "/", "len", "(", "dists_filt", ")", ")", "# keep track of what has been recalled already", "past_idxs", ".", "append", "(", "pres_list", ".", "index", "(", "c", ")", ")", "past_words", ".", "append", "(", "c", ")", "# average over the cluster scores for a particular dimension", "for", "feature", "in", "weights", ":", "with", "warnings", ".", "catch_warnings", "(", ")", ":", "warnings", ".", "simplefilter", "(", "\"ignore\"", ",", "category", "=", "RuntimeWarning", ")", "weights", "[", "feature", "]", "=", "np", ".", "nanmean", "(", "weights", "[", "feature", "]", ")", "return", "[", "weights", "[", "key", "]", "for", "key", "in", "weights", "]" ]
Compute clustering scores along a set of feature dimensions Parameters ---------- pres_list : list list of presented words rec_list : list list of recalled words feature_list : list list of feature dicts for presented words dist_dict : dict nested dict of pairwise distances for each feature Returns ---------- weights : list list of clustering scores for each feature dimension
[ "Compute", "clustering", "scores", "along", "a", "set", "of", "feature", "dimensions" ]
71dd53c792dd915dc84879d8237e3582dd68b7a4
https://github.com/ContextLab/quail/blob/71dd53c792dd915dc84879d8237e3582dd68b7a4/quail/fingerprint.py#L662-L739
train
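A sketch of the percentile-rank step at the heart of compute_feature_weights_dict: rank the distance to the word actually recalled next among distances to all not-yet-recalled words (sorted descending), then normalize by the candidate count. The numbers are invented.

    import numpy as np

    dists_filt = [0.9, 0.4, 0.1, 0.7]  # distances from current word to remaining words
    c_dist = 0.1                       # distance to the word recalled next
    ranks = np.where(np.sort(dists_filt)[::-1] == c_dist)[0] + 1
    weight = np.mean(ranks) / len(dists_filt)
    print(weight)  # 1.0 -> the nearest remaining word was recalled next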
ContextLab/quail
quail/fingerprint.py
Fingerprint.update
def update(self, egg, permute=False, nperms=1000, parallel=False): """ In-place method that updates fingerprint with new data Parameters ---------- egg : quail.Egg Data to update fingerprint Returns ---------- None """ # increment n self.n+=1 next_weights = np.nanmean(_analyze_chunk(egg, analysis=fingerprint_helper, analysis_type='fingerprint', pass_features=True, permute=permute, n_perms=nperms, parallel=parallel).values, 0) if self.state is not None: # multiply states by n c = self.state*self.n # update state self.state = np.nansum(np.array([c, next_weights]), axis=0)/(self.n+1) else: self.state = next_weights # update the history self.history.append(next_weights)
python
def update(self, egg, permute=False, nperms=1000, parallel=False): """ In-place method that updates fingerprint with new data Parameters ---------- egg : quail.Egg Data to update fingerprint Returns ---------- None """ # increment n self.n+=1 next_weights = np.nanmean(_analyze_chunk(egg, analysis=fingerprint_helper, analysis_type='fingerprint', pass_features=True, permute=permute, n_perms=nperms, parallel=parallel).values, 0) if self.state is not None: # multiply states by n c = self.state*self.n # update state self.state = np.nansum(np.array([c, next_weights]), axis=0)/(self.n+1) else: self.state = next_weights # update the history self.history.append(next_weights)
[ "def", "update", "(", "self", ",", "egg", ",", "permute", "=", "False", ",", "nperms", "=", "1000", ",", "parallel", "=", "False", ")", ":", "# increment n", "self", ".", "n", "+=", "1", "next_weights", "=", "np", ".", "nanmean", "(", "_analyze_chunk", "(", "egg", ",", "analysis", "=", "fingerprint_helper", ",", "analysis_type", "=", "'fingerprint'", ",", "pass_features", "=", "True", ",", "permute", "=", "permute", ",", "n_perms", "=", "nperms", ",", "parallel", "=", "parallel", ")", ".", "values", ",", "0", ")", "if", "self", ".", "state", "is", "not", "None", ":", "# multiply states by n", "c", "=", "self", ".", "state", "*", "self", ".", "n", "# update state", "self", ".", "state", "=", "np", ".", "nansum", "(", "np", ".", "array", "(", "[", "c", ",", "next_weights", "]", ")", ",", "axis", "=", "0", ")", "/", "(", "self", ".", "n", "+", "1", ")", "else", ":", "self", ".", "state", "=", "next_weights", "# update the history", "self", ".", "history", ".", "append", "(", "next_weights", ")" ]
In-place method that updates fingerprint with new data Parameters ---------- egg : quail.Egg Data to update fingerprint Returns ---------- None
[ "In", "-", "place", "method", "that", "updates", "fingerprint", "with", "new", "data" ]
71dd53c792dd915dc84879d8237e3582dd68b7a4
https://github.com/ContextLab/quail/blob/71dd53c792dd915dc84879d8237e3582dd68b7a4/quail/fingerprint.py#L83-L121
train
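A sketch of the state update in Fingerprint.update, isolated from quail; note the method increments n before reweighting, so the old state is multiplied by the new count. The weight vectors are made up.

    import numpy as np

    state, n = None, 0
    for next_weights in (np.array([0.6, 0.4]), np.array([0.8, 0.2])):
        n += 1                        # incremented first, as in the method
        if state is None:
            state = next_weights
        else:
            state = np.nansum(np.array([state * n, next_weights]), axis=0) / (n + 1)
    print(state)                      # [0.667, 0.333] for these inputs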
ContextLab/quail
quail/fingerprint.py
OptimalPresenter.order
def order(self, egg, method='permute', nperms=2500, strategy=None, distfun='correlation', fingerprint=None): """ Reorders a list of stimuli to match a fingerprint Parameters ---------- egg : quail.Egg Data to compute fingerprint method : str Method to re-sort list. Can be 'stick' or 'permute' (default: permute) nperms : int Number of permutations to use. Only used if method='permute'. (default: 2500) strategy : str or None The strategy to use to reorder the list. This can be 'stabilize', 'destabilize', 'random' or None. If None, the self.strategy field will be used. (default: None) distfun : str or function The distance function to reorder the list fingerprint to the target fingerprint. Can be any distance function supported by scipy.spatial.distance.cdist. For more info, see: https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.cdist.html (default: correlation) fingerprint : quail.Fingerprint or np.array Fingerprint (or just the state of a fingerprint) to reorder by. If None, the list will be reordered according to the fingerprint attached to the presenter object. Returns ---------- egg : quail.Egg Egg re-sorted to match fingerprint """ def order_perm(self, egg, dist_dict, strategy, nperm, distperm, fingerprint): """ This function re-sorts a list by computing permutations of a given list and choosing the one that maximizes/minimizes variance. """ # parse egg pres, rec, features, dist_funcs = parse_egg(egg) # length of list pres_len = len(pres) weights = [] orders = [] for i in range(nperms): x = rand_perm(pres, features, dist_dict, dist_funcs) weights.append(x[0]) orders.append(x[1]) weights = np.array(weights) orders = np.array(orders) # find the closest (or farthest) if strategy=='stabilize': closest = orders[np.nanargmin(cdist(np.array(fingerprint, ndmin=2), weights, distperm)),:].astype(int).tolist() elif strategy=='destabilize': closest = orders[np.nanargmax(cdist(np.array(fingerprint, ndmin=2), weights, distperm)),:].astype(int).tolist() # return a re-sorted egg return Egg(pres=[list(pres[closest])], rec=[list(pres[closest])], features=[list(features[closest])]) def order_best_stick(self, egg, dist_dict, strategy, nperms, distfun, fingerprint): # parse egg pres, rec, features, dist_funcs = parse_egg(egg) results = Parallel(n_jobs=multiprocessing.cpu_count())( delayed(stick_perm)(self, egg, dist_dict, strategy) for i in range(nperms)) weights = np.array([x[0] for x in results]) orders = np.array([x[1] for x in results]) # find the closest (or farthest) if strategy=='stabilize': closest = orders[np.nanargmin(cdist(np.array(fingerprint, ndmin=2), weights, distfun)),:].astype(int).tolist() elif strategy=='destabilize': closest = orders[np.nanargmax(cdist(np.array(fingerprint, ndmin=2), weights, distfun)),:].astype(int).tolist() # return a re-sorted egg return Egg(pres=[list(pres[closest])], rec=[list(pres[closest])], features=[list(features[closest])], dist_funcs=dist_funcs) def order_best_choice(self, egg, dist_dict, nperms, distfun, fingerprint): # get strategy strategy = self.strategy # parse egg pres, rec, features, dist_funcs = parse_egg(egg) results = Parallel(n_jobs=multiprocessing.cpu_count())( delayed(choice_perm)(self, egg, dist_dict) for i in range(nperms)) weights = np.array([x[0] for x in results]) orders = np.array([x[1] for x in results]) # find the closest (or farthest) if strategy=='stabilize': closest = orders[np.nanargmin(cdist(np.array(fingerprint, ndmin=2), weights, distfun)),:].astype(int).tolist() elif strategy=='destabilize': closest = orders[np.nanargmax(cdist(np.array(fingerprint, ndmin=2), weights, distfun)),:].astype(int).tolist() # return a re-sorted egg return Egg(pres=[list(pres[closest])], rec=[list(pres[closest])], features=[list(features[closest])], dist_funcs=dist_funcs) # if strategy is not set explicitly, default to the class strategy if strategy is None: strategy = self.strategy dist_dict = compute_distances_dict(egg) if fingerprint is None: fingerprint = self.get_params('fingerprint').state elif isinstance(fingerprint, Fingerprint): fingerprint = fingerprint.state else: print('using custom fingerprint') if (strategy=='random') or (method=='random'): return shuffle_egg(egg) elif method=='permute': return order_perm(self, egg, dist_dict, strategy, nperms, distfun, fingerprint) # elif method=='stick': return order_stick(self, egg, dist_dict, strategy, fingerprint) # elif method=='best_stick': return order_best_stick(self, egg, dist_dict, strategy, nperms, distfun, fingerprint) # elif method=='best_choice': return order_best_choice(self, egg, dist_dict, nperms, fingerprint)
python
def order(self, egg, method='permute', nperms=2500, strategy=None, distfun='correlation', fingerprint=None): """ Reorders a list of stimuli to match a fingerprint Parameters ---------- egg : quail.Egg Data to compute fingerprint method : str Method to re-sort list. Can be 'stick' or 'permute' (default: permute) nperms : int Number of permutations to use. Only used if method='permute'. (default: 2500) strategy : str or None The strategy to use to reorder the list. This can be 'stabilize', 'destabilize', 'random' or None. If None, the self.strategy field will be used. (default: None) distfun : str or function The distance function to reorder the list fingerprint to the target fingerprint. Can be any distance function supported by scipy.spatial.distance.cdist. For more info, see: https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.cdist.html (default: correlation) fingerprint : quail.Fingerprint or np.array Fingerprint (or just the state of a fingerprint) to reorder by. If None, the list will be reordered according to the fingerprint attached to the presenter object. Returns ---------- egg : quail.Egg Egg re-sorted to match fingerprint """ def order_perm(self, egg, dist_dict, strategy, nperm, distperm, fingerprint): """ This function re-sorts a list by computing permutations of a given list and choosing the one that maximizes/minimizes variance. """ # parse egg pres, rec, features, dist_funcs = parse_egg(egg) # length of list pres_len = len(pres) weights = [] orders = [] for i in range(nperms): x = rand_perm(pres, features, dist_dict, dist_funcs) weights.append(x[0]) orders.append(x[1]) weights = np.array(weights) orders = np.array(orders) # find the closest (or farthest) if strategy=='stabilize': closest = orders[np.nanargmin(cdist(np.array(fingerprint, ndmin=2), weights, distperm)),:].astype(int).tolist() elif strategy=='destabilize': closest = orders[np.nanargmax(cdist(np.array(fingerprint, ndmin=2), weights, distperm)),:].astype(int).tolist() # return a re-sorted egg return Egg(pres=[list(pres[closest])], rec=[list(pres[closest])], features=[list(features[closest])]) def order_best_stick(self, egg, dist_dict, strategy, nperms, distfun, fingerprint): # parse egg pres, rec, features, dist_funcs = parse_egg(egg) results = Parallel(n_jobs=multiprocessing.cpu_count())( delayed(stick_perm)(self, egg, dist_dict, strategy) for i in range(nperms)) weights = np.array([x[0] for x in results]) orders = np.array([x[1] for x in results]) # find the closest (or farthest) if strategy=='stabilize': closest = orders[np.nanargmin(cdist(np.array(fingerprint, ndmin=2), weights, distfun)),:].astype(int).tolist() elif strategy=='destabilize': closest = orders[np.nanargmax(cdist(np.array(fingerprint, ndmin=2), weights, distfun)),:].astype(int).tolist() # return a re-sorted egg return Egg(pres=[list(pres[closest])], rec=[list(pres[closest])], features=[list(features[closest])], dist_funcs=dist_funcs) def order_best_choice(self, egg, dist_dict, nperms, distfun, fingerprint): # get strategy strategy = self.strategy # parse egg pres, rec, features, dist_funcs = parse_egg(egg) results = Parallel(n_jobs=multiprocessing.cpu_count())( delayed(choice_perm)(self, egg, dist_dict) for i in range(nperms)) weights = np.array([x[0] for x in results]) orders = np.array([x[1] for x in results]) # find the closest (or farthest) if strategy=='stabilize': closest = orders[np.nanargmin(cdist(np.array(fingerprint, ndmin=2), weights, distfun)),:].astype(int).tolist() elif strategy=='destabilize': closest = orders[np.nanargmax(cdist(np.array(fingerprint, ndmin=2), weights, distfun)),:].astype(int).tolist() # return a re-sorted egg return Egg(pres=[list(pres[closest])], rec=[list(pres[closest])], features=[list(features[closest])], dist_funcs=dist_funcs) # if strategy is not set explicitly, default to the class strategy if strategy is None: strategy = self.strategy dist_dict = compute_distances_dict(egg) if fingerprint is None: fingerprint = self.get_params('fingerprint').state elif isinstance(fingerprint, Fingerprint): fingerprint = fingerprint.state else: print('using custom fingerprint') if (strategy=='random') or (method=='random'): return shuffle_egg(egg) elif method=='permute': return order_perm(self, egg, dist_dict, strategy, nperms, distfun, fingerprint) # elif method=='stick': return order_stick(self, egg, dist_dict, strategy, fingerprint) # elif method=='best_stick': return order_best_stick(self, egg, dist_dict, strategy, nperms, distfun, fingerprint) # elif method=='best_choice': return order_best_choice(self, egg, dist_dict, nperms, fingerprint)
[ "def", "order", "(", "self", ",", "egg", ",", "method", "=", "'permute'", ",", "nperms", "=", "2500", ",", "strategy", "=", "None", ",", "distfun", "=", "'correlation'", ",", "fingerprint", "=", "None", ")", ":", "def", "order_perm", "(", "self", ",", "egg", ",", "dist_dict", ",", "strategy", ",", "nperm", ",", "distperm", ",", "fingerprint", ")", ":", "\"\"\"\n This function re-sorts a list by computing permutations of a given\n list and choosing the one that maximizes/minimizes variance.\n \"\"\"", "# parse egg", "pres", ",", "rec", ",", "features", ",", "dist_funcs", "=", "parse_egg", "(", "egg", ")", "# length of list", "pres_len", "=", "len", "(", "pres", ")", "weights", "=", "[", "]", "orders", "=", "[", "]", "for", "i", "in", "range", "(", "nperms", ")", ":", "x", "=", "rand_perm", "(", "pres", ",", "features", ",", "dist_dict", ",", "dist_funcs", ")", "weights", ".", "append", "(", "x", "[", "0", "]", ")", "orders", ".", "append", "(", "x", "[", "1", "]", ")", "weights", "=", "np", ".", "array", "(", "weights", ")", "orders", "=", "np", ".", "array", "(", "orders", ")", "# find the closest (or farthest)", "if", "strategy", "==", "'stabilize'", ":", "closest", "=", "orders", "[", "np", ".", "nanargmin", "(", "cdist", "(", "np", ".", "array", "(", "fingerprint", ",", "ndmin", "=", "2", ")", ",", "weights", ",", "distperm", ")", ")", ",", ":", "]", ".", "astype", "(", "int", ")", ".", "tolist", "(", ")", "elif", "strategy", "==", "'destabilize'", ":", "closest", "=", "orders", "[", "np", ".", "nanargmax", "(", "cdist", "(", "np", ".", "array", "(", "fingerprint", ",", "ndmin", "=", "2", ")", ",", "weights", ",", "distperm", ")", ")", ",", ":", "]", ".", "astype", "(", "int", ")", ".", "tolist", "(", ")", "# return a re-sorted egg", "return", "Egg", "(", "pres", "=", "[", "list", "(", "pres", "[", "closest", "]", ")", "]", ",", "rec", "=", "[", "list", "(", "pres", "[", "closest", "]", ")", "]", ",", "features", "=", "[", "list", "(", "features", "[", "closest", "]", ")", "]", ")", "def", "order_best_stick", "(", "self", ",", "egg", ",", "dist_dict", ",", "strategy", ",", "nperms", ",", "distfun", ",", "fingerprint", ")", ":", "# parse egg", "pres", ",", "rec", ",", "features", ",", "dist_funcs", "=", "parse_egg", "(", "egg", ")", "results", "=", "Parallel", "(", "n_jobs", "=", "multiprocessing", ".", "cpu_count", "(", ")", ")", "(", "delayed", "(", "stick_perm", ")", "(", "self", ",", "egg", ",", "dist_dict", ",", "strategy", ")", "for", "i", "in", "range", "(", "nperms", ")", ")", "weights", "=", "np", ".", "array", "(", "[", "x", "[", "0", "]", "for", "x", "in", "results", "]", ")", "orders", "=", "np", ".", "array", "(", "[", "x", "[", "1", "]", "for", "x", "in", "results", "]", ")", "# find the closest (or farthest)", "if", "strategy", "==", "'stabilize'", ":", "closest", "=", "orders", "[", "np", ".", "nanargmin", "(", "cdist", "(", "np", ".", "array", "(", "fingerprint", ",", "ndmin", "=", "2", ")", ",", "weights", ",", "distfun", ")", ")", ",", ":", "]", ".", "astype", "(", "int", ")", ".", "tolist", "(", ")", "elif", "strategy", "==", "'destabilize'", ":", "closest", "=", "orders", "[", "np", ".", "nanargmax", "(", "cdist", "(", "np", ".", "array", "(", "fingerprint", ",", "ndmin", "=", "2", ")", ",", "weights", ",", "distfun", ")", ")", ",", ":", "]", ".", "astype", "(", "int", ")", ".", "tolist", "(", ")", "# return a re-sorted egg", "return", "Egg", "(", "pres", "=", "[", "list", "(", "pres", "[", "closest", "]", ")", "]", ",", "rec", "=", "[", "list", "(", "pres", "[", 
"closest", "]", ")", "]", ",", "features", "=", "[", "list", "(", "features", "[", "closest", "]", ")", "]", ",", "dist_funcs", "=", "dist_funcs", ")", "def", "order_best_choice", "(", "self", ",", "egg", ",", "dist_dict", ",", "nperms", ",", "distfun", ",", "fingerprint", ")", ":", "# get strategy", "strategy", "=", "self", ".", "strategy", "# parse egg", "pres", ",", "rec", ",", "features", ",", "dist_funcs", "=", "parse_egg", "(", "egg", ")", "results", "=", "Parallel", "(", "n_jobs", "=", "multiprocessing", ".", "cpu_count", "(", ")", ")", "(", "delayed", "(", "choice_perm", ")", "(", "self", ",", "egg", ",", "dist_dict", ")", "for", "i", "in", "range", "(", "nperms", ")", ")", "weights", "=", "np", ".", "array", "(", "[", "x", "[", "0", "]", "for", "x", "in", "results", "]", ")", "orders", "=", "np", ".", "array", "(", "[", "x", "[", "1", "]", "for", "x", "in", "results", "]", ")", "# find the closest (or farthest)", "if", "strategy", "==", "'stabilize'", ":", "closest", "=", "orders", "[", "np", ".", "nanargmin", "(", "cdist", "(", "np", ".", "array", "(", "fingerprint", ",", "ndmin", "=", "2", ")", ",", "weights", ",", "distfun", ")", ")", ",", ":", "]", ".", "astype", "(", "int", ")", ".", "tolist", "(", ")", "elif", "strategy", "==", "'destabilize'", ":", "closest", "=", "orders", "[", "np", ".", "nanargmax", "(", "cdist", "(", "np", ".", "array", "(", "fingerprint", ",", "ndmin", "=", "2", ")", ",", "weights", ",", "distfun", ")", ")", ",", ":", "]", ".", "astype", "(", "int", ")", ".", "tolist", "(", ")", "# return a re-sorted egg", "return", "Egg", "(", "pres", "=", "[", "list", "(", "pres", "[", "closest", "]", ")", "]", ",", "rec", "=", "[", "list", "(", "pres", "[", "closest", "]", ")", "]", ",", "features", "=", "[", "list", "(", "features", "[", "closest", "]", ")", "]", ",", "dist_funcs", "=", "dist_funcs", ")", "# if strategy is not set explicitly, default to the class strategy", "if", "strategy", "is", "None", ":", "strategy", "=", "self", ".", "strategy", "dist_dict", "=", "compute_distances_dict", "(", "egg", ")", "if", "fingerprint", "is", "None", ":", "fingerprint", "=", "self", ".", "get_params", "(", "'fingerprint'", ")", ".", "state", "elif", "isinstance", "(", "fingerprint", ",", "Fingerprint", ")", ":", "fingerprint", "=", "fingerprint", ".", "state", "else", ":", "print", "(", "'using custom fingerprint'", ")", "if", "(", "strategy", "==", "'random'", ")", "or", "(", "method", "==", "'random'", ")", ":", "return", "shuffle_egg", "(", "egg", ")", "elif", "method", "==", "'permute'", ":", "return", "order_perm", "(", "self", ",", "egg", ",", "dist_dict", ",", "strategy", ",", "nperms", ",", "distfun", ",", "fingerprint", ")", "#", "elif", "method", "==", "'stick'", ":", "return", "order_stick", "(", "self", ",", "egg", ",", "dist_dict", ",", "strategy", ",", "fingerprint", ")", "#", "elif", "method", "==", "'best_stick'", ":", "return", "order_best_stick", "(", "self", ",", "egg", ",", "dist_dict", ",", "strategy", ",", "nperms", ",", "distfun", ",", "fingerprint", ")", "#", "elif", "method", "==", "'best_choice'", ":", "return", "order_best_choice", "(", "self", ",", "egg", ",", "dist_dict", ",", "nperms", ",", "fingerprint", ")" ]
Reorders a list of stimuli to match a fingerprint Parameters ---------- egg : quail.Egg Data to compute fingerprint method : str Method to re-sort list. Can be 'stick' or 'permute' (default: permute) nperms : int Number of permutations to use. Only used if method='permute'. (default: 2500) strategy : str or None The strategy to use to reorder the list. This can be 'stabilize', 'destabilize', 'random' or None. If None, the self.strategy field will be used. (default: None) distfun : str or function The distance function to reorder the list fingerprint to the target fingerprint. Can be any distance function supported by scipy.spatial.distance.cdist. For more info, see: https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.cdist.html (default: correlation) fingerprint : quail.Fingerprint or np.array Fingerprint (or just the state of a fingerprint) to reorder by. If None, the list will be reordered according to the fingerprint attached to the presenter object. Returns ---------- egg : quail.Egg Egg re-sorted to match fingerprint
[ "Reorders", "a", "list", "of", "stimuli", "to", "match", "a", "fingerprint" ]
71dd53c792dd915dc84879d8237e3582dd68b7a4
https://github.com/ContextLab/quail/blob/71dd53c792dd915dc84879d8237e3582dd68b7a4/quail/fingerprint.py#L203-L344
train
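A sketch of the selection step shared by the order_* helpers above: among candidate orderings, keep the one whose weight vector is closest to (stabilize) or farthest from (destabilize) the target fingerprint under cdist. Candidate weights and orders are invented.

    import numpy as np
    from scipy.spatial.distance import cdist

    fingerprint = np.array([0.7, 0.3, 0.5])
    weights = np.array([[0.6, 0.4, 0.5],   # one row of feature weights per candidate
                        [0.2, 0.8, 0.5],
                        [0.9, 0.1, 0.6]])
    orders = np.array([[0, 1, 2], [2, 1, 0], [1, 0, 2]])
    d = cdist(np.array(fingerprint, ndmin=2), weights, 'correlation')
    print(orders[np.nanargmin(d), :])  # stabilize: most similar candidate
    print(orders[np.nanargmax(d), :])  # destabilize: least similar candidate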
aouyar/PyMunin
pysysinfo/phpopc.py
OPCinfo.initStats
def initStats(self, extras=None): """Query and parse Web Server Status Page. @param extras: Include extra metrics, which can be computationally more expensive. """ url = "%s://%s:%d/%s" % (self._proto, self._host, self._port, self._monpath) response = util.get_url(url, self._user, self._password) #with open('/tmp/opcinfo.json') as f: # response = f.read() self._statusDict = json.loads(response)
python
def initStats(self, extras=None): """Query and parse Web Server Status Page. @param extras: Include extra metrics, which can be computationally more expensive. """ url = "%s://%s:%d/%s" % (self._proto, self._host, self._port, self._monpath) response = util.get_url(url, self._user, self._password) #with open('/tmp/opcinfo.json') as f: # response = f.read() self._statusDict = json.loads(response)
[ "def", "initStats", "(", "self", ",", "extras", "=", "None", ")", ":", "url", "=", "\"%s://%s:%d/%s\"", "%", "(", "self", ".", "_proto", ",", "self", ".", "_host", ",", "self", ".", "_port", ",", "self", ".", "_monpath", ")", "response", "=", "util", ".", "get_url", "(", "url", ",", "self", ".", "_user", ",", "self", ".", "_password", ")", "#with open('/tmp/opcinfo.json') as f:", "# response = f.read()", "self", ".", "_statusDict", "=", "json", ".", "loads", "(", "response", ")" ]
Query and parse Web Server Status Page. @param extras: Include extra metrics, which can be computationally more expensive.
[ "Query", "and", "parse", "Web", "Server", "Status", "Page", "." ]
4f58a64b6b37c85a84cc7e1e07aafaa0321b249d
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/phpopc.py#L72-L83
train
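A standalone sketch of what initStats does: build the monitor URL and parse a JSON response body. PyMunin's util.get_url does the fetching in the record; a canned response string stands in here so the sketch runs offline, and the host and path values are hypothetical.

    import json

    proto, host, port, monpath = 'http', 'localhost', 80, 'opcinfo.php'  # hypothetical
    url = "%s://%s:%d/%s" % (proto, host, port, monpath)
    response = '{"memory_usage": {"used_memory": 1024}}'  # canned body for the sketch
    status = json.loads(response)
    print(url, status['memory_usage']['used_memory'])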
aouyar/PyMunin
pysysinfo/diskio.py
DiskIOinfo._getDevMajorMinor
def _getDevMajorMinor(self, devpath): """Return major and minor device number for block device path devpath. @param devpath: Full path for block device. @return: Tuple (major, minor). """ fstat = os.stat(devpath) if stat.S_ISBLK(fstat.st_mode): return(os.major(fstat.st_rdev), os.minor(fstat.st_rdev)) else: raise ValueError("The file %s is not a valid block device." % devpath)
python
def _getDevMajorMinor(self, devpath): """Return major and minor device number for block device path devpath. @param devpath: Full path for block device. @return: Tuple (major, minor). """ fstat = os.stat(devpath) if stat.S_ISBLK(fstat.st_mode): return(os.major(fstat.st_rdev), os.minor(fstat.st_rdev)) else: raise ValueError("The file %s is not a valid block device." % devpath)
[ "def", "_getDevMajorMinor", "(", "self", ",", "devpath", ")", ":", "fstat", "=", "os", ".", "stat", "(", "devpath", ")", "if", "stat", ".", "S_ISBLK", "(", "fstat", ".", "st_mode", ")", ":", "return", "(", "os", ".", "major", "(", "fstat", ".", "st_rdev", ")", ",", "os", ".", "minor", "(", "fstat", ".", "st_rdev", ")", ")", "else", ":", "raise", "ValueError", "(", "\"The file %s is not a valid block device.\"", "%", "devpath", ")" ]
Return major and minor device number for block device path devpath. @param devpath: Full path for block device. @return: Tuple (major, minor).
[ "Return", "major", "and", "minor", "device", "number", "for", "block", "device", "path", "devpath", "." ]
4f58a64b6b37c85a84cc7e1e07aafaa0321b249d
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/diskio.py#L53-L63
train
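A sketch of the major/minor lookup in _getDevMajorMinor, runnable on a Linux machine with a block device at the given path; /dev/sda is only an assumption, so adjust devpath for your system.

    import os
    import stat

    devpath = '/dev/sda'  # hypothetical block device path
    st = os.stat(devpath)
    if stat.S_ISBLK(st.st_mode):
        print(os.major(st.st_rdev), os.minor(st.st_rdev))
    else:
        print('%s is not a block device' % devpath)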
aouyar/PyMunin
pysysinfo/diskio.py
DiskIOinfo._getUniqueDev
def _getUniqueDev(self, devpath): """Return unique device for any block device path. @param devpath: Full path for block device. @return: Unique device string without the /dev prefix. """ realpath = os.path.realpath(devpath) mobj = re.match('\/dev\/(.*)$', realpath) if mobj: dev = mobj.group(1) if dev in self._diskStats: return dev else: try: (major, minor) = self._getDevMajorMinor(realpath) except: return None return self._mapMajorMinor2dev.get((major, minor)) return None
python
def _getUniqueDev(self, devpath): """Return unique device for any block device path. @param devpath: Full path for block device. @return: Unique device string without the /dev prefix. """ realpath = os.path.realpath(devpath) mobj = re.match('\/dev\/(.*)$', realpath) if mobj: dev = mobj.group(1) if dev in self._diskStats: return dev else: try: (major, minor) = self._getDevMajorMinor(realpath) except: return None return self._mapMajorMinor2dev.get((major, minor)) return None
[ "def", "_getUniqueDev", "(", "self", ",", "devpath", ")", ":", "realpath", "=", "os", ".", "path", ".", "realpath", "(", "devpath", ")", "mobj", "=", "re", ".", "match", "(", "'\\/dev\\/(.*)$'", ",", "realpath", ")", "if", "mobj", ":", "dev", "=", "mobj", ".", "group", "(", "1", ")", "if", "dev", "in", "self", ".", "_diskStats", ":", "return", "dev", "else", ":", "try", ":", "(", "major", ",", "minor", ")", "=", "self", ".", "_getDevMajorMinor", "(", "realpath", ")", "except", ":", "return", "None", "return", "self", ".", "_mapMajorMinor2dev", ".", "get", "(", "(", "major", ",", "minor", ")", ")", "return", "None" ]
Return unique device for any block device path. @param devpath: Full path for block device. @return: Unique device string without the /dev prefix.
[ "Return", "unique", "device", "for", "any", "block", "device", "path", "." ]
4f58a64b6b37c85a84cc7e1e07aafaa0321b249d
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/diskio.py#L65-L84
train
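The normalization step in _getUniqueDev is the part worth isolating: resolve any symlinked device path (e.g. /dev/disk/by-*) to its canonical /dev name. A sketch of just that step; the major/minor fallback needs class state and is omitted:

import os
import re

def dev_name(devpath):
    # Resolve symlinks, e.g. /dev/disk/by-uuid/... -> /dev/sda1,
    # then strip the /dev/ prefix to get the short device name.
    mobj = re.match(r'/dev/(.*)$', os.path.realpath(devpath))
    return mobj.group(1) if mobj else None

print(dev_name('/dev/disk/by-label/root'))  # e.g. 'sda1' (label is illustrative)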
aouyar/PyMunin
pysysinfo/diskio.py
DiskIOinfo._initBlockMajorMap
def _initBlockMajorMap(self): """Parses /proc/devices to initialize device class - major number map for block devices. """ self._mapMajorDevclass = {} try: fp = open(devicesFile, 'r') data = fp.read() fp.close() except IOError: raise IOError('Failed reading device information from file: %s' % devicesFile) skip = True for line in data.splitlines(): if skip: if re.match('block.*:', line, re.IGNORECASE): skip = False else: mobj = re.match(r'\s*(\d+)\s+([\w\-]+)$', line) if mobj: major = int(mobj.group(1)) devtype = mobj.group(2) self._mapMajorDevclass[major] = devtype if devtype == 'device-mapper': self._dmMajorNum = major
python
def _initBlockMajorMap(self): """Parses /proc/devices to initialize device class - major number map for block devices. """ self._mapMajorDevclass = {} try: fp = open(devicesFile, 'r') data = fp.read() fp.close() except IOError: raise IOError('Failed reading device information from file: %s' % devicesFile) skip = True for line in data.splitlines(): if skip: if re.match('block.*:', line, re.IGNORECASE): skip = False else: mobj = re.match(r'\s*(\d+)\s+([\w\-]+)$', line) if mobj: major = int(mobj.group(1)) devtype = mobj.group(2) self._mapMajorDevclass[major] = devtype if devtype == 'device-mapper': self._dmMajorNum = major
[ "def", "_initBlockMajorMap", "(", "self", ")", ":", "self", ".", "_mapMajorDevclass", "=", "{", "}", "try", ":", "fp", "=", "open", "(", "devicesFile", ",", "'r'", ")", "data", "=", "fp", ".", "read", "(", ")", "fp", ".", "close", "(", ")", "except", ":", "raise", "IOError", "(", "'Failed reading device information from file: %s'", "%", "devicesFile", ")", "skip", "=", "True", "for", "line", "in", "data", ".", "splitlines", "(", ")", ":", "if", "skip", ":", "if", "re", ".", "match", "(", "'block.*:'", ",", "line", ",", "re", ".", "IGNORECASE", ")", ":", "skip", "=", "False", "else", ":", "mobj", "=", "re", ".", "match", "(", "'\\s*(\\d+)\\s+([\\w\\-]+)$'", ",", "line", ")", "if", "mobj", ":", "major", "=", "int", "(", "mobj", ".", "group", "(", "1", ")", ")", "devtype", "=", "mobj", ".", "group", "(", "2", ")", "self", ".", "_mapMajorDevclass", "[", "major", "]", "=", "devtype", "if", "devtype", "==", "'device-mapper'", ":", "self", ".", "_dmMajorNum", "=", "major" ]
Parses /proc/devices to initialize device class - major number map for block devices.
[ "Parses", "/", "proc", "/", "devices", "to", "initialize", "device", "class", "-", "major", "number", "map", "for", "block", "devices", "." ]
4f58a64b6b37c85a84cc7e1e07aafaa0321b249d
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/diskio.py#L86-L111
train
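A self-contained version of the /proc/devices parse used above: skip lines until the 'Block devices:' header, then collect each '<major> <name>' pair into a dict:

import re

def block_major_map(path='/proc/devices'):
    # Lines before the "Block devices:" header are skipped; after it,
    # each "<major> <name>" row maps a major number to a device class.
    majors = {}
    skip = True
    with open(path) as fp:
        for line in fp:
            if skip:
                if re.match('block.*:', line, re.IGNORECASE):
                    skip = False
            else:
                mobj = re.match(r'\s*(\d+)\s+([\w\-]+)$', line.rstrip())
                if mobj:
                    majors[int(mobj.group(1))] = mobj.group(2)
    return majors

print(block_major_map())  # e.g. {8: 'sd', 253: 'device-mapper', ...}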
aouyar/PyMunin
pysysinfo/diskio.py
DiskIOinfo._initDMinfo
def _initDMinfo(self): """Check files in /dev/mapper to initialize data structures for mappings between device-mapper devices, minor device numbers, VGs and LVs. """ self._mapLVtuple2dm = {} self._mapLVname2dm = {} self._vgTree = {} if self._dmMajorNum is None: self._initBlockMajorMap() for file in os.listdir(devmapperDir): mobj = re.match(r'([a-zA-Z0-9+_.\-]*[a-zA-Z0-9+_.])-([a-zA-Z0-9+_.][a-zA-Z0-9+_.\-]*)$', file) if mobj: path = os.path.join(devmapperDir, file) (major, minor) = self._getDevMajorMinor(path) if major == self._dmMajorNum: vg = mobj.group(1).replace('--', '-') lv = mobj.group(2).replace('--', '-') dmdev = "dm-%d" % minor self._mapLVtuple2dm[(vg,lv)] = dmdev self._mapLVname2dm[file] = dmdev if not vg in self._vgTree: self._vgTree[vg] = [] self._vgTree[vg].append(lv)
python
def _initDMinfo(self): """Check files in /dev/mapper to initialize data structures for mappings between device-mapper devices, minor device numbers, VGs and LVs. """ self._mapLVtuple2dm = {} self._mapLVname2dm = {} self._vgTree = {} if self._dmMajorNum is None: self._initBlockMajorMap() for file in os.listdir(devmapperDir): mobj = re.match(r'([a-zA-Z0-9+_.\-]*[a-zA-Z0-9+_.])-([a-zA-Z0-9+_.][a-zA-Z0-9+_.\-]*)$', file) if mobj: path = os.path.join(devmapperDir, file) (major, minor) = self._getDevMajorMinor(path) if major == self._dmMajorNum: vg = mobj.group(1).replace('--', '-') lv = mobj.group(2).replace('--', '-') dmdev = "dm-%d" % minor self._mapLVtuple2dm[(vg,lv)] = dmdev self._mapLVname2dm[file] = dmdev if not vg in self._vgTree: self._vgTree[vg] = [] self._vgTree[vg].append(lv)
[ "def", "_initDMinfo", "(", "self", ")", ":", "self", ".", "_mapLVtuple2dm", "=", "{", "}", "self", ".", "_mapLVname2dm", "=", "{", "}", "self", ".", "_vgTree", "=", "{", "}", "if", "self", ".", "_dmMajorNum", "is", "None", ":", "self", ".", "_initBlockMajorMap", "(", ")", "for", "file", "in", "os", ".", "listdir", "(", "devmapperDir", ")", ":", "mobj", "=", "re", ".", "match", "(", "'([a-zA-Z0-9+_.\\-]*[a-zA-Z0-9+_.])-([a-zA-Z0-9+_.][a-zA-Z0-9+_.\\-]*)$'", ",", "file", ")", "if", "mobj", ":", "path", "=", "os", ".", "path", ".", "join", "(", "devmapperDir", ",", "file", ")", "(", "major", ",", "minor", ")", "=", "self", ".", "_getDevMajorMinor", "(", "path", ")", "if", "major", "==", "self", ".", "_dmMajorNum", ":", "vg", "=", "mobj", ".", "group", "(", "1", ")", ".", "replace", "(", "'--'", ",", "'-'", ")", "lv", "=", "mobj", ".", "group", "(", "2", ")", ".", "replace", "(", "'--'", ",", "'-'", ")", "dmdev", "=", "\"dm-%d\"", "%", "minor", "self", ".", "_mapLVtuple2dm", "[", "(", "vg", ",", "lv", ")", "]", "=", "dmdev", "self", ".", "_mapLVname2dm", "[", "file", "]", "=", "dmdev", "if", "not", "vg", "in", "self", ".", "_vgTree", ":", "self", ".", "_vgTree", "[", "vg", "]", "=", "[", "]", "self", ".", "_vgTree", "[", "vg", "]", ".", "append", "(", "lv", ")" ]
Check files in /dev/mapper to initialize data structures for mappings between device-mapper devices, minor device numbers, VGs and LVs.
[ "Check", "files", "in", "/", "dev", "/", "mapper", "to", "initialize", "data", "structures", "for", "mappings", "between", "device", "-", "mapper", "devices", "minor", "device", "numbers", "VGs", "and", "LVs", "." ]
4f58a64b6b37c85a84cc7e1e07aafaa0321b249d
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/diskio.py#L113-L137
train
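The subtle part of _initDMinfo is the /dev/mapper name mangling: VG and LV are joined by a single '-', while a literal '-' inside either name is doubled. A sketch of just the decode step:

import re

def split_mapper_name(name):
    # Split a /dev/mapper entry into (vg, lv), undoing the '--' escape
    # that device-mapper uses for literal hyphens in either name.
    mobj = re.match(
        r'([a-zA-Z0-9+_.\-]*[a-zA-Z0-9+_.])-([a-zA-Z0-9+_.][a-zA-Z0-9+_.\-]*)$',
        name)
    if not mobj:
        return None
    return (mobj.group(1).replace('--', '-'),
            mobj.group(2).replace('--', '-'))

print(split_mapper_name('vg00-my--data'))  # ('vg00', 'my-data')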