repository_name
stringlengths
5
67
func_path_in_repository
stringlengths
4
234
func_name
stringlengths
0
314
whole_func_string
stringlengths
52
3.87M
language
stringclasses
6 values
func_code_string
stringlengths
52
3.87M
func_code_tokens
listlengths
15
672k
func_documentation_string
stringlengths
1
47.2k
func_documentation_tokens
listlengths
1
3.92k
split_name
stringclasses
1 value
func_code_url
stringlengths
85
339
MoseleyBioinformaticsLab/nmrstarlib
nmrstarlib/nmrstarlib.py
StarFile.read
def read(filehandle, source):
    """Read data into a :class:`~nmrstarlib.nmrstarlib.StarFile` instance.

    :param filehandle: file-like object.
    :type filehandle: :py:class:`io.TextIOWrapper`, :py:class:`gzip.GzipFile`,
                      :py:class:`bz2.BZ2File`, :py:class:`zipfile.ZipFile`
    :param str source: String indicating where file is coming from (path, url).
    :return: subclass of :class:`~nmrstarlib.nmrstarlib.StarFile`, or
             :py:obj:`None` when the file is empty.
    :rtype: :class:`~nmrstarlib.nmrstarlib.NMRStarFile` or :class:`~nmrstarlib.nmrstarlib.CIFFile`
    :raises TypeError: If the file content is in an unrecognized format.
    """
    # try/finally guarantees the filehandle is closed on every path,
    # including empty input and the unknown-format TypeError (the original
    # code leaked the handle on those paths).
    try:
        input_str = filehandle.read()
        if not input_str:
            return None

        nmrstar_str = StarFile._is_nmrstar(input_str)
        if nmrstar_str:
            starfile = NMRStarFile(source)
            starfile._build_file(nmrstar_str)
            return starfile

        cif_str = StarFile._is_cif(input_str)
        if cif_str:
            starfile = CIFFile(source)
            starfile._build_file(cif_str)
            return starfile

        json_str = StarFile._is_json(input_str)
        if json_str:
            # JSON content: distinguish NMR-STAR vs CIF by characteristic keys.
            if u"save_" in json_str:
                starfile = NMRStarFile(source)
            elif u"entry.id" in json_str:
                starfile = CIFFile(source)
            else:
                raise TypeError("Unknown file format")
            starfile.update(json.loads(json_str, object_pairs_hook=OrderedDict))
            starfile.id = starfile[u"data"]
            return starfile

        raise TypeError("Unknown file format")
    finally:
        filehandle.close()
python
def read(filehandle, source): """Read data into a :class:`~nmrstarlib.nmrstarlib.StarFile` instance. :param filehandle: file-like object. :type filehandle: :py:class:`io.TextIOWrapper`, :py:class:`gzip.GzipFile`, :py:class:`bz2.BZ2File`, :py:class:`zipfile.ZipFile` :param str source: String indicating where file is coming from (path, url). :return: subclass of :class:`~nmrstarlib.nmrstarlib.StarFile`. :rtype: :class:`~nmrstarlib.nmrstarlib.NMRStarFile` or :class:`~nmrstarlib.nmrstarlib.CIFFile` """ input_str = filehandle.read() nmrstar_str = StarFile._is_nmrstar(input_str) cif_str = StarFile._is_cif(input_str) json_str = StarFile._is_json(input_str) if not input_str: pass elif nmrstar_str: starfile = NMRStarFile(source) starfile._build_file(nmrstar_str) filehandle.close() return starfile elif cif_str: starfile = CIFFile(source) starfile._build_file(cif_str) filehandle.close() return starfile elif json_str: if u"save_" in json_str: starfile = NMRStarFile(source) starfile.update(json.loads(json_str, object_pairs_hook=OrderedDict)) starfile.id = starfile[u"data"] filehandle.close() return starfile elif u"entry.id" in json_str: starfile = CIFFile(source) starfile.update(json.loads(json_str, object_pairs_hook=OrderedDict)) starfile.id = starfile[u"data"] filehandle.close() return starfile else: raise TypeError("Unknown file format") else: raise TypeError("Unknown file format")
[ "def", "read", "(", "filehandle", ",", "source", ")", ":", "input_str", "=", "filehandle", ".", "read", "(", ")", "nmrstar_str", "=", "StarFile", ".", "_is_nmrstar", "(", "input_str", ")", "cif_str", "=", "StarFile", ".", "_is_cif", "(", "input_str", ")", ...
Read data into a :class:`~nmrstarlib.nmrstarlib.StarFile` instance. :param filehandle: file-like object. :type filehandle: :py:class:`io.TextIOWrapper`, :py:class:`gzip.GzipFile`, :py:class:`bz2.BZ2File`, :py:class:`zipfile.ZipFile` :param str source: String indicating where file is coming from (path, url). :return: subclass of :class:`~nmrstarlib.nmrstarlib.StarFile`. :rtype: :class:`~nmrstarlib.nmrstarlib.NMRStarFile` or :class:`~nmrstarlib.nmrstarlib.CIFFile`
[ "Read", "data", "into", "a", ":", "class", ":", "~nmrstarlib", ".", "nmrstarlib", ".", "StarFile", "instance", "." ]
train
https://github.com/MoseleyBioinformaticsLab/nmrstarlib/blob/f2adabbca04d5a134ce6ba3211099d1457787ff2/nmrstarlib/nmrstarlib.py#L117-L164
MoseleyBioinformaticsLab/nmrstarlib
nmrstarlib/nmrstarlib.py
StarFile.write
def write(self, filehandle, file_format):
    """Write :class:`~nmrstarlib.nmrstarlib.StarFile` data into file.

    :param filehandle: file-like object.
    :type filehandle: :py:class:`io.TextIOWrapper`
    :param str file_format: Format to use to write data: `nmrstar`, `cif`, or `json`.
    :return: None
    :rtype: :py:obj:`None`
    :raises TypeError: If `file_format` does not match this file type.
    :raises IOError: If `filehandle` is not writable.
    """
    # Serialize first, so an unknown format raises before the filehandle
    # is touched and no partially-written file is left behind.
    if file_format == "json":
        output_str = self._to_json()
    elif file_format == "nmrstar" and isinstance(self, NMRStarFile):
        output_str = self._to_star()
    elif file_format == "cif" and isinstance(self, CIFFile):
        output_str = self._to_star()
    else:
        raise TypeError("Unknown file format.")

    try:
        filehandle.write(output_str)
    except IOError:
        raise IOError('"filehandle" parameter must be writable.')
    finally:
        # Close even when the write fails (the original leaked the handle
        # on the error paths).
        filehandle.close()
python
def write(self, filehandle, file_format): """Write :class:`~nmrstarlib.nmrstarlib.StarFile` data into file. :param filehandle: file-like object. :type filehandle: :py:class:`io.TextIOWrapper` :param str file_format: Format to use to write data: `nmrstar`, `cif`, or `json`. :return: None :rtype: :py:obj:`None` """ try: if file_format == "json": json_str = self._to_json() filehandle.write(json_str) elif file_format == "nmrstar" and isinstance(self, NMRStarFile): nmrstar_str = self._to_star() filehandle.write(nmrstar_str) elif file_format == "cif" and isinstance(self, CIFFile): cif_str = self._to_star() filehandle.write(cif_str) else: raise TypeError("Unknown file format.") except IOError: raise IOError('"filehandle" parameter must be writable.') filehandle.close()
[ "def", "write", "(", "self", ",", "filehandle", ",", "file_format", ")", ":", "try", ":", "if", "file_format", "==", "\"json\"", ":", "json_str", "=", "self", ".", "_to_json", "(", ")", "filehandle", ".", "write", "(", "json_str", ")", "elif", "file_form...
Write :class:`~nmrstarlib.nmrstarlib.StarFile` data into file. :param filehandle: file-like object. :type filehandle: :py:class:`io.TextIOWrapper` :param str file_format: Format to use to write data: `nmrstar`, `cif`, or `json`. :return: None :rtype: :py:obj:`None`
[ "Write", ":", "class", ":", "~nmrstarlib", ".", "nmrstarlib", ".", "StarFile", "data", "into", "file", "." ]
train
https://github.com/MoseleyBioinformaticsLab/nmrstarlib/blob/f2adabbca04d5a134ce6ba3211099d1457787ff2/nmrstarlib/nmrstarlib.py#L166-L189
MoseleyBioinformaticsLab/nmrstarlib
nmrstarlib/nmrstarlib.py
StarFile._to_star
def _to_star(self): """Save :class:`~nmrstarlib.nmrstarlib.StarFile` into NMR-STAR or CIF formatted string. :return: NMR-STAR string. :rtype: :py:class:`str` """ star_str = io.StringIO() self.print_file(star_str) return star_str.getvalue()
python
def _to_star(self): """Save :class:`~nmrstarlib.nmrstarlib.StarFile` into NMR-STAR or CIF formatted string. :return: NMR-STAR string. :rtype: :py:class:`str` """ star_str = io.StringIO() self.print_file(star_str) return star_str.getvalue()
[ "def", "_to_star", "(", "self", ")", ":", "star_str", "=", "io", ".", "StringIO", "(", ")", "self", ".", "print_file", "(", "star_str", ")", "return", "star_str", ".", "getvalue", "(", ")" ]
Save :class:`~nmrstarlib.nmrstarlib.StarFile` into NMR-STAR or CIF formatted string. :return: NMR-STAR string. :rtype: :py:class:`str`
[ "Save", ":", "class", ":", "~nmrstarlib", ".", "nmrstarlib", ".", "StarFile", "into", "NMR", "-", "STAR", "or", "CIF", "formatted", "string", "." ]
train
https://github.com/MoseleyBioinformaticsLab/nmrstarlib/blob/f2adabbca04d5a134ce6ba3211099d1457787ff2/nmrstarlib/nmrstarlib.py#L232-L240
MoseleyBioinformaticsLab/nmrstarlib
nmrstarlib/nmrstarlib.py
StarFile._is_nmrstar
def _is_nmrstar(string): """Test if input string is in NMR-STAR format. :param string: Input string. :type string: :py:class:`str` or :py:class:`bytes` :return: Input string if in NMR-STAR format or False otherwise. :rtype: :py:class:`str` or :py:obj:`False` """ if (string[0:5] == u"data_" and u"save_" in string) or (string[0:5] == b"data_" and b"save_" in string): return string return False
python
def _is_nmrstar(string): """Test if input string is in NMR-STAR format. :param string: Input string. :type string: :py:class:`str` or :py:class:`bytes` :return: Input string if in NMR-STAR format or False otherwise. :rtype: :py:class:`str` or :py:obj:`False` """ if (string[0:5] == u"data_" and u"save_" in string) or (string[0:5] == b"data_" and b"save_" in string): return string return False
[ "def", "_is_nmrstar", "(", "string", ")", ":", "if", "(", "string", "[", "0", ":", "5", "]", "==", "u\"data_\"", "and", "u\"save_\"", "in", "string", ")", "or", "(", "string", "[", "0", ":", "5", "]", "==", "b\"data_\"", "and", "b\"save_\"", "in", ...
Test if input string is in NMR-STAR format. :param string: Input string. :type string: :py:class:`str` or :py:class:`bytes` :return: Input string if in NMR-STAR format or False otherwise. :rtype: :py:class:`str` or :py:obj:`False`
[ "Test", "if", "input", "string", "is", "in", "NMR", "-", "STAR", "format", "." ]
train
https://github.com/MoseleyBioinformaticsLab/nmrstarlib/blob/f2adabbca04d5a134ce6ba3211099d1457787ff2/nmrstarlib/nmrstarlib.py#L243-L253
MoseleyBioinformaticsLab/nmrstarlib
nmrstarlib/nmrstarlib.py
StarFile._is_cif
def _is_cif(string): """Test if input string is in CIF format. :param string: Input string. :type string: :py:class:`str` or :py:class:`bytes` :return: Input string if in CIF format or False otherwise. :rtype: :py:class:`str` or :py:obj:`False` """ if (string[0:5] == u"data_" and u"_entry.id" in string) or (string[0:5] == b"data_" and b"_entry.id" in string): return string return False
python
def _is_cif(string): """Test if input string is in CIF format. :param string: Input string. :type string: :py:class:`str` or :py:class:`bytes` :return: Input string if in CIF format or False otherwise. :rtype: :py:class:`str` or :py:obj:`False` """ if (string[0:5] == u"data_" and u"_entry.id" in string) or (string[0:5] == b"data_" and b"_entry.id" in string): return string return False
[ "def", "_is_cif", "(", "string", ")", ":", "if", "(", "string", "[", "0", ":", "5", "]", "==", "u\"data_\"", "and", "u\"_entry.id\"", "in", "string", ")", "or", "(", "string", "[", "0", ":", "5", "]", "==", "b\"data_\"", "and", "b\"_entry.id\"", "in"...
Test if input string is in CIF format. :param string: Input string. :type string: :py:class:`str` or :py:class:`bytes` :return: Input string if in CIF format or False otherwise. :rtype: :py:class:`str` or :py:obj:`False`
[ "Test", "if", "input", "string", "is", "in", "CIF", "format", ".", ":", "param", "string", ":", "Input", "string", ".", ":", "type", "string", ":", ":", "py", ":", "class", ":", "str", "or", ":", "py", ":", "class", ":", "bytes", ":", "return", "...
train
https://github.com/MoseleyBioinformaticsLab/nmrstarlib/blob/f2adabbca04d5a134ce6ba3211099d1457787ff2/nmrstarlib/nmrstarlib.py#L256-L266
MoseleyBioinformaticsLab/nmrstarlib
nmrstarlib/nmrstarlib.py
StarFile._is_json
def _is_json(string): """Test if input string is in JSON format. :param string: Input string. :type string: :py:class:`str` or :py:class:`bytes` :return: Input string if in JSON format or False otherwise. :rtype: :py:class:`str` or :py:obj:`False` """ try: if isinstance(string, bytes): string = string.decode("utf-8") json.loads(string) elif isinstance(string, str): json.loads(string) else: raise TypeError("Expecting <class 'str'> or <class 'bytes'>, but {} was passed".format(type(string))) return string except ValueError: return False
python
def _is_json(string): """Test if input string is in JSON format. :param string: Input string. :type string: :py:class:`str` or :py:class:`bytes` :return: Input string if in JSON format or False otherwise. :rtype: :py:class:`str` or :py:obj:`False` """ try: if isinstance(string, bytes): string = string.decode("utf-8") json.loads(string) elif isinstance(string, str): json.loads(string) else: raise TypeError("Expecting <class 'str'> or <class 'bytes'>, but {} was passed".format(type(string))) return string except ValueError: return False
[ "def", "_is_json", "(", "string", ")", ":", "try", ":", "if", "isinstance", "(", "string", ",", "bytes", ")", ":", "string", "=", "string", ".", "decode", "(", "\"utf-8\"", ")", "json", ".", "loads", "(", "string", ")", "elif", "isinstance", "(", "st...
Test if input string is in JSON format. :param string: Input string. :type string: :py:class:`str` or :py:class:`bytes` :return: Input string if in JSON format or False otherwise. :rtype: :py:class:`str` or :py:obj:`False`
[ "Test", "if", "input", "string", "is", "in", "JSON", "format", "." ]
train
https://github.com/MoseleyBioinformaticsLab/nmrstarlib/blob/f2adabbca04d5a134ce6ba3211099d1457787ff2/nmrstarlib/nmrstarlib.py#L269-L288
MoseleyBioinformaticsLab/nmrstarlib
nmrstarlib/nmrstarlib.py
NMRStarFile._build_file
def _build_file(self, nmrstar_str):
    """Build :class:`~nmrstarlib.nmrstarlib.NMRStarFile` object.

    :param nmrstar_str: NMR-STAR-formatted string.
    :type nmrstar_str: :py:class:`str` or :py:class:`bytes`
    :return: instance of :class:`~nmrstarlib.nmrstarlib.NMRStarFile`.
    :rtype: :class:`~nmrstarlib.nmrstarlib.NMRStarFile`
    """
    odict = self  # the file object itself is the ordered mapping being filled
    comment_count = 0

    lexer = bmrblex(nmrstar_str)

    token = next(lexer)
    # The lexer signals end-of-input with an empty-string token.
    while token != u"":
        try:
            if token[0:5] == u"save_":
                # Saveframe header: the full "save_<name>" token is used as the key.
                name = token
                frame = self._build_saveframe(lexer)
                if frame:  # frame is None/empty when the saveframe was skipped
                    odict[name] = frame
            elif token[0:5] == u"data_":
                # Data block header: the text after "data_" becomes the entry id.
                self.id = token[5:]
                odict[u"data"] = self.id
            elif token.lstrip().startswith(u"#"):
                # Preserve top-level comments under sequentially numbered keys.
                odict[u"comment_{}".format(comment_count)] = token
                comment_count += 1
            else:
                print("Error: Invalid token {}".format(token), file=sys.stderr)
                print("In _build_starfile try block", file=sys.stderr)
                raise InvalidToken("{}".format(token))
        except IndexError:
            print("Error: Invalid token {}".format(token), file=sys.stderr)
            print("In _build_starfile except block", file=sys.stderr)
            raise
        finally:
            # Advance to the next token even while an exception is propagating.
            token = next(lexer)

    return self
python
def _build_file(self, nmrstar_str): """Build :class:`~nmrstarlib.nmrstarlib.NMRStarFile` object. :param nmrstar_str: NMR-STAR-formatted string. :type nmrstar_str: :py:class:`str` or :py:class:`bytes` :return: instance of :class:`~nmrstarlib.nmrstarlib.NMRStarFile`. :rtype: :class:`~nmrstarlib.nmrstarlib.NMRStarFile` """ odict = self comment_count = 0 lexer = bmrblex(nmrstar_str) token = next(lexer) while token != u"": try: if token[0:5] == u"save_": name = token frame = self._build_saveframe(lexer) if frame: odict[name] = frame elif token[0:5] == u"data_": self.id = token[5:] odict[u"data"] = self.id elif token.lstrip().startswith(u"#"): odict[u"comment_{}".format(comment_count)] = token comment_count += 1 else: print("Error: Invalid token {}".format(token), file=sys.stderr) print("In _build_starfile try block", file=sys.stderr) raise InvalidToken("{}".format(token)) except IndexError: print("Error: Invalid token {}".format(token), file=sys.stderr) print("In _build_starfile except block", file=sys.stderr) raise finally: token = next(lexer) return self
[ "def", "_build_file", "(", "self", ",", "nmrstar_str", ")", ":", "odict", "=", "self", "comment_count", "=", "0", "lexer", "=", "bmrblex", "(", "nmrstar_str", ")", "token", "=", "next", "(", "lexer", ")", "while", "token", "!=", "u\"\"", ":", "try", ":...
Build :class:`~nmrstarlib.nmrstarlib.NMRStarFile` object. :param nmrstar_str: NMR-STAR-formatted string. :type nmrstar_str: :py:class:`str` or :py:class:`bytes` :return: instance of :class:`~nmrstarlib.nmrstarlib.NMRStarFile`. :rtype: :class:`~nmrstarlib.nmrstarlib.NMRStarFile`
[ "Build", ":", "class", ":", "~nmrstarlib", ".", "nmrstarlib", ".", "NMRStarFile", "object", "." ]
train
https://github.com/MoseleyBioinformaticsLab/nmrstarlib/blob/f2adabbca04d5a134ce6ba3211099d1457787ff2/nmrstarlib/nmrstarlib.py#L307-L348
MoseleyBioinformaticsLab/nmrstarlib
nmrstarlib/nmrstarlib.py
NMRStarFile._build_saveframe
def _build_saveframe(self, lexer):
    """Build NMR-STAR file saveframe.

    :param lexer: instance of the lexical analyzer.
    :type lexer: :func:`~nmrstarlib.bmrblex.bmrblex`
    :return: Saveframe dictionary, or None when the saveframe is skipped.
    :rtype: :py:class:`collections.OrderedDict` or :py:obj:`None`
    """
    odict = OrderedDict()
    loop_count = 0

    token = next(lexer)
    while token != u"save_":
        try:
            if token[0] == u"_":
                # This strips off the leading underscore of tagnames for readability
                odict[token[1:]] = next(lexer)

                # Skip the saveframe if it's not in the list of wanted categories
                if self._frame_categories:
                    if token == "_Saveframe_category" and odict[token[1:]] not in self._frame_categories:
                        raise SkipSaveFrame()

            elif token == u"loop_":
                odict[u"loop_{}".format(loop_count)] = self._build_loop(lexer)
                loop_count += 1

            elif token.lstrip().startswith(u"#"):
                # Comments inside a saveframe are ignored; "continue" still runs
                # the "finally" clause below, which advances to the next token.
                continue

            else:
                print("Error: Invalid token {}".format(token), file=sys.stderr)
                print("In _build_saveframe try block", file=sys.stderr)
                raise InvalidToken("{}".format(token))

        except IndexError:
            print("Error: Invalid token {}".format(token), file=sys.stderr)
            print("In _build_saveframe except block", file=sys.stderr)
            raise
        except SkipSaveFrame:
            # Drain the remainder of this saveframe; odict=None marks it skipped.
            self._skip_saveframe(lexer)
            odict = None
        finally:
            if odict is None:
                # Saveframe was skipped: force the loop to exit.
                token = u"save_"
            else:
                token = next(lexer)

    return odict
python
def _build_saveframe(self, lexer): """Build NMR-STAR file saveframe. :param lexer: instance of the lexical analyzer. :type lexer: :func:`~nmrstarlib.bmrblex.bmrblex` :return: Saveframe dictionary. :rtype: :py:class:`collections.OrderedDict` """ odict = OrderedDict() loop_count = 0 token = next(lexer) while token != u"save_": try: if token[0] == u"_": # This strips off the leading underscore of tagnames for readability odict[token[1:]] = next(lexer) # Skip the saveframe if it's not in the list of wanted categories if self._frame_categories: if token == "_Saveframe_category" and odict[token[1:]] not in self._frame_categories: raise SkipSaveFrame() elif token == u"loop_": odict[u"loop_{}".format(loop_count)] = self._build_loop(lexer) loop_count += 1 elif token.lstrip().startswith(u"#"): continue else: print("Error: Invalid token {}".format(token), file=sys.stderr) print("In _build_saveframe try block", file=sys.stderr) raise InvalidToken("{}".format(token)) except IndexError: print("Error: Invalid token {}".format(token), file=sys.stderr) print("In _build_saveframe except block", file=sys.stderr) raise except SkipSaveFrame: self._skip_saveframe(lexer) odict = None finally: if odict is None: token = u"save_" else: token = next(lexer) return odict
[ "def", "_build_saveframe", "(", "self", ",", "lexer", ")", ":", "odict", "=", "OrderedDict", "(", ")", "loop_count", "=", "0", "token", "=", "next", "(", "lexer", ")", "while", "token", "!=", "u\"save_\"", ":", "try", ":", "if", "token", "[", "0", "]...
Build NMR-STAR file saveframe. :param lexer: instance of the lexical analyzer. :type lexer: :func:`~nmrstarlib.bmrblex.bmrblex` :return: Saveframe dictionary. :rtype: :py:class:`collections.OrderedDict`
[ "Build", "NMR", "-", "STAR", "file", "saveframe", "." ]
train
https://github.com/MoseleyBioinformaticsLab/nmrstarlib/blob/f2adabbca04d5a134ce6ba3211099d1457787ff2/nmrstarlib/nmrstarlib.py#L350-L397
MoseleyBioinformaticsLab/nmrstarlib
nmrstarlib/nmrstarlib.py
NMRStarFile._build_loop
def _build_loop(self, lexer): """Build saveframe loop. :param lexer: instance of lexical analyzer. :type lexer: :func:`~nmrstarlib.bmrblex.bmrblex` :return: Fields and values of the loop. :rtype: :py:class:`tuple` """ fields = [] values = [] token = next(lexer) while token[0] == u"_": fields.append(token[1:]) token = next(lexer) while token != u"stop_": values.append(token) token = next(lexer) assert float(len(values) / len(fields)).is_integer(), \ "Error in loop construction: number of fields must be equal to number of values." values = [OrderedDict(zip(fields, values[i:i + len(fields)])) for i in range(0, len(values), len(fields))] return fields, values
python
def _build_loop(self, lexer): """Build saveframe loop. :param lexer: instance of lexical analyzer. :type lexer: :func:`~nmrstarlib.bmrblex.bmrblex` :return: Fields and values of the loop. :rtype: :py:class:`tuple` """ fields = [] values = [] token = next(lexer) while token[0] == u"_": fields.append(token[1:]) token = next(lexer) while token != u"stop_": values.append(token) token = next(lexer) assert float(len(values) / len(fields)).is_integer(), \ "Error in loop construction: number of fields must be equal to number of values." values = [OrderedDict(zip(fields, values[i:i + len(fields)])) for i in range(0, len(values), len(fields))] return fields, values
[ "def", "_build_loop", "(", "self", ",", "lexer", ")", ":", "fields", "=", "[", "]", "values", "=", "[", "]", "token", "=", "next", "(", "lexer", ")", "while", "token", "[", "0", "]", "==", "u\"_\"", ":", "fields", ".", "append", "(", "token", "["...
Build saveframe loop. :param lexer: instance of lexical analyzer. :type lexer: :func:`~nmrstarlib.bmrblex.bmrblex` :return: Fields and values of the loop. :rtype: :py:class:`tuple`
[ "Build", "saveframe", "loop", "." ]
train
https://github.com/MoseleyBioinformaticsLab/nmrstarlib/blob/f2adabbca04d5a134ce6ba3211099d1457787ff2/nmrstarlib/nmrstarlib.py#L399-L423
MoseleyBioinformaticsLab/nmrstarlib
nmrstarlib/nmrstarlib.py
NMRStarFile._skip_saveframe
def _skip_saveframe(self, lexer): """Skip entire saveframe - keep emitting tokens until the end of saveframe. :param lexer: instance of the lexical analyzer class. :type lexer: :class:`~nmrstarlib.bmrblex.bmrblex` :return: None :rtype: :py:obj:`None` """ token = u"" while token != u"save_": token = next(lexer)
python
def _skip_saveframe(self, lexer): """Skip entire saveframe - keep emitting tokens until the end of saveframe. :param lexer: instance of the lexical analyzer class. :type lexer: :class:`~nmrstarlib.bmrblex.bmrblex` :return: None :rtype: :py:obj:`None` """ token = u"" while token != u"save_": token = next(lexer)
[ "def", "_skip_saveframe", "(", "self", ",", "lexer", ")", ":", "token", "=", "u\"\"", "while", "token", "!=", "u\"save_\"", ":", "token", "=", "next", "(", "lexer", ")" ]
Skip entire saveframe - keep emitting tokens until the end of saveframe. :param lexer: instance of the lexical analyzer class. :type lexer: :class:`~nmrstarlib.bmrblex.bmrblex` :return: None :rtype: :py:obj:`None`
[ "Skip", "entire", "saveframe", "-", "keep", "emitting", "tokens", "until", "the", "end", "of", "saveframe", "." ]
train
https://github.com/MoseleyBioinformaticsLab/nmrstarlib/blob/f2adabbca04d5a134ce6ba3211099d1457787ff2/nmrstarlib/nmrstarlib.py#L425-L435
MoseleyBioinformaticsLab/nmrstarlib
nmrstarlib/nmrstarlib.py
NMRStarFile.print_file
def print_file(self, f=sys.stdout, file_format="nmrstar", tw=3):
    """Print :class:`~nmrstarlib.nmrstarlib.NMRStarFile` into a file or stdout.

    :param io.StringIO f: writable file-like stream.
    :param str file_format: Format to use: `nmrstar` or `json`.
    :param int tw: Tab width.
    :return: None
    :rtype: :py:obj:`None`
    """
    # JSON is a single dump; everything else below is the NMR-STAR renderer.
    if file_format == "json":
        print(self._to_json(), file=f)
        return
    if file_format != "nmrstar":
        return

    for key in self.keys():
        if key == u"data":
            # Top-level entry id is emitted as "data_<id>".
            print(u"{}_{}\n".format(key, self[key]), file=f)
        elif key.startswith(u"comment"):
            # Stored comments are reproduced verbatim.
            print(u"{}".format(self[key]), file=f)
        else:
            # Saveframe: header line, body, then the closing "save_" marker.
            print(u"{}".format(key), file=f)
            self.print_saveframe(key, f, file_format, tw)
            print(u"\nsave_\n\n", file=f)
python
def print_file(self, f=sys.stdout, file_format="nmrstar", tw=3): """Print :class:`~nmrstarlib.nmrstarlib.NMRStarFile` into a file or stdout. :param io.StringIO f: writable file-like stream. :param str file_format: Format to use: `nmrstar` or `json`. :param int tw: Tab width. :return: None :rtype: :py:obj:`None` """ if file_format == "nmrstar": for saveframe in self.keys(): if saveframe == u"data": print(u"{}_{}\n".format(saveframe, self[saveframe]), file=f) elif saveframe.startswith(u"comment"): print(u"{}".format(self[saveframe]), file=f) else: print(u"{}".format(saveframe), file=f) self.print_saveframe(saveframe, f, file_format, tw) print(u"\nsave_\n\n", file=f) elif file_format == "json": print(self._to_json(), file=f)
[ "def", "print_file", "(", "self", ",", "f", "=", "sys", ".", "stdout", ",", "file_format", "=", "\"nmrstar\"", ",", "tw", "=", "3", ")", ":", "if", "file_format", "==", "\"nmrstar\"", ":", "for", "saveframe", "in", "self", ".", "keys", "(", ")", ":",...
Print :class:`~nmrstarlib.nmrstarlib.NMRStarFile` into a file or stdout. :param io.StringIO f: writable file-like stream. :param str file_format: Format to use: `nmrstar` or `json`. :param int tw: Tab width. :return: None :rtype: :py:obj:`None`
[ "Print", ":", "class", ":", "~nmrstarlib", ".", "nmrstarlib", ".", "NMRStarFile", "into", "a", "file", "or", "stdout", "." ]
train
https://github.com/MoseleyBioinformaticsLab/nmrstarlib/blob/f2adabbca04d5a134ce6ba3211099d1457787ff2/nmrstarlib/nmrstarlib.py#L437-L458
MoseleyBioinformaticsLab/nmrstarlib
nmrstarlib/nmrstarlib.py
NMRStarFile.print_saveframe
def print_saveframe(self, sf, f=sys.stdout, file_format="nmrstar", tw=3):
    """Print saveframe into a file or stdout.

    We need to keep track of how far over everything is tabbed: the
    "tab width" variable tw does this for us.

    :param str sf: Saveframe name.
    :param io.StringIO f: writable file-like stream.
    :param str file_format: Format to use: `nmrstar` or `json`.
    :param int tw: Tab width.
    :return: None
    :rtype: :py:obj:`None`
    """
    if file_format == "json":
        print(json.dumps(self[sf], sort_keys=False, indent=4), file=f)
        return
    if file_format != "nmrstar":
        return

    indent = tw * u" "
    for tag in self[sf].keys():
        if tag[:5] == "loop_":
            # Loops are delegated, wrapped in loop_/stop_ markers at double indent.
            print(u"\n{}loop_".format(indent), file=f)
            self.print_loop(sf, tag, f, file_format, tw * 2)
            print(u"\n{}stop_".format(indent), file=f)
            continue

        value = self[sf][tag]
        if value.endswith(u"\n"):
            # NMR-STAR "multiline string": emitted between ";" delimiter lines.
            print(u"{}_{}".format(indent, tag), file=f)
            print(u";\n{};".format(value), file=f)
        elif len(value.split()) > 1:
            # Values of two or more words must be escaped with single quotes.
            print(u"{}_{}\t {}".format(indent, tag, u"'{}'".format(value)), file=f)
        else:
            print(u"{}_{}\t {}".format(indent, tag, value), file=f)
python
def print_saveframe(self, sf, f=sys.stdout, file_format="nmrstar", tw=3): """Print saveframe into a file or stdout. We need to keep track of how far over everything is tabbed. The "tab width" variable tw does this for us. :param str sf: Saveframe name. :param io.StringIO f: writable file-like stream. :param str file_format: Format to use: `nmrstar` or `json`. :param int tw: Tab width. :return: None :rtype: :py:obj:`None` """ if file_format == "nmrstar": for sftag in self[sf].keys(): # handle loops if sftag[:5] == "loop_": print(u"\n{}loop_".format(tw * u" "), file=f) self.print_loop(sf, sftag, f, file_format, tw * 2) print(u"\n{}stop_".format(tw * u" "), file=f) # handle the NMR-Star "multiline string" elif self[sf][sftag].endswith(u"\n"): print(u"{}_{}".format(tw * u" ", sftag), file=f) print(u";\n{};".format(self[sf][sftag]), file=f) elif len(self[sf][sftag].split()) > 1: # need to escape value with quotes (i.e. u"'{}'".format()) if value consists of two or more words print(u"{}_{}\t {}".format(tw * u" ", sftag, u"'{}'".format(self[sf][sftag])), file=f) else: print(u"{}_{}\t {}".format(tw * u" ", sftag, self[sf][sftag]), file=f) elif file_format == "json": print(json.dumps(self[sf], sort_keys=False, indent=4), file=f)
[ "def", "print_saveframe", "(", "self", ",", "sf", ",", "f", "=", "sys", ".", "stdout", ",", "file_format", "=", "\"nmrstar\"", ",", "tw", "=", "3", ")", ":", "if", "file_format", "==", "\"nmrstar\"", ":", "for", "sftag", "in", "self", "[", "sf", "]",...
Print saveframe into a file or stdout. We need to keep track of how far over everything is tabbed. The "tab width" variable tw does this for us. :param str sf: Saveframe name. :param io.StringIO f: writable file-like stream. :param str file_format: Format to use: `nmrstar` or `json`. :param int tw: Tab width. :return: None :rtype: :py:obj:`None`
[ "Print", "saveframe", "into", "a", "file", "or", "stdout", ".", "We", "need", "to", "keep", "track", "of", "how", "far", "over", "everything", "is", "tabbed", ".", "The", "tab", "width", "variable", "tw", "does", "this", "for", "us", "." ]
train
https://github.com/MoseleyBioinformaticsLab/nmrstarlib/blob/f2adabbca04d5a134ce6ba3211099d1457787ff2/nmrstarlib/nmrstarlib.py#L460-L493
MoseleyBioinformaticsLab/nmrstarlib
nmrstarlib/nmrstarlib.py
NMRStarFile.print_loop
def print_loop(self, sf, sftag, f=sys.stdout, file_format="nmrstar", tw=3):
    """Print loop into a file or stdout.

    :param str sf: Saveframe name.
    :param str sftag: Saveframe tag, i.e. field name.
    :param io.StringIO f: writable file-like stream.
    :param str file_format: Format to use: `nmrstar` or `json`.
    :param int tw: Tab width.
    :return: None
    :rtype: :py:obj:`None`
    """
    if file_format == "json":
        print(json.dumps(self[sf][sftag], sort_keys=False, indent=4), file=f)
        return
    if file_format != "nmrstar":
        return

    fields, rows = self[sf][sftag]
    indent = tw * u" "

    # Field names first, each prefixed with an underscore.
    for field in fields:
        print(u"{}_{}".format(indent, field), file=f)
    print(u"", file=f)  # blank line separating field names from values

    # One output line per loop row; any multi-word value is single-quoted.
    for row in rows:
        rendered = []
        for value in row.values():
            rendered.append(u"'{}'".format(value) if len(value.split()) > 1 else value)
        print(u"{}{}".format(indent, u" ".join(rendered)), file=f)
python
def print_loop(self, sf, sftag, f=sys.stdout, file_format="nmrstar", tw=3): """Print loop into a file or stdout. :param str sf: Saveframe name. :param str sftag: Saveframe tag, i.e. field name. :param io.StringIO f: writable file-like stream. :param str file_format: Format to use: `nmrstar` or `json`. :param int tw: Tab width. :return: None :rtype: :py:obj:`None` """ if file_format == "nmrstar": # First print the fields for field in self[sf][sftag][0]: print(u"{}_{}".format(tw * u" ", field), file=f) print(u"", file=f) # new line between fields and values # Then print the values for valuesdict in self[sf][sftag][1]: # need to escape value with quotes (i.e. u"'{}'".format()) if value consists of two or more words print(u"{}{}".format(tw * u" ", u" ".join([u"'{}'".format(value) if len(value.split()) > 1 else value for value in valuesdict.values()])), file=f) elif file_format == "json": print(json.dumps(self[sf][sftag], sort_keys=False, indent=4), file=f)
[ "def", "print_loop", "(", "self", ",", "sf", ",", "sftag", ",", "f", "=", "sys", ".", "stdout", ",", "file_format", "=", "\"nmrstar\"", ",", "tw", "=", "3", ")", ":", "if", "file_format", "==", "\"nmrstar\"", ":", "# First print the fields", "for", "fiel...
Print loop into a file or stdout. :param str sf: Saveframe name. :param str sftag: Saveframe tag, i.e. field name. :param io.StringIO f: writable file-like stream. :param str file_format: Format to use: `nmrstar` or `json`. :param int tw: Tab width. :return: None :rtype: :py:obj:`None`
[ "Print", "loop", "into", "a", "file", "or", "stdout", "." ]
train
https://github.com/MoseleyBioinformaticsLab/nmrstarlib/blob/f2adabbca04d5a134ce6ba3211099d1457787ff2/nmrstarlib/nmrstarlib.py#L495-L519
MoseleyBioinformaticsLab/nmrstarlib
nmrstarlib/nmrstarlib.py
NMRStarFile.chem_shifts_by_residue
def chem_shifts_by_residue(self, amino_acids=None, atoms=None, amino_acids_and_atoms=None, nmrstar_version="3"): """Organize chemical shifts by amino acid residue. :param list amino_acids: List of amino acids three-letter codes. :param list atoms: List of BMRB atom type codes. :param dict amino_acids_and_atoms: Amino acid and its atoms key-value pairs. :param str nmrstar_version: Version of NMR-STAR format to use for look up chemical shifts loop. :return: List of OrderedDict per each chain :rtype: :py:class:`list` of :py:class:`collections.OrderedDict` """ if (amino_acids_and_atoms and amino_acids) or (amino_acids_and_atoms and atoms): raise ValueError('"amino_acids_and_atoms" parameter cannot be used simultaneously with ' '"amino_acids" and "atoms" parameters, one or another must be provided.') chemshifts_loop = NMRSTAR_CONSTANTS[nmrstar_version]["chemshifts_loop"] aminoacid_seq_id = NMRSTAR_CONSTANTS[nmrstar_version]["aminoacid_seq_id"] aminoacid_code = NMRSTAR_CONSTANTS[nmrstar_version]["aminoacid_code"] atom_code = NMRSTAR_CONSTANTS[nmrstar_version]["atom_code"] chemshift_value = NMRSTAR_CONSTANTS[nmrstar_version]["chemshift_value"] chains = [] for saveframe in self: if saveframe == u"data" or saveframe.startswith(u"comment"): continue else: for ind in self[saveframe].keys(): if ind.startswith(u"loop_"): if list(self[saveframe][ind][0]) == chemshifts_loop: chem_shifts_dict = OrderedDict() for entry in self[saveframe][ind][1]: residue_id = entry[aminoacid_seq_id] chem_shifts_dict.setdefault(residue_id, OrderedDict()) chem_shifts_dict[residue_id][u"AA3Code"] = entry[aminoacid_code] chem_shifts_dict[residue_id][u"Seq_ID"] = residue_id chem_shifts_dict[residue_id][entry[atom_code]] = entry[chemshift_value] chains.append(chem_shifts_dict) if amino_acids_and_atoms: for chem_shifts_dict in chains: for aa_dict in list(chem_shifts_dict.values()): if aa_dict[u"AA3Code"].upper() not in list(amino_acids_and_atoms.keys()): chem_shifts_dict.pop(aa_dict[u"Seq_ID"]) else: for 
resonance in list(aa_dict.keys()): if resonance in (u"AA3Code", u"Seq_ID") or resonance.upper() in amino_acids_and_atoms[aa_dict[u"AA3Code"]]: continue else: aa_dict.pop(resonance) else: if amino_acids: for chem_shifts_dict in chains: for aa_dict in list(chem_shifts_dict.values()): if aa_dict[u"AA3Code"].upper() not in amino_acids: chem_shifts_dict.pop(aa_dict[u"Seq_ID"]) if atoms: for chem_shifts_dict in chains: for aa_dict in chem_shifts_dict.values(): for resonance in list(aa_dict.keys()): if resonance in (u"AA3Code", u"Seq_ID") or resonance.upper() in atoms: continue else: aa_dict.pop(resonance) return chains
python
def chem_shifts_by_residue(self, amino_acids=None, atoms=None, amino_acids_and_atoms=None, nmrstar_version="3"): """Organize chemical shifts by amino acid residue. :param list amino_acids: List of amino acids three-letter codes. :param list atoms: List of BMRB atom type codes. :param dict amino_acids_and_atoms: Amino acid and its atoms key-value pairs. :param str nmrstar_version: Version of NMR-STAR format to use for look up chemical shifts loop. :return: List of OrderedDict per each chain :rtype: :py:class:`list` of :py:class:`collections.OrderedDict` """ if (amino_acids_and_atoms and amino_acids) or (amino_acids_and_atoms and atoms): raise ValueError('"amino_acids_and_atoms" parameter cannot be used simultaneously with ' '"amino_acids" and "atoms" parameters, one or another must be provided.') chemshifts_loop = NMRSTAR_CONSTANTS[nmrstar_version]["chemshifts_loop"] aminoacid_seq_id = NMRSTAR_CONSTANTS[nmrstar_version]["aminoacid_seq_id"] aminoacid_code = NMRSTAR_CONSTANTS[nmrstar_version]["aminoacid_code"] atom_code = NMRSTAR_CONSTANTS[nmrstar_version]["atom_code"] chemshift_value = NMRSTAR_CONSTANTS[nmrstar_version]["chemshift_value"] chains = [] for saveframe in self: if saveframe == u"data" or saveframe.startswith(u"comment"): continue else: for ind in self[saveframe].keys(): if ind.startswith(u"loop_"): if list(self[saveframe][ind][0]) == chemshifts_loop: chem_shifts_dict = OrderedDict() for entry in self[saveframe][ind][1]: residue_id = entry[aminoacid_seq_id] chem_shifts_dict.setdefault(residue_id, OrderedDict()) chem_shifts_dict[residue_id][u"AA3Code"] = entry[aminoacid_code] chem_shifts_dict[residue_id][u"Seq_ID"] = residue_id chem_shifts_dict[residue_id][entry[atom_code]] = entry[chemshift_value] chains.append(chem_shifts_dict) if amino_acids_and_atoms: for chem_shifts_dict in chains: for aa_dict in list(chem_shifts_dict.values()): if aa_dict[u"AA3Code"].upper() not in list(amino_acids_and_atoms.keys()): chem_shifts_dict.pop(aa_dict[u"Seq_ID"]) else: for 
resonance in list(aa_dict.keys()): if resonance in (u"AA3Code", u"Seq_ID") or resonance.upper() in amino_acids_and_atoms[aa_dict[u"AA3Code"]]: continue else: aa_dict.pop(resonance) else: if amino_acids: for chem_shifts_dict in chains: for aa_dict in list(chem_shifts_dict.values()): if aa_dict[u"AA3Code"].upper() not in amino_acids: chem_shifts_dict.pop(aa_dict[u"Seq_ID"]) if atoms: for chem_shifts_dict in chains: for aa_dict in chem_shifts_dict.values(): for resonance in list(aa_dict.keys()): if resonance in (u"AA3Code", u"Seq_ID") or resonance.upper() in atoms: continue else: aa_dict.pop(resonance) return chains
[ "def", "chem_shifts_by_residue", "(", "self", ",", "amino_acids", "=", "None", ",", "atoms", "=", "None", ",", "amino_acids_and_atoms", "=", "None", ",", "nmrstar_version", "=", "\"3\"", ")", ":", "if", "(", "amino_acids_and_atoms", "and", "amino_acids", ")", ...
Organize chemical shifts by amino acid residue. :param list amino_acids: List of amino acids three-letter codes. :param list atoms: List of BMRB atom type codes. :param dict amino_acids_and_atoms: Amino acid and its atoms key-value pairs. :param str nmrstar_version: Version of NMR-STAR format to use for look up chemical shifts loop. :return: List of OrderedDict per each chain :rtype: :py:class:`list` of :py:class:`collections.OrderedDict`
[ "Organize", "chemical", "shifts", "by", "amino", "acid", "residue", "." ]
train
https://github.com/MoseleyBioinformaticsLab/nmrstarlib/blob/f2adabbca04d5a134ce6ba3211099d1457787ff2/nmrstarlib/nmrstarlib.py#L521-L584
MoseleyBioinformaticsLab/nmrstarlib
nmrstarlib/nmrstarlib.py
CIFFile._build_file
def _build_file(self, cif_str): """Build :class:`~nmrstarlib.nmrstarlib.CIFFile` object. :param cif_str: NMR-STAR-formatted string. :type cif_str: :py:class:`str` or :py:class:`bytes` :return: instance of :class:`~nmrstarlib.nmrstarlib.CIFFile`. :rtype: :class:`~nmrstarlib.nmrstarlib.CIFFile` """ odict = self comment_count = 0 loop_count = 0 lexer = bmrblex(cif_str) token = next(lexer) while token != u"": try: if token[0:5] == u"data_": self.id = token[5:] self[u"data"] = self.id elif token.lstrip().startswith(u"#"): odict[u"comment_{}".format(comment_count)] = token comment_count += 1 elif token[0] == u"_": # This strips off the leading underscore of tagnames for readability value = next(lexer) odict[token[1:]] = value elif token == u"loop_": odict[u"loop_{}".format(loop_count)] = self._build_loop(lexer) loop_count += 1 else: print("Error: Invalid token {}".format(token), file=sys.stderr) print("In _build_file try block", file=sys.stderr) raise InvalidToken("{}".format(token)) except IndexError: print("Error: Invalid token {}".format(token), file=sys.stderr) print("In _build_file except block", file=sys.stderr) raise finally: token = next(lexer) return self
python
def _build_file(self, cif_str): """Build :class:`~nmrstarlib.nmrstarlib.CIFFile` object. :param cif_str: NMR-STAR-formatted string. :type cif_str: :py:class:`str` or :py:class:`bytes` :return: instance of :class:`~nmrstarlib.nmrstarlib.CIFFile`. :rtype: :class:`~nmrstarlib.nmrstarlib.CIFFile` """ odict = self comment_count = 0 loop_count = 0 lexer = bmrblex(cif_str) token = next(lexer) while token != u"": try: if token[0:5] == u"data_": self.id = token[5:] self[u"data"] = self.id elif token.lstrip().startswith(u"#"): odict[u"comment_{}".format(comment_count)] = token comment_count += 1 elif token[0] == u"_": # This strips off the leading underscore of tagnames for readability value = next(lexer) odict[token[1:]] = value elif token == u"loop_": odict[u"loop_{}".format(loop_count)] = self._build_loop(lexer) loop_count += 1 else: print("Error: Invalid token {}".format(token), file=sys.stderr) print("In _build_file try block", file=sys.stderr) raise InvalidToken("{}".format(token)) except IndexError: print("Error: Invalid token {}".format(token), file=sys.stderr) print("In _build_file except block", file=sys.stderr) raise finally: token = next(lexer) return self
[ "def", "_build_file", "(", "self", ",", "cif_str", ")", ":", "odict", "=", "self", "comment_count", "=", "0", "loop_count", "=", "0", "lexer", "=", "bmrblex", "(", "cif_str", ")", "token", "=", "next", "(", "lexer", ")", "while", "token", "!=", "u\"\""...
Build :class:`~nmrstarlib.nmrstarlib.CIFFile` object. :param cif_str: NMR-STAR-formatted string. :type cif_str: :py:class:`str` or :py:class:`bytes` :return: instance of :class:`~nmrstarlib.nmrstarlib.CIFFile`. :rtype: :class:`~nmrstarlib.nmrstarlib.CIFFile`
[ "Build", ":", "class", ":", "~nmrstarlib", ".", "nmrstarlib", ".", "CIFFile", "object", "." ]
train
https://github.com/MoseleyBioinformaticsLab/nmrstarlib/blob/f2adabbca04d5a134ce6ba3211099d1457787ff2/nmrstarlib/nmrstarlib.py#L600-L645
MoseleyBioinformaticsLab/nmrstarlib
nmrstarlib/nmrstarlib.py
CIFFile.print_file
def print_file(self, f=sys.stdout, file_format="cif", tw=0): """Print :class:`~nmrstarlib.nmrstarlib.CIFFile` into a file or stdout. :param io.StringIO f: writable file-like stream. :param str file_format: Format to use: `cif` or `json`. :param int tw: Tab width. :return: None :rtype: :py:obj:`None` """ if file_format == "cif": for key in self.keys(): if key == u"data": print(u"{}_{}".format(key, self[key]), file=f) elif key.startswith(u"comment"): print(u"{}".format(self[key].strip()), file=f) elif key.startswith(u"loop_"): print(u"{}loop_".format(tw * u" "), file=f) self.print_loop(key, f, file_format, tw) else: # handle the NMR-Star "multiline string" if self[key].endswith(u"\n"): print(u"{}_{}".format(tw * u" ", key), file=f) print(u";{};".format(self[key]), file=f) # need to escape value with quotes (i.e. u"'{}'".format()) if value consists of two or more words elif len(self[key].split()) > 1: print(u"{}_{}\t {}".format(tw * u" ", key, u"'{}'".format(self[key])), file=f) else: print(u"{}_{}\t {}".format(tw * u" ", key, self[key]), file=f) elif file_format == "json": print(self._to_json(), file=f)
python
def print_file(self, f=sys.stdout, file_format="cif", tw=0): """Print :class:`~nmrstarlib.nmrstarlib.CIFFile` into a file or stdout. :param io.StringIO f: writable file-like stream. :param str file_format: Format to use: `cif` or `json`. :param int tw: Tab width. :return: None :rtype: :py:obj:`None` """ if file_format == "cif": for key in self.keys(): if key == u"data": print(u"{}_{}".format(key, self[key]), file=f) elif key.startswith(u"comment"): print(u"{}".format(self[key].strip()), file=f) elif key.startswith(u"loop_"): print(u"{}loop_".format(tw * u" "), file=f) self.print_loop(key, f, file_format, tw) else: # handle the NMR-Star "multiline string" if self[key].endswith(u"\n"): print(u"{}_{}".format(tw * u" ", key), file=f) print(u";{};".format(self[key]), file=f) # need to escape value with quotes (i.e. u"'{}'".format()) if value consists of two or more words elif len(self[key].split()) > 1: print(u"{}_{}\t {}".format(tw * u" ", key, u"'{}'".format(self[key])), file=f) else: print(u"{}_{}\t {}".format(tw * u" ", key, self[key]), file=f) elif file_format == "json": print(self._to_json(), file=f)
[ "def", "print_file", "(", "self", ",", "f", "=", "sys", ".", "stdout", ",", "file_format", "=", "\"cif\"", ",", "tw", "=", "0", ")", ":", "if", "file_format", "==", "\"cif\"", ":", "for", "key", "in", "self", ".", "keys", "(", ")", ":", "if", "ke...
Print :class:`~nmrstarlib.nmrstarlib.CIFFile` into a file or stdout. :param io.StringIO f: writable file-like stream. :param str file_format: Format to use: `cif` or `json`. :param int tw: Tab width. :return: None :rtype: :py:obj:`None`
[ "Print", ":", "class", ":", "~nmrstarlib", ".", "nmrstarlib", ".", "CIFFile", "into", "a", "file", "or", "stdout", "." ]
train
https://github.com/MoseleyBioinformaticsLab/nmrstarlib/blob/f2adabbca04d5a134ce6ba3211099d1457787ff2/nmrstarlib/nmrstarlib.py#L673-L705
carpedm20/ndrive
ndrive/client.py
Ndrive.GET
def GET(self, func, data): """Send GET request to execute Ndrive API :param func: The function name you want to execute in Ndrive API. :param params: Parameter data for HTTP request. :returns: metadata when success or False when failed """ if func not in ['getRegisterUserInfo']: s, message = self.checkAccount() if s is False: return False, message url = nurls[func] r = self.session.get(url, params = data) r.encoding = 'utf-8' if self.debug: print r.text try: try: metadata = json.loads(r.text) except: metadata = json.loads(r.text[r.text.find('{'):-1]) message = metadata['message'] if message == 'success': return True, metadata['resultvalue'] else: return False, message except: for e in sys.exc_info(): print e sys.exit(1) return False, "Error %s: Failed to send GET request" %func
python
def GET(self, func, data): """Send GET request to execute Ndrive API :param func: The function name you want to execute in Ndrive API. :param params: Parameter data for HTTP request. :returns: metadata when success or False when failed """ if func not in ['getRegisterUserInfo']: s, message = self.checkAccount() if s is False: return False, message url = nurls[func] r = self.session.get(url, params = data) r.encoding = 'utf-8' if self.debug: print r.text try: try: metadata = json.loads(r.text) except: metadata = json.loads(r.text[r.text.find('{'):-1]) message = metadata['message'] if message == 'success': return True, metadata['resultvalue'] else: return False, message except: for e in sys.exc_info(): print e sys.exit(1) return False, "Error %s: Failed to send GET request" %func
[ "def", "GET", "(", "self", ",", "func", ",", "data", ")", ":", "if", "func", "not", "in", "[", "'getRegisterUserInfo'", "]", ":", "s", ",", "message", "=", "self", ".", "checkAccount", "(", ")", "if", "s", "is", "False", ":", "return", "False", ","...
Send GET request to execute Ndrive API :param func: The function name you want to execute in Ndrive API. :param params: Parameter data for HTTP request. :returns: metadata when success or False when failed
[ "Send", "GET", "request", "to", "execute", "Ndrive", "API" ]
train
https://github.com/carpedm20/ndrive/blob/ac58eaf8a8d46292ad752bb38047f65838b8ad2b/ndrive/client.py#L110-L146
carpedm20/ndrive
ndrive/client.py
Ndrive.POST
def POST(self, func, data): """Send POST request to execute Ndrive API :param func: The function name you want to execute in Ndrive API. :param params: Parameter data for HTTP request. :returns: ``metadata`` when success or ``False`` when failed """ s, message = self.checkAccount() if s is False: return False, message url = nurls[func] r = self.session.post(url, data = data) r.encoding = 'utf-8' if self.debug: print r.text.encode("utf-8") try: metadata = json.loads(r.text) message = metadata['message'] if message == 'success': try: return True, metadata['resultvalue'] except: return True, metadata['resultcode'] else: return False, "Error %s: %s" %(func, message) except: #for e in sys.exc_info(): # print e #sys.exit(1) return False, "Error %s: Failed to send POST request" %func
python
def POST(self, func, data): """Send POST request to execute Ndrive API :param func: The function name you want to execute in Ndrive API. :param params: Parameter data for HTTP request. :returns: ``metadata`` when success or ``False`` when failed """ s, message = self.checkAccount() if s is False: return False, message url = nurls[func] r = self.session.post(url, data = data) r.encoding = 'utf-8' if self.debug: print r.text.encode("utf-8") try: metadata = json.loads(r.text) message = metadata['message'] if message == 'success': try: return True, metadata['resultvalue'] except: return True, metadata['resultcode'] else: return False, "Error %s: %s" %(func, message) except: #for e in sys.exc_info(): # print e #sys.exit(1) return False, "Error %s: Failed to send POST request" %func
[ "def", "POST", "(", "self", ",", "func", ",", "data", ")", ":", "s", ",", "message", "=", "self", ".", "checkAccount", "(", ")", "if", "s", "is", "False", ":", "return", "False", ",", "message", "url", "=", "nurls", "[", "func", "]", "r", "=", ...
Send POST request to execute Ndrive API :param func: The function name you want to execute in Ndrive API. :param params: Parameter data for HTTP request. :returns: ``metadata`` when success or ``False`` when failed
[ "Send", "POST", "request", "to", "execute", "Ndrive", "API" ]
train
https://github.com/carpedm20/ndrive/blob/ac58eaf8a8d46292ad752bb38047f65838b8ad2b/ndrive/client.py#L148-L181
carpedm20/ndrive
ndrive/client.py
Ndrive.getRegisterUserInfo
def getRegisterUserInfo(self, svctype = "Android NDrive App ver", auth = 0): """Retrieve information about useridx :param svctype: Information about the platform you are using right now. :param auth: Authentication type :return: ``True`` when success or ``False`` when failed """ data = {'userid': self.user_id, 'svctype': svctype, 'auth': auth } s, metadata = self.GET('getRegisterUserInfo', data) if s is True: self.useridx = metadata['useridx'] return True, metadata else: return False, metadata
python
def getRegisterUserInfo(self, svctype = "Android NDrive App ver", auth = 0): """Retrieve information about useridx :param svctype: Information about the platform you are using right now. :param auth: Authentication type :return: ``True`` when success or ``False`` when failed """ data = {'userid': self.user_id, 'svctype': svctype, 'auth': auth } s, metadata = self.GET('getRegisterUserInfo', data) if s is True: self.useridx = metadata['useridx'] return True, metadata else: return False, metadata
[ "def", "getRegisterUserInfo", "(", "self", ",", "svctype", "=", "\"Android NDrive App ver\"", ",", "auth", "=", "0", ")", ":", "data", "=", "{", "'userid'", ":", "self", ".", "user_id", ",", "'svctype'", ":", "svctype", ",", "'auth'", ":", "auth", "}", "...
Retrieve information about useridx :param svctype: Information about the platform you are using right now. :param auth: Authentication type :return: ``True`` when success or ``False`` when failed
[ "Retrieve", "information", "about", "useridx" ]
train
https://github.com/carpedm20/ndrive/blob/ac58eaf8a8d46292ad752bb38047f65838b8ad2b/ndrive/client.py#L183-L202
carpedm20/ndrive
ndrive/client.py
Ndrive.uploadFile
def uploadFile(self, file_obj, full_path, overwrite = False): """Upload a file as Ndrive really do. >>> nd.uploadFile('~/flower.png','/Picture/flower.png',True) This function imitates the process when Ndrive uploads a local file to its server. The process follows 7 steps: 1. POST /CheckStatus.ndrive 2. POST /GetDiskSpace.ndrive 3. POST /CheckUpload.ndrive 4. PUT /FILE_PATH 5. POST /GetList.ndrive 6. POST /GetWasteInfo.ndrive 7. POST /GetDiskSpace.ndrive nd.uploadFile('./flower.png','/Picture/flower.png') :param file_obj: A file-like object to check whether possible to upload. You can pass a string as a file_obj or a real file object. :param full_path: The full path to upload the file to, *including the file name*. If the destination directory does not yet exist, it will be created. :param overwrite: Whether to overwrite an existing file at the given path. (Default ``False``.) """ s = self.checkStatus() s = self.getDiskSpace() s = self.checkUpload(file_obj, full_path, overwrite) if s is True: self.put(file_obj, full_path, overwrite)
python
def uploadFile(self, file_obj, full_path, overwrite = False): """Upload a file as Ndrive really do. >>> nd.uploadFile('~/flower.png','/Picture/flower.png',True) This function imitates the process when Ndrive uploads a local file to its server. The process follows 7 steps: 1. POST /CheckStatus.ndrive 2. POST /GetDiskSpace.ndrive 3. POST /CheckUpload.ndrive 4. PUT /FILE_PATH 5. POST /GetList.ndrive 6. POST /GetWasteInfo.ndrive 7. POST /GetDiskSpace.ndrive nd.uploadFile('./flower.png','/Picture/flower.png') :param file_obj: A file-like object to check whether possible to upload. You can pass a string as a file_obj or a real file object. :param full_path: The full path to upload the file to, *including the file name*. If the destination directory does not yet exist, it will be created. :param overwrite: Whether to overwrite an existing file at the given path. (Default ``False``.) """ s = self.checkStatus() s = self.getDiskSpace() s = self.checkUpload(file_obj, full_path, overwrite) if s is True: self.put(file_obj, full_path, overwrite)
[ "def", "uploadFile", "(", "self", ",", "file_obj", ",", "full_path", ",", "overwrite", "=", "False", ")", ":", "s", "=", "self", ".", "checkStatus", "(", ")", "s", "=", "self", ".", "getDiskSpace", "(", ")", "s", "=", "self", ".", "checkUpload", "(",...
Upload a file as Ndrive really do. >>> nd.uploadFile('~/flower.png','/Picture/flower.png',True) This function imitates the process when Ndrive uploads a local file to its server. The process follows 7 steps: 1. POST /CheckStatus.ndrive 2. POST /GetDiskSpace.ndrive 3. POST /CheckUpload.ndrive 4. PUT /FILE_PATH 5. POST /GetList.ndrive 6. POST /GetWasteInfo.ndrive 7. POST /GetDiskSpace.ndrive nd.uploadFile('./flower.png','/Picture/flower.png') :param file_obj: A file-like object to check whether possible to upload. You can pass a string as a file_obj or a real file object. :param full_path: The full path to upload the file to, *including the file name*. If the destination directory does not yet exist, it will be created. :param overwrite: Whether to overwrite an existing file at the given path. (Default ``False``.)
[ "Upload", "a", "file", "as", "Ndrive", "really", "do", "." ]
train
https://github.com/carpedm20/ndrive/blob/ac58eaf8a8d46292ad752bb38047f65838b8ad2b/ndrive/client.py#L227-L252
carpedm20/ndrive
ndrive/client.py
Ndrive.getDiskSpace
def getDiskSpace(self): """Get disk space information. >>> disk_info = nd.getDiskSpace() :return: ``metadata`` if success or ``error message`` :metadata: - expandablespace - filemaxsize - largefileminsize - largefileunusedspace - largefileusedspace - paymentspace - totallargespace - totalspace - unusedspace - usedspace """ data = {'userid': self.user_id, 'useridx': self.useridx, } s, metadata = self.POST('getDiskSpace',data) if s is True: usedspace = byte_readable(metadata['usedspace']) totalspace = byte_readable(metadata['totalspace']) print "Capacity: %s / %s" % (usedspace, totalspace) return metadata else: print message
python
def getDiskSpace(self): """Get disk space information. >>> disk_info = nd.getDiskSpace() :return: ``metadata`` if success or ``error message`` :metadata: - expandablespace - filemaxsize - largefileminsize - largefileunusedspace - largefileusedspace - paymentspace - totallargespace - totalspace - unusedspace - usedspace """ data = {'userid': self.user_id, 'useridx': self.useridx, } s, metadata = self.POST('getDiskSpace',data) if s is True: usedspace = byte_readable(metadata['usedspace']) totalspace = byte_readable(metadata['totalspace']) print "Capacity: %s / %s" % (usedspace, totalspace) return metadata else: print message
[ "def", "getDiskSpace", "(", "self", ")", ":", "data", "=", "{", "'userid'", ":", "self", ".", "user_id", ",", "'useridx'", ":", "self", ".", "useridx", ",", "}", "s", ",", "metadata", "=", "self", ".", "POST", "(", "'getDiskSpace'", ",", "data", ")",...
Get disk space information. >>> disk_info = nd.getDiskSpace() :return: ``metadata`` if success or ``error message`` :metadata: - expandablespace - filemaxsize - largefileminsize - largefileunusedspace - largefileusedspace - paymentspace - totallargespace - totalspace - unusedspace - usedspace
[ "Get", "disk", "space", "information", "." ]
train
https://github.com/carpedm20/ndrive/blob/ac58eaf8a8d46292ad752bb38047f65838b8ad2b/ndrive/client.py#L254-L285
carpedm20/ndrive
ndrive/client.py
Ndrive.checkUpload
def checkUpload(self, file_obj, full_path = '/', overwrite = False): """Check whether it is possible to upload a file. >>> s = nd.checkUpload('~/flower.png','/Picture/flower.png') :param file_obj: A file-like object to check whether possible to upload. You can pass a string as a file_obj or a real file object. :param str full_path: The full path to upload the file to, *including the file name*. If the destination directory does not yet exist, it will be created. :param overwrite: Whether to overwrite an existing file at the given path. (Default ``False``.) :return: ``True`` if possible to upload or ``False`` if impossible to upload. """ try: file_obj = file_obj.name except: file_obj = file_obj # do nothing file_size = os.stat(file_obj).st_size now = datetime.datetime.now().isoformat() data = {'uploadsize': file_size, 'overwrite': 'T' if overwrite else 'F', 'getlastmodified': now, 'dstresource': full_path, 'userid': self.user_id, 'useridx': self.useridx, } s, metadata = self.POST('checkUpload', data) if not s: print metadata return s
python
def checkUpload(self, file_obj, full_path = '/', overwrite = False): """Check whether it is possible to upload a file. >>> s = nd.checkUpload('~/flower.png','/Picture/flower.png') :param file_obj: A file-like object to check whether possible to upload. You can pass a string as a file_obj or a real file object. :param str full_path: The full path to upload the file to, *including the file name*. If the destination directory does not yet exist, it will be created. :param overwrite: Whether to overwrite an existing file at the given path. (Default ``False``.) :return: ``True`` if possible to upload or ``False`` if impossible to upload. """ try: file_obj = file_obj.name except: file_obj = file_obj # do nothing file_size = os.stat(file_obj).st_size now = datetime.datetime.now().isoformat() data = {'uploadsize': file_size, 'overwrite': 'T' if overwrite else 'F', 'getlastmodified': now, 'dstresource': full_path, 'userid': self.user_id, 'useridx': self.useridx, } s, metadata = self.POST('checkUpload', data) if not s: print metadata return s
[ "def", "checkUpload", "(", "self", ",", "file_obj", ",", "full_path", "=", "'/'", ",", "overwrite", "=", "False", ")", ":", "try", ":", "file_obj", "=", "file_obj", ".", "name", "except", ":", "file_obj", "=", "file_obj", "# do nothing", "file_size", "=", ...
Check whether it is possible to upload a file. >>> s = nd.checkUpload('~/flower.png','/Picture/flower.png') :param file_obj: A file-like object to check whether possible to upload. You can pass a string as a file_obj or a real file object. :param str full_path: The full path to upload the file to, *including the file name*. If the destination directory does not yet exist, it will be created. :param overwrite: Whether to overwrite an existing file at the given path. (Default ``False``.) :return: ``True`` if possible to upload or ``False`` if impossible to upload.
[ "Check", "whether", "it", "is", "possible", "to", "upload", "a", "file", "." ]
train
https://github.com/carpedm20/ndrive/blob/ac58eaf8a8d46292ad752bb38047f65838b8ad2b/ndrive/client.py#L287-L319
carpedm20/ndrive
ndrive/client.py
Ndrive.downloadFile
def downloadFile(self, from_path, to_path = ''): """Download a file. >>> nd.downloadFile('/Picture/flower.png', '~/flower.png') :param from_path: The full path to download the file to, *including the file name*. If the destination directory does not yet exist, it will be created. :param to_path: The full path of a file to be saved in local directory. :returns: File object """ if to_path == '': file_name = os.path.basename(from_path) to_path = os.path.join(os.getcwd(), file_name) url = nurls['download'] + from_path data = {'attachment':2, 'userid': self.user_id, 'useridx': self.useridx, 'NDriveSvcType': "NHN/ND-WEB Ver", } if '~' in to_path: to_path = expanduser(to_path) with open(to_path, 'wb') as handle: request = self.session.get(url, params = data, stream=True) for block in request.iter_content(1024): if not block: break handle.write(block) return handle
python
def downloadFile(self, from_path, to_path = ''): """Download a file. >>> nd.downloadFile('/Picture/flower.png', '~/flower.png') :param from_path: The full path to download the file to, *including the file name*. If the destination directory does not yet exist, it will be created. :param to_path: The full path of a file to be saved in local directory. :returns: File object """ if to_path == '': file_name = os.path.basename(from_path) to_path = os.path.join(os.getcwd(), file_name) url = nurls['download'] + from_path data = {'attachment':2, 'userid': self.user_id, 'useridx': self.useridx, 'NDriveSvcType': "NHN/ND-WEB Ver", } if '~' in to_path: to_path = expanduser(to_path) with open(to_path, 'wb') as handle: request = self.session.get(url, params = data, stream=True) for block in request.iter_content(1024): if not block: break handle.write(block) return handle
[ "def", "downloadFile", "(", "self", ",", "from_path", ",", "to_path", "=", "''", ")", ":", "if", "to_path", "==", "''", ":", "file_name", "=", "os", ".", "path", ".", "basename", "(", "from_path", ")", "to_path", "=", "os", ".", "path", ".", "join", ...
Download a file. >>> nd.downloadFile('/Picture/flower.png', '~/flower.png') :param from_path: The full path to download the file to, *including the file name*. If the destination directory does not yet exist, it will be created. :param to_path: The full path of a file to be saved in local directory. :returns: File object
[ "Download", "a", "file", "." ]
train
https://github.com/carpedm20/ndrive/blob/ac58eaf8a8d46292ad752bb38047f65838b8ad2b/ndrive/client.py#L321-L354
carpedm20/ndrive
ndrive/client.py
Ndrive.put
def put(self, file_obj, full_path, overwrite = False): """Upload a file. >>> nd.put('./flower.png','/Picture/flower.png') >>> nd.put(open('./flower.png','r'),'/Picture/flower.png') :param file_obj: A file-like object to check whether possible to upload. You can pass a string as a file_obj or a real file object. :param full_path: The full path to upload the file to, *including the file name*. If the destination directory does not yet exist, it will be created. :return: ``True`` when succcess to upload a file or ``False`` """ try: file_obj = open(file_obj, 'r') except: file_obj = file_obj # do nothing content = file_obj.read() file_name = os.path.basename(full_path) now = datetime.datetime.now().isoformat() url = nurls['put'] + full_path if overwrite: overwrite = 'T' else: overwrite = 'F' headers = {'userid': self.user_id, 'useridx': self.useridx, 'MODIFYDATE': now, 'Content-Type': magic.from_file(file_obj.name, mime=True), 'charset': 'UTF-8', 'Origin': 'http://ndrive2.naver.com', 'OVERWRITE': overwrite, 'X-Requested-With': 'XMLHttpRequest', 'NDriveSvcType': 'NHN/DRAGDROP Ver', } r = self.session.put(url = url, data = content, headers = headers) r.encoding = 'utf-8' message = json.loads(r.text)['message'] if message != 'success': print "Error put: " + message return False else: print "Success put: " + file_obj.name return True
python
def put(self, file_obj, full_path, overwrite = False): """Upload a file. >>> nd.put('./flower.png','/Picture/flower.png') >>> nd.put(open('./flower.png','r'),'/Picture/flower.png') :param file_obj: A file-like object to check whether possible to upload. You can pass a string as a file_obj or a real file object. :param full_path: The full path to upload the file to, *including the file name*. If the destination directory does not yet exist, it will be created. :return: ``True`` when succcess to upload a file or ``False`` """ try: file_obj = open(file_obj, 'r') except: file_obj = file_obj # do nothing content = file_obj.read() file_name = os.path.basename(full_path) now = datetime.datetime.now().isoformat() url = nurls['put'] + full_path if overwrite: overwrite = 'T' else: overwrite = 'F' headers = {'userid': self.user_id, 'useridx': self.useridx, 'MODIFYDATE': now, 'Content-Type': magic.from_file(file_obj.name, mime=True), 'charset': 'UTF-8', 'Origin': 'http://ndrive2.naver.com', 'OVERWRITE': overwrite, 'X-Requested-With': 'XMLHttpRequest', 'NDriveSvcType': 'NHN/DRAGDROP Ver', } r = self.session.put(url = url, data = content, headers = headers) r.encoding = 'utf-8' message = json.loads(r.text)['message'] if message != 'success': print "Error put: " + message return False else: print "Success put: " + file_obj.name return True
[ "def", "put", "(", "self", ",", "file_obj", ",", "full_path", ",", "overwrite", "=", "False", ")", ":", "try", ":", "file_obj", "=", "open", "(", "file_obj", ",", "'r'", ")", "except", ":", "file_obj", "=", "file_obj", "# do nothing", "content", "=", "...
Upload a file. >>> nd.put('./flower.png','/Picture/flower.png') >>> nd.put(open('./flower.png','r'),'/Picture/flower.png') :param file_obj: A file-like object to check whether possible to upload. You can pass a string as a file_obj or a real file object. :param full_path: The full path to upload the file to, *including the file name*. If the destination directory does not yet exist, it will be created. :return: ``True`` when succcess to upload a file or ``False``
[ "Upload", "a", "file", "." ]
train
https://github.com/carpedm20/ndrive/blob/ac58eaf8a8d46292ad752bb38047f65838b8ad2b/ndrive/client.py#L356-L403
carpedm20/ndrive
ndrive/client.py
Ndrive.delete
def delete(self, full_path): """Delete a file in full_path >>> nd.delete('/Picture/flower.png') :param full_path: The full path to delete the file to, *including the file name*. :return: ``True`` if success to delete the file or ``False`` """ now = datetime.datetime.now().isoformat() url = nurls['delete'] + full_path headers = {'userid': self.user_id, 'useridx': self.useridx, 'Content-Type': "application/x-www-form-urlencoded; charset=UTF-8", 'charset': 'UTF-8', 'Origin': 'http://ndrive2.naver.com', } try: r = self.session.delete(url = url, headers = headers) r.encoding = 'utf-8' except: print "Error delete: wrong full_path" return False message = json.loads(r.text)['message'] if message != 'success': print "Error delete: " + message return False else: return True
python
def delete(self, full_path): """Delete a file in full_path >>> nd.delete('/Picture/flower.png') :param full_path: The full path to delete the file to, *including the file name*. :return: ``True`` if success to delete the file or ``False`` """ now = datetime.datetime.now().isoformat() url = nurls['delete'] + full_path headers = {'userid': self.user_id, 'useridx': self.useridx, 'Content-Type': "application/x-www-form-urlencoded; charset=UTF-8", 'charset': 'UTF-8', 'Origin': 'http://ndrive2.naver.com', } try: r = self.session.delete(url = url, headers = headers) r.encoding = 'utf-8' except: print "Error delete: wrong full_path" return False message = json.loads(r.text)['message'] if message != 'success': print "Error delete: " + message return False else: return True
[ "def", "delete", "(", "self", ",", "full_path", ")", ":", "now", "=", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "isoformat", "(", ")", "url", "=", "nurls", "[", "'delete'", "]", "+", "full_path", "headers", "=", "{", "'userid'", ":", ...
Delete a file in full_path >>> nd.delete('/Picture/flower.png') :param full_path: The full path to delete the file to, *including the file name*. :return: ``True`` if success to delete the file or ``False``
[ "Delete", "a", "file", "in", "full_path" ]
train
https://github.com/carpedm20/ndrive/blob/ac58eaf8a8d46292ad752bb38047f65838b8ad2b/ndrive/client.py#L405-L436
carpedm20/ndrive
ndrive/client.py
Ndrive.getList
def getList(self, full_path, type = 1, dept = 0, sort = 'name', order = 'asc', startnum = 0, pagingrow = 1000, dummy = 56184): """Get a list of files >>> nd_list = nd.getList('/', type=3) >>> print nd_list There are 5 kinds of ``type``: - 1 => only directories with idxfolder property - 2 => only files - 3 => directories and files with thumbnail info (like viewHeight, viewWidth for Image file) - 4 => only directories except idxfolder - 5 => directories and files without thumbnail info There are 5 kindes of ``sort``: - file : file type, 종류 - length : size of file, 크기 - date : edited date, 수정한 날짜 - credate : creation date, 올린 날짜 - protect : protect or not, 중요 표시 :param full_path: The full path to get the file list. :param type: 1, 2, 3, 4 or 5 :param depth: Dept for file list :param sort: name => 이름 :param order: Order by (asc, desc) :return: metadata (list of dict) or False when failed to get list :metadata: - u'copyright': u'N', - u'creationdate': u'2013-05-12T21:17:23+09:00', - u'filelink': None, - u'fileuploadstatus': u'1', - u'getcontentlength': 0, - u'getlastmodified': u'2014-01-26T12:23:07+09:00', - u'href': u'/Codes/', - u'lastaccessed': u'2013-05-12T21:17:23+09:00', - u'lastmodifieduser': None, - u'priority': u'1', - u'protect': u'N', - u'resourceno': 204041859, - u'resourcetype': u'collection', - u'sharedinfo': u'F', - u'sharemsgcnt': 0, - u'shareno': 0, - u'subfoldercnt': 5, - u'thumbnailpath': u'N', - u'virusstatus': u'N' """ if type not in range(1, 6): print "Error getList: `type` should be between 1 to 5" return False data = {'orgresource': full_path, 'type': type, 'dept': dept, 'sort': sort, 'order': order, 'startnum': startnum, 'pagingrow': pagingrow, 'userid': self.user_id, 'useridx': self.useridx, 'dummy': dummy, } s, metadata = self.POST('getList', data) if s is True: return metadata else: print metadata return False
python
def getList(self, full_path, type = 1, dept = 0, sort = 'name', order = 'asc', startnum = 0, pagingrow = 1000, dummy = 56184): """Get a list of files >>> nd_list = nd.getList('/', type=3) >>> print nd_list There are 5 kinds of ``type``: - 1 => only directories with idxfolder property - 2 => only files - 3 => directories and files with thumbnail info (like viewHeight, viewWidth for Image file) - 4 => only directories except idxfolder - 5 => directories and files without thumbnail info There are 5 kindes of ``sort``: - file : file type, 종류 - length : size of file, 크기 - date : edited date, 수정한 날짜 - credate : creation date, 올린 날짜 - protect : protect or not, 중요 표시 :param full_path: The full path to get the file list. :param type: 1, 2, 3, 4 or 5 :param depth: Dept for file list :param sort: name => 이름 :param order: Order by (asc, desc) :return: metadata (list of dict) or False when failed to get list :metadata: - u'copyright': u'N', - u'creationdate': u'2013-05-12T21:17:23+09:00', - u'filelink': None, - u'fileuploadstatus': u'1', - u'getcontentlength': 0, - u'getlastmodified': u'2014-01-26T12:23:07+09:00', - u'href': u'/Codes/', - u'lastaccessed': u'2013-05-12T21:17:23+09:00', - u'lastmodifieduser': None, - u'priority': u'1', - u'protect': u'N', - u'resourceno': 204041859, - u'resourcetype': u'collection', - u'sharedinfo': u'F', - u'sharemsgcnt': 0, - u'shareno': 0, - u'subfoldercnt': 5, - u'thumbnailpath': u'N', - u'virusstatus': u'N' """ if type not in range(1, 6): print "Error getList: `type` should be between 1 to 5" return False data = {'orgresource': full_path, 'type': type, 'dept': dept, 'sort': sort, 'order': order, 'startnum': startnum, 'pagingrow': pagingrow, 'userid': self.user_id, 'useridx': self.useridx, 'dummy': dummy, } s, metadata = self.POST('getList', data) if s is True: return metadata else: print metadata return False
[ "def", "getList", "(", "self", ",", "full_path", ",", "type", "=", "1", ",", "dept", "=", "0", ",", "sort", "=", "'name'", ",", "order", "=", "'asc'", ",", "startnum", "=", "0", ",", "pagingrow", "=", "1000", ",", "dummy", "=", "56184", ")", ":",...
Get a list of files >>> nd_list = nd.getList('/', type=3) >>> print nd_list There are 5 kinds of ``type``: - 1 => only directories with idxfolder property - 2 => only files - 3 => directories and files with thumbnail info (like viewHeight, viewWidth for Image file) - 4 => only directories except idxfolder - 5 => directories and files without thumbnail info There are 5 kindes of ``sort``: - file : file type, 종류 - length : size of file, 크기 - date : edited date, 수정한 날짜 - credate : creation date, 올린 날짜 - protect : protect or not, 중요 표시 :param full_path: The full path to get the file list. :param type: 1, 2, 3, 4 or 5 :param depth: Dept for file list :param sort: name => 이름 :param order: Order by (asc, desc) :return: metadata (list of dict) or False when failed to get list :metadata: - u'copyright': u'N', - u'creationdate': u'2013-05-12T21:17:23+09:00', - u'filelink': None, - u'fileuploadstatus': u'1', - u'getcontentlength': 0, - u'getlastmodified': u'2014-01-26T12:23:07+09:00', - u'href': u'/Codes/', - u'lastaccessed': u'2013-05-12T21:17:23+09:00', - u'lastmodifieduser': None, - u'priority': u'1', - u'protect': u'N', - u'resourceno': 204041859, - u'resourcetype': u'collection', - u'sharedinfo': u'F', - u'sharemsgcnt': 0, - u'shareno': 0, - u'subfoldercnt': 5, - u'thumbnailpath': u'N', - u'virusstatus': u'N'
[ "Get", "a", "list", "of", "files" ]
train
https://github.com/carpedm20/ndrive/blob/ac58eaf8a8d46292ad752bb38047f65838b8ad2b/ndrive/client.py#L438-L512
carpedm20/ndrive
ndrive/client.py
Ndrive.doMove
def doMove(self, from_path, to_path, overwrite = False, bShareFireCopy = 'false', dummy = 56147): """Move a file. >>> nd.doMove('/Picture/flower.png', '/flower.png') :param from_path: The path to the file or folder to be moved. :param to_path: The destination path of the file or folder to be copied. File name should be included in the end of to_path. :param overwrite: Whether to overwrite an existing file at the given path. (Default ``False``.) :param bShareFireCopy: ??? :return: ``True`` if success to move a file or ``False``. """ if overwrite: overwrite = 'F' else: overwrite = 'T' data = {'orgresource': from_path, 'dstresource': to_path, 'overwrite': overwrite, 'bShareFireCopy': bShareFireCopy, 'userid': self.user_id, 'useridx': self.useridx, 'dummy': dummy, } s, metadata = self.POST('doMove', data) return s
python
def doMove(self, from_path, to_path, overwrite = False, bShareFireCopy = 'false', dummy = 56147): """Move a file. >>> nd.doMove('/Picture/flower.png', '/flower.png') :param from_path: The path to the file or folder to be moved. :param to_path: The destination path of the file or folder to be copied. File name should be included in the end of to_path. :param overwrite: Whether to overwrite an existing file at the given path. (Default ``False``.) :param bShareFireCopy: ??? :return: ``True`` if success to move a file or ``False``. """ if overwrite: overwrite = 'F' else: overwrite = 'T' data = {'orgresource': from_path, 'dstresource': to_path, 'overwrite': overwrite, 'bShareFireCopy': bShareFireCopy, 'userid': self.user_id, 'useridx': self.useridx, 'dummy': dummy, } s, metadata = self.POST('doMove', data) return s
[ "def", "doMove", "(", "self", ",", "from_path", ",", "to_path", ",", "overwrite", "=", "False", ",", "bShareFireCopy", "=", "'false'", ",", "dummy", "=", "56147", ")", ":", "if", "overwrite", ":", "overwrite", "=", "'F'", "else", ":", "overwrite", "=", ...
Move a file. >>> nd.doMove('/Picture/flower.png', '/flower.png') :param from_path: The path to the file or folder to be moved. :param to_path: The destination path of the file or folder to be copied. File name should be included in the end of to_path. :param overwrite: Whether to overwrite an existing file at the given path. (Default ``False``.) :param bShareFireCopy: ??? :return: ``True`` if success to move a file or ``False``.
[ "Move", "a", "file", "." ]
train
https://github.com/carpedm20/ndrive/blob/ac58eaf8a8d46292ad752bb38047f65838b8ad2b/ndrive/client.py#L514-L542
carpedm20/ndrive
ndrive/client.py
Ndrive.makeDirectory
def makeDirectory(self, full_path, dummy = 40841): """Make a directory >>> nd.makeDirectory('/test') :param full_path: The full path to get the directory property. Should be end with '/'. :return: ``True`` when success to make a directory or ``False`` """ if full_path[-1] is not '/': full_path += '/' data = {'dstresource': full_path, 'userid': self.user_id, 'useridx': self.useridx, 'dummy': dummy, } s, metadata = self.POST('makeDirectory', data) return s
python
def makeDirectory(self, full_path, dummy = 40841): """Make a directory >>> nd.makeDirectory('/test') :param full_path: The full path to get the directory property. Should be end with '/'. :return: ``True`` when success to make a directory or ``False`` """ if full_path[-1] is not '/': full_path += '/' data = {'dstresource': full_path, 'userid': self.user_id, 'useridx': self.useridx, 'dummy': dummy, } s, metadata = self.POST('makeDirectory', data) return s
[ "def", "makeDirectory", "(", "self", ",", "full_path", ",", "dummy", "=", "40841", ")", ":", "if", "full_path", "[", "-", "1", "]", "is", "not", "'/'", ":", "full_path", "+=", "'/'", "data", "=", "{", "'dstresource'", ":", "full_path", ",", "'userid'",...
Make a directory >>> nd.makeDirectory('/test') :param full_path: The full path to get the directory property. Should be end with '/'. :return: ``True`` when success to make a directory or ``False``
[ "Make", "a", "directory" ]
train
https://github.com/carpedm20/ndrive/blob/ac58eaf8a8d46292ad752bb38047f65838b8ad2b/ndrive/client.py#L544-L564
carpedm20/ndrive
ndrive/client.py
Ndrive.makeShareUrl
def makeShareUrl(self, full_path, passwd): """Make a share url of directory >>> nd.makeShareUrl('/Picture/flower.png', PASSWORD) Args: full_path: The full path of directory to get share url. Should be end with '/'. ex) /folder/ passwd: Access password for shared directory Returns: URL: share url for a directory False: Failed to share a directory """ if full_path[-1] is not '/': full_path += '/' data = {'_callback': 'window.__jindo_callback._347', 'path': full_path, 'passwd': passwd, 'userid': self.user_id, 'useridx': self.useridx, } s, metadata = self.GET('shareUrl', data) if s: print "URL: %s" % (metadata['href']) return metadata['href'] else: print "Error makeShareUrl: %s" % (metadata) return False
python
def makeShareUrl(self, full_path, passwd): """Make a share url of directory >>> nd.makeShareUrl('/Picture/flower.png', PASSWORD) Args: full_path: The full path of directory to get share url. Should be end with '/'. ex) /folder/ passwd: Access password for shared directory Returns: URL: share url for a directory False: Failed to share a directory """ if full_path[-1] is not '/': full_path += '/' data = {'_callback': 'window.__jindo_callback._347', 'path': full_path, 'passwd': passwd, 'userid': self.user_id, 'useridx': self.useridx, } s, metadata = self.GET('shareUrl', data) if s: print "URL: %s" % (metadata['href']) return metadata['href'] else: print "Error makeShareUrl: %s" % (metadata) return False
[ "def", "makeShareUrl", "(", "self", ",", "full_path", ",", "passwd", ")", ":", "if", "full_path", "[", "-", "1", "]", "is", "not", "'/'", ":", "full_path", "+=", "'/'", "data", "=", "{", "'_callback'", ":", "'window.__jindo_callback._347'", ",", "'path'", ...
Make a share url of directory >>> nd.makeShareUrl('/Picture/flower.png', PASSWORD) Args: full_path: The full path of directory to get share url. Should be end with '/'. ex) /folder/ passwd: Access password for shared directory Returns: URL: share url for a directory False: Failed to share a directory
[ "Make", "a", "share", "url", "of", "directory" ]
train
https://github.com/carpedm20/ndrive/blob/ac58eaf8a8d46292ad752bb38047f65838b8ad2b/ndrive/client.py#L566-L601
carpedm20/ndrive
ndrive/client.py
Ndrive.getFileLink
def getFileLink(self, full_path): """Get a link of file >>> file_link = nd.getFileLink('/Picture/flower.png') :param full_path: The full path of file to get file link. Path should start and end with '/'. :return: ``Shared url`` or ``False`` if failed to share a file or directory through url """ prop = self.getProperty(full_path) if not prop: print "Error getFileLink: wrong full_path" return False else: prop_url = prop['filelinkurl'] if prop_url: print "URL: " + prop_url return prop_url else: resourceno = prop["resourceno"] url = self.createFileLink(resourceno) if url: return url else: return False
python
def getFileLink(self, full_path): """Get a link of file >>> file_link = nd.getFileLink('/Picture/flower.png') :param full_path: The full path of file to get file link. Path should start and end with '/'. :return: ``Shared url`` or ``False`` if failed to share a file or directory through url """ prop = self.getProperty(full_path) if not prop: print "Error getFileLink: wrong full_path" return False else: prop_url = prop['filelinkurl'] if prop_url: print "URL: " + prop_url return prop_url else: resourceno = prop["resourceno"] url = self.createFileLink(resourceno) if url: return url else: return False
[ "def", "getFileLink", "(", "self", ",", "full_path", ")", ":", "prop", "=", "self", ".", "getProperty", "(", "full_path", ")", "if", "not", "prop", ":", "print", "\"Error getFileLink: wrong full_path\"", "return", "False", "else", ":", "prop_url", "=", "prop",...
Get a link of file >>> file_link = nd.getFileLink('/Picture/flower.png') :param full_path: The full path of file to get file link. Path should start and end with '/'. :return: ``Shared url`` or ``False`` if failed to share a file or directory through url
[ "Get", "a", "link", "of", "file" ]
train
https://github.com/carpedm20/ndrive/blob/ac58eaf8a8d46292ad752bb38047f65838b8ad2b/ndrive/client.py#L603-L629
carpedm20/ndrive
ndrive/client.py
Ndrive.createFileLink
def createFileLink(self, resourceno): """Make a link of file If you don't know ``resourceno``, you'd better use ``getFileLink``. :param resourceno: Resource number of a file to create link :return: ``Shared url`` or ``False`` when failed to share a file """ data = {'_callback': 'window.__jindo_callback._8920', 'resourceno': resourceno, 'userid': self.user_id, 'useridx': self.useridx, } s, metadata = self.GET('createFileLink', data) if s: print "URL: %s" % (metadata['short_url']) return metadata['short_url'] else: print "Error createFileLink: %s" % (metadata) return False
python
def createFileLink(self, resourceno): """Make a link of file If you don't know ``resourceno``, you'd better use ``getFileLink``. :param resourceno: Resource number of a file to create link :return: ``Shared url`` or ``False`` when failed to share a file """ data = {'_callback': 'window.__jindo_callback._8920', 'resourceno': resourceno, 'userid': self.user_id, 'useridx': self.useridx, } s, metadata = self.GET('createFileLink', data) if s: print "URL: %s" % (metadata['short_url']) return metadata['short_url'] else: print "Error createFileLink: %s" % (metadata) return False
[ "def", "createFileLink", "(", "self", ",", "resourceno", ")", ":", "data", "=", "{", "'_callback'", ":", "'window.__jindo_callback._8920'", ",", "'resourceno'", ":", "resourceno", ",", "'userid'", ":", "self", ".", "user_id", ",", "'useridx'", ":", "self", "."...
Make a link of file If you don't know ``resourceno``, you'd better use ``getFileLink``. :param resourceno: Resource number of a file to create link :return: ``Shared url`` or ``False`` when failed to share a file
[ "Make", "a", "link", "of", "file" ]
train
https://github.com/carpedm20/ndrive/blob/ac58eaf8a8d46292ad752bb38047f65838b8ad2b/ndrive/client.py#L631-L653
carpedm20/ndrive
ndrive/client.py
Ndrive.getProperty
def getProperty(self, full_path, dummy = 56184): """Get a file property :param full_path: The full path to get the file or directory property. :return: ``metadata`` if success or ``False`` if failed to get property :metadata: - creationdate - exif - filelink - filelinkurl - filetype => 1: document, 2: image, 3: video, 4: music, 5: zip - fileuploadstatus - getcontentlength - getlastmodified - href - lastaccessed - protect - resourceno - resourcetype - thumbnail - totalfilecnt - totalfoldercnt - virusstatus """ data = {'orgresource': full_path, 'userid': self.user_id, 'useridx': self.useridx, 'dummy': dummy, } s, metadata = self.POST('getProperty', data) if s is True: return metadata else: return False
python
def getProperty(self, full_path, dummy = 56184): """Get a file property :param full_path: The full path to get the file or directory property. :return: ``metadata`` if success or ``False`` if failed to get property :metadata: - creationdate - exif - filelink - filelinkurl - filetype => 1: document, 2: image, 3: video, 4: music, 5: zip - fileuploadstatus - getcontentlength - getlastmodified - href - lastaccessed - protect - resourceno - resourcetype - thumbnail - totalfilecnt - totalfoldercnt - virusstatus """ data = {'orgresource': full_path, 'userid': self.user_id, 'useridx': self.useridx, 'dummy': dummy, } s, metadata = self.POST('getProperty', data) if s is True: return metadata else: return False
[ "def", "getProperty", "(", "self", ",", "full_path", ",", "dummy", "=", "56184", ")", ":", "data", "=", "{", "'orgresource'", ":", "full_path", ",", "'userid'", ":", "self", ".", "user_id", ",", "'useridx'", ":", "self", ".", "useridx", ",", "'dummy'", ...
Get a file property :param full_path: The full path to get the file or directory property. :return: ``metadata`` if success or ``False`` if failed to get property :metadata: - creationdate - exif - filelink - filelinkurl - filetype => 1: document, 2: image, 3: video, 4: music, 5: zip - fileuploadstatus - getcontentlength - getlastmodified - href - lastaccessed - protect - resourceno - resourcetype - thumbnail - totalfilecnt - totalfoldercnt - virusstatus
[ "Get", "a", "file", "property" ]
train
https://github.com/carpedm20/ndrive/blob/ac58eaf8a8d46292ad752bb38047f65838b8ad2b/ndrive/client.py#L655-L692
carpedm20/ndrive
ndrive/client.py
Ndrive.getVersionList
def getVersionList(self, full_path, startnum = 0, pagingrow = 50, dummy = 54213): """Get a version list of a file or dierectory. :param full_path: The full path to get the file or directory property. Path should start with '/' :param startnum: Start version index. :param pagingrow: Max # of version list in one page. :returns: ``metadata`` if succcess or ``False`` (failed to get history or there is no history) :metadata: - createuser - filesize - getlastmodified - href - versioninfo - versionkey """ data = {'orgresource': full_path, 'startnum': startnum, 'pagingrow': pagingrow, 'userid': self.user_id, 'useridx': self.useridx, 'dummy': dummy, } s, metadata = self.POST('getVersionList', data) if s is True: return metadata else: print "Error getVersionList: Cannot get version list" return False
python
def getVersionList(self, full_path, startnum = 0, pagingrow = 50, dummy = 54213): """Get a version list of a file or dierectory. :param full_path: The full path to get the file or directory property. Path should start with '/' :param startnum: Start version index. :param pagingrow: Max # of version list in one page. :returns: ``metadata`` if succcess or ``False`` (failed to get history or there is no history) :metadata: - createuser - filesize - getlastmodified - href - versioninfo - versionkey """ data = {'orgresource': full_path, 'startnum': startnum, 'pagingrow': pagingrow, 'userid': self.user_id, 'useridx': self.useridx, 'dummy': dummy, } s, metadata = self.POST('getVersionList', data) if s is True: return metadata else: print "Error getVersionList: Cannot get version list" return False
[ "def", "getVersionList", "(", "self", ",", "full_path", ",", "startnum", "=", "0", ",", "pagingrow", "=", "50", ",", "dummy", "=", "54213", ")", ":", "data", "=", "{", "'orgresource'", ":", "full_path", ",", "'startnum'", ":", "startnum", ",", "'pagingro...
Get a version list of a file or dierectory. :param full_path: The full path to get the file or directory property. Path should start with '/' :param startnum: Start version index. :param pagingrow: Max # of version list in one page. :returns: ``metadata`` if succcess or ``False`` (failed to get history or there is no history) :metadata: - createuser - filesize - getlastmodified - href - versioninfo - versionkey
[ "Get", "a", "version", "list", "of", "a", "file", "or", "dierectory", "." ]
train
https://github.com/carpedm20/ndrive/blob/ac58eaf8a8d46292ad752bb38047f65838b8ad2b/ndrive/client.py#L694-L725
carpedm20/ndrive
ndrive/client.py
Ndrive.setProperty
def setProperty(self, full_path, protect, dummy = 7046): """Set property of a file. :param full_path: The full path to get the file or directory property. :param protect: 'Y' or 'N', 중요 표시 :return: ``True`` when success to set property or ``False`` """ data = {'orgresource': full_path, 'protect': protect, 'userid': self.user_id, 'useridx': self.useridx, 'dummy': dummy, } s, metadata = self.POST('setProperty', data) if s is True: return True else: return False
python
def setProperty(self, full_path, protect, dummy = 7046): """Set property of a file. :param full_path: The full path to get the file or directory property. :param protect: 'Y' or 'N', 중요 표시 :return: ``True`` when success to set property or ``False`` """ data = {'orgresource': full_path, 'protect': protect, 'userid': self.user_id, 'useridx': self.useridx, 'dummy': dummy, } s, metadata = self.POST('setProperty', data) if s is True: return True else: return False
[ "def", "setProperty", "(", "self", ",", "full_path", ",", "protect", ",", "dummy", "=", "7046", ")", ":", "data", "=", "{", "'orgresource'", ":", "full_path", ",", "'protect'", ":", "protect", ",", "'userid'", ":", "self", ".", "user_id", ",", "'useridx'...
Set property of a file. :param full_path: The full path to get the file or directory property. :param protect: 'Y' or 'N', 중요 표시 :return: ``True`` when success to set property or ``False``
[ "Set", "property", "of", "a", "file", "." ]
train
https://github.com/carpedm20/ndrive/blob/ac58eaf8a8d46292ad752bb38047f65838b8ad2b/ndrive/client.py#L746-L765
carpedm20/ndrive
ndrive/client.py
Ndrive.getMusicAlbumList
def getMusicAlbumList(self, tagtype = 0, startnum = 0, pagingrow = 100, dummy = 51467): """Get music album list. :param tagtype: ? :return: ``metadata`` or ``False`` :metadata: - u'album':u'Greatest Hits Coldplay', - u'artist':u'Coldplay', - u'href':u'/Coldplay - Clocks.mp3', - u'musiccount':1, - u'resourceno':12459548378, - u'tagtype':1, - u'thumbnailpath':u'N', - u'totalpath':u'/' """ data = {'tagtype': tagtype, 'startnum': startnum, 'pagingrow': pagingrow, 'userid': self.user_id, 'useridx': self.useridx, 'dummy': dummy, } s, metadata = self.POST('getMusicAlbumList', data) if s is True: return metadata else: return False
python
def getMusicAlbumList(self, tagtype = 0, startnum = 0, pagingrow = 100, dummy = 51467): """Get music album list. :param tagtype: ? :return: ``metadata`` or ``False`` :metadata: - u'album':u'Greatest Hits Coldplay', - u'artist':u'Coldplay', - u'href':u'/Coldplay - Clocks.mp3', - u'musiccount':1, - u'resourceno':12459548378, - u'tagtype':1, - u'thumbnailpath':u'N', - u'totalpath':u'/' """ data = {'tagtype': tagtype, 'startnum': startnum, 'pagingrow': pagingrow, 'userid': self.user_id, 'useridx': self.useridx, 'dummy': dummy, } s, metadata = self.POST('getMusicAlbumList', data) if s is True: return metadata else: return False
[ "def", "getMusicAlbumList", "(", "self", ",", "tagtype", "=", "0", ",", "startnum", "=", "0", ",", "pagingrow", "=", "100", ",", "dummy", "=", "51467", ")", ":", "data", "=", "{", "'tagtype'", ":", "tagtype", ",", "'startnum'", ":", "startnum", ",", ...
Get music album list. :param tagtype: ? :return: ``metadata`` or ``False`` :metadata: - u'album':u'Greatest Hits Coldplay', - u'artist':u'Coldplay', - u'href':u'/Coldplay - Clocks.mp3', - u'musiccount':1, - u'resourceno':12459548378, - u'tagtype':1, - u'thumbnailpath':u'N', - u'totalpath':u'/'
[ "Get", "music", "album", "list", "." ]
train
https://github.com/carpedm20/ndrive/blob/ac58eaf8a8d46292ad752bb38047f65838b8ad2b/ndrive/client.py#L767-L796
carpedm20/ndrive
ndrive/client.py
Ndrive.doSearch
def doSearch(self, filename, filetype = None, type = 3, full_path = '/', sharedowner = 'A', datatype = 'all', sort = 'update', order = 'desc', searchtype = 'filesearch', startnum = 0, pagingrow = 100, includeworks = 'N', bodysearch = 'N', dummy = 36644): """Get music album list. There are 4 kinds in ``type``: - 1 : only directories with idxfolder property - 2 : only files - 3 : directories and files with thumbnail info (like viewHeight, viewWidth for Image file) - 4 : only directories except idxfolder - 5 : directories and files without thumbnail info Tyere are 5 kindes of ``filetype``: ex) None: all, 1: document, 2:image, 3: video, 4: msuic, 5: zip :param filename: Query to search. :param filetype: Type of a file to search. :param full_path: Directory path to search recursively. :param sharedowner: File priority to search. (P: priority files only, A: all files.) :param datatype: Data type of a file to search :param sort: Order criteria of search result. ('update', 'date' ...) :param order: Order of files. ('desc' or 'inc') :param searchtype: ??? :param includeworks: Whether to include Naver Work files to result. :param bodysearch: Search content of file. 
:returns: ``metadata`` or ``False`` :metadata: - authtoken - content - copyright - creationdate - domaintype - filelink - fileuploadstatus - getcontentlength - getlastmodified - hilightfilename - href - lastaccessed - owner - ownerid - owneridc - owneridx - ownership - protect - resourceno - resourcetype - root - root_shareno - s_type - sharedfoldername - sharedinfo - shareno - subpath - thumbnailpath - virusstatus """ data = {'filename': filename, 'filetype': filetype, 'dstresource': full_path, 'sharedowner': sharedowner, 'datatype': datatype, 'sort': sort, 'order': order, 'searchtype': searchtype, 'startnum': startnum, 'pagingrow': pagingrow, 'includeworks': includeworks, 'bodysearch': bodysearch, 'userid': self.user_id, 'useridx': self.useridx, 'dummy': dummy, } s, metadata = self.POST('doSearch', data) if s is True: if metadata: print "Success doSearch: no result found" return {} return metadata else: print "Failed doSearch: search failed" return False
python
def doSearch(self, filename, filetype = None, type = 3, full_path = '/', sharedowner = 'A', datatype = 'all', sort = 'update', order = 'desc', searchtype = 'filesearch', startnum = 0, pagingrow = 100, includeworks = 'N', bodysearch = 'N', dummy = 36644): """Get music album list. There are 4 kinds in ``type``: - 1 : only directories with idxfolder property - 2 : only files - 3 : directories and files with thumbnail info (like viewHeight, viewWidth for Image file) - 4 : only directories except idxfolder - 5 : directories and files without thumbnail info Tyere are 5 kindes of ``filetype``: ex) None: all, 1: document, 2:image, 3: video, 4: msuic, 5: zip :param filename: Query to search. :param filetype: Type of a file to search. :param full_path: Directory path to search recursively. :param sharedowner: File priority to search. (P: priority files only, A: all files.) :param datatype: Data type of a file to search :param sort: Order criteria of search result. ('update', 'date' ...) :param order: Order of files. ('desc' or 'inc') :param searchtype: ??? :param includeworks: Whether to include Naver Work files to result. :param bodysearch: Search content of file. 
:returns: ``metadata`` or ``False`` :metadata: - authtoken - content - copyright - creationdate - domaintype - filelink - fileuploadstatus - getcontentlength - getlastmodified - hilightfilename - href - lastaccessed - owner - ownerid - owneridc - owneridx - ownership - protect - resourceno - resourcetype - root - root_shareno - s_type - sharedfoldername - sharedinfo - shareno - subpath - thumbnailpath - virusstatus """ data = {'filename': filename, 'filetype': filetype, 'dstresource': full_path, 'sharedowner': sharedowner, 'datatype': datatype, 'sort': sort, 'order': order, 'searchtype': searchtype, 'startnum': startnum, 'pagingrow': pagingrow, 'includeworks': includeworks, 'bodysearch': bodysearch, 'userid': self.user_id, 'useridx': self.useridx, 'dummy': dummy, } s, metadata = self.POST('doSearch', data) if s is True: if metadata: print "Success doSearch: no result found" return {} return metadata else: print "Failed doSearch: search failed" return False
[ "def", "doSearch", "(", "self", ",", "filename", ",", "filetype", "=", "None", ",", "type", "=", "3", ",", "full_path", "=", "'/'", ",", "sharedowner", "=", "'A'", ",", "datatype", "=", "'all'", ",", "sort", "=", "'update'", ",", "order", "=", "'desc...
Get music album list. There are 4 kinds in ``type``: - 1 : only directories with idxfolder property - 2 : only files - 3 : directories and files with thumbnail info (like viewHeight, viewWidth for Image file) - 4 : only directories except idxfolder - 5 : directories and files without thumbnail info Tyere are 5 kindes of ``filetype``: ex) None: all, 1: document, 2:image, 3: video, 4: msuic, 5: zip :param filename: Query to search. :param filetype: Type of a file to search. :param full_path: Directory path to search recursively. :param sharedowner: File priority to search. (P: priority files only, A: all files.) :param datatype: Data type of a file to search :param sort: Order criteria of search result. ('update', 'date' ...) :param order: Order of files. ('desc' or 'inc') :param searchtype: ??? :param includeworks: Whether to include Naver Work files to result. :param bodysearch: Search content of file. :returns: ``metadata`` or ``False`` :metadata: - authtoken - content - copyright - creationdate - domaintype - filelink - fileuploadstatus - getcontentlength - getlastmodified - hilightfilename - href - lastaccessed - owner - ownerid - owneridc - owneridx - ownership - protect - resourceno - resourcetype - root - root_shareno - s_type - sharedfoldername - sharedinfo - shareno - subpath - thumbnailpath - virusstatus
[ "Get", "music", "album", "list", "." ]
train
https://github.com/carpedm20/ndrive/blob/ac58eaf8a8d46292ad752bb38047f65838b8ad2b/ndrive/client.py#L797-L879
ClimateImpactLab/DataFS
datafs/core/data_file.py
_choose_read_fs
def _choose_read_fs(authority, cache, read_path, version_check, hasher): ''' Context manager returning the appropriate up-to-date readable filesystem Use ``cache`` if it is a valid filessystem and has a file at ``read_path``, otherwise use ``authority``. If the file at ``read_path`` is out of date, update the file in ``cache`` before returning it. ''' if cache and cache.fs.isfile(read_path): if version_check(hasher(cache.fs.open(read_path, 'rb'))): yield cache.fs elif authority.fs.isfile(read_path): fs.utils.copyfile( authority.fs, read_path, cache.fs, read_path) yield cache.fs else: _makedirs(authority.fs, fs.path.dirname(read_path)) _makedirs(cache.fs, fs.path.dirname(read_path)) yield cache.fs else: if not authority.fs.isfile(read_path): _makedirs(authority.fs, fs.path.dirname(read_path)) yield authority.fs
python
def _choose_read_fs(authority, cache, read_path, version_check, hasher): ''' Context manager returning the appropriate up-to-date readable filesystem Use ``cache`` if it is a valid filessystem and has a file at ``read_path``, otherwise use ``authority``. If the file at ``read_path`` is out of date, update the file in ``cache`` before returning it. ''' if cache and cache.fs.isfile(read_path): if version_check(hasher(cache.fs.open(read_path, 'rb'))): yield cache.fs elif authority.fs.isfile(read_path): fs.utils.copyfile( authority.fs, read_path, cache.fs, read_path) yield cache.fs else: _makedirs(authority.fs, fs.path.dirname(read_path)) _makedirs(cache.fs, fs.path.dirname(read_path)) yield cache.fs else: if not authority.fs.isfile(read_path): _makedirs(authority.fs, fs.path.dirname(read_path)) yield authority.fs
[ "def", "_choose_read_fs", "(", "authority", ",", "cache", ",", "read_path", ",", "version_check", ",", "hasher", ")", ":", "if", "cache", "and", "cache", ".", "fs", ".", "isfile", "(", "read_path", ")", ":", "if", "version_check", "(", "hasher", "(", "ca...
Context manager returning the appropriate up-to-date readable filesystem Use ``cache`` if it is a valid filessystem and has a file at ``read_path``, otherwise use ``authority``. If the file at ``read_path`` is out of date, update the file in ``cache`` before returning it.
[ "Context", "manager", "returning", "the", "appropriate", "up", "-", "to", "-", "date", "readable", "filesystem" ]
train
https://github.com/ClimateImpactLab/DataFS/blob/0d32c2b4e18d300a11b748a552f6adbc3dd8f59d/datafs/core/data_file.py#L47-L78
ClimateImpactLab/DataFS
datafs/core/data_file.py
_get_write_fs
def _get_write_fs(): ''' Context manager returning a writable filesystem Use a temporary directory and clean on exit. .. todo:: Evaluate options for using a cached memoryFS or streaming object instead of an OSFS(tmp). This could offer significant performance improvements. Writing to the cache is less of a problem since this would be done in any case, though performance could be improved by writing to an in-memory filesystem and then writing to both cache and auth. ''' tmp = tempfile.mkdtemp() try: # Create a writeFS and path to the directory containing the archive write_fs = OSFS(tmp) try: yield write_fs finally: _close(write_fs) finally: shutil.rmtree(tmp)
python
def _get_write_fs(): ''' Context manager returning a writable filesystem Use a temporary directory and clean on exit. .. todo:: Evaluate options for using a cached memoryFS or streaming object instead of an OSFS(tmp). This could offer significant performance improvements. Writing to the cache is less of a problem since this would be done in any case, though performance could be improved by writing to an in-memory filesystem and then writing to both cache and auth. ''' tmp = tempfile.mkdtemp() try: # Create a writeFS and path to the directory containing the archive write_fs = OSFS(tmp) try: yield write_fs finally: _close(write_fs) finally: shutil.rmtree(tmp)
[ "def", "_get_write_fs", "(", ")", ":", "tmp", "=", "tempfile", ".", "mkdtemp", "(", ")", "try", ":", "# Create a writeFS and path to the directory containing the archive", "write_fs", "=", "OSFS", "(", "tmp", ")", "try", ":", "yield", "write_fs", "finally", ":", ...
Context manager returning a writable filesystem Use a temporary directory and clean on exit. .. todo:: Evaluate options for using a cached memoryFS or streaming object instead of an OSFS(tmp). This could offer significant performance improvements. Writing to the cache is less of a problem since this would be done in any case, though performance could be improved by writing to an in-memory filesystem and then writing to both cache and auth.
[ "Context", "manager", "returning", "a", "writable", "filesystem" ]
train
https://github.com/ClimateImpactLab/DataFS/blob/0d32c2b4e18d300a11b748a552f6adbc3dd8f59d/datafs/core/data_file.py#L82-L113
ClimateImpactLab/DataFS
datafs/core/data_file.py
_prepare_write_fs
def _prepare_write_fs(read_fs, cache, read_path, readwrite_mode=True): ''' Prepare a temporary filesystem for writing to read_path The file will be moved to write_path on close if modified. ''' with _get_write_fs() as write_fs: # If opening in read/write or append mode, make sure file data is # accessible if readwrite_mode: if not write_fs.isfile(read_path): _touch(write_fs, read_path) if read_fs.isfile(read_path): fs.utils.copyfile( read_fs, read_path, write_fs, read_path) else: _touch(write_fs, read_path) yield write_fs
python
def _prepare_write_fs(read_fs, cache, read_path, readwrite_mode=True): ''' Prepare a temporary filesystem for writing to read_path The file will be moved to write_path on close if modified. ''' with _get_write_fs() as write_fs: # If opening in read/write or append mode, make sure file data is # accessible if readwrite_mode: if not write_fs.isfile(read_path): _touch(write_fs, read_path) if read_fs.isfile(read_path): fs.utils.copyfile( read_fs, read_path, write_fs, read_path) else: _touch(write_fs, read_path) yield write_fs
[ "def", "_prepare_write_fs", "(", "read_fs", ",", "cache", ",", "read_path", ",", "readwrite_mode", "=", "True", ")", ":", "with", "_get_write_fs", "(", ")", "as", "write_fs", ":", "# If opening in read/write or append mode, make sure file data is", "# accessible", "if",...
Prepare a temporary filesystem for writing to read_path The file will be moved to write_path on close if modified.
[ "Prepare", "a", "temporary", "filesystem", "for", "writing", "to", "read_path" ]
train
https://github.com/ClimateImpactLab/DataFS/blob/0d32c2b4e18d300a11b748a552f6adbc3dd8f59d/datafs/core/data_file.py#L117-L140
ClimateImpactLab/DataFS
datafs/core/data_file.py
get_local_path
def get_local_path( authority, cache, update, version_check, hasher, read_path, write_path=None, cache_on_write=False): ''' Context manager for retrieving a system path for I/O and updating on change Parameters ---------- authority : object :py:mod:`pyFilesystem` filesystem object to use as the authoritative, up-to-date source for the archive cache : object :py:mod:`pyFilesystem` filesystem object to use as the cache. Default ``None``. use_cache : bool update, service_path, version_check, \*\*kwargs ''' if write_path is None: write_path = read_path with _choose_read_fs( authority, cache, read_path, version_check, hasher) as read_fs: with _prepare_write_fs( read_fs, cache, read_path, readwrite_mode=True) as write_fs: yield write_fs.getsyspath(read_path) if write_fs.isfile(read_path): info = write_fs.getinfokeys(read_path, 'size') if 'size' in info: if info['size'] == 0: return with write_fs.open(read_path, 'rb') as f: checksum = hasher(f) if not version_check(checksum): if ( cache_on_write or ( cache and ( fs.path.abspath(read_path) == fs.path.abspath(write_path)) and cache.fs.isfile(read_path) ) ): _makedirs(cache.fs, fs.path.dirname(write_path)) fs.utils.copyfile( write_fs, read_path, cache.fs, write_path) _makedirs(authority.fs, fs.path.dirname(write_path)) fs.utils.copyfile( cache.fs, write_path, authority.fs, write_path) else: _makedirs(authority.fs, fs.path.dirname(write_path)) fs.utils.copyfile( write_fs, read_path, authority.fs, write_path) update(**checksum) else: raise OSError( 'Local file removed during execution. ' 'Archive not updated.')
python
def get_local_path( authority, cache, update, version_check, hasher, read_path, write_path=None, cache_on_write=False): ''' Context manager for retrieving a system path for I/O and updating on change Parameters ---------- authority : object :py:mod:`pyFilesystem` filesystem object to use as the authoritative, up-to-date source for the archive cache : object :py:mod:`pyFilesystem` filesystem object to use as the cache. Default ``None``. use_cache : bool update, service_path, version_check, \*\*kwargs ''' if write_path is None: write_path = read_path with _choose_read_fs( authority, cache, read_path, version_check, hasher) as read_fs: with _prepare_write_fs( read_fs, cache, read_path, readwrite_mode=True) as write_fs: yield write_fs.getsyspath(read_path) if write_fs.isfile(read_path): info = write_fs.getinfokeys(read_path, 'size') if 'size' in info: if info['size'] == 0: return with write_fs.open(read_path, 'rb') as f: checksum = hasher(f) if not version_check(checksum): if ( cache_on_write or ( cache and ( fs.path.abspath(read_path) == fs.path.abspath(write_path)) and cache.fs.isfile(read_path) ) ): _makedirs(cache.fs, fs.path.dirname(write_path)) fs.utils.copyfile( write_fs, read_path, cache.fs, write_path) _makedirs(authority.fs, fs.path.dirname(write_path)) fs.utils.copyfile( cache.fs, write_path, authority.fs, write_path) else: _makedirs(authority.fs, fs.path.dirname(write_path)) fs.utils.copyfile( write_fs, read_path, authority.fs, write_path) update(**checksum) else: raise OSError( 'Local file removed during execution. ' 'Archive not updated.')
[ "def", "get_local_path", "(", "authority", ",", "cache", ",", "update", ",", "version_check", ",", "hasher", ",", "read_path", ",", "write_path", "=", "None", ",", "cache_on_write", "=", "False", ")", ":", "if", "write_path", "is", "None", ":", "write_path",...
Context manager for retrieving a system path for I/O and updating on change Parameters ---------- authority : object :py:mod:`pyFilesystem` filesystem object to use as the authoritative, up-to-date source for the archive cache : object :py:mod:`pyFilesystem` filesystem object to use as the cache. Default ``None``. use_cache : bool update, service_path, version_check, \*\*kwargs
[ "Context", "manager", "for", "retrieving", "a", "system", "path", "for", "I", "/", "O", "and", "updating", "on", "change" ]
train
https://github.com/ClimateImpactLab/DataFS/blob/0d32c2b4e18d300a11b748a552f6adbc3dd8f59d/datafs/core/data_file.py#L247-L327
whtsky/Django-WeRoBot
django_werobot.py
make_view
def make_view(robot): """ 为一个 BaseRoBot 生成 Django view。 :param robot: 一个 BaseRoBot 实例。 :return: 一个标准的 Django view """ assert isinstance(robot, BaseRoBot),\ "RoBot should be an BaseRoBot instance." @csrf_exempt def werobot_view(request): timestamp = request.GET.get("timestamp", "") nonce = request.GET.get("nonce", "") signature = request.GET.get("signature", "") if not robot.check_signature( timestamp=timestamp, nonce=nonce, signature=signature ): return HttpResponseForbidden() if request.method == "GET": return HttpResponse(request.GET.get("echostr", "")) elif request.method == "POST": body = request.body message = parse_user_msg(body) reply = robot.get_reply(message) return HttpResponse( create_reply(reply, message=message), content_type="application/xml;charset=utf-8" ) return HttpResponseNotAllowed(['GET', 'POST']) return werobot_view
python
def make_view(robot): """ 为一个 BaseRoBot 生成 Django view。 :param robot: 一个 BaseRoBot 实例。 :return: 一个标准的 Django view """ assert isinstance(robot, BaseRoBot),\ "RoBot should be an BaseRoBot instance." @csrf_exempt def werobot_view(request): timestamp = request.GET.get("timestamp", "") nonce = request.GET.get("nonce", "") signature = request.GET.get("signature", "") if not robot.check_signature( timestamp=timestamp, nonce=nonce, signature=signature ): return HttpResponseForbidden() if request.method == "GET": return HttpResponse(request.GET.get("echostr", "")) elif request.method == "POST": body = request.body message = parse_user_msg(body) reply = robot.get_reply(message) return HttpResponse( create_reply(reply, message=message), content_type="application/xml;charset=utf-8" ) return HttpResponseNotAllowed(['GET', 'POST']) return werobot_view
[ "def", "make_view", "(", "robot", ")", ":", "assert", "isinstance", "(", "robot", ",", "BaseRoBot", ")", ",", "\"RoBot should be an BaseRoBot instance.\"", "@", "csrf_exempt", "def", "werobot_view", "(", "request", ")", ":", "timestamp", "=", "request", ".", "GE...
为一个 BaseRoBot 生成 Django view。 :param robot: 一个 BaseRoBot 实例。 :return: 一个标准的 Django view
[ "为一个", "BaseRoBot", "生成", "Django", "view。" ]
train
https://github.com/whtsky/Django-WeRoBot/blob/8068135198da8556c1b57e9195a6f1ceab4d199d/django_werobot.py#L27-L60
pydanny/simplicity
simplicity.py
text_cleanup
def text_cleanup(data, key, last_type): """ I strip extra whitespace off multi-line strings if they are ready to be stripped!""" if key in data and last_type == STRING_TYPE: data[key] = data[key].strip() return data
python
def text_cleanup(data, key, last_type): """ I strip extra whitespace off multi-line strings if they are ready to be stripped!""" if key in data and last_type == STRING_TYPE: data[key] = data[key].strip() return data
[ "def", "text_cleanup", "(", "data", ",", "key", ",", "last_type", ")", ":", "if", "key", "in", "data", "and", "last_type", "==", "STRING_TYPE", ":", "data", "[", "key", "]", "=", "data", "[", "key", "]", ".", "strip", "(", ")", "return", "data" ]
I strip extra whitespace off multi-line strings if they are ready to be stripped!
[ "I", "strip", "extra", "whitespace", "off", "multi", "-", "line", "strings", "if", "they", "are", "ready", "to", "be", "stripped!" ]
train
https://github.com/pydanny/simplicity/blob/aef4ce39b0965b8d333c67c9d6ec5baecee9c617/simplicity.py#L24-L28
pydanny/simplicity
simplicity.py
rst_to_json
def rst_to_json(text): """ I convert Restructured Text with field lists into Dictionaries! TODO: Convert to text node approach. """ records = [] last_type = None key = None data = {} directive = False lines = text.splitlines() for index, line in enumerate(lines): # check for directives if len(line) and line.strip().startswith(".."): directive = True continue # set the title if len(line) and (line[0] in string.ascii_letters or line[0].isdigit()): directive = False try: if lines[index + 1][0] not in DIVIDERS: continue except IndexError: continue data = text_cleanup(data, key, last_type) data = {"title": line.strip()} records.append( data ) continue # Grab standard fields (int, string, float) if len(line) and line[0].startswith(":"): data = text_cleanup(data, key, last_type) index = line.index(":", 1) key = line[1:index] value = line[index + 1:].strip() data[key], last_type = type_converter(value) directive = False continue # Work on multi-line strings if len(line) and line[0].startswith(" ") and directive == False: if not isinstance(data[key], str): # Not a string so continue on continue value = line.strip() if not len(value): # empty string, continue on continue # add next line data[key] += "\n{}".format(value) continue if last_type == STRING_TYPE and not len(line): if key in data.keys(): data[key] += "\n" return json.dumps(records)
python
def rst_to_json(text): """ I convert Restructured Text with field lists into Dictionaries! TODO: Convert to text node approach. """ records = [] last_type = None key = None data = {} directive = False lines = text.splitlines() for index, line in enumerate(lines): # check for directives if len(line) and line.strip().startswith(".."): directive = True continue # set the title if len(line) and (line[0] in string.ascii_letters or line[0].isdigit()): directive = False try: if lines[index + 1][0] not in DIVIDERS: continue except IndexError: continue data = text_cleanup(data, key, last_type) data = {"title": line.strip()} records.append( data ) continue # Grab standard fields (int, string, float) if len(line) and line[0].startswith(":"): data = text_cleanup(data, key, last_type) index = line.index(":", 1) key = line[1:index] value = line[index + 1:].strip() data[key], last_type = type_converter(value) directive = False continue # Work on multi-line strings if len(line) and line[0].startswith(" ") and directive == False: if not isinstance(data[key], str): # Not a string so continue on continue value = line.strip() if not len(value): # empty string, continue on continue # add next line data[key] += "\n{}".format(value) continue if last_type == STRING_TYPE and not len(line): if key in data.keys(): data[key] += "\n" return json.dumps(records)
[ "def", "rst_to_json", "(", "text", ")", ":", "records", "=", "[", "]", "last_type", "=", "None", "key", "=", "None", "data", "=", "{", "}", "directive", "=", "False", "lines", "=", "text", ".", "splitlines", "(", ")", "for", "index", ",", "line", "...
I convert Restructured Text with field lists into Dictionaries! TODO: Convert to text node approach.
[ "I", "convert", "Restructured", "Text", "with", "field", "lists", "into", "Dictionaries!" ]
train
https://github.com/pydanny/simplicity/blob/aef4ce39b0965b8d333c67c9d6ec5baecee9c617/simplicity.py#L31-L92
pydanny/simplicity
simplicity.py
type_converter
def type_converter(text): """ I convert strings into integers, floats, and strings! """ if text.isdigit(): return int(text), int try: return float(text), float except ValueError: return text, STRING_TYPE
python
def type_converter(text): """ I convert strings into integers, floats, and strings! """ if text.isdigit(): return int(text), int try: return float(text), float except ValueError: return text, STRING_TYPE
[ "def", "type_converter", "(", "text", ")", ":", "if", "text", ".", "isdigit", "(", ")", ":", "return", "int", "(", "text", ")", ",", "int", "try", ":", "return", "float", "(", "text", ")", ",", "float", "except", "ValueError", ":", "return", "text", ...
I convert strings into integers, floats, and strings!
[ "I", "convert", "strings", "into", "integers", "floats", "and", "strings!" ]
train
https://github.com/pydanny/simplicity/blob/aef4ce39b0965b8d333c67c9d6ec5baecee9c617/simplicity.py#L95-L103
pydanny/simplicity
simplicity.py
command_line_runner
def command_line_runner(): """ I run functions from the command-line! """ filename = sys.argv[-1] if not filename.endswith(".rst"): print("ERROR! Please enter a ReStructuredText filename!") sys.exit() print(rst_to_json(file_opener(filename)))
python
def command_line_runner(): """ I run functions from the command-line! """ filename = sys.argv[-1] if not filename.endswith(".rst"): print("ERROR! Please enter a ReStructuredText filename!") sys.exit() print(rst_to_json(file_opener(filename)))
[ "def", "command_line_runner", "(", ")", ":", "filename", "=", "sys", ".", "argv", "[", "-", "1", "]", "if", "not", "filename", ".", "endswith", "(", "\".rst\"", ")", ":", "print", "(", "\"ERROR! Please enter a ReStructuredText filename!\"", ")", "sys", ".", ...
I run functions from the command-line!
[ "I", "run", "functions", "from", "the", "command", "-", "line!" ]
train
https://github.com/pydanny/simplicity/blob/aef4ce39b0965b8d333c67c9d6ec5baecee9c617/simplicity.py#L106-L112
minrk/umsgpack
msgpacku.py
packb
def packb(obj, **kwargs): """wrap msgpack.packb, setting use_bin_type=True by default""" kwargs.setdefault('use_bin_type', True) return msgpack.packb(obj, **kwargs)
python
def packb(obj, **kwargs): """wrap msgpack.packb, setting use_bin_type=True by default""" kwargs.setdefault('use_bin_type', True) return msgpack.packb(obj, **kwargs)
[ "def", "packb", "(", "obj", ",", "*", "*", "kwargs", ")", ":", "kwargs", ".", "setdefault", "(", "'use_bin_type'", ",", "True", ")", "return", "msgpack", ".", "packb", "(", "obj", ",", "*", "*", "kwargs", ")" ]
wrap msgpack.packb, setting use_bin_type=True by default
[ "wrap", "msgpack", ".", "packb", "setting", "use_bin_type", "=", "True", "by", "default" ]
train
https://github.com/minrk/umsgpack/blob/975c64b68d7b43d67c94d305026de2bd56034065/msgpacku.py#L11-L14
snipsco/snipsmanagercore
snipsmanagercore/audio_player.py
AudioPlayer.play
def play(cls, file_path, on_done=None, logger=None): """ Play an audio file. :param file_path: the path to the file to play. :param on_done: callback when audio playback completes. """ pygame.mixer.init() try: pygame.mixer.music.load(file_path) except pygame.error as e: if logger is not None: logger.warning(str(e)) return pygame.mixer.music.play() while pygame.mixer.music.get_busy(): time.sleep(0.1) continue if on_done: on_done()
python
def play(cls, file_path, on_done=None, logger=None): """ Play an audio file. :param file_path: the path to the file to play. :param on_done: callback when audio playback completes. """ pygame.mixer.init() try: pygame.mixer.music.load(file_path) except pygame.error as e: if logger is not None: logger.warning(str(e)) return pygame.mixer.music.play() while pygame.mixer.music.get_busy(): time.sleep(0.1) continue if on_done: on_done()
[ "def", "play", "(", "cls", ",", "file_path", ",", "on_done", "=", "None", ",", "logger", "=", "None", ")", ":", "pygame", ".", "mixer", ".", "init", "(", ")", "try", ":", "pygame", ".", "mixer", ".", "music", ".", "load", "(", "file_path", ")", "...
Play an audio file. :param file_path: the path to the file to play. :param on_done: callback when audio playback completes.
[ "Play", "an", "audio", "file", "." ]
train
https://github.com/snipsco/snipsmanagercore/blob/93eaaa665887f790a30ba86af5ffee394bfd8ede/snipsmanagercore/audio_player.py#L12-L31
snipsco/snipsmanagercore
snipsmanagercore/audio_player.py
AudioPlayer.play_async
def play_async(cls, file_path, on_done=None): """ Play an audio file asynchronously. :param file_path: the path to the file to play. :param on_done: callback when audio playback completes. """ thread = threading.Thread( target=AudioPlayer.play, args=(file_path, on_done,)) thread.start()
python
def play_async(cls, file_path, on_done=None): """ Play an audio file asynchronously. :param file_path: the path to the file to play. :param on_done: callback when audio playback completes. """ thread = threading.Thread( target=AudioPlayer.play, args=(file_path, on_done,)) thread.start()
[ "def", "play_async", "(", "cls", ",", "file_path", ",", "on_done", "=", "None", ")", ":", "thread", "=", "threading", ".", "Thread", "(", "target", "=", "AudioPlayer", ".", "play", ",", "args", "=", "(", "file_path", ",", "on_done", ",", ")", ")", "t...
Play an audio file asynchronously. :param file_path: the path to the file to play. :param on_done: callback when audio playback completes.
[ "Play", "an", "audio", "file", "asynchronously", "." ]
train
https://github.com/snipsco/snipsmanagercore/blob/93eaaa665887f790a30ba86af5ffee394bfd8ede/snipsmanagercore/audio_player.py#L34-L42
pletzer/pnumpy
src/pnPartition.py
Partition.extract
def extract(self, disp): """ Extraction operation @param displacement vector in index space @return the part of the domain that is exposed by the shift """ res = copy.deepcopy(self) for i in range(self.ndims): d = disp[i] s = self.domain[i] if d > 0: res.domain[i] = slice(s.start, d) elif d < 0: res.domain[i] = slice(d, s.stop) return res
python
def extract(self, disp): """ Extraction operation @param displacement vector in index space @return the part of the domain that is exposed by the shift """ res = copy.deepcopy(self) for i in range(self.ndims): d = disp[i] s = self.domain[i] if d > 0: res.domain[i] = slice(s.start, d) elif d < 0: res.domain[i] = slice(d, s.stop) return res
[ "def", "extract", "(", "self", ",", "disp", ")", ":", "res", "=", "copy", ".", "deepcopy", "(", "self", ")", "for", "i", "in", "range", "(", "self", ".", "ndims", ")", ":", "d", "=", "disp", "[", "i", "]", "s", "=", "self", ".", "domain", "["...
Extraction operation @param displacement vector in index space @return the part of the domain that is exposed by the shift
[ "Extraction", "operation" ]
train
https://github.com/pletzer/pnumpy/blob/9e6d308be94a42637466b91ab1a7b4d64b4c29ae/src/pnPartition.py#L48-L62
PedalPi/Raspberry-Physical
physical/liquidcristal/liquid_crystal.py
LiquidCrystal.home
def home(self): """Return to initial position (row=0, column=0)""" self.row = 0 self.column = 0 self.command(Command.RETURN_HOME) msleep(2)
python
def home(self): """Return to initial position (row=0, column=0)""" self.row = 0 self.column = 0 self.command(Command.RETURN_HOME) msleep(2)
[ "def", "home", "(", "self", ")", ":", "self", ".", "row", "=", "0", "self", ".", "column", "=", "0", "self", ".", "command", "(", "Command", ".", "RETURN_HOME", ")", "msleep", "(", "2", ")" ]
Return to initial position (row=0, column=0)
[ "Return", "to", "initial", "position", "(", "row", "=", "0", "column", "=", "0", ")" ]
train
https://github.com/PedalPi/Raspberry-Physical/blob/3dc71b6997ef36d0de256c5db7a1b38178937fd5/physical/liquidcristal/liquid_crystal.py#L149-L155
PedalPi/Raspberry-Physical
physical/liquidcristal/liquid_crystal.py
LiquidCrystal.visible
def visible(self, status): """Turn the display on/off (quickly)""" self._display_control = ByteUtil.apply_flag(self._display_control, Command.DISPLAY_ON, status) self.command(self._display_control)
python
def visible(self, status): """Turn the display on/off (quickly)""" self._display_control = ByteUtil.apply_flag(self._display_control, Command.DISPLAY_ON, status) self.command(self._display_control)
[ "def", "visible", "(", "self", ",", "status", ")", ":", "self", ".", "_display_control", "=", "ByteUtil", ".", "apply_flag", "(", "self", ".", "_display_control", ",", "Command", ".", "DISPLAY_ON", ",", "status", ")", "self", ".", "command", "(", "self", ...
Turn the display on/off (quickly)
[ "Turn", "the", "display", "on", "/", "off", "(", "quickly", ")" ]
train
https://github.com/PedalPi/Raspberry-Physical/blob/3dc71b6997ef36d0de256c5db7a1b38178937fd5/physical/liquidcristal/liquid_crystal.py#L163-L166
PedalPi/Raspberry-Physical
physical/liquidcristal/liquid_crystal.py
LiquidCrystal.cursor
def cursor(self, status): """Turn underline cursor visibility on/off""" self._display_control = ByteUtil.apply_flag(self._display_control, Command.CURSOR_ON, status) self.command(self._display_control)
python
def cursor(self, status): """Turn underline cursor visibility on/off""" self._display_control = ByteUtil.apply_flag(self._display_control, Command.CURSOR_ON, status) self.command(self._display_control)
[ "def", "cursor", "(", "self", ",", "status", ")", ":", "self", ".", "_display_control", "=", "ByteUtil", ".", "apply_flag", "(", "self", ".", "_display_control", ",", "Command", ".", "CURSOR_ON", ",", "status", ")", "self", ".", "command", "(", "self", "...
Turn underline cursor visibility on/off
[ "Turn", "underline", "cursor", "visibility", "on", "/", "off" ]
train
https://github.com/PedalPi/Raspberry-Physical/blob/3dc71b6997ef36d0de256c5db7a1b38178937fd5/physical/liquidcristal/liquid_crystal.py#L174-L177
PedalPi/Raspberry-Physical
physical/liquidcristal/liquid_crystal.py
LiquidCrystal.blink
def blink(self, status): """Turn blink cursor visibility on/off""" self._display_control = ByteUtil.apply_flag(self._display_control, Command.BLINKING_ON, status) self.command(self._display_control)
python
def blink(self, status): """Turn blink cursor visibility on/off""" self._display_control = ByteUtil.apply_flag(self._display_control, Command.BLINKING_ON, status) self.command(self._display_control)
[ "def", "blink", "(", "self", ",", "status", ")", ":", "self", ".", "_display_control", "=", "ByteUtil", ".", "apply_flag", "(", "self", ".", "_display_control", ",", "Command", ".", "BLINKING_ON", ",", "status", ")", "self", ".", "command", "(", "self", ...
Turn blink cursor visibility on/off
[ "Turn", "blink", "cursor", "visibility", "on", "/", "off" ]
train
https://github.com/PedalPi/Raspberry-Physical/blob/3dc71b6997ef36d0de256c5db7a1b38178937fd5/physical/liquidcristal/liquid_crystal.py#L185-L188
PedalPi/Raspberry-Physical
physical/liquidcristal/liquid_crystal.py
LiquidCrystal.left_to_right
def left_to_right(self): """This is for text that flows Left to Right""" self._entry_mode |= Command.MODE_INCREMENT self.command(self._entry_mode)
python
def left_to_right(self): """This is for text that flows Left to Right""" self._entry_mode |= Command.MODE_INCREMENT self.command(self._entry_mode)
[ "def", "left_to_right", "(", "self", ")", ":", "self", ".", "_entry_mode", "|=", "Command", ".", "MODE_INCREMENT", "self", ".", "command", "(", "self", ".", "_entry_mode", ")" ]
This is for text that flows Left to Right
[ "This", "is", "for", "text", "that", "flows", "Left", "to", "Right" ]
train
https://github.com/PedalPi/Raspberry-Physical/blob/3dc71b6997ef36d0de256c5db7a1b38178937fd5/physical/liquidcristal/liquid_crystal.py#L206-L209
PedalPi/Raspberry-Physical
physical/liquidcristal/liquid_crystal.py
LiquidCrystal.right_to_left
def right_to_left(self): """This is for text that flows Right to Left""" self._entry_mode &= ~Command.MODE_INCREMENT self.command(self._entry_mode)
python
def right_to_left(self): """This is for text that flows Right to Left""" self._entry_mode &= ~Command.MODE_INCREMENT self.command(self._entry_mode)
[ "def", "right_to_left", "(", "self", ")", ":", "self", ".", "_entry_mode", "&=", "~", "Command", ".", "MODE_INCREMENT", "self", ".", "command", "(", "self", ".", "_entry_mode", ")" ]
This is for text that flows Right to Left
[ "This", "is", "for", "text", "that", "flows", "Right", "to", "Left" ]
train
https://github.com/PedalPi/Raspberry-Physical/blob/3dc71b6997ef36d0de256c5db7a1b38178937fd5/physical/liquidcristal/liquid_crystal.py#L211-L214
caktus/django-sticky-uploads
stickyuploads/storage.py
TempFileSystemStorage.get_available_name
def get_available_name(self, name, max_length=None): """Return relative path to name placed in random directory""" tempdir = tempfile.mkdtemp(dir=self.base_location) name = os.path.join( os.path.basename(tempdir), os.path.basename(name), ) method = super(TempFileSystemStorage, self).get_available_name return method(name, max_length=max_length)
python
def get_available_name(self, name, max_length=None): """Return relative path to name placed in random directory""" tempdir = tempfile.mkdtemp(dir=self.base_location) name = os.path.join( os.path.basename(tempdir), os.path.basename(name), ) method = super(TempFileSystemStorage, self).get_available_name return method(name, max_length=max_length)
[ "def", "get_available_name", "(", "self", ",", "name", ",", "max_length", "=", "None", ")", ":", "tempdir", "=", "tempfile", ".", "mkdtemp", "(", "dir", "=", "self", ".", "base_location", ")", "name", "=", "os", ".", "path", ".", "join", "(", "os", "...
Return relative path to name placed in random directory
[ "Return", "relative", "path", "to", "name", "placed", "in", "random", "directory" ]
train
https://github.com/caktus/django-sticky-uploads/blob/a57539655ba991f63f31f0a5c98d790947bcd1b8/stickyuploads/storage.py#L18-L26
morngrar/ui
ui/ui.py
menu
def menu(items, heading): '''Takes list of dictionaries and prints a menu. items parameter should be in the form of a list, containing dictionaries with the keys: {"key", "text", "function"}. Typing the key for a menuitem, followed by return, will run "function". ''' heading = "\n"*5 + heading # A little vertical padding while True: keydict = {} clear_screen() print(heading) for item in items: menustring = " " + item["key"] + " " + item["text"] keydict[item["key"]] = item["function"] print(menustring) key = input("\nType key and Return (q to quit): ").strip() if key.lower() == "q": return else: try: ret = keydict[key]() if ret: # If child returns non-false, exit menu. return 1 except KeyError: # Handle garbage input. continue
python
def menu(items, heading): '''Takes list of dictionaries and prints a menu. items parameter should be in the form of a list, containing dictionaries with the keys: {"key", "text", "function"}. Typing the key for a menuitem, followed by return, will run "function". ''' heading = "\n"*5 + heading # A little vertical padding while True: keydict = {} clear_screen() print(heading) for item in items: menustring = " " + item["key"] + " " + item["text"] keydict[item["key"]] = item["function"] print(menustring) key = input("\nType key and Return (q to quit): ").strip() if key.lower() == "q": return else: try: ret = keydict[key]() if ret: # If child returns non-false, exit menu. return 1 except KeyError: # Handle garbage input. continue
[ "def", "menu", "(", "items", ",", "heading", ")", ":", "heading", "=", "\"\\n\"", "*", "5", "+", "heading", "# A little vertical padding", "while", "True", ":", "keydict", "=", "{", "}", "clear_screen", "(", ")", "print", "(", "heading", ")", "for", "ite...
Takes list of dictionaries and prints a menu. items parameter should be in the form of a list, containing dictionaries with the keys: {"key", "text", "function"}. Typing the key for a menuitem, followed by return, will run "function".
[ "Takes", "list", "of", "dictionaries", "and", "prints", "a", "menu", ".", "items", "parameter", "should", "be", "in", "the", "form", "of", "a", "list", "containing", "dictionaries", "with", "the", "keys", ":", "{", "key", "text", "function", "}", "." ]
train
https://github.com/morngrar/ui/blob/93e160b55ff7d486a53dba7a8c0f2d46e6f95ed9/ui/ui.py#L20-L52
morngrar/ui
ui/ui.py
yn_prompt
def yn_prompt(text): ''' Takes the text prompt, and presents it, takes only "y" or "n" for answers, and returns True or False. Repeats itself on bad input. ''' text = "\n"+ text + "\n('y' or 'n'): " while True: answer = input(text).strip() if answer != 'y' and answer != 'n': continue elif answer == 'y': return True elif answer == 'n': return False
python
def yn_prompt(text): ''' Takes the text prompt, and presents it, takes only "y" or "n" for answers, and returns True or False. Repeats itself on bad input. ''' text = "\n"+ text + "\n('y' or 'n'): " while True: answer = input(text).strip() if answer != 'y' and answer != 'n': continue elif answer == 'y': return True elif answer == 'n': return False
[ "def", "yn_prompt", "(", "text", ")", ":", "text", "=", "\"\\n\"", "+", "text", "+", "\"\\n('y' or 'n'): \"", "while", "True", ":", "answer", "=", "input", "(", "text", ")", ".", "strip", "(", ")", "if", "answer", "!=", "'y'", "and", "answer", "!=", ...
Takes the text prompt, and presents it, takes only "y" or "n" for answers, and returns True or False. Repeats itself on bad input.
[ "Takes", "the", "text", "prompt", "and", "presents", "it", "takes", "only", "y", "or", "n", "for", "answers", "and", "returns", "True", "or", "False", ".", "Repeats", "itself", "on", "bad", "input", "." ]
train
https://github.com/morngrar/ui/blob/93e160b55ff7d486a53dba7a8c0f2d46e6f95ed9/ui/ui.py#L55-L71
morngrar/ui
ui/ui.py
underline
def underline(text): '''Takes a string, and returns it underscored.''' text += "\n" for i in range(len(text)-1): text += "=" text += "\n" return text
python
def underline(text): '''Takes a string, and returns it underscored.''' text += "\n" for i in range(len(text)-1): text += "=" text += "\n" return text
[ "def", "underline", "(", "text", ")", ":", "text", "+=", "\"\\n\"", "for", "i", "in", "range", "(", "len", "(", "text", ")", "-", "1", ")", ":", "text", "+=", "\"=\"", "text", "+=", "\"\\n\"", "return", "text" ]
Takes a string, and returns it underscored.
[ "Takes", "a", "string", "and", "returns", "it", "underscored", "." ]
train
https://github.com/morngrar/ui/blob/93e160b55ff7d486a53dba7a8c0f2d46e6f95ed9/ui/ui.py#L79-L86
timstaley/voevent-parse
src/voeventparse/convenience.py
get_event_time_as_utc
def get_event_time_as_utc(voevent, index=0): """ Extracts the event time from a given `WhereWhen.ObsDataLocation`. Returns a datetime (timezone-aware, UTC). Accesses a `WhereWhere.ObsDataLocation.ObservationLocation` element and returns the AstroCoords.Time.TimeInstant.ISOTime element, converted to a (UTC-timezoned) datetime. Note that a packet may include multiple 'ObsDataLocation' entries under the 'WhereWhen' section, for example giving locations of an object moving over time. Most packets will have only one, however, so the default is to access the first. This function now implements conversion from the TDB (Barycentric Dynamical Time) time scale in ISOTime format, since this is the format used by GAIA VOEvents. (See also http://docs.astropy.org/en/stable/time/#time-scale ) Other timescales (i.e. TT, GPS) will presumably be formatted as a TimeOffset, parsing this format is not yet implemented. Args: voevent (:class:`voeventparse.voevent.Voevent`): Root node of the VOevent etree. index (int): Index of the ObsDataLocation to extract an ISOtime from. Returns: :class:`datetime.datetime`: Datetime representing the event-timestamp, converted to UTC (timezone aware). 
""" try: od = voevent.WhereWhen.ObsDataLocation[index] ol = od.ObservationLocation coord_sys = ol.AstroCoords.attrib['coord_system_id'] timesys_identifier = coord_sys.split('-')[0] if timesys_identifier == 'UTC': isotime_str = str(ol.AstroCoords.Time.TimeInstant.ISOTime) return iso8601.parse_date(isotime_str) elif (timesys_identifier == 'TDB'): isotime_str = str(ol.AstroCoords.Time.TimeInstant.ISOTime) isotime_dtime = iso8601.parse_date(isotime_str) tdb_time = astropy.time.Time(isotime_dtime, scale='tdb') return tdb_time.utc.to_datetime().replace(tzinfo=pytz.UTC) elif (timesys_identifier == 'TT' or timesys_identifier == 'GPS'): raise NotImplementedError( "Conversion from time-system '{}' to UTC not yet implemented" ) else: raise ValueError( 'Unrecognised time-system: {} (badly formatted VOEvent?)'.format( timesys_identifier ) ) except AttributeError: return None
python
def get_event_time_as_utc(voevent, index=0): """ Extracts the event time from a given `WhereWhen.ObsDataLocation`. Returns a datetime (timezone-aware, UTC). Accesses a `WhereWhere.ObsDataLocation.ObservationLocation` element and returns the AstroCoords.Time.TimeInstant.ISOTime element, converted to a (UTC-timezoned) datetime. Note that a packet may include multiple 'ObsDataLocation' entries under the 'WhereWhen' section, for example giving locations of an object moving over time. Most packets will have only one, however, so the default is to access the first. This function now implements conversion from the TDB (Barycentric Dynamical Time) time scale in ISOTime format, since this is the format used by GAIA VOEvents. (See also http://docs.astropy.org/en/stable/time/#time-scale ) Other timescales (i.e. TT, GPS) will presumably be formatted as a TimeOffset, parsing this format is not yet implemented. Args: voevent (:class:`voeventparse.voevent.Voevent`): Root node of the VOevent etree. index (int): Index of the ObsDataLocation to extract an ISOtime from. Returns: :class:`datetime.datetime`: Datetime representing the event-timestamp, converted to UTC (timezone aware). 
""" try: od = voevent.WhereWhen.ObsDataLocation[index] ol = od.ObservationLocation coord_sys = ol.AstroCoords.attrib['coord_system_id'] timesys_identifier = coord_sys.split('-')[0] if timesys_identifier == 'UTC': isotime_str = str(ol.AstroCoords.Time.TimeInstant.ISOTime) return iso8601.parse_date(isotime_str) elif (timesys_identifier == 'TDB'): isotime_str = str(ol.AstroCoords.Time.TimeInstant.ISOTime) isotime_dtime = iso8601.parse_date(isotime_str) tdb_time = astropy.time.Time(isotime_dtime, scale='tdb') return tdb_time.utc.to_datetime().replace(tzinfo=pytz.UTC) elif (timesys_identifier == 'TT' or timesys_identifier == 'GPS'): raise NotImplementedError( "Conversion from time-system '{}' to UTC not yet implemented" ) else: raise ValueError( 'Unrecognised time-system: {} (badly formatted VOEvent?)'.format( timesys_identifier ) ) except AttributeError: return None
[ "def", "get_event_time_as_utc", "(", "voevent", ",", "index", "=", "0", ")", ":", "try", ":", "od", "=", "voevent", ".", "WhereWhen", ".", "ObsDataLocation", "[", "index", "]", "ol", "=", "od", ".", "ObservationLocation", "coord_sys", "=", "ol", ".", "As...
Extracts the event time from a given `WhereWhen.ObsDataLocation`. Returns a datetime (timezone-aware, UTC). Accesses a `WhereWhere.ObsDataLocation.ObservationLocation` element and returns the AstroCoords.Time.TimeInstant.ISOTime element, converted to a (UTC-timezoned) datetime. Note that a packet may include multiple 'ObsDataLocation' entries under the 'WhereWhen' section, for example giving locations of an object moving over time. Most packets will have only one, however, so the default is to access the first. This function now implements conversion from the TDB (Barycentric Dynamical Time) time scale in ISOTime format, since this is the format used by GAIA VOEvents. (See also http://docs.astropy.org/en/stable/time/#time-scale ) Other timescales (i.e. TT, GPS) will presumably be formatted as a TimeOffset, parsing this format is not yet implemented. Args: voevent (:class:`voeventparse.voevent.Voevent`): Root node of the VOevent etree. index (int): Index of the ObsDataLocation to extract an ISOtime from. Returns: :class:`datetime.datetime`: Datetime representing the event-timestamp, converted to UTC (timezone aware).
[ "Extracts", "the", "event", "time", "from", "a", "given", "WhereWhen", ".", "ObsDataLocation", "." ]
train
https://github.com/timstaley/voevent-parse/blob/58fc1eb3af5eca23d9e819c727204950615402a7/src/voeventparse/convenience.py#L16-L75
timstaley/voevent-parse
src/voeventparse/convenience.py
get_event_position
def get_event_position(voevent, index=0): """Extracts the `AstroCoords` from a given `WhereWhen.ObsDataLocation`. Note that a packet may include multiple 'ObsDataLocation' entries under the 'WhereWhen' section, for example giving locations of an object moving over time. Most packets will have only one, however, so the default is to just return co-ords extracted from the first. Args: voevent (:class:`voeventparse.voevent.Voevent`): Root node of the VOEvent etree. index (int): Index of the ObsDataLocation to extract AstroCoords from. Returns: Position (:py:class:`.Position2D`): The sky position defined in the ObsDataLocation. """ od = voevent.WhereWhen.ObsDataLocation[index] ac = od.ObservationLocation.AstroCoords ac_sys = voevent.WhereWhen.ObsDataLocation.ObservationLocation.AstroCoordSystem sys = ac_sys.attrib['id'] if hasattr(ac.Position2D, "Name1"): assert ac.Position2D.Name1 == 'RA' and ac.Position2D.Name2 == 'Dec' posn = Position2D(ra=float(ac.Position2D.Value2.C1), dec=float(ac.Position2D.Value2.C2), err=float(ac.Position2D.Error2Radius), units=ac.Position2D.attrib['unit'], system=sys) return posn
python
def get_event_position(voevent, index=0): """Extracts the `AstroCoords` from a given `WhereWhen.ObsDataLocation`. Note that a packet may include multiple 'ObsDataLocation' entries under the 'WhereWhen' section, for example giving locations of an object moving over time. Most packets will have only one, however, so the default is to just return co-ords extracted from the first. Args: voevent (:class:`voeventparse.voevent.Voevent`): Root node of the VOEvent etree. index (int): Index of the ObsDataLocation to extract AstroCoords from. Returns: Position (:py:class:`.Position2D`): The sky position defined in the ObsDataLocation. """ od = voevent.WhereWhen.ObsDataLocation[index] ac = od.ObservationLocation.AstroCoords ac_sys = voevent.WhereWhen.ObsDataLocation.ObservationLocation.AstroCoordSystem sys = ac_sys.attrib['id'] if hasattr(ac.Position2D, "Name1"): assert ac.Position2D.Name1 == 'RA' and ac.Position2D.Name2 == 'Dec' posn = Position2D(ra=float(ac.Position2D.Value2.C1), dec=float(ac.Position2D.Value2.C2), err=float(ac.Position2D.Error2Radius), units=ac.Position2D.attrib['unit'], system=sys) return posn
[ "def", "get_event_position", "(", "voevent", ",", "index", "=", "0", ")", ":", "od", "=", "voevent", ".", "WhereWhen", ".", "ObsDataLocation", "[", "index", "]", "ac", "=", "od", ".", "ObservationLocation", ".", "AstroCoords", "ac_sys", "=", "voevent", "."...
Extracts the `AstroCoords` from a given `WhereWhen.ObsDataLocation`. Note that a packet may include multiple 'ObsDataLocation' entries under the 'WhereWhen' section, for example giving locations of an object moving over time. Most packets will have only one, however, so the default is to just return co-ords extracted from the first. Args: voevent (:class:`voeventparse.voevent.Voevent`): Root node of the VOEvent etree. index (int): Index of the ObsDataLocation to extract AstroCoords from. Returns: Position (:py:class:`.Position2D`): The sky position defined in the ObsDataLocation.
[ "Extracts", "the", "AstroCoords", "from", "a", "given", "WhereWhen", ".", "ObsDataLocation", "." ]
train
https://github.com/timstaley/voevent-parse/blob/58fc1eb3af5eca23d9e819c727204950615402a7/src/voeventparse/convenience.py#L78-L107
timstaley/voevent-parse
src/voeventparse/convenience.py
get_grouped_params
def get_grouped_params(voevent): """ Fetch grouped Params from the `What` section of a voevent as an omdict. This fetches 'grouped' Params, i.e. those enclosed in a Group element, and returns them as a nested dict-like structure, keyed by GroupName->ParamName->AttribName. Note that since multiple Params may share the same ParamName, the returned data-structure is actually an `orderedmultidict.omdict <https://github.com/gruns/orderedmultidict>`_ and has extra methods such as 'getlist' to allow retrieval of all values. Args: voevent (:class:`voeventparse.voevent.Voevent`): Root node of the VOevent etree. Returns (orderedmultidict.omdict): Mapping of ``ParamName->Attribs``. Typical access like so:: foo_val = top_params['foo']['value'] # If there are multiple Param entries named 'foo': all_foo_vals = [atts['value'] for atts in top_params.getlist('foo')] """ groups_omd = OMDict() w = deepcopy(voevent.What) lxml.objectify.deannotate(w) if w.find('Group') is not None: for grp in w.Group: groups_omd.add(grp.attrib.get('name'), _get_param_children_as_omdict(grp)) return groups_omd
python
def get_grouped_params(voevent): """ Fetch grouped Params from the `What` section of a voevent as an omdict. This fetches 'grouped' Params, i.e. those enclosed in a Group element, and returns them as a nested dict-like structure, keyed by GroupName->ParamName->AttribName. Note that since multiple Params may share the same ParamName, the returned data-structure is actually an `orderedmultidict.omdict <https://github.com/gruns/orderedmultidict>`_ and has extra methods such as 'getlist' to allow retrieval of all values. Args: voevent (:class:`voeventparse.voevent.Voevent`): Root node of the VOevent etree. Returns (orderedmultidict.omdict): Mapping of ``ParamName->Attribs``. Typical access like so:: foo_val = top_params['foo']['value'] # If there are multiple Param entries named 'foo': all_foo_vals = [atts['value'] for atts in top_params.getlist('foo')] """ groups_omd = OMDict() w = deepcopy(voevent.What) lxml.objectify.deannotate(w) if w.find('Group') is not None: for grp in w.Group: groups_omd.add(grp.attrib.get('name'), _get_param_children_as_omdict(grp)) return groups_omd
[ "def", "get_grouped_params", "(", "voevent", ")", ":", "groups_omd", "=", "OMDict", "(", ")", "w", "=", "deepcopy", "(", "voevent", ".", "What", ")", "lxml", ".", "objectify", ".", "deannotate", "(", "w", ")", "if", "w", ".", "find", "(", "'Group'", ...
Fetch grouped Params from the `What` section of a voevent as an omdict. This fetches 'grouped' Params, i.e. those enclosed in a Group element, and returns them as a nested dict-like structure, keyed by GroupName->ParamName->AttribName. Note that since multiple Params may share the same ParamName, the returned data-structure is actually an `orderedmultidict.omdict <https://github.com/gruns/orderedmultidict>`_ and has extra methods such as 'getlist' to allow retrieval of all values. Args: voevent (:class:`voeventparse.voevent.Voevent`): Root node of the VOevent etree. Returns (orderedmultidict.omdict): Mapping of ``ParamName->Attribs``. Typical access like so:: foo_val = top_params['foo']['value'] # If there are multiple Param entries named 'foo': all_foo_vals = [atts['value'] for atts in top_params.getlist('foo')]
[ "Fetch", "grouped", "Params", "from", "the", "What", "section", "of", "a", "voevent", "as", "an", "omdict", "." ]
train
https://github.com/timstaley/voevent-parse/blob/58fc1eb3af5eca23d9e819c727204950615402a7/src/voeventparse/convenience.py#L119-L150
timstaley/voevent-parse
src/voeventparse/convenience.py
get_toplevel_params
def get_toplevel_params(voevent): """ Fetch ungrouped Params from the `What` section of a voevent as an omdict. This fetches 'toplevel' Params, i.e. those not enclosed in a Group element, and returns them as a nested dict-like structure, keyed like ParamName->AttribName. Note that since multiple Params may share the same ParamName, the returned data-structure is actually an `orderedmultidict.omdict <https://github.com/gruns/orderedmultidict>`_ and has extra methods such as 'getlist' to allow retrieval of all values. Any Params with no defined name (technically off-spec, but not invalidated by the XML schema) are returned under the dict-key ``None``. Args: voevent (:class:`voeventparse.voevent.Voevent`): Root node of the VOevent etree. Returns (orderedmultidict.omdict): Mapping of ``ParamName->Attribs``. Typical access like so:: foo_val = top_params['foo']['value'] # If there are multiple Param entries named 'foo': all_foo_vals = [atts['value'] for atts in top_params.getlist('foo')] """ result = OrderedDict() w = deepcopy(voevent.What) lxml.objectify.deannotate(w) return _get_param_children_as_omdict(w)
python
def get_toplevel_params(voevent): """ Fetch ungrouped Params from the `What` section of a voevent as an omdict. This fetches 'toplevel' Params, i.e. those not enclosed in a Group element, and returns them as a nested dict-like structure, keyed like ParamName->AttribName. Note that since multiple Params may share the same ParamName, the returned data-structure is actually an `orderedmultidict.omdict <https://github.com/gruns/orderedmultidict>`_ and has extra methods such as 'getlist' to allow retrieval of all values. Any Params with no defined name (technically off-spec, but not invalidated by the XML schema) are returned under the dict-key ``None``. Args: voevent (:class:`voeventparse.voevent.Voevent`): Root node of the VOevent etree. Returns (orderedmultidict.omdict): Mapping of ``ParamName->Attribs``. Typical access like so:: foo_val = top_params['foo']['value'] # If there are multiple Param entries named 'foo': all_foo_vals = [atts['value'] for atts in top_params.getlist('foo')] """ result = OrderedDict() w = deepcopy(voevent.What) lxml.objectify.deannotate(w) return _get_param_children_as_omdict(w)
[ "def", "get_toplevel_params", "(", "voevent", ")", ":", "result", "=", "OrderedDict", "(", ")", "w", "=", "deepcopy", "(", "voevent", ".", "What", ")", "lxml", ".", "objectify", ".", "deannotate", "(", "w", ")", "return", "_get_param_children_as_omdict", "("...
Fetch ungrouped Params from the `What` section of a voevent as an omdict. This fetches 'toplevel' Params, i.e. those not enclosed in a Group element, and returns them as a nested dict-like structure, keyed like ParamName->AttribName. Note that since multiple Params may share the same ParamName, the returned data-structure is actually an `orderedmultidict.omdict <https://github.com/gruns/orderedmultidict>`_ and has extra methods such as 'getlist' to allow retrieval of all values. Any Params with no defined name (technically off-spec, but not invalidated by the XML schema) are returned under the dict-key ``None``. Args: voevent (:class:`voeventparse.voevent.Voevent`): Root node of the VOevent etree. Returns (orderedmultidict.omdict): Mapping of ``ParamName->Attribs``. Typical access like so:: foo_val = top_params['foo']['value'] # If there are multiple Param entries named 'foo': all_foo_vals = [atts['value'] for atts in top_params.getlist('foo')]
[ "Fetch", "ungrouped", "Params", "from", "the", "What", "section", "of", "a", "voevent", "as", "an", "omdict", "." ]
train
https://github.com/timstaley/voevent-parse/blob/58fc1eb3af5eca23d9e819c727204950615402a7/src/voeventparse/convenience.py#L153-L183
timstaley/voevent-parse
src/voeventparse/convenience.py
pull_astro_coords
def pull_astro_coords(voevent, index=0): """ Deprecated alias of :func:`.get_event_position` """ import warnings warnings.warn( """ The function `pull_astro_coords` has been renamed to `get_event_position`. This alias is preserved for backwards compatibility, and may be removed in a future release. """, FutureWarning) return get_event_position(voevent, index)
python
def pull_astro_coords(voevent, index=0): """ Deprecated alias of :func:`.get_event_position` """ import warnings warnings.warn( """ The function `pull_astro_coords` has been renamed to `get_event_position`. This alias is preserved for backwards compatibility, and may be removed in a future release. """, FutureWarning) return get_event_position(voevent, index)
[ "def", "pull_astro_coords", "(", "voevent", ",", "index", "=", "0", ")", ":", "import", "warnings", "warnings", ".", "warn", "(", "\"\"\"\n The function `pull_astro_coords` has been renamed to\n `get_event_position`. This alias is preserved for backwards\n compat...
Deprecated alias of :func:`.get_event_position`
[ "Deprecated", "alias", "of", ":", "func", ":", ".", "get_event_position" ]
train
https://github.com/timstaley/voevent-parse/blob/58fc1eb3af5eca23d9e819c727204950615402a7/src/voeventparse/convenience.py#L186-L198
timstaley/voevent-parse
src/voeventparse/convenience.py
pull_isotime
def pull_isotime(voevent, index=0): """ Deprecated alias of :func:`.get_event_time_as_utc` """ import warnings warnings.warn( """ The function `pull_isotime` has been renamed to `get_event_time_as_utc`. This alias is preserved for backwards compatibility, and may be removed in a future release. """, FutureWarning) return get_event_time_as_utc(voevent, index)
python
def pull_isotime(voevent, index=0): """ Deprecated alias of :func:`.get_event_time_as_utc` """ import warnings warnings.warn( """ The function `pull_isotime` has been renamed to `get_event_time_as_utc`. This alias is preserved for backwards compatibility, and may be removed in a future release. """, FutureWarning) return get_event_time_as_utc(voevent, index)
[ "def", "pull_isotime", "(", "voevent", ",", "index", "=", "0", ")", ":", "import", "warnings", "warnings", ".", "warn", "(", "\"\"\"\n The function `pull_isotime` has been renamed to\n `get_event_time_as_utc`. This alias is preserved for backwards\n compatibility...
Deprecated alias of :func:`.get_event_time_as_utc`
[ "Deprecated", "alias", "of", ":", "func", ":", ".", "get_event_time_as_utc" ]
train
https://github.com/timstaley/voevent-parse/blob/58fc1eb3af5eca23d9e819c727204950615402a7/src/voeventparse/convenience.py#L201-L213
timstaley/voevent-parse
src/voeventparse/convenience.py
pull_params
def pull_params(voevent): """ Attempts to load the `What` section of a voevent as a nested dictionary. .. warning:: Deprecated due to `Missing name attributes` issues. `Param` or `Group` entries which are missing the `name` attribute will be entered under a dictionary key of ``None``. This means that if there are multiple entries missing the `name` attribute then earlier entries will be overwritten by later entries, so you will not be able to use this convenience routine effectively. Use :func:`get_grouped_params` and :func:`get_toplevel_params` instead. Args: voevent (:class:`voeventparse.voevent.Voevent`): Root node of the VOevent etree. Returns: dict: Mapping of ``Group->Param->Attribs``. Access like so:: foo_param_val = what_dict['GroupName']['ParamName']['value'] .. note:: Parameters without a group are indexed under the key 'None' - otherwise, we might get name-clashes between `params` and `groups` (unlikely but possible) so for ungrouped Params you'll need something like:: what_dict[None]['ParamName']['value'] """ import warnings warnings.warn( """ The function `pull_params` has been deprecated in favour of the split functions `get_toplevel_params` and `get_grouped_params`, due to possible name-shadowing issues when combining multilevel-nested-dicts (see docs for details). This alias is preserved for backwards compatibility, and may be removed in a future release. """, FutureWarning) result = OrderedDict() w = deepcopy(voevent.What) lxml.objectify.deannotate(w) if w.countchildren() == 0: return result toplevel_params = OrderedDict() result[None] = toplevel_params if w.find('Param') is not None: for p in w.Param: toplevel_params[p.attrib.get('name')] = p.attrib if w.find('Group') is not None: for g in w.Group: g_params = {} result[g.attrib.get('name')] = g_params if hasattr(g, 'Param'): for p in g.Param: g_params[p.attrib.get('name')] = p.attrib return result
python
def pull_params(voevent): """ Attempts to load the `What` section of a voevent as a nested dictionary. .. warning:: Deprecated due to `Missing name attributes` issues. `Param` or `Group` entries which are missing the `name` attribute will be entered under a dictionary key of ``None``. This means that if there are multiple entries missing the `name` attribute then earlier entries will be overwritten by later entries, so you will not be able to use this convenience routine effectively. Use :func:`get_grouped_params` and :func:`get_toplevel_params` instead. Args: voevent (:class:`voeventparse.voevent.Voevent`): Root node of the VOevent etree. Returns: dict: Mapping of ``Group->Param->Attribs``. Access like so:: foo_param_val = what_dict['GroupName']['ParamName']['value'] .. note:: Parameters without a group are indexed under the key 'None' - otherwise, we might get name-clashes between `params` and `groups` (unlikely but possible) so for ungrouped Params you'll need something like:: what_dict[None]['ParamName']['value'] """ import warnings warnings.warn( """ The function `pull_params` has been deprecated in favour of the split functions `get_toplevel_params` and `get_grouped_params`, due to possible name-shadowing issues when combining multilevel-nested-dicts (see docs for details). This alias is preserved for backwards compatibility, and may be removed in a future release. """, FutureWarning) result = OrderedDict() w = deepcopy(voevent.What) lxml.objectify.deannotate(w) if w.countchildren() == 0: return result toplevel_params = OrderedDict() result[None] = toplevel_params if w.find('Param') is not None: for p in w.Param: toplevel_params[p.attrib.get('name')] = p.attrib if w.find('Group') is not None: for g in w.Group: g_params = {} result[g.attrib.get('name')] = g_params if hasattr(g, 'Param'): for p in g.Param: g_params[p.attrib.get('name')] = p.attrib return result
[ "def", "pull_params", "(", "voevent", ")", ":", "import", "warnings", "warnings", ".", "warn", "(", "\"\"\"\n The function `pull_params` has been deprecated in favour of the split\n functions `get_toplevel_params` and `get_grouped_params`, due to \n possible name-shadowi...
Attempts to load the `What` section of a voevent as a nested dictionary. .. warning:: Deprecated due to `Missing name attributes` issues. `Param` or `Group` entries which are missing the `name` attribute will be entered under a dictionary key of ``None``. This means that if there are multiple entries missing the `name` attribute then earlier entries will be overwritten by later entries, so you will not be able to use this convenience routine effectively. Use :func:`get_grouped_params` and :func:`get_toplevel_params` instead. Args: voevent (:class:`voeventparse.voevent.Voevent`): Root node of the VOevent etree. Returns: dict: Mapping of ``Group->Param->Attribs``. Access like so:: foo_param_val = what_dict['GroupName']['ParamName']['value'] .. note:: Parameters without a group are indexed under the key 'None' - otherwise, we might get name-clashes between `params` and `groups` (unlikely but possible) so for ungrouped Params you'll need something like:: what_dict[None]['ParamName']['value']
[ "Attempts", "to", "load", "the", "What", "section", "of", "a", "voevent", "as", "a", "nested", "dictionary", "." ]
train
https://github.com/timstaley/voevent-parse/blob/58fc1eb3af5eca23d9e819c727204950615402a7/src/voeventparse/convenience.py#L216-L276
timstaley/voevent-parse
src/voeventparse/convenience.py
prettystr
def prettystr(subtree): """Print an element tree with nice indentation. Prettyprinting a whole VOEvent often doesn't seem to work, probably for issues relating to whitespace cf. http://lxml.de/FAQ.html#why-doesn-t-the-pretty-print-option-reformat-my-xml-output This function is a quick workaround for prettyprinting a subsection of a VOEvent, for easier desk-checking. Args: subtree(:class`lxml.etree.ElementTree`): A node in the VOEvent element tree. Returns: str: Prettyprinted string representation of the raw XML. """ subtree = deepcopy(subtree) lxml.objectify.deannotate(subtree) lxml.etree.cleanup_namespaces(subtree) return lxml.etree.tostring(subtree, pretty_print=True).decode( encoding="utf-8")
python
def prettystr(subtree): """Print an element tree with nice indentation. Prettyprinting a whole VOEvent often doesn't seem to work, probably for issues relating to whitespace cf. http://lxml.de/FAQ.html#why-doesn-t-the-pretty-print-option-reformat-my-xml-output This function is a quick workaround for prettyprinting a subsection of a VOEvent, for easier desk-checking. Args: subtree(:class`lxml.etree.ElementTree`): A node in the VOEvent element tree. Returns: str: Prettyprinted string representation of the raw XML. """ subtree = deepcopy(subtree) lxml.objectify.deannotate(subtree) lxml.etree.cleanup_namespaces(subtree) return lxml.etree.tostring(subtree, pretty_print=True).decode( encoding="utf-8")
[ "def", "prettystr", "(", "subtree", ")", ":", "subtree", "=", "deepcopy", "(", "subtree", ")", "lxml", ".", "objectify", ".", "deannotate", "(", "subtree", ")", "lxml", ".", "etree", ".", "cleanup_namespaces", "(", "subtree", ")", "return", "lxml", ".", ...
Print an element tree with nice indentation. Prettyprinting a whole VOEvent often doesn't seem to work, probably for issues relating to whitespace cf. http://lxml.de/FAQ.html#why-doesn-t-the-pretty-print-option-reformat-my-xml-output This function is a quick workaround for prettyprinting a subsection of a VOEvent, for easier desk-checking. Args: subtree(:class`lxml.etree.ElementTree`): A node in the VOEvent element tree. Returns: str: Prettyprinted string representation of the raw XML.
[ "Print", "an", "element", "tree", "with", "nice", "indentation", "." ]
train
https://github.com/timstaley/voevent-parse/blob/58fc1eb3af5eca23d9e819c727204950615402a7/src/voeventparse/convenience.py#L279-L297
Kane610/aiounifi
aiounifi/controller.py
Controller.request
async def request(self, method, path, json=None): """Make a request to the API.""" url = 'https://{}:{}/api/'.format(self.host, self.port) url += path.format(site=self.site) try: async with self.session.request(method, url, json=json) as res: if res.content_type != 'application/json': raise ResponseError( 'Invalid content type: {}'.format(res.content_type)) response = await res.json() _raise_on_error(response) return response['data'] except client_exceptions.ClientError as err: raise RequestError( 'Error requesting data from {}: {}'.format(self.host, err) ) from None
python
async def request(self, method, path, json=None): """Make a request to the API.""" url = 'https://{}:{}/api/'.format(self.host, self.port) url += path.format(site=self.site) try: async with self.session.request(method, url, json=json) as res: if res.content_type != 'application/json': raise ResponseError( 'Invalid content type: {}'.format(res.content_type)) response = await res.json() _raise_on_error(response) return response['data'] except client_exceptions.ClientError as err: raise RequestError( 'Error requesting data from {}: {}'.format(self.host, err) ) from None
[ "async", "def", "request", "(", "self", ",", "method", ",", "path", ",", "json", "=", "None", ")", ":", "url", "=", "'https://{}:{}/api/'", ".", "format", "(", "self", ".", "host", ",", "self", ".", "port", ")", "url", "+=", "path", ".", "format", ...
Make a request to the API.
[ "Make", "a", "request", "to", "the", "API", "." ]
train
https://github.com/Kane610/aiounifi/blob/a1a871dc4c9158b48e2647b6c29f7c88965da389/aiounifi/controller.py#L45-L62
snipsco/snipsmanagercore
snipsmanagercore/server.py
Server.start
def start(self): """ Start the MQTT client. """ self.thread_handler.run(target=self.start_blocking) self.thread_handler.start_run_loop()
python
def start(self): """ Start the MQTT client. """ self.thread_handler.run(target=self.start_blocking) self.thread_handler.start_run_loop()
[ "def", "start", "(", "self", ")", ":", "self", ".", "thread_handler", ".", "run", "(", "target", "=", "self", ".", "start_blocking", ")", "self", ".", "thread_handler", ".", "start_run_loop", "(", ")" ]
Start the MQTT client.
[ "Start", "the", "MQTT", "client", "." ]
train
https://github.com/snipsco/snipsmanagercore/blob/93eaaa665887f790a30ba86af5ffee394bfd8ede/snipsmanagercore/server.py#L72-L75
snipsco/snipsmanagercore
snipsmanagercore/server.py
Server.start_blocking
def start_blocking(self, run_event): """ Start the MQTT client, as a blocking method. :param run_event: a run event object provided by the thread handler. """ topics = [("hermes/intent/#", 0), ("hermes/hotword/#", 0), ("hermes/asr/#", 0), ("hermes/nlu/#", 0), ("snipsmanager/#", 0)] self.log_info("Connecting to {} on port {}".format(self.mqtt_hostname, str(self.mqtt_port))) retry = 0 while True and run_event.is_set(): try: self.log_info("Trying to connect to {}".format(self.mqtt_hostname)) self.client.connect(self.mqtt_hostname, self.mqtt_port, 60) break except (socket_error, Exception) as e: self.log_info("MQTT error {}".format(e)) time.sleep(5 + int(retry / 5)) retry = retry + 1 topics = [ (MQTT_TOPIC_INTENT + '#', 0), (MQTT_TOPIC_HOTWORD + '#', 0), (MQTT_TOPIC_ASR + '#', 0), (MQTT_TOPIC_SNIPSFILE, 0), (MQTT_TOPIC_DIALOG_MANAGER + '#', 0), ("snipsmanager/#", 0) ] self.client.subscribe(topics) while run_event.is_set(): try: self.client.loop() except AttributeError as e: self.log_info("Error in mqtt run loop {}".format(e)) time.sleep(1)
python
def start_blocking(self, run_event): """ Start the MQTT client, as a blocking method. :param run_event: a run event object provided by the thread handler. """ topics = [("hermes/intent/#", 0), ("hermes/hotword/#", 0), ("hermes/asr/#", 0), ("hermes/nlu/#", 0), ("snipsmanager/#", 0)] self.log_info("Connecting to {} on port {}".format(self.mqtt_hostname, str(self.mqtt_port))) retry = 0 while True and run_event.is_set(): try: self.log_info("Trying to connect to {}".format(self.mqtt_hostname)) self.client.connect(self.mqtt_hostname, self.mqtt_port, 60) break except (socket_error, Exception) as e: self.log_info("MQTT error {}".format(e)) time.sleep(5 + int(retry / 5)) retry = retry + 1 topics = [ (MQTT_TOPIC_INTENT + '#', 0), (MQTT_TOPIC_HOTWORD + '#', 0), (MQTT_TOPIC_ASR + '#', 0), (MQTT_TOPIC_SNIPSFILE, 0), (MQTT_TOPIC_DIALOG_MANAGER + '#', 0), ("snipsmanager/#", 0) ] self.client.subscribe(topics) while run_event.is_set(): try: self.client.loop() except AttributeError as e: self.log_info("Error in mqtt run loop {}".format(e)) time.sleep(1)
[ "def", "start_blocking", "(", "self", ",", "run_event", ")", ":", "topics", "=", "[", "(", "\"hermes/intent/#\"", ",", "0", ")", ",", "(", "\"hermes/hotword/#\"", ",", "0", ")", ",", "(", "\"hermes/asr/#\"", ",", "0", ")", ",", "(", "\"hermes/nlu/#\"", "...
Start the MQTT client, as a blocking method. :param run_event: a run event object provided by the thread handler.
[ "Start", "the", "MQTT", "client", "as", "a", "blocking", "method", "." ]
train
https://github.com/snipsco/snipsmanagercore/blob/93eaaa665887f790a30ba86af5ffee394bfd8ede/snipsmanagercore/server.py#L77-L113
snipsco/snipsmanagercore
snipsmanagercore/server.py
Server.on_connect
def on_connect(self, client, userdata, flags, result_code): """ Callback when the MQTT client is connected. :param client: the client being connected. :param userdata: unused. :param flags: unused. :param result_code: result code. """ self.log_info("Connected with result code {}".format(result_code)) self.state_handler.set_state(State.welcome)
python
def on_connect(self, client, userdata, flags, result_code): """ Callback when the MQTT client is connected. :param client: the client being connected. :param userdata: unused. :param flags: unused. :param result_code: result code. """ self.log_info("Connected with result code {}".format(result_code)) self.state_handler.set_state(State.welcome)
[ "def", "on_connect", "(", "self", ",", "client", ",", "userdata", ",", "flags", ",", "result_code", ")", ":", "self", ".", "log_info", "(", "\"Connected with result code {}\"", ".", "format", "(", "result_code", ")", ")", "self", ".", "state_handler", ".", "...
Callback when the MQTT client is connected. :param client: the client being connected. :param userdata: unused. :param flags: unused. :param result_code: result code.
[ "Callback", "when", "the", "MQTT", "client", "is", "connected", "." ]
train
https://github.com/snipsco/snipsmanagercore/blob/93eaaa665887f790a30ba86af5ffee394bfd8ede/snipsmanagercore/server.py#L116-L125
snipsco/snipsmanagercore
snipsmanagercore/server.py
Server.on_disconnect
def on_disconnect(self, client, userdata, result_code): """ Callback when the MQTT client is disconnected. In this case, the server waits five seconds before trying to reconnected. :param client: the client being disconnected. :param userdata: unused. :param result_code: result code. """ self.log_info("Disconnected with result code " + str(result_code)) self.state_handler.set_state(State.goodbye) time.sleep(5) self.thread_handler.run(target=self.start_blocking)
python
def on_disconnect(self, client, userdata, result_code): """ Callback when the MQTT client is disconnected. In this case, the server waits five seconds before trying to reconnected. :param client: the client being disconnected. :param userdata: unused. :param result_code: result code. """ self.log_info("Disconnected with result code " + str(result_code)) self.state_handler.set_state(State.goodbye) time.sleep(5) self.thread_handler.run(target=self.start_blocking)
[ "def", "on_disconnect", "(", "self", ",", "client", ",", "userdata", ",", "result_code", ")", ":", "self", ".", "log_info", "(", "\"Disconnected with result code \"", "+", "str", "(", "result_code", ")", ")", "self", ".", "state_handler", ".", "set_state", "("...
Callback when the MQTT client is disconnected. In this case, the server waits five seconds before trying to reconnected. :param client: the client being disconnected. :param userdata: unused. :param result_code: result code.
[ "Callback", "when", "the", "MQTT", "client", "is", "disconnected", ".", "In", "this", "case", "the", "server", "waits", "five", "seconds", "before", "trying", "to", "reconnected", "." ]
train
https://github.com/snipsco/snipsmanagercore/blob/93eaaa665887f790a30ba86af5ffee394bfd8ede/snipsmanagercore/server.py#L128-L139
snipsco/snipsmanagercore
snipsmanagercore/server.py
Server.on_message
def on_message(self, client, userdata, msg): """ Callback when the MQTT client received a new message. :param client: the MQTT client. :param userdata: unused. :param msg: the MQTT message. """ if msg is None: return self.log_info("New message on topic {}".format(msg.topic)) self.log_debug("Payload {}".format(msg.payload)) if msg.payload is None or len(msg.payload) == 0: pass if msg.payload: payload = json.loads(msg.payload.decode('utf-8')) site_id = payload.get('siteId') session_id = payload.get('sessionId') if msg.topic is not None and msg.topic.startswith(MQTT_TOPIC_INTENT) and msg.payload: payload = json.loads(msg.payload.decode('utf-8')) intent = IntentParser.parse(payload, self.registry.intent_classes) self.log_debug("Parsed intent: {}".format(intent)) if self.handle_intent is not None: if intent is not None: self.log_debug("New intent: {}".format(str(intent.intentName))) self.handle_intent(intent, payload) elif msg.topic is not None and msg.topic == MQTT_TOPIC_HOTWORD + "toggleOn": self.state_handler.set_state(State.hotword_toggle_on) elif MQTT_TOPIC_HOTWORD_DETECTED_RE.match(msg.topic): if not self.first_hotword_detected: self.client.publish( "hermes/feedback/sound/toggleOff", payload=None, qos=0, retain=False) self.first_hotword_detected = True self.state_handler.set_state(State.hotword_detected) if self.handle_start_listening is not None: self.handle_start_listening() elif msg.topic == MQTT_TOPIC_ASR + "startListening": self.state_handler.set_state(State.asr_start_listening) elif msg.topic == MQTT_TOPIC_ASR + "textCaptured": self.state_handler.set_state(State.asr_text_captured) if msg.payload is not None: self.log_debug("Text captured: {}".format(str(msg.payload))) if self.handle_done_listening is not None: self.handle_done_listening() payload = json.loads(msg.payload.decode('utf-8')) if payload['text'] == '': self.handle_intent(None, None) elif msg.topic is not None and msg.topic == "hermes/nlu/intentNotRecognized": self.handle_intent(None, None) elif 
msg.topic == "snipsmanager/setSnipsfile" and msg.payload: self.state_handler.set_state(State.asr_text_captured) elif msg.topic == MQTT_TOPIC_SESSION_STARTED: self.state_handler.set_state(State.session_started) if self.handlers_dialogue_events is not None: self.handlers_dialogue_events(self.DIALOGUE_EVENT_STARTED, session_id, site_id) elif msg.topic == MQTT_TOPIC_SESSION_ENDED: self.state_handler.set_state(State.session_ended) if self.handlers_dialogue_events is not None: self.handlers_dialogue_events(self.DIALOGUE_EVENT_ENDED, session_id, site_id) elif msg.topic == MQTT_TOPIC_SESSION_QUEUED: self.state_handler.set_state(State.session_queued) if self.handlers_dialogue_events is not None: self.handlers_dialogue_events(self.DIALOGUE_EVENT_QUEUED, session_id, site_id)
python
def on_message(self, client, userdata, msg): """ Callback when the MQTT client received a new message. :param client: the MQTT client. :param userdata: unused. :param msg: the MQTT message. """ if msg is None: return self.log_info("New message on topic {}".format(msg.topic)) self.log_debug("Payload {}".format(msg.payload)) if msg.payload is None or len(msg.payload) == 0: pass if msg.payload: payload = json.loads(msg.payload.decode('utf-8')) site_id = payload.get('siteId') session_id = payload.get('sessionId') if msg.topic is not None and msg.topic.startswith(MQTT_TOPIC_INTENT) and msg.payload: payload = json.loads(msg.payload.decode('utf-8')) intent = IntentParser.parse(payload, self.registry.intent_classes) self.log_debug("Parsed intent: {}".format(intent)) if self.handle_intent is not None: if intent is not None: self.log_debug("New intent: {}".format(str(intent.intentName))) self.handle_intent(intent, payload) elif msg.topic is not None and msg.topic == MQTT_TOPIC_HOTWORD + "toggleOn": self.state_handler.set_state(State.hotword_toggle_on) elif MQTT_TOPIC_HOTWORD_DETECTED_RE.match(msg.topic): if not self.first_hotword_detected: self.client.publish( "hermes/feedback/sound/toggleOff", payload=None, qos=0, retain=False) self.first_hotword_detected = True self.state_handler.set_state(State.hotword_detected) if self.handle_start_listening is not None: self.handle_start_listening() elif msg.topic == MQTT_TOPIC_ASR + "startListening": self.state_handler.set_state(State.asr_start_listening) elif msg.topic == MQTT_TOPIC_ASR + "textCaptured": self.state_handler.set_state(State.asr_text_captured) if msg.payload is not None: self.log_debug("Text captured: {}".format(str(msg.payload))) if self.handle_done_listening is not None: self.handle_done_listening() payload = json.loads(msg.payload.decode('utf-8')) if payload['text'] == '': self.handle_intent(None, None) elif msg.topic is not None and msg.topic == "hermes/nlu/intentNotRecognized": self.handle_intent(None, None) elif 
msg.topic == "snipsmanager/setSnipsfile" and msg.payload: self.state_handler.set_state(State.asr_text_captured) elif msg.topic == MQTT_TOPIC_SESSION_STARTED: self.state_handler.set_state(State.session_started) if self.handlers_dialogue_events is not None: self.handlers_dialogue_events(self.DIALOGUE_EVENT_STARTED, session_id, site_id) elif msg.topic == MQTT_TOPIC_SESSION_ENDED: self.state_handler.set_state(State.session_ended) if self.handlers_dialogue_events is not None: self.handlers_dialogue_events(self.DIALOGUE_EVENT_ENDED, session_id, site_id) elif msg.topic == MQTT_TOPIC_SESSION_QUEUED: self.state_handler.set_state(State.session_queued) if self.handlers_dialogue_events is not None: self.handlers_dialogue_events(self.DIALOGUE_EVENT_QUEUED, session_id, site_id)
[ "def", "on_message", "(", "self", ",", "client", ",", "userdata", ",", "msg", ")", ":", "if", "msg", "is", "None", ":", "return", "self", ".", "log_info", "(", "\"New message on topic {}\"", ".", "format", "(", "msg", ".", "topic", ")", ")", "self", "....
Callback when the MQTT client received a new message. :param client: the MQTT client. :param userdata: unused. :param msg: the MQTT message.
[ "Callback", "when", "the", "MQTT", "client", "received", "a", "new", "message", "." ]
train
https://github.com/snipsco/snipsmanagercore/blob/93eaaa665887f790a30ba86af5ffee394bfd8ede/snipsmanagercore/server.py#L142-L207
datasift/datasift-python
datasift/token.py
Token.list
def list(self, identity_id, per_page=20, page=1): """ Get a list of tokens :param identity_id: The ID of the identity to retrieve tokens for :param per_page: The number of results per page returned :param page: The page number of the results :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ params = {'per_page': per_page, 'page': page} return self.request.get(str(identity_id) + '/token', params)
python
def list(self, identity_id, per_page=20, page=1): """ Get a list of tokens :param identity_id: The ID of the identity to retrieve tokens for :param per_page: The number of results per page returned :param page: The page number of the results :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ params = {'per_page': per_page, 'page': page} return self.request.get(str(identity_id) + '/token', params)
[ "def", "list", "(", "self", ",", "identity_id", ",", "per_page", "=", "20", ",", "page", "=", "1", ")", ":", "params", "=", "{", "'per_page'", ":", "per_page", ",", "'page'", ":", "page", "}", "return", "self", ".", "request", ".", "get", "(", "str...
Get a list of tokens :param identity_id: The ID of the identity to retrieve tokens for :param per_page: The number of results per page returned :param page: The page number of the results :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
[ "Get", "a", "list", "of", "tokens" ]
train
https://github.com/datasift/datasift-python/blob/bfaca1a47501a18e11065ecf630d9c31df818f65/datasift/token.py#L10-L24
datasift/datasift-python
datasift/token.py
Token.create
def create(self, identity_id, service, token): """ Create the token :param identity_id: The ID of the identity to retrieve :param service: The service that the token is linked to :param token: The token provided by the the service :param expires_at: Set an expiry for this token :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ params = {'service': service, 'token': token} return self.request.post(str(identity_id) + '/token', params)
python
def create(self, identity_id, service, token): """ Create the token :param identity_id: The ID of the identity to retrieve :param service: The service that the token is linked to :param token: The token provided by the the service :param expires_at: Set an expiry for this token :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ params = {'service': service, 'token': token} return self.request.post(str(identity_id) + '/token', params)
[ "def", "create", "(", "self", ",", "identity_id", ",", "service", ",", "token", ")", ":", "params", "=", "{", "'service'", ":", "service", ",", "'token'", ":", "token", "}", "return", "self", ".", "request", ".", "post", "(", "str", "(", "identity_id",...
Create the token :param identity_id: The ID of the identity to retrieve :param service: The service that the token is linked to :param token: The token provided by the the service :param expires_at: Set an expiry for this token :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
[ "Create", "the", "token" ]
train
https://github.com/datasift/datasift-python/blob/bfaca1a47501a18e11065ecf630d9c31df818f65/datasift/token.py#L39-L54
datasift/datasift-python
datasift/token.py
Token.update
def update(self, identity_id, service, token=None): """ Update the token :param identity_id: The ID of the identity to retrieve :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ params = {} if token: params['token'] = token return self.request.put(str(identity_id) + '/token/' + service, params)
python
def update(self, identity_id, service, token=None): """ Update the token :param identity_id: The ID of the identity to retrieve :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ params = {} if token: params['token'] = token return self.request.put(str(identity_id) + '/token/' + service, params)
[ "def", "update", "(", "self", ",", "identity_id", ",", "service", ",", "token", "=", "None", ")", ":", "params", "=", "{", "}", "if", "token", ":", "params", "[", "'token'", "]", "=", "token", "return", "self", ".", "request", ".", "put", "(", "str...
Update the token :param identity_id: The ID of the identity to retrieve :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
[ "Update", "the", "token" ]
train
https://github.com/datasift/datasift-python/blob/bfaca1a47501a18e11065ecf630d9c31df818f65/datasift/token.py#L56-L71
fedora-infra/fedmsg_meta_fedora_infrastructure
fedmsg_meta_fedora_infrastructure/autocloud.py
AutoCloudProcessor._func_router
def _func_router(self, msg, fname, **config): """ This method routes the messages based on the params and calls the appropriate method to process the message. The utility of the method is to cope up with the major message change during different releases. """ FNAME = 'handle_%s_autocloud_%s' if ('compose_id' in msg['msg'] or 'compose_job_id' in msg['msg'] or 'autocloud.compose' in msg['topic']): return getattr(self, FNAME % ('v2', fname))(msg, **config) else: return getattr(self, FNAME % ('v1', fname))(msg, **config)
python
def _func_router(self, msg, fname, **config): """ This method routes the messages based on the params and calls the appropriate method to process the message. The utility of the method is to cope up with the major message change during different releases. """ FNAME = 'handle_%s_autocloud_%s' if ('compose_id' in msg['msg'] or 'compose_job_id' in msg['msg'] or 'autocloud.compose' in msg['topic']): return getattr(self, FNAME % ('v2', fname))(msg, **config) else: return getattr(self, FNAME % ('v1', fname))(msg, **config)
[ "def", "_func_router", "(", "self", ",", "msg", ",", "fname", ",", "*", "*", "config", ")", ":", "FNAME", "=", "'handle_%s_autocloud_%s'", "if", "(", "'compose_id'", "in", "msg", "[", "'msg'", "]", "or", "'compose_job_id'", "in", "msg", "[", "'msg'", "]"...
This method routes the messages based on the params and calls the appropriate method to process the message. The utility of the method is to cope up with the major message change during different releases.
[ "This", "method", "routes", "the", "messages", "based", "on", "the", "params", "and", "calls", "the", "appropriate", "method", "to", "process", "the", "message", ".", "The", "utility", "of", "the", "method", "is", "to", "cope", "up", "with", "the", "major"...
train
https://github.com/fedora-infra/fedmsg_meta_fedora_infrastructure/blob/85bf4162692e3042c7dbcc12dfafaca4764b4ae6/fedmsg_meta_fedora_infrastructure/autocloud.py#L33-L45
trisk/pysesame
pysesame/__init__.py
get_sesames
def get_sesames(email, password, device_ids=None, nicknames=None, timeout=5): """Return list of available Sesame objects.""" sesames = [] account = CandyHouseAccount(email, password, timeout=timeout) for sesame in account.sesames: if device_ids is not None and sesame['device_id'] not in device_ids: continue if nicknames is not None and sesame['nickname'] not in nicknames: continue sesames.append(Sesame(account, sesame)) return sesames
python
def get_sesames(email, password, device_ids=None, nicknames=None, timeout=5): """Return list of available Sesame objects.""" sesames = [] account = CandyHouseAccount(email, password, timeout=timeout) for sesame in account.sesames: if device_ids is not None and sesame['device_id'] not in device_ids: continue if nicknames is not None and sesame['nickname'] not in nicknames: continue sesames.append(Sesame(account, sesame)) return sesames
[ "def", "get_sesames", "(", "email", ",", "password", ",", "device_ids", "=", "None", ",", "nicknames", "=", "None", ",", "timeout", "=", "5", ")", ":", "sesames", "=", "[", "]", "account", "=", "CandyHouseAccount", "(", "email", ",", "password", ",", "...
Return list of available Sesame objects.
[ "Return", "list", "of", "available", "Sesame", "objects", "." ]
train
https://github.com/trisk/pysesame/blob/8f9df4a478cf8f328ec8185bcac7c8704cbd9c01/pysesame/__init__.py#L8-L20
PedalPi/Raspberry-Physical
physical/rotary_encoder.py
RotaryEncoder.pulse
def pulse(self): """ Calls when_rotated callback if detected changes """ new_b_value = self.gpio_b.is_active new_a_value = self.gpio_a.is_active value = self.table_values.value(new_b_value, new_a_value, self.old_b_value, self.old_a_value) self.old_b_value = new_b_value self.old_a_value = new_a_value if value != 0: self.when_rotated(value)
python
def pulse(self): """ Calls when_rotated callback if detected changes """ new_b_value = self.gpio_b.is_active new_a_value = self.gpio_a.is_active value = self.table_values.value(new_b_value, new_a_value, self.old_b_value, self.old_a_value) self.old_b_value = new_b_value self.old_a_value = new_a_value if value != 0: self.when_rotated(value)
[ "def", "pulse", "(", "self", ")", ":", "new_b_value", "=", "self", ".", "gpio_b", ".", "is_active", "new_a_value", "=", "self", ".", "gpio_a", ".", "is_active", "value", "=", "self", ".", "table_values", ".", "value", "(", "new_b_value", ",", "new_a_value"...
Calls when_rotated callback if detected changes
[ "Calls", "when_rotated", "callback", "if", "detected", "changes" ]
train
https://github.com/PedalPi/Raspberry-Physical/blob/3dc71b6997ef36d0de256c5db7a1b38178937fd5/physical/rotary_encoder.py#L75-L88
ClimateImpactLab/DataFS
datafs/core/data_api.py
DataAPI.default_versions
def default_versions(self, default_versions): ''' Set archive default read versions Parameters ---------- default_versions: dict Dictionary of archive_name, version pairs. On read/download, archives in this dictionary will download the specified version by default. Before assignment, archive_names are checked and normalized. ''' default_versions = { self._normalize_archive_name(arch)[1]: v for arch, v in default_versions.items()} self._default_versions = default_versions
python
def default_versions(self, default_versions): ''' Set archive default read versions Parameters ---------- default_versions: dict Dictionary of archive_name, version pairs. On read/download, archives in this dictionary will download the specified version by default. Before assignment, archive_names are checked and normalized. ''' default_versions = { self._normalize_archive_name(arch)[1]: v for arch, v in default_versions.items()} self._default_versions = default_versions
[ "def", "default_versions", "(", "self", ",", "default_versions", ")", ":", "default_versions", "=", "{", "self", ".", "_normalize_archive_name", "(", "arch", ")", "[", "1", "]", ":", "v", "for", "arch", ",", "v", "in", "default_versions", ".", "items", "("...
Set archive default read versions Parameters ---------- default_versions: dict Dictionary of archive_name, version pairs. On read/download, archives in this dictionary will download the specified version by default. Before assignment, archive_names are checked and normalized.
[ "Set", "archive", "default", "read", "versions" ]
train
https://github.com/ClimateImpactLab/DataFS/blob/0d32c2b4e18d300a11b748a552f6adbc3dd8f59d/datafs/core/data_api.py#L102-L119
ClimateImpactLab/DataFS
datafs/core/data_api.py
DataAPI.create
def create( self, archive_name, authority_name=None, versioned=True, raise_on_err=True, metadata=None, tags=None, helper=False): ''' Create a DataFS archive Parameters ---------- archive_name: str Name of the archive authority_name: str Name of the data service to use as the archive's data authority versioned: bool If true, store all versions with explicit version numbers (defualt) raise_on_err: bool Raise an error if the archive already exists (default True) metadata: dict Dictionary of additional archive metadata helper: bool If true, interactively prompt for required metadata (default False) ''' authority_name, archive_name = self._normalize_archive_name( archive_name, authority_name=authority_name) if authority_name is None: authority_name = self.default_authority_name self._validate_archive_name(archive_name) if metadata is None: metadata = {} res = self.manager.create_archive( archive_name, authority_name, archive_path=archive_name, versioned=versioned, raise_on_err=raise_on_err, metadata=metadata, user_config=self.user_config, tags=tags, helper=helper) return self._ArchiveConstructor( api=self, **res)
python
def create( self, archive_name, authority_name=None, versioned=True, raise_on_err=True, metadata=None, tags=None, helper=False): ''' Create a DataFS archive Parameters ---------- archive_name: str Name of the archive authority_name: str Name of the data service to use as the archive's data authority versioned: bool If true, store all versions with explicit version numbers (defualt) raise_on_err: bool Raise an error if the archive already exists (default True) metadata: dict Dictionary of additional archive metadata helper: bool If true, interactively prompt for required metadata (default False) ''' authority_name, archive_name = self._normalize_archive_name( archive_name, authority_name=authority_name) if authority_name is None: authority_name = self.default_authority_name self._validate_archive_name(archive_name) if metadata is None: metadata = {} res = self.manager.create_archive( archive_name, authority_name, archive_path=archive_name, versioned=versioned, raise_on_err=raise_on_err, metadata=metadata, user_config=self.user_config, tags=tags, helper=helper) return self._ArchiveConstructor( api=self, **res)
[ "def", "create", "(", "self", ",", "archive_name", ",", "authority_name", "=", "None", ",", "versioned", "=", "True", ",", "raise_on_err", "=", "True", ",", "metadata", "=", "None", ",", "tags", "=", "None", ",", "helper", "=", "False", ")", ":", "auth...
Create a DataFS archive Parameters ---------- archive_name: str Name of the archive authority_name: str Name of the data service to use as the archive's data authority versioned: bool If true, store all versions with explicit version numbers (defualt) raise_on_err: bool Raise an error if the archive already exists (default True) metadata: dict Dictionary of additional archive metadata helper: bool If true, interactively prompt for required metadata (default False)
[ "Create", "a", "DataFS", "archive" ]
train
https://github.com/ClimateImpactLab/DataFS/blob/0d32c2b4e18d300a11b748a552f6adbc3dd8f59d/datafs/core/data_api.py#L128-L188
ClimateImpactLab/DataFS
datafs/core/data_api.py
DataAPI.get_archive
def get_archive(self, archive_name, default_version=None): ''' Retrieve a data archive Parameters ---------- archive_name: str Name of the archive to retrieve default_version: version str or :py:class:`~distutils.StrictVersion` giving the default version number to be used on read operations Returns ------- archive: object New :py:class:`~datafs.core.data_archive.DataArchive` object Raises ------ KeyError: A KeyError is raised when the ``archive_name`` is not found ''' auth, archive_name = self._normalize_archive_name(archive_name) res = self.manager.get_archive(archive_name) if default_version is None: default_version = self._default_versions.get(archive_name, None) if (auth is not None) and (auth != res['authority_name']): raise ValueError( 'Archive "{}" not found on {}.'.format(archive_name, auth) + ' Did you mean "{}://{}"?'.format( res['authority_name'], archive_name)) return self._ArchiveConstructor( api=self, default_version=default_version, **res)
python
def get_archive(self, archive_name, default_version=None): ''' Retrieve a data archive Parameters ---------- archive_name: str Name of the archive to retrieve default_version: version str or :py:class:`~distutils.StrictVersion` giving the default version number to be used on read operations Returns ------- archive: object New :py:class:`~datafs.core.data_archive.DataArchive` object Raises ------ KeyError: A KeyError is raised when the ``archive_name`` is not found ''' auth, archive_name = self._normalize_archive_name(archive_name) res = self.manager.get_archive(archive_name) if default_version is None: default_version = self._default_versions.get(archive_name, None) if (auth is not None) and (auth != res['authority_name']): raise ValueError( 'Archive "{}" not found on {}.'.format(archive_name, auth) + ' Did you mean "{}://{}"?'.format( res['authority_name'], archive_name)) return self._ArchiveConstructor( api=self, default_version=default_version, **res)
[ "def", "get_archive", "(", "self", ",", "archive_name", ",", "default_version", "=", "None", ")", ":", "auth", ",", "archive_name", "=", "self", ".", "_normalize_archive_name", "(", "archive_name", ")", "res", "=", "self", ".", "manager", ".", "get_archive", ...
Retrieve a data archive Parameters ---------- archive_name: str Name of the archive to retrieve default_version: version str or :py:class:`~distutils.StrictVersion` giving the default version number to be used on read operations Returns ------- archive: object New :py:class:`~datafs.core.data_archive.DataArchive` object Raises ------ KeyError: A KeyError is raised when the ``archive_name`` is not found
[ "Retrieve", "a", "data", "archive" ]
train
https://github.com/ClimateImpactLab/DataFS/blob/0d32c2b4e18d300a11b748a552f6adbc3dd8f59d/datafs/core/data_api.py#L190-L233
ClimateImpactLab/DataFS
datafs/core/data_api.py
DataAPI.batch_get_archive
def batch_get_archive(self, archive_names, default_versions=None): ''' Batch version of :py:meth:`~DataAPI.get_archive` Parameters ---------- archive_names: list Iterable of archive names to retrieve default_versions: str, object, or dict Default versions to assign to each returned archive. May be a dict with archive names as keys and versions as values, or may be a version, in which case the same version is used for all archives. Versions must be a strict version number string, a :py:class:`~distutils.version.StrictVersion`, or a :py:class:`~datafs.core.versions.BumpableVersion` object. Returns ------- archives: list List of :py:class:`~datafs.core.data_archive.DataArchive` objects. If an archive is not found, it is omitted (``batch_get_archive`` does not raise a ``KeyError`` on invalid archive names). ''' # toss prefixes and normalize names archive_names = map( lambda arch: self._normalize_archive_name(arch)[1], archive_names) responses = self.manager.batch_get_archive(archive_names) archives = {} if default_versions is None: default_versions = {} for res in responses: res['archive_name'] = self._normalize_archive_name( res['archive_name']) archive_name = res['archive_name'] if hasattr(default_versions, 'get'): # Get version number from default_versions or # self._default_versions if key not present. default_version = default_versions.get( archive_name, self._default_versions.get(archive_name, None)) else: default_version = default_versions archive = self._ArchiveConstructor( api=self, default_version=default_version, **res) archives[archive_name] = archive return archives
python
def batch_get_archive(self, archive_names, default_versions=None): ''' Batch version of :py:meth:`~DataAPI.get_archive` Parameters ---------- archive_names: list Iterable of archive names to retrieve default_versions: str, object, or dict Default versions to assign to each returned archive. May be a dict with archive names as keys and versions as values, or may be a version, in which case the same version is used for all archives. Versions must be a strict version number string, a :py:class:`~distutils.version.StrictVersion`, or a :py:class:`~datafs.core.versions.BumpableVersion` object. Returns ------- archives: list List of :py:class:`~datafs.core.data_archive.DataArchive` objects. If an archive is not found, it is omitted (``batch_get_archive`` does not raise a ``KeyError`` on invalid archive names). ''' # toss prefixes and normalize names archive_names = map( lambda arch: self._normalize_archive_name(arch)[1], archive_names) responses = self.manager.batch_get_archive(archive_names) archives = {} if default_versions is None: default_versions = {} for res in responses: res['archive_name'] = self._normalize_archive_name( res['archive_name']) archive_name = res['archive_name'] if hasattr(default_versions, 'get'): # Get version number from default_versions or # self._default_versions if key not present. default_version = default_versions.get( archive_name, self._default_versions.get(archive_name, None)) else: default_version = default_versions archive = self._ArchiveConstructor( api=self, default_version=default_version, **res) archives[archive_name] = archive return archives
[ "def", "batch_get_archive", "(", "self", ",", "archive_names", ",", "default_versions", "=", "None", ")", ":", "# toss prefixes and normalize names", "archive_names", "=", "map", "(", "lambda", "arch", ":", "self", ".", "_normalize_archive_name", "(", "arch", ")", ...
Batch version of :py:meth:`~DataAPI.get_archive` Parameters ---------- archive_names: list Iterable of archive names to retrieve default_versions: str, object, or dict Default versions to assign to each returned archive. May be a dict with archive names as keys and versions as values, or may be a version, in which case the same version is used for all archives. Versions must be a strict version number string, a :py:class:`~distutils.version.StrictVersion`, or a :py:class:`~datafs.core.versions.BumpableVersion` object. Returns ------- archives: list List of :py:class:`~datafs.core.data_archive.DataArchive` objects. If an archive is not found, it is omitted (``batch_get_archive`` does not raise a ``KeyError`` on invalid archive names).
[ "Batch", "version", "of", ":", "py", ":", "meth", ":", "~DataAPI", ".", "get_archive" ]
train
https://github.com/ClimateImpactLab/DataFS/blob/0d32c2b4e18d300a11b748a552f6adbc3dd8f59d/datafs/core/data_api.py#L235-L302
ClimateImpactLab/DataFS
datafs/core/data_api.py
DataAPI.listdir
def listdir(self, location, authority_name=None): ''' List archive path components at a given location .. Note :: When using listdir on versioned archives, listdir will provide the version numbers when a full archive path is supplied as the location argument. This is because DataFS stores the archive path as a directory and the versions as the actual files when versioning is on. Parameters ---------- location: str Path of the "directory" to search `location` can be a path relative to the authority root (e.g `/MyFiles/Data`) or can include authority as a protocol (e.g. `my_auth://MyFiles/Data`). If the authority is specified as a protocol, the `authority_name` argument is ignored. authority_name: str Name of the authority to search (optional) If no authority is specified, the default authority is used (if only one authority is attached or if :py:attr:`DefaultAuthorityName` is assigned). Returns ------- list Archive path components that exist at the given "directory" location on the specified authority Raises ------ ValueError A ValueError is raised if the authority is ambiguous or invalid ''' authority_name, location = self._normalize_archive_name( location, authority_name=authority_name) if authority_name is None: authority_name = self.default_authority_name return self._authorities[authority_name].fs.listdir(location)
python
def listdir(self, location, authority_name=None): ''' List archive path components at a given location .. Note :: When using listdir on versioned archives, listdir will provide the version numbers when a full archive path is supplied as the location argument. This is because DataFS stores the archive path as a directory and the versions as the actual files when versioning is on. Parameters ---------- location: str Path of the "directory" to search `location` can be a path relative to the authority root (e.g `/MyFiles/Data`) or can include authority as a protocol (e.g. `my_auth://MyFiles/Data`). If the authority is specified as a protocol, the `authority_name` argument is ignored. authority_name: str Name of the authority to search (optional) If no authority is specified, the default authority is used (if only one authority is attached or if :py:attr:`DefaultAuthorityName` is assigned). Returns ------- list Archive path components that exist at the given "directory" location on the specified authority Raises ------ ValueError A ValueError is raised if the authority is ambiguous or invalid ''' authority_name, location = self._normalize_archive_name( location, authority_name=authority_name) if authority_name is None: authority_name = self.default_authority_name return self._authorities[authority_name].fs.listdir(location)
[ "def", "listdir", "(", "self", ",", "location", ",", "authority_name", "=", "None", ")", ":", "authority_name", ",", "location", "=", "self", ".", "_normalize_archive_name", "(", "location", ",", "authority_name", "=", "authority_name", ")", "if", "authority_nam...
List archive path components at a given location .. Note :: When using listdir on versioned archives, listdir will provide the version numbers when a full archive path is supplied as the location argument. This is because DataFS stores the archive path as a directory and the versions as the actual files when versioning is on. Parameters ---------- location: str Path of the "directory" to search `location` can be a path relative to the authority root (e.g `/MyFiles/Data`) or can include authority as a protocol (e.g. `my_auth://MyFiles/Data`). If the authority is specified as a protocol, the `authority_name` argument is ignored. authority_name: str Name of the authority to search (optional) If no authority is specified, the default authority is used (if only one authority is attached or if :py:attr:`DefaultAuthorityName` is assigned). Returns ------- list Archive path components that exist at the given "directory" location on the specified authority Raises ------ ValueError A ValueError is raised if the authority is ambiguous or invalid
[ "List", "archive", "path", "components", "at", "a", "given", "location" ]
train
https://github.com/ClimateImpactLab/DataFS/blob/0d32c2b4e18d300a11b748a552f6adbc3dd8f59d/datafs/core/data_api.py#L304-L361
ClimateImpactLab/DataFS
datafs/core/data_api.py
DataAPI.search
def search(self, *query, **kwargs): ''' Searches based on tags specified by users Parameters --------- query: str tags to search on. If multiple terms, provided in comma delimited string format prefix: str start of archive name. Providing a start string improves search speed. ''' prefix = kwargs.get('prefix') if prefix is not None: prefix = fs.path.relpath(prefix) return self.manager.search(query, begins_with=prefix)
python
def search(self, *query, **kwargs): ''' Searches based on tags specified by users Parameters --------- query: str tags to search on. If multiple terms, provided in comma delimited string format prefix: str start of archive name. Providing a start string improves search speed. ''' prefix = kwargs.get('prefix') if prefix is not None: prefix = fs.path.relpath(prefix) return self.manager.search(query, begins_with=prefix)
[ "def", "search", "(", "self", ",", "*", "query", ",", "*", "*", "kwargs", ")", ":", "prefix", "=", "kwargs", ".", "get", "(", "'prefix'", ")", "if", "prefix", "is", "not", "None", ":", "prefix", "=", "fs", ".", "path", ".", "relpath", "(", "prefi...
Searches based on tags specified by users Parameters --------- query: str tags to search on. If multiple terms, provided in comma delimited string format prefix: str start of archive name. Providing a start string improves search speed.
[ "Searches", "based", "on", "tags", "specified", "by", "users" ]
train
https://github.com/ClimateImpactLab/DataFS/blob/0d32c2b4e18d300a11b748a552f6adbc3dd8f59d/datafs/core/data_api.py#L426-L449
ClimateImpactLab/DataFS
datafs/core/data_api.py
DataAPI._validate_archive_name
def _validate_archive_name(self, archive_name): ''' Utility function for creating and validating archive names Parameters ---------- archive_name: str Name of the archive from which to create a service path Returns ------- archive_path: str Internal path used by services to reference archive data ''' archive_name = fs.path.normpath(archive_name) patterns = self.manager.required_archive_patterns for pattern in patterns: if not re.search(pattern, archive_name): raise ValueError( "archive name does not match pattern '{}'".format(pattern))
python
def _validate_archive_name(self, archive_name): ''' Utility function for creating and validating archive names Parameters ---------- archive_name: str Name of the archive from which to create a service path Returns ------- archive_path: str Internal path used by services to reference archive data ''' archive_name = fs.path.normpath(archive_name) patterns = self.manager.required_archive_patterns for pattern in patterns: if not re.search(pattern, archive_name): raise ValueError( "archive name does not match pattern '{}'".format(pattern))
[ "def", "_validate_archive_name", "(", "self", ",", "archive_name", ")", ":", "archive_name", "=", "fs", ".", "path", ".", "normpath", "(", "archive_name", ")", "patterns", "=", "self", ".", "manager", ".", "required_archive_patterns", "for", "pattern", "in", "...
Utility function for creating and validating archive names Parameters ---------- archive_name: str Name of the archive from which to create a service path Returns ------- archive_path: str Internal path used by services to reference archive data
[ "Utility", "function", "for", "creating", "and", "validating", "archive", "names" ]
train
https://github.com/ClimateImpactLab/DataFS/blob/0d32c2b4e18d300a11b748a552f6adbc3dd8f59d/datafs/core/data_api.py#L451-L473
ClimateImpactLab/DataFS
datafs/core/data_api.py
DataAPI.hash_file
def hash_file(f): ''' Utility function for hashing file contents Overload this function to change the file equality checking algorithm Parameters ---------- f: file-like File-like object or file path from which to compute checksum value Returns ------- checksum: dict dictionary with {'algorithm': 'md5', 'checksum': hexdigest} ''' md5 = hashlib.md5() with open_filelike(f, 'rb') as f_obj: for chunk in iter(lambda: f_obj.read(128 * md5.block_size), b''): md5.update(chunk) return {'algorithm': 'md5', 'checksum': md5.hexdigest()}
python
def hash_file(f): ''' Utility function for hashing file contents Overload this function to change the file equality checking algorithm Parameters ---------- f: file-like File-like object or file path from which to compute checksum value Returns ------- checksum: dict dictionary with {'algorithm': 'md5', 'checksum': hexdigest} ''' md5 = hashlib.md5() with open_filelike(f, 'rb') as f_obj: for chunk in iter(lambda: f_obj.read(128 * md5.block_size), b''): md5.update(chunk) return {'algorithm': 'md5', 'checksum': md5.hexdigest()}
[ "def", "hash_file", "(", "f", ")", ":", "md5", "=", "hashlib", ".", "md5", "(", ")", "with", "open_filelike", "(", "f", ",", "'rb'", ")", "as", "f_obj", ":", "for", "chunk", "in", "iter", "(", "lambda", ":", "f_obj", ".", "read", "(", "128", "*",...
Utility function for hashing file contents Overload this function to change the file equality checking algorithm Parameters ---------- f: file-like File-like object or file path from which to compute checksum value Returns ------- checksum: dict dictionary with {'algorithm': 'md5', 'checksum': hexdigest}
[ "Utility", "function", "for", "hashing", "file", "contents" ]
train
https://github.com/ClimateImpactLab/DataFS/blob/0d32c2b4e18d300a11b748a552f6adbc3dd8f59d/datafs/core/data_api.py#L492-L518
yunojuno-archive/django-package-monitor
package_monitor/admin.py
html_list
def html_list(data): """Convert dict into formatted HTML.""" if data is None: return None as_li = lambda v: "<li>%s</li>" % v items = [as_li(v) for v in data] return mark_safe("<ul>%s</ul>" % ''.join(items))
python
def html_list(data): """Convert dict into formatted HTML.""" if data is None: return None as_li = lambda v: "<li>%s</li>" % v items = [as_li(v) for v in data] return mark_safe("<ul>%s</ul>" % ''.join(items))
[ "def", "html_list", "(", "data", ")", ":", "if", "data", "is", "None", ":", "return", "None", "as_li", "=", "lambda", "v", ":", "\"<li>%s</li>\"", "%", "v", "items", "=", "[", "as_li", "(", "v", ")", "for", "v", "in", "data", "]", "return", "mark_s...
Convert dict into formatted HTML.
[ "Convert", "dict", "into", "formatted", "HTML", "." ]
train
https://github.com/yunojuno-archive/django-package-monitor/blob/534aa35ccfe187d2c55aeca0cb52b8278254e437/package_monitor/admin.py#L13-L19
yunojuno-archive/django-package-monitor
package_monitor/admin.py
check_pypi
def check_pypi(modeladmin, request, queryset): """Update latest package info from PyPI.""" for p in queryset: if p.is_editable: logger.debug("Ignoring version update '%s' is editable", p.package_name) else: p.update_from_pypi()
python
def check_pypi(modeladmin, request, queryset): """Update latest package info from PyPI.""" for p in queryset: if p.is_editable: logger.debug("Ignoring version update '%s' is editable", p.package_name) else: p.update_from_pypi()
[ "def", "check_pypi", "(", "modeladmin", ",", "request", ",", "queryset", ")", ":", "for", "p", "in", "queryset", ":", "if", "p", ".", "is_editable", ":", "logger", ".", "debug", "(", "\"Ignoring version update '%s' is editable\"", ",", "p", ".", "package_name"...
Update latest package info from PyPI.
[ "Update", "latest", "package", "info", "from", "PyPI", "." ]
train
https://github.com/yunojuno-archive/django-package-monitor/blob/534aa35ccfe187d2c55aeca0cb52b8278254e437/package_monitor/admin.py#L22-L28
yunojuno-archive/django-package-monitor
package_monitor/admin.py
UpdateAvailableListFilter.queryset
def queryset(self, request, queryset): """Filter based on whether an update (of any sort) is available.""" if self.value() == '-1': return queryset.filter(latest_version__isnull=True) elif self.value() == '0': return ( queryset .filter( current_version__isnull=False, latest_version__isnull=False, latest_version=F('current_version') ) ) elif self.value() == '1': return ( queryset .filter( current_version__isnull=False, latest_version__isnull=False ).exclude( latest_version=F('current_version') ) ) else: return queryset
python
def queryset(self, request, queryset): """Filter based on whether an update (of any sort) is available.""" if self.value() == '-1': return queryset.filter(latest_version__isnull=True) elif self.value() == '0': return ( queryset .filter( current_version__isnull=False, latest_version__isnull=False, latest_version=F('current_version') ) ) elif self.value() == '1': return ( queryset .filter( current_version__isnull=False, latest_version__isnull=False ).exclude( latest_version=F('current_version') ) ) else: return queryset
[ "def", "queryset", "(", "self", ",", "request", ",", "queryset", ")", ":", "if", "self", ".", "value", "(", ")", "==", "'-1'", ":", "return", "queryset", ".", "filter", "(", "latest_version__isnull", "=", "True", ")", "elif", "self", ".", "value", "(",...
Filter based on whether an update (of any sort) is available.
[ "Filter", "based", "on", "whether", "an", "update", "(", "of", "any", "sort", ")", "is", "available", "." ]
train
https://github.com/yunojuno-archive/django-package-monitor/blob/534aa35ccfe187d2c55aeca0cb52b8278254e437/package_monitor/admin.py#L48-L72
yunojuno-archive/django-package-monitor
package_monitor/admin.py
PackageVersionAdmin._updateable
def _updateable(self, obj): """Return True if there are available updates.""" if obj.latest_version is None or obj.is_editable: return None else: return obj.latest_version != obj.current_version
python
def _updateable(self, obj): """Return True if there are available updates.""" if obj.latest_version is None or obj.is_editable: return None else: return obj.latest_version != obj.current_version
[ "def", "_updateable", "(", "self", ",", "obj", ")", ":", "if", "obj", ".", "latest_version", "is", "None", "or", "obj", ".", "is_editable", ":", "return", "None", "else", ":", "return", "obj", ".", "latest_version", "!=", "obj", ".", "current_version" ]
Return True if there are available updates.
[ "Return", "True", "if", "there", "are", "available", "updates", "." ]
train
https://github.com/yunojuno-archive/django-package-monitor/blob/534aa35ccfe187d2c55aeca0cb52b8278254e437/package_monitor/admin.py#L121-L126
yunojuno-archive/django-package-monitor
package_monitor/admin.py
PackageVersionAdmin.available_updates
def available_updates(self, obj): """Print out all versions ahead of the current one.""" from package_monitor import pypi package = pypi.Package(obj.package_name) versions = package.all_versions() return html_list([v for v in versions if v > obj.current_version])
python
def available_updates(self, obj): """Print out all versions ahead of the current one.""" from package_monitor import pypi package = pypi.Package(obj.package_name) versions = package.all_versions() return html_list([v for v in versions if v > obj.current_version])
[ "def", "available_updates", "(", "self", ",", "obj", ")", ":", "from", "package_monitor", "import", "pypi", "package", "=", "pypi", ".", "Package", "(", "obj", ".", "package_name", ")", "versions", "=", "package", ".", "all_versions", "(", ")", "return", "...
Print out all versions ahead of the current one.
[ "Print", "out", "all", "versions", "ahead", "of", "the", "current", "one", "." ]
train
https://github.com/yunojuno-archive/django-package-monitor/blob/534aa35ccfe187d2c55aeca0cb52b8278254e437/package_monitor/admin.py#L130-L135
fedora-infra/fedmsg_meta_fedora_infrastructure
fedmsg_meta_fedora_infrastructure/pkgdb.py
get_agent
def get_agent(msg): """ Handy hack to handle legacy messages where 'agent' was a list. """ agent = msg['msg']['agent'] if isinstance(agent, list): agent = agent[0] return agent
python
def get_agent(msg): """ Handy hack to handle legacy messages where 'agent' was a list. """ agent = msg['msg']['agent'] if isinstance(agent, list): agent = agent[0] return agent
[ "def", "get_agent", "(", "msg", ")", ":", "agent", "=", "msg", "[", "'msg'", "]", "[", "'agent'", "]", "if", "isinstance", "(", "agent", ",", "list", ")", ":", "agent", "=", "agent", "[", "0", "]", "return", "agent" ]
Handy hack to handle legacy messages where 'agent' was a list.
[ "Handy", "hack", "to", "handle", "legacy", "messages", "where", "agent", "was", "a", "list", "." ]
train
https://github.com/fedora-infra/fedmsg_meta_fedora_infrastructure/blob/85bf4162692e3042c7dbcc12dfafaca4764b4ae6/fedmsg_meta_fedora_infrastructure/pkgdb.py#L33-L38
LIVVkit/LIVVkit
livvkit/components/verification.py
run_suite
def run_suite(case, config, summary): """ Run the full suite of verification tests """ config["name"] = case model_dir = os.path.join(livvkit.model_dir, config['data_dir'], case) bench_dir = os.path.join(livvkit.bench_dir, config['data_dir'], case) tabs = [] case_summary = LIVVDict() model_cases = functions.collect_cases(model_dir) bench_cases = functions.collect_cases(bench_dir) for subcase in sorted(six.iterkeys(model_cases)): bench_subcases = bench_cases[subcase] if subcase in bench_cases else [] case_sections = [] for mcase in sorted(model_cases[subcase], key=functions.sort_processor_counts): bpath = (os.path.join(bench_dir, subcase, mcase.replace("-", os.path.sep)) if mcase in bench_subcases else "") mpath = os.path.join(model_dir, subcase, mcase.replace("-", os.path.sep)) case_result = _analyze_case(mpath, bpath, config) case_sections.append(elements.section(mcase, case_result)) case_summary[subcase] = _summarize_result(case_result, case_summary[subcase]) tabs.append(elements.tab(subcase, section_list=case_sections)) result = elements.page(case, config["description"], tab_list=tabs) summary[case] = case_summary _print_summary(case, summary[case]) functions.create_page_from_template("verification.html", os.path.join(livvkit.index_dir, "verification", case + ".html") ) functions.write_json(result, os.path.join(livvkit.output_dir, "verification"), case+".json")
python
def run_suite(case, config, summary): """ Run the full suite of verification tests """ config["name"] = case model_dir = os.path.join(livvkit.model_dir, config['data_dir'], case) bench_dir = os.path.join(livvkit.bench_dir, config['data_dir'], case) tabs = [] case_summary = LIVVDict() model_cases = functions.collect_cases(model_dir) bench_cases = functions.collect_cases(bench_dir) for subcase in sorted(six.iterkeys(model_cases)): bench_subcases = bench_cases[subcase] if subcase in bench_cases else [] case_sections = [] for mcase in sorted(model_cases[subcase], key=functions.sort_processor_counts): bpath = (os.path.join(bench_dir, subcase, mcase.replace("-", os.path.sep)) if mcase in bench_subcases else "") mpath = os.path.join(model_dir, subcase, mcase.replace("-", os.path.sep)) case_result = _analyze_case(mpath, bpath, config) case_sections.append(elements.section(mcase, case_result)) case_summary[subcase] = _summarize_result(case_result, case_summary[subcase]) tabs.append(elements.tab(subcase, section_list=case_sections)) result = elements.page(case, config["description"], tab_list=tabs) summary[case] = case_summary _print_summary(case, summary[case]) functions.create_page_from_template("verification.html", os.path.join(livvkit.index_dir, "verification", case + ".html") ) functions.write_json(result, os.path.join(livvkit.output_dir, "verification"), case+".json")
[ "def", "run_suite", "(", "case", ",", "config", ",", "summary", ")", ":", "config", "[", "\"name\"", "]", "=", "case", "model_dir", "=", "os", ".", "path", ".", "join", "(", "livvkit", ".", "model_dir", ",", "config", "[", "'data_dir'", "]", ",", "ca...
Run the full suite of verification tests
[ "Run", "the", "full", "suite", "of", "verification", "tests" ]
train
https://github.com/LIVVkit/LIVVkit/blob/680120cd437e408673e62e535fc0a246c7fc17db/livvkit/components/verification.py#L50-L81
LIVVkit/LIVVkit
livvkit/components/verification.py
_analyze_case
def _analyze_case(model_dir, bench_dir, config): """ Runs all of the verification checks on a particular case """ bundle = livvkit.verification_model_module model_out = functions.find_file(model_dir, "*"+config["output_ext"]) bench_out = functions.find_file(bench_dir, "*"+config["output_ext"]) model_config = functions.find_file(model_dir, "*"+config["config_ext"]) bench_config = functions.find_file(bench_dir, "*"+config["config_ext"]) model_log = functions.find_file(model_dir, "*"+config["logfile_ext"]) el = [ bit_for_bit(model_out, bench_out, config), diff_configurations(model_config, bench_config, bundle, bundle), bundle.parse_log(model_log) ] return el
python
def _analyze_case(model_dir, bench_dir, config): """ Runs all of the verification checks on a particular case """ bundle = livvkit.verification_model_module model_out = functions.find_file(model_dir, "*"+config["output_ext"]) bench_out = functions.find_file(bench_dir, "*"+config["output_ext"]) model_config = functions.find_file(model_dir, "*"+config["config_ext"]) bench_config = functions.find_file(bench_dir, "*"+config["config_ext"]) model_log = functions.find_file(model_dir, "*"+config["logfile_ext"]) el = [ bit_for_bit(model_out, bench_out, config), diff_configurations(model_config, bench_config, bundle, bundle), bundle.parse_log(model_log) ] return el
[ "def", "_analyze_case", "(", "model_dir", ",", "bench_dir", ",", "config", ")", ":", "bundle", "=", "livvkit", ".", "verification_model_module", "model_out", "=", "functions", ".", "find_file", "(", "model_dir", ",", "\"*\"", "+", "config", "[", "\"output_ext\""...
Runs all of the verification checks on a particular case
[ "Runs", "all", "of", "the", "verification", "checks", "on", "a", "particular", "case" ]
train
https://github.com/LIVVkit/LIVVkit/blob/680120cd437e408673e62e535fc0a246c7fc17db/livvkit/components/verification.py#L84-L97
LIVVkit/LIVVkit
livvkit/components/verification.py
_print_summary
def _print_summary(case, summary): """ Show some statistics from the run """ for dof, data in summary.items(): b4b = data["Bit for Bit"] conf = data["Configurations"] stdout = data["Std. Out Files"] print(" " + case + " " + str(dof)) print(" --------------------") print(" Bit for bit matches : " + str(b4b[0]) + " of " + str(b4b[1])) print(" Configuration matches : " + str(conf[0]) + " of " + str(conf[1])) print(" Std. Out files parsed : " + str(stdout)) print("")
python
def _print_summary(case, summary): """ Show some statistics from the run """ for dof, data in summary.items(): b4b = data["Bit for Bit"] conf = data["Configurations"] stdout = data["Std. Out Files"] print(" " + case + " " + str(dof)) print(" --------------------") print(" Bit for bit matches : " + str(b4b[0]) + " of " + str(b4b[1])) print(" Configuration matches : " + str(conf[0]) + " of " + str(conf[1])) print(" Std. Out files parsed : " + str(stdout)) print("")
[ "def", "_print_summary", "(", "case", ",", "summary", ")", ":", "for", "dof", ",", "data", "in", "summary", ".", "items", "(", ")", ":", "b4b", "=", "data", "[", "\"Bit for Bit\"", "]", "conf", "=", "data", "[", "\"Configurations\"", "]", "stdout", "="...
Show some statistics from the run
[ "Show", "some", "statistics", "from", "the", "run" ]
train
https://github.com/LIVVkit/LIVVkit/blob/680120cd437e408673e62e535fc0a246c7fc17db/livvkit/components/verification.py#L100-L111
LIVVkit/LIVVkit
livvkit/components/verification.py
_summarize_result
def _summarize_result(result, summary): """ Trim out some data to return for the index page """ if "Bit for Bit" not in summary: summary["Bit for Bit"] = [0, 0] if "Configurations" not in summary: summary["Configurations"] = [0, 0] if "Std. Out Files" not in summary: summary["Std. Out Files"] = 0 # Get the number of bit for bit failures total_count = 0 failure_count = 0 summary_data = None for elem in result: if elem["Type"] == "Bit for Bit" and "Data" in elem: elem_data = elem["Data"] summary_data = summary["Bit for Bit"] total_count += 1 for var in six.iterkeys(elem_data): if elem_data[var]["Max Error"] != 0: failure_count += 1 break if summary_data is not None: summary_data = np.add(summary_data, [total_count-failure_count, total_count]).tolist() summary["Bit for Bit"] = summary_data # Get the number of config matches summary_data = None total_count = 0 failure_count = 0 for elem in result: if elem["Title"] == "Configuration Comparison" and elem["Type"] == "Diff": elem_data = elem["Data"] summary_data = summary["Configurations"] total_count += 1 failed = False for section_name, varlist in elem_data.items(): for var, val in varlist.items(): if not val[0]: failed = True if failed: failure_count += 1 if summary_data is not None: success_count = total_count - failure_count summary_data = np.add(summary_data, [success_count, total_count]).tolist() summary["Configurations"] = summary_data # Get the number of files parsed for elem in result: if elem["Title"] == "Output Log" and elem["Type"] == "Table": summary["Std. Out Files"] += 1 break return summary
python
def _summarize_result(result, summary): """ Trim out some data to return for the index page """ if "Bit for Bit" not in summary: summary["Bit for Bit"] = [0, 0] if "Configurations" not in summary: summary["Configurations"] = [0, 0] if "Std. Out Files" not in summary: summary["Std. Out Files"] = 0 # Get the number of bit for bit failures total_count = 0 failure_count = 0 summary_data = None for elem in result: if elem["Type"] == "Bit for Bit" and "Data" in elem: elem_data = elem["Data"] summary_data = summary["Bit for Bit"] total_count += 1 for var in six.iterkeys(elem_data): if elem_data[var]["Max Error"] != 0: failure_count += 1 break if summary_data is not None: summary_data = np.add(summary_data, [total_count-failure_count, total_count]).tolist() summary["Bit for Bit"] = summary_data # Get the number of config matches summary_data = None total_count = 0 failure_count = 0 for elem in result: if elem["Title"] == "Configuration Comparison" and elem["Type"] == "Diff": elem_data = elem["Data"] summary_data = summary["Configurations"] total_count += 1 failed = False for section_name, varlist in elem_data.items(): for var, val in varlist.items(): if not val[0]: failed = True if failed: failure_count += 1 if summary_data is not None: success_count = total_count - failure_count summary_data = np.add(summary_data, [success_count, total_count]).tolist() summary["Configurations"] = summary_data # Get the number of files parsed for elem in result: if elem["Title"] == "Output Log" and elem["Type"] == "Table": summary["Std. Out Files"] += 1 break return summary
[ "def", "_summarize_result", "(", "result", ",", "summary", ")", ":", "if", "\"Bit for Bit\"", "not", "in", "summary", ":", "summary", "[", "\"Bit for Bit\"", "]", "=", "[", "0", ",", "0", "]", "if", "\"Configurations\"", "not", "in", "summary", ":", "summa...
Trim out some data to return for the index page
[ "Trim", "out", "some", "data", "to", "return", "for", "the", "index", "page" ]
train
https://github.com/LIVVkit/LIVVkit/blob/680120cd437e408673e62e535fc0a246c7fc17db/livvkit/components/verification.py#L114-L166
LIVVkit/LIVVkit
livvkit/components/verification.py
bit_for_bit
def bit_for_bit(model_path, bench_path, config): """ Checks whether the given files have bit for bit solution matches on the given variable list. Args: model_path: absolute path to the model dataset bench_path: absolute path to the benchmark dataset config: the configuration of the set of analyses Returns: A dictionary created by the elements object corresponding to the results of the bit for bit testing """ fname = model_path.split(os.path.sep)[-1] # Error handling if not (os.path.isfile(bench_path) and os.path.isfile(model_path)): return elements.error("Bit for Bit", "File named " + fname + " has no suitable match!") try: model_data = Dataset(model_path) bench_data = Dataset(bench_path) except (FileNotFoundError, PermissionError): return elements.error("Bit for Bit", "File named " + fname + " could not be read!") if not (netcdf.has_time(model_data) and netcdf.has_time(bench_data)): return elements.error("Bit for Bit", "File named " + fname + " could not be read!") # Begin bit for bit analysis headers = ["Max Error", "Index of Max Error", "RMS Error", "Plot"] stats = LIVVDict() for i, var in enumerate(config["bit_for_bit_vars"]): if var in model_data.variables and var in bench_data.variables: m_vardata = model_data.variables[var][:] b_vardata = bench_data.variables[var][:] diff_data = m_vardata - b_vardata if diff_data.any(): stats[var]["Max Error"] = np.amax(np.absolute(diff_data)) stats[var]["Index of Max Error"] = str( np.unravel_index(np.absolute(diff_data).argmax(), diff_data.shape)) stats[var]["RMS Error"] = np.sqrt(np.sum(np.square(diff_data).flatten()) / diff_data.size) pf = plot_bit_for_bit(fname, var, m_vardata, b_vardata, diff_data) else: stats[var]["Max Error"] = stats[var]["RMS Error"] = 0 pf = stats[var]["Index of Max Error"] = "N/A" stats[var]["Plot"] = pf else: stats[var] = {"Max Error": "No Match", "RMS Error": "N/A", "Plot": "N/A"} model_data.close() bench_data.close() return elements.bit_for_bit("Bit for Bit", headers, stats)
python
def bit_for_bit(model_path, bench_path, config): """ Checks whether the given files have bit for bit solution matches on the given variable list. Args: model_path: absolute path to the model dataset bench_path: absolute path to the benchmark dataset config: the configuration of the set of analyses Returns: A dictionary created by the elements object corresponding to the results of the bit for bit testing """ fname = model_path.split(os.path.sep)[-1] # Error handling if not (os.path.isfile(bench_path) and os.path.isfile(model_path)): return elements.error("Bit for Bit", "File named " + fname + " has no suitable match!") try: model_data = Dataset(model_path) bench_data = Dataset(bench_path) except (FileNotFoundError, PermissionError): return elements.error("Bit for Bit", "File named " + fname + " could not be read!") if not (netcdf.has_time(model_data) and netcdf.has_time(bench_data)): return elements.error("Bit for Bit", "File named " + fname + " could not be read!") # Begin bit for bit analysis headers = ["Max Error", "Index of Max Error", "RMS Error", "Plot"] stats = LIVVDict() for i, var in enumerate(config["bit_for_bit_vars"]): if var in model_data.variables and var in bench_data.variables: m_vardata = model_data.variables[var][:] b_vardata = bench_data.variables[var][:] diff_data = m_vardata - b_vardata if diff_data.any(): stats[var]["Max Error"] = np.amax(np.absolute(diff_data)) stats[var]["Index of Max Error"] = str( np.unravel_index(np.absolute(diff_data).argmax(), diff_data.shape)) stats[var]["RMS Error"] = np.sqrt(np.sum(np.square(diff_data).flatten()) / diff_data.size) pf = plot_bit_for_bit(fname, var, m_vardata, b_vardata, diff_data) else: stats[var]["Max Error"] = stats[var]["RMS Error"] = 0 pf = stats[var]["Index of Max Error"] = "N/A" stats[var]["Plot"] = pf else: stats[var] = {"Max Error": "No Match", "RMS Error": "N/A", "Plot": "N/A"} model_data.close() bench_data.close() return elements.bit_for_bit("Bit for Bit", headers, stats)
[ "def", "bit_for_bit", "(", "model_path", ",", "bench_path", ",", "config", ")", ":", "fname", "=", "model_path", ".", "split", "(", "os", ".", "path", ".", "sep", ")", "[", "-", "1", "]", "# Error handling", "if", "not", "(", "os", ".", "path", ".", ...
Checks whether the given files have bit for bit solution matches on the given variable list. Args: model_path: absolute path to the model dataset bench_path: absolute path to the benchmark dataset config: the configuration of the set of analyses Returns: A dictionary created by the elements object corresponding to the results of the bit for bit testing
[ "Checks", "whether", "the", "given", "files", "have", "bit", "for", "bit", "solution", "matches", "on", "the", "given", "variable", "list", "." ]
train
https://github.com/LIVVkit/LIVVkit/blob/680120cd437e408673e62e535fc0a246c7fc17db/livvkit/components/verification.py#L177-L229
LIVVkit/LIVVkit
livvkit/components/verification.py
diff_configurations
def diff_configurations(model_config, bench_config, model_bundle, bench_bundle): """ Description Args: model_config: a dictionary with the model configuration data bench_config: a dictionary with the benchmark configuration data model_bundle: a LIVVkit model bundle object bench_bundle: a LIVVkit model bundle object Returns: A dictionary created by the elements object corresponding to the results of the bit for bit testing """ diff_dict = LIVVDict() model_data = model_bundle.parse_config(model_config) bench_data = bench_bundle.parse_config(bench_config) if model_data == {} and bench_data == {}: return elements.error("Configuration Comparison", "Could not open file: " + model_config.split(os.path.sep)[-1]) model_sections = set(six.iterkeys(model_data)) bench_sections = set(six.iterkeys(bench_data)) all_sections = set(model_sections.union(bench_sections)) for s in all_sections: model_vars = set(six.iterkeys(model_data[s])) if s in model_sections else set() bench_vars = set(six.iterkeys(bench_data[s])) if s in bench_sections else set() all_vars = set(model_vars.union(bench_vars)) for v in all_vars: model_val = model_data[s][v] if s in model_sections and v in model_vars else 'NA' bench_val = bench_data[s][v] if s in bench_sections and v in bench_vars else 'NA' same = True if model_val == bench_val and model_val != 'NA' else False diff_dict[s][v] = (same, model_val, bench_val) return elements.file_diff("Configuration Comparison", diff_dict)
python
def diff_configurations(model_config, bench_config, model_bundle, bench_bundle): """ Description Args: model_config: a dictionary with the model configuration data bench_config: a dictionary with the benchmark configuration data model_bundle: a LIVVkit model bundle object bench_bundle: a LIVVkit model bundle object Returns: A dictionary created by the elements object corresponding to the results of the bit for bit testing """ diff_dict = LIVVDict() model_data = model_bundle.parse_config(model_config) bench_data = bench_bundle.parse_config(bench_config) if model_data == {} and bench_data == {}: return elements.error("Configuration Comparison", "Could not open file: " + model_config.split(os.path.sep)[-1]) model_sections = set(six.iterkeys(model_data)) bench_sections = set(six.iterkeys(bench_data)) all_sections = set(model_sections.union(bench_sections)) for s in all_sections: model_vars = set(six.iterkeys(model_data[s])) if s in model_sections else set() bench_vars = set(six.iterkeys(bench_data[s])) if s in bench_sections else set() all_vars = set(model_vars.union(bench_vars)) for v in all_vars: model_val = model_data[s][v] if s in model_sections and v in model_vars else 'NA' bench_val = bench_data[s][v] if s in bench_sections and v in bench_vars else 'NA' same = True if model_val == bench_val and model_val != 'NA' else False diff_dict[s][v] = (same, model_val, bench_val) return elements.file_diff("Configuration Comparison", diff_dict)
[ "def", "diff_configurations", "(", "model_config", ",", "bench_config", ",", "model_bundle", ",", "bench_bundle", ")", ":", "diff_dict", "=", "LIVVDict", "(", ")", "model_data", "=", "model_bundle", ".", "parse_config", "(", "model_config", ")", "bench_data", "=",...
Description Args: model_config: a dictionary with the model configuration data bench_config: a dictionary with the benchmark configuration data model_bundle: a LIVVkit model bundle object bench_bundle: a LIVVkit model bundle object Returns: A dictionary created by the elements object corresponding to the results of the bit for bit testing
[ "Description" ]
train
https://github.com/LIVVkit/LIVVkit/blob/680120cd437e408673e62e535fc0a246c7fc17db/livvkit/components/verification.py#L232-L266
LIVVkit/LIVVkit
livvkit/components/verification.py
plot_bit_for_bit
def plot_bit_for_bit(case, var_name, model_data, bench_data, diff_data): """ Create a bit for bit plot """ plot_title = "" plot_name = case + "_" + var_name + ".png" plot_path = os.path.join(os.path.join(livvkit.output_dir, "verification", "imgs")) functions.mkdir_p(plot_path) m_ndim = np.ndim(model_data) b_ndim = np.ndim(bench_data) if m_ndim != b_ndim: return "Dataset dimensions didn't match!" if m_ndim == 3: model_data = model_data[-1] bench_data = bench_data[-1] diff_data = diff_data[-1] plot_title = "Showing "+var_name+"[-1,:,:]" elif m_ndim == 4: model_data = model_data[-1][0] bench_data = bench_data[-1][0] diff_data = diff_data[-1][0] plot_title = "Showing "+var_name+"[-1,0,:,:]" plt.figure(figsize=(12, 3), dpi=80) plt.clf() # Calculate min and max to scale the colorbars _max = np.amax([np.amax(model_data), np.amax(bench_data)]) _min = np.amin([np.amin(model_data), np.amin(bench_data)]) # Plot the model output plt.subplot(1, 3, 1) plt.xlabel("Model Data") plt.ylabel(var_name) plt.xticks([]) plt.yticks([]) plt.imshow(model_data, vmin=_min, vmax=_max, interpolation='nearest', cmap=colormaps.viridis) plt.colorbar() # Plot the benchmark data plt.subplot(1, 3, 2) plt.xlabel("Benchmark Data") plt.xticks([]) plt.yticks([]) plt.imshow(bench_data, vmin=_min, vmax=_max, interpolation='nearest', cmap=colormaps.viridis) plt.colorbar() # Plot the difference plt.subplot(1, 3, 3) plt.xlabel("Difference") plt.xticks([]) plt.yticks([]) plt.imshow(diff_data, interpolation='nearest', cmap=colormaps.viridis) plt.colorbar() plt.tight_layout(rect=(0, 0, 0.95, 0.9)) plt.suptitle(plot_title) plot_file = os.path.sep.join([plot_path, plot_name]) if livvkit.publish: plt.savefig(os.path.splitext(plot_file)[0]+'.eps', dpi=600) plt.savefig(plot_file) plt.close() return os.path.join(os.path.relpath(plot_path, os.path.join(livvkit.output_dir, "verification")), plot_name)
python
def plot_bit_for_bit(case, var_name, model_data, bench_data, diff_data): """ Create a bit for bit plot """ plot_title = "" plot_name = case + "_" + var_name + ".png" plot_path = os.path.join(os.path.join(livvkit.output_dir, "verification", "imgs")) functions.mkdir_p(plot_path) m_ndim = np.ndim(model_data) b_ndim = np.ndim(bench_data) if m_ndim != b_ndim: return "Dataset dimensions didn't match!" if m_ndim == 3: model_data = model_data[-1] bench_data = bench_data[-1] diff_data = diff_data[-1] plot_title = "Showing "+var_name+"[-1,:,:]" elif m_ndim == 4: model_data = model_data[-1][0] bench_data = bench_data[-1][0] diff_data = diff_data[-1][0] plot_title = "Showing "+var_name+"[-1,0,:,:]" plt.figure(figsize=(12, 3), dpi=80) plt.clf() # Calculate min and max to scale the colorbars _max = np.amax([np.amax(model_data), np.amax(bench_data)]) _min = np.amin([np.amin(model_data), np.amin(bench_data)]) # Plot the model output plt.subplot(1, 3, 1) plt.xlabel("Model Data") plt.ylabel(var_name) plt.xticks([]) plt.yticks([]) plt.imshow(model_data, vmin=_min, vmax=_max, interpolation='nearest', cmap=colormaps.viridis) plt.colorbar() # Plot the benchmark data plt.subplot(1, 3, 2) plt.xlabel("Benchmark Data") plt.xticks([]) plt.yticks([]) plt.imshow(bench_data, vmin=_min, vmax=_max, interpolation='nearest', cmap=colormaps.viridis) plt.colorbar() # Plot the difference plt.subplot(1, 3, 3) plt.xlabel("Difference") plt.xticks([]) plt.yticks([]) plt.imshow(diff_data, interpolation='nearest', cmap=colormaps.viridis) plt.colorbar() plt.tight_layout(rect=(0, 0, 0.95, 0.9)) plt.suptitle(plot_title) plot_file = os.path.sep.join([plot_path, plot_name]) if livvkit.publish: plt.savefig(os.path.splitext(plot_file)[0]+'.eps', dpi=600) plt.savefig(plot_file) plt.close() return os.path.join(os.path.relpath(plot_path, os.path.join(livvkit.output_dir, "verification")), plot_name)
[ "def", "plot_bit_for_bit", "(", "case", ",", "var_name", ",", "model_data", ",", "bench_data", ",", "diff_data", ")", ":", "plot_title", "=", "\"\"", "plot_name", "=", "case", "+", "\"_\"", "+", "var_name", "+", "\".png\"", "plot_path", "=", "os", ".", "pa...
Create a bit for bit plot
[ "Create", "a", "bit", "for", "bit", "plot" ]
train
https://github.com/LIVVkit/LIVVkit/blob/680120cd437e408673e62e535fc0a246c7fc17db/livvkit/components/verification.py#L269-L331