repo: string (lengths 7 to 54)
path: string (lengths 4 to 192)
url: string (lengths 87 to 284)
code: string (lengths 78 to 104k)
code_tokens: list
docstring: string (lengths 1 to 46.9k)
docstring_tokens: list
language: string (1 class)
partition: string (3 classes)
jedie/DragonPy
PyDC/PyDC/utils.py
https://github.com/jedie/DragonPy/blob/6659e5b5133aab26979a498ee7453495773a4f6c/PyDC/PyDC/utils.py#L634-L651
def print_bitlist(bitstream, no_repr=False):
    """
    >>> bitstream = bytes2bitstream("Hallo World!")
    >>> print_bitlist(bitstream)
    ... # doctest: +NORMALIZE_WHITESPACE
     8 - 00010010 10000110 00110110 00110110 11110110 00000100 11101010 11110110
         0x48 'H' 0x61 'a' 0x6c 'l' 0x6c 'l' 0x6f 'o' 0x20 ' ' 0x57 'W' 0x6f 'o'
    12 - 01001110 00110110 00100110 10000100
         0x72 'r' 0x6c 'l' 0x64 'd' 0x21 '!'

    >>> bitstream = bytes2bitstream("Hallo World!")
    >>> print_bitlist(bitstream, no_repr=True)
    ... # doctest: +NORMALIZE_WHITESPACE
     8 - 00010010 10000110 00110110 00110110 11110110 00000100 11101010 11110110
    12 - 01001110 00110110 00100110 10000100
    """
    block_bit_list = iter_steps(bitstream, steps=8)
    print_block_bit_list(block_bit_list, no_repr=no_repr)
[ "def", "print_bitlist", "(", "bitstream", ",", "no_repr", "=", "False", ")", ":", "block_bit_list", "=", "iter_steps", "(", "bitstream", ",", "steps", "=", "8", ")", "print_block_bit_list", "(", "block_bit_list", ",", "no_repr", "=", "no_repr", ")" ]
>>> bitstream = bytes2bitstream("Hallo World!")
>>> print_bitlist(bitstream)
... # doctest: +NORMALIZE_WHITESPACE
 8 - 00010010 10000110 00110110 00110110 11110110 00000100 11101010 11110110
     0x48 'H' 0x61 'a' 0x6c 'l' 0x6c 'l' 0x6f 'o' 0x20 ' ' 0x57 'W' 0x6f 'o'
12 - 01001110 00110110 00100110 10000100
     0x72 'r' 0x6c 'l' 0x64 'd' 0x21 '!'

>>> bitstream = bytes2bitstream("Hallo World!")
>>> print_bitlist(bitstream, no_repr=True)
... # doctest: +NORMALIZE_WHITESPACE
 8 - 00010010 10000110 00110110 00110110 11110110 00000100 11101010 11110110
12 - 01001110 00110110 00100110 10000100
[ ">>>", "bitstream", "=", "bytes2bitstream", "(", "Hallo", "World!", ")", ">>>", "print_bitlist", "(", "bitstream", ")", "...", "#", "doctest", ":", "+", "NORMALIZE_WHITESPACE", "8", "-", "00010010", "10000110", "00110110", "00110110", "11110110", "00000100", "11101010", "11110110", "0x48", "H", "0x61", "a", "0x6c", "l", "0x6c", "l", "0x6f", "o", "0x20", "0x57", "W", "0x6f", "o", "12", "-", "01001110", "00110110", "00100110", "10000100", "0x72", "r", "0x6c", "l", "0x64", "d", "0x21", "!" ]
python
train
cloudera/cm_api
python/src/cm_api/endpoints/cms.py
https://github.com/cloudera/cm_api/blob/5d2512375bd94684b4da36df9e0d9177865ffcbb/python/src/cm_api/endpoints/cms.py#L320-L343
def update_peer(self, current_name, new_name, new_url, username, password,
                peer_type="REPLICATION"):
    """
    Update a replication peer.

    @param current_name: The name of the peer to be updated.
    @param new_name: The new name for the peer.
    @param new_url: The new url for the peer.
    @param username: The admin username to use to setup the remote side
                     of the peer connection.
    @param password: The password of the admin user.
    @param peer_type: Added in v11. The type of the peer.
                      Defaults to 'REPLICATION'.
    @return: The updated peer.
    @since: API v3
    """
    if self._get_resource_root().version < 11:
        peer_type = None
    peer = ApiCmPeer(self._get_resource_root(),
                     name=new_name,
                     url=new_url,
                     username=username,
                     password=password,
                     type=peer_type)
    return self._put("peers/" + current_name, ApiCmPeer, data=peer,
                     api_version=3)
[ "def", "update_peer", "(", "self", ",", "current_name", ",", "new_name", ",", "new_url", ",", "username", ",", "password", ",", "peer_type", "=", "\"REPLICATION\"", ")", ":", "if", "self", ".", "_get_resource_root", "(", ")", ".", "version", "<", "11", ":", "peer_type", "=", "None", "peer", "=", "ApiCmPeer", "(", "self", ".", "_get_resource_root", "(", ")", ",", "name", "=", "new_name", ",", "url", "=", "new_url", ",", "username", "=", "username", ",", "password", "=", "password", ",", "type", "=", "peer_type", ")", "return", "self", ".", "_put", "(", "\"peers/\"", "+", "current_name", ",", "ApiCmPeer", ",", "data", "=", "peer", ",", "api_version", "=", "3", ")" ]
Update a replication peer.

@param current_name: The name of the peer to be updated.
@param new_name: The new name for the peer.
@param new_url: The new url for the peer.
@param username: The admin username to use to setup the remote side of the peer connection.
@param password: The password of the admin user.
@param peer_type: Added in v11. The type of the peer. Defaults to 'REPLICATION'.
@return: The updated peer.
@since: API v3
[ "Update", "a", "replication", "peer", "." ]
python
train
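A minimal usage sketch for the method above. The `cms` handle, hostname, and credentials are hypothetical, not taken from this record; `cms` stands for a ClouderaManager object obtained from an ApiResource.

# Hypothetical usage: rename peer "east" and point it at a new CM host.
peer = cms.update_peer("east", "east-renamed", "http://cm2.example.com:7180",
                       "admin", "secret")
print(peer.name)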
saltstack/salt
salt/states/rdp.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/rdp.py#L15-L38
def enabled(name):
    '''
    Enable the RDP service and make sure access to the RDP
    port is allowed in the firewall configuration
    '''
    ret = {'name': name,
           'result': True,
           'changes': {},
           'comment': ''}

    stat = __salt__['rdp.status']()

    if not stat:
        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'RDP will be enabled'
            return ret

        ret['result'] = __salt__['rdp.enable']()
        ret['changes'] = {'RDP was enabled': True}
        return ret

    ret['comment'] = 'RDP is enabled'
    return ret
[ "def", "enabled", "(", "name", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'result'", ":", "True", ",", "'changes'", ":", "{", "}", ",", "'comment'", ":", "''", "}", "stat", "=", "__salt__", "[", "'rdp.status'", "]", "(", ")", "if", "not", "stat", ":", "if", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'result'", "]", "=", "None", "ret", "[", "'comment'", "]", "=", "'RDP will be enabled'", "return", "ret", "ret", "[", "'result'", "]", "=", "__salt__", "[", "'rdp.enable'", "]", "(", ")", "ret", "[", "'changes'", "]", "=", "{", "'RDP was enabled'", ":", "True", "}", "return", "ret", "ret", "[", "'comment'", "]", "=", "'RDP is enabled'", "return", "ret" ]
Enable the RDP service and make sure access to the RDP port is allowed in the firewall configuration
[ "Enable", "the", "RDP", "service", "and", "make", "sure", "access", "to", "the", "RDP", "port", "is", "allowed", "in", "the", "firewall", "configuration" ]
python
train
jforman/pybindxml
pybindxml/reader.py
https://github.com/jforman/pybindxml/blob/795fd5b1fab85e2debad8655888e2d52ef8dce5f/pybindxml/reader.py#L33-L49
def gather_xml(self):
    """Attempt to read the XML, whether from a file on-disk or via host:port.

    TODO: handle when you can't gather stats, due to bad hostname
    """
    if self.xml_filepath:
        with open(self.xml_filepath, "r") as xml_fh:
            self.raw_xml = xml_fh.read()
            self.bs_xml = BeautifulSoup(self.raw_xml, 'lxml')
    else:
        try:
            req = urlopen('http://%s:%s' % (self.host, self.port))
            self.raw_xml = req.read()
            self.bs_xml = BeautifulSoup(self.raw_xml, 'lxml')
        except URLError as u_error:
            raise XmlError('Unable to query BIND (%s:%s) for statistics. Reason: %s.' %
                           (self.host, self.port, u_error))
[ "def", "gather_xml", "(", "self", ")", ":", "if", "self", ".", "xml_filepath", ":", "with", "open", "(", "self", ".", "xml_filepath", ",", "\"r\"", ")", "as", "xml_fh", ":", "self", ".", "raw_xml", "=", "xml_fh", ".", "read", "(", ")", "self", ".", "bs_xml", "=", "BeautifulSoup", "(", "self", ".", "raw_xml", ",", "'lxml'", ")", "else", ":", "try", ":", "req", "=", "urlopen", "(", "'http://%s:%s'", "%", "(", "self", ".", "host", ",", "self", ".", "port", ")", ")", "self", ".", "raw_xml", "=", "req", ".", "read", "(", ")", "self", ".", "bs_xml", "=", "BeautifulSoup", "(", "self", ".", "raw_xml", ",", "'lxml'", ")", "except", "URLError", "as", "u_error", ":", "raise", "XmlError", "(", "'Unable to query BIND (%s:%s) for statistics. Reason: %s.'", "%", "(", "self", ".", "host", ",", "self", ".", "port", ",", "u_error", ")", ")" ]
Attempt to read the XML, whether from a file on-disk or via host:port.

TODO: handle when you can't gather stats, due to bad hostname
[ "Attempt", "to", "read", "the", "XML", "whether", "from", "a", "file", "on", "-", "disk", "or", "via", "host", ":", "port", "." ]
python
train
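A hedged usage sketch. The constructor arguments follow the attributes the method reads (`host`, `port`, `xml_filepath`), but the exact `BindXmlReader` entry point and the `get_stats()` call are assumptions about this library's public API, not confirmed by the record:

from pybindxml import reader

# Assumption: BindXmlReader wires host/port through to gather_xml().
bind = reader.BindXmlReader(host='localhost', port=8053)
bind.get_stats()  # hypothetical call that triggers gather_xml() internally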
grycap/RADL
radl/radl_parse.py
https://github.com/grycap/RADL/blob/03ccabb0313a48a5aa0e20c1f7983fddcb95e9cb/radl/radl_parse.py#L274-L281
def p_system_sentence(self, t):
    """system_sentence : SYSTEM VAR
                       | SYSTEM VAR LPAREN features RPAREN"""
    if len(t) == 3:
        t[0] = system(t[2], reference=True, line=t.lineno(1))
    else:
        t[0] = system(t[2], t[4], line=t.lineno(1))
[ "def", "p_system_sentence", "(", "self", ",", "t", ")", ":", "if", "len", "(", "t", ")", "==", "3", ":", "t", "[", "0", "]", "=", "system", "(", "t", "[", "2", "]", ",", "reference", "=", "True", ",", "line", "=", "t", ".", "lineno", "(", "1", ")", ")", "else", ":", "t", "[", "0", "]", "=", "system", "(", "t", "[", "2", "]", ",", "t", "[", "4", "]", ",", "line", "=", "t", ".", "lineno", "(", "1", ")", ")" ]
system_sentence : SYSTEM VAR | SYSTEM VAR LPAREN features RPAREN
[ "system_sentence", ":", "SYSTEM", "VAR", "|", "SYSTEM", "VAR", "LPAREN", "features", "RPAREN" ]
python
train
erdewit/ib_insync
ib_insync/wrapper.py
https://github.com/erdewit/ib_insync/blob/d0646a482590f5cb7bfddbd1f0870f8c4bc1df80/ib_insync/wrapper.py#L88-L102
def _endReq(self, key, result=None, success=True):
    """
    Finish the future of corresponding key with the given result.
    If no result is given then it will be popped off the general results.
    """
    future = self._futures.pop(key, None)
    self._reqId2Contract.pop(key, None)
    if future:
        if result is None:
            result = self._results.pop(key, [])
        if not future.done():
            if success:
                future.set_result(result)
            else:
                future.set_exception(result)
[ "def", "_endReq", "(", "self", ",", "key", ",", "result", "=", "None", ",", "success", "=", "True", ")", ":", "future", "=", "self", ".", "_futures", ".", "pop", "(", "key", ",", "None", ")", "self", ".", "_reqId2Contract", ".", "pop", "(", "key", ",", "None", ")", "if", "future", ":", "if", "result", "is", "None", ":", "result", "=", "self", ".", "_results", ".", "pop", "(", "key", ",", "[", "]", ")", "if", "not", "future", ".", "done", "(", ")", ":", "if", "success", ":", "future", ".", "set_result", "(", "result", ")", "else", ":", "future", ".", "set_exception", "(", "result", ")" ]
Finish the future of corresponding key with the given result. If no result is given then it will be popped off the general results.
[ "Finish", "the", "future", "of", "corresponding", "key", "with", "the", "given", "result", ".", "If", "no", "result", "is", "given", "then", "it", "will", "be", "popped", "of", "the", "general", "results", "." ]
python
train
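The future-completion pattern `_endReq` relies on, shown in isolation as a small runnable sketch; the names and payload here are illustrative, not from ib_insync:

import asyncio

async def main():
    fut = asyncio.get_running_loop().create_future()
    # A request handler would normally resolve this later; finish it the
    # same way _endReq does: only if the future is still pending.
    if not fut.done():
        fut.set_result(['tick1', 'tick2'])
    print(await fut)  # ['tick1', 'tick2']

asyncio.run(main())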
amzn/ion-python
amazon/ion/reader_binary.py
https://github.com/amzn/ion-python/blob/0b21fa3ba7755f55f745e4aa970d86343b82449d/amazon/ion/reader_binary.py#L132-L152
def _parse_var_int_components(buf, signed):
    """Parses a ``VarInt`` or ``VarUInt`` field from a file-like object."""
    value = 0
    sign = 1
    while True:
        ch = buf.read(1)
        if ch == '':
            raise IonException('Variable integer under-run')
        octet = ord(ch)
        if signed:
            if octet & _VAR_INT_SIGN_MASK:
                sign = -1
            value = octet & _VAR_INT_SIGN_VALUE_MASK
            signed = False
        else:
            value <<= _VAR_INT_VALUE_BITS
            value |= octet & _VAR_INT_VALUE_MASK
        if octet & _VAR_INT_SIGNAL_MASK:
            break
    return sign, value
[ "def", "_parse_var_int_components", "(", "buf", ",", "signed", ")", ":", "value", "=", "0", "sign", "=", "1", "while", "True", ":", "ch", "=", "buf", ".", "read", "(", "1", ")", "if", "ch", "==", "''", ":", "raise", "IonException", "(", "'Variable integer under-run'", ")", "octet", "=", "ord", "(", "ch", ")", "if", "signed", ":", "if", "octet", "&", "_VAR_INT_SIGN_MASK", ":", "sign", "=", "-", "1", "value", "=", "octet", "&", "_VAR_INT_SIGN_VALUE_MASK", "signed", "=", "False", "else", ":", "value", "<<=", "_VAR_INT_VALUE_BITS", "value", "|=", "octet", "&", "_VAR_INT_VALUE_MASK", "if", "octet", "&", "_VAR_INT_SIGNAL_MASK", ":", "break", "return", "sign", ",", "value" ]
Parses a ``VarInt`` or ``VarUInt`` field from a file-like object.
[ "Parses", "a", "VarInt", "or", "VarUInt", "field", "from", "a", "file", "-", "like", "object", "." ]
python
train
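The octet layout the parser above decodes, demonstrated for the unsigned case as a standalone sketch. The helper name is mine, and the Ion VarUInt constants are inlined: seven value bits per octet, with the high bit (0x80) marking the final octet:

def decode_var_uint(data):
    # Mirrors the unsigned branch above: accumulate 7 bits per octet
    # until an octet with the end-marker bit (0x80) is seen.
    value = 0
    for octet in data:
        value = (value << 7) | (octet & 0x7F)
        if octet & 0x80:
            break
    return value

assert decode_var_uint(b"\x0e\xeb") == 1899  # (14 << 7) | 0x6b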
CityOfZion/neo-python
neo/Core/TX/Transaction.py
https://github.com/CityOfZion/neo-python/blob/fe90f62e123d720d4281c79af0598d9df9e776fb/neo/Core/TX/Transaction.py#L278-L289
def Hash(self):
    """
    Get the hash of the transaction.

    Returns:
        UInt256:
    """
    if not self.__hash:
        ba = bytearray(binascii.unhexlify(self.GetHashData()))
        hash = Crypto.Hash256(ba)
        self.__hash = UInt256(data=hash)
    return self.__hash
[ "def", "Hash", "(", "self", ")", ":", "if", "not", "self", ".", "__hash", ":", "ba", "=", "bytearray", "(", "binascii", ".", "unhexlify", "(", "self", ".", "GetHashData", "(", ")", ")", ")", "hash", "=", "Crypto", ".", "Hash256", "(", "ba", ")", "self", ".", "__hash", "=", "UInt256", "(", "data", "=", "hash", ")", "return", "self", ".", "__hash" ]
Get the hash of the transaction. Returns: UInt256:
[ "Get", "the", "hash", "of", "the", "transaction", "." ]
python
train
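Assuming `Crypto.Hash256` is the usual double SHA-256 used for NEO transaction hashes, the hashing step can be sketched with the standard library alone (the helper name is mine):

import hashlib

def hash256(data: bytes) -> bytes:
    # Double SHA-256, as assumed for the transaction hash above.
    return hashlib.sha256(hashlib.sha256(data).digest()).digest()

print(hash256(b"\x00").hex())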
PyHDI/Pyverilog
pyverilog/vparser/parser.py
https://github.com/PyHDI/Pyverilog/blob/b852cc5ed6a7a2712e33639f9d9782d0d1587a53/pyverilog/vparser/parser.py#L241-L244
def p_portlist_io(self, p):
    'portlist : LPAREN ioports RPAREN SEMICOLON'
    p[0] = Portlist(ports=p[2], lineno=p.lineno(1))
    p.set_lineno(0, p.lineno(1))
[ "def", "p_portlist_io", "(", "self", ",", "p", ")", ":", "p", "[", "0", "]", "=", "Portlist", "(", "ports", "=", "p", "[", "2", "]", ",", "lineno", "=", "p", ".", "lineno", "(", "1", ")", ")", "p", ".", "set_lineno", "(", "0", ",", "p", ".", "lineno", "(", "1", ")", ")" ]
portlist : LPAREN ioports RPAREN SEMICOLON
[ "portlist", ":", "LPAREN", "ioports", "RPAREN", "SEMICOLON" ]
python
train
quantmind/pulsar
pulsar/utils/importer.py
https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/pulsar/utils/importer.py#L32-L47
def import_modules(modules, safe=True):
    '''Safely import a list of *modules*
    '''
    all = []
    for mname in modules:
        if mname.endswith('.*'):
            to_load = expand_star(mname)
        else:
            to_load = [mname]
        for module in to_load:
            try:
                all.append(import_module(module))
            except ImportError:
                if not safe:
                    raise
    return all
[ "def", "import_modules", "(", "modules", ",", "safe", "=", "True", ")", ":", "all", "=", "[", "]", "for", "mname", "in", "modules", ":", "if", "mname", ".", "endswith", "(", "'.*'", ")", ":", "to_load", "=", "expand_star", "(", "mname", ")", "else", ":", "to_load", "=", "[", "mname", "]", "for", "module", "in", "to_load", ":", "try", ":", "all", ".", "append", "(", "import_module", "(", "module", ")", ")", "except", "ImportError", ":", "if", "not", "safe", ":", "raise", "return", "all" ]
Safely import a list of *modules*
[ "Safely", "import", "a", "list", "of", "*", "modules", "*" ]
python
train
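A quick usage sketch; the import path follows this record's `path` field, and the module names are made up:

from pulsar.utils.importer import import_modules

# Missing modules are silently skipped when safe=True (the default).
mods = import_modules(['os', 'json', 'no_such_module'])
print([m.__name__ for m in mods])  # ['os', 'json']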
sosreport/sos
sos/plugins/__init__.py
https://github.com/sosreport/sos/blob/2ebc04da53dc871c8dd5243567afa4f8592dca29/sos/plugins/__init__.py#L69-L81
def _node_type(st):
    """ return a string indicating the type of special node represented by
        the stat buffer st (block, character, fifo, socket).
    """
    _types = [
        (stat.S_ISBLK, "block device"),
        (stat.S_ISCHR, "character device"),
        (stat.S_ISFIFO, "named pipe"),
        (stat.S_ISSOCK, "socket")
    ]
    for t in _types:
        if t[0](st.st_mode):
            return t[1]
[ "def", "_node_type", "(", "st", ")", ":", "_types", "=", "[", "(", "stat", ".", "S_ISBLK", ",", "\"block device\"", ")", ",", "(", "stat", ".", "S_ISCHR", ",", "\"character device\"", ")", ",", "(", "stat", ".", "S_ISFIFO", ",", "\"named pipe\"", ")", ",", "(", "stat", ".", "S_ISSOCK", ",", "\"socket\"", ")", "]", "for", "t", "in", "_types", ":", "if", "t", "[", "0", "]", "(", "st", ".", "st_mode", ")", ":", "return", "t", "[", "1", "]" ]
return a string indicating the type of special node represented by the stat buffer st (block, character, fifo, socket).
[ "return", "a", "string", "indicating", "the", "type", "of", "special", "node", "represented", "by", "the", "stat", "buffer", "st", "(", "block", "character", "fifo", "socket", ")", "." ]
python
train
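A usage sketch; the function is module-private, so this assumes it is called from within the module that defines it, on a typical Linux host:

import os

# /dev/null is a character device on Linux; for regular files
# _node_type falls through every check and returns None.
st = os.stat('/dev/null')
print(_node_type(st))  # 'character device'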
jmcgeheeiv/pyfakefs
pyfakefs/fake_filesystem.py
https://github.com/jmcgeheeiv/pyfakefs/blob/6c36fb8987108107fc861fc3013620d46c7d2f9c/pyfakefs/fake_filesystem.py#L2389-L2428
def add_real_file(self, source_path, read_only=True, target_path=None):
    """Create `file_path`, including all the parent directories along the
    way, for an existing real file. The contents of the real file are read
    only on demand.

    Args:
        source_path: Path to an existing file in the real file system
        read_only: If `True` (the default), writing to the fake file
            raises an exception. Otherwise, writing to the file changes
            the fake file only.
        target_path: If given, the path of the target file,
            otherwise it is equal to `source_path`.

    Returns:
        the newly created FakeFile object.

    Raises:
        OSError: if the file does not exist in the real file system.
        IOError: if the file already exists in the fake file system.

    .. note:: On most systems, accessing the fake file's contents may
        update both the real and fake files' `atime` (access time).
        In this particular case, `add_real_file()` violates the rule
        that `pyfakefs` must not modify the real file system.
    """
    target_path = target_path or source_path
    source_path = make_string_path(source_path)
    target_path = self.make_string_path(target_path)
    real_stat = os.stat(source_path)

    fake_file = self.create_file_internally(target_path,
                                            read_from_real_fs=True)

    # for read-only mode, remove the write/executable permission bits
    fake_file.stat_result.set_from_stat_result(real_stat)
    if read_only:
        fake_file.st_mode &= 0o777444
    fake_file.file_path = source_path
    self.change_disk_usage(fake_file.size, fake_file.name,
                           fake_file.st_dev)
    return fake_file
[ "def", "add_real_file", "(", "self", ",", "source_path", ",", "read_only", "=", "True", ",", "target_path", "=", "None", ")", ":", "target_path", "=", "target_path", "or", "source_path", "source_path", "=", "make_string_path", "(", "source_path", ")", "target_path", "=", "self", ".", "make_string_path", "(", "target_path", ")", "real_stat", "=", "os", ".", "stat", "(", "source_path", ")", "fake_file", "=", "self", ".", "create_file_internally", "(", "target_path", ",", "read_from_real_fs", "=", "True", ")", "# for read-only mode, remove the write/executable permission bits", "fake_file", ".", "stat_result", ".", "set_from_stat_result", "(", "real_stat", ")", "if", "read_only", ":", "fake_file", ".", "st_mode", "&=", "0o777444", "fake_file", ".", "file_path", "=", "source_path", "self", ".", "change_disk_usage", "(", "fake_file", ".", "size", ",", "fake_file", ".", "name", ",", "fake_file", ".", "st_dev", ")", "return", "fake_file" ]
Create `file_path`, including all the parent directories along the way, for an existing real file. The contents of the real file are read only on demand.

Args:
    source_path: Path to an existing file in the real file system
    read_only: If `True` (the default), writing to the fake file raises an exception. Otherwise, writing to the file changes the fake file only.
    target_path: If given, the path of the target file, otherwise it is equal to `source_path`.

Returns:
    the newly created FakeFile object.

Raises:
    OSError: if the file does not exist in the real file system.
    IOError: if the file already exists in the fake file system.

.. note:: On most systems, accessing the fake file's contents may update both the real and fake files' `atime` (access time). In this particular case, `add_real_file()` violates the rule that `pyfakefs` must not modify the real file system.
[ "Create", "file_path", "including", "all", "the", "parent", "directories", "along", "the", "way", "for", "an", "existing", "real", "file", ".", "The", "contents", "of", "the", "real", "file", "are", "read", "only", "on", "demand", "." ]
python
train
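A minimal pytest sketch using the `fs` fixture pyfakefs provides (the fixture exposes the fake filesystem object this method lives on); the mapped path is arbitrary and the test assumes it exists on the host:

def test_maps_real_file(fs):
    # Map a real file into the fake filesystem, read-only by default.
    fake = fs.add_real_file('/etc/hostname', target_path='/fake/hostname')
    assert fake.file_path == '/etc/hostname'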
rochacbruno/dynaconf
dynaconf/base.py
https://github.com/rochacbruno/dynaconf/blob/5a7cc8f8252251cbdf4f4112965801f9dfe2831d/dynaconf/base.py#L333-L349
def get_environ(self, key, default=None, cast=None):
    """Get value from environment variable using os.environ.get

    :param key: The name of the setting value, will always be upper case
    :param default: In case of not found it will be returned
    :param cast: Should cast into @int, @float, @bool or @json?
        Or cast must be True to use cast inference
    :return: The value if found, default or None
    """
    key = key.upper()
    data = self.environ.get(key, default)
    if data:
        if cast in converters:
            data = converters.get(cast)(data)
        if cast is True:
            data = parse_conf_data(data, tomlfy=True)
    return data
[ "def", "get_environ", "(", "self", ",", "key", ",", "default", "=", "None", ",", "cast", "=", "None", ")", ":", "key", "=", "key", ".", "upper", "(", ")", "data", "=", "self", ".", "environ", ".", "get", "(", "key", ",", "default", ")", "if", "data", ":", "if", "cast", "in", "converters", ":", "data", "=", "converters", ".", "get", "(", "cast", ")", "(", "data", ")", "if", "cast", "is", "True", ":", "data", "=", "parse_conf_data", "(", "data", ",", "tomlfy", "=", "True", ")", "return", "data" ]
Get value from environment variable using os.environ.get

:param key: The name of the setting value, will always be upper case
:param default: In case of not found it will be returned
:param cast: Should cast into @int, @float, @bool or @json? Or cast must be True to use cast inference
:return: The value if found, default or None
[ "Get", "value", "from", "environment", "variable", "using", "os", ".", "environ", ".", "get" ]
python
train
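A hedged usage sketch. The '@int' converter key follows the docstring above; the `settings` singleton import is an assumption about this dynaconf version:

import os
from dynaconf import settings  # assumption: module-level settings object

os.environ['MAX_WORKERS'] = '4'
print(settings.get_environ('max_workers', cast='@int'))  # 4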
F-Secure/see
see/context/resources/network.py
https://github.com/F-Secure/see/blob/3e053e52a45229f96a12db9e98caf7fb3880e811/see/context/resources/network.py#L187-L195
def generate_address(hypervisor, configuration):
    """Generate a valid IP address according to the configuration."""
    ipv4 = configuration['ipv4']
    prefix = configuration['prefix']
    subnet_prefix = configuration['subnet_prefix']
    subnet_address = ipaddress.IPv4Network(u'/'.join((str(ipv4), str(prefix))))
    net_address_pool = subnet_address.subnets(new_prefix=subnet_prefix)

    return address_lookup(hypervisor, net_address_pool)
[ "def", "generate_address", "(", "hypervisor", ",", "configuration", ")", ":", "ipv4", "=", "configuration", "[", "'ipv4'", "]", "prefix", "=", "configuration", "[", "'prefix'", "]", "subnet_prefix", "=", "configuration", "[", "'subnet_prefix'", "]", "subnet_address", "=", "ipaddress", ".", "IPv4Network", "(", "u'/'", ".", "join", "(", "(", "str", "(", "ipv4", ")", ",", "str", "(", "prefix", ")", ")", ")", ")", "net_address_pool", "=", "subnet_address", ".", "subnets", "(", "new_prefix", "=", "subnet_prefix", ")", "return", "address_lookup", "(", "hypervisor", ",", "net_address_pool", ")" ]
Generate a valid IP address according to the configuration.
[ "Generate", "a", "valid", "IP", "address", "according", "to", "the", "configuration", "." ]
python
train
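The subnetting step in isolation, using only the standard library; the addresses are made up:

import ipaddress

net = ipaddress.IPv4Network(u'10.0.0.0/16')
pool = net.subnets(new_prefix=24)   # generator of /24 candidates
print(next(pool))                   # 10.0.0.0/24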
quantopian/empyrical
empyrical/stats.py
https://github.com/quantopian/empyrical/blob/badbdca75f5b293f28b5e947974894de041d6868/empyrical/stats.py#L885-L931
def excess_sharpe(returns, factor_returns, out=None):
    """
    Determines the Excess Sharpe of a strategy.

    Parameters
    ----------
    returns : pd.Series or np.ndarray
        Daily returns of the strategy, noncumulative.
        - See full explanation in :func:`~empyrical.stats.cum_returns`.
    factor_returns: float / series
        Benchmark return to compare returns against.
    out : array-like, optional
        Array to use as output buffer.
        If not passed, a new array will be created.

    Returns
    -------
    excess_sharpe : float

    Note
    ----
    The excess Sharpe is a simplified Information Ratio that uses
    tracking error rather than "active risk" as the denominator.
    """
    allocated_output = out is None
    if allocated_output:
        out = np.empty(returns.shape[1:])

    returns_1d = returns.ndim == 1

    if len(returns) < 2:
        out[()] = np.nan
        if returns_1d:
            out = out.item()
        return out

    active_return = _adjust_returns(returns, factor_returns)
    tracking_error = np.nan_to_num(nanstd(active_return, ddof=1, axis=0))

    out = np.divide(
        nanmean(active_return, axis=0, out=out),
        tracking_error,
        out=out,
    )
    if returns_1d:
        out = out.item()
    return out
[ "def", "excess_sharpe", "(", "returns", ",", "factor_returns", ",", "out", "=", "None", ")", ":", "allocated_output", "=", "out", "is", "None", "if", "allocated_output", ":", "out", "=", "np", ".", "empty", "(", "returns", ".", "shape", "[", "1", ":", "]", ")", "returns_1d", "=", "returns", ".", "ndim", "==", "1", "if", "len", "(", "returns", ")", "<", "2", ":", "out", "[", "(", ")", "]", "=", "np", ".", "nan", "if", "returns_1d", ":", "out", "=", "out", ".", "item", "(", ")", "return", "out", "active_return", "=", "_adjust_returns", "(", "returns", ",", "factor_returns", ")", "tracking_error", "=", "np", ".", "nan_to_num", "(", "nanstd", "(", "active_return", ",", "ddof", "=", "1", ",", "axis", "=", "0", ")", ")", "out", "=", "np", ".", "divide", "(", "nanmean", "(", "active_return", ",", "axis", "=", "0", ",", "out", "=", "out", ")", ",", "tracking_error", ",", "out", "=", "out", ",", ")", "if", "returns_1d", ":", "out", "=", "out", ".", "item", "(", ")", "return", "out" ]
Determines the Excess Sharpe of a strategy.

Parameters
----------
returns : pd.Series or np.ndarray
    Daily returns of the strategy, noncumulative.
    - See full explanation in :func:`~empyrical.stats.cum_returns`.
factor_returns: float / series
    Benchmark return to compare returns against.
out : array-like, optional
    Array to use as output buffer. If not passed, a new array will be created.

Returns
-------
excess_sharpe : float

Note
----
The excess Sharpe is a simplified Information Ratio that uses tracking error rather than "active risk" as the denominator.
[ "Determines", "the", "Excess", "Sharpe", "of", "a", "strategy", "." ]
python
train
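A small usage sketch with made-up return series, assuming excess_sharpe is exported at the empyrical top level:

import numpy as np
from empyrical import excess_sharpe

returns = np.array([0.01, 0.02, -0.005, 0.015])
benchmark = np.array([0.005, 0.01, 0.0, 0.01])
print(excess_sharpe(returns, benchmark))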
tariqdaouda/pyGeno
pyGeno/tools/parsers/VCFTools.py
https://github.com/tariqdaouda/pyGeno/blob/474b1250bf78ce5c7e7c3bbbfdbad9635d5a7d14/pyGeno/tools/parsers/VCFTools.py#L91-L142
def parse(self, filename, gziped=False, stream=False):
    """opens a file"""
    self.stream = stream
    if gziped:
        self.f = gzip.open(filename)
    else:
        self.f = open(filename)

    self.filename = filename
    self.gziped = gziped

    lineId = 0
    inLegend = True
    while inLegend:
        ll = self.f.readline()
        l = ll.replace('\r', '\n').replace('\n', '')
        if l[:2] == '##':
            eqPos = l.find('=')
            key = l[2:eqPos]
            values = l[eqPos+1:].strip()
            if l[eqPos+1] != '<':
                self.meta[key] = values
            else:
                if key not in self.meta:
                    self.meta[key] = {}
                # remove the < and > that surround the string
                svalues = l[eqPos+2:-1].split(',')
                idKey = svalues[0].split('=')[1]
                self.meta[key][idKey] = {}
                i = 1
                for v in svalues[1:]:
                    sv = v.split("=")
                    field = sv[0]
                    value = sv[1]
                    if field.lower() == 'description':
                        self.meta[key][idKey][field] = ','.join(svalues[i:])[len(field)+2:-1]
                        break
                    self.meta[key][idKey][field] = value
                    i += 1
        elif l[:6] == '#CHROM':  # we are in legend
            sl = l.split('\t')
            for i in range(len(sl)):
                self.legend[sl[i]] = i
                self.dnegel[i] = sl[i]
            break
        lineId += 1

    if not stream:
        self.lines = self.f.readlines()
        self.f.close()
[ "def", "parse", "(", "self", ",", "filename", ",", "gziped", "=", "False", ",", "stream", "=", "False", ")", ":", "self", ".", "stream", "=", "stream", "if", "gziped", ":", "self", ".", "f", "=", "gzip", ".", "open", "(", "filename", ")", "else", ":", "self", ".", "f", "=", "open", "(", "filename", ")", "self", ".", "filename", "=", "filename", "self", ".", "gziped", "=", "gziped", "lineId", "=", "0", "inLegend", "=", "True", "while", "inLegend", ":", "ll", "=", "self", ".", "f", ".", "readline", "(", ")", "l", "=", "ll", ".", "replace", "(", "'\\r'", ",", "'\\n'", ")", ".", "replace", "(", "'\\n'", ",", "''", ")", "if", "l", "[", ":", "2", "]", "==", "'##'", ":", "eqPos", "=", "l", ".", "find", "(", "'='", ")", "key", "=", "l", "[", "2", ":", "eqPos", "]", "values", "=", "l", "[", "eqPos", "+", "1", ":", "]", ".", "strip", "(", ")", "if", "l", "[", "eqPos", "+", "1", "]", "!=", "'<'", ":", "self", ".", "meta", "[", "key", "]", "=", "values", "else", ":", "if", "key", "not", "in", "self", ".", "meta", ":", "self", ".", "meta", "[", "key", "]", "=", "{", "}", "svalues", "=", "l", "[", "eqPos", "+", "2", ":", "-", "1", "]", ".", "split", "(", "','", ")", "#remove the < and > that surounf the string ", "idKey", "=", "svalues", "[", "0", "]", ".", "split", "(", "'='", ")", "[", "1", "]", "self", ".", "meta", "[", "key", "]", "[", "idKey", "]", "=", "{", "}", "i", "=", "1", "for", "v", "in", "svalues", "[", "1", ":", "]", ":", "sv", "=", "v", ".", "split", "(", "\"=\"", ")", "field", "=", "sv", "[", "0", "]", "value", "=", "sv", "[", "1", "]", "if", "field", ".", "lower", "(", ")", "==", "'description'", ":", "self", ".", "meta", "[", "key", "]", "[", "idKey", "]", "[", "field", "]", "=", "','", ".", "join", "(", "svalues", "[", "i", ":", "]", ")", "[", "len", "(", "field", ")", "+", "2", ":", "-", "1", "]", "break", "self", ".", "meta", "[", "key", "]", "[", "idKey", "]", "[", "field", "]", "=", "value", "i", "+=", "1", "elif", "l", "[", ":", "6", "]", "==", "'#CHROM'", ":", "#we are in legend", "sl", "=", "l", ".", "split", "(", "'\\t'", ")", "for", "i", "in", "range", "(", "len", "(", "sl", ")", ")", ":", "self", ".", "legend", "[", "sl", "[", "i", "]", "]", "=", "i", "self", ".", "dnegel", "[", "i", "]", "=", "sl", "[", "i", "]", "break", "lineId", "+=", "1", "if", "not", "stream", ":", "self", ".", "lines", "=", "self", ".", "f", ".", "readlines", "(", ")", "self", ".", "f", ".", "close", "(", ")" ]
opens a file
[ "opens", "a", "file" ]
python
train
trustar/trustar-python
trustar/models/tag.py
https://github.com/trustar/trustar-python/blob/707d51adc58d68aed7de12a4ca37949cb75cf122/trustar/models/tag.py#L48-L65
def to_dict(self, remove_nones=False):
    """
    Creates a dictionary representation of the tag.

    :param remove_nones: Whether ``None`` values should be filtered out
        of the dictionary. Defaults to ``False``.
    :return: A dictionary representation of the tag.
    """
    if remove_nones:
        d = super().to_dict(remove_nones=True)
    else:
        d = {
            'name': self.name,
            'id': self.id,
            'enclaveId': self.enclave_id
        }
    return d
[ "def", "to_dict", "(", "self", ",", "remove_nones", "=", "False", ")", ":", "if", "remove_nones", ":", "d", "=", "super", "(", ")", ".", "to_dict", "(", "remove_nones", "=", "True", ")", "else", ":", "d", "=", "{", "'name'", ":", "self", ".", "name", ",", "'id'", ":", "self", ".", "id", ",", "'enclaveId'", ":", "self", ".", "enclave_id", "}", "return", "d" ]
Creates a dictionary representation of the tag.

:param remove_nones: Whether ``None`` values should be filtered out of the dictionary. Defaults to ``False``.
:return: A dictionary representation of the tag.
[ "Creates", "a", "dictionary", "representation", "of", "the", "tag", "." ]
python
train
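A hedged usage sketch; the import path follows this record's `path` field, while the keyword arguments mirror the attributes the method reads, so treat the exact Tag constructor signature as an assumption:

from trustar.models.tag import Tag

tag = Tag(name='malware', id='123', enclave_id='abc')  # hypothetical values
print(tag.to_dict())  # {'name': 'malware', 'id': '123', 'enclaveId': 'abc'}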
eraclitux/ipcampy
ipcampy/foscam.py
https://github.com/eraclitux/ipcampy/blob/bffd1c4df9006705cffa5b83a090b0db90cbcbcf/ipcampy/foscam.py#L28-L52
def snap(self, path=None):
    """Get a snapshot and save it to disk."""
    if path is None:
        path = "/tmp"
    else:
        path = path.rstrip("/")
    day_dir = datetime.datetime.now().strftime("%d%m%Y")
    hour_dir = datetime.datetime.now().strftime("%H%M")
    ensure_snapshot_dir(path + "/" + self.cam_id + "/" + day_dir + "/" + hour_dir)
    f_path = "{0}/{1}/{2}/{3}/{4}.jpg".format(
        path,
        self.cam_id,
        day_dir,
        hour_dir,
        datetime.datetime.now().strftime("%S"),
    )
    urllib.urlretrieve(
        'http://{0}/snapshot.cgi?user={1}&pwd={2}'.format(
            self.address,
            self.user,
            self.pswd,
        ),
        f_path,
    )
[ "def", "snap", "(", "self", ",", "path", "=", "None", ")", ":", "if", "path", "is", "None", ":", "path", "=", "\"/tmp\"", "else", ":", "path", "=", "path", ".", "rstrip", "(", "\"/\"", ")", "day_dir", "=", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "strftime", "(", "\"%d%m%Y\"", ")", "hour_dir", "=", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "strftime", "(", "\"%H%M\"", ")", "ensure_snapshot_dir", "(", "path", "+", "\"/\"", "+", "self", ".", "cam_id", "+", "\"/\"", "+", "day_dir", "+", "\"/\"", "+", "hour_dir", ")", "f_path", "=", "\"{0}/{1}/{2}/{3}/{4}.jpg\"", ".", "format", "(", "path", ",", "self", ".", "cam_id", ",", "day_dir", ",", "hour_dir", ",", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "strftime", "(", "\"%S\"", ")", ",", ")", "urllib", ".", "urlretrieve", "(", "'http://{0}/snapshot.cgi?user={1}&pwd={2}'", ".", "format", "(", "self", ".", "address", ",", "self", ".", "user", ",", "self", ".", "pswd", ",", ")", ",", "f_path", ",", ")" ]
Get a snapshot and save it to disk.
[ "Get", "a", "snapshot", "and", "save", "it", "to", "disk", "." ]
python
train
jimporter/bfg9000
bfg9000/versioning.py
https://github.com/jimporter/bfg9000/blob/33615fc67573f0d416297ee9a98dd1ec8b1aa960/bfg9000/versioning.py#L37-L88
def simplify_specifiers(spec):
    """Try to simplify a SpecifierSet by combining redundant specifiers."""
    def key(s):
        return (s.version, 1 if s.operator in ['>=', '<'] else 2)

    def in_bounds(v, lo, hi):
        if lo and v not in lo:
            return False
        if hi and v not in hi:
            return False
        return True

    def err(reason='inconsistent'):
        return ValueError('{} specifier set {}'.format(reason, spec))

    gt = None
    lt = None
    eq = None
    ne = []
    for i in spec:
        if i.operator == '==':
            if eq is None:
                eq = i
            elif eq != i:  # pragma: no branch
                raise err()
        elif i.operator == '!=':
            ne.append(i)
        elif i.operator in ['>', '>=']:
            gt = i if gt is None else max(gt, i, key=key)
        elif i.operator in ['<', '<=']:
            lt = i if lt is None else min(lt, i, key=key)
        else:
            raise err('invalid')

    ne = [i for i in ne if in_bounds(i.version, gt, lt)]

    if eq:
        if (any(i.version in eq for i in ne) or
                not in_bounds(eq.version, gt, lt)):
            raise err()
        return SpecifierSet(str(eq))

    if lt and gt:
        if lt.version not in gt or gt.version not in lt:
            raise err()
        if (gt.version == lt.version and gt.operator == '>=' and
                lt.operator == '<='):
            return SpecifierSet('=={}'.format(gt.version))

    return SpecifierSet(
        ','.join(str(i) for i in chain(iterate(gt), iterate(lt), ne))
    )
[ "def", "simplify_specifiers", "(", "spec", ")", ":", "def", "key", "(", "s", ")", ":", "return", "(", "s", ".", "version", ",", "1", "if", "s", ".", "operator", "in", "[", "'>='", ",", "'<'", "]", "else", "2", ")", "def", "in_bounds", "(", "v", ",", "lo", ",", "hi", ")", ":", "if", "lo", "and", "v", "not", "in", "lo", ":", "return", "False", "if", "hi", "and", "v", "not", "in", "hi", ":", "return", "False", "return", "True", "def", "err", "(", "reason", "=", "'inconsistent'", ")", ":", "return", "ValueError", "(", "'{} specifier set {}'", ".", "format", "(", "reason", ",", "spec", ")", ")", "gt", "=", "None", "lt", "=", "None", "eq", "=", "None", "ne", "=", "[", "]", "for", "i", "in", "spec", ":", "if", "i", ".", "operator", "==", "'=='", ":", "if", "eq", "is", "None", ":", "eq", "=", "i", "elif", "eq", "!=", "i", ":", "# pragma: no branch", "raise", "err", "(", ")", "elif", "i", ".", "operator", "==", "'!='", ":", "ne", ".", "append", "(", "i", ")", "elif", "i", ".", "operator", "in", "[", "'>'", ",", "'>='", "]", ":", "gt", "=", "i", "if", "gt", "is", "None", "else", "max", "(", "gt", ",", "i", ",", "key", "=", "key", ")", "elif", "i", ".", "operator", "in", "[", "'<'", ",", "'<='", "]", ":", "lt", "=", "i", "if", "lt", "is", "None", "else", "min", "(", "lt", ",", "i", ",", "key", "=", "key", ")", "else", ":", "raise", "err", "(", "'invalid'", ")", "ne", "=", "[", "i", "for", "i", "in", "ne", "if", "in_bounds", "(", "i", ".", "version", ",", "gt", ",", "lt", ")", "]", "if", "eq", ":", "if", "(", "any", "(", "i", ".", "version", "in", "eq", "for", "i", "in", "ne", ")", "or", "not", "in_bounds", "(", "eq", ".", "version", ",", "gt", ",", "lt", ")", ")", ":", "raise", "err", "(", ")", "return", "SpecifierSet", "(", "str", "(", "eq", ")", ")", "if", "lt", "and", "gt", ":", "if", "lt", ".", "version", "not", "in", "gt", "or", "gt", ".", "version", "not", "in", "lt", ":", "raise", "err", "(", ")", "if", "(", "gt", ".", "version", "==", "lt", ".", "version", "and", "gt", ".", "operator", "==", "'>='", "and", "lt", ".", "operator", "==", "'<='", ")", ":", "return", "SpecifierSet", "(", "'=={}'", ".", "format", "(", "gt", ".", "version", ")", ")", "return", "SpecifierSet", "(", "','", ".", "join", "(", "str", "(", "i", ")", "for", "i", "in", "chain", "(", "iterate", "(", "gt", ")", ",", "iterate", "(", "lt", ")", ",", "ne", ")", ")", ")" ]
Try to simplify a SpecifierSet by combining redundant specifiers.
[ "Try", "to", "simplify", "a", "SpecifierSet", "by", "combining", "redundant", "specifiers", "." ]
python
train
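A usage sketch built on packaging's SpecifierSet; the import path for simplify_specifiers follows this record's `path` field:

from packaging.specifiers import SpecifierSet
from bfg9000.versioning import simplify_specifiers

# The redundant lower bound >=1.0 is absorbed by >=1.2.
print(simplify_specifiers(SpecifierSet('>=1.0,>=1.2,<2.0')))  # <2.0,>=1.2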
pymc-devs/pymc
pymc/Model.py
https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/Model.py#L777-L797
def restore_sampler_state(self):
    """
    Restore the state of the sampler to the state stored in the database.
    """
    state = self.db.getstate() or {}

    # Restore sampler's state
    sampler_state = state.get('sampler', {})
    self.__dict__.update(sampler_state)

    # Restore stochastic parameters state
    stoch_state = state.get('stochastics', {})
    for sm in self.stochastics:
        try:
            sm.value = stoch_state[sm.__name__]
        except:
            warnings.warn(
                'Failed to restore state of stochastic %s from %s backend' %
                (sm.__name__, self.db.__name__))
[ "def", "restore_sampler_state", "(", "self", ")", ":", "state", "=", "self", ".", "db", ".", "getstate", "(", ")", "or", "{", "}", "# Restore sampler's state", "sampler_state", "=", "state", ".", "get", "(", "'sampler'", ",", "{", "}", ")", "self", ".", "__dict__", ".", "update", "(", "sampler_state", ")", "# Restore stochastic parameters state", "stoch_state", "=", "state", ".", "get", "(", "'stochastics'", ",", "{", "}", ")", "for", "sm", "in", "self", ".", "stochastics", ":", "try", ":", "sm", ".", "value", "=", "stoch_state", "[", "sm", ".", "__name__", "]", "except", ":", "warnings", ".", "warn", "(", "'Failed to restore state of stochastic %s from %s backend'", "%", "(", "sm", ".", "__name__", ",", "self", ".", "db", ".", "__name__", ")", ")" ]
Restore the state of the sampler to the state stored in the database.
[ "Restore", "the", "state", "of", "the", "sampler", "and", "to", "the", "state", "stored", "in", "the", "database", "." ]
python
train
chaimleib/intervaltree
intervaltree/intervaltree.py
https://github.com/chaimleib/intervaltree/blob/ffb2b1667f8b832e89324a75a175be8440504c9d/intervaltree/intervaltree.py#L600-L620
def overlaps_range(self, begin, end):
    """
    Returns whether some interval in the tree overlaps the given
    range. Returns False if given a null interval over which to
    test.

    Completes in O(r*log n) time, where r is the range length and n is
    the table size.
    :rtype: bool
    """
    if self.is_empty():
        return False
    elif begin >= end:
        return False
    elif self.overlaps_point(begin):
        return True
    return any(
        self.overlaps_point(bound)
        for bound in self.boundary_table
        if begin < bound < end
    )
[ "def", "overlaps_range", "(", "self", ",", "begin", ",", "end", ")", ":", "if", "self", ".", "is_empty", "(", ")", ":", "return", "False", "elif", "begin", ">=", "end", ":", "return", "False", "elif", "self", ".", "overlaps_point", "(", "begin", ")", ":", "return", "True", "return", "any", "(", "self", ".", "overlaps_point", "(", "bound", ")", "for", "bound", "in", "self", ".", "boundary_table", "if", "begin", "<", "bound", "<", "end", ")" ]
Returns whether some interval in the tree overlaps the given range.
Returns False if given a null interval over which to test.

Completes in O(r*log n) time, where r is the range length and n is the table size.
:rtype: bool
[ "Returns", "whether", "some", "interval", "in", "the", "tree", "overlaps", "the", "given", "range", ".", "Returns", "False", "if", "given", "a", "null", "interval", "over", "which", "to", "test", "." ]
python
train
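A runnable usage sketch showing the half-open interval semantics the method implements:

from intervaltree import IntervalTree

tree = IntervalTree()
tree[1:5] = 'a'                     # interval [1, 5) with data 'a'
print(tree.overlaps_range(4, 10))   # True: 4 lies inside [1, 5)
print(tree.overlaps_range(5, 10))   # False: intervals are half-open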
lucuma/Clay
clay/markdown_ext/md_captions.py
https://github.com/lucuma/Clay/blob/620d37086b712bdc4d13930ced43a5b7c9a5f46d/clay/markdown_ext/md_captions.py#L98-L103
def extendMarkdown(self, md, md_globals):
    """ Add an instance of FigcaptionProcessor to BlockParser. """
    # def_list = 'def_list' in md.registeredExtensions
    md.parser.blockprocessors.add(
        'figcaption', FigcaptionProcessor(md.parser), '<ulist')
[ "def", "extendMarkdown", "(", "self", ",", "md", ",", "md_globals", ")", ":", "# def_list = 'def_list' in md.registeredExtensions", "md", ".", "parser", ".", "blockprocessors", ".", "add", "(", "'figcaption'", ",", "FigcaptionProcessor", "(", "md", ".", "parser", ")", ",", "'<ulist'", ")" ]
Add an instance of FigcaptionProcessor to BlockParser.
[ "Add", "an", "instance", "of", "FigcaptionProcessor", "to", "BlockParser", "." ]
python
train
LISE-B26/pylabcontrol
build/lib/pylabcontrol/src/gui/qt_b26_gui.py
https://github.com/LISE-B26/pylabcontrol/blob/67482e5157fcd1c40705e5c2cacfb93564703ed0/build/lib/pylabcontrol/src/gui/qt_b26_gui.py#L1148-L1180
def edit_tree_item(self):
    """
    If the sender is self.tree_gui_settings, this will open a file dialog
    and ask for a filepath. This filepath will be updated in the field of
    self.tree_gui_settings that has been double clicked.
    """

    def open_path_dialog(path):
        """
        opens a file dialog to get the path to a folder and returns it
        """
        dialog = QtWidgets.QFileDialog()
        dialog.setFileMode(QtWidgets.QFileDialog.Directory)
        dialog.setOption(QtWidgets.QFileDialog.ShowDirsOnly, True)
        path = dialog.getExistingDirectory(self, 'Select a folder:', path)
        return path

    tree = self.sender()

    if tree == self.tree_gui_settings:
        index = tree.selectedIndexes()[0]
        model = index.model()
        if index.column() == 1:
            path = model.itemFromIndex(index).text()
            path = str(open_path_dialog(path))
            key = str(model.itemFromIndex(model.index(index.row(), 0)).text())
            if path != "":
                self.gui_settings.update({key: str(path)})
                self.fill_treeview(tree, self.gui_settings)
[ "def", "edit_tree_item", "(", "self", ")", ":", "def", "open_path_dialog", "(", "path", ")", ":", "\"\"\"\n opens a file dialog to get the path to a file and\n \"\"\"", "dialog", "=", "QtWidgets", ".", "QFileDialog", "(", ")", "dialog", ".", "setFileMode", "(", "QtWidgets", ".", "QFileDialog", ".", "Directory", ")", "dialog", ".", "setOption", "(", "QtWidgets", ".", "QFileDialog", ".", "ShowDirsOnly", ",", "True", ")", "path", "=", "dialog", ".", "getExistingDirectory", "(", "self", ",", "'Select a folder:'", ",", "path", ")", "return", "path", "tree", "=", "self", ".", "sender", "(", ")", "if", "tree", "==", "self", ".", "tree_gui_settings", ":", "index", "=", "tree", ".", "selectedIndexes", "(", ")", "[", "0", "]", "model", "=", "index", ".", "model", "(", ")", "if", "index", ".", "column", "(", ")", "==", "1", ":", "path", "=", "model", ".", "itemFromIndex", "(", "index", ")", ".", "text", "(", ")", "path", "=", "str", "(", "open_path_dialog", "(", "path", ")", ")", "key", "=", "str", "(", "model", ".", "itemFromIndex", "(", "model", ".", "index", "(", "index", ".", "row", "(", ")", ",", "0", ")", ")", ".", "text", "(", ")", ")", "if", "path", "!=", "\"\"", ":", "self", ".", "gui_settings", ".", "update", "(", "{", "key", ":", "str", "(", "path", ")", "}", ")", "self", ".", "fill_treeview", "(", "tree", ",", "self", ".", "gui_settings", ")" ]
If the sender is self.tree_gui_settings, this will open a filedialog and ask for a filepath. This filepath will be updated in the field of self.tree_gui_settings that has been double clicked.
[ "if", "sender", "is", "self", ".", "tree_gui_settings", "this", "will", "open", "a", "filedialog", "and", "ask", "for", "a", "filepath", "this", "filepath", "will", "be", "updated", "in", "the", "field", "of", "self", ".", "tree_gui_settings", "that", "has", "been", "double", "clicked" ]
python
train
ojengwa/accounting
accounting/accounting.py
https://github.com/ojengwa/accounting/blob/6343cf373a5c57941e407a92c101ac4bc45382e3/accounting/accounting.py#L179-L223
def format(self, number, **kwargs):
    """Format a given number.

    Format a number, with comma-separated thousands and
    custom precision/decimal places.

    Localise by overriding the precision and thousand / decimal separators.
    2nd parameter `precision` can be an object matching `settings.number`.

    Args:
        number (TYPE): Description
        precision (TYPE): Description
        thousand (TYPE): Description
        decimal (TYPE): Description

    Returns:
        name (TYPE): Description
    """
    # Recursively format lists
    if check_type(number, 'list'):
        return map(lambda val: self.format(val, **kwargs), number)

    # Clean up number
    number = self.parse(number)

    # Build options object from second param (if object) or all params,
    # extending defaults (dict.update returns None, so copy first)
    options = dict(self.settings['number'])
    if check_type(kwargs, 'dict'):
        options.update(kwargs)

    # Clean up precision
    precision = self._change_precision(options['precision'])
    negative = (lambda num: "-" if num < 0 else "")(number)
    base = str(int(self.to_fixed(abs(number) or 0, precision)))
    mod = (lambda num: len(num) % 3 if len(num) > 3 else 0)(base)

    # Format the number: insert the thousand separator every three digits
    num = negative + (lambda n: base[0:n] + options['thousand'] if n else '')(mod)
    num += re.sub(r'(\d{3})(?=\d)', r'\1' + options['thousand'], base[mod:])
    num += (lambda val: options['decimal'] +
            self.to_fixed(abs(number), precision).split('.')[1]
            if val else '')(precision)

    return num
[ "def", "format", "(", "self", ",", "number", ",", "*", "*", "kwargs", ")", ":", "# Resursively format lists", "if", "check_type", "(", "number", ",", "'list'", ")", ":", "return", "map", "(", "lambda", "val", ":", "self", ".", "format", "(", "val", ",", "*", "*", "kwargs", ")", ")", "# Clean up number", "number", "=", "self", ".", "parse", "(", "number", ")", "# Build options object from second param (if object) or all params,", "# extending defaults", "if", "check_type", "(", "kwargs", ",", "'dict'", ")", ":", "options", "=", "(", "self", ".", "settings", "[", "'number'", "]", ".", "update", "(", "kwargs", ")", ")", "# Clean up precision", "precision", "=", "self", ".", "_change_precision", "(", "options", "[", "'precision'", "]", ")", "negative", "=", "(", "lambda", "num", ":", "\"-\"", "if", "num", "<", "0", "else", "\"\"", ")", "(", "number", ")", "base", "=", "str", "(", "int", "(", "self", ".", "to_fixed", "(", "abs", "(", "number", ")", "or", "0", ",", "precision", ")", ")", ",", "10", ")", "mod", "=", "(", "lambda", "num", ":", "len", "(", "num", ")", "%", "3", "if", "len", "(", "num", ")", ">", "3", "else", "0", ")", "(", "base", ")", "# Format the number:", "num", "=", "negative", "+", "(", "lambda", "num", ":", "base", "[", "0", ":", "num", "]", "if", "num", "else", "''", ")", "(", "mod", ")", "num", "+=", "re", ".", "sub", "(", "'/(\\d{3})(?=\\d)/g'", ",", "'$1'", "+", "options", "[", "'thousand'", "]", ",", "base", "[", "mod", ":", "]", ")", "num", "+=", "(", "lambda", "val", ":", "options", "[", "'decimal'", "]", "+", "self", ".", "to_fixed", "(", "abs", "(", "number", ")", ",", "precision", ")", ".", "split", "(", "'.'", ")", "[", "1", "]", "if", "val", "else", "''", ")", "(", "precision", ")", "return", "num" ]
Format a given number.

Format a number, with comma-separated thousands and custom precision/decimal places.

Localise by overriding the precision and thousand / decimal separators. 2nd parameter `precision` can be an object matching `settings.number`.

Args:
    number (TYPE): Description
    precision (TYPE): Description
    thousand (TYPE): Description
    decimal (TYPE): Description

Returns:
    name (TYPE): Description
[ "Format", "a", "given", "number", "." ]
python
test
mdavidsaver/p4p
src/p4p/nt/scalar.py
https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/nt/scalar.py#L183-L203
def wrap(self, value, timestamp=None):
    """Pack python value into Value

    Accepts dict to explicitly initialize fields by name.
    Any other type is assigned to the 'value' field.
    """
    if isinstance(value, Value):
        return value
    elif isinstance(value, ntwrappercommon):
        return value.raw
    elif isinstance(value, dict):
        return self.Value(self.type, value)
    else:
        S, NS = divmod(float(timestamp or time.time()), 1.0)
        return self.Value(self.type, {
            'value': value,
            'timeStamp': {
                'secondsPastEpoch': S,
                'nanoseconds': NS * 1e9,
            },
        })
[ "def", "wrap", "(", "self", ",", "value", ",", "timestamp", "=", "None", ")", ":", "if", "isinstance", "(", "value", ",", "Value", ")", ":", "return", "value", "elif", "isinstance", "(", "value", ",", "ntwrappercommon", ")", ":", "return", "value", ".", "raw", "elif", "isinstance", "(", "value", ",", "dict", ")", ":", "return", "self", ".", "Value", "(", "self", ".", "type", ",", "value", ")", "else", ":", "S", ",", "NS", "=", "divmod", "(", "float", "(", "timestamp", "or", "time", ".", "time", "(", ")", ")", ",", "1.0", ")", "return", "self", ".", "Value", "(", "self", ".", "type", ",", "{", "'value'", ":", "value", ",", "'timeStamp'", ":", "{", "'secondsPastEpoch'", ":", "S", ",", "'nanoseconds'", ":", "NS", "*", "1e9", ",", "}", ",", "}", ")" ]
Pack python value into Value

Accepts dict to explicitly initialize fields by name. Any other type is assigned to the 'value' field.
[ "Pack", "python", "value", "into", "Value" ]
python
train
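The timestamp split used in the fallback branch above, in isolation:

import time

S, NS = divmod(float(time.time()), 1.0)
# S: whole seconds past the epoch; NS * 1e9: the nanoseconds remainder.
print(S, NS * 1e9)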
pyroscope/pyrocore
src/pyrocore/torrent/engine.py
https://github.com/pyroscope/pyrocore/blob/89ad01346a570943d20311a0b488440975876612/src/pyrocore/torrent/engine.py#L142-L190
def _fmt_files(filelist):
    """ Produce a file listing.
    """
    depth = max(i.path.count('/') for i in filelist)
    pad = ['\uFFFE'] * depth
    base_indent = ' ' * 38
    indent = 0
    result = []
    prev_path = pad
    sorted_files = sorted((i.path.split('/')[:-1] + pad,
                           i.path.rsplit('/', 1)[-1],
                           i) for i in filelist)
    for path, name, fileinfo in sorted_files:
        path = path[:depth]
        if path != prev_path:
            common = min([depth] + [
                idx
                for idx, (dirname, prev_name) in enumerate(zip(path, prev_path))
                if dirname != prev_name
            ])
            #result.append("!!%r %r" % (indent, common))
            #result.append("!!%r" % (prev_path,))
            #result.append("!!%r" % (path,))
            while indent > common:
                indent -= 1
                result.append("%s%s/" % (base_indent, ' ' * indent))
            for dirname in path[common:]:
                if dirname == '\uFFFE':
                    break
                result.append("%s%s\\ %s" % (base_indent, ' ' * indent, dirname))
                indent += 1
        ##result.append("!!%r %r" % (path, name))
        result.append(" %s %s %s %s| %s" % (
            {0: "off ", 1: " ", 2: "high"}.get(fileinfo.prio, "????"),
            fmt.iso_datetime(fileinfo.mtime),
            fmt.human_size(fileinfo.size),
            ' ' * indent, name,
        ))
        prev_path = path

    while indent > 0:
        indent -= 1
        result.append("%s%s/" % (base_indent, ' ' * indent))
    result.append("%s= %d file(s)" % (base_indent, len(filelist)))
    return '\n'.join(result)
[ "def", "_fmt_files", "(", "filelist", ")", ":", "depth", "=", "max", "(", "i", ".", "path", ".", "count", "(", "'/'", ")", "for", "i", "in", "filelist", ")", "pad", "=", "[", "'\\uFFFE'", "]", "*", "depth", "base_indent", "=", "' '", "*", "38", "indent", "=", "0", "result", "=", "[", "]", "prev_path", "=", "pad", "sorted_files", "=", "sorted", "(", "(", "i", ".", "path", ".", "split", "(", "'/'", ")", "[", ":", "-", "1", "]", "+", "pad", ",", "i", ".", "path", ".", "rsplit", "(", "'/'", ",", "1", ")", "[", "-", "1", "]", ",", "i", ")", "for", "i", "in", "filelist", ")", "for", "path", ",", "name", ",", "fileinfo", "in", "sorted_files", ":", "path", "=", "path", "[", ":", "depth", "]", "if", "path", "!=", "prev_path", ":", "common", "=", "min", "(", "[", "depth", "]", "+", "[", "idx", "for", "idx", ",", "(", "dirname", ",", "prev_name", ")", "in", "enumerate", "(", "zip", "(", "path", ",", "prev_path", ")", ")", "if", "dirname", "!=", "prev_name", "]", ")", "#result.append(\"!!%r %r\" % (indent, common))", "#result.append(\"!!%r\" % (prev_path,))", "#result.append(\"!!%r\" % (path,))", "while", "indent", ">", "common", ":", "indent", "-=", "1", "result", ".", "append", "(", "\"%s%s/\"", "%", "(", "base_indent", ",", "' '", "*", "indent", ")", ")", "for", "dirname", "in", "path", "[", "common", ":", "]", ":", "if", "dirname", "==", "'\\uFFFE'", ":", "break", "result", ".", "append", "(", "\"%s%s\\\\ %s\"", "%", "(", "base_indent", ",", "' '", "*", "indent", ",", "dirname", ")", ")", "indent", "+=", "1", "##result.append(\"!!%r %r\" % (path, name))", "result", ".", "append", "(", "\" %s %s %s %s| %s\"", "%", "(", "{", "0", ":", "\"off \"", ",", "1", ":", "\" \"", ",", "2", ":", "\"high\"", "}", ".", "get", "(", "fileinfo", ".", "prio", ",", "\"????\"", ")", ",", "fmt", ".", "iso_datetime", "(", "fileinfo", ".", "mtime", ")", ",", "fmt", ".", "human_size", "(", "fileinfo", ".", "size", ")", ",", "' '", "*", "indent", ",", "name", ",", ")", ")", "prev_path", "=", "path", "while", "indent", ">", "0", ":", "indent", "-=", "1", "result", ".", "append", "(", "\"%s%s/\"", "%", "(", "base_indent", ",", "' '", "*", "indent", ")", ")", "result", ".", "append", "(", "\"%s= %d file(s)\"", "%", "(", "base_indent", ",", "len", "(", "filelist", ")", ")", ")", "return", "'\\n'", ".", "join", "(", "result", ")" ]
Produce a file listing.
[ "Produce", "a", "file", "listing", "." ]
python
train
svinota/mdns
mdns/zeroconf.py
https://github.com/svinota/mdns/blob/295f6407132616a0ff7401124b9057d89555f91d/mdns/zeroconf.py#L624-L629
def set_property(self, key, value):
    """
    Update only one property in the dict
    """
    self.properties[key] = value
    self.sync_properties()
[ "def", "set_property", "(", "self", ",", "key", ",", "value", ")", ":", "self", ".", "properties", "[", "key", "]", "=", "value", "self", ".", "sync_properties", "(", ")" ]
Update only one property in the dict
[ "Update", "only", "one", "property", "in", "the", "dict" ]
python
train
hugapi/hug
hug/decorators.py
https://github.com/hugapi/hug/blob/080901c81576657f82e2432fd4a82f1d0d2f370c/hug/decorators.py#L41-L57
def default_output_format(content_type='application/json', apply_globally=False,
                          api=None, cli=False, http=True):
    """A decorator that allows you to override the default output format for an API"""
    def decorator(formatter):
        formatter = hug.output_format.content_type(content_type)(formatter)
        if apply_globally:
            if http:
                hug.defaults.output_format = formatter
            if cli:
                hug.defaults.cli_output_format = formatter
        else:
            apply_to_api = hug.API(api) if api else hug.api.from_object(formatter)
            if http:
                apply_to_api.http.output_format = formatter
            if cli:
                apply_to_api.cli.output_format = formatter
        return formatter
    return decorator
[ "def", "default_output_format", "(", "content_type", "=", "'application/json'", ",", "apply_globally", "=", "False", ",", "api", "=", "None", ",", "cli", "=", "False", ",", "http", "=", "True", ")", ":", "def", "decorator", "(", "formatter", ")", ":", "formatter", "=", "hug", ".", "output_format", ".", "content_type", "(", "content_type", ")", "(", "formatter", ")", "if", "apply_globally", ":", "if", "http", ":", "hug", ".", "defaults", ".", "output_format", "=", "formatter", "if", "cli", ":", "hug", ".", "defaults", ".", "cli_output_format", "=", "formatter", "else", ":", "apply_to_api", "=", "hug", ".", "API", "(", "api", ")", "if", "api", "else", "hug", ".", "api", ".", "from_object", "(", "formatter", ")", "if", "http", ":", "apply_to_api", ".", "http", ".", "output_format", "=", "formatter", "if", "cli", ":", "apply_to_api", ".", "cli", ".", "output_format", "=", "formatter", "return", "formatter", "return", "decorator" ]
A decorator that allows you to override the default output format for an API
[ "A", "decorator", "that", "allows", "you", "to", "override", "the", "default", "output", "format", "for", "an", "API" ]
python
train
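A usage sketch of the decorator above; the formatter body is illustrative, not from the hug sources:

import hug

@hug.default_output_format(content_type='text/plain')
def raw_text(content):
    # Hypothetical formatter: render any response as UTF-8 plain text.
    return str(content).encode('utf8')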
arviz-devs/arviz
arviz/stats/stats.py
https://github.com/arviz-devs/arviz/blob/d04d8da07f029fd2931f48d2f7f324cf393e5277/arviz/stats/stats.py#L48-L251
def compare(
    dataset_dict,
    ic="waic",
    method="BB-pseudo-BMA",
    b_samples=1000,
    alpha=1,
    seed=None,
    scale="deviance",
):
    r"""Compare models based on WAIC or LOO cross validation.

    WAIC is Widely applicable information criterion, and LOO is leave-one-out
    (LOO) cross-validation. Read more theory here - in a paper by some of the
    leading authorities on model selection - dx.doi.org/10.1111/1467-9868.00353

    Parameters
    ----------
    dataset_dict : dict[str] -> InferenceData
        A dictionary of model names and InferenceData objects
    ic : str
        Information Criterion (WAIC or LOO) used to compare models.
        Default WAIC.
    method : str
        Method used to estimate the weights for each model. Available options are:

        - 'stacking' : stacking of predictive distributions.
        - 'BB-pseudo-BMA' : (default) pseudo-Bayesian Model averaging using
          Akaike-type weighting. The weights are stabilized using the
          Bayesian bootstrap
        - 'pseudo-BMA': pseudo-Bayesian Model averaging using Akaike-type
          weighting, without Bootstrap stabilization (not recommended)

        For more information read https://arxiv.org/abs/1704.02030
    b_samples: int
        Number of samples taken by the Bayesian bootstrap estimation.
        Only useful when method = 'BB-pseudo-BMA'.
    alpha : float
        The shape parameter in the Dirichlet distribution used for the
        Bayesian bootstrap. Only useful when method = 'BB-pseudo-BMA'.
        When alpha=1 (default), the distribution is uniform on the simplex.
        A smaller alpha will keep the final weights further away from 0 and 1.
    seed : int or np.random.RandomState instance
        If int or RandomState, use it for seeding Bayesian bootstrap.
        Only useful when method = 'BB-pseudo-BMA'. Default None, the
        global np.random state is used.
    scale : str
        Output scale for IC. Available options are:

        - `deviance` : (default) -2 * (log-score)
        - `log` : 1 * log-score (after Vehtari et al. (2017))
        - `negative_log` : -1 * (log-score)

    Returns
    -------
    A DataFrame, ordered from lowest to highest IC. The index reflects
    the order in which the models are passed to this function. The columns are:
    IC : Information Criteria (WAIC or LOO).
        Smaller IC indicates higher out-of-sample predictive fit ("better"
        model). Default WAIC. If `scale == log` higher IC indicates higher
        out-of-sample predictive fit ("better" model).
    pIC : Estimated effective number of parameters.
    dIC : Relative difference between each IC (WAIC or LOO) and the lowest
        IC (WAIC or LOO). It's always 0 for the top-ranked model.
    weight: Relative weight for each model.
        This can be loosely interpreted as the probability of each model
        (among the compared models) given the data. By default the
        uncertainty in the weights estimation is considered using
        Bayesian bootstrap.
    SE : Standard error of the IC estimate.
        If method = BB-pseudo-BMA these values are estimated using
        Bayesian bootstrap.
    dSE : Standard error of the difference in IC between each model and
        the top-ranked model. It's always 0 for the top-ranked model.
    warning : A value of 1 indicates that the computation of the IC may not
        be reliable. This could be indication of WAIC/LOO starting to fail;
        see http://arxiv.org/abs/1507.04544 for details.
    scale : Scale used for the IC.
    """
    names = list(dataset_dict.keys())
    scale = scale.lower()
    if scale == "log":
        scale_value = 1
        ascending = False
    else:
        if scale == "negative_log":
            scale_value = -1
        else:
            scale_value = -2
        ascending = True

    if ic == "waic":
        ic_func = waic
        df_comp = pd.DataFrame(
            index=names,
            columns=["waic", "p_waic", "d_waic", "weight", "se", "dse", "warning", "waic_scale"],
        )
        scale_col = "waic_scale"
    elif ic == "loo":
        ic_func = loo
        df_comp = pd.DataFrame(
            index=names,
            columns=["loo", "p_loo", "d_loo", "weight", "se", "dse", "warning", "loo_scale"],
        )
        scale_col = "loo_scale"
    else:
        raise NotImplementedError("The information criterion {} is not supported.".format(ic))

    if method.lower() not in ["stacking", "bb-pseudo-bma", "pseudo-bma"]:
        raise ValueError("The method {}, to compute weights, is not supported.".format(method))

    ic_se = "{}_se".format(ic)
    p_ic = "p_{}".format(ic)
    ic_i = "{}_i".format(ic)

    ics = pd.DataFrame()
    names = []
    for name, dataset in dataset_dict.items():
        names.append(name)
        ics = ics.append([ic_func(dataset, pointwise=True, scale=scale)])
    ics.index = names
    ics.sort_values(by=ic, inplace=True, ascending=ascending)

    if method.lower() == "stacking":
        rows, cols, ic_i_val = _ic_matrix(ics, ic_i)
        exp_ic_i = np.exp(ic_i_val / scale_value)
        last_col = cols - 1

        def w_fuller(weights):
            return np.concatenate((weights, [max(1.0 - np.sum(weights), 0.0)]))

        def log_score(weights):
            w_full = w_fuller(weights)
            score = 0.0
            for i in range(rows):
                score += np.log(np.dot(exp_ic_i[i], w_full))
            return -score

        def gradient(weights):
            w_full = w_fuller(weights)
            grad = np.zeros(last_col)
            for k in range(last_col - 1):
                for i in range(rows):
                    grad[k] += (exp_ic_i[i, k] - exp_ic_i[i, last_col]) / np.dot(
                        exp_ic_i[i], w_full
                    )
            return -grad

        theta = np.full(last_col, 1.0 / cols)
        bounds = [(0.0, 1.0) for _ in range(last_col)]
        constraints = [
            {"type": "ineq", "fun": lambda x: 1.0 - np.sum(x)},
            {"type": "ineq", "fun": np.sum},
        ]

        weights = minimize(
            fun=log_score, x0=theta, jac=gradient, bounds=bounds, constraints=constraints
        )

        weights = w_fuller(weights["x"])
        ses = ics[ic_se]

    elif method.lower() == "bb-pseudo-bma":
        rows, cols, ic_i_val = _ic_matrix(ics, ic_i)
        ic_i_val = ic_i_val * rows

        b_weighting = st.dirichlet.rvs(alpha=[alpha] * rows, size=b_samples, random_state=seed)
        weights = np.zeros((b_samples, cols))
        z_bs = np.zeros_like(weights)
        for i in range(b_samples):
            z_b = np.dot(b_weighting[i], ic_i_val)
            u_weights = np.exp((z_b - np.min(z_b)) / scale_value)
            z_bs[i] = z_b  # pylint: disable=unsupported-assignment-operation
            weights[i] = u_weights / np.sum(u_weights)

        weights = weights.mean(axis=0)
        ses = pd.Series(z_bs.std(axis=0), index=names)  # pylint: disable=no-member

    elif method.lower() == "pseudo-bma":
        min_ic = ics.iloc[0][ic]
        z_rv = np.exp((ics[ic] - min_ic) / scale_value)
        weights = z_rv / np.sum(z_rv)
        ses = ics[ic_se]

    if np.any(weights):
        min_ic_i_val = ics[ic_i].iloc[0]
        for idx, val in enumerate(ics.index):
            res = ics.loc[val]
            if scale_value < 0:
                diff = res[ic_i] - min_ic_i_val
            else:
                diff = min_ic_i_val - res[ic_i]
            d_ic = np.sum(diff)
            d_std_err = np.sqrt(len(diff) * np.var(diff))
            std_err = ses.loc[val]
            weight = weights[idx]
            df_comp.at[val] = (
                res[ic],
                res[p_ic],
                d_ic,
                weight,
                std_err,
                d_std_err,
                res["warning"],
                res[scale_col],
            )

    return df_comp.sort_values(by=ic, ascending=ascending)
[ "def", "compare", "(", "dataset_dict", ",", "ic", "=", "\"waic\"", ",", "method", "=", "\"BB-pseudo-BMA\"", ",", "b_samples", "=", "1000", ",", "alpha", "=", "1", ",", "seed", "=", "None", ",", "scale", "=", "\"deviance\"", ",", ")", ":", "names", "=", "list", "(", "dataset_dict", ".", "keys", "(", ")", ")", "scale", "=", "scale", ".", "lower", "(", ")", "if", "scale", "==", "\"log\"", ":", "scale_value", "=", "1", "ascending", "=", "False", "else", ":", "if", "scale", "==", "\"negative_log\"", ":", "scale_value", "=", "-", "1", "else", ":", "scale_value", "=", "-", "2", "ascending", "=", "True", "if", "ic", "==", "\"waic\"", ":", "ic_func", "=", "waic", "df_comp", "=", "pd", ".", "DataFrame", "(", "index", "=", "names", ",", "columns", "=", "[", "\"waic\"", ",", "\"p_waic\"", ",", "\"d_waic\"", ",", "\"weight\"", ",", "\"se\"", ",", "\"dse\"", ",", "\"warning\"", ",", "\"waic_scale\"", "]", ",", ")", "scale_col", "=", "\"waic_scale\"", "elif", "ic", "==", "\"loo\"", ":", "ic_func", "=", "loo", "df_comp", "=", "pd", ".", "DataFrame", "(", "index", "=", "names", ",", "columns", "=", "[", "\"loo\"", ",", "\"p_loo\"", ",", "\"d_loo\"", ",", "\"weight\"", ",", "\"se\"", ",", "\"dse\"", ",", "\"warning\"", ",", "\"loo_scale\"", "]", ",", ")", "scale_col", "=", "\"loo_scale\"", "else", ":", "raise", "NotImplementedError", "(", "\"The information criterion {} is not supported.\"", ".", "format", "(", "ic", ")", ")", "if", "method", ".", "lower", "(", ")", "not", "in", "[", "\"stacking\"", ",", "\"bb-pseudo-bma\"", ",", "\"pseudo-bma\"", "]", ":", "raise", "ValueError", "(", "\"The method {}, to compute weights, is not supported.\"", ".", "format", "(", "method", ")", ")", "ic_se", "=", "\"{}_se\"", ".", "format", "(", "ic", ")", "p_ic", "=", "\"p_{}\"", ".", "format", "(", "ic", ")", "ic_i", "=", "\"{}_i\"", ".", "format", "(", "ic", ")", "ics", "=", "pd", ".", "DataFrame", "(", ")", "names", "=", "[", "]", "for", "name", ",", "dataset", "in", "dataset_dict", ".", "items", "(", ")", ":", "names", ".", "append", "(", "name", ")", "ics", "=", "ics", ".", "append", "(", "[", "ic_func", "(", "dataset", ",", "pointwise", "=", "True", ",", "scale", "=", "scale", ")", "]", ")", "ics", ".", "index", "=", "names", "ics", ".", "sort_values", "(", "by", "=", "ic", ",", "inplace", "=", "True", ",", "ascending", "=", "ascending", ")", "if", "method", ".", "lower", "(", ")", "==", "\"stacking\"", ":", "rows", ",", "cols", ",", "ic_i_val", "=", "_ic_matrix", "(", "ics", ",", "ic_i", ")", "exp_ic_i", "=", "np", ".", "exp", "(", "ic_i_val", "/", "scale_value", ")", "last_col", "=", "cols", "-", "1", "def", "w_fuller", "(", "weights", ")", ":", "return", "np", ".", "concatenate", "(", "(", "weights", ",", "[", "max", "(", "1.0", "-", "np", ".", "sum", "(", "weights", ")", ",", "0.0", ")", "]", ")", ")", "def", "log_score", "(", "weights", ")", ":", "w_full", "=", "w_fuller", "(", "weights", ")", "score", "=", "0.0", "for", "i", "in", "range", "(", "rows", ")", ":", "score", "+=", "np", ".", "log", "(", "np", ".", "dot", "(", "exp_ic_i", "[", "i", "]", ",", "w_full", ")", ")", "return", "-", "score", "def", "gradient", "(", "weights", ")", ":", "w_full", "=", "w_fuller", "(", "weights", ")", "grad", "=", "np", ".", "zeros", "(", "last_col", ")", "for", "k", "in", "range", "(", "last_col", "-", "1", ")", ":", "for", "i", "in", "range", "(", "rows", ")", ":", "grad", "[", "k", "]", "+=", "(", "exp_ic_i", "[", "i", ",", "k", "]", "-", "exp_ic_i", "[", "i", ",", "last_col", "]", ")", "/", "np", ".", "dot", "(", 
"exp_ic_i", "[", "i", "]", ",", "w_full", ")", "return", "-", "grad", "theta", "=", "np", ".", "full", "(", "last_col", ",", "1.0", "/", "cols", ")", "bounds", "=", "[", "(", "0.0", ",", "1.0", ")", "for", "_", "in", "range", "(", "last_col", ")", "]", "constraints", "=", "[", "{", "\"type\"", ":", "\"ineq\"", ",", "\"fun\"", ":", "lambda", "x", ":", "1.0", "-", "np", ".", "sum", "(", "x", ")", "}", ",", "{", "\"type\"", ":", "\"ineq\"", ",", "\"fun\"", ":", "np", ".", "sum", "}", ",", "]", "weights", "=", "minimize", "(", "fun", "=", "log_score", ",", "x0", "=", "theta", ",", "jac", "=", "gradient", ",", "bounds", "=", "bounds", ",", "constraints", "=", "constraints", ")", "weights", "=", "w_fuller", "(", "weights", "[", "\"x\"", "]", ")", "ses", "=", "ics", "[", "ic_se", "]", "elif", "method", ".", "lower", "(", ")", "==", "\"bb-pseudo-bma\"", ":", "rows", ",", "cols", ",", "ic_i_val", "=", "_ic_matrix", "(", "ics", ",", "ic_i", ")", "ic_i_val", "=", "ic_i_val", "*", "rows", "b_weighting", "=", "st", ".", "dirichlet", ".", "rvs", "(", "alpha", "=", "[", "alpha", "]", "*", "rows", ",", "size", "=", "b_samples", ",", "random_state", "=", "seed", ")", "weights", "=", "np", ".", "zeros", "(", "(", "b_samples", ",", "cols", ")", ")", "z_bs", "=", "np", ".", "zeros_like", "(", "weights", ")", "for", "i", "in", "range", "(", "b_samples", ")", ":", "z_b", "=", "np", ".", "dot", "(", "b_weighting", "[", "i", "]", ",", "ic_i_val", ")", "u_weights", "=", "np", ".", "exp", "(", "(", "z_b", "-", "np", ".", "min", "(", "z_b", ")", ")", "/", "scale_value", ")", "z_bs", "[", "i", "]", "=", "z_b", "# pylint: disable=unsupported-assignment-operation", "weights", "[", "i", "]", "=", "u_weights", "/", "np", ".", "sum", "(", "u_weights", ")", "weights", "=", "weights", ".", "mean", "(", "axis", "=", "0", ")", "ses", "=", "pd", ".", "Series", "(", "z_bs", ".", "std", "(", "axis", "=", "0", ")", ",", "index", "=", "names", ")", "# pylint: disable=no-member", "elif", "method", ".", "lower", "(", ")", "==", "\"pseudo-bma\"", ":", "min_ic", "=", "ics", ".", "iloc", "[", "0", "]", "[", "ic", "]", "z_rv", "=", "np", ".", "exp", "(", "(", "ics", "[", "ic", "]", "-", "min_ic", ")", "/", "scale_value", ")", "weights", "=", "z_rv", "/", "np", ".", "sum", "(", "z_rv", ")", "ses", "=", "ics", "[", "ic_se", "]", "if", "np", ".", "any", "(", "weights", ")", ":", "min_ic_i_val", "=", "ics", "[", "ic_i", "]", ".", "iloc", "[", "0", "]", "for", "idx", ",", "val", "in", "enumerate", "(", "ics", ".", "index", ")", ":", "res", "=", "ics", ".", "loc", "[", "val", "]", "if", "scale_value", "<", "0", ":", "diff", "=", "res", "[", "ic_i", "]", "-", "min_ic_i_val", "else", ":", "diff", "=", "min_ic_i_val", "-", "res", "[", "ic_i", "]", "d_ic", "=", "np", ".", "sum", "(", "diff", ")", "d_std_err", "=", "np", ".", "sqrt", "(", "len", "(", "diff", ")", "*", "np", ".", "var", "(", "diff", ")", ")", "std_err", "=", "ses", ".", "loc", "[", "val", "]", "weight", "=", "weights", "[", "idx", "]", "df_comp", ".", "at", "[", "val", "]", "=", "(", "res", "[", "ic", "]", ",", "res", "[", "p_ic", "]", ",", "d_ic", ",", "weight", ",", "std_err", ",", "d_std_err", ",", "res", "[", "\"warning\"", "]", ",", "res", "[", "scale_col", "]", ",", ")", "return", "df_comp", ".", "sort_values", "(", "by", "=", "ic", ",", "ascending", "=", "ascending", ")" ]
r"""Compare models based on WAIC or LOO cross validation. WAIC is Widely applicable information criterion, and LOO is leave-one-out (LOO) cross-validation. Read more theory here - in a paper by some of the leading authorities on model selection - dx.doi.org/10.1111/1467-9868.00353 Parameters ---------- dataset_dict : dict[str] -> InferenceData A dictionary of model names and InferenceData objects ic : str Information Criterion (WAIC or LOO) used to compare models. Default WAIC. method : str Method used to estimate the weights for each model. Available options are: - 'stacking' : stacking of predictive distributions. - 'BB-pseudo-BMA' : (default) pseudo-Bayesian Model averaging using Akaike-type weighting. The weights are stabilized using the Bayesian bootstrap - 'pseudo-BMA': pseudo-Bayesian Model averaging using Akaike-type weighting, without Bootstrap stabilization (not recommended) For more information read https://arxiv.org/abs/1704.02030 b_samples: int Number of samples taken by the Bayesian bootstrap estimation. Only useful when method = 'BB-pseudo-BMA'. alpha : float The shape parameter in the Dirichlet distribution used for the Bayesian bootstrap. Only useful when method = 'BB-pseudo-BMA'. When alpha=1 (default), the distribution is uniform on the simplex. A smaller alpha will keeps the final weights more away from 0 and 1. seed : int or np.random.RandomState instance If int or RandomState, use it for seeding Bayesian bootstrap. Only useful when method = 'BB-pseudo-BMA'. Default None the global np.random state is used. scale : str Output scale for IC. Available options are: - `deviance` : (default) -2 * (log-score) - `log` : 1 * log-score (after Vehtari et al. (2017)) - `negative_log` : -1 * (log-score) Returns ------- A DataFrame, ordered from lowest to highest IC. The index reflects the order in which the models are passed to this function. The columns are: IC : Information Criteria (WAIC or LOO). Smaller IC indicates higher out-of-sample predictive fit ("better" model). Default WAIC. If `scale == log` higher IC indicates higher out-of-sample predictive fit ("better" model). pIC : Estimated effective number of parameters. dIC : Relative difference between each IC (WAIC or LOO) and the lowest IC (WAIC or LOO). It's always 0 for the top-ranked model. weight: Relative weight for each model. This can be loosely interpreted as the probability of each model (among the compared model) given the data. By default the uncertainty in the weights estimation is considered using Bayesian bootstrap. SE : Standard error of the IC estimate. If method = BB-pseudo-BMA these values are estimated using Bayesian bootstrap. dSE : Standard error of the difference in IC between each model and the top-ranked model. It's always 0 for the top-ranked model. warning : A value of 1 indicates that the computation of the IC may not be reliable. This could be indication of WAIC/LOO starting to fail see http://arxiv.org/abs/1507.04544 for details. scale : Scale used for the IC.
[ "r", "Compare", "models", "based", "on", "WAIC", "or", "LOO", "cross", "validation", "." ]
python
train
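A short usage sketch for compare, using the example datasets bundled with ArviZ (the function above ships as az.compare); the selected columns follow the WAIC branch of the code:

import arviz as az

model_dict = {
    "centered": az.load_arviz_data("centered_eight"),
    "non_centered": az.load_arviz_data("non_centered_eight"),
}
# Rank the two models by WAIC and estimate stacking weights.
cmp_df = az.compare(model_dict, ic="waic", method="stacking")
print(cmp_df[["waic", "d_waic", "weight", "se"]])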
infobloxopen/infoblox-client
infoblox_client/object_manager.py
https://github.com/infobloxopen/infoblox-client/blob/edeec62db1935784c728731b2ae7cf0fcc9bf84d/infoblox_client/object_manager.py#L453-L470
def delete_objects_associated_with_a_record(self, name, view, delete_list): """Deletes records associated with record:a or record:aaaa.""" search_objects = {} if 'record:cname' in delete_list: search_objects['record:cname'] = 'canonical' if 'record:txt' in delete_list: search_objects['record:txt'] = 'name' if not search_objects: return for obj_type, search_type in search_objects.items(): payload = {'view': view, search_type: name} ib_objs = self.connector.get_object(obj_type, payload) if ib_objs: for ib_obj in ib_objs: self.delete_object_by_ref(ib_obj['_ref'])
[ "def", "delete_objects_associated_with_a_record", "(", "self", ",", "name", ",", "view", ",", "delete_list", ")", ":", "search_objects", "=", "{", "}", "if", "'record:cname'", "in", "delete_list", ":", "search_objects", "[", "'record:cname'", "]", "=", "'canonical'", "if", "'record:txt'", "in", "delete_list", ":", "search_objects", "[", "'record:txt'", "]", "=", "'name'", "if", "not", "search_objects", ":", "return", "for", "obj_type", ",", "search_type", "in", "search_objects", ".", "items", "(", ")", ":", "payload", "=", "{", "'view'", ":", "view", ",", "search_type", ":", "name", "}", "ib_objs", "=", "self", ".", "connector", ".", "get_object", "(", "obj_type", ",", "payload", ")", "if", "ib_objs", ":", "for", "ib_obj", "in", "ib_objs", ":", "self", ".", "delete_object_by_ref", "(", "ib_obj", "[", "'_ref'", "]", ")" ]
Deletes records associated with record:a or record:aaaa.
[ "Deletes", "records", "associated", "with", "record", ":", "a", "or", "record", ":", "aaaa", "." ]
python
train
fastai/fastai
fastai/vision/image.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/vision/image.py#L126-L135
def refresh(self)->None: "Apply any logit, flow, or affine transfers that have been sent to the `Image`." if self._logit_px is not None: self._px = self._logit_px.sigmoid_() self._logit_px = None if self._affine_mat is not None or self._flow is not None: self._px = _grid_sample(self._px, self.flow, **self.sample_kwargs) self.sample_kwargs = {} self._flow = None return self
[ "def", "refresh", "(", "self", ")", "->", "None", ":", "if", "self", ".", "_logit_px", "is", "not", "None", ":", "self", ".", "_px", "=", "self", ".", "_logit_px", ".", "sigmoid_", "(", ")", "self", ".", "_logit_px", "=", "None", "if", "self", ".", "_affine_mat", "is", "not", "None", "or", "self", ".", "_flow", "is", "not", "None", ":", "self", ".", "_px", "=", "_grid_sample", "(", "self", ".", "_px", ",", "self", ".", "flow", ",", "*", "*", "self", ".", "sample_kwargs", ")", "self", ".", "sample_kwargs", "=", "{", "}", "self", ".", "_flow", "=", "None", "return", "self" ]
Apply any logit, flow, or affine transfers that have been sent to the `Image`.
[ "Apply", "any", "logit", "flow", "or", "affine", "transfers", "that", "have", "been", "sent", "to", "the", "Image", "." ]
python
train
hazelcast/hazelcast-python-client
hazelcast/protocol/codec/ringbuffer_read_many_codec.py
https://github.com/hazelcast/hazelcast-python-client/blob/3f6639443c23d6d036aa343f8e094f052250d2c1/hazelcast/protocol/codec/ringbuffer_read_many_codec.py#L12-L22
def calculate_size(name, start_sequence, min_count, max_count, filter): """ Calculates the request payload size""" data_size = 0 data_size += calculate_size_str(name) data_size += LONG_SIZE_IN_BYTES data_size += INT_SIZE_IN_BYTES data_size += INT_SIZE_IN_BYTES data_size += BOOLEAN_SIZE_IN_BYTES if filter is not None: data_size += calculate_size_data(filter) return data_size
[ "def", "calculate_size", "(", "name", ",", "start_sequence", ",", "min_count", ",", "max_count", ",", "filter", ")", ":", "data_size", "=", "0", "data_size", "+=", "calculate_size_str", "(", "name", ")", "data_size", "+=", "LONG_SIZE_IN_BYTES", "data_size", "+=", "INT_SIZE_IN_BYTES", "data_size", "+=", "INT_SIZE_IN_BYTES", "data_size", "+=", "BOOLEAN_SIZE_IN_BYTES", "if", "filter", "is", "not", "None", ":", "data_size", "+=", "calculate_size_data", "(", "filter", ")", "return", "data_size" ]
Calculates the request payload size
[ "Calculates", "the", "request", "payload", "size" ]
python
train
niemasd/TreeSwift
treeswift/Tree.py
https://github.com/niemasd/TreeSwift/blob/7e0cbc770fcc2ee1194ef7c2a0ab9fb82f089917/treeswift/Tree.py#L550-L589
def label_to_node(self, selection='leaves'): '''Return a dictionary mapping labels (strings) to ``Node`` objects * If ``selection`` is ``"all"``, the dictionary will contain all nodes * If ``selection`` is ``"leaves"``, the dictionary will only contain leaves * If ``selection`` is ``"internal"``, the dictionary will only contain internal nodes * If ``selection`` is a ``set``, the dictionary will contain all nodes labeled by a label in ``selection`` * If multiple nodes are labeled by a given label, only the last (preorder traversal) will be obtained Args: ``selection`` (``str`` or ``set``): The selection of nodes to get * ``"all"`` to select all nodes * ``"leaves"`` to select leaves * ``"internal"`` to select internal nodes * A ``set`` of labels to specify nodes to select Returns: ``dict``: Dictionary mapping labels to the corresponding nodes ''' if not isinstance(selection,set) and not isinstance(selection,list) and (not isinstance(selection,str) or selection not in ('all', 'leaves', 'internal')): raise RuntimeError('"selection" must be one of the strings "all", "leaves", or "internal", or it must be a set containing Node labels') if isinstance(selection, str): selection = selection[0] elif isinstance(selection,list): selection = set(selection) label_to_node = dict() for node in self.traverse_preorder(): if selection == 'a' or (selection == 'i' and not node.is_leaf()) or (selection == 'l' and node.is_leaf()) or str(node) in selection: label_to_node[str(node)] = node if not isinstance(selection,str) and len(label_to_node) != len(selection): warn("Not all given labels exist in the tree") return label_to_node
[ "def", "label_to_node", "(", "self", ",", "selection", "=", "'leaves'", ")", ":", "if", "not", "isinstance", "(", "selection", ",", "set", ")", "and", "not", "isinstance", "(", "selection", ",", "list", ")", "and", "(", "not", "isinstance", "(", "selection", ",", "str", ")", "or", "not", "(", "selection", "!=", "'all'", "or", "selection", "!=", "'leaves'", "or", "selection", "!=", "'internal'", ")", ")", ":", "raise", "RuntimeError", "(", "'\"selection\" must be one of the strings \"all\", \"leaves\", or \"internal\", or it must be a set containing Node labels'", ")", "if", "isinstance", "(", "selection", ",", "str", ")", ":", "selection", "=", "selection", "[", "0", "]", "elif", "isinstance", "(", "selection", ",", "list", ")", ":", "selection", "=", "set", "(", "selection", ")", "label_to_node", "=", "dict", "(", ")", "for", "node", "in", "self", ".", "traverse_preorder", "(", ")", ":", "if", "selection", "==", "'a'", "or", "(", "selection", "==", "'i'", "and", "not", "node", ".", "is_leaf", "(", ")", ")", "or", "(", "selection", "==", "'l'", "and", "node", ".", "is_leaf", "(", ")", ")", "or", "str", "(", "node", ")", "in", "selection", ":", "label_to_node", "[", "str", "(", "node", ")", "]", "=", "node", "if", "not", "isinstance", "(", "selection", ",", "str", ")", "and", "len", "(", "label_to_node", ")", "!=", "len", "(", "selection", ")", ":", "warn", "(", "\"Not all given labels exist in the tree\"", ")", "return", "label_to_node" ]
Return a dictionary mapping labels (strings) to ``Node`` objects * If ``selection`` is ``"all"``, the dictionary will contain all nodes * If ``selection`` is ``"leaves"``, the dictionary will only contain leaves * If ``selection`` is ``"internal"``, the dictionary will only contain internal nodes * If ``selection`` is a ``set``, the dictionary will contain all nodes labeled by a label in ``selection`` * If multiple nodes are labeled by a given label, only the last (preorder traversal) will be obtained Args: ``selection`` (``str`` or ``set``): The selection of nodes to get * ``"all"`` to select all nodes * ``"leaves"`` to select leaves * ``"internal"`` to select internal nodes * A ``set`` of labels to specify nodes to select Returns: ``dict``: Dictionary mapping labels to the corresponding nodes
[ "Return", "a", "dictionary", "mapping", "labels", "(", "strings", ")", "to", "Node", "objects" ]
python
train
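A usage sketch for label_to_node, assuming TreeSwift's read_tree_newick helper; the Newick string is illustrative:

from treeswift import read_tree_newick

tree = read_tree_newick("((A:1,B:1)X:1,C:2)R;")
# Map every leaf label to its Node object.
leaves = tree.label_to_node(selection="leaves")
print(sorted(leaves))        # ['A', 'B', 'C']
# Restrict the mapping to an explicit set of labels.
subset = tree.label_to_node(selection={"A", "C"})
print(sorted(subset))        # ['A', 'C']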
pycontribs/pyrax
pyrax/clouddatabases.py
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/clouddatabases.py#L117-L131
def create_backup(self, instance, name, description=None): """ Creates a backup of the specified instance, giving it the specified name along with an optional description. """ body = {"backup": { "instance": utils.get_id(instance), "name": name, }} if description is not None: body["backup"]["description"] = description uri = "/backups" resp, resp_body = self.api.method_post(uri, body=body) mgr = self.api._backup_manager return CloudDatabaseBackup(mgr, body.get("backup"))
[ "def", "create_backup", "(", "self", ",", "instance", ",", "name", ",", "description", "=", "None", ")", ":", "body", "=", "{", "\"backup\"", ":", "{", "\"instance\"", ":", "utils", ".", "get_id", "(", "instance", ")", ",", "\"name\"", ":", "name", ",", "}", "}", "if", "description", "is", "not", "None", ":", "body", "[", "\"backup\"", "]", "[", "\"description\"", "]", "=", "description", "uri", "=", "\"/backups\"", "resp", ",", "resp_body", "=", "self", ".", "api", ".", "method_post", "(", "uri", ",", "body", "=", "body", ")", "mgr", "=", "self", ".", "api", ".", "_backup_manager", "return", "CloudDatabaseBackup", "(", "mgr", ",", "body", ".", "get", "(", "\"backup\"", ")", ")" ]
Creates a backup of the specified instance, giving it the specified name along with an optional description.
[ "Creates", "a", "backup", "of", "the", "specified", "instance", "giving", "it", "the", "specified", "name", "along", "with", "an", "optional", "description", "." ]
python
train
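A hedged usage sketch; it assumes credentials are already set up and that the client exposing create_backup is reachable as pyrax.cloud_databases, which may vary by pyrax version. The credential path, backup name, and description are illustrative:

import pyrax

pyrax.set_credential_file("/path/to/credentials")
cdb = pyrax.cloud_databases
instance = cdb.list()[0]               # pick any existing instance
backup = cdb.create_backup(instance, "nightly",
                           description="Nightly snapshot")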
maxalbert/tohu
tohu/v2/generators.py
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v2/generators.py#L48-L55
def tuple_len(self): """ Length of tuples produced by this generator. """ try: return self._tuple_len except AttributeError: raise NotImplementedError("Class {} does not implement attribute 'tuple_len'.".format(self.__class__.__name__))
[ "def", "tuple_len", "(", "self", ")", ":", "try", ":", "return", "self", ".", "_tuple_len", "except", "AttributeError", ":", "raise", "NotImplementedError", "(", "\"Class {} does not implement attribute 'tuple_len'.\"", ".", "format", "(", "self", ".", "__class__", ".", "__name__", ")", ")" ]
Length of tuples produced by this generator.
[ "Length", "of", "tuples", "produced", "by", "this", "generator", "." ]
python
train
peterbrittain/asciimatics
asciimatics/effects.py
https://github.com/peterbrittain/asciimatics/blob/f471427d7786ce2d5f1eeb2dae0e67d19e46e085/asciimatics/effects.py#L425-L437
def _respawn(self): """ Pick a random location for the star making sure it does not overwrite an existing piece of text. """ self._cycle = randint(0, len(self._star_chars)) (height, width) = self._screen.dimensions while True: self._x = randint(0, width - 1) self._y = self._screen.start_line + randint(0, height - 1) if self._screen.get_from(self._x, self._y)[0] == 32: break self._old_char = " "
[ "def", "_respawn", "(", "self", ")", ":", "self", ".", "_cycle", "=", "randint", "(", "0", ",", "len", "(", "self", ".", "_star_chars", ")", ")", "(", "height", ",", "width", ")", "=", "self", ".", "_screen", ".", "dimensions", "while", "True", ":", "self", ".", "_x", "=", "randint", "(", "0", ",", "width", "-", "1", ")", "self", ".", "_y", "=", "self", ".", "_screen", ".", "start_line", "+", "randint", "(", "0", ",", "height", "-", "1", ")", "if", "self", ".", "_screen", ".", "get_from", "(", "self", ".", "_x", ",", "self", ".", "_y", ")", "[", "0", "]", "==", "32", ":", "break", "self", ".", "_old_char", "=", "\" \"" ]
Pick a random location for the star making sure it does not overwrite an existing piece of text.
[ "Pick", "a", "random", "location", "for", "the", "star", "making", "sure", "it", "does", "not", "overwrite", "an", "existing", "piece", "of", "text", "." ]
python
train
Kortemme-Lab/klab
klab/bio/ligand.py
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/ligand.py#L90-L130
def retrieve_data_from_rcsb(cls, ligand_code, pdb_id = None, silent = True, cached_dir = None): '''Retrieve a file from the RCSB.''' if not silent: colortext.printf("Retrieving data from RCSB") if cached_dir: assert(os.path.exists(cached_dir)) ligand_info_path, ligand_info, pdb_ligand_info, pdb_ligand_info_path = None, None, None, None if cached_dir: ligand_info_path = os.path.join(cached_dir, '{0}.cif'.format(ligand_code)) if os.path.exists(ligand_info_path): ligand_info = read_file(ligand_info_path) if not ligand_info: ligand_info = retrieve_ligand_cif(ligand_code) if cached_dir: write_file(ligand_info_path, ligand_info) # Parse .cif l = cls(ligand_code) l.parse_cif(ligand_info) l.pdb_id = pdb_id or l.pdb_id has_pdb_id = l.pdb_id and (len(l.pdb_id) == 4) and (l.pdb_id != '?') # the last case is unnecessary and will be short-cut but I included it to show possible values # Parse PDB XML if has_pdb_id: if cached_dir: pdb_ligand_info_path = os.path.join(cached_dir, '{0}.pdb.ligandinfo'.format(l.pdb_id.lower())) if os.path.exists(pdb_ligand_info_path): pdb_ligand_info = read_file(pdb_ligand_info_path) else: pdb_ligand_info = retrieve_pdb_ligand_info(l.pdb_id) write_file(pdb_ligand_info_path, pdb_ligand_info) else: pdb_ligand_info = retrieve_pdb_ligand_info(l.pdb_id) if pdb_ligand_info: l.parse_pdb_ligand_info(pdb_ligand_info) # Retrieve the diagram image l.get_diagram() return l
[ "def", "retrieve_data_from_rcsb", "(", "cls", ",", "ligand_code", ",", "pdb_id", "=", "None", ",", "silent", "=", "True", ",", "cached_dir", "=", "None", ")", ":", "if", "not", "silent", ":", "colortext", ".", "printf", "(", "\"Retrieving data from RCSB\"", ")", "if", "cached_dir", ":", "assert", "(", "os", ".", "path", ".", "exists", "(", "cached_dir", ")", ")", "ligand_info_path", ",", "ligand_info", ",", "pdb_ligand_info", ",", "pdb_ligand_info_path", "=", "None", ",", "None", ",", "None", ",", "None", "if", "cached_dir", ":", "ligand_info_path", "=", "os", ".", "path", ".", "join", "(", "cached_dir", ",", "'{0}.cif'", ".", "format", "(", "ligand_code", ")", ")", "if", "os", ".", "path", ".", "exists", "(", "ligand_info_path", ")", ":", "ligand_info", "=", "read_file", "(", "ligand_info_path", ")", "if", "not", "ligand_info", ":", "ligand_info", "=", "retrieve_ligand_cif", "(", "ligand_code", ")", "if", "cached_dir", ":", "write_file", "(", "ligand_info_path", ",", "ligand_info", ")", "# Parse .cif", "l", "=", "cls", "(", "ligand_code", ")", "l", ".", "parse_cif", "(", "ligand_info", ")", "l", ".", "pdb_id", "=", "pdb_id", "or", "l", ".", "pdb_id", "has_pdb_id", "=", "l", ".", "pdb_id", "and", "(", "len", "(", "l", ".", "pdb_id", ")", "==", "4", ")", "and", "(", "l", ".", "pdb_id", "!=", "'?'", ")", "# the last case is unnecessary and will be short-cut but I included it to show possible values", "# Parse PDB XML", "if", "has_pdb_id", ":", "if", "cached_dir", ":", "pdb_ligand_info_path", "=", "os", ".", "path", ".", "join", "(", "cached_dir", ",", "'{0}.pdb.ligandinfo'", ".", "format", "(", "l", ".", "pdb_id", ".", "lower", "(", ")", ")", ")", "if", "os", ".", "path", ".", "exists", "(", "pdb_ligand_info_path", ")", ":", "pdb_ligand_info", "=", "read_file", "(", "pdb_ligand_info_path", ")", "else", ":", "pdb_ligand_info", "=", "retrieve_pdb_ligand_info", "(", "l", ".", "pdb_id", ")", "write_file", "(", "pdb_ligand_info_path", ",", "pdb_ligand_info", ")", "else", ":", "pdb_ligand_info", "=", "retrieve_pdb_ligand_info", "(", "l", ".", "pdb_id", ")", "if", "pdb_ligand_info", ":", "l", ".", "parse_pdb_ligand_info", "(", "pdb_ligand_info", ")", "# Retrive the diagram image", "l", ".", "get_diagram", "(", ")", "return", "l" ]
Retrieve a file from the RCSB.
[ "Retrieve", "a", "file", "from", "the", "RCSB", "." ]
python
train
bibanon/BASC-py4chan
basc_py4chan/thread.py
https://github.com/bibanon/BASC-py4chan/blob/88e4866d73853e1025e549fbbe9744e750522359/basc_py4chan/thread.py#L137-L143
def file_objects(self): """Returns the :class:`basc_py4chan.File` objects of all files attached to posts in the thread.""" if self.topic.has_file: yield self.topic.file for reply in self.replies: if reply.has_file: yield reply.file
[ "def", "file_objects", "(", "self", ")", ":", "if", "self", ".", "topic", ".", "has_file", ":", "yield", "self", ".", "topic", ".", "file", "for", "reply", "in", "self", ".", "replies", ":", "if", "reply", ".", "has_file", ":", "yield", "reply", ".", "file" ]
Returns the :class:`basc_py4chan.File` objects of all files attached to posts in the thread.
[ "Returns", "the", ":", "class", ":", "basc_py4chan", ".", "File", "objects", "of", "all", "files", "attached", "to", "posts", "in", "the", "thread", "." ]
python
train
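A sketch of consuming the generator above; it assumes network access to the 4chan API, and the board name is illustrative. Some versions expose file_objects as a property rather than a method:

import basc_py4chan

board = basc_py4chan.Board('wg')
thread = board.get_thread(board.get_all_thread_ids()[0])
for file_obj in thread.file_objects():
    print(file_obj.file_url)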
google/grr
grr/server/grr_response_server/aff4_objects/security.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/aff4_objects/security.py#L696-L701
def BuildApprovalSymlinksUrns(self, approval_id): """Builds list of symlinks URNs for the approval object.""" return [ self.ApprovalSymlinkUrnBuilder("cron", self.subject_urn.Basename(), self.token.username, approval_id) ]
[ "def", "BuildApprovalSymlinksUrns", "(", "self", ",", "approval_id", ")", ":", "return", "[", "self", ".", "ApprovalSymlinkUrnBuilder", "(", "\"cron\"", ",", "self", ".", "subject_urn", ".", "Basename", "(", ")", ",", "self", ".", "token", ".", "username", ",", "approval_id", ")", "]" ]
Builds list of symlinks URNs for the approval object.
[ "Builds", "list", "of", "symlinks", "URNs", "for", "the", "approval", "object", "." ]
python
train
Clinical-Genomics/scout
scout/server/blueprints/cases/views.py
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/server/blueprints/cases/views.py#L736-L742
def research(institute_id, case_name): """Open the research list for a case.""" institute_obj, case_obj = institute_and_case(store, institute_id, case_name) user_obj = store.user(current_user.email) link = url_for('.case', institute_id=institute_id, case_name=case_name) store.open_research(institute_obj, case_obj, user_obj, link) return redirect(request.referrer)
[ "def", "research", "(", "institute_id", ",", "case_name", ")", ":", "institute_obj", ",", "case_obj", "=", "institute_and_case", "(", "store", ",", "institute_id", ",", "case_name", ")", "user_obj", "=", "store", ".", "user", "(", "current_user", ".", "email", ")", "link", "=", "url_for", "(", "'.case'", ",", "institute_id", "=", "institute_id", ",", "case_name", "=", "case_name", ")", "store", ".", "open_research", "(", "institute_obj", ",", "case_obj", ",", "user_obj", ",", "link", ")", "return", "redirect", "(", "request", ".", "referrer", ")" ]
Open the research list for a case.
[ "Open", "the", "research", "list", "for", "a", "case", "." ]
python
test
eyurtsev/FlowCytometryTools
FlowCytometryTools/core/bases.py
https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/core/bases.py#L327-L361
def apply(self, func, applyto='measurement', noneval=nan, setdata=False): """ Apply func either to self or to associated data. If data is not already parsed, try and read it. Parameters ---------- func : callable The function accepts either a measurement object or an FCS object. Does some calculation and returns the result. applyto : ['data' | 'measurement'] * 'data' : apply to associated data * 'measurement' : apply to measurement object itself. noneval : obj Value to return if `applyto` is 'data', but no data is available. setdata : bool Used only if data is not already set. If True, parsed data will be assigned to self.data. Otherwise data will be discarded at the end of apply. """ applyto = applyto.lower() if applyto == 'data': if self.data is not None: data = self.data elif self.datafile is None: return noneval else: data = self.read_data() if setdata: self.data = data return func(data) elif applyto == 'measurement': return func(self) else: raise ValueError('Encountered unsupported value "%s" for applyto parameter.' % applyto)
[ "def", "apply", "(", "self", ",", "func", ",", "applyto", "=", "'measurement'", ",", "noneval", "=", "nan", ",", "setdata", "=", "False", ")", ":", "applyto", "=", "applyto", ".", "lower", "(", ")", "if", "applyto", "==", "'data'", ":", "if", "self", ".", "data", "is", "not", "None", ":", "data", "=", "self", ".", "data", "elif", "self", ".", "datafile", "is", "None", ":", "return", "noneval", "else", ":", "data", "=", "self", ".", "read_data", "(", ")", "if", "setdata", ":", "self", ".", "data", "=", "data", "return", "func", "(", "data", ")", "elif", "applyto", "==", "'measurement'", ":", "return", "func", "(", "self", ")", "else", ":", "raise", "ValueError", "(", "'Encountered unsupported value \"%s\" for applyto parameter.'", "%", "applyto", ")" ]
Apply func either to self or to associated data. If data is not already parsed, try and read it. Parameters ---------- func : callable The function accepts either a measurement object or an FCS object. Does some calculation and returns the result. applyto : ['data' | 'measurement'] * 'data' : apply to associated data * 'measurement' : apply to measurement object itself. noneval : obj Value to return if `applyto` is 'data', but no data is available. setdata : bool Used only if data is not already set. If True, parsed data will be assigned to self.data. Otherwise data will be discarded at the end of apply.
[ "Apply", "func", "either", "to", "self", "or", "to", "associated", "data", ".", "If", "data", "is", "not", "already", "parsed", "try", "and", "read", "it", "." ]
python
train
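A usage sketch for the dispatch above on a real FCMeasurement; 'sample.fcs' is an illustrative path:

from FlowCytometryTools import FCMeasurement

sample = FCMeasurement(ID='s1', datafile='sample.fcs')
# applyto='data' parses the FCS file (cached here via setdata=True) and
# hands the event DataFrame to the callable.
n_events = sample.apply(lambda data: data.shape[0],
                        applyto='data', setdata=True)
# applyto='measurement' hands the measurement object itself to the callable.
sample_id = sample.apply(lambda m: m.ID, applyto='measurement')
print(n_events, sample_id)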
sampsyo/confuse
confuse.py
https://github.com/sampsyo/confuse/blob/9ff0992e30470f6822824711950e6dd906e253fb/confuse.py#L989-L1029
def dump(self, full=True, redact=False): """Dump the Configuration object to a YAML file. The order of the keys is determined from the default configuration file. All keys not in the default configuration will be appended to the end of the file. :param full: Dump settings that don't differ from the defaults as well :param redact: Remove sensitive information (views with the `redact` flag set) from the output :returns: The configuration as a YAML string """ if full: out_dict = self.flatten(redact=redact) else: # Exclude defaults when flattening. sources = [s for s in self.sources if not s.default] temp_root = RootView(sources) temp_root.redactions = self.redactions out_dict = temp_root.flatten(redact=redact) yaml_out = yaml.dump(out_dict, Dumper=Dumper, default_flow_style=None, indent=4, width=1000) # Restore comments to the YAML text. default_source = None for source in self.sources: if source.default: default_source = source break if default_source and default_source.filename: with open(default_source.filename, 'rb') as fp: default_data = fp.read() yaml_out = restore_yaml_comments(yaml_out, default_data.decode('utf-8')) return yaml_out
[ "def", "dump", "(", "self", ",", "full", "=", "True", ",", "redact", "=", "False", ")", ":", "if", "full", ":", "out_dict", "=", "self", ".", "flatten", "(", "redact", "=", "redact", ")", "else", ":", "# Exclude defaults when flattening.", "sources", "=", "[", "s", "for", "s", "in", "self", ".", "sources", "if", "not", "s", ".", "default", "]", "temp_root", "=", "RootView", "(", "sources", ")", "temp_root", ".", "redactions", "=", "self", ".", "redactions", "out_dict", "=", "temp_root", ".", "flatten", "(", "redact", "=", "redact", ")", "yaml_out", "=", "yaml", ".", "dump", "(", "out_dict", ",", "Dumper", "=", "Dumper", ",", "default_flow_style", "=", "None", ",", "indent", "=", "4", ",", "width", "=", "1000", ")", "# Restore comments to the YAML text.", "default_source", "=", "None", "for", "source", "in", "self", ".", "sources", ":", "if", "source", ".", "default", ":", "default_source", "=", "source", "break", "if", "default_source", "and", "default_source", ".", "filename", ":", "with", "open", "(", "default_source", ".", "filename", ",", "'rb'", ")", "as", "fp", ":", "default_data", "=", "fp", ".", "read", "(", ")", "yaml_out", "=", "restore_yaml_comments", "(", "yaml_out", ",", "default_data", ".", "decode", "(", "'utf-8'", ")", ")", "return", "yaml_out" ]
Dump the Configuration object to a YAML file. The order of the keys is determined from the default configuration file. All keys not in the default configuration will be appended to the end of the file. :param full: Dump settings that don't differ from the defaults as well :param redact: Remove sensitive information (views with the `redact` flag set) from the output :returns: The configuration as a YAML string
[ "Dump", "the", "Configuration", "object", "to", "a", "YAML", "file", "." ]
python
train
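A usage sketch; 'MyApp' is an illustrative application name, and the keys only exist because the sketch sets them:

import confuse

config = confuse.Configuration('MyApp', read=False)
config.set({'verbose': True, 'paths': ['/tmp']})
# full=False dumps only the values that differ from the packaged defaults.
print(config.dump(full=False))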
tophatmonocle/ims_lti_py
ims_lti_py/tool_config.py
https://github.com/tophatmonocle/ims_lti_py/blob/979244d83c2e6420d2c1941f58e52f641c56ad12/ims_lti_py/tool_config.py#L105-L170
def process_xml(self, xml): ''' Parse tool configuration data out of the Common Cartridge LTI link XML. ''' root = objectify.fromstring(xml, parser = etree.XMLParser()) # Parse all children of the root node for child in root.getchildren(): if 'title' in child.tag: self.title = child.text if 'description' in child.tag: self.description = child.text if 'secure_launch_url' in child.tag: self.secure_launch_url = child.text elif 'launch_url' in child.tag: self.launch_url = child.text if 'icon' in child.tag: self.icon = child.text if 'secure_icon' in child.tag: self.secure_icon = child.text if 'cartridge_bundle' in child.tag: self.cartridge_bundle = child.attrib['identifierref'] if 'cartridge_icon' in child.tag: self.cartridge_icon = child.attrib['identifierref'] if 'vendor' in child.tag: # Parse vendor tag for v_child in child.getchildren(): if 'code' in v_child.tag: self.vendor_code = v_child.text if 'description' in v_child.tag: self.vendor_description = v_child.text if 'name' in v_child.tag: self.vendor_name = v_child.text if 'url' in v_child.tag: self.vendor_url = v_child.text if 'contact' in v_child.tag: # Parse contact tag for email and name for c_child in v_child: if 'name' in c_child.tag: self.vendor_contact_name = c_child.text if 'email' in c_child.tag: self.vendor_contact_email = c_child.text if 'custom' in child.tag: # Parse custom tags for custom_child in child.getchildren(): self.custom_params[custom_child.attrib['name']] =\ custom_child.text if 'extensions' in child.tag: platform = child.attrib['platform'] properties = {} # Parse extension tags for ext_child in child.getchildren(): if 'property' in ext_child.tag: properties[ext_child.attrib['name']] = ext_child.text elif 'options' in ext_child.tag: opt_name = ext_child.attrib['name'] options = {} for option_child in ext_child.getchildren(): options[option_child.attrib['name']] =\ option_child.text properties[opt_name] = options self.set_ext_params(platform, properties)
[ "def", "process_xml", "(", "self", ",", "xml", ")", ":", "root", "=", "objectify", ".", "fromstring", "(", "xml", ",", "parser", "=", "etree", ".", "XMLParser", "(", ")", ")", "# Parse all children of the root node", "for", "child", "in", "root", ".", "getchildren", "(", ")", ":", "if", "'title'", "in", "child", ".", "tag", ":", "self", ".", "title", "=", "child", ".", "text", "if", "'description'", "in", "child", ".", "tag", ":", "self", ".", "description", "=", "child", ".", "text", "if", "'secure_launch_url'", "in", "child", ".", "tag", ":", "self", ".", "secure_launch_url", "=", "child", ".", "text", "elif", "'launch_url'", "in", "child", ".", "tag", ":", "self", ".", "launch_url", "=", "child", ".", "text", "if", "'icon'", "in", "child", ".", "tag", ":", "self", ".", "icon", "=", "child", ".", "text", "if", "'secure_icon'", "in", "child", ".", "tag", ":", "self", ".", "secure_icon", "=", "child", ".", "text", "if", "'cartridge_bundle'", "in", "child", ".", "tag", ":", "self", ".", "cartridge_bundle", "=", "child", ".", "attrib", "[", "'identifierref'", "]", "if", "'catridge_icon'", "in", "child", ".", "tag", ":", "self", ".", "cartridge_icon", "=", "child", ".", "atrib", "[", "'identifierref'", "]", "if", "'vendor'", "in", "child", ".", "tag", ":", "# Parse vendor tag", "for", "v_child", "in", "child", ".", "getchildren", "(", ")", ":", "if", "'code'", "in", "v_child", ".", "tag", ":", "self", ".", "vendor_code", "=", "v_child", ".", "text", "if", "'description'", "in", "v_child", ".", "tag", ":", "self", ".", "vendor_description", "=", "v_child", ".", "text", "if", "'name'", "in", "v_child", ".", "tag", ":", "self", ".", "vendor_name", "=", "v_child", ".", "text", "if", "'url'", "in", "v_child", ".", "tag", ":", "self", ".", "vendor_url", "=", "v_child", ".", "text", "if", "'contact'", "in", "v_child", ".", "tag", ":", "# Parse contact tag for email and name", "for", "c_child", "in", "v_child", ":", "if", "'name'", "in", "c_child", ".", "tag", ":", "self", ".", "vendor_contact_name", "=", "c_child", ".", "text", "if", "'email'", "in", "c_child", ".", "tag", ":", "self", ".", "vendor_contact_email", "=", "c_child", ".", "text", "if", "'custom'", "in", "child", ".", "tag", ":", "# Parse custom tags", "for", "custom_child", "in", "child", ".", "getchildren", "(", ")", ":", "self", ".", "custom_params", "[", "custom_child", ".", "attrib", "[", "'name'", "]", "]", "=", "custom_child", ".", "text", "if", "'extensions'", "in", "child", ".", "tag", ":", "platform", "=", "child", ".", "attrib", "[", "'platform'", "]", "properties", "=", "{", "}", "# Parse extension tags", "for", "ext_child", "in", "child", ".", "getchildren", "(", ")", ":", "if", "'property'", "in", "ext_child", ".", "tag", ":", "properties", "[", "ext_child", ".", "attrib", "[", "'name'", "]", "]", "=", "ext_child", ".", "text", "elif", "'options'", "in", "ext_child", ".", "tag", ":", "opt_name", "=", "ext_child", ".", "attrib", "[", "'name'", "]", "options", "=", "{", "}", "for", "option_child", "in", "ext_child", ".", "getchildren", "(", ")", ":", "options", "[", "option_child", ".", "attrib", "[", "'name'", "]", "]", "=", "option_child", ".", "text", "properties", "[", "opt_name", "]", "=", "options", "self", ".", "set_ext_params", "(", "platform", ",", "properties", ")" ]
Parse tool configuration data out of the Common Cartridge LTI link XML.
[ "Parse", "tool", "configuration", "data", "out", "of", "the", "Common", "Cartridge", "LTI", "link", "XML", "." ]
python
train
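A sketch of feeding a minimal Common Cartridge LTI link into the parser above. The XML is illustrative and omits the XML declaration because lxml's objectify.fromstring rejects encoding declarations on unicode input; ToolConfig is the class this method belongs to:

from ims_lti_py import ToolConfig

xml = """<cartridge_basiclti_link>
  <title>Example Tool</title>
  <description>An example LTI tool.</description>
  <launch_url>http://example.com/lti/launch</launch_url>
</cartridge_basiclti_link>"""

config = ToolConfig()
config.process_xml(xml)
print(config.title, config.launch_url)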
wtsi-hgi/python-baton-wrapper
baton/collections.py
https://github.com/wtsi-hgi/python-baton-wrapper/blob/ae0c9e3630e2c4729a0614cc86f493688436b0b7/baton/collections.py#L79-L86
def get_by_number(self, number: int) -> Optional[DataObjectReplica]: """ Gets the data object replica in this collection with the given number. Will return `None` if such replica does not exist. :param number: the number of the data object replica to get :return: the data object replica in this collection with the given number """ return self._data.get(number, None)
[ "def", "get_by_number", "(", "self", ",", "number", ":", "int", ")", "->", "Optional", "[", "DataObjectReplica", "]", ":", "return", "self", ".", "_data", ".", "get", "(", "number", ",", "None", ")" ]
Gets the data object replica in this collection with the given number. Will return `None` if such replica does not exist. :param number: the number of the data object replica to get :return: the data object replica in this collection with the given number
[ "Gets", "the", "data", "object", "replica", "in", "this", "collection", "with", "the", "given", "number", ".", "Will", "return", "None", "if", "such", "replica", "does", "not", "exist", ".", ":", "param", "number", ":", "the", "number", "of", "the", "data", "object", "replica", "to", "get", ":", "return", ":", "the", "data", "object", "replica", "in", "this", "collection", "with", "the", "given", "number" ]
python
train
projectshift/shift-boiler
boiler/feature/users.py
https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/feature/users.py#L72-L97
def enable_request_loader(): """ Enable request loader Optional user loader based on incoming request object. This is useful to enable on top of default user loader if you want to authenticate API requests via bearer token header. :return: """ @login_manager.request_loader def load_user_from_request(request): user = None auth = request.headers.get('Authorization') if auth and auth.startswith('Bearer'): try: token = auth[7:] user = user_service.get_user_by_token(token) except x.UserException as exception: msg = 'JWT token login failed for [{ip}] with message: [{msg}]' msg = msg.format( ip=request.environ['REMOTE_ADDR'], msg=str(exception) ) current_app.logger.log(msg=msg, level=logging.INFO) abort(401, description=str(exception)) return user
[ "def", "enable_request_loader", "(", ")", ":", "@", "login_manager", ".", "request_loader", "def", "load_user_from_request", "(", "request", ")", ":", "user", "=", "None", "auth", "=", "request", ".", "headers", ".", "get", "(", "'Authorization'", ")", "if", "auth", "and", "auth", ".", "startswith", "(", "'Bearer'", ")", ":", "try", ":", "token", "=", "auth", "[", "7", ":", "]", "user", "=", "user_service", ".", "get_user_by_token", "(", "token", ")", "except", "x", ".", "UserException", "as", "exception", ":", "msg", "=", "'JWT token login failed for [{ip}] with message: [{msg}]'", "msg", "=", "msg", ".", "format", "(", "ip", "=", "request", ".", "environ", "[", "'REMOTE_ADDR'", "]", ",", "msg", "=", "str", "(", "exception", ")", ")", "current_app", ".", "logger", ".", "log", "(", "msg", "=", "msg", ",", "level", "=", "logging", ".", "INFO", ")", "abort", "(", "401", ",", "description", "=", "str", "(", "exception", ")", ")", "return", "user" ]
Enable request loader Optional user loader based on incoming request object. This is useful to enable on top of default user loader if you want to authenticate API requests via bearer token header. :return:
[ "Enable", "request", "loader", "Optional", "user", "loader", "based", "on", "incomin", "request", "object", ".", "This", "is", "useful", "to", "enable", "on", "top", "of", "default", "user", "loader", "if", "you", "want", "to", "authenticate", "API", "requests", "via", "bearer", "token", "header", ".", ":", "return", ":" ]
python
train
jurismarches/chopper
chopper/extractor.py
https://github.com/jurismarches/chopper/blob/53c5489a53e3a5d205a5cb207df751c09633e7ce/chopper/extractor.py#L142-L152
def __add(self, dest, xpath): """ Adds an XPath expression to the dest list :param dest: The destination list to add the XPath :type dest: list :param xpath: The XPath expression to add :type xpath: str """ assert isinstance(xpath, string_types) dest.append(xpath)
[ "def", "__add", "(", "self", ",", "dest", ",", "xpath", ")", ":", "assert", "isinstance", "(", "xpath", ",", "string_types", ")", "dest", ".", "append", "(", "xpath", ")" ]
Adds an XPath expression to the dest list :param dest: The destination list to add the XPath :type dest: list :param xpath: The XPath expression to add :type xpath: str
[ "Adds", "a", "Xpath", "expression", "to", "the", "dest", "list" ]
python
train
SignalN/language
language/ngrams.py
https://github.com/SignalN/language/blob/5c50c78f65bcc2c999b44d530e7412185248352d/language/ngrams.py#L117-L129
def word_matches(s1, s2, n=3): """ Word-level n-grams that match between two strings Args: s1: a string s2: another string n: an int for the n in n-gram Returns: set: the n-grams found in both strings """ return __matches(s1, s2, word_ngrams, n=n)
[ "def", "word_matches", "(", "s1", ",", "s2", ",", "n", "=", "3", ")", ":", "return", "__matches", "(", "s1", ",", "s2", ",", "word_ngrams", ",", "n", "=", "n", ")" ]
Word-level n-grams that match between two strings Args: s1: a string s2: another string n: an int for the n in n-gram Returns: set: the n-grams found in both strings
[ "Word", "-", "level", "n", "-", "grams", "that", "match", "between", "two", "strings" ]
python
train
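A quick illustration of word-level trigram matching; the exact representation of the returned n-grams (tuples vs. space-joined strings) depends on the package's word_ngrams helper:

s1 = "the quick brown fox jumps over the fence"
s2 = "a quick brown fox leaps over the fence"

# Trigrams common to both sentences, e.g. 'quick brown fox'
# and 'over the fence'.
print(word_matches(s1, s2, n=3))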
Shinichi-Nakagawa/pitchpx
pitchpx/baseball/retrosheet.py
https://github.com/Shinichi-Nakagawa/pitchpx/blob/5747402a0b3416f5e910b479e100df858f0b6440/pitchpx/baseball/retrosheet.py#L154-L172
def ball_count(cls, ball_tally, strike_tally, pitch_res): """ Ball/Strike counter :param ball_tally: Ball tally :param strike_tally: Strike tally :param pitch_res: pitching result(Retrosheet format) :return: ball count, strike count """ b, s = ball_tally, strike_tally if pitch_res == "B": if ball_tally < 4: b += 1 elif pitch_res == "S" or pitch_res == "C" or pitch_res == "X": if strike_tally < 3: s += 1 elif pitch_res == "F": if strike_tally < 2: s += 1 return b, s
[ "def", "ball_count", "(", "cls", ",", "ball_tally", ",", "strike_tally", ",", "pitch_res", ")", ":", "b", ",", "s", "=", "ball_tally", ",", "strike_tally", "if", "pitch_res", "==", "\"B\"", ":", "if", "ball_tally", "<", "4", ":", "b", "+=", "1", "elif", "pitch_res", "==", "\"S\"", "or", "pitch_res", "==", "\"C\"", "or", "pitch_res", "==", "\"X\"", ":", "if", "strike_tally", "<", "3", ":", "s", "+=", "1", "elif", "pitch_res", "==", "\"F\"", ":", "if", "strike_tally", "<", "2", ":", "s", "+=", "1", "return", "b", ",", "s" ]
Ball/Strike counter :param ball_tally: Ball tally :param strike_tally: Strike tally :param pitch_res: pitching result(Retrosheet format) :return: ball count, strike count
[ "Ball", "/", "Strike", "counter", ":", "param", "ball_tally", ":", "Ball", "telly", ":", "param", "strike_tally", ":", "Strike", "telly", ":", "param", "pitch_res", ":", "pitching", "result", "(", "Retrosheet", "format", ")", ":", "return", ":", "ball", "count", "strike", "count" ]
python
train
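A worked pitch-sequence example; the enclosing class name RetroSheet is assumed from the module path and may differ:

from pitchpx.baseball.retrosheet import RetroSheet

balls, strikes = 0, 0
# Retrosheet codes: B=ball, C=called strike, F=foul, X=in play.
for pitch in ("B", "C", "F", "F", "B"):
    balls, strikes = RetroSheet.ball_count(balls, strikes, pitch)
    print(pitch, balls, strikes)
# Prints counts 1-0, 1-1, 1-2, 1-2 (a foul at two strikes is ignored), 2-2.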
mgraffg/EvoDAG
EvoDAG/population.py
https://github.com/mgraffg/EvoDAG/blob/e11fa1fd1ca9e69cca92696c86661a3dc7b3a1d5/EvoDAG/population.py#L276-L282
def trace(self, n): "Restore the position in the history of individual n's nodes" trace_map = {} self._trace(n, trace_map) s = list(trace_map.keys()) s.sort() return s
[ "def", "trace", "(", "self", ",", "n", ")", ":", "trace_map", "=", "{", "}", "self", ".", "_trace", "(", "n", ",", "trace_map", ")", "s", "=", "list", "(", "trace_map", ".", "keys", "(", ")", ")", "s", ".", "sort", "(", ")", "return", "s" ]
Restore the position in the history of individual n's nodes
[ "Restore", "the", "position", "in", "the", "history", "of", "individual", "v", "s", "nodes" ]
python
train
RI-imaging/qpformat
qpformat/file_formats/series_zip_tif_holo.py
https://github.com/RI-imaging/qpformat/blob/364e29d7d9e8b9f1d7a4a25c753d1baf9d73d5eb/qpformat/file_formats/series_zip_tif_holo.py#L63-L80
def get_time(self, idx): """Time for each TIFF file If there are no metadata keyword arguments defined for the TIFF file format, then the zip file `date_time` value is used. """ # first try to get the time from the TIFF file # (possible meta data keywords) ds = self._get_dataset(idx) thetime = ds.get_time() if np.isnan(thetime): # use zipfile date_time zf = zipfile.ZipFile(self.path) info = zf.getinfo(self.files[idx]) timetuple = tuple(list(info.date_time) + [0, 0, 0]) thetime = time.mktime(timetuple) return thetime
[ "def", "get_time", "(", "self", ",", "idx", ")", ":", "# first try to get the time from the TIFF file", "# (possible meta data keywords)", "ds", "=", "self", ".", "_get_dataset", "(", "idx", ")", "thetime", "=", "ds", ".", "get_time", "(", ")", "if", "np", ".", "isnan", "(", "thetime", ")", ":", "# use zipfile date_time", "zf", "=", "zipfile", ".", "ZipFile", "(", "self", ".", "path", ")", "info", "=", "zf", ".", "getinfo", "(", "self", ".", "files", "[", "idx", "]", ")", "timetuple", "=", "tuple", "(", "list", "(", "info", ".", "date_time", ")", "+", "[", "0", ",", "0", ",", "0", "]", ")", "thetime", "=", "time", ".", "mktime", "(", "timetuple", ")", "return", "thetime" ]
Time for each TIFF file If there are no metadata keyword arguments defined for the TIFF file format, then the zip file `date_time` value is used.
[ "Time", "for", "each", "TIFF", "file" ]
python
train
doraemonext/wechat-python-sdk
wechat_sdk/lib/crypto/pkcs7.py
https://github.com/doraemonext/wechat-python-sdk/blob/bf6f6f3d4a5440feb73a51937059d7feddc335a0/wechat_sdk/lib/crypto/pkcs7.py#L11-L24
def encode(cls, text): """ Pad the plaintext to a multiple of the block size @param text: plaintext that needs padding @return: padded plaintext string """ text_length = len(text) # compute the number of padding bytes needed amount_to_pad = cls.block_size - (text_length % cls.block_size) if amount_to_pad == 0: amount_to_pad = cls.block_size # build the character used for padding pad = to_binary(chr(amount_to_pad)) return text + pad * amount_to_pad
[ "def", "encode", "(", "cls", ",", "text", ")", ":", "text_length", "=", "len", "(", "text", ")", "# 计算需要填充的位数", "amount_to_pad", "=", "cls", ".", "block_size", "-", "(", "text_length", "%", "cls", ".", "block_size", ")", "if", "amount_to_pad", "==", "0", ":", "amount_to_pad", "=", "cls", ".", "block_size", "# 获得补位所用的字符", "pad", "=", "to_binary", "(", "chr", "(", "amount_to_pad", ")", ")", "return", "text", "+", "pad", "*", "amount_to_pad" ]
Pad the plaintext to a multiple of the block size @param text: plaintext that needs padding @return: padded plaintext string
[ "对需要加密的明文进行填充补位" ]
python
valid
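A byte-oriented restatement of the padding rule above, assuming WeChat's 32-byte AES block size: a 45-byte message needs 32 - (45 % 32) = 19 padding bytes, each carrying the value 19:

block_size = 32
text = b"x" * 45
amount_to_pad = block_size - (len(text) % block_size)
if amount_to_pad == 0:
    amount_to_pad = block_size
padded = text + bytes([amount_to_pad]) * amount_to_pad
print(len(padded), padded[-1])   # 64 19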
saltstack/salt
salt/returners/memcache_return.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/memcache_return.py#L158-L164
def save_load(jid, load, minions=None): ''' Save the load to the specified jid ''' serv = _get_serv(ret=None) serv.set(jid, salt.utils.json.dumps(load)) _append_list(serv, 'jids', jid)
[ "def", "save_load", "(", "jid", ",", "load", ",", "minions", "=", "None", ")", ":", "serv", "=", "_get_serv", "(", "ret", "=", "None", ")", "serv", ".", "set", "(", "jid", ",", "salt", ".", "utils", ".", "json", ".", "dumps", "(", "load", ")", ")", "_append_list", "(", "serv", ",", "'jids'", ",", "jid", ")" ]
Save the load to the specified jid
[ "Save", "the", "load", "to", "the", "specified", "jid" ]
python
train
radjkarl/imgProcessor
imgProcessor/transform/equalizeImage.py
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/transform/equalizeImage.py#L11-L47
def equalizeImage(img, save_path=None, name_additive='_eqHist'): ''' Equalize the histogram (contrast) of an image works with RGB/multi-channel images and flat-arrays @param img - image_path or np.array @param save_path if given output images will be saved there @param name_additive if given this additive will be appended to output images @return output images if input images are numpy.arrays and no save_path is given @return None otherwise ''' if isinstance(img, string_types): img = PathStr(img) if not img.exists(): raise Exception("image path doesn't exist") img_name = img.basename().replace('.', '%s.' % name_additive) if save_path is None: save_path = img.dirname() img = cv2.imread(img) if img.dtype != np.dtype('uint8'): # openCV cannot work with float arrays or uint > 8bit eqFn = _equalizeHistogram else: eqFn = cv2.equalizeHist if len(img.shape) == 3: # multi channel img like rgb for i in range(img.shape[2]): img[:, :, i] = eqFn(img[:, :, i]) else: # grey scale image img = eqFn(img) if save_path: img_name = PathStr(save_path).join(img_name) cv2.imwrite(img_name, img) return img
[ "def", "equalizeImage", "(", "img", ",", "save_path", "=", "None", ",", "name_additive", "=", "'_eqHist'", ")", ":", "if", "isinstance", "(", "img", ",", "string_types", ")", ":", "img", "=", "PathStr", "(", "img", ")", "if", "not", "img", ".", "exists", "(", ")", ":", "raise", "Exception", "(", "\"image path doesn't exist\"", ")", "img_name", "=", "img", ".", "basename", "(", ")", ".", "replace", "(", "'.'", ",", "'%s.'", "%", "name_additive", ")", "if", "save_path", "is", "None", ":", "save_path", "=", "img", ".", "dirname", "(", ")", "img", "=", "cv2", ".", "imread", "(", "img", ")", "if", "img", ".", "dtype", "!=", "np", ".", "dtype", "(", "'uint8'", ")", ":", "# openCV cannot work with float arrays or uint > 8bit\r", "eqFn", "=", "_equalizeHistogram", "else", ":", "eqFn", "=", "cv2", ".", "equalizeHist", "if", "len", "(", "img", ".", "shape", ")", "==", "3", ":", "# multi channel img like rgb\r", "for", "i", "in", "range", "(", "img", ".", "shape", "[", "2", "]", ")", ":", "img", "[", ":", ",", ":", ",", "i", "]", "=", "eqFn", "(", "img", "[", ":", ",", ":", ",", "i", "]", ")", "else", ":", "# grey scale image\r", "img", "=", "eqFn", "(", "img", ")", "if", "save_path", ":", "img_name", "=", "PathStr", "(", "save_path", ")", ".", "join", "(", "img_name", ")", "cv2", ".", "imwrite", "(", "img_name", ",", "img", ")", "return", "img" ]
Equalize the histogram (contrast) of an image works with RGB/multi-channel images and flat-arrays @param img - image_path or np.array @param save_path if given output images will be saved there @param name_additive if given this additive will be appended to output images @return output images if input images are numpy.arrays and no save_path is given @return None otherwise
[ "Equalize", "the", "histogram", "(", "contrast", ")", "of", "an", "image", "works", "with", "RGB", "/", "multi", "-", "channel", "images", "and", "flat", "-", "arrays" ]
python
train
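A minimal sketch of the underlying OpenCV call on a synthetic low-contrast image (the sizes and value range are made up):

import numpy as np
import cv2

img = np.random.randint(100, 151, size=(64, 64), dtype=np.uint8)  # values squeezed into 100..150
eq = cv2.equalizeHist(img)  # stretches the histogram toward the full 0..255 range
print(img.min(), img.max(), '->', eq.min(), eq.max())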
jmcgeheeiv/pyfakefs
pyfakefs/fake_filesystem.py
https://github.com/jmcgeheeiv/pyfakefs/blob/6c36fb8987108107fc861fc3013620d46c7d2f9c/pyfakefs/fake_filesystem.py#L4166-L4181
def rmdir(self, target_directory, dir_fd=None): """Remove a leaf Fake directory. Args: target_directory: (str) Name of directory to remove. dir_fd: If not `None`, the file descriptor of a directory, with `target_directory` being relative to this directory. New in Python 3.3. Raises: OSError: if target_directory does not exist or is not a directory, or as per FakeFilesystem.remove_object. Cannot remove '.'. """ target_directory = self._path_with_dir_fd( target_directory, self.rmdir, dir_fd) self.filesystem.rmdir(target_directory)
[ "def", "rmdir", "(", "self", ",", "target_directory", ",", "dir_fd", "=", "None", ")", ":", "target_directory", "=", "self", ".", "_path_with_dir_fd", "(", "target_directory", ",", "self", ".", "rmdir", ",", "dir_fd", ")", "self", ".", "filesystem", ".", "rmdir", "(", "target_directory", ")" ]
Remove a leaf Fake directory. Args: target_directory: (str) Name of directory to remove. dir_fd: If not `None`, the file descriptor of a directory, with `target_directory` being relative to this directory. New in Python 3.3. Raises: OSError: if target_directory does not exist or is not a directory, or as per FakeFilesystem.remove_object. Cannot remove '.'.
[ "Remove", "a", "leaf", "Fake", "directory", "." ]
python
train
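A hedged usage sketch with pyfakefs's unittest patcher; create_dir is the helper name in recent releases (older ones spelled it CreateDirectory):

import os
from pyfakefs.fake_filesystem_unittest import Patcher

with Patcher() as patcher:
    patcher.fs.create_dir('/data/empty')
    os.rmdir('/data/empty')               # dispatched to the fake rmdir shown above
    print(os.path.exists('/data/empty'))  # False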
PeerAssets/pypeerassets
pypeerassets/pautils.py
https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/pypeerassets/pautils.py#L213-L226
def load_deck_p2th_into_local_node(provider: RpcNode, deck: Deck) -> None: ''' load deck p2th into local node via "importprivkey", this allows building of proof-of-timeline for this deck ''' assert isinstance(provider, RpcNode), {"error": "You can load privkeys only into local node."} error = {"error": "Deck P2TH import went wrong."} provider.importprivkey(deck.p2th_wif, deck.id) check_addr = provider.validateaddress(deck.p2th_address) if not check_addr["isvalid"] and not check_addr["ismine"]: raise DeckP2THImportError(error)
[ "def", "load_deck_p2th_into_local_node", "(", "provider", ":", "RpcNode", ",", "deck", ":", "Deck", ")", "->", "None", ":", "assert", "isinstance", "(", "provider", ",", "RpcNode", ")", ",", "{", "\"error\"", ":", "\"You can load privkeys only into local node.\"", "}", "error", "=", "{", "\"error\"", ":", "\"Deck P2TH import went wrong.\"", "}", "provider", ".", "importprivkey", "(", "deck", ".", "p2th_wif", ",", "deck", ".", "id", ")", "check_addr", "=", "provider", ".", "validateaddress", "(", "deck", ".", "p2th_address", ")", "if", "not", "check_addr", "[", "\"isvalid\"", "]", "and", "not", "check_addr", "[", "\"ismine\"", "]", ":", "raise", "DeckP2THImportError", "(", "error", ")" ]
load deck p2th into local node via "importprivkey", this allows building of proof-of-timeline for this deck
[ "load", "deck", "p2th", "into", "local", "node", "via", "importprivke", "this", "allows", "building", "of", "proof", "-", "of", "-", "timeline", "for", "this", "deck" ]
python
train
stephen-bunn/file-config
tasks/package.py
https://github.com/stephen-bunn/file-config/blob/93429360c949985202e1f2b9cd0340731819ba75/tasks/package.py#L56-L62
def check(ctx): """ Check built package is valid. """ check_command = f"twine check {ctx.directory!s}/dist/*" report.info(ctx, "package.check", "checking package") ctx.run(check_command)
[ "def", "check", "(", "ctx", ")", ":", "check_command", "=", "f\"twine check {ctx.directory!s}/dist/*\"", "report", ".", "info", "(", "ctx", ",", "\"package.check\"", ",", "\"checking package\"", ")", "ctx", ".", "run", "(", "check_command", ")" ]
Check built package is valid.
[ "Check", "built", "package", "is", "valid", "." ]
python
train
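A self-contained variant of the same task without the project's report helper; the directory default is an assumption:

from invoke import task

@task
def check(ctx, directory='.'):
    """Check that the built distributions pass twine's metadata validation."""
    ctx.run(f'twine check {directory}/dist/*')

Run it as `invoke check` from the project root once a distribution has been built into dist/.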
learningequality/ricecooker
ricecooker/classes/nodes.py
https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/classes/nodes.py#L359-L383
def to_dict(self): """ to_dict: puts Topic or Content node data into the format that Kolibri Studio expects Args: None Returns: dict of channel data """ return { "title": self.title, "language" : self.language, "description": self.description, "node_id": self.get_node_id().hex, "content_id": self.get_content_id().hex, "source_domain": self.domain_ns.hex, "source_id": self.source_id, "author": self.author, "aggregator": self.aggregator, "provider": self.provider, "files" : [f.to_dict() for f in self.files if f and f.filename], # Filter out failed downloads "tags": self.tags, "kind": self.kind, "license": None, "license_description": None, "copyright_holder": "", "questions": [], "extra_fields": {}, }
[ "def", "to_dict", "(", "self", ")", ":", "return", "{", "\"title\"", ":", "self", ".", "title", ",", "\"language\"", ":", "self", ".", "language", ",", "\"description\"", ":", "self", ".", "description", ",", "\"node_id\"", ":", "self", ".", "get_node_id", "(", ")", ".", "hex", ",", "\"content_id\"", ":", "self", ".", "get_content_id", "(", ")", ".", "hex", ",", "\"source_domain\"", ":", "self", ".", "domain_ns", ".", "hex", ",", "\"source_id\"", ":", "self", ".", "source_id", ",", "\"author\"", ":", "self", ".", "author", ",", "\"aggregator\"", ":", "self", ".", "aggregator", ",", "\"provider\"", ":", "self", ".", "provider", ",", "\"files\"", ":", "[", "f", ".", "to_dict", "(", ")", "for", "f", "in", "self", ".", "files", "if", "f", "and", "f", ".", "filename", "]", ",", "# Filter out failed downloads", "\"tags\"", ":", "self", ".", "tags", ",", "\"kind\"", ":", "self", ".", "kind", ",", "\"license\"", ":", "None", ",", "\"license_description\"", ":", "None", ",", "\"copyright_holder\"", ":", "\"\"", ",", "\"questions\"", ":", "[", "]", ",", "\"extra_fields\"", ":", "{", "}", ",", "}" ]
to_dict: puts Topic or Content node data into the format that Kolibri Studio expects Args: None Returns: dict of channel data
[ "to_dict", ":", "puts", "Topic", "or", "Content", "node", "data", "into", "the", "format", "that", "Kolibri", "Studio", "expects", "Args", ":", "None", "Returns", ":", "dict", "of", "channel", "data" ]
python
train
brews/snakebacon
snakebacon/mcmcbackends/bacon/utils.py
https://github.com/brews/snakebacon/blob/f5363d0d1225912adc30031bf2c13b54000de8f2/snakebacon/mcmcbackends/bacon/utils.py#L52-L123
def calibrate_dates(chron, calib_curve, d_r, d_std, cutoff=0.0001, normal_distr=False, t_a=[3], t_b=[4]): """Get density of calendar dates for chron date segment in core Parameters ---------- chron : DatedProxy-like calib_curve : CalibCurve or list of CalibCurves d_r : scalar or ndarray Carbon reservoir offset. d_std : scalar or ndarray Carbon reservoir offset error standard deviation. cutoff : scalar, optional Unknown. normal_distr : Bool, optional Use normal distribution for date errors. If False, then use Student's t-distribution. t_a : scalar or ndarray, optional Student's t-distribution parameter, a. t_b - 1 must equal t_a. t_b : scalar or ndarray, optional Student's t-distribution parameter, b. t_b - 1 must equal t_a. Returns ------- depth : ndarray Depth of dated sediment sample. probs : list of 2d arrays Density of calendar age for each dated sediment sample. For each sediment sample, the 2d array has two columns, the first is the calendar age. The second column is the density for that calendar age. """ # Python version of .bacon.calib() on line 908 in Bacon.R # .bacon.calib - line 908 # rcmean = 4128; w2 = 4225; t_a=3; t_b=4 # test = d_cal(cc = calib_curve.rename(columns = {0:'a', 1:'b', 2:'c'}), rcmean = 4128, w2 = 4225, t_a=t_a, # t_b=t_b, cutoff=cutoff, normal = normal) # Line 959 of Bacon.R # calib = list(dets.iloc[:, 3]) # Now Bacon goes and checks the ncol in the dets See line #960 in Bacon.R # Line #973 # TODO(brews): Check that `normal_dist` is used and documented correctly in docstring above. # TODO(brews): Check whether we call returned values densities, freqs or what options we should have. n = len(chron.depth) calib_curve = np.array(calib_curve) t_a = np.array(t_a) t_b = np.array(t_b) assert t_b - 1 == t_a d_r = np.array(d_r) d_std = np.array(d_std) if len(t_a) == 1: t_a = np.repeat(t_a, n) if len(t_b) == 1: t_b = np.repeat(t_b, n) if len(d_r) == 1: d_r = np.repeat(d_r, n) if len(d_std) == 1: d_std = np.repeat(d_std, n) if len(calib_curve) == 1: calib_curve = np.repeat(calib_curve, n) calib_probs = [] rcmean = chron.age - d_r w2 = chron.error ** 2 + d_std ** 2 for i in range(n): age_realizations = d_cal(calib_curve[i], rcmean=rcmean[i], w2=w2[i], t_a=t_a[i], t_b=t_b[i], cutoff=cutoff, normal_distr=normal_distr) calib_probs.append(age_realizations) return np.array(chron.depth), calib_probs
[ "def", "calibrate_dates", "(", "chron", ",", "calib_curve", ",", "d_r", ",", "d_std", ",", "cutoff", "=", "0.0001", ",", "normal_distr", "=", "False", ",", "t_a", "=", "[", "3", "]", ",", "t_b", "=", "[", "4", "]", ")", ":", "# Python version of .bacon.calib() on line 908 in Bacon.R", "# .bacon.calib - line 908", "# rcmean = 4128; w2 = 4225; t_a=3; t_b=4", "# test = d_cal(cc = calib_curve.rename(columns = {0:'a', 1:'b', 2:'c'}), rcmean = 4128, w2 = 4225, t_a=t_a,", "# t_b=t_b, cutoff=cutoff, normal = normal)", "# Line 959 of Bacon.R", "# calib = list(dets.iloc[:, 3])", "# Now Bacon goes and checks the ncol in the dets See line #960 in Bacon.R", "# Line #973", "# TODO(brews): Check that `normal_dist` is used and documented correctly in docstring above.", "# TODO(brews): Check whether we call returned values densities, freqs or what options we should have.", "n", "=", "len", "(", "chron", ".", "depth", ")", "calib_curve", "=", "np", ".", "array", "(", "calib_curve", ")", "t_a", "=", "np", ".", "array", "(", "t_a", ")", "t_b", "=", "np", ".", "array", "(", "t_b", ")", "assert", "t_b", "-", "1", "==", "t_a", "d_r", "=", "np", ".", "array", "(", "d_r", ")", "d_std", "=", "np", ".", "array", "(", "d_std", ")", "if", "len", "(", "t_a", ")", "==", "1", ":", "t_a", "=", "np", ".", "repeat", "(", "t_a", ",", "n", ")", "if", "len", "(", "t_b", ")", "==", "1", ":", "t_b", "=", "np", ".", "repeat", "(", "t_b", ",", "n", ")", "if", "len", "(", "d_r", ")", "==", "1", ":", "d_r", "=", "np", ".", "repeat", "(", "d_r", ",", "n", ")", "if", "len", "(", "d_std", ")", "==", "1", ":", "d_std", "=", "np", ".", "repeat", "(", "d_std", ",", "n", ")", "if", "len", "(", "calib_curve", ")", "==", "1", ":", "calib_curve", "=", "np", ".", "repeat", "(", "calib_curve", ",", "n", ")", "calib_probs", "=", "[", "]", "rcmean", "=", "chron", ".", "age", "-", "d_r", "w2", "=", "chron", ".", "error", "**", "2", "+", "d_std", "**", "2", "for", "i", "in", "range", "(", "n", ")", ":", "age_realizations", "=", "d_cal", "(", "calib_curve", "[", "i", "]", ",", "rcmean", "=", "rcmean", "[", "i", "]", ",", "w2", "=", "w2", "[", "i", "]", ",", "t_a", "=", "t_a", "[", "i", "]", ",", "t_b", "=", "t_b", "[", "i", "]", ",", "cutoff", "=", "cutoff", ",", "normal_distr", "=", "normal_distr", ")", "calib_probs", ".", "append", "(", "age_realizations", ")", "return", "np", ".", "array", "(", "chron", ".", "depth", ")", ",", "calib_probs" ]
Get density of calendar dates for chron date segment in core Parameters ---------- chron : DatedProxy-like calib_curve : CalibCurve or list of CalibCurves d_r : scalar or ndarray Carbon reservoir offset. d_std : scalar or ndarray Carbon reservoir offset error standard deviation. cutoff : scalar, optional Unknown. normal_distr : Bool, optional Use normal distribution for date errors. If False, then use Student's t-distribution. t_a : scalar or ndarray, optional Student's t-distribution parameter, a. t_b - 1 must equal t_a. t_b : scalar or ndarray, optional Student's t-distribution parameter, b. t_b - 1 must equal t_a. Returns ------- depth : ndarray Depth of dated sediment sample. probs : list of 2d arrays Density of calendar age for each dated sediment sample. For each sediment sample, the 2d array has two columns, the first is the calendar age. The second column is the density for that calendar age.
[ "Get", "density", "of", "calendar", "dates", "for", "chron", "date", "segment", "in", "core" ]
python
train
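The scalar-to-per-sample broadcasting idiom in calibrate_dates can be shown in isolation (the function name here is illustrative, not part of snakebacon):

import numpy as np

def per_sample(param, n):
    # accept a scalar or length-1 sequence and repeat it once per dated sample
    param = np.atleast_1d(param)
    return np.repeat(param, n) if len(param) == 1 else param

t_a = per_sample(3, 4)    # -> array([3, 3, 3, 3])
t_b = per_sample([4], 4)  # -> array([4, 4, 4, 4])
assert np.all(t_b - 1 == t_a)  # the constraint asserted in the function above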
lappis-unb/salic-ml
src/salicml/data/query.py
https://github.com/lappis-unb/salic-ml/blob/1b3ebc4f8067740999897ccffd9892dc94482a93/src/salicml/data/query.py#L75-L90
def get_metric(self, pronac, metric): """ Get metric for the project with the given pronac number. Usage: >>> metrics.get_metric(pronac_id, 'finance.approved_funds') """ assert isinstance(metric, str) assert '.' in metric, 'metric must declare a namespace' try: func = self._metrics[metric] return func(pronac, self._data) except KeyError: raise InvalidMetricError('metric does not exist')
[ "def", "get_metric", "(", "self", ",", "pronac", ",", "metric", ")", ":", "assert", "isinstance", "(", "metric", ",", "str", ")", "assert", "'.'", "in", "metric", ",", "'metric must declare a namespace'", "try", ":", "func", "=", "self", ".", "_metrics", "[", "metric", "]", "return", "func", "(", "pronac", ",", "self", ".", "_data", ")", "except", "KeyError", ":", "raise", "InvalidMetricError", "(", "'metric does not exist'", ")" ]
Get metric for the project with the given pronac number. Usage: >>> metrics.get_metric(pronac_id, 'finance.approved_funds')
[ "Get", "metric", "for", "the", "project", "with", "the", "given", "pronac", "number", "." ]
python
train
saltstack/salt
salt/modules/postgres.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/postgres.py#L734-L767
def tablespace_create(name, location, options=None, owner=None, user=None, host=None, port=None, maintenance_db=None, password=None, runas=None): ''' Adds a tablespace to the Postgres server. CLI Example: .. code-block:: bash salt '*' postgres.tablespace_create tablespacename '/path/datadir' .. versionadded:: 2015.8.0 ''' owner_query = '' options_query = '' if owner: owner_query = 'OWNER "{0}"'.format(owner) # should come out looking like: 'OWNER postgres' if options: optionstext = ['{0} = {1}'.format(k, v) for k, v in six.iteritems(options)] options_query = 'WITH ( {0} )'.format(', '.join(optionstext)) # should come out looking like: 'WITH ( opt1 = 1.0, opt2 = 4.0 )' query = 'CREATE TABLESPACE "{0}" {1} LOCATION \'{2}\' {3}'.format(name, owner_query, location, options_query) # Execute the command ret = _psql_prepare_and_run(['-c', query], user=user, host=host, port=port, maintenance_db=maintenance_db, password=password, runas=runas) return ret['retcode'] == 0
[ "def", "tablespace_create", "(", "name", ",", "location", ",", "options", "=", "None", ",", "owner", "=", "None", ",", "user", "=", "None", ",", "host", "=", "None", ",", "port", "=", "None", ",", "maintenance_db", "=", "None", ",", "password", "=", "None", ",", "runas", "=", "None", ")", ":", "owner_query", "=", "''", "options_query", "=", "''", "if", "owner", ":", "owner_query", "=", "'OWNER \"{0}\"'", ".", "format", "(", "owner", ")", "# should come out looking like: 'OWNER postgres'", "if", "options", ":", "optionstext", "=", "[", "'{0} = {1}'", ".", "format", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "six", ".", "iteritems", "(", "options", ")", "]", "options_query", "=", "'WITH ( {0} )'", ".", "format", "(", "', '", ".", "join", "(", "optionstext", ")", ")", "# should come out looking like: 'WITH ( opt1 = 1.0, opt2 = 4.0 )'", "query", "=", "'CREATE TABLESPACE \"{0}\" {1} LOCATION \\'{2}\\' {3}'", ".", "format", "(", "name", ",", "owner_query", ",", "location", ",", "options_query", ")", "# Execute the command", "ret", "=", "_psql_prepare_and_run", "(", "[", "'-c'", ",", "query", "]", ",", "user", "=", "user", ",", "host", "=", "host", ",", "port", "=", "port", ",", "maintenance_db", "=", "maintenance_db", ",", "password", "=", "password", ",", "runas", "=", "runas", ")", "return", "ret", "[", "'retcode'", "]", "==", "0" ]
Adds a tablespace to the Postgres server. CLI Example: .. code-block:: bash salt '*' postgres.tablespace_create tablespacename '/path/datadir' .. versionadded:: 2015.8.0
[ "Adds", "a", "tablespace", "to", "the", "Postgres", "server", "." ]
python
train
pantsbuild/pants
src/python/pants/build_graph/build_graph.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/build_graph/build_graph.py#L182-L191
def get_derived_from(self, address): """Get the target the specified target was derived from. If a Target was injected programmatically, e.g. from codegen, this allows us to trace its ancestry. If a Target is not derived, default to returning itself. :API: public """ parent_address = self._derived_from_by_derivative.get(address, address) return self.get_target(parent_address)
[ "def", "get_derived_from", "(", "self", ",", "address", ")", ":", "parent_address", "=", "self", ".", "_derived_from_by_derivative", ".", "get", "(", "address", ",", "address", ")", "return", "self", ".", "get_target", "(", "parent_address", ")" ]
Get the target the specified target was derived from. If a Target was injected programmatically, e.g. from codegen, this allows us to trace its ancestry. If a Target is not derived, default to returning itself. :API: public
[ "Get", "the", "target", "the", "specified", "target", "was", "derived", "from", "." ]
python
train
bokeh/bokeh
bokeh/layouts.py
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/layouts.py#L100-L142
def column(*args, **kwargs): """ Create a column of Bokeh Layout objects. Forces all objects to have the same sizing_mode, which is required for complex layouts to work. Args: children (list of :class:`~bokeh.models.layouts.LayoutDOM` ): A list of instances for the column. Can be any of the following - :class:`~bokeh.models.plots.Plot`, :class:`~bokeh.models.widgets.widget.Widget`, :class:`~bokeh.models.layouts.Row`, :class:`~bokeh.models.layouts.Column`, :class:`~bokeh.models.tools.ToolbarBox`, :class:`~bokeh.models.layouts.Spacer`. sizing_mode (``"fixed"``, ``"stretch_both"``, ``"scale_width"``, ``"scale_height"``, ``"scale_both"`` ): How will the items in the layout resize to fill the available space. Default is ``"fixed"``. For more information on the different modes see :attr:`~bokeh.models.layouts.LayoutDOM.sizing_mode` description on :class:`~bokeh.models.layouts.LayoutDOM`. Returns: Column: A column of LayoutDOM objects all with the same sizing_mode. Examples: >>> column([plot_1, plot_2]) >>> column(children=[widget_1, plot_1], sizing_mode='stretch_both') """ sizing_mode = kwargs.pop('sizing_mode', None) children = kwargs.pop('children', None) children = _handle_children(*args, children=children) col_children = [] for item in children: if isinstance(item, LayoutDOM): if sizing_mode is not None and _has_auto_sizing(item): item.sizing_mode = sizing_mode col_children.append(item) else: raise ValueError("""Only LayoutDOM items can be inserted into a column. Tried to insert: %s of type %s""" % (item, type(item))) return Column(children=col_children, sizing_mode=sizing_mode, **kwargs)
[ "def", "column", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "sizing_mode", "=", "kwargs", ".", "pop", "(", "'sizing_mode'", ",", "None", ")", "children", "=", "kwargs", ".", "pop", "(", "'children'", ",", "None", ")", "children", "=", "_handle_children", "(", "*", "args", ",", "children", "=", "children", ")", "col_children", "=", "[", "]", "for", "item", "in", "children", ":", "if", "isinstance", "(", "item", ",", "LayoutDOM", ")", ":", "if", "sizing_mode", "is", "not", "None", "and", "_has_auto_sizing", "(", "item", ")", ":", "item", ".", "sizing_mode", "=", "sizing_mode", "col_children", ".", "append", "(", "item", ")", "else", ":", "raise", "ValueError", "(", "\"\"\"Only LayoutDOM items can be inserted into a column. Tried to insert: %s of type %s\"\"\"", "%", "(", "item", ",", "type", "(", "item", ")", ")", ")", "return", "Column", "(", "children", "=", "col_children", ",", "sizing_mode", "=", "sizing_mode", ",", "*", "*", "kwargs", ")" ]
Create a column of Bokeh Layout objects. Forces all objects to have the same sizing_mode, which is required for complex layouts to work. Args: children (list of :class:`~bokeh.models.layouts.LayoutDOM` ): A list of instances for the column. Can be any of the following - :class:`~bokeh.models.plots.Plot`, :class:`~bokeh.models.widgets.widget.Widget`, :class:`~bokeh.models.layouts.Row`, :class:`~bokeh.models.layouts.Column`, :class:`~bokeh.models.tools.ToolbarBox`, :class:`~bokeh.models.layouts.Spacer`. sizing_mode (``"fixed"``, ``"stretch_both"``, ``"scale_width"``, ``"scale_height"``, ``"scale_both"`` ): How will the items in the layout resize to fill the available space. Default is ``"fixed"``. For more information on the different modes see :attr:`~bokeh.models.layouts.LayoutDOM.sizing_mode` description on :class:`~bokeh.models.layouts.LayoutDOM`. Returns: Column: A column of LayoutDOM objects all with the same sizing_mode. Examples: >>> column([plot_1, plot_2]) >>> column(children=[widget_1, plot_1], sizing_mode='stretch_both')
[ "Create", "a", "column", "of", "Bokeh", "Layout", "objects", ".", "Forces", "all", "objects", "to", "have", "the", "same", "sizing_mode", "which", "is", "required", "for", "complex", "layouts", "to", "work", "." ]
python
train
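A runnable sketch of the column helper with two trivial figures; the data values are made up:

from bokeh.plotting import figure, show
from bokeh.layouts import column

p1 = figure()
p1.line([1, 2, 3], [1, 4, 9])
p2 = figure()
p2.line([1, 2, 3], [3, 2, 1])
show(column(p1, p2, sizing_mode='scale_width'))  # both children inherit the sizing mode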
kata198/indexedredis
IndexedRedis/fields/foreign.py
https://github.com/kata198/indexedredis/blob/f9c85adcf5218dac25acb06eedc63fc2950816fa/IndexedRedis/fields/foreign.py#L243-L258
def objHasUnsavedChanges(self): ''' objHasUnsavedChanges - @see ForeignLinkData.objHasUnsavedChanges True if ANY object has unsaved changes. ''' if not self.obj: return False for thisObj in self.obj: if not thisObj: continue if thisObj.hasUnsavedChanges(cascadeObjects=True): return True return False
[ "def", "objHasUnsavedChanges", "(", "self", ")", ":", "if", "not", "self", ".", "obj", ":", "return", "False", "for", "thisObj", "in", "self", ".", "obj", ":", "if", "not", "thisObj", ":", "continue", "if", "thisObj", ".", "hasUnsavedChanges", "(", "cascadeObjects", "=", "True", ")", ":", "return", "True", "return", "False" ]
objHasUnsavedChanges - @see ForeignLinkData.objHasUnsavedChanges True if ANY object has unsaved changes.
[ "objHasUnsavedChanges", "-", "@see", "ForeignLinkData", ".", "objHasUnsavedChanges" ]
python
valid
binux/pyspider
pyspider/libs/response.py
https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/libs/response.py#L140-L147
def doc(self): """Returns a PyQuery object of the response's content""" if hasattr(self, '_doc'): return self._doc elements = self.etree doc = self._doc = PyQuery(elements) doc.make_links_absolute(utils.text(self.url)) return doc
[ "def", "doc", "(", "self", ")", ":", "if", "hasattr", "(", "self", ",", "'_doc'", ")", ":", "return", "self", ".", "_doc", "elements", "=", "self", ".", "etree", "doc", "=", "self", ".", "_doc", "=", "PyQuery", "(", "elements", ")", "doc", ".", "make_links_absolute", "(", "utils", ".", "text", "(", "self", ".", "url", ")", ")", "return", "doc" ]
Returns a PyQuery object of the response's content
[ "Returns", "a", "PyQuery", "object", "of", "the", "response", "s", "content" ]
python
train
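The same PyQuery mechanics in isolation; the markup and base URL are made up:

from pyquery import PyQuery

doc = PyQuery('<html><body><a href="/page2">next</a></body></html>')
doc.make_links_absolute(base_url='http://example.com/')
print(doc('a').attr('href'))  # http://example.com/page2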
jbittel/django-mama-cas
mama_cas/models.py
https://github.com/jbittel/django-mama-cas/blob/03935d97442b46d8127ab9e1cd8deb96953fe156/mama_cas/models.py#L42-L56
def create_ticket(self, ticket=None, **kwargs): """ Create a new ``Ticket``. Additional arguments are passed to the ``create()`` function. Return the newly created ``Ticket``. """ if not ticket: ticket = self.create_ticket_str() if 'service' in kwargs: kwargs['service'] = clean_service_url(kwargs['service']) if 'expires' not in kwargs: expires = now() + timedelta(seconds=self.model.TICKET_EXPIRE) kwargs['expires'] = expires t = self.create(ticket=ticket, **kwargs) logger.debug("Created %s %s" % (t.name, t.ticket)) return t
[ "def", "create_ticket", "(", "self", ",", "ticket", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "not", "ticket", ":", "ticket", "=", "self", ".", "create_ticket_str", "(", ")", "if", "'service'", "in", "kwargs", ":", "kwargs", "[", "'service'", "]", "=", "clean_service_url", "(", "kwargs", "[", "'service'", "]", ")", "if", "'expires'", "not", "in", "kwargs", ":", "expires", "=", "now", "(", ")", "+", "timedelta", "(", "seconds", "=", "self", ".", "model", ".", "TICKET_EXPIRE", ")", "kwargs", "[", "'expires'", "]", "=", "expires", "t", "=", "self", ".", "create", "(", "ticket", "=", "ticket", ",", "*", "*", "kwargs", ")", "logger", ".", "debug", "(", "\"Created %s %s\"", "%", "(", "t", ".", "name", ",", "t", ".", "ticket", ")", ")", "return", "t" ]
Create a new ``Ticket``. Additional arguments are passed to the ``create()`` function. Return the newly created ``Ticket``.
[ "Create", "a", "new", "Ticket", ".", "Additional", "arguments", "are", "passed", "to", "the", "create", "()", "function", ".", "Return", "the", "newly", "created", "Ticket", "." ]
python
train
PyCQA/pylint-django
pylint_django/plugin.py
https://github.com/PyCQA/pylint-django/blob/0bbee433519f48134df4a797341c4196546a454e/pylint_django/plugin.py#L24-L41
def register(linter): """ Registering additional checkers. """ # add all of the checkers register_checkers(linter) # register any checking fiddlers try: from pylint_django.augmentations import apply_augmentations apply_augmentations(linter) except ImportError: # probably trying to execute pylint_django when Django isn't installed # in this case the django-not-installed checker will kick-in pass if not compat.LOAD_CONFIGURATION_SUPPORTED: load_configuration(linter)
[ "def", "register", "(", "linter", ")", ":", "# add all of the checkers", "register_checkers", "(", "linter", ")", "# register any checking fiddlers", "try", ":", "from", "pylint_django", ".", "augmentations", "import", "apply_augmentations", "apply_augmentations", "(", "linter", ")", "except", "ImportError", ":", "# probably trying to execute pylint_django when Django isn't installed", "# in this case the django-not-installed checker will kick-in", "pass", "if", "not", "compat", ".", "LOAD_CONFIGURATION_SUPPORTED", ":", "load_configuration", "(", "linter", ")" ]
Registering additional checkers.
[ "Registering", "additional", "checkers", "." ]
python
train
bsmurphy/PyKrige
pykrige/rk.py
https://github.com/bsmurphy/PyKrige/blob/a4db3003b0b5688658c12faeb95a5a8b2b14b433/pykrige/rk.py#L254-L273
def score(self, p, x, y, sample_weight=None): """ Overloading default regression score method Parameters ---------- p: ndarray (Ns, d) array of predictor variables (Ns samples, d dimensions) for regression x: ndarray ndarray of (x, y) points. Needs to be a (Ns, 2) array corresponding to the lon/lat, for example. array of Points, (x, y, z) pairs of shape (N, 3) for 3d kriging y: ndarray array of targets (Ns, ) """ return r2_score(y_pred=self.predict(p, x), y_true=y, sample_weight=sample_weight)
[ "def", "score", "(", "self", ",", "p", ",", "x", ",", "y", ",", "sample_weight", "=", "None", ")", ":", "return", "r2_score", "(", "y_pred", "=", "self", ".", "predict", "(", "p", ",", "x", ")", ",", "y_true", "=", "y", ",", "sample_weight", "=", "sample_weight", ")" ]
Overloading default regression score method Parameters ---------- p: ndarray (Ns, d) array of predictor variables (Ns samples, d dimensions) for regression x: ndarray ndarray of (x, y) points. Needs to be a (Ns, 2) array corresponding to the lon/lat, for example. array of Points, (x, y, z) pairs of shape (N, 3) for 3d kriging y: ndarray array of targets (Ns, )
[ "Overloading", "default", "regression", "score", "method" ]
python
train
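The score itself is plain scikit-learn r2_score; a tiny check with made-up numbers:

from sklearn.metrics import r2_score

y_true = [2.0, 3.0, 5.0, 4.0]
y_pred = [2.1, 2.9, 5.2, 3.8]
print(r2_score(y_true=y_true, y_pred=y_pred))  # close to 1.0 for a good fit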
angr/angr
angr/analyses/reassembler.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/analyses/reassembler.py#L1223-L1236
def desymbolize(self): """ We believe this was a pointer and symbolized it before. Now we want to desymbolize it. The following actions are performed: - Reload content from memory - Mark the sort as 'unknown' :return: None """ self.sort = 'unknown' content = self.binary.fast_memory_load(self.addr, self.size, bytes) self.content = [ content ]
[ "def", "desymbolize", "(", "self", ")", ":", "self", ".", "sort", "=", "'unknown'", "content", "=", "self", ".", "binary", ".", "fast_memory_load", "(", "self", ".", "addr", ",", "self", ".", "size", ",", "bytes", ")", "self", ".", "content", "=", "[", "content", "]" ]
We believe this was a pointer and symbolized it before. Now we want to desymbolize it. The following actions are performed: - Reload content from memory - Mark the sort as 'unknown' :return: None
[ "We", "believe", "this", "was", "a", "pointer", "and", "symbolized", "it", "before", ".", "Now", "we", "want", "to", "desymbolize", "it", "." ]
python
train
JohnVinyard/zounds
zounds/timeseries/audiosamples.py
https://github.com/JohnVinyard/zounds/blob/337b3f98753d09eaab1c72dcd37bb852a3fa5ac6/zounds/timeseries/audiosamples.py#L149-L158
def mono(self): """ Return this instance summed to mono. If the instance is already mono, this is a no-op. """ if self.channels == 1: return self x = self.sum(axis=1) * 0.5 return AudioSamples(x, self.samplerate)
[ "def", "mono", "(", "self", ")", ":", "if", "self", ".", "channels", "==", "1", ":", "return", "self", "x", "=", "self", ".", "sum", "(", "axis", "=", "1", ")", "*", "0.5", "y", "=", "x", "*", "0.5", "return", "AudioSamples", "(", "y", ",", "self", ".", "samplerate", ")" ]
Return this instance summed to mono. If the instance is already mono, this is a no-op.
[ "Return", "this", "instance", "summed", "to", "mono", ".", "If", "the", "instance", "is", "already", "mono", "this", "is", "a", "no", "-", "op", "." ]
python
train
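The mixdown arithmetic in isolation with NumPy; the sample count is made up:

import numpy as np

stereo = np.random.randn(44100, 2).astype('float32')  # one second of two-channel audio
mono = stereo.sum(axis=1) * 0.5                       # equal-weight average of the channels
assert mono.shape == (44100,)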
seme0021/python-zillow
zillow/api.py
https://github.com/seme0021/python-zillow/blob/cddb9bad91d6c0ecdb67dbb9865b9392bd5116ab/zillow/api.py#L35-L68
def GetSearchResults(self, zws_id, address, citystatezip, retnzestimate=False): """ The GetSearchResults API finds a property for a specified address. The content returned contains the address for the property or properties as well as the Zillow Property ID (ZPID) and current Zestimate. It also includes the date the Zestimate was computed, a valuation range and the Zestimate ranking for the property within its ZIP code. The GetSearchResults API Web Service is located at: http://www.zillow.com/webservice/GetSearchResults.htm :param zws_id: The Zillow Web Service Identifier. Each subscriber to Zillow Web Services is uniquely identified by an ID sequence and every request to Web services requires this ID. :param address: The address of the property to search. This string should be URL encoded. :param citystatezip: The city+state combination and/or ZIP code for which to search. This string should be URL encoded. Note that giving both city and state is required. Using just one will not work. :param retnzestimate: Return Rent Zestimate information if available (boolean true/false, default: false) :return: """ url = '%s/GetSearchResults.htm' % (self.base_url) parameters = {'zws-id': zws_id} if address and citystatezip: parameters['address'] = address parameters['citystatezip'] = citystatezip else: raise ZillowError({'message': "Specify address and citystatezip."}) if retnzestimate: parameters['retnzestimate'] = 'true' resp = self._RequestUrl(url, 'GET', data=parameters) data = resp.content.decode('utf-8') xmltodict_data = xmltodict.parse(data) place = Place() try: place.set_data(xmltodict_data.get('SearchResults:searchresults', None)['response']['results']['result']) except: raise ZillowError({'message': "Zillow did not return a valid response: %s" % data}) return place
[ "def", "GetSearchResults", "(", "self", ",", "zws_id", ",", "address", ",", "citystatezip", ",", "retnzestimate", "=", "False", ")", ":", "url", "=", "'%s/GetSearchResults.htm'", "%", "(", "self", ".", "base_url", ")", "parameters", "=", "{", "'zws-id'", ":", "zws_id", "}", "if", "address", "and", "citystatezip", ":", "parameters", "[", "'address'", "]", "=", "address", "parameters", "[", "'citystatezip'", "]", "=", "citystatezip", "else", ":", "raise", "ZillowError", "(", "{", "'message'", ":", "\"Specify address and citystatezip.\"", "}", ")", "if", "retnzestimate", ":", "parameters", "[", "'retnzestimate'", "]", "=", "'true'", "resp", "=", "self", ".", "_RequestUrl", "(", "url", ",", "'GET'", ",", "data", "=", "parameters", ")", "data", "=", "resp", ".", "content", ".", "decode", "(", "'utf-8'", ")", "xmltodict_data", "=", "xmltodict", ".", "parse", "(", "data", ")", "place", "=", "Place", "(", ")", "try", ":", "place", ".", "set_data", "(", "xmltodict_data", ".", "get", "(", "'SearchResults:searchresults'", ",", "None", ")", "[", "'response'", "]", "[", "'results'", "]", "[", "'result'", "]", ")", "except", ":", "raise", "ZillowError", "(", "{", "'message'", ":", "\"Zillow did not return a valid response: %s\"", "%", "data", "}", ")", "return", "place" ]
The GetSearchResults API finds a property for a specified address. The content returned contains the address for the property or properties as well as the Zillow Property ID (ZPID) and current Zestimate. It also includes the date the Zestimate was computed, a valuation range and the Zestimate ranking for the property within its ZIP code. The GetSearchResults API Web Service is located at: http://www.zillow.com/webservice/GetSearchResults.htm :param zws_id: The Zillow Web Service Identifier. Each subscriber to Zillow Web Services is uniquely identified by an ID sequence and every request to Web services requires this ID. :param address: The address of the property to search. This string should be URL encoded. :param citystatezip: The city+state combination and/or ZIP code for which to search. This string should be URL encoded. Note that giving both city and state is required. Using just one will not work. :param retnzestimate: Return Rent Zestimate information if available (boolean true/false, default: false) :return:
[ "The", "GetSearchResults", "API", "finds", "a", "property", "for", "a", "specified", "address", ".", "The", "content", "returned", "contains", "the", "address", "for", "the", "property", "or", "properties", "as", "well", "as", "the", "Zillow", "Property", "ID", "(", "ZPID", ")", "and", "current", "Zestimate", ".", "It", "also", "includes", "the", "date", "the", "Zestimate", "was", "computed", "a", "valuation", "range", "and", "the", "Zestimate", "ranking", "for", "the", "property", "within", "its", "ZIP", "code", ".", "The", "GetSearchResults", "API", "Web", "Service", "is", "located", "at", ":", "http", ":", "//", "www", ".", "zillow", ".", "com", "/", "webservice", "/", "GetSearchResults", ".", "htm", ":", "param", "zws_id", ":", "The", "Zillow", "Web", "Service", "Identifier", ".", "Each", "subscriber", "to", "Zillow", "Web", "Services", "is", "uniquely", "identified", "by", "an", "ID", "sequence", "and", "every", "request", "to", "Web", "services", "requires", "this", "ID", ".", ":", "param", "address", ":", "The", "address", "of", "the", "property", "to", "search", ".", "This", "string", "should", "be", "URL", "encoded", ".", ":", "param", "citystatezip", ":", "The", "city", "+", "state", "combination", "and", "/", "or", "ZIP", "code", "for", "which", "to", "search", ".", "This", "string", "should", "be", "URL", "encoded", ".", "Note", "that", "giving", "both", "city", "and", "state", "is", "required", ".", "Using", "just", "one", "will", "not", "work", ".", ":", "param", "retnzestimat", ":", "Return", "Rent", "Zestimate", "information", "if", "available", "(", "boolean", "true", "/", "false", "default", ":", "false", ")", ":", "return", ":" ]
python
train
SBRG/ssbio
ssbio/protein/structure/properties/freesasa.py
https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/structure/properties/freesasa.py#L43-L93
def parse_rsa_data(rsa_outfile, ignore_hets=True): """Process a NACCESS or freesasa RSA output file. Adapted from Biopython NACCESS module. Args: rsa_outfile (str): Path to RSA output file ignore_hets (bool): If HETATMs should be excluded from the final dictionary. This is extremely important when loading this information into a ChainProp's SeqRecord, since this will throw off the sequence matching. Returns: dict: Per-residue dictionary of RSA values """ naccess_rel_dict = OrderedDict() with open(rsa_outfile, 'r') as f: for line in f: if line.startswith('RES'): res_name = line[4:7] chain_id = line[8] resseq = int(line[9:13]) icode = line[13] res_id = (' ', resseq, icode) all_atoms_abs = line[16:22].strip() all_atoms_rel = line[23:28].strip() side_chain_abs = line[29:35].strip() side_chain_rel = line[36:41].strip() main_chain_abs = line[42:48].strip() main_chain_rel = line[49:54].strip() non_polar_abs = line[55:61].strip() non_polar_rel = line[62:67].strip() all_polar_abs = line[68:74].strip() all_polar_rel = line[75:80].strip() if all_atoms_rel =='N/A' and main_chain_rel =='N/A' and all_polar_rel =='N/A' and non_polar_rel =='N/A' and side_chain_rel =='N/A' and ignore_hets: continue naccess_rel_dict[(chain_id, res_id)] = { 'res_name' : res_name, 'all_atoms_abs' : ssbio.utils.conv_to_float(all_atoms_abs, inf_str='N/A'), 'all_atoms_rel' : ssbio.utils.conv_to_float(all_atoms_rel, inf_str='N/A'), 'side_chain_abs': ssbio.utils.conv_to_float(side_chain_abs, inf_str='N/A'), 'side_chain_rel': ssbio.utils.conv_to_float(side_chain_rel, inf_str='N/A'), 'main_chain_abs': ssbio.utils.conv_to_float(main_chain_abs, inf_str='N/A'), 'main_chain_rel': ssbio.utils.conv_to_float(main_chain_rel, inf_str='N/A'), 'non_polar_abs' : ssbio.utils.conv_to_float(non_polar_abs, inf_str='N/A'), 'non_polar_rel' : ssbio.utils.conv_to_float(non_polar_rel, inf_str='N/A'), 'all_polar_abs' : ssbio.utils.conv_to_float(all_polar_abs, inf_str='N/A'), 'all_polar_rel' : ssbio.utils.conv_to_float(all_polar_rel, inf_str='N/A')} return naccess_rel_dict
[ "def", "parse_rsa_data", "(", "rsa_outfile", ",", "ignore_hets", "=", "True", ")", ":", "naccess_rel_dict", "=", "OrderedDict", "(", ")", "with", "open", "(", "rsa_outfile", ",", "'r'", ")", "as", "f", ":", "for", "line", "in", "f", ":", "if", "line", ".", "startswith", "(", "'RES'", ")", ":", "res_name", "=", "line", "[", "4", ":", "7", "]", "chain_id", "=", "line", "[", "8", "]", "resseq", "=", "int", "(", "line", "[", "9", ":", "13", "]", ")", "icode", "=", "line", "[", "13", "]", "res_id", "=", "(", "' '", ",", "resseq", ",", "icode", ")", "all_atoms_abs", "=", "line", "[", "16", ":", "22", "]", ".", "strip", "(", ")", "all_atoms_rel", "=", "line", "[", "23", ":", "28", "]", ".", "strip", "(", ")", "side_chain_abs", "=", "line", "[", "29", ":", "35", "]", ".", "strip", "(", ")", "side_chain_rel", "=", "line", "[", "36", ":", "41", "]", ".", "strip", "(", ")", "main_chain_abs", "=", "line", "[", "42", ":", "48", "]", ".", "strip", "(", ")", "main_chain_rel", "=", "line", "[", "49", ":", "54", "]", ".", "strip", "(", ")", "non_polar_abs", "=", "line", "[", "55", ":", "61", "]", ".", "strip", "(", ")", "non_polar_rel", "=", "line", "[", "62", ":", "67", "]", ".", "strip", "(", ")", "all_polar_abs", "=", "line", "[", "68", ":", "74", "]", ".", "strip", "(", ")", "all_polar_rel", "=", "line", "[", "75", ":", "80", "]", ".", "strip", "(", ")", "if", "all_atoms_rel", "==", "'N/A'", "and", "main_chain_rel", "==", "'N/A'", "and", "all_polar_rel", "==", "'N/A'", "and", "non_polar_rel", "==", "'N/A'", "and", "side_chain_rel", "==", "'N/A'", "and", "ignore_hets", ":", "continue", "naccess_rel_dict", "[", "(", "chain_id", ",", "res_id", ")", "]", "=", "{", "'res_name'", ":", "res_name", ",", "'all_atoms_abs'", ":", "ssbio", ".", "utils", ".", "conv_to_float", "(", "all_atoms_abs", ",", "inf_str", "=", "'N/A'", ")", ",", "'all_atoms_rel'", ":", "ssbio", ".", "utils", ".", "conv_to_float", "(", "all_atoms_rel", ",", "inf_str", "=", "'N/A'", ")", ",", "'side_chain_abs'", ":", "ssbio", ".", "utils", ".", "conv_to_float", "(", "side_chain_abs", ",", "inf_str", "=", "'N/A'", ")", ",", "'side_chain_rel'", ":", "ssbio", ".", "utils", ".", "conv_to_float", "(", "side_chain_rel", ",", "inf_str", "=", "'N/A'", ")", ",", "'main_chain_abs'", ":", "ssbio", ".", "utils", ".", "conv_to_float", "(", "main_chain_abs", ",", "inf_str", "=", "'N/A'", ")", ",", "'main_chain_rel'", ":", "ssbio", ".", "utils", ".", "conv_to_float", "(", "main_chain_rel", ",", "inf_str", "=", "'N/A'", ")", ",", "'non_polar_abs'", ":", "ssbio", ".", "utils", ".", "conv_to_float", "(", "non_polar_abs", ",", "inf_str", "=", "'N/A'", ")", ",", "'non_polar_rel'", ":", "ssbio", ".", "utils", ".", "conv_to_float", "(", "non_polar_rel", ",", "inf_str", "=", "'N/A'", ")", ",", "'all_polar_abs'", ":", "ssbio", ".", "utils", ".", "conv_to_float", "(", "all_polar_abs", ",", "inf_str", "=", "'N/A'", ")", ",", "'all_polar_rel'", ":", "ssbio", ".", "utils", ".", "conv_to_float", "(", "all_polar_rel", ",", "inf_str", "=", "'N/A'", ")", "}", "return", "naccess_rel_dict" ]
Process a NACCESS or freesasa RSA output file. Adapted from Biopython NACCESS module. Args: rsa_outfile (str): Path to RSA output file ignore_hets (bool): If HETATMs should be excluded from the final dictionary. This is extremely important when loading this information into a ChainProp's SeqRecord, since this will throw off the sequence matching. Returns: dict: Per-residue dictionary of RSA values
[ "Process", "a", "NACCESS", "or", "freesasa", "RSA", "output", "file", ".", "Adapted", "from", "Biopython", "NACCESS", "modele", ".", "Args", ":", "rsa_outfile", "(", "str", ")", ":", "Path", "to", "RSA", "output", "file", "ignore_hets", "(", "bool", ")", ":", "If", "HETATMs", "should", "be", "excluded", "from", "the", "final", "dictionary", ".", "This", "is", "extremely", "important", "when", "loading", "this", "information", "into", "a", "ChainProp", "s", "SeqRecord", "since", "this", "will", "throw", "off", "the", "sequence", "matching", "." ]
python
train
mrjoes/sockjs-tornado
sockjs/tornado/transports/base.py
https://github.com/mrjoes/sockjs-tornado/blob/bd3a99b407f1181f054b3b1730f438dde375ca1c/sockjs/tornado/transports/base.py#L12-L18
def get_conn_info(self): """Return `ConnectionInfo` object from current transport""" return session.ConnectionInfo(self.request.remote_ip, self.request.cookies, self.request.arguments, self.request.headers, self.request.path)
[ "def", "get_conn_info", "(", "self", ")", ":", "return", "session", ".", "ConnectionInfo", "(", "self", ".", "request", ".", "remote_ip", ",", "self", ".", "request", ".", "cookies", ",", "self", ".", "request", ".", "arguments", ",", "self", ".", "request", ".", "headers", ",", "self", ".", "request", ".", "path", ")" ]
Return `ConnectionInfo` object from current transport
[ "Return", "ConnectionInfo", "object", "from", "current", "transport" ]
python
train
iotile/coretools
iotileemulate/iotile/emulate/reference/controller_features/config_database.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotileemulate/iotile/emulate/reference/controller_features/config_database.py#L365-L375
def get_config_database_info(self): """Get memory usage and space statistics on the config database.""" max_size = self.config_database.data_size max_entries = self.config_database.max_entries() used_size = self.config_database.data_index used_entries = len(self.config_database.entries) invalid_size = sum(x.data_space() for x in self.config_database.entries if not x.valid) invalid_entries = sum(1 for x in self.config_database.entries if not x.valid) return [max_size, used_size, invalid_size, used_entries, invalid_entries, max_entries, 0]
[ "def", "get_config_database_info", "(", "self", ")", ":", "max_size", "=", "self", ".", "config_database", ".", "data_size", "max_entries", "=", "self", ".", "config_database", ".", "max_entries", "(", ")", "used_size", "=", "self", ".", "config_database", ".", "data_index", "used_entries", "=", "len", "(", "self", ".", "config_database", ".", "entries", ")", "invalid_size", "=", "sum", "(", "x", ".", "data_space", "(", ")", "for", "x", "in", "self", ".", "config_database", ".", "entries", "if", "not", "x", ".", "valid", ")", "invalid_entries", "=", "sum", "(", "1", "for", "x", "in", "self", ".", "config_database", ".", "entries", "if", "not", "x", ".", "valid", ")", "return", "[", "max_size", ",", "used_size", ",", "invalid_size", ",", "used_entries", ",", "invalid_entries", ",", "max_entries", ",", "0", "]" ]
Get memory usage and space statistics on the config database.
[ "Get", "memory", "usage", "and", "space", "statistics", "on", "the", "config", "database", "." ]
python
train
MisterY/gnucash-portfolio
gnucash_portfolio/lib/datetimeutils.py
https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/lib/datetimeutils.py#L71-L80
def get_period_last_week() -> str: """ Returns the last week as a period string """ today = Datum() today.start_of_day() # start_date = today - timedelta(days=7) start_date = today.clone() start_date.subtract_days(7) period = get_period(start_date.value, today.value) return period
[ "def", "get_period_last_week", "(", ")", "->", "str", ":", "today", "=", "Datum", "(", ")", "today", ".", "start_of_day", "(", ")", "# start_date = today - timedelta(days=7)", "start_date", "=", "today", ".", "clone", "(", ")", "start_date", ".", "subtract_days", "(", "7", ")", "period", "=", "get_period", "(", "start_date", ".", "value", ",", "today", ".", "value", ")", "return", "period" ]
Returns the last week as a period string
[ "Returns", "the", "last", "week", "as", "a", "period", "string" ]
python
train
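Equivalent date arithmetic with only the standard library; the exact string produced by get_period is an assumption:

from datetime import datetime, timedelta

today = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
start = today - timedelta(days=7)
period = f'{start:%Y-%m-%d} - {today:%Y-%m-%d}'  # e.g. '2019-03-01 - 2019-03-08'
print(period)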
PMEAL/OpenPNM
openpnm/algorithms/OrdinaryPercolation.py
https://github.com/PMEAL/OpenPNM/blob/0547b5724ffedc0a593aae48639d36fe10e0baed/openpnm/algorithms/OrdinaryPercolation.py#L105-L177
def setup(self, phase=None, access_limited=None, mode='', throat_entry_pressure='', pore_entry_pressure='', pore_volume='', throat_volume=''): r""" Used to specify necessary arguments to the simulation. This method is useful for resetting the algorithm or applying more explicit control. Parameters ---------- phase : OpenPNM Phase object The Phase object containing the physical properties of the invading fluid. access_limited : boolean If ``True`` the invading phase can only enter the network from the invasion sites specified with ``set_inlets``. Otherwise, invading clusters can appear anywhere in the network. This second case is the normal *ordinary percolation* in the traditional sense, while the first case is more physically representative of invading fluids. mode : string Specifies the type of percolation process to simulate. Options are: **'bond'** - The percolation process is controlled by bond entry thresholds. **'site'** - The percolation process is controlled by site entry thresholds. pore_entry_pressure : string The dictionary key on the Phase object where the pore entry pressure values are stored. The default is 'pore.capillary_pressure'. This is only accessed if the ``mode`` is set to site percolation. throat_entry_pressure : string The dictionary key on the Phase object where the throat entry pressure values are stored. The default is 'throat.capillary_pressure'. This is only accessed if the ``mode`` is set to bond percolation. 'pore_volume' : string The dictionary key containing the pore volume information. 'throat_volume' : string The dictionary key containing the throat volume information. """ if phase: self.settings['phase'] = phase.name if throat_entry_pressure: self.settings['throat_entry_pressure'] = throat_entry_pressure phase = self.project.find_phase(self) self['throat.entry_pressure'] = phase[throat_entry_pressure] if pore_entry_pressure: self.settings['pore_entry_pressure'] = pore_entry_pressure phase = self.project.find_phase(self) self['pore.entry_pressure'] = phase[pore_entry_pressure] if mode: self.settings['mode'] = mode if access_limited is not None: self.settings['access_limited'] = access_limited if pore_volume: self.settings['pore_volume'] = pore_volume if throat_volume: self.settings['throat_volume'] = throat_volume
[ "def", "setup", "(", "self", ",", "phase", "=", "None", ",", "access_limited", "=", "None", ",", "mode", "=", "''", ",", "throat_entry_pressure", "=", "''", ",", "pore_entry_pressure", "=", "''", ",", "pore_volume", "=", "''", ",", "throat_volume", "=", "''", ")", ":", "if", "phase", ":", "self", ".", "settings", "[", "'phase'", "]", "=", "phase", ".", "name", "if", "throat_entry_pressure", ":", "self", ".", "settings", "[", "'throat_entry_pressure'", "]", "=", "throat_entry_pressure", "phase", "=", "self", ".", "project", ".", "find_phase", "(", "self", ")", "self", "[", "'throat.entry_pressure'", "]", "=", "phase", "[", "throat_entry_pressure", "]", "if", "pore_entry_pressure", ":", "self", ".", "settings", "[", "'pore_entry_pressure'", "]", "=", "pore_entry_pressure", "phase", "=", "self", ".", "project", ".", "find_phase", "(", "self", ")", "self", "[", "'pore.entry_pressure'", "]", "=", "phase", "[", "pore_entry_pressure", "]", "if", "mode", ":", "self", ".", "settings", "[", "'mode'", "]", "=", "mode", "if", "access_limited", "is", "not", "None", ":", "self", ".", "settings", "[", "'access_limited'", "]", "=", "access_limited", "if", "pore_volume", ":", "self", ".", "settings", "[", "'pore_volume'", "]", "=", "pore_volume", "if", "throat_volume", ":", "self", ".", "settings", "[", "'throat_volume'", "]", "=", "throat_volume" ]
r""" Used to specify necessary arguments to the simulation. This method is useful for resetting the algorithm or applying more explicit control. Parameters ---------- phase : OpenPNM Phase object The Phase object containing the physical properties of the invading fluid. access_limited : boolean If ``True`` the invading phase can only enter the network from the invasion sites specified with ``set_inlets``. Otherwise, invading clusters can appear anywhere in the network. This second case is the normal *ordinary percolation* in the traditional sense, while the first case is more physically representative of invading fluids. mode : string Specifies the type of percolation process to simulate. Options are: **'bond'** - The percolation process is controlled by bond entry thresholds. **'site'** - The percolation process is controlled by site entry thresholds. pore_entry_pressure : string The dictionary key on the Phase object where the pore entry pressure values are stored. The default is 'pore.capillary_pressure'. This is only accessed if the ``mode`` is set to site percolation. throat_entry_pressure : string The dictionary key on the Phase object where the throat entry pressure values are stored. The default is 'throat.capillary_pressure'. This is only accessed if the ``mode`` is set to bond percolation. 'pore_volume' : string The dictionary key containing the pore volume information. 'throat_volume' : string The dictionary key containing the pore volume information.
[ "r", "Used", "to", "specify", "necessary", "arguments", "to", "the", "simulation", ".", "This", "method", "is", "useful", "for", "resetting", "the", "algorithm", "or", "applying", "more", "explicit", "control", "." ]
python
train
AustralianSynchrotron/lightflow
lightflow/workflows.py
https://github.com/AustralianSynchrotron/lightflow/blob/dc53dbc1d961e20fb144273baca258060705c03e/lightflow/workflows.py#L16-L49
def start_workflow(name, config, *, queue=DefaultJobQueueName.Workflow, clear_data_store=True, store_args=None): """ Start a single workflow by sending it to the workflow queue. Args: name (str): The name of the workflow that should be started. Refers to the name of the workflow file without the .py extension. config (Config): Reference to the configuration object from which the settings for the workflow are retrieved. queue (str): Name of the queue the workflow should be scheduled to. clear_data_store (bool): Remove any documents created during the workflow run in the data store after the run. store_args (dict): Dictionary of additional arguments that are ingested into the data store prior to the execution of the workflow. Returns: str: The ID of the workflow job. Raises: WorkflowArgumentError: If the workflow requires arguments to be set in store_args that were not supplied to the workflow. WorkflowImportError: If the import of the workflow fails. """ try: wf = Workflow.from_name(name, queue=queue, clear_data_store=clear_data_store, arguments=store_args) except DirectedAcyclicGraphInvalid as e: raise WorkflowDefinitionError(workflow_name=name, graph_name=e.graph_name) celery_app = create_app(config) result = celery_app.send_task(JobExecPath.Workflow, args=(wf,), queue=queue, routing_key=queue) return result.id
[ "def", "start_workflow", "(", "name", ",", "config", ",", "*", ",", "queue", "=", "DefaultJobQueueName", ".", "Workflow", ",", "clear_data_store", "=", "True", ",", "store_args", "=", "None", ")", ":", "try", ":", "wf", "=", "Workflow", ".", "from_name", "(", "name", ",", "queue", "=", "queue", ",", "clear_data_store", "=", "clear_data_store", ",", "arguments", "=", "store_args", ")", "except", "DirectedAcyclicGraphInvalid", "as", "e", ":", "raise", "WorkflowDefinitionError", "(", "workflow_name", "=", "name", ",", "graph_name", "=", "e", ".", "graph_name", ")", "celery_app", "=", "create_app", "(", "config", ")", "result", "=", "celery_app", ".", "send_task", "(", "JobExecPath", ".", "Workflow", ",", "args", "=", "(", "wf", ",", ")", ",", "queue", "=", "queue", ",", "routing_key", "=", "queue", ")", "return", "result", ".", "id" ]
Start a single workflow by sending it to the workflow queue. Args: name (str): The name of the workflow that should be started. Refers to the name of the workflow file without the .py extension. config (Config): Reference to the configuration object from which the settings for the workflow are retrieved. queue (str): Name of the queue the workflow should be scheduled to. clear_data_store (bool): Remove any documents created during the workflow run in the data store after the run. store_args (dict): Dictionary of additional arguments that are ingested into the data store prior to the execution of the workflow. Returns: str: The ID of the workflow job. Raises: WorkflowArgumentError: If the workflow requires arguments to be set in store_args that were not supplied to the workflow. WorkflowImportError: If the import of the workflow fails.
[ "Start", "a", "single", "workflow", "by", "sending", "it", "to", "the", "workflow", "queue", "." ]
python
train
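The final dispatch is ordinary Celery; a hedged sketch with a made-up broker URL and task path:

from celery import Celery

app = Celery(broker='redis://localhost:6379/0')        # broker URL is an assumption
result = app.send_task('lightflow.execute_workflow',   # task path is illustrative
                       args=({'name': 'example'},),
                       queue='workflow', routing_key='workflow')
print(result.id)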
LogicalDash/LiSE
ELiDE/ELiDE/card.py
https://github.com/LogicalDash/LiSE/blob/fe6fd4f0a7c1780e065f4c9babb9bc443af6bb84/ELiDE/ELiDE/card.py#L961-L978
def on_deckbuilder(self, *args): """Bind my deckbuilder to update my ``scroll``, and my ``scroll`` to update my deckbuilder. """ if self.deckbuilder is None: return att = 'deck_{}_hint_offsets'.format( 'x' if self.orientation == 'horizontal' else 'y' ) offs = getattr(self.deckbuilder, att) if len(offs) <= self.deckidx: Clock.schedule_once(self.on_deckbuilder, 0) return self.bind(scroll=self.handle_scroll) self.deckbuilder.bind(**{att: self.upd_scroll}) self.upd_scroll() self.deckbuilder._trigger_layout()
[ "def", "on_deckbuilder", "(", "self", ",", "*", "args", ")", ":", "if", "self", ".", "deckbuilder", "is", "None", ":", "return", "att", "=", "'deck_{}_hint_offsets'", ".", "format", "(", "'x'", "if", "self", ".", "orientation", "==", "'horizontal'", "else", "'y'", ")", "offs", "=", "getattr", "(", "self", ".", "deckbuilder", ",", "att", ")", "if", "len", "(", "offs", ")", "<=", "self", ".", "deckidx", ":", "Clock", ".", "schedule_once", "(", "self", ".", "on_deckbuilder", ",", "0", ")", "return", "self", ".", "bind", "(", "scroll", "=", "self", ".", "handle_scroll", ")", "self", ".", "deckbuilder", ".", "bind", "(", "*", "*", "{", "att", ":", "self", ".", "upd_scroll", "}", ")", "self", ".", "upd_scroll", "(", ")", "self", ".", "deckbuilder", ".", "_trigger_layout", "(", ")" ]
Bind my deckbuilder to update my ``scroll``, and my ``scroll`` to update my deckbuilder.
[ "Bind", "my", "deckbuilder", "to", "update", "my", "scroll", "and", "my", "scroll", "to", "update", "my", "deckbuilder", "." ]
python
train
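The interesting part of on_deckbuilder is the retry-until-ready pattern: when the deck offsets are not computed yet, the handler reschedules itself with Clock.schedule_once and bails out. A standalone stdlib mimic of that pattern (an illustration only, not project code):

# Defer a handler until its prerequisite exists, then run it on the retry.
import queue
pending = queue.SimpleQueue()
def handler(state):
    if not state['offsets']:          # prerequisite missing: re-queue, like
        pending.put(handler)          # Clock.schedule_once(self.on_deckbuilder, 0)
        return
    print('binding scroll handlers with', state['offsets'])
state = {'offsets': []}
handler(state)                        # first call defers itself
state['offsets'] = [0.2, 0.4]         # the layout pass fills the offsets in
pending.get()(state)                  # retried call succeeds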
saltstack/salt
salt/modules/parallels.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/parallels.py#L49-L59
def _normalize_args(args): ''' Return args as a list of strings ''' if isinstance(args, six.string_types): return shlex.split(args) if isinstance(args, (tuple, list)): return [six.text_type(arg) for arg in args] else: return [six.text_type(args)]
[ "def", "_normalize_args", "(", "args", ")", ":", "if", "isinstance", "(", "args", ",", "six", ".", "string_types", ")", ":", "return", "shlex", ".", "split", "(", "args", ")", "if", "isinstance", "(", "args", ",", "(", "tuple", ",", "list", ")", ")", ":", "return", "[", "six", ".", "text_type", "(", "arg", ")", "for", "arg", "in", "args", "]", "else", ":", "return", "[", "six", ".", "text_type", "(", "args", ")", "]" ]
Return args as a list of strings
[ "Return", "args", "as", "a", "list", "of", "strings" ]
python
train
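The three branches are easiest to see on concrete inputs; the expected values below follow directly from the shlex and six calls in the code:

# What _normalize_args returns for each input shape (Python 3 spelling).
import shlex
print(shlex.split('prlctl list --all'))     # ['prlctl', 'list', '--all'] (string branch)
print([str(a) for a in ('snapshot', 42)])   # ['snapshot', '42'] (tuple/list branch)
print([str(3.5)])                           # ['3.5'] (fallback branch)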
gem/oq-engine
openquake/baselib/general.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/baselib/general.py#L1131-L1138
def warn(msg, *args): """ Print a warning on stderr """ if not args: sys.stderr.write('WARNING: ' + msg) else: sys.stderr.write('WARNING: ' + msg % args)
[ "def", "warn", "(", "msg", ",", "*", "args", ")", ":", "if", "not", "args", ":", "sys", ".", "stderr", ".", "write", "(", "'WARNING: '", "+", "msg", ")", "else", ":", "sys", ".", "stderr", ".", "write", "(", "'WARNING: '", "+", "msg", "%", "args", ")" ]
Print a warning on stderr
[ "Print", "a", "warning", "on", "stderr" ]
python
train
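A usage sketch; since the function writes to stderr verbatim, the caller supplies the trailing newline. The mimic below is standalone so it runs without the package and keeps the same printf-style contract:

import sys

# Standalone mimic of warn() with the identical formatting behaviour.
def warn(msg, *args):
    sys.stderr.write('WARNING: ' + (msg % args if args else msg))

warn('disk %s is %d%% full\n', '/dev/sda1', 93)  # WARNING: disk /dev/sda1 is 93% full
warn('plain message, no interpolation\n')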
bitesofcode/projex
projex/cli.py
https://github.com/bitesofcode/projex/blob/d31743ec456a41428709968ab11a2cf6c6c76247/projex/cli.py#L91-L99
def usage(self): """ Returns the usage string for this method. :return <str> """ arg_list = ' '.join(self.cmd_args).upper() name = self.interface.name() return '%s [options] %s %s' % (name, self.__name__, arg_list)
[ "def", "usage", "(", "self", ")", ":", "arg_list", "=", "' '", ".", "join", "(", "self", ".", "cmd_args", ")", ".", "upper", "(", ")", "name", "=", "self", ".", "interface", ".", "name", "(", ")", "return", "'%s [options] %s %s'", "%", "(", "name", ",", "self", ".", "__name__", ",", "arg_list", ")" ]
Returns the usage string for this method. :return <str>
[ "Returns", "the", "usage", "string", "for", "this", "method", ".", ":", "return", "<str", ">" ]
python
train
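The assembled string can be reproduced standalone; the interface and command names below are invented:

# Mimic of usage(): '<interface> [options] <command> <ARGS>'.
def make_usage(interface_name, command_name, cmd_args):
    return '%s [options] %s %s' % (interface_name, command_name,
                                   ' '.join(cmd_args).upper())

print(make_usage('projex', 'build', ('target', 'mode')))  # projex [options] build TARGET MODE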
zerotk/easyfs
zerotk/easyfs/_easyfs.py
https://github.com/zerotk/easyfs/blob/140923db51fb91d5a5847ad17412e8bce51ba3da/zerotk/easyfs/_easyfs.py#L1782-L1797
def ExpandUser(path): ''' os.path.expanduser wrapper, necessary because it cannot handle unicode strings properly. This is not necessary in Python 3. :param path: .. seealso:: os.path.expanduser ''' if six.PY2: encoding = sys.getfilesystemencoding() path = path.encode(encoding) result = os.path.expanduser(path) if six.PY2: result = result.decode(encoding) return result
[ "def", "ExpandUser", "(", "path", ")", ":", "if", "six", ".", "PY2", ":", "encoding", "=", "sys", ".", "getfilesystemencoding", "(", ")", "path", "=", "path", ".", "encode", "(", "encoding", ")", "result", "=", "os", ".", "path", ".", "expanduser", "(", "path", ")", "if", "six", ".", "PY2", ":", "result", "=", "result", ".", "decode", "(", "encoding", ")", "return", "result" ]
os.path.expanduser wrapper, necessary because it cannot handle unicode strings properly. This is not necessary in Python 3. :param path: .. seealso:: os.path.expanduser
[ "os", ".", "path", ".", "expanduser", "wrapper", "necessary", "because", "it", "cannot", "handle", "unicode", "strings", "properly", "." ]
python
valid
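On Python 3 the wrapper reduces to the stdlib call, so a behavioural sketch needs only that; the Python 2 branch merely wraps the same call in an encode/decode round-trip with the filesystem encoding:

import os
print(os.path.expanduser('~/projects'))  # e.g. /home/alice/projects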
cloud9ers/gurumate
environment/share/doc/ipython/examples/parallel/workflow/wmanager.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/share/doc/ipython/examples/parallel/workflow/wmanager.py#L15-L29
def cleanup(controller, engines): """Cleanup routine to shut down all subprocesses we opened.""" import signal, time print('Starting cleanup') print('Stopping engines...') for e in engines: e.send_signal(signal.SIGINT) print('Stopping controller...') # so it can shut down its queues controller.send_signal(signal.SIGINT) time.sleep(0.1) print('Killing controller...') controller.kill() print('Cleanup done')
[ "def", "cleanup", "(", "controller", ",", "engines", ")", ":", "import", "signal", ",", "time", "print", "(", "'Starting cleanup'", ")", "print", "(", "'Stopping engines...'", ")", "for", "e", "in", "engines", ":", "e", ".", "send_signal", "(", "signal", ".", "SIGINT", ")", "print", "(", "'Stopping controller...'", ")", "# so it can shut down its queues", "controller", ".", "send_signal", "(", "signal", ".", "SIGINT", ")", "time", ".", "sleep", "(", "0.1", ")", "print", "(", "'Killing controller...'", ")", "controller", ".", "kill", "(", ")", "print", "(", "'Cleanup done'", ")" ]
Cleanup routine to shut down all subprocesses we opened.
[ "Cleanup", "routine", "to", "shut", "down", "all", "subprocesses", "we", "opened", "." ]
python
test
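A hedged, POSIX-only sketch of how cleanup would be driven; the sleep commands are invented stand-ins for a real controller and engines, and the function above is assumed to be in scope:

import subprocess
controller = subprocess.Popen(['sleep', '60'])
engines = [subprocess.Popen(['sleep', '60']) for _ in range(2)]
cleanup(controller, engines)  # SIGINT to each engine, SIGINT then kill() on the controller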
thespacedoctor/polyglot
polyglot/ebook.py
https://github.com/thespacedoctor/polyglot/blob/98038d746aa67e343b73b3ccee1e02d31dab81ec/polyglot/ebook.py#L246-L282
def _tmp_html_file(
        self,
        content):
    """*create a tmp html file with some content used for the header or footer of the ebook*

    **Key Arguments:**
        - ``content`` -- the content to include in the HTML file.
    """
    self.log.debug('starting the ``_tmp_html_file`` method')

    content = """

<hr>
<div style="text-align: center">
%(content)s
</div>
<hr>

""" % locals()

    now = datetime.now()
    now = now.strftime("%Y%m%dt%H%M%S%f")
    pathToWriteFile = "/tmp/%(now)s.html" % locals()
    try:
        self.log.debug("attempting to open the file %s" % (pathToWriteFile,))
        writeFile = codecs.open(
            pathToWriteFile, encoding='utf-8', mode='w')
    except IOError as e:
        message = 'could not open the file %s' % (pathToWriteFile,)
        self.log.critical(message)
        raise IOError(message)
    writeFile.write(content)
    writeFile.close()

    self.log.debug('completed the ``_tmp_html_file`` method')
    return pathToWriteFile
[ "def", "_tmp_html_file", "(", "self", ",", "content", ")", ":", "self", ".", "log", ".", "debug", "(", "'starting the ``_tmp_html_file`` method'", ")", "content", "=", "\"\"\"\n\n<hr>\n<div style=\"text-align: center\">\n%(content)s\n</div>\n<hr>\n\n\"\"\"", "%", "locals", "(", ")", "now", "=", "datetime", ".", "now", "(", ")", "now", "=", "now", ".", "strftime", "(", "\"%Y%m%dt%H%M%S%f\"", ")", "pathToWriteFile", "=", "\"/tmp/%(now)s.html\"", "%", "locals", "(", ")", "try", ":", "self", ".", "log", ".", "debug", "(", "\"attempting to open the file %s\"", "%", "(", "pathToWriteFile", ",", ")", ")", "writeFile", "=", "codecs", ".", "open", "(", "pathToWriteFile", ",", "encoding", "=", "'utf-8'", ",", "mode", "=", "'w'", ")", "except", "IOError", ",", "e", ":", "message", "=", "'could not open the file %s'", "%", "(", "pathToWriteFile", ",", ")", "self", ".", "log", ".", "critical", "(", "message", ")", "raise", "IOError", "(", "message", ")", "writeFile", ".", "write", "(", "content", ")", "writeFile", ".", "close", "(", ")", "self", ".", "log", ".", "debug", "(", "'completed the ``_tmp_html_file`` method'", ")", "return", "pathToWriteFile" ]
*create a tmp html file with some content used for the header or footer of the ebook* **Key Arguments:** - ``content`` -- the content to include in the HTML file.
[ "*", "create", "a", "tmp", "html", "file", "with", "some", "content", "used", "for", "the", "header", "or", "footer", "of", "the", "ebook", "*" ]
python
train
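The path and markup the helper produces can be sketched with the stdlib alone; only the timestamp format and the wrapper markup are taken from the code above:

from datetime import datetime

# The kind of path _tmp_html_file returns, plus the markup it writes.
stamp = datetime.now().strftime("%Y%m%dt%H%M%S%f")
print("/tmp/%s.html" % stamp)  # e.g. /tmp/20240101t120000000000.html
print('<hr>\n<div style="text-align: center">\nPage 1 of 10\n</div>\n<hr>')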
gawel/panoramisk
panoramisk/fast_agi.py
https://github.com/gawel/panoramisk/blob/2ccb5d18be28a8e8f444dc0cd3a3bfb59aa19a8e/panoramisk/fast_agi.py#L108-L130
def del_route(self, path): """Delete a route for FastAGI requests: :param path: URI to answer. Ex: 'calls/start' :type path: String :Example: :: @asyncio.coroutine def start(request): print('Receive a FastAGI request') print(['AGI variables:', request.headers]) fa_app = Application() fa_app.add_route('calls/start', start) fa_app.del_route('calls/start') """ if path not in self._route: raise ValueError('This route doesn\'t exist.') del(self._route[path])
[ "def", "del_route", "(", "self", ",", "path", ")", ":", "if", "path", "not", "in", "self", ".", "_route", ":", "raise", "ValueError", "(", "'This route doesn\\'t exist.'", ")", "del", "(", "self", ".", "_route", "[", "path", "]", ")" ]
Delete a route for FastAGI requests: :param path: URI to answer. Ex: 'calls/start' :type path: String :Example: :: @asyncio.coroutine def start(request): print('Receive a FastAGI request') print(['AGI variables:', request.headers]) fa_app = Application() fa_app.add_route('calls/start', start) fa_app.del_route('calls/start')
[ "Delete", "a", "route", "for", "FastAGI", "requests", ":" ]
python
test
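Extending the docstring's own example by one line to show the error path (Application and start are the names the docstring itself uses):

fa_app = Application()
fa_app.add_route('calls/start', start)
fa_app.del_route('calls/start')
# fa_app.del_route('calls/start')  # a second delete raises ValueError("This route doesn't exist.")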
pmacosta/peng
peng/touchstone.py
https://github.com/pmacosta/peng/blob/976935377adaa3de26fc5677aceb2cdfbd6f93a7/peng/touchstone.py#L79-L278
def read_touchstone(fname):
    r"""
    Read a `Touchstone <https://ibis.org/connector/touchstone_spec11.pdf>`_ file.

    According to the specification a data line can have at most values for
    four complex parameters (plus potentially the frequency point), however
    this function is able to process malformed files as long as they have the
    correct number of data points (:code:`points` x :code:`nports` x
    :code:`nports` where :code:`points` represents the number of frequency
    points and :code:`nports` represents the number of ports in the file).
    Per the Touchstone specification noise data is only supported for
    two-port files

    :param fname: Touchstone file name
    :type fname: `FileNameExists <https://pexdoc.readthedocs.io/en/stable/
                 ptypes.html#filenameexists>`_

    :rtype: dictionary with the following structure:

     * **nports** (*integer*) -- number of ports

     * **opts** (:ref:`TouchstoneOptions`) -- File options

     * **data** (:ref:`TouchstoneData`) -- Parameter data

     * **noise** (:ref:`TouchstoneNoiseData`) -- Noise data, per the
       Touchstone specification only supported in 2-port files

    .. [[[cog cog.out(exobj.get_sphinx_autodoc()) ]]]
    .. Auto-generated exceptions documentation for
    .. peng.touchstone.read_touchstone

    :raises:
     * OSError (File *[fname]* could not be found)

     * RuntimeError (Argument \`fname\` is not valid)

     * RuntimeError (File *[fname]* does not have a valid extension)

     * RuntimeError (File *[fname]* has no data)

     * RuntimeError (First non-comment line is not the option line)

     * RuntimeError (Frequency must increase)

     * RuntimeError (Illegal data in line *[lineno]*)

     * RuntimeError (Illegal option line)

     * RuntimeError (Malformed data)

     * RuntimeError (Malformed noise data)

     * RuntimeError (Noise frequency must increase)

    .. [[[end]]]

    .. note:: The returned parameter(s) are complex numbers in real and
              imaginary format regardless of the format used in the
              Touchstone file. Similarly, the returned frequency vector unit
              is Hertz regardless of the unit used in the Touchstone file
    """
    # pylint: disable=R0912,R0915,W0702
    # Exceptions definitions
    exnports = pexdoc.exh.addex(
        RuntimeError, "File *[fname]* does not have a valid extension"
    )
    exnoopt = pexdoc.exh.addex(
        RuntimeError, "First non-comment line is not the option line"
    )
    exopt = pexdoc.exh.addex(RuntimeError, "Illegal option line")
    exline = pexdoc.exh.addex(RuntimeError, "Illegal data in line *[lineno]*")
    exnodata = pexdoc.exh.addex(RuntimeError, "File *[fname]* has no data")
    exdata = pexdoc.exh.addex(RuntimeError, "Malformed data")
    exndata = pexdoc.exh.addex(RuntimeError, "Malformed noise data")
    exfreq = pexdoc.exh.addex(RuntimeError, "Frequency must increase")
    exnfreq = pexdoc.exh.addex(RuntimeError, "Noise frequency must increase")
    # Verify that file has correct extension format
    _, ext = os.path.splitext(fname)
    ext = ext.lower()
    nports_regexp = re.compile(r"\.s(\d+)p")
    match = nports_regexp.match(ext)
    exnports(not match, edata={"field": "fname", "value": fname})
    nports = int(match.groups()[0])
    opt_line = False
    units_dict = {"GHZ": "GHz", "MHZ": "MHz", "KHZ": "KHz", "HZ": "Hz"}
    scale_dict = {"GHZ": 1e9, "MHZ": 1e6, "KHZ": 1e3, "HZ": 1.0}
    units_opts = ["GHZ", "MHZ", "KHZ", "HZ"]
    type_opts = ["S", "Y", "Z", "H", "G"]
    format_opts = ["DB", "MA", "RI"]
    opts = dict(units=None, ptype=None, pformat=None, z0=None)
    data = []
    with open(fname, "r") as fobj:
        for num, line in enumerate(fobj):
            line = line.strip().upper()
            # Comment line
            if line.startswith("!"):
                continue
            # Options line
            if (not opt_line) and (not line.startswith("#")):
                exnoopt(True)
            if not opt_line:
                # Each Touchstone data file must contain an option line
                # (additional option lines after the first one will be ignored)
                opt_line = True
                tokens = line[1:].split()  # Remove initial hash
                if "R" in tokens:
                    idx = tokens.index("R")
                    add = 1
                    if len(tokens) > idx + 1:
                        try:
                            opts["z0"] = float(tokens[idx + 1])
                            add = 2
                        except:
                            pass
                    tokens = tokens[:idx] + tokens[idx + add :]
                matches = 0
                for token in tokens:
                    if (token in format_opts) and (not opts["pformat"]):
                        matches += 1
                        opts["pformat"] = token
                    elif (token in units_opts) and (not opts["units"]):
                        matches += 1
                        opts["units"] = units_dict[token]
                    elif (token in type_opts) and (not opts["ptype"]):
                        matches += 1
                        opts["ptype"] = token
                exopt(matches != len(tokens))
            if opt_line and line.startswith("#"):
                continue
            # Data lines
            try:
                if "!" in line:
                    idx = line.index("!")
                    line = line[:idx]
                tokens = [float(item) for item in line.split()]
                data.append(tokens)
            except:
                exline(True, edata={"field": "lineno", "value": num + 1})
    data = np.concatenate(data)
    exnodata(not data.size, edata={"field": "fname", "value": fname})
    # Set option defaults
    opts["units"] = opts["units"] or "GHz"
    opts["ptype"] = opts["ptype"] or "S"
    opts["pformat"] = opts["pformat"] or "MA"
    opts["z0"] = opts["z0"] or 50
    # Format data
    data_dict = {}
    nums_per_freq = 1 + (2 * (nports ** 2))
    fslice = slice(0, data.size, nums_per_freq)
    freq = data[fslice]
    ndiff = np.diff(freq)
    ndict = {}
    if (nports == 2) and ndiff.size and (min(ndiff) <= 0):
        # Extract noise data
        npoints = np.where(ndiff <= 0)[0][0] + 1
        freq = freq[:npoints]
        ndata = data[9 * npoints :]
        nfpoints = int(ndata.size / 5.0)
        exndata(ndata.size % 5 != 0)
        data = data[: 9 * npoints]
        ndiff = 1
        nfslice = slice(0, ndata.size, 5)
        nfreq = ndata[nfslice]
        ndiff = np.diff(nfreq)
        exnfreq(bool(ndiff.size and (min(ndiff) <= 0)))
        nfig_slice = slice(1, ndata.size, 5)
        rlmag_slice = slice(2, ndata.size, 5)
        rlphase_slice = slice(3, ndata.size, 5)
        res_slice = slice(4, ndata.size, 5)
        ndict["freq"] = scale_dict[opts["units"].upper()] * nfreq
        ndict["nf"] = ndata[nfig_slice]
        ndict["rc"] = ndata[rlmag_slice] * np.exp(1j * ndata[rlphase_slice])
        ndict["res"] = ndata[res_slice]
        ndict["points"] = nfpoints
    exdata(data.size % nums_per_freq != 0)
    npoints = int(data.size / nums_per_freq)
    exfreq(bool(ndiff.size and (min(ndiff) <= 0)))
    data_dict["freq"] = scale_dict[opts["units"].upper()] * freq
    d1slice = slice(0, data.size, 2)
    d2slice = slice(1, data.size, 2)
    data = np.delete(data, fslice)
    # For format that has angle information, the angle is given in degrees
    if opts["pformat"] == "MA":
        data = data[d1slice] * np.exp(1j * np.deg2rad(data[d2slice]))
    elif opts["pformat"] == "RI":
        data = data[d1slice] + (1j * data[d2slice])
    else:  # if opts['pformat'] == 'DB':
        data = (10 ** (data[d1slice] / 20.0)) * np.exp(1j * np.deg2rad(data[d2slice]))
    if nports > 1:
        data_dict["pars"] = np.resize(data, (npoints, nports, nports))
    else:
        data_dict["pars"] = copy.copy(data)
    del data
    data_dict["points"] = npoints
    if nports == 2:
        # The order of data for a two-port file is N11, N21, N12, N22 but for
        # m ports where m > 2, the order is N11, N12, N13, ..., N1m
        data_dict["pars"] = np.transpose(data_dict["pars"], (0, 2, 1))
    return dict(nports=nports, opts=opts, data=data_dict, noise=ndict)
[ "def", "read_touchstone", "(", "fname", ")", ":", "# pylint: disable=R0912,R0915,W0702", "# Exceptions definitions", "exnports", "=", "pexdoc", ".", "exh", ".", "addex", "(", "RuntimeError", ",", "\"File *[fname]* does not have a valid extension\"", ")", "exnoopt", "=", "pexdoc", ".", "exh", ".", "addex", "(", "RuntimeError", ",", "\"First non-comment line is not the option line\"", ")", "exopt", "=", "pexdoc", ".", "exh", ".", "addex", "(", "RuntimeError", ",", "\"Illegal option line\"", ")", "exline", "=", "pexdoc", ".", "exh", ".", "addex", "(", "RuntimeError", ",", "\"Illegal data in line *[lineno]*\"", ")", "exnodata", "=", "pexdoc", ".", "exh", ".", "addex", "(", "RuntimeError", ",", "\"File *[fname]* has no data\"", ")", "exdata", "=", "pexdoc", ".", "exh", ".", "addex", "(", "RuntimeError", ",", "\"Malformed data\"", ")", "exndata", "=", "pexdoc", ".", "exh", ".", "addex", "(", "RuntimeError", ",", "\"Malformed noise data\"", ")", "exfreq", "=", "pexdoc", ".", "exh", ".", "addex", "(", "RuntimeError", ",", "\"Frequency must increase\"", ")", "exnfreq", "=", "pexdoc", ".", "exh", ".", "addex", "(", "RuntimeError", ",", "\"Noise frequency must increase\"", ")", "# Verify that file has correct extension format", "_", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "fname", ")", "ext", "=", "ext", ".", "lower", "(", ")", "nports_regexp", "=", "re", ".", "compile", "(", "r\"\\.s(\\d+)p\"", ")", "match", "=", "nports_regexp", ".", "match", "(", "ext", ")", "exnports", "(", "not", "match", ",", "edata", "=", "{", "\"field\"", ":", "\"fname\"", ",", "\"value\"", ":", "fname", "}", ")", "nports", "=", "int", "(", "match", ".", "groups", "(", ")", "[", "0", "]", ")", "opt_line", "=", "False", "units_dict", "=", "{", "\"GHZ\"", ":", "\"GHz\"", ",", "\"MHZ\"", ":", "\"MHz\"", ",", "\"KHZ\"", ":", "\"KHz\"", ",", "\"HZ\"", ":", "\"Hz\"", "}", "scale_dict", "=", "{", "\"GHZ\"", ":", "1e9", ",", "\"MHZ\"", ":", "1e6", ",", "\"KHZ\"", ":", "1e3", ",", "\"HZ\"", ":", "1.0", "}", "units_opts", "=", "[", "\"GHZ\"", ",", "\"MHZ\"", ",", "\"KHZ\"", ",", "\"HZ\"", "]", "type_opts", "=", "[", "\"S\"", ",", "\"Y\"", ",", "\"Z\"", ",", "\"H\"", ",", "\"G\"", "]", "format_opts", "=", "[", "\"DB\"", ",", "\"MA\"", ",", "\"RI\"", "]", "opts", "=", "dict", "(", "units", "=", "None", ",", "ptype", "=", "None", ",", "pformat", "=", "None", ",", "z0", "=", "None", ")", "data", "=", "[", "]", "with", "open", "(", "fname", ",", "\"r\"", ")", "as", "fobj", ":", "for", "num", ",", "line", "in", "enumerate", "(", "fobj", ")", ":", "line", "=", "line", ".", "strip", "(", ")", ".", "upper", "(", ")", "# Comment line", "if", "line", ".", "startswith", "(", "\"!\"", ")", ":", "continue", "# Options line", "if", "(", "not", "opt_line", ")", "and", "(", "not", "line", ".", "startswith", "(", "\"#\"", ")", ")", ":", "exnoopt", "(", "True", ")", "if", "not", "opt_line", ":", "# Each Touchstone data file must contain an option line", "# (additional option lines after the first one will be ignored)", "opt_line", "=", "True", "tokens", "=", "line", "[", "1", ":", "]", ".", "split", "(", ")", "# Remove initial hash", "if", "\"R\"", "in", "tokens", ":", "idx", "=", "tokens", ".", "index", "(", "\"R\"", ")", "add", "=", "1", "if", "len", "(", "tokens", ")", ">", "idx", "+", "1", ":", "try", ":", "opts", "[", "\"z0\"", "]", "=", "float", "(", "tokens", "[", "idx", "+", "1", "]", ")", "add", "=", "2", "except", ":", "pass", "tokens", "=", "tokens", "[", ":", "idx", "]", "+", "tokens", "[", "idx", "+", "add", ":", "]", 
"matches", "=", "0", "for", "token", "in", "tokens", ":", "if", "(", "token", "in", "format_opts", ")", "and", "(", "not", "opts", "[", "\"pformat\"", "]", ")", ":", "matches", "+=", "1", "opts", "[", "\"pformat\"", "]", "=", "token", "elif", "(", "token", "in", "units_opts", ")", "and", "(", "not", "opts", "[", "\"units\"", "]", ")", ":", "matches", "+=", "1", "opts", "[", "\"units\"", "]", "=", "units_dict", "[", "token", "]", "elif", "(", "token", "in", "type_opts", ")", "and", "(", "not", "opts", "[", "\"ptype\"", "]", ")", ":", "matches", "+=", "1", "opts", "[", "\"ptype\"", "]", "=", "token", "exopt", "(", "matches", "!=", "len", "(", "tokens", ")", ")", "if", "opt_line", "and", "line", ".", "startswith", "(", "\"#\"", ")", ":", "continue", "# Data lines", "try", ":", "if", "\"!\"", "in", "line", ":", "idx", "=", "line", ".", "index", "(", "\"!\"", ")", "line", "=", "line", "[", ":", "idx", "]", "tokens", "=", "[", "float", "(", "item", ")", "for", "item", "in", "line", ".", "split", "(", ")", "]", "data", ".", "append", "(", "tokens", ")", "except", ":", "exline", "(", "True", ",", "edata", "=", "{", "\"field\"", ":", "\"lineno\"", ",", "\"value\"", ":", "num", "+", "1", "}", ")", "data", "=", "np", ".", "concatenate", "(", "data", ")", "exnodata", "(", "not", "data", ".", "size", ",", "edata", "=", "{", "\"field\"", ":", "\"fname\"", ",", "\"value\"", ":", "fname", "}", ")", "# Set option defaults", "opts", "[", "\"units\"", "]", "=", "opts", "[", "\"units\"", "]", "or", "\"GHz\"", "opts", "[", "\"ptype\"", "]", "=", "opts", "[", "\"ptype\"", "]", "or", "\"S\"", "opts", "[", "\"pformat\"", "]", "=", "opts", "[", "\"pformat\"", "]", "or", "\"MA\"", "opts", "[", "\"z0\"", "]", "=", "opts", "[", "\"z0\"", "]", "or", "50", "# Format data", "data_dict", "=", "{", "}", "nums_per_freq", "=", "1", "+", "(", "2", "*", "(", "nports", "**", "2", ")", ")", "fslice", "=", "slice", "(", "0", ",", "data", ".", "size", ",", "nums_per_freq", ")", "freq", "=", "data", "[", "fslice", "]", "ndiff", "=", "np", ".", "diff", "(", "freq", ")", "ndict", "=", "{", "}", "if", "(", "nports", "==", "2", ")", "and", "ndiff", ".", "size", "and", "(", "min", "(", "ndiff", ")", "<=", "0", ")", ":", "# Extract noise data", "npoints", "=", "np", ".", "where", "(", "ndiff", "<=", "0", ")", "[", "0", "]", "[", "0", "]", "+", "1", "freq", "=", "freq", "[", ":", "npoints", "]", "ndata", "=", "data", "[", "9", "*", "npoints", ":", "]", "nfpoints", "=", "int", "(", "ndata", ".", "size", "/", "5.0", ")", "exndata", "(", "ndata", ".", "size", "%", "5", "!=", "0", ")", "data", "=", "data", "[", ":", "9", "*", "npoints", "]", "ndiff", "=", "1", "nfslice", "=", "slice", "(", "0", ",", "ndata", ".", "size", ",", "5", ")", "nfreq", "=", "ndata", "[", "nfslice", "]", "ndiff", "=", "np", ".", "diff", "(", "nfreq", ")", "exnfreq", "(", "bool", "(", "ndiff", ".", "size", "and", "(", "min", "(", "ndiff", ")", "<=", "0", ")", ")", ")", "nfig_slice", "=", "slice", "(", "1", ",", "ndata", ".", "size", ",", "5", ")", "rlmag_slice", "=", "slice", "(", "2", ",", "ndata", ".", "size", ",", "5", ")", "rlphase_slice", "=", "slice", "(", "3", ",", "ndata", ".", "size", ",", "5", ")", "res_slice", "=", "slice", "(", "4", ",", "ndata", ".", "size", ",", "5", ")", "ndict", "[", "\"freq\"", "]", "=", "scale_dict", "[", "opts", "[", "\"units\"", "]", ".", "upper", "(", ")", "]", "*", "nfreq", "ndict", "[", "\"nf\"", "]", "=", "ndata", "[", "nfig_slice", "]", "ndict", "[", "\"rc\"", "]", "=", "ndata", "[", "rlmag_slice", "]", "*", "np", ".", "exp", 
"(", "1j", "*", "ndata", "[", "rlphase_slice", "]", ")", "ndict", "[", "\"res\"", "]", "=", "ndata", "[", "res_slice", "]", "ndict", "[", "\"points\"", "]", "=", "nfpoints", "exdata", "(", "data", ".", "size", "%", "nums_per_freq", "!=", "0", ")", "npoints", "=", "int", "(", "data", ".", "size", "/", "nums_per_freq", ")", "exfreq", "(", "bool", "(", "ndiff", ".", "size", "and", "(", "min", "(", "ndiff", ")", "<=", "0", ")", ")", ")", "data_dict", "[", "\"freq\"", "]", "=", "scale_dict", "[", "opts", "[", "\"units\"", "]", ".", "upper", "(", ")", "]", "*", "freq", "d1slice", "=", "slice", "(", "0", ",", "data", ".", "size", ",", "2", ")", "d2slice", "=", "slice", "(", "1", ",", "data", ".", "size", ",", "2", ")", "data", "=", "np", ".", "delete", "(", "data", ",", "fslice", ")", "# For format that has angle information, the angle is given in degrees", "if", "opts", "[", "\"pformat\"", "]", "==", "\"MA\"", ":", "data", "=", "data", "[", "d1slice", "]", "*", "np", ".", "exp", "(", "1j", "*", "np", ".", "deg2rad", "(", "data", "[", "d2slice", "]", ")", ")", "elif", "opts", "[", "\"pformat\"", "]", "==", "\"RI\"", ":", "data", "=", "data", "[", "d1slice", "]", "+", "(", "1j", "*", "data", "[", "d2slice", "]", ")", "else", ":", "# if opts['pformat'] == 'DB':", "data", "=", "(", "10", "**", "(", "data", "[", "d1slice", "]", "/", "20.0", ")", ")", "*", "np", ".", "exp", "(", "1j", "*", "np", ".", "deg2rad", "(", "data", "[", "d2slice", "]", ")", ")", "if", "nports", ">", "1", ":", "data_dict", "[", "\"pars\"", "]", "=", "np", ".", "resize", "(", "data", ",", "(", "npoints", ",", "nports", ",", "nports", ")", ")", "else", ":", "data_dict", "[", "\"pars\"", "]", "=", "copy", ".", "copy", "(", "data", ")", "del", "data", "data_dict", "[", "\"points\"", "]", "=", "npoints", "if", "nports", "==", "2", ":", "# The order of data for a two-port file is N11, N21, N12, N22 but for", "# m ports where m > 2, the order is N11, N12, N13, ..., N1m", "data_dict", "[", "\"pars\"", "]", "=", "np", ".", "transpose", "(", "data_dict", "[", "\"pars\"", "]", ",", "(", "0", ",", "2", ",", "1", ")", ")", "return", "dict", "(", "nports", "=", "nports", ",", "opts", "=", "opts", ",", "data", "=", "data_dict", ",", "noise", "=", "ndict", ")" ]
r""" Read a `Touchstone <https://ibis.org/connector/touchstone_spec11.pdf>`_ file. According to the specification a data line can have at most values for four complex parameters (plus potentially the frequency point), however this function is able to process malformed files as long as they have the correct number of data points (:code:`points` x :code:`nports` x :code:`nports` where :code:`points` represents the number of frequency points and :code:`nports` represents the number of ports in the file). Per the Touchstone specification noise data is only supported for two-port files :param fname: Touchstone file name :type fname: `FileNameExists <https://pexdoc.readthedocs.io/en/stable/ ptypes.html#filenameexists>`_ :rtype: dictionary with the following structure: * **nports** (*integer*) -- number of ports * **opts** (:ref:`TouchstoneOptions`) -- File options * **data** (:ref:`TouchstoneData`) -- Parameter data * **noise** (:ref:`TouchstoneNoiseData`) -- Noise data, per the Touchstone specification only supported in 2-port files .. [[[cog cog.out(exobj.get_sphinx_autodoc()) ]]] .. Auto-generated exceptions documentation for .. peng.touchstone.read_touchstone :raises: * OSError (File *[fname]* could not be found) * RuntimeError (Argument \`fname\` is not valid) * RuntimeError (File *[fname]* does not have a valid extension) * RuntimeError (File *[fname]* has no data) * RuntimeError (First non-comment line is not the option line) * RuntimeError (Frequency must increase) * RuntimeError (Illegal data in line *[lineno]*) * RuntimeError (Illegal option line) * RuntimeError (Malformed data) * RuntimeError (Malformed noise data) * RuntimeError (Noise frequency must increase) .. [[[end]]] .. note:: The returned parameter(s) are complex numbers in real and imaginary format regardless of the format used in the Touchstone file. Similarly, the returned frequency vector unit is Hertz regardless of the unit used in the Touchstone file
[ "r", "Read", "a", "Touchstone", "<https", ":", "//", "ibis", ".", "org", "/", "connector", "/", "touchstone_spec11", ".", "pdf", ">", "_", "file", "." ]
python
test
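A hedged end-to-end example: a minimal two-port file that satisfies the option-line and data-line rules above, followed by one call. The file name is invented and the module is assumed importable as in the record's path:

# Write a tiny .s2p file (option line: GHz, S-parameters, mag-angle, 50 ohm).
s2p = """! demo file
# GHZ S MA R 50
1.0  0.9 -10  0.01 85  0.01 85  0.9 -10
"""
with open('demo.s2p', 'w') as fobj:
    fobj.write(s2p)
d = read_touchstone('demo.s2p')
print(d['nports'], d['data']['points'])  # 2 1
print(d['data']['freq'])                 # [1.e+09] -- Hz regardless of the file unit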
bear/bearlib
bearlib/tools.py
https://github.com/bear/bearlib/blob/30f9b8ba4b7a8db4cd2f4c6e07966ae51d0a00dd/bearlib/tools.py#L22-L34
def baseDomain(domain, includeScheme=True): """Return only the network location portion of the given domain unless includeScheme is True """ result = '' url = urlparse(domain) if includeScheme: result = '%s://' % url.scheme if len(url.netloc) == 0: result += url.path else: result += url.netloc return result
[ "def", "baseDomain", "(", "domain", ",", "includeScheme", "=", "True", ")", ":", "result", "=", "''", "url", "=", "urlparse", "(", "domain", ")", "if", "includeScheme", ":", "result", "=", "'%s://'", "%", "url", ".", "scheme", "if", "len", "(", "url", ".", "netloc", ")", "==", "0", ":", "result", "+=", "url", ".", "path", "else", ":", "result", "+=", "url", ".", "netloc", "return", "result" ]
Return only the network location portion of the given domain unless includeScheme is True
[ "Return", "only", "the", "network", "location", "portion", "of", "the", "given", "domain", "unless", "includeScheme", "is", "True" ]
python
train
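The len(netloc) == 0 fallback exists because scheme-less input lands in .path; the stdlib shows this directly (Python 3 spelling of the urlparse import used above):

from urllib.parse import urlparse
print(urlparse('https://example.com/x').netloc)  # 'example.com'
print(urlparse('example.com/x').netloc)          # '' -> baseDomain falls back to .path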
goldmann/docker-squash
docker_squash/lib/xtarfile.py
https://github.com/goldmann/docker-squash/blob/89e0297942be268791aff2098b7ebfa50d82f8e8/docker_squash/lib/xtarfile.py#L20-L81
def _proc_pax(self, filetar): """Process an extended or global header as described in POSIX.1-2001.""" # Read the header information. buf = filetar.fileobj.read(self._block(self.size)) # A pax header stores supplemental information for either # the following file (extended) or all following files # (global). if self.type == tarfile.XGLTYPE: pax_headers = filetar.pax_headers else: pax_headers = filetar.pax_headers.copy() # Parse pax header information. A record looks like that: # "%d %s=%s\n" % (length, keyword, value). length is the size # of the complete record including the length field itself and # the newline. keyword and value are both UTF-8 encoded strings. regex = re.compile(r"(\d+) ([^=]+)=", re.U) pos = 0 while True: match = regex.match(buf, pos) if not match: break length, keyword = match.groups() length = int(length) value = buf[match.end(2) + 1:match.start(1) + length - 1] try: keyword = keyword.decode("utf8") except Exception: pass try: value = value.decode("utf8") except Exception: pass pax_headers[keyword] = value pos += length # Fetch the next header. try: next = self.fromtarfile(filetar) except tarfile.HeaderError: raise tarfile.SubsequentHeaderError("missing or bad subsequent header") if self.type in (tarfile.XHDTYPE, tarfile.SOLARIS_XHDTYPE): # Patch the TarInfo object with the extended header info. next._apply_pax_info(pax_headers, filetar.encoding, filetar.errors) next.offset = self.offset if "size" in pax_headers: # If the extended header replaces the size field, # we need to recalculate the offset where the next # header starts. offset = next.offset_data if next.isreg() or next.type not in tarfile.SUPPORTED_TYPES: offset += next._block(next.size) filetar.offset = offset return next
[ "def", "_proc_pax", "(", "self", ",", "filetar", ")", ":", "# Read the header information.", "buf", "=", "filetar", ".", "fileobj", ".", "read", "(", "self", ".", "_block", "(", "self", ".", "size", ")", ")", "# A pax header stores supplemental information for either", "# the following file (extended) or all following files", "# (global).", "if", "self", ".", "type", "==", "tarfile", ".", "XGLTYPE", ":", "pax_headers", "=", "filetar", ".", "pax_headers", "else", ":", "pax_headers", "=", "filetar", ".", "pax_headers", ".", "copy", "(", ")", "# Parse pax header information. A record looks like that:", "# \"%d %s=%s\\n\" % (length, keyword, value). length is the size", "# of the complete record including the length field itself and", "# the newline. keyword and value are both UTF-8 encoded strings.", "regex", "=", "re", ".", "compile", "(", "r\"(\\d+) ([^=]+)=\"", ",", "re", ".", "U", ")", "pos", "=", "0", "while", "True", ":", "match", "=", "regex", ".", "match", "(", "buf", ",", "pos", ")", "if", "not", "match", ":", "break", "length", ",", "keyword", "=", "match", ".", "groups", "(", ")", "length", "=", "int", "(", "length", ")", "value", "=", "buf", "[", "match", ".", "end", "(", "2", ")", "+", "1", ":", "match", ".", "start", "(", "1", ")", "+", "length", "-", "1", "]", "try", ":", "keyword", "=", "keyword", ".", "decode", "(", "\"utf8\"", ")", "except", "Exception", ":", "pass", "try", ":", "value", "=", "value", ".", "decode", "(", "\"utf8\"", ")", "except", "Exception", ":", "pass", "pax_headers", "[", "keyword", "]", "=", "value", "pos", "+=", "length", "# Fetch the next header.", "try", ":", "next", "=", "self", ".", "fromtarfile", "(", "filetar", ")", "except", "tarfile", ".", "HeaderError", ":", "raise", "tarfile", ".", "SubsequentHeaderError", "(", "\"missing or bad subsequent header\"", ")", "if", "self", ".", "type", "in", "(", "tarfile", ".", "XHDTYPE", ",", "tarfile", ".", "SOLARIS_XHDTYPE", ")", ":", "# Patch the TarInfo object with the extended header info.", "next", ".", "_apply_pax_info", "(", "pax_headers", ",", "filetar", ".", "encoding", ",", "filetar", ".", "errors", ")", "next", ".", "offset", "=", "self", ".", "offset", "if", "\"size\"", "in", "pax_headers", ":", "# If the extended header replaces the size field,", "# we need to recalculate the offset where the next", "# header starts.", "offset", "=", "next", ".", "offset_data", "if", "next", ".", "isreg", "(", ")", "or", "next", ".", "type", "not", "in", "tarfile", ".", "SUPPORTED_TYPES", ":", "offset", "+=", "next", ".", "_block", "(", "next", ".", "size", ")", "filetar", ".", "offset", "=", "offset", "return", "next" ]
Process an extended or global header as described in POSIX.1-2001.
[ "Process", "an", "extended", "or", "global", "header", "as", "described", "in", "POSIX", ".", "1", "-", "2001", "." ]
python
train
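One pax record in the "%d %s=%s\n" format described in the comments, parsed standalone with the same regex logic (the mtime value is invented; the record is 30 bytes including the length field and newline):

import re
buf = b'30 mtime=1638316800.123456789\n'
m = re.match(rb'(\d+) ([^=]+)=', buf)
length, keyword = int(m.group(1)), m.group(2)
value = buf[m.end(2) + 1:m.start(1) + length - 1]
print(keyword, value)  # b'mtime' b'1638316800.123456789'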
psss/did
did/plugins/bugzilla.py
https://github.com/psss/did/blob/04e4ee6f1aa14c0cae3ba9f9803871f3f98279cb/did/plugins/bugzilla.py#L81-L119
def search(self, query, options): """ Perform Bugzilla search """ query["query_format"] = "advanced" log.debug("Search query:") log.debug(pretty(query)) # Fetch bug info try: result = self.server.query(query) except xmlrpclib.Fault as error: # Ignore non-existent users (this is necessary for users with # several email aliases to allow them using --merge/--total) if "not a valid username" in unicode(error): log.debug(error) return [] # Otherwise suggest to bake bugzilla cookies log.error("An error encountered, while searching for bugs.") log.debug(error) raise ReportError( "Have you baked cookies using the 'bugzilla login' command?") log.debug("Search result:") log.debug(pretty(result)) bugs = dict((bug.id, bug) for bug in result) # Fetch bug history log.debug("Fetching bug history") result = self.server._proxy.Bug.history({'ids': bugs.keys()}) log.debug(pretty(result)) history = dict((bug["id"], bug["history"]) for bug in result["bugs"]) # Fetch bug comments log.debug("Fetching bug comments") result = self.server._proxy.Bug.comments({'ids': bugs.keys()}) log.debug(pretty(result)) comments = dict( (int(bug), data["comments"]) for bug, data in result["bugs"].items()) # Create bug objects return [ self.parent.bug( bugs[id], history[id], comments[id], parent=self.parent) for id in bugs]
[ "def", "search", "(", "self", ",", "query", ",", "options", ")", ":", "query", "[", "\"query_format\"", "]", "=", "\"advanced\"", "log", ".", "debug", "(", "\"Search query:\"", ")", "log", ".", "debug", "(", "pretty", "(", "query", ")", ")", "# Fetch bug info", "try", ":", "result", "=", "self", ".", "server", ".", "query", "(", "query", ")", "except", "xmlrpclib", ".", "Fault", "as", "error", ":", "# Ignore non-existent users (this is necessary for users with", "# several email aliases to allow them using --merge/--total)", "if", "\"not a valid username\"", "in", "unicode", "(", "error", ")", ":", "log", ".", "debug", "(", "error", ")", "return", "[", "]", "# Otherwise suggest to bake bugzilla cookies", "log", ".", "error", "(", "\"An error encountered, while searching for bugs.\"", ")", "log", ".", "debug", "(", "error", ")", "raise", "ReportError", "(", "\"Have you baked cookies using the 'bugzilla login' command?\"", ")", "log", ".", "debug", "(", "\"Search result:\"", ")", "log", ".", "debug", "(", "pretty", "(", "result", ")", ")", "bugs", "=", "dict", "(", "(", "bug", ".", "id", ",", "bug", ")", "for", "bug", "in", "result", ")", "# Fetch bug history", "log", ".", "debug", "(", "\"Fetching bug history\"", ")", "result", "=", "self", ".", "server", ".", "_proxy", ".", "Bug", ".", "history", "(", "{", "'ids'", ":", "bugs", ".", "keys", "(", ")", "}", ")", "log", ".", "debug", "(", "pretty", "(", "result", ")", ")", "history", "=", "dict", "(", "(", "bug", "[", "\"id\"", "]", ",", "bug", "[", "\"history\"", "]", ")", "for", "bug", "in", "result", "[", "\"bugs\"", "]", ")", "# Fetch bug comments", "log", ".", "debug", "(", "\"Fetching bug comments\"", ")", "result", "=", "self", ".", "server", ".", "_proxy", ".", "Bug", ".", "comments", "(", "{", "'ids'", ":", "bugs", ".", "keys", "(", ")", "}", ")", "log", ".", "debug", "(", "pretty", "(", "result", ")", ")", "comments", "=", "dict", "(", "(", "int", "(", "bug", ")", ",", "data", "[", "\"comments\"", "]", ")", "for", "bug", ",", "data", "in", "result", "[", "\"bugs\"", "]", ".", "items", "(", ")", ")", "# Create bug objects", "return", "[", "self", ".", "parent", ".", "bug", "(", "bugs", "[", "id", "]", ",", "history", "[", "id", "]", ",", "comments", "[", "id", "]", ",", "parent", "=", "self", ".", "parent", ")", "for", "id", "in", "bugs", "]" ]
Perform Bugzilla search
[ "Perform", "Bugzilla", "search" ]
python
train
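A hedged usage sketch; f1/o1/v1 are standard Bugzilla advanced-search keys (query_format is added by the method itself), while the plugin instance and options object are assumed to be wired up elsewhere in did:

query = {
    'f1': 'assigned_to', 'o1': 'equals', 'v1': 'alice@example.com',
    'chfieldfrom': '2023-01-01', 'chfieldto': '2023-01-31',
}
for bug in bugzilla.search(query, options):  # 'bugzilla' is the plugin instance
    print(bug)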
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L3182-L3205
def dskv02(handle, dladsc, start, room): """ Fetch vertices from a type 2 DSK segment. https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dskv02_c.html :param handle: DSK file handle. :type handle: int :param dladsc: DLA descriptor. :type dladsc: spiceypy.utils.support_types.SpiceDLADescr :param start: Start index. :type start: int :param room: Amount of room in output array. :type room: int :return: Array containing vertices. :rtype: Room x 3-Element Array of floats """ handle = ctypes.c_int(handle) start = ctypes.c_int(start) room = ctypes.c_int(room) n = ctypes.c_int() vrtces = stypes.emptyDoubleMatrix(3, room) libspice.dskv02_c(handle, dladsc, start, room, ctypes.byref(n), vrtces) return stypes.cMatrixToNumpy(vrtces)
[ "def", "dskv02", "(", "handle", ",", "dladsc", ",", "start", ",", "room", ")", ":", "handle", "=", "ctypes", ".", "c_int", "(", "handle", ")", "start", "=", "ctypes", ".", "c_int", "(", "start", ")", "room", "=", "ctypes", ".", "c_int", "(", "room", ")", "n", "=", "ctypes", ".", "c_int", "(", ")", "vrtces", "=", "stypes", ".", "emptyDoubleMatrix", "(", "3", ",", "room", ")", "libspice", ".", "dskv02_c", "(", "handle", ",", "dladsc", ",", "start", ",", "room", ",", "ctypes", ".", "byref", "(", "n", ")", ",", "vrtces", ")", "return", "stypes", ".", "cMatrixToNumpy", "(", "vrtces", ")" ]
Fetch vertices from a type 2 DSK segment. https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dskv02_c.html :param handle: DSK file handle. :type handle: int :param dladsc: DLA descriptor. :type dladsc: spiceypy.utils.support_types.SpiceDLADescr :param start: Start index. :type start: int :param room: Amount of room in output array. :type room: int :return: Array containing vertices. :rtype: Room x 3-Element Array of floats
[ "Fetch", "vertices", "from", "a", "type", "2", "DSK", "segment", "." ]
python
train
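A hedged SpiceyPy sketch; the DSK file name is invented, and the exact return shape of the DLA lookup can differ between SpiceyPy versions:

import spiceypy as spice
handle = spice.dasopr('phobos512.bds')           # open a type 2 DSK for read access
dladsc = spice.dlabfs(handle)                    # descriptor of the first DLA segment
vrtces = spice.dskv02(handle, dladsc, 1, 10000)  # 1-based start index
print(vrtces.shape)                              # (10000, 3): x, y, z per vertex
spice.dascls(handle)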
PyCQA/astroid
astroid/node_classes.py
https://github.com/PyCQA/astroid/blob/e0a298df55b15abcb77c2a93253f5ab7be52d0fb/astroid/node_classes.py#L1597-L1614
def default_value(self, argname): """Get the default value for an argument. :param argname: The name of the argument to get the default value for. :type argname: str :raises NoDefault: If there is no default value defined for the given argument. """ i = _find_arg(argname, self.args)[0] if i is not None: idx = i - (len(self.args) - len(self.defaults)) if idx >= 0: return self.defaults[idx] i = _find_arg(argname, self.kwonlyargs)[0] if i is not None and self.kw_defaults[i] is not None: return self.kw_defaults[i] raise exceptions.NoDefault(func=self.parent, name=argname)
[ "def", "default_value", "(", "self", ",", "argname", ")", ":", "i", "=", "_find_arg", "(", "argname", ",", "self", ".", "args", ")", "[", "0", "]", "if", "i", "is", "not", "None", ":", "idx", "=", "i", "-", "(", "len", "(", "self", ".", "args", ")", "-", "len", "(", "self", ".", "defaults", ")", ")", "if", "idx", ">=", "0", ":", "return", "self", ".", "defaults", "[", "idx", "]", "i", "=", "_find_arg", "(", "argname", ",", "self", ".", "kwonlyargs", ")", "[", "0", "]", "if", "i", "is", "not", "None", "and", "self", ".", "kw_defaults", "[", "i", "]", "is", "not", "None", ":", "return", "self", ".", "kw_defaults", "[", "i", "]", "raise", "exceptions", ".", "NoDefault", "(", "func", "=", "self", ".", "parent", ",", "name", "=", "argname", ")" ]
Get the default value for an argument. :param argname: The name of the argument to get the default value for. :type argname: str :raises NoDefault: If there is no default value defined for the given argument.
[ "Get", "the", "default", "value", "for", "an", "argument", "." ]
python
train
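Runnable with astroid installed; extract_node parses a snippet and default_value is called on the function's Arguments node:

import astroid
func = astroid.extract_node('def f(a, b=2, *, c=3): pass')
print(func.args.default_value('b').value)  # 2
print(func.args.default_value('c').value)  # 3 (keyword-only branch)
# func.args.default_value('a') would raise astroid.exceptions.NoDefault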
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/wix.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/wix.py#L39-L63
def generate(env): """Add Builders and construction variables for WiX to an Environment.""" if not exists(env): return env['WIXCANDLEFLAGS'] = ['-nologo'] env['WIXCANDLEINCLUDE'] = [] env['WIXCANDLECOM'] = '$WIXCANDLE $WIXCANDLEFLAGS -I $WIXCANDLEINCLUDE -o ${TARGET} ${SOURCE}' env['WIXLIGHTFLAGS'].append( '-nologo' ) env['WIXLIGHTCOM'] = "$WIXLIGHT $WIXLIGHTFLAGS -out ${TARGET} ${SOURCES}" env['WIXSRCSUF'] = '.wxs' env['WIXOBJSUF'] = '.wixobj' object_builder = SCons.Builder.Builder( action = '$WIXCANDLECOM', suffix = '$WIXOBJSUF', src_suffix = '$WIXSRCSUF') linker_builder = SCons.Builder.Builder( action = '$WIXLIGHTCOM', src_suffix = '$WIXOBJSUF', src_builder = object_builder) env['BUILDERS']['WiX'] = linker_builder
[ "def", "generate", "(", "env", ")", ":", "if", "not", "exists", "(", "env", ")", ":", "return", "env", "[", "'WIXCANDLEFLAGS'", "]", "=", "[", "'-nologo'", "]", "env", "[", "'WIXCANDLEINCLUDE'", "]", "=", "[", "]", "env", "[", "'WIXCANDLECOM'", "]", "=", "'$WIXCANDLE $WIXCANDLEFLAGS -I $WIXCANDLEINCLUDE -o ${TARGET} ${SOURCE}'", "env", "[", "'WIXLIGHTFLAGS'", "]", ".", "append", "(", "'-nologo'", ")", "env", "[", "'WIXLIGHTCOM'", "]", "=", "\"$WIXLIGHT $WIXLIGHTFLAGS -out ${TARGET} ${SOURCES}\"", "env", "[", "'WIXSRCSUF'", "]", "=", "'.wxs'", "env", "[", "'WIXOBJSUF'", "]", "=", "'.wixobj'", "object_builder", "=", "SCons", ".", "Builder", ".", "Builder", "(", "action", "=", "'$WIXCANDLECOM'", ",", "suffix", "=", "'$WIXOBJSUF'", ",", "src_suffix", "=", "'$WIXSRCSUF'", ")", "linker_builder", "=", "SCons", ".", "Builder", ".", "Builder", "(", "action", "=", "'$WIXLIGHTCOM'", ",", "src_suffix", "=", "'$WIXOBJSUF'", ",", "src_builder", "=", "object_builder", ")", "env", "[", "'BUILDERS'", "]", "[", "'WiX'", "]", "=", "linker_builder" ]
Add Builders and construction variables for WiX to an Environment.
[ "Add", "Builders", "and", "construction", "variables", "for", "WiX", "to", "an", "Environment", "." ]
python
train
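A hedged SConstruct sketch of how such a tool is activated; 'product.wxs' is invented and candle/light must be on PATH for the tool's exists() check to pass:

from SCons.Environment import Environment
env = Environment(tools=['default', 'wix'])
env.WiX('installer.msi', ['product.wxs'])  # candle -> .wixobj, then light -> .msi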
base4sistemas/satcfe
satcfe/clientelocal.py
https://github.com/base4sistemas/satcfe/blob/cb8e8815f4133d3e3d94cf526fa86767b4521ed9/satcfe/clientelocal.py#L46-L54
def ativar_sat(self, tipo_certificado, cnpj, codigo_uf):
    """Overrides :meth:`~satcfe.base.FuncoesSAT.ativar_sat`.

    :return: A SAT response specialized for ``AtivarSAT``.
    :rtype: satcfe.resposta.ativarsat.RespostaAtivarSAT
    """
    retorno = super(ClienteSATLocal, self).ativar_sat(
            tipo_certificado, cnpj, codigo_uf)
    return RespostaAtivarSAT.analisar(retorno)
[ "def", "ativar_sat", "(", "self", ",", "tipo_certificado", ",", "cnpj", ",", "codigo_uf", ")", ":", "retorno", "=", "super", "(", "ClienteSATLocal", ",", "self", ")", ".", "ativar_sat", "(", "tipo_certificado", ",", "cnpj", ",", "codigo_uf", ")", "return", "RespostaAtivarSAT", ".", "analisar", "(", "retorno", ")" ]
Overrides :meth:`~satcfe.base.FuncoesSAT.ativar_sat`.

:return: A SAT response specialized for ``AtivarSAT``.
:rtype: satcfe.resposta.ativarsat.RespostaAtivarSAT
[ "Sobrepõe", ":", "meth", ":", "~satcfe", ".", "base", ".", "FuncoesSAT", ".", "ativar_sat", "." ]
python
train
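A hedged usage sketch; the client construction is omitted and every argument value is invented (the SAT documentation defines the real certificate-type codes):

resposta = cliente.ativar_sat(
    tipo_certificado=1,       # invented certificate-type code
    cnpj='11111111111111',    # invented CNPJ
    codigo_uf=35)             # IBGE code for São Paulo
print(resposta.mensagem)      # attribute name assumed from satcfe response objects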
supercoderz/pyflightdata
pyflightdata/flightdata.py
https://github.com/supercoderz/pyflightdata/blob/2caf9f429288f9a171893d1b8377d0c6244541cc/pyflightdata/flightdata.py#L118-L133
def get_airports(self, country): """Returns a list of all the airports For a given country this returns a list of dicts, one for each airport, with information like the iata code of the airport etc Args: country (str): The country for which the airports will be fetched Example:: from pyflightdata import FlightData f=FlightData() f.get_airports('India') """ url = AIRPORT_BASE.format(country.replace(" ", "-")) return self._fr24.get_airports_data(url)
[ "def", "get_airports", "(", "self", ",", "country", ")", ":", "url", "=", "AIRPORT_BASE", ".", "format", "(", "country", ".", "replace", "(", "\" \"", ",", "\"-\"", ")", ")", "return", "self", ".", "_fr24", ".", "get_airports_data", "(", "url", ")" ]
Returns a list of all the airports. For a given country this returns a list of dicts, one for each airport, with information like the iata code of the airport etc.

Args:
    country (str): The country for which the airports will be fetched

Example::

    from pyflightdata import FlightData
    f=FlightData()
    f.get_airports('India')
[ "Returns", "a", "list", "of", "all", "the", "airports", "For", "a", "given", "country", "this", "returns", "a", "list", "of", "dicts", "one", "for", "each", "airport", "with", "information", "like", "the", "iata", "code", "of", "the", "airport", "etc" ]
python
train
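The docstring example, extended with one line to show the result shape (a list of dicts, one per airport):

from pyflightdata import FlightData
f = FlightData()
airports = f.get_airports('India')
print(len(airports), airports[0])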
mdgoldberg/sportsref
sportsref/nfl/boxscores.py
https://github.com/mdgoldberg/sportsref/blob/09f11ac856a23c96d666d1d510bb35d6f050b5c3/sportsref/nfl/boxscores.py#L401-L418
def snap_counts(self): """Gets the snap counts for both teams' players and returns them in a DataFrame. Note: only goes back to 2012. :returns: DataFrame of snap count data """ # TODO: combine duplicate players, see 201312150mia - ThomDa03 doc = self.get_doc() table_ids = ('vis_snap_counts', 'home_snap_counts') tms = (self.away(), self.home()) df = pd.concat([ sportsref.utils.parse_table(doc('table#{}'.format(table_id))) .assign(is_home=bool(i), team=tms[i], opp=tms[i*-1+1]) for i, table_id in enumerate(table_ids) ]) if df.empty: return df return df.set_index('player_id')
[ "def", "snap_counts", "(", "self", ")", ":", "# TODO: combine duplicate players, see 201312150mia - ThomDa03", "doc", "=", "self", ".", "get_doc", "(", ")", "table_ids", "=", "(", "'vis_snap_counts'", ",", "'home_snap_counts'", ")", "tms", "=", "(", "self", ".", "away", "(", ")", ",", "self", ".", "home", "(", ")", ")", "df", "=", "pd", ".", "concat", "(", "[", "sportsref", ".", "utils", ".", "parse_table", "(", "doc", "(", "'table#{}'", ".", "format", "(", "table_id", ")", ")", ")", ".", "assign", "(", "is_home", "=", "bool", "(", "i", ")", ",", "team", "=", "tms", "[", "i", "]", ",", "opp", "=", "tms", "[", "i", "*", "-", "1", "+", "1", "]", ")", "for", "i", ",", "table_id", "in", "enumerate", "(", "table_ids", ")", "]", ")", "if", "df", ".", "empty", ":", "return", "df", "return", "df", ".", "set_index", "(", "'player_id'", ")" ]
Gets the snap counts for both teams' players and returns them in a DataFrame. Note: only goes back to 2012. :returns: DataFrame of snap count data
[ "Gets", "the", "snap", "counts", "for", "both", "teams", "players", "and", "returns", "them", "in", "a", "DataFrame", ".", "Note", ":", "only", "goes", "back", "to", "2012", "." ]
python
test
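A hedged sketch; the BoxScore entry point is an assumption based on the package layout, and the boxscore id is borrowed from the TODO comment in the code:

import sportsref
box = sportsref.nfl.BoxScore('201312150mia')
df = box.snap_counts()                       # indexed by player_id
print(df[['team', 'opp', 'is_home']].head())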
swarmer/fridge
fridge.py
https://github.com/swarmer/fridge/blob/fcf6481307ce268c40c22f5e0062d01334f6cd95/fridge.py#L107-L116
def save(self): """ Force saving the dictionary to the file. All data in the file is discarded. This method is called automatically by :meth:`close`. """ self._check_open() self.file.truncate(0) self.file.seek(0) json.dump(self, self.file, **self.dump_args)
[ "def", "save", "(", "self", ")", ":", "self", ".", "_check_open", "(", ")", "self", ".", "file", ".", "truncate", "(", "0", ")", "self", ".", "file", ".", "seek", "(", "0", ")", "json", ".", "dump", "(", "self", ",", "self", ".", "file", ",", "*", "*", "self", ".", "dump_args", ")" ]
Force saving the dictionary to the file. All data in the file is discarded. This method is called automatically by :meth:`close`.
[ "Force", "saving", "the", "dictionary", "to", "the", "file", ".", "All", "data", "in", "the", "file", "is", "discarded", ".", "This", "method", "is", "called", "automatically", "by", ":", "meth", ":", "close", "." ]
python
test
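A hedged sketch; the Fridge constructor signature is an assumption based on the documented file-backed dict behaviour:

fridge = Fridge('state.json')
fridge['runs'] = fridge.get('runs', 0) + 1
fridge.save()   # rewrite the file in place without closing
fridge.close()  # per the docstring, close() calls save() automatically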
ecederstrand/exchangelib
exchangelib/util.py
https://github.com/ecederstrand/exchangelib/blob/736347b337c239fcd6d592db5b29e819f753c1ba/exchangelib/util.py#L552-L682
def post_ratelimited(protocol, session, url, headers, data, allow_redirects=False, stream=False):
    """
    There are two error-handling policies implemented here: a fail-fast policy intended for stand-alone scripts
    which fails on all responses except HTTP 200. The other policy is intended for long-running tasks that need to
    respect rate-limiting errors from the server and paper over outages of up to 1 hour.

    Wrap POST requests in a try-catch loop with a lot of error handling logic and some basic rate-limiting. If a
    request fails, and some conditions are met, the loop waits in increasing intervals, up to 1 hour, before trying
    again. The reason for this is that servers often malfunction for short periods of time, either because of ongoing
    data migrations or other maintenance tasks, misconfigurations or heavy load, or because the connecting user has
    hit a throttling policy limit. If the loop exited early, consumers of this package that don't implement their own
    rate-limiting code could quickly swamp such a server with new requests. That would only make things worse.
    Instead, it's better if the request loop waits patiently until the server is functioning again.

    If the connecting user has hit a throttling policy, then the server will start to malfunction in many interesting
    ways, but never actually tell the user what is happening. There is no way to distinguish this situation from
    other malfunctions. The only cure is to stop making requests.

    The contract on sessions here is to return the session that ends up being used, or retiring the session if we
    intend to raise an exception. We give up on max_wait timeout, not number of retries.

    An additional resource on handling throttling policies and client back off strategies:
    https://msdn.microsoft.com/en-us/library/office/jj945066(v=exchg.150).aspx#bk_ThrottlingBatch
    """
    thread_id = get_ident()
    wait = 10  # seconds
    retry = 0
    redirects = 0
    # In Python 2, we want this to be a 'str' object so logging doesn't break (all formatting arguments are 'str').
    # We activated 'unicode_literals' at the top of this file, so it would be a 'unicode' object unless we convert
    # to 'str' explicitly. This is a no-op for Python 3.
    log_msg = str('''\
Retry: %(retry)s
Waited: %(wait)s
Timeout: %(timeout)s
Session: %(session_id)s
Thread: %(thread_id)s
Auth type: %(auth)s
URL: %(url)s
HTTP adapter: %(adapter)s
Allow redirects: %(allow_redirects)s
Streaming: %(stream)s
Response time: %(response_time)s
Status code: %(status_code)s
Request headers: %(request_headers)s
Response headers: %(response_headers)s
Request data: %(xml_request)s
Response data: %(xml_response)s
''')
    log_vals = dict(
        retry=retry,
        wait=wait,
        timeout=protocol.TIMEOUT,
        session_id=session.session_id,
        thread_id=thread_id,
        auth=session.auth,
        url=url,
        adapter=session.get_adapter(url),
        allow_redirects=allow_redirects,
        stream=stream,
        response_time=None,
        status_code=None,
        request_headers=headers,
        response_headers=None,
        xml_request=data,
        xml_response=None,
    )
    try:
        while True:
            _back_off_if_needed(protocol.credentials.back_off_until)
            log.debug('Session %s thread %s: retry %s timeout %s POST\'ing to %s after %ss wait',
                      session.session_id, thread_id, retry, protocol.TIMEOUT, url, wait)
            d_start = time_func()
            # Always create a dummy response for logging purposes, in case we fail in the following
            r = DummyResponse(url=url, headers={}, request_headers=headers)
            try:
                r = session.post(url=url, headers=headers, data=data, allow_redirects=False,
                                 timeout=protocol.TIMEOUT, stream=stream)
            except CONNECTION_ERRORS as e:
                log.debug('Session %s thread %s: connection error POST\'ing to %s',
                          session.session_id, thread_id, url)
                r = DummyResponse(url=url, headers={'TimeoutException': e}, request_headers=headers)
            finally:
                log_vals.update(
                    retry=retry,
                    wait=wait,
                    session_id=session.session_id,
                    url=str(r.url),
                    response_time=time_func() - d_start,
                    status_code=r.status_code,
                    request_headers=r.request.headers,
                    response_headers=r.headers,
                    xml_response='[STREAMING]' if stream else r.content,
                )
            log.debug(log_msg, log_vals)
            if _may_retry_on_error(r, protocol, wait):
                log.info("Session %s thread %s: Connection error on URL %s (code %s). Cool down %s secs",
                         session.session_id, thread_id, r.url, r.status_code, wait)
                time.sleep(wait)  # Increase delay for every retry
                retry += 1
                wait *= 2
                session = protocol.renew_session(session)
                continue
            if r.status_code in (301, 302):
                if stream:
                    r.close()
                url, redirects = _redirect_or_fail(r, redirects, allow_redirects)
                continue
            break
    except (RateLimitError, RedirectError) as e:
        log.warning(e.value)
        protocol.retire_session(session)
        raise
    except Exception as e:
        # Let higher layers handle this. Add full context for better debugging.
        log.error(str('%s: %s\n%s'), e.__class__.__name__, str(e), log_msg % log_vals)
        protocol.retire_session(session)
        raise
    if r.status_code == 500 and r.content and is_xml(r.content):
        # Some genius at Microsoft thinks it's OK to send a valid SOAP response as an HTTP 500
        log.debug('Got status code %s but trying to parse content anyway', r.status_code)
    elif r.status_code != 200:
        protocol.retire_session(session)
        try:
            _raise_response_errors(r, protocol, log_msg, log_vals)  # Always raises an exception
        finally:
            if stream:
                r.close()
    log.debug('Session %s thread %s: Useful response from %s', session.session_id, thread_id, url)
    return r, session
[ "def", "post_ratelimited", "(", "protocol", ",", "session", ",", "url", ",", "headers", ",", "data", ",", "allow_redirects", "=", "False", ",", "stream", "=", "False", ")", ":", "thread_id", "=", "get_ident", "(", ")", "wait", "=", "10", "# seconds", "retry", "=", "0", "redirects", "=", "0", "# In Python 2, we want this to be a 'str' object so logging doesn't break (all formatting arguments are 'str').", "# We activated 'unicode_literals' at the top of this file, so it would be a 'unicode' object unless we convert", "# to 'str' explicitly. This is a no-op for Python 3.", "log_msg", "=", "str", "(", "'''\\\nRetry: %(retry)s\nWaited: %(wait)s\nTimeout: %(timeout)s\nSession: %(session_id)s\nThread: %(thread_id)s\nAuth type: %(auth)s\nURL: %(url)s\nHTTP adapter: %(adapter)s\nAllow redirects: %(allow_redirects)s\nStreaming: %(stream)s\nResponse time: %(response_time)s\nStatus code: %(status_code)s\nRequest headers: %(request_headers)s\nResponse headers: %(response_headers)s\nRequest data: %(xml_request)s\nResponse data: %(xml_response)s\n'''", ")", "log_vals", "=", "dict", "(", "retry", "=", "retry", ",", "wait", "=", "wait", ",", "timeout", "=", "protocol", ".", "TIMEOUT", ",", "session_id", "=", "session", ".", "session_id", ",", "thread_id", "=", "thread_id", ",", "auth", "=", "session", ".", "auth", ",", "url", "=", "url", ",", "adapter", "=", "session", ".", "get_adapter", "(", "url", ")", ",", "allow_redirects", "=", "allow_redirects", ",", "stream", "=", "stream", ",", "response_time", "=", "None", ",", "status_code", "=", "None", ",", "request_headers", "=", "headers", ",", "response_headers", "=", "None", ",", "xml_request", "=", "data", ",", "xml_response", "=", "None", ",", ")", "try", ":", "while", "True", ":", "_back_off_if_needed", "(", "protocol", ".", "credentials", ".", "back_off_until", ")", "log", ".", "debug", "(", "'Session %s thread %s: retry %s timeout %s POST\\'ing to %s after %ss wait'", ",", "session", ".", "session_id", ",", "thread_id", ",", "retry", ",", "protocol", ".", "TIMEOUT", ",", "url", ",", "wait", ")", "d_start", "=", "time_func", "(", ")", "# Always create a dummy response for logging purposes, in case we fail in the following", "r", "=", "DummyResponse", "(", "url", "=", "url", ",", "headers", "=", "{", "}", ",", "request_headers", "=", "headers", ")", "try", ":", "r", "=", "session", ".", "post", "(", "url", "=", "url", ",", "headers", "=", "headers", ",", "data", "=", "data", ",", "allow_redirects", "=", "False", ",", "timeout", "=", "protocol", ".", "TIMEOUT", ",", "stream", "=", "stream", ")", "except", "CONNECTION_ERRORS", "as", "e", ":", "log", ".", "debug", "(", "'Session %s thread %s: connection error POST\\'ing to %s'", ",", "session", ".", "session_id", ",", "thread_id", ",", "url", ")", "r", "=", "DummyResponse", "(", "url", "=", "url", ",", "headers", "=", "{", "'TimeoutException'", ":", "e", "}", ",", "request_headers", "=", "headers", ")", "finally", ":", "log_vals", ".", "update", "(", "retry", "=", "retry", ",", "wait", "=", "wait", ",", "session_id", "=", "session", ".", "session_id", ",", "url", "=", "str", "(", "r", ".", "url", ")", ",", "response_time", "=", "time_func", "(", ")", "-", "d_start", ",", "status_code", "=", "r", ".", "status_code", ",", "request_headers", "=", "r", ".", "request", ".", "headers", ",", "response_headers", "=", "r", ".", "headers", ",", "xml_response", "=", "'[STREAMING]'", "if", "stream", "else", "r", ".", "content", ",", ")", "log", ".", "debug", "(", "log_msg", ",", "log_vals", ")", "if", 
"_may_retry_on_error", "(", "r", ",", "protocol", ",", "wait", ")", ":", "log", ".", "info", "(", "\"Session %s thread %s: Connection error on URL %s (code %s). Cool down %s secs\"", ",", "session", ".", "session_id", ",", "thread_id", ",", "r", ".", "url", ",", "r", ".", "status_code", ",", "wait", ")", "time", ".", "sleep", "(", "wait", ")", "# Increase delay for every retry", "retry", "+=", "1", "wait", "*=", "2", "session", "=", "protocol", ".", "renew_session", "(", "session", ")", "continue", "if", "r", ".", "status_code", "in", "(", "301", ",", "302", ")", ":", "if", "stream", ":", "r", ".", "close", "(", ")", "url", ",", "redirects", "=", "_redirect_or_fail", "(", "r", ",", "redirects", ",", "allow_redirects", ")", "continue", "break", "except", "(", "RateLimitError", ",", "RedirectError", ")", "as", "e", ":", "log", ".", "warning", "(", "e", ".", "value", ")", "protocol", ".", "retire_session", "(", "session", ")", "raise", "except", "Exception", "as", "e", ":", "# Let higher layers handle this. Add full context for better debugging.", "log", ".", "error", "(", "str", "(", "'%s: %s\\n%s'", ")", ",", "e", ".", "__class__", ".", "__name__", ",", "str", "(", "e", ")", ",", "log_msg", "%", "log_vals", ")", "protocol", ".", "retire_session", "(", "session", ")", "raise", "if", "r", ".", "status_code", "==", "500", "and", "r", ".", "content", "and", "is_xml", "(", "r", ".", "content", ")", ":", "# Some genius at Microsoft thinks it's OK to send a valid SOAP response as an HTTP 500", "log", ".", "debug", "(", "'Got status code %s but trying to parse content anyway'", ",", "r", ".", "status_code", ")", "elif", "r", ".", "status_code", "!=", "200", ":", "protocol", ".", "retire_session", "(", "session", ")", "try", ":", "_raise_response_errors", "(", "r", ",", "protocol", ",", "log_msg", ",", "log_vals", ")", "# Always raises an exception", "finally", ":", "if", "stream", ":", "r", ".", "close", "(", ")", "log", ".", "debug", "(", "'Session %s thread %s: Useful response from %s'", ",", "session", ".", "session_id", ",", "thread_id", ",", "url", ")", "return", "r", ",", "session" ]
There are two error-handling policies implemented here: a fail-fast policy intended for stand-alone scripts which fails on all responses except HTTP 200. The other policy is intended for long-running tasks that need to respect rate-limiting errors from the server and paper over outages of up to 1 hour.

Wrap POST requests in a try-except loop with a lot of error handling logic and some basic rate-limiting. If a request fails, and some conditions are met, the loop waits in increasing intervals, up to 1 hour, before trying again. The reason for this is that servers often malfunction for short periods of time, either because of ongoing data migrations or other maintenance tasks, misconfigurations, or heavy load, or because the connecting user has hit a throttling policy limit. If the loop exited early, consumers of this package that don't implement their own rate-limiting code could quickly swamp such a server with new requests. That would only make things worse. Instead, it's better if the request loop waits patiently until the server is functioning again.

If the connecting user has hit a throttling policy, then the server will start to malfunction in many interesting ways, but never actually tell the user what is happening. There is no way to distinguish this situation from other malfunctions. The only cure is to stop making requests.

The contract on sessions here is to return the session that ends up being used, or to retire the session if we intend to raise an exception. We give up when the max_wait timeout is exceeded, not after a fixed number of retries.

An additional resource on handling throttling policies and client back-off strategies: https://msdn.microsoft.com/en-us/library/office/jj945066(v=exchg.150).aspx#bk_ThrottlingBatch
[ "There", "are", "two", "error", "-", "handling", "policies", "implemented", "here", ":", "a", "fail", "-", "fast", "policy", "intended", "for", "stand", "-", "alone", "scripts", "which", "fails", "on", "all", "responses", "except", "HTTP", "200", ".", "The", "other", "policy", "is", "intended", "for", "long", "-", "running", "tasks", "that", "need", "to", "respect", "rate", "-", "limiting", "errors", "from", "the", "server", "and", "paper", "over", "outages", "of", "up", "to", "1", "hour", "." ]
python
train
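The back-off behaviour described in the docstring above can be illustrated in isolation. Below is a minimal, hypothetical sketch of the same retry pattern -- exponential back-off with a hard cap on total wait time rather than on retry count -- written against plain `requests`; the function name, the 10-second initial wait, the 120-second request timeout and the 3600-second cap are illustrative assumptions, not exchangelib's actual values or API.

import time
import requests

def post_with_backoff(url, data, headers=None, max_wait=3600):
    """Hypothetical fail-slow POST: wait in doubling intervals, give up on max_wait."""
    wait = 10   # initial cool-down, in seconds (illustrative value)
    waited = 0  # total time spent sleeping so far
    while True:
        try:
            r = requests.post(url, data=data, headers=headers, timeout=120)
            if r.status_code == 200:
                return r  # useful response; hand it back to the caller
        except requests.exceptions.ConnectionError:
            pass  # treat connection errors like retryable server errors
        if waited + wait > max_wait:
            raise RuntimeError('Server still malfunctioning after %s seconds' % waited)
        time.sleep(wait)  # wait patiently instead of swamping the server
        waited += wait
        wait *= 2  # exponential back-off, mirroring the loop above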
satellogic/telluric
telluric/collections.py
https://github.com/satellogic/telluric/blob/e752cd3ee71e339f79717e526fde362e80055d9e/telluric/collections.py#L386-L396
def validate(self):
    """ If a schema exists, run Fiona's shapefile validation code by trying to save to a MemoryFile. """
    if self._schema is not None:
        with MemoryFile() as memfile:
            with memfile.open(driver="ESRI Shapefile", schema=self.schema) as target:
                for _item in self._results:
                    # getting rid of the assets that don't behave well because of in-memory rasters
                    item = GeoFeature(_item.geometry, _item.properties)
                    target.write(item.to_record(item.crs))
[ "def", "validate", "(", "self", ")", ":", "if", "self", ".", "_schema", "is", "not", "None", ":", "with", "MemoryFile", "(", ")", "as", "memfile", ":", "with", "memfile", ".", "open", "(", "driver", "=", "\"ESRI Shapefile\"", ",", "schema", "=", "self", ".", "schema", ")", "as", "target", ":", "for", "_item", "in", "self", ".", "_results", ":", "# getting rid of the assets that don't behave well becasue of in memroy rasters", "item", "=", "GeoFeature", "(", "_item", ".", "geometry", ",", "_item", ".", "properties", ")", "target", ".", "write", "(", "item", ".", "to_record", "(", "item", ".", "crs", ")", ")" ]
If a schema exists, run Fiona's shapefile validation code by trying to save to a MemoryFile.
[ "if", "schema", "exists", "we", "run", "shape", "file", "validation", "code", "of", "fiona", "by", "trying", "to", "save", "to", "in", "MemoryFile" ]
python
train
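As a stand-alone illustration of the validation trick above -- writing records to an in-memory shapefile so that Fiona raises on schema violations -- here is a hypothetical sketch; the schema and record values are made up for the example, and only Fiona's documented `MemoryFile` API is used.

from fiona.io import MemoryFile

schema = {'geometry': 'Point', 'properties': {'name': 'str'}}  # illustrative schema
records = [
    {'geometry': {'type': 'Point', 'coordinates': (0.0, 0.0)},
     'properties': {'name': 'origin'}},
]

with MemoryFile() as memfile:
    with memfile.open(driver='ESRI Shapefile', schema=schema) as target:
        for record in records:
            # Fiona raises if a record does not match the declared schema,
            # which is exactly what makes this a cheap validation pass.
            target.write(record)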
cokelaer/spectrum
src/spectrum/linear_prediction.py
https://github.com/cokelaer/spectrum/blob/bad6c32e3f10e185098748f67bb421b378b06afe/src/spectrum/linear_prediction.py#L134-L146
def rc2ac(k, R0):
    """Convert reflection coefficients to autocorrelation sequence.

    :param k: reflection coefficients
    :param R0: zero-lag autocorrelation
    :returns: the autocorrelation sequence

    .. seealso:: :func:`ac2rc`, :func:`poly2rc`, :func:`ac2poly`, :func:`rc2poly`.

    """
    [a, efinal] = rc2poly(k, R0)
    R, u, kr, e = rlevinson(a, efinal)
    return R
[ "def", "rc2ac", "(", "k", ",", "R0", ")", ":", "[", "a", ",", "efinal", "]", "=", "rc2poly", "(", "k", ",", "R0", ")", "R", ",", "u", ",", "kr", ",", "e", "=", "rlevinson", "(", "a", ",", "efinal", ")", "return", "R" ]
Convert reflection coefficients to autocorrelation sequence.

:param k: reflection coefficients
:param R0: zero-lag autocorrelation
:returns: the autocorrelation sequence

.. seealso:: :func:`ac2rc`, :func:`poly2rc`, :func:`ac2poly`, :func:`rc2poly`.
[ "Convert", "reflection", "coefficients", "to", "autocorrelation", "sequence", "." ]
python
valid
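A short usage sketch for `rc2ac` follows; it assumes the function is importable from the top-level spectrum package, and the coefficient values are arbitrary examples chosen for illustration.

import numpy
from spectrum import rc2ac

k = numpy.array([0.5, -0.3, 0.2])  # example reflection coefficients
R0 = 1.0                           # zero-lag autocorrelation
R = rc2ac(k, R0)                   # autocorrelation sequence R(0)..R(3)
print(R)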
SKA-ScienceDataProcessor/integration-prototype
sip/science_pipeline_workflows/example_imager_mpi/example_mpi_imager.py
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/science_pipeline_workflows/example_imager_mpi/example_mpi_imager.py#L130-L248
def main():
    """Runs test imaging pipeline using MPI."""
    # Check command line arguments.
    if len(sys.argv) < 2:
        raise RuntimeError(
            'Usage: mpiexec -n <np> '
            'python mpi_imager_test.py <settings_file> <dir>')

    # Get the MPI communicator and initialise broadcast variables.
    comm = MPI.COMM_WORLD
    settings = None
    inputs = None
    grid_weights = None

    # Create log.
    log = logging.getLogger()
    log.setLevel(logging.DEBUG)
    if len(log.handlers) == 0:
        log.addHandler(logging.StreamHandler(sys.stdout))

    if comm.Get_rank() == 0:
        # Load pipeline settings.
        with open(sys.argv[1]) as f:
            settings = json.load(f)

        # Get a list of input Measurement Sets to process.
        data_dir = str(sys.argv[2])
        inputs = glob(join(data_dir, '*.ms')) + glob(join(data_dir, '*.MS'))
        # Materialise the filter object so it survives being consumed by the
        # log call below and can still be chunked (filter() is lazy in Python 3).
        inputs = list(filter(None, inputs))
        log.info('Found input Measurement Sets: %s', ', '.join(inputs))

        # Distribute the list of Measurement Sets among processors.
        inputs = chunks(inputs, comm.Get_size())

    # Broadcast settings and scatter list of input files.
    comm.barrier()
    settings = comm.bcast(settings)
    inputs = comm.scatter(inputs)

    # Record which file(s) this node is working on.
    log.debug('Rank %d, processing [%s]', comm.Get_rank(), ', '.join(inputs))

    # Create an imager and configure it.
    precision = settings['precision']
    imager = oskar.Imager(precision)
    for key, value in settings['imager'].items():
        setattr(imager, key, value)

    # Allocate a local visibility grid.
    grid_norm = 0.
    grid_dim = [imager.plane_size, imager.plane_size]
    grid_data = numpy.zeros(grid_dim,
                            dtype='c8' if precision == 'single' else 'c16')

    # Process data according to mode.
    if settings['combine']:
        if imager.weighting == 'Uniform' or imager.algorithm == 'W-projection':
            # If necessary, generate a local weights grid.
            local_weights = None
            if imager.weighting == 'Uniform':
                grid_weights = numpy.zeros(grid_dim, dtype=precision)
                local_weights = numpy.zeros(grid_dim, dtype=precision)

            # Do a first pass for uniform weighting or W-projection.
            imager.coords_only = True
            for f in inputs:
                log.info('Reading coordinates from %s', f)
                process_input_data(f, imager, None, 0.0, local_weights)
            imager.coords_only = False

            # Get maximum number of W-projection planes required.
            num_w_planes = imager.num_w_planes
            num_w_planes = comm.allreduce(num_w_planes, op=MPI.MAX)
            imager.num_w_planes = num_w_planes

            # Combine (reduce) weights grids, and broadcast the result.
            if local_weights is not None:
                comm.Allreduce(local_weights, grid_weights, op=MPI.SUM)

        # Populate the local visibility grid.
        for f in inputs:
            log.info('Reading visibilities from %s', f)
            grid_norm = process_input_data(f, imager, grid_data, grid_norm,
                                           grid_weights)

        # Combine (reduce) visibility grids.
        grid = numpy.zeros_like(grid_data)
        comm.Reduce(grid_data, grid, op=MPI.SUM)
        grid_norm = comm.reduce(grid_norm, op=MPI.SUM)

        # Finalise grid and save image.
        if comm.Get_rank() == 0:
            save_image(imager, grid, grid_norm, settings['output_file'])
            log.info('Finished. Output file is %s', settings['output_file'])
    else:
        for f in inputs:
            # Clear the grid.
            grid_norm = 0.
            grid_data.fill(0)
            if imager.weighting == 'Uniform':
                grid_weights = numpy.zeros(grid_dim, dtype=precision)

            # Do a first pass for uniform weighting or W-projection.
            if imager.weighting == 'Uniform' or \
                    imager.algorithm == 'W-projection':
                imager.coords_only = True
                log.info('Reading coordinates from %s', f)
                process_input_data(f, imager, None, 0.0, grid_weights)
                imager.coords_only = False

            # Populate the local visibility grid.
            log.info('Reading visibilities from %s', f)
            grid_norm = process_input_data(f, imager, grid_data, grid_norm,
                                           grid_weights)

            # Save image by finalising grid.
            output_file = splitext(f)[0] + '.fits'
            save_image(imager, grid_data, grid_norm, output_file)
            log.info('Finished. Output file is %s', output_file)
[ "def", "main", "(", ")", ":", "# Check command line arguments.", "if", "len", "(", "sys", ".", "argv", ")", "<", "2", ":", "raise", "RuntimeError", "(", "'Usage: mpiexec -n <np> '", "'python mpi_imager_test.py <settings_file> <dir>'", ")", "# Get the MPI communicator and initialise broadcast variables.", "comm", "=", "MPI", ".", "COMM_WORLD", "settings", "=", "None", "inputs", "=", "None", "grid_weights", "=", "None", "# Create log.", "log", "=", "logging", ".", "getLogger", "(", ")", "log", ".", "setLevel", "(", "logging", ".", "DEBUG", ")", "if", "len", "(", "log", ".", "handlers", ")", "==", "0", ":", "log", ".", "addHandler", "(", "logging", ".", "StreamHandler", "(", "sys", ".", "stdout", ")", ")", "if", "comm", ".", "Get_rank", "(", ")", "==", "0", ":", "# Load pipeline settings.", "with", "open", "(", "sys", ".", "argv", "[", "1", "]", ")", "as", "f", ":", "settings", "=", "json", ".", "load", "(", "f", ")", "# Get a list of input Measurement Sets to process.", "data_dir", "=", "str", "(", "sys", ".", "argv", "[", "2", "]", ")", "inputs", "=", "glob", "(", "join", "(", "data_dir", ",", "'*.ms'", ")", ")", "+", "glob", "(", "join", "(", "data_dir", ",", "'*.MS'", ")", ")", "inputs", "=", "filter", "(", "None", ",", "inputs", ")", "log", ".", "info", "(", "'Found input Measurement Sets: %s'", ",", "', '", ".", "join", "(", "inputs", ")", ")", "# Distribute the list of Measurement Sets among processors.", "inputs", "=", "chunks", "(", "inputs", ",", "comm", ".", "Get_size", "(", ")", ")", "# Broadcast settings and scatter list of input files.", "comm", ".", "barrier", "(", ")", "settings", "=", "comm", ".", "bcast", "(", "settings", ")", "inputs", "=", "comm", ".", "scatter", "(", "inputs", ")", "# Record which file(s) this node is working on.", "log", ".", "debug", "(", "'Rank %d, processing [%s]'", ",", "comm", ".", "Get_rank", "(", ")", ",", "', '", ".", "join", "(", "inputs", ")", ")", "# Create an imager and configure it.", "precision", "=", "settings", "[", "'precision'", "]", "imager", "=", "oskar", ".", "Imager", "(", "precision", ")", "for", "key", ",", "value", "in", "settings", "[", "'imager'", "]", ".", "items", "(", ")", ":", "setattr", "(", "imager", ",", "key", ",", "value", ")", "# Allocate a local visibility grid.", "grid_norm", "=", "0.", "grid_dim", "=", "[", "imager", ".", "plane_size", ",", "imager", ".", "plane_size", "]", "grid_data", "=", "numpy", ".", "zeros", "(", "grid_dim", ",", "dtype", "=", "'c8'", "if", "precision", "==", "'single'", "else", "'c16'", ")", "# Process data according to mode.", "if", "settings", "[", "'combine'", "]", ":", "if", "imager", ".", "weighting", "==", "'Uniform'", "or", "imager", ".", "algorithm", "==", "'W-projection'", ":", "# If necessary, generate a local weights grid.", "local_weights", "=", "None", "if", "imager", ".", "weighting", "==", "'Uniform'", ":", "grid_weights", "=", "numpy", ".", "zeros", "(", "grid_dim", ",", "dtype", "=", "precision", ")", "local_weights", "=", "numpy", ".", "zeros", "(", "grid_dim", ",", "dtype", "=", "precision", ")", "# Do a first pass for uniform weighting or W-projection.", "imager", ".", "coords_only", "=", "True", "for", "f", "in", "inputs", ":", "log", ".", "info", "(", "'Reading coordinates from %s'", ",", "f", ")", "process_input_data", "(", "f", ",", "imager", ",", "None", ",", "0.0", ",", "local_weights", ")", "imager", ".", "coords_only", "=", "False", "# Get maximum number of W-projection planes required.", "num_w_planes", "=", "imager", ".", "num_w_planes", "num_w_planes", "=", 
"comm", ".", "allreduce", "(", "num_w_planes", ",", "op", "=", "MPI", ".", "MAX", ")", "imager", ".", "num_w_planes", "=", "num_w_planes", "# Combine (reduce) weights grids, and broadcast the result.", "if", "local_weights", "is", "not", "None", ":", "comm", ".", "Allreduce", "(", "local_weights", ",", "grid_weights", ",", "op", "=", "MPI", ".", "SUM", ")", "# Populate the local visibility grid.", "for", "f", "in", "inputs", ":", "log", ".", "info", "(", "'Reading visibilities from %s'", ",", "f", ")", "grid_norm", "=", "process_input_data", "(", "f", ",", "imager", ",", "grid_data", ",", "grid_norm", ",", "grid_weights", ")", "# Combine (reduce) visibility grids.", "grid", "=", "numpy", ".", "zeros_like", "(", "grid_data", ")", "comm", ".", "Reduce", "(", "grid_data", ",", "grid", ",", "op", "=", "MPI", ".", "SUM", ")", "grid_norm", "=", "comm", ".", "reduce", "(", "grid_norm", ",", "op", "=", "MPI", ".", "SUM", ")", "# Finalise grid and save image.", "if", "comm", ".", "Get_rank", "(", ")", "==", "0", ":", "save_image", "(", "imager", ",", "grid", ",", "grid_norm", ",", "settings", "[", "'output_file'", "]", ")", "log", ".", "info", "(", "'Finished. Output file is %s'", ",", "settings", "[", "'output_file'", "]", ")", "else", ":", "for", "f", "in", "inputs", ":", "# Clear the grid.", "grid_norm", "=", "0.", "grid_data", ".", "fill", "(", "0", ")", "if", "imager", ".", "weighting", "==", "'Uniform'", ":", "grid_weights", "=", "numpy", ".", "zeros", "(", "grid_dim", ",", "dtype", "=", "precision", ")", "# Do a first pass for uniform weighting or W-projection.", "if", "imager", ".", "weighting", "==", "'Uniform'", "or", "imager", ".", "algorithm", "==", "'W-projection'", ":", "imager", ".", "coords_only", "=", "True", "log", ".", "info", "(", "'Reading coordinates from %s'", ",", "f", ")", "process_input_data", "(", "f", ",", "imager", ",", "None", ",", "0.0", ",", "grid_weights", ")", "imager", ".", "coords_only", "=", "False", "# Populate the local visibility grid.", "log", ".", "info", "(", "'Reading visibilities from %s'", ",", "f", ")", "grid_norm", "=", "process_input_data", "(", "f", ",", "imager", ",", "grid_data", ",", "grid_norm", ",", "grid_weights", ")", "# Save image by finalising grid.", "output_file", "=", "splitext", "(", "f", ")", "[", "0", "]", "+", "'.fits'", "save_image", "(", "imager", ",", "grid_data", ",", "grid_norm", ",", "output_file", ")", "log", ".", "info", "(", "'Finished. Output file is %s'", ",", "output_file", ")" ]
Runs test imaging pipeline using MPI.
[ "Runs", "test", "imaging", "pipeline", "using", "MPI", "." ]
python
train
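The scatter/reduce skeleton at the heart of `main()` can be shown on its own. The sketch below is a deliberately cut-down, hypothetical version: `chunks()` is reimplemented here purely for illustration (the real helper lives elsewhere in the module), and the per-item arithmetic is a stand-in for gridding visibilities.

import numpy
from mpi4py import MPI

def chunks(seq, n):
    # Illustrative splitter: n roughly equal sublists, one per rank.
    return [seq[i::n] for i in range(n)]

comm = MPI.COMM_WORLD
work = None
if comm.Get_rank() == 0:
    work = chunks(list(range(16)), comm.Get_size())
work = comm.scatter(work)  # each rank receives one sublist

local = numpy.zeros(4)
for item in work:
    local[item % 4] += item  # stand-in for populating a local grid

total = numpy.zeros_like(local) if comm.Get_rank() == 0 else None
comm.Reduce(local, total, op=MPI.SUM)  # sum partial grids onto rank 0
if comm.Get_rank() == 0:
    print(total)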
tanghaibao/goatools
goatools/gosubdag/plot/plot.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/gosubdag/plot/plot.py#L55-L60
def plt_goids(gosubdag, fout_img, goids, **kws_plt): """Plot GO IDs in a DAG (Directed Acyclic Graph).""" gosubdag_plt = GoSubDag(goids, gosubdag.go2obj, rcntobj=gosubdag.rcntobj, **kws_plt) godagplot = GoSubDagPlot(gosubdag_plt, **kws_plt) godagplot.plt_dag(fout_img) return godagplot
[ "def", "plt_goids", "(", "gosubdag", ",", "fout_img", ",", "goids", ",", "*", "*", "kws_plt", ")", ":", "gosubdag_plt", "=", "GoSubDag", "(", "goids", ",", "gosubdag", ".", "go2obj", ",", "rcntobj", "=", "gosubdag", ".", "rcntobj", ",", "*", "*", "kws_plt", ")", "godagplot", "=", "GoSubDagPlot", "(", "gosubdag_plt", ",", "*", "*", "kws_plt", ")", "godagplot", ".", "plt_dag", "(", "fout_img", ")", "return", "godagplot" ]
Plot GO IDs in a DAG (Directed Acyclic Graph).
[ "Plot", "GO", "IDs", "in", "a", "DAG", "(", "Directed", "Acyclic", "Graph", ")", "." ]
python
train
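Finally, a hypothetical call to `plt_goids`; the GO IDs and output file name are placeholders, and a populated `GoSubDag` instance (`gosubdag`) is assumed to exist already.

goids = ['GO:0008150', 'GO:0003674']  # placeholder GO IDs
godagplot = plt_goids(gosubdag, 'go_plot.png', goids)  # writes the DAG plot to go_plot.png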