repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
genepattern/genepattern-python
gp/core.py
https://github.com/genepattern/genepattern-python/blob/9478ea65362b91c72a94f7300c3de8d710bebb71/gp/core.py#L305-L323
def load_info(self):
    """Populate this GPJob's attributes from the JSON blob stored at GPJob.info.

    Primarily intended to be called from GPJob.get_info().
    """
    info = self.info
    self.task_name = info['taskName']
    self.task_lsid = info['taskLsid']
    self.user_id = info['userId']
    self.job_number = int(info['jobId'])
    self.status = self.get_status_message()
    self.date_submitted = info['dateSubmitted']
    self.log_files = info['logFiles']
    self.output_files = info['outputFiles']
    self.num_output_files = info['numOutputFiles']

    # Populate child jobs, if any exist
    self.children = self.get_child_jobs()
[ "def", "load_info", "(", "self", ")", ":", "self", ".", "task_name", "=", "self", ".", "info", "[", "'taskName'", "]", "self", ".", "task_lsid", "=", "self", ".", "info", "[", "'taskLsid'", "]", "self", ".", "user_id", "=", "self", ".", "info", "[", ...
Parses the JSON object stored at GPJob.info and assigns its metadata to properties of this GPJob object. Primarily intended to be called from GPJob.get_info().
[ "Parses", "the", "JSON", "object", "stored", "at", "GPJob", ".", "info", "and", "assigns", "its", "metadata", "to", "properties", "of", "this", "GPJob", "object", "." ]
python
train
20c/xbahn
xbahn/api.py
https://github.com/20c/xbahn/blob/afb27b0576841338a366d7cac0200a782bd84be6/xbahn/api.py#L304-L350
def on_receive(self, message=None, wire=None, event_origin=None):
    """
    Event handler bound to the receive event of the link the server
    is wired to.

    Arguments:
        - message (message.Message): incoming message

    Keyword arguments:
        - event_origin (connection.Link)
    """
    self.trigger("before_call", message)

    action = message.data
    prepare = self.prepare_message

    try:
        # Give every registered handler a chance to inspect the incoming
        # message before the target function is resolved.
        for handler in self.handlers:
            handler.incoming(message, self)
        fn = self.get_function(action, message.path)
    except Exception as exc:
        wire.respond(message, ErrorMessage(str(exc)))
        return

    if not (callable(fn) and getattr(fn, "exposed", False)):
        wire.respond(
            message,
            prepare(
                ErrorMessage("action '%s' not exposed on API (%s)" % (
                    action, self.__class__.__name__)))
        )
    else:
        try:
            result = fn(*message.args, **message.kwargs)
            if isinstance(result, Message):
                wire.respond(message, prepare(result))
            else:
                wire.respond(message, prepare(Message(result)))
        except Exception as exc:
            # Debug mode exposes the full traceback; otherwise only the
            # exception text goes back over the wire.
            if self.debug:
                wire.respond(message, prepare(ErrorMessage(str(traceback.format_exc()))))
            else:
                wire.respond(message, prepare(ErrorMessage(str(exc))))

    self.trigger("after_call", message)
[ "def", "on_receive", "(", "self", ",", "message", "=", "None", ",", "wire", "=", "None", ",", "event_origin", "=", "None", ")", ":", "self", ".", "trigger", "(", "\"before_call\"", ",", "message", ")", "fn_name", "=", "message", ".", "data", "pmsg", "=...
event handler bound to the receive event of the link the server is wired too. Arguments: - message (message.Message): incoming message Keyword arguments: - event_origin (connection.Link)
[ "event", "handler", "bound", "to", "the", "receive", "event", "of", "the", "link", "the", "server", "is", "wired", "too", "." ]
python
train
mongodb/mongo-python-driver
pymongo/change_stream.py
https://github.com/mongodb/mongo-python-driver/blob/c29c21449e3aae74154207058cf85fd94018d4cd/pymongo/change_stream.py#L226-L283
def try_next(self): """Advance the cursor without blocking indefinitely. This method returns the next change document without waiting indefinitely for the next change. For example:: with db.collection.watch() as stream: while stream.alive: change = stream.try_next() if change is not None: print(change) elif stream.alive: # We end up here when there are no recent changes. # Sleep for a while to avoid flooding the server with # getMore requests when no changes are available. time.sleep(10) If no change document is cached locally then this method runs a single getMore command. If the getMore yields any documents, the next document is returned, otherwise, if the getMore returns no documents (because there have been no changes) then ``None`` is returned. :Returns: The next change document or ``None`` when no document is available after running a single getMore or when the cursor is closed. .. versionadded:: 3.8 """ # Attempt to get the next change with at most one getMore and at most # one resume attempt. try: change = self._cursor._try_next(True) except ConnectionFailure: self._resume() change = self._cursor._try_next(False) except OperationFailure as exc: if exc.code in _NON_RESUMABLE_GETMORE_ERRORS: raise self._resume() change = self._cursor._try_next(False) # No changes are available. if change is None: return None try: resume_token = change['_id'] except KeyError: self.close() raise InvalidOperation( "Cannot provide resume functionality when the resume " "token is missing.") self._resume_token = copy.copy(resume_token) self._start_at_operation_time = None if self._decode_custom: return _bson_to_dict(change.raw, self._orig_codec_options) return change
[ "def", "try_next", "(", "self", ")", ":", "# Attempt to get the next change with at most one getMore and at most", "# one resume attempt.", "try", ":", "change", "=", "self", ".", "_cursor", ".", "_try_next", "(", "True", ")", "except", "ConnectionFailure", ":", "self",...
Advance the cursor without blocking indefinitely. This method returns the next change document without waiting indefinitely for the next change. For example:: with db.collection.watch() as stream: while stream.alive: change = stream.try_next() if change is not None: print(change) elif stream.alive: # We end up here when there are no recent changes. # Sleep for a while to avoid flooding the server with # getMore requests when no changes are available. time.sleep(10) If no change document is cached locally then this method runs a single getMore command. If the getMore yields any documents, the next document is returned, otherwise, if the getMore returns no documents (because there have been no changes) then ``None`` is returned. :Returns: The next change document or ``None`` when no document is available after running a single getMore or when the cursor is closed. .. versionadded:: 3.8
[ "Advance", "the", "cursor", "without", "blocking", "indefinitely", "." ]
python
train
Mxit/python-mxit
mxit/services.py
https://github.com/Mxit/python-mxit/blob/6b18a54ef6fbfe1f9d94755ba3d4ad77743c8b0c/mxit/services.py#L274-L287
def get_friend_suggestions(self, scope='graph/read'):
    """
    Retrieve friend suggestions for the Mxit user's social graph.

    User authentication required with the following scope: 'graph/read'

    Raises:
        MxitAPIException: if the suggestions payload is not valid JSON.
    """
    suggestions = _get(
        token=self.oauth.get_user_token(scope),
        uri='/user/socialgraph/suggestions'
    )
    try:
        return json.loads(suggestions)
    except ValueError:
        # json.loads raises ValueError on malformed input; the previous
        # bare `except:` also swallowed unrelated errors (even
        # KeyboardInterrupt), masking real bugs.
        raise MxitAPIException('Error parsing suggestions data')
[ "def", "get_friend_suggestions", "(", "self", ",", "scope", "=", "'graph/read'", ")", ":", "suggestions", "=", "_get", "(", "token", "=", "self", ".", "oauth", ".", "get_user_token", "(", "scope", ")", ",", "uri", "=", "'/user/socialgraph/suggestions'", ")", ...
Retrieve the Mxit user's full profile User authentication required with the following scope: 'graph/read'
[ "Retrieve", "the", "Mxit", "user", "s", "full", "profile", "User", "authentication", "required", "with", "the", "following", "scope", ":", "graph", "/", "read" ]
python
train
wummel/linkchecker
linkcheck/lock.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/lock.py#L59-L74
def get_semaphore(name, value=None, debug=False):
    """Create a new semaphore, optionally wrapped for debugging.

    @param value: if not None, a BoundedSemaphore will be used
    @ptype value: int or None
    @param debug: if True, acquire() and release() will have debug messages
    @ptype debug: boolean, default is False
    @return: a semaphore object
    @rtype: threading.Semaphore or threading.BoundedSemaphore or DebugLock
    """
    if value is None:
        sem = threading.Semaphore()
    else:
        sem = threading.BoundedSemaphore(value)
    return DebugLock(sem, name) if debug else sem
[ "def", "get_semaphore", "(", "name", ",", "value", "=", "None", ",", "debug", "=", "False", ")", ":", "if", "value", "is", "None", ":", "lock", "=", "threading", ".", "Semaphore", "(", ")", "else", ":", "lock", "=", "threading", ".", "BoundedSemaphore"...
Get a new semaphore. @param value: if not None, a BoundedSemaphore will be used @ptype debug: int or None @param debug: if True, acquire() and release() will have debug messages @ptype debug: boolean, default is False @return: a semaphore object @rtype: threading.Semaphore or threading.BoundedSemaphore or DebugLock
[ "Get", "a", "new", "semaphore", "." ]
python
train
tbielawa/bitmath
bitmath/__init__.py
https://github.com/tbielawa/bitmath/blob/58ad3ac5f076cc6e53f36a91af055c6028c850a5/bitmath/__init__.py#L439-L528
def best_prefix(self, system=None):
    """Return this quantity converted into its best human-readable prefix unit.

    Optional parameter, `system`, allows you to prefer NIST or SI in the
    results. By default, the current system is used (Bit/Byte default to
    NIST).

    Selection logic: values below one Byte come back as a Bit instance;
    otherwise the integer part of log(bytes, base) picks the prefix —
    0 means plain Byte, anything past the table's end uses the largest
    prefix, and index i in between maps to the table entry i - 1.
    """
    # Use absolute value so we don't return Bit's for *everything*
    # less than Byte(1). From github issue #55
    if abs(self) < Byte(1):
        return Bit.from_other(self)

    # Normalize to a Byte instance so the log math below is uniform.
    _inst = self if type(self) is Byte else Byte.from_other(self)  # pylint: disable=unidiomatic-typecheck

    # Choose the prefix table and step base, honoring an explicit
    # `system` preference over the instance's own unit system.
    if system is None:
        if self.system == 'NIST':
            _STEPS, _BASE = NIST_PREFIXES, 1024
        elif self.system == 'SI':
            _STEPS, _BASE = SI_PREFIXES, 1000
        # Anything else would have raised by now
    elif system == NIST:
        _STEPS, _BASE = NIST_PREFIXES, 1024
    elif system == SI:
        _STEPS, _BASE = SI_PREFIXES, 1000
    else:
        raise ValueError("Invalid value given for 'system' parameter."
                         " Must be one of NIST or SI")

    # Index of the string of the best prefix in the STEPS list.
    _index = int(math.log(abs(_inst.bytes), _BASE))

    # log() returns >= 0 here, but that does not map onto STEPS 1:1 —
    # index 0 means "already best as a Byte" and index i maps to
    # STEPS[i - 1].
    if _index == 0:
        # Already a Byte() type, so return it.
        return _inst
    if _index >= len(_STEPS):
        # This is a really big number. Use the biggest prefix we've got.
        _best_prefix = _STEPS[-1]
    else:
        # There is an appropriate prefix unit to represent this.
        _best_prefix = _STEPS[_index - 1]

    return getattr(self, 'to_%sB' % _best_prefix)()
[ "def", "best_prefix", "(", "self", ",", "system", "=", "None", ")", ":", "# Use absolute value so we don't return Bit's for *everything*", "# less than Byte(1). From github issue #55", "if", "abs", "(", "self", ")", "<", "Byte", "(", "1", ")", ":", "return", "Bit", ...
Optional parameter, `system`, allows you to prefer NIST or SI in the results. By default, the current system is used (Bit/Byte default to NIST). Logic discussion/notes: Base-case, does it need converting? If the instance is less than one Byte, return the instance as a Bit instance. Else, begin by recording the unit system the instance is defined by. This determines which steps (NIST_STEPS/SI_STEPS) we iterate over. If the instance is not already a ``Byte`` instance, convert it to one. NIST units step up by powers of 1024, SI units step up by powers of 1000. Take integer value of the log(base=STEP_POWER) of the instance's byte value. E.g.: >>> int(math.log(Gb(100).bytes, 1000)) 3 This will return a value >= 0. The following determines the 'best prefix unit' for representation: * result == 0, best represented as a Byte * result >= len(SYSTEM_STEPS), best represented as an Exbi/Exabyte * 0 < result < len(SYSTEM_STEPS), best represented as SYSTEM_PREFIXES[result-1]
[ "Optional", "parameter", "system", "allows", "you", "to", "prefer", "NIST", "or", "SI", "in", "the", "results", ".", "By", "default", "the", "current", "system", "is", "used", "(", "Bit", "/", "Byte", "default", "to", "NIST", ")", "." ]
python
train
agile-geoscience/welly
welly/utils.py
https://github.com/agile-geoscience/welly/blob/ed4c991011d6290938fef365553041026ba29f42/welly/utils.py#L307-L323
def normalize(a, new_min=0.0, new_max=1.0):
    """
    From ``bruges``

    Normalize an array to [0,1] or to arbitrary new min and max.

    Args:
        a (ndarray)
        new_min (float): the new min, default 0.
        new_max (float): the new max, default 1.

    Returns:
        ndarray. The normalized array.
    """
    low = np.amin(a)
    # Scale into [0, 1] first, then stretch/shift onto the target range.
    unit = (a - low) / np.amax(a - low)
    return unit * (new_max - new_min) + new_min
[ "def", "normalize", "(", "a", ",", "new_min", "=", "0.0", ",", "new_max", "=", "1.0", ")", ":", "n", "=", "(", "a", "-", "np", ".", "amin", "(", "a", ")", ")", "/", "np", ".", "amax", "(", "a", "-", "np", ".", "amin", "(", "a", ")", ")", ...
From ``bruges`` Normalize an array to [0,1] or to arbitrary new min and max. Args: a (ndarray) new_min (float): the new min, default 0. new_max (float): the new max, default 1. Returns: ndarray. The normalized array.
[ "From", "bruges" ]
python
train
isislovecruft/python-gnupg
pretty_bad_protocol/_meta.py
https://github.com/isislovecruft/python-gnupg/blob/784571449032e811587249743e183fc5e908a673/pretty_bad_protocol/_meta.py#L795-L814
def _recv_keys(self, keyids, keyserver=None):
    """Import keys from a keyserver.

    :param str keyids: A space-delimited string containing the keyids to
                       request.
    :param str keyserver: The keyserver to request the ``keyids`` from;
                          defaults to `gnupg.GPG.keyserver`.
    """
    keyserver = keyserver or self.keyserver
    args = ['--keyserver {0}'.format(keyserver),
            '--recv-keys {0}'.format(keyids)]
    log.info('Requesting keys from %s: %s' % (keyserver, keyids))

    result = self._result_map['import'](self)
    proc = self._open_subprocess(args)
    self._collect_output(proc, result)
    log.debug('recv_keys result: %r', result.__dict__)
    return result
[ "def", "_recv_keys", "(", "self", ",", "keyids", ",", "keyserver", "=", "None", ")", ":", "if", "not", "keyserver", ":", "keyserver", "=", "self", ".", "keyserver", "args", "=", "[", "'--keyserver {0}'", ".", "format", "(", "keyserver", ")", ",", "'--rec...
Import keys from a keyserver. :param str keyids: A space-delimited string containing the keyids to request. :param str keyserver: The keyserver to request the ``keyids`` from; defaults to `gnupg.GPG.keyserver`.
[ "Import", "keys", "from", "a", "keyserver", "." ]
python
train
tanghaibao/jcvi
jcvi/formats/blast.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/blast.py#L367-L380
def collect_gaps(blast, use_subject=False):
    """
    Collect the gaps between adjacent HSPs in the BLAST file.

    Sorts `blast` in place by start coordinate (query by default,
    subject when `use_subject` is True) and yields each positive gap
    between consecutive, non-overlapping HSPs.
    """
    if use_subject:
        start = lambda x: x.sstart
        stop = lambda x: x.sstop
    else:
        start = lambda x: x.qstart
        stop = lambda x: x.qstop

    blast.sort(key=start)
    for prev, curr in zip(blast, blast[1:]):
        gap = start(curr) - stop(prev)
        # Overlapping or touching HSPs produce no gap.
        if gap > 0:
            yield gap
[ "def", "collect_gaps", "(", "blast", ",", "use_subject", "=", "False", ")", ":", "key", "=", "lambda", "x", ":", "x", ".", "sstart", "if", "use_subject", "else", "x", ".", "qstart", "blast", ".", "sort", "(", "key", "=", "key", ")", "for", "a", ","...
Collect the gaps between adjacent HSPs in the BLAST file.
[ "Collect", "the", "gaps", "between", "adjacent", "HSPs", "in", "the", "BLAST", "file", "." ]
python
train
Atomistica/atomistica
src/python/atomistica/io.py
https://github.com/Atomistica/atomistica/blob/5ed79d776c92b91a566be22615bfb304ecc75db7/src/python/atomistica/io.py#L76-L89
def write(fn, a, **kwargs):
    """
    Convenience function: Detect file extension and write via Atomistica
    or ASE. Has support for writing LAMMPS data files.
    """
    # Extension may carry an '@' suffix (e.g. frame selectors); only the
    # part before '@' selects the writer.
    ext = fn[fn.rfind('.'):].split('@')[0]
    if ext in ('.out', '.dat'):
        return write_atoms(fn, a)
    if ext == '.lammps':
        return write_lammps_data(fn, a, velocities=True, **kwargs)
    if ext == '.nc':
        return NetCDFTrajectory(fn, 'w').write(a)
    return ase.io.write(fn, a, **kwargs)
[ "def", "write", "(", "fn", ",", "a", ",", "*", "*", "kwargs", ")", ":", "ext", "=", "fn", "[", "fn", ".", "rfind", "(", "'.'", ")", ":", "]", ".", "split", "(", "'@'", ")", "if", "ext", "[", "0", "]", "==", "'.out'", "or", "ext", "[", "0"...
Convenience function: Detect file extension and write via Atomistica or ASE. Has support for writing LAMMPS data files.
[ "Convenience", "function", ":", "Detect", "file", "extension", "and", "write", "via", "Atomistica", "or", "ASE", ".", "Has", "support", "for", "writing", "LAMMPS", "data", "files", "." ]
python
train
hit9/rux
rux/utils.py
https://github.com/hit9/rux/blob/d7f60722658a3b83ac6d7bb3ca2790ac9c926b59/rux/utils.py#L75-L92
def update_nested_dict(a, b):
    """
    Recursively update nested dict `a` in place with values from dict `b`.

    usage::

        >>> a = {'x' : { 'y': 1}}
        >>> b = {'x' : {'z':2, 'y':3}, 'w': 4}
        >>> update_nested_dict(a,b)
        {'x': {'y': 3, 'z': 2}, 'w': 4}
    """
    # `dict.iteritems()` is Python 2 only; `items()` works on both 2 and 3.
    for key, value in b.items():
        if isinstance(value, dict):
            # Merge into the existing sub-dict (created when missing)
            # rather than replacing it wholesale.
            update_nested_dict(a.setdefault(key, {}), value)
        else:
            a[key] = value
    return a
[ "def", "update_nested_dict", "(", "a", ",", "b", ")", ":", "for", "k", ",", "v", "in", "b", ".", "iteritems", "(", ")", ":", "if", "isinstance", "(", "v", ",", "dict", ")", ":", "d", "=", "a", ".", "setdefault", "(", "k", ",", "{", "}", ")", ...
update nested dict `a` with another dict b. usage:: >>> a = {'x' : { 'y': 1}} >>> b = {'x' : {'z':2, 'y':3}, 'w': 4} >>> update_nested_dict(a,b) {'x': {'y': 3, 'z': 2}, 'w': 4}
[ "update", "nested", "dict", "a", "with", "another", "dict", "b", ".", "usage", "::" ]
python
valid
DataDog/integrations-core
openldap/datadog_checks/openldap/openldap.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/openldap/datadog_checks/openldap/openldap.py#L58-L94
def _get_tls_object(self, ssl_params):
    """
    Return a TLS object to establish a secure connection to a server
    """
    if ssl_params is None:
        return None

    if not ssl_params["verify"] and ssl_params["ca_certs"]:
        self.warning(
            "Incorrect configuration: trying to disable server certificate validation, "
            "while also specifying a capath. No validation will be performed. Fix your "
            "configuration to remove this warning"
        )

    validate = ssl.CERT_REQUIRED if ssl_params["verify"] else ssl.CERT_NONE
    ca_certs = ssl_params["ca_certs"]
    common = {
        "local_private_key_file": ssl_params["key"],
        "local_certificate_file": ssl_params["cert"],
        "version": ssl.PROTOCOL_SSLv23,
        "validate": validate,
    }

    if ca_certs is None or os.path.isfile(ca_certs):
        # A single CA bundle file (or nothing at all).
        return ldap3.core.tls.Tls(ca_certs_file=ca_certs, **common)
    if os.path.isdir(ca_certs):
        # A directory of CA certificates.
        return ldap3.core.tls.Tls(ca_certs_path=ca_certs, **common)
    raise ConfigurationError(
        'Invalid path {} for ssl_ca_certs: no such file or directory'.format(ssl_params['ca_certs'])
    )
[ "def", "_get_tls_object", "(", "self", ",", "ssl_params", ")", ":", "if", "ssl_params", "is", "None", ":", "return", "None", "if", "not", "ssl_params", "[", "\"verify\"", "]", "and", "ssl_params", "[", "\"ca_certs\"", "]", ":", "self", ".", "warning", "(",...
Return a TLS object to establish a secure connection to a server
[ "Return", "a", "TLS", "object", "to", "establish", "a", "secure", "connection", "to", "a", "server" ]
python
train
marrow/mongo
marrow/mongo/core/trait/expires.py
https://github.com/marrow/mongo/blob/2066dc73e281b8a46cb5fc965267d6b8e1b18467/marrow/mongo/core/trait/expires.py#L36-L44
def from_mongo(cls, data, expired=False, **kw):
    """In the event a value that has technically already expired is loaded, swap it for None."""
    record = super(Expires, cls).from_mongo(data, **kw)

    # Unless the caller explicitly asked for expired records, treat an
    # already-expired document as absent.  (`expired` is checked first so
    # `is_expired` is never evaluated when expired records are wanted.)
    if not expired and record.is_expired:
        return None

    return record
[ "def", "from_mongo", "(", "cls", ",", "data", ",", "expired", "=", "False", ",", "*", "*", "kw", ")", ":", "value", "=", "super", "(", "Expires", ",", "cls", ")", ".", "from_mongo", "(", "data", ",", "*", "*", "kw", ")", "if", "not", "expired", ...
In the event a value that has technically already expired is loaded, swap it for None.
[ "In", "the", "event", "a", "value", "that", "has", "technically", "already", "expired", "is", "loaded", "swap", "it", "for", "None", "." ]
python
train
kennethreitz/maya
maya/core.py
https://github.com/kennethreitz/maya/blob/774b141d91a83a5d77cb5351db3d02bf50564b21/maya/core.py#L158-L165
def snap(self, instruction):
    """
    Returns a new MayaDT object modified by the given instruction.

    Powered by snaptime. See https://github.com/zartstrom/snaptime
    for a complete documentation about the snaptime instructions.
    """
    snapped = snaptime.snap(self.datetime(), instruction)
    return self.from_datetime(snapped)
[ "def", "snap", "(", "self", ",", "instruction", ")", ":", "return", "self", ".", "from_datetime", "(", "snaptime", ".", "snap", "(", "self", ".", "datetime", "(", ")", ",", "instruction", ")", ")" ]
Returns a new MayaDT object modified by the given instruction. Powered by snaptime. See https://github.com/zartstrom/snaptime for a complete documentation about the snaptime instructions.
[ "Returns", "a", "new", "MayaDT", "object", "modified", "by", "the", "given", "instruction", "." ]
python
train
PyFilesystem/pyfilesystem2
fs/multifs.py
https://github.com/PyFilesystem/pyfilesystem2/blob/047f3593f297d1442194cda3da7a7335bcc9c14a/fs/multifs.py#L165-L171
def _writable_required(self, path): # type: (Text) -> FS """Check that ``path`` is writeable. """ if self.write_fs is None: raise errors.ResourceReadOnly(path) return self.write_fs
[ "def", "_writable_required", "(", "self", ",", "path", ")", ":", "# type: (Text) -> FS", "if", "self", ".", "write_fs", "is", "None", ":", "raise", "errors", ".", "ResourceReadOnly", "(", "path", ")", "return", "self", ".", "write_fs" ]
Check that ``path`` is writeable.
[ "Check", "that", "path", "is", "writeable", "." ]
python
train
ricmoo/pyscrypt
pyscrypt/hash.py
https://github.com/ricmoo/pyscrypt/blob/131ca39acee4963afd704b4c4631497e4fe34c97/pyscrypt/hash.py#L217-L258
def hash(password, salt, N, r, p, dkLen): """Returns the result of the scrypt password-based key derivation function. Constraints: r * p < (2 ** 30) dkLen <= (((2 ** 32) - 1) * 32 N must be a power of 2 greater than 1 (eg. 2, 4, 8, 16, 32...) N, r, p must be positive """ # This only matters to Python 3 if not check_bytes(password): raise ValueError('password must be a byte array') if not check_bytes(salt): raise ValueError('salt must be a byte array') # Scrypt implementation. Significant thanks to https://github.com/wg/scrypt if N < 2 or (N & (N - 1)): raise ValueError('Scrypt N must be a power of 2 greater than 1') # A psuedorandom function prf = lambda k, m: hmac.new(key = k, msg = m, digestmod = hashlib.sha256).digest() # convert into integers B = [ get_byte(c) for c in pbkdf2_single(password, salt, p * 128 * r, prf) ] B = [ ((B[i + 3] << 24) | (B[i + 2] << 16) | (B[i + 1] << 8) | B[i + 0]) for i in xrange(0, len(B), 4)] XY = [ 0 ] * (64 * r) V = [ 0 ] * (32 * r * N) for i in xrange(0, p): smix(B, i * 32 * r, r, N, V, XY) # Convert back into bytes Bc = [ ] for i in B: Bc.append((i >> 0) & 0xff) Bc.append((i >> 8) & 0xff) Bc.append((i >> 16) & 0xff) Bc.append((i >> 24) & 0xff) return pbkdf2_single(password, chars_to_bytes(Bc), dkLen, prf)
[ "def", "hash", "(", "password", ",", "salt", ",", "N", ",", "r", ",", "p", ",", "dkLen", ")", ":", "# This only matters to Python 3", "if", "not", "check_bytes", "(", "password", ")", ":", "raise", "ValueError", "(", "'password must be a byte array'", ")", "...
Returns the result of the scrypt password-based key derivation function. Constraints: r * p < (2 ** 30) dkLen <= (((2 ** 32) - 1) * 32 N must be a power of 2 greater than 1 (eg. 2, 4, 8, 16, 32...) N, r, p must be positive
[ "Returns", "the", "result", "of", "the", "scrypt", "password", "-", "based", "key", "derivation", "function", "." ]
python
train
fake-name/ChromeController
ChromeController/Generator/Generated.py
https://github.com/fake-name/ChromeController/blob/914dd136184e8f1165c7aa6ef30418aaf10c61f0/ChromeController/Generator/Generated.py#L2271-L2329
def Network_setCookie(self, name, value, **kwargs):
    """
    Function path: Network.setCookie
        Domain: Network
        Method name: setCookie

        WARNING: This function is marked 'Experimental'!

        Parameters:
            Required arguments:
                'name' (type: string) -> Cookie name.
                'value' (type: string) -> Cookie value.
            Optional arguments:
                'url' (type: string) -> The request-URI to associate with the setting of the cookie. This value can affect the default domain and path values of the created cookie.
                'domain' (type: string) -> Cookie domain.
                'path' (type: string) -> Cookie path.
                'secure' (type: boolean) -> True if cookie is secure.
                'httpOnly' (type: boolean) -> True if cookie is http-only.
                'sameSite' (type: CookieSameSite) -> Cookie SameSite type.
                'expires' (type: TimeSinceEpoch) -> Cookie expiration date, session cookie if not set
        Returns:
            'success' (type: boolean) -> True if successfully set cookie.

        Description: Sets a cookie with the given cookie data; may overwrite equivalent cookies if they exist.
    """
    # Auto-generated runtime type checks for the required arguments.
    assert isinstance(name, (str,)
        ), "Argument 'name' must be of type '['str']'. Received type: '%s'" % type(
        name)
    assert isinstance(value, (str,)
        ), "Argument 'value' must be of type '['str']'. Received type: '%s'" % type(
        value)
    # Optional arguments are validated only when present.
    if 'url' in kwargs:
        assert isinstance(kwargs['url'], (str,)
            ), "Optional argument 'url' must be of type '['str']'. Received type: '%s'" % type(
            kwargs['url'])
    if 'domain' in kwargs:
        assert isinstance(kwargs['domain'], (str,)
            ), "Optional argument 'domain' must be of type '['str']'. Received type: '%s'" % type(
            kwargs['domain'])
    if 'path' in kwargs:
        assert isinstance(kwargs['path'], (str,)
            ), "Optional argument 'path' must be of type '['str']'. Received type: '%s'" % type(
            kwargs['path'])
    if 'secure' in kwargs:
        assert isinstance(kwargs['secure'], (bool,)
            ), "Optional argument 'secure' must be of type '['bool']'. Received type: '%s'" % type(
            kwargs['secure'])
    if 'httpOnly' in kwargs:
        assert isinstance(kwargs['httpOnly'], (bool,)
            ), "Optional argument 'httpOnly' must be of type '['bool']'. Received type: '%s'" % type(
            kwargs['httpOnly'])
    # Reject any keyword argument the protocol method does not accept.
    expected = ['url', 'domain', 'path', 'secure', 'httpOnly', 'sameSite',
        'expires']
    passed_keys = list(kwargs.keys())
    assert all([(key in expected) for key in passed_keys]
        ), "Allowed kwargs are ['url', 'domain', 'path', 'secure', 'httpOnly', 'sameSite', 'expires']. Passed kwargs: %s" % passed_keys
    subdom_funcs = self.synchronous_command('Network.setCookie', name=name,
        value=value, **kwargs)
    return subdom_funcs
[ "def", "Network_setCookie", "(", "self", ",", "name", ",", "value", ",", "*", "*", "kwargs", ")", ":", "assert", "isinstance", "(", "name", ",", "(", "str", ",", ")", ")", ",", "\"Argument 'name' must be of type '['str']'. Received type: '%s'\"", "%", "type", ...
Function path: Network.setCookie Domain: Network Method name: setCookie WARNING: This function is marked 'Experimental'! Parameters: Required arguments: 'name' (type: string) -> Cookie name. 'value' (type: string) -> Cookie value. Optional arguments: 'url' (type: string) -> The request-URI to associate with the setting of the cookie. This value can affect the default domain and path values of the created cookie. 'domain' (type: string) -> Cookie domain. 'path' (type: string) -> Cookie path. 'secure' (type: boolean) -> True if cookie is secure. 'httpOnly' (type: boolean) -> True if cookie is http-only. 'sameSite' (type: CookieSameSite) -> Cookie SameSite type. 'expires' (type: TimeSinceEpoch) -> Cookie expiration date, session cookie if not set Returns: 'success' (type: boolean) -> True if successfully set cookie. Description: Sets a cookie with the given cookie data; may overwrite equivalent cookies if they exist.
[ "Function", "path", ":", "Network", ".", "setCookie", "Domain", ":", "Network", "Method", "name", ":", "setCookie", "WARNING", ":", "This", "function", "is", "marked", "Experimental", "!", "Parameters", ":", "Required", "arguments", ":", "name", "(", "type", ...
python
train
happyleavesaoc/python-motorparts
motorparts/__init__.py
https://github.com/happyleavesaoc/python-motorparts/blob/4a6b4dc72dd45524dd64a7a079478bd98c55215c/motorparts/__init__.py#L140-L154
def _traverse_report(data): """Recursively traverse vehicle health report.""" if 'items' not in data: return {} out = {} for item in data['items']: skip = (item['severity'] == 'NonDisplay' or item['itemKey'] == 'categoryDesc' or item['value'] in [None, 'Null', 'N/A', 'NULL']) if skip: continue value = 'Ok' if item['value'] == '0.0' else item['value'] out[item['itemKey']] = value out.update(_traverse_report(item)) return out
[ "def", "_traverse_report", "(", "data", ")", ":", "if", "'items'", "not", "in", "data", ":", "return", "{", "}", "out", "=", "{", "}", "for", "item", "in", "data", "[", "'items'", "]", ":", "skip", "=", "(", "item", "[", "'severity'", "]", "==", ...
Recursively traverse vehicle health report.
[ "Recursively", "traverse", "vehicle", "health", "report", "." ]
python
train
santoshphilip/eppy
eppy/EPlusInterfaceFunctions/iddindex.py
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/EPlusInterfaceFunctions/iddindex.py#L65-L71
def makeref2namesdct(name2refdct):
    """make the ref2namesdct in the idd_index

    Inverts the name -> references mapping so each reference maps to the
    set of names that mention it.
    """
    inverted = {}
    for name, refs in name2refdct.items():
        for ref in refs:
            inverted.setdefault(ref, set()).add(name)
    return inverted
[ "def", "makeref2namesdct", "(", "name2refdct", ")", ":", "ref2namesdct", "=", "{", "}", "for", "key", ",", "values", "in", "name2refdct", ".", "items", "(", ")", ":", "for", "value", "in", "values", ":", "ref2namesdct", ".", "setdefault", "(", "value", "...
make the ref2namesdct in the idd_index
[ "make", "the", "ref2namesdct", "in", "the", "idd_index" ]
python
train
QUANTAXIS/QUANTAXIS
QUANTAXIS/QAARP/QAUser.py
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAARP/QAUser.py#L408-L424
def register_account(self, account, portfolio_cookie=None):
    '''
    Register an account into one of this user's portfolios.

    `account` may also be a strategy class implementing an `on_bar` method.

    :param account: the account to register
    :param portfolio_cookie: cookie of the target portfolio (optional)
    :return: tuple of (portfolio, account)
    '''
    # Choose the portfolio: create one if none exist yet, honor an
    # explicit cookie, otherwise fall back to the first portfolio.
    if not self.portfolio_list:
        portfolio = self.new_portfolio()
    elif portfolio_cookie is not None:
        portfolio = self.portfolio_list[portfolio_cookie]
    else:
        portfolio = list(self.portfolio_list.values())[0]

    # Attach the account to the chosen portfolio.
    portfolio.add_account(account)
    return (portfolio, account)
[ "def", "register_account", "(", "self", ",", "account", ",", "portfolio_cookie", "=", "None", ")", ":", "# 查找 portfolio", "if", "len", "(", "self", ".", "portfolio_list", ".", "keys", "(", ")", ")", "<", "1", ":", "po", "=", "self", ".", "new_portfolio",...
注册一个account到portfolio组合中 account 也可以是一个策略类,实现其 on_bar 方法 :param account: 被注册的account :return:
[ "注册一个account到portfolio组合中", "account", "也可以是一个策略类,实现其", "on_bar", "方法", ":", "param", "account", ":", "被注册的account", ":", "return", ":" ]
python
train
revelc/pyaccumulo
pyaccumulo/proxy/AccumuloProxy.py
https://github.com/revelc/pyaccumulo/blob/8adcf535bb82ba69c749efce785c9efc487e85de/pyaccumulo/proxy/AccumuloProxy.py#L3021-L3029
def updateAndFlush(self, login, tableName, cells): """ Parameters: - login - tableName - cells """ self.send_updateAndFlush(login, tableName, cells) self.recv_updateAndFlush()
[ "def", "updateAndFlush", "(", "self", ",", "login", ",", "tableName", ",", "cells", ")", ":", "self", ".", "send_updateAndFlush", "(", "login", ",", "tableName", ",", "cells", ")", "self", ".", "recv_updateAndFlush", "(", ")" ]
Parameters: - login - tableName - cells
[ "Parameters", ":", "-", "login", "-", "tableName", "-", "cells" ]
python
train
log2timeline/dfvfs
dfvfs/helpers/file_system_searcher.py
https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/helpers/file_system_searcher.py#L364-L384
def PrepareMatches(self, file_system): """Prepare find specification for matching. Args: file_system (FileSystem): file system. """ if self._location is not None: self._location_segments = self._SplitPath( self._location, file_system.PATH_SEPARATOR) elif self._location_regex is not None: path_separator = file_system.PATH_SEPARATOR if path_separator == '\\': # The backslash '\' is escaped within a regular expression. path_separator = '\\\\' self._location_segments = self._SplitPath( self._location_regex, path_separator) if self._location_segments is not None: self._number_of_location_segments = len(self._location_segments)
[ "def", "PrepareMatches", "(", "self", ",", "file_system", ")", ":", "if", "self", ".", "_location", "is", "not", "None", ":", "self", ".", "_location_segments", "=", "self", ".", "_SplitPath", "(", "self", ".", "_location", ",", "file_system", ".", "PATH_S...
Prepare find specification for matching. Args: file_system (FileSystem): file system.
[ "Prepare", "find", "specification", "for", "matching", "." ]
python
train
quasipedia/swaggery
swaggery/utils.py
https://github.com/quasipedia/swaggery/blob/89a2e1b2bebbc511c781c9e63972f65aef73cc2f/swaggery/utils.py#L98-L101
def map_exception_codes(): '''Helper function to intialise CODES_TO_EXCEPTIONS.''' werkex = inspect.getmembers(exceptions, lambda x: getattr(x, 'code', None)) return {e.code: e for _, e in werkex}
[ "def", "map_exception_codes", "(", ")", ":", "werkex", "=", "inspect", ".", "getmembers", "(", "exceptions", ",", "lambda", "x", ":", "getattr", "(", "x", ",", "'code'", ",", "None", ")", ")", "return", "{", "e", ".", "code", ":", "e", "for", "_", ...
Helper function to intialise CODES_TO_EXCEPTIONS.
[ "Helper", "function", "to", "intialise", "CODES_TO_EXCEPTIONS", "." ]
python
train
prthkms/alex
alex/web.py
https://github.com/prthkms/alex/blob/79d3167c877e94cc07db0aab55a35857fac67ef7/alex/web.py#L4-L47
def weather(query): """weather(query) -- use Name Entity Recogniser (nltk-stanford-ner), to determine location entity in query and fetch weather info for that location (using yahoo apis). """ print 'Identifying the location . . .' try: response = unirest.post("https://textanalysis.p.mashape.com/nltk-stanford-ner", headers={ "X-Mashape-Key": "E7WffsNDbNmshj4aVC4NUwj9dT9ep1S2cc3jsnFp5wSCzNBiaP", "Content-Type": "application/x-www-form-urlencoded" }, params={ "text": query } ) except: print 'Unable to connect to internet' return location = '' for entity in response.body['result'].split(): word,tag = entity.split('/') if(tag == 'LOCATION'): location += ' '+word if(location != ''): print 'Gathering weather information for'+location import urllib2, urllib, json baseurl = "https://query.yahooapis.com/v1/public/yql?" yql_query = "select * from weather.forecast where woeid in \ (select woeid from geo.places(1) where text=\""+location+"\")" yql_url = baseurl + urllib.urlencode({'q':yql_query}) + "&format=json" try: result = urllib2.urlopen(yql_url).read() data = json.loads(result) result = data['query']['results']['channel'] print result['location']['city']+' '+result['location']['country']+' '+result['location']['region'] print result['item']['condition']['date'] print result['item']['condition']['text'] print result['item']['condition']['temp']+' '+result['units']['temperature'] except: print 'Unable to connect to internet' else: print 'Unable to get the location.'
[ "def", "weather", "(", "query", ")", ":", "print", "'Identifying the location . . .'", "try", ":", "response", "=", "unirest", ".", "post", "(", "\"https://textanalysis.p.mashape.com/nltk-stanford-ner\"", ",", "headers", "=", "{", "\"X-Mashape-Key\"", ":", "\"E7WffsNDbN...
weather(query) -- use Name Entity Recogniser (nltk-stanford-ner), to determine location entity in query and fetch weather info for that location (using yahoo apis).
[ "weather", "(", "query", ")", "--", "use", "Name", "Entity", "Recogniser", "(", "nltk", "-", "stanford", "-", "ner", ")", "to", "determine", "location", "entity", "in", "query", "and", "fetch", "weather", "info", "for", "that", "location", "(", "using", ...
python
train
pycontribs/pyrax
pyrax/autoscale.py
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/autoscale.py#L1030-L1038
def replace(self, scaling_group, name, cooldown, min_entities, max_entities, metadata=None): """ Replace an existing ScalingGroup configuration. All of the attributes must be specified. If you wish to delete any of the optional attributes, pass them in as None. """ return self._manager.replace(scaling_group, name, cooldown, min_entities, max_entities, metadata=metadata)
[ "def", "replace", "(", "self", ",", "scaling_group", ",", "name", ",", "cooldown", ",", "min_entities", ",", "max_entities", ",", "metadata", "=", "None", ")", ":", "return", "self", ".", "_manager", ".", "replace", "(", "scaling_group", ",", "name", ",", ...
Replace an existing ScalingGroup configuration. All of the attributes must be specified. If you wish to delete any of the optional attributes, pass them in as None.
[ "Replace", "an", "existing", "ScalingGroup", "configuration", ".", "All", "of", "the", "attributes", "must", "be", "specified", ".", "If", "you", "wish", "to", "delete", "any", "of", "the", "optional", "attributes", "pass", "them", "in", "as", "None", "." ]
python
train
ashmastaflash/kal-wrapper
kalibrate/fn.py
https://github.com/ashmastaflash/kal-wrapper/blob/80ee03ab7bd3172ac26b769d6b442960f3424b0e/kalibrate/fn.py#L157-L189
def parse_kal_scan(kal_out): """Parse kal band scan output.""" kal_data = [] scan_band = determine_scan_band(kal_out) scan_gain = determine_scan_gain(kal_out) scan_device = determine_device(kal_out) sample_rate = determine_sample_rate(kal_out) chan_detect_threshold = determine_chan_detect_threshold(kal_out) for line in kal_out.splitlines(): if "chan:" in line: p_line = line.split(' ') chan = str(p_line[1]) modifier = str(p_line[3]) power = str(p_line[5]) mod_raw = str(p_line[4]).replace(')\tpower:', '') base_raw = str((p_line[2]).replace('(', '')) mod_freq = herz_me(mod_raw) base_freq = herz_me(base_raw) final_freq = to_eng(determine_final_freq(base_freq, modifier, mod_freq)) kal_run = {"channel": chan, "base_freq": base_freq, "mod_freq": mod_freq, "modifier": modifier, "final_freq": final_freq, "power": power, "band": scan_band, "gain": scan_gain, "device": scan_device, "sample_rate": sample_rate, "channel_detect_threshold": chan_detect_threshold} kal_data.append(kal_run.copy()) return kal_data
[ "def", "parse_kal_scan", "(", "kal_out", ")", ":", "kal_data", "=", "[", "]", "scan_band", "=", "determine_scan_band", "(", "kal_out", ")", "scan_gain", "=", "determine_scan_gain", "(", "kal_out", ")", "scan_device", "=", "determine_device", "(", "kal_out", ")",...
Parse kal band scan output.
[ "Parse", "kal", "band", "scan", "output", "." ]
python
train
Azure/msrest-for-python
msrest/pipeline/requests.py
https://github.com/Azure/msrest-for-python/blob/0732bc90bdb290e5f58c675ffdd7dbfa9acefc93/msrest/pipeline/requests.py#L105-L147
def send(self, request, **kwargs): """Patch the current session with Request level operation config. This is deprecated, we shouldn't patch the session with arguments at the Request, and "config" should be used. """ session = request.context.session old_max_redirects = None if 'max_redirects' in kwargs: warnings.warn("max_redirects in operation kwargs is deprecated, use config.redirect_policy instead", DeprecationWarning) old_max_redirects = session.max_redirects session.max_redirects = int(kwargs['max_redirects']) old_trust_env = None if 'use_env_proxies' in kwargs: warnings.warn("use_env_proxies in operation kwargs is deprecated, use config.proxies instead", DeprecationWarning) old_trust_env = session.trust_env session.trust_env = bool(kwargs['use_env_proxies']) old_retries = {} if 'retries' in kwargs: warnings.warn("retries in operation kwargs is deprecated, use config.retry_policy instead", DeprecationWarning) max_retries = kwargs['retries'] for protocol in self._protocols: old_retries[protocol] = session.adapters[protocol].max_retries session.adapters[protocol].max_retries = max_retries try: return self.next.send(request, **kwargs) finally: if old_max_redirects: session.max_redirects = old_max_redirects if old_trust_env: session.trust_env = old_trust_env if old_retries: for protocol in self._protocols: session.adapters[protocol].max_retries = old_retries[protocol]
[ "def", "send", "(", "self", ",", "request", ",", "*", "*", "kwargs", ")", ":", "session", "=", "request", ".", "context", ".", "session", "old_max_redirects", "=", "None", "if", "'max_redirects'", "in", "kwargs", ":", "warnings", ".", "warn", "(", "\"max...
Patch the current session with Request level operation config. This is deprecated, we shouldn't patch the session with arguments at the Request, and "config" should be used.
[ "Patch", "the", "current", "session", "with", "Request", "level", "operation", "config", "." ]
python
train
pyopenapi/pyswagger
pyswagger/primitives/render.py
https://github.com/pyopenapi/pyswagger/blob/333c4ca08e758cd2194943d9904a3eda3fe43977/pyswagger/primitives/render.py#L231-L264
def default(): """ return default options, available options: - max_name_length: maximum length of name for additionalProperties - max_prop_count: maximum count of properties (count of fixed properties + additional properties) - max_str_length: maximum length of string type - max_byte_length: maximum length of byte type - max_array_length: maximum length of array - max_file_length: maximum length of file, in byte - minimal_property: only generate 'required' properties - minimal_parameter: only generate 'required' parameter - files: registered file object: refer to pyswagger.primitives.File for details - object_template: dict of default values assigned for properties when 'name' matched - parameter_template: dict of default values assigned for parameters when 'name matched - max_property: all properties are generated, ignore 'required' - max_parameter: all parameters are generated, ignore 'required' :return: options :rtype: dict """ return dict( max_name_length=64, max_prop_count=32, max_str_length=100, max_byte_length=100, max_array_length=100, max_file_length=200, minimal_property=False, minimal_parameter=False, files=[], object_template={}, parameter_template={}, max_property=False, max_parameter=False, )
[ "def", "default", "(", ")", ":", "return", "dict", "(", "max_name_length", "=", "64", ",", "max_prop_count", "=", "32", ",", "max_str_length", "=", "100", ",", "max_byte_length", "=", "100", ",", "max_array_length", "=", "100", ",", "max_file_length", "=", ...
return default options, available options: - max_name_length: maximum length of name for additionalProperties - max_prop_count: maximum count of properties (count of fixed properties + additional properties) - max_str_length: maximum length of string type - max_byte_length: maximum length of byte type - max_array_length: maximum length of array - max_file_length: maximum length of file, in byte - minimal_property: only generate 'required' properties - minimal_parameter: only generate 'required' parameter - files: registered file object: refer to pyswagger.primitives.File for details - object_template: dict of default values assigned for properties when 'name' matched - parameter_template: dict of default values assigned for parameters when 'name matched - max_property: all properties are generated, ignore 'required' - max_parameter: all parameters are generated, ignore 'required' :return: options :rtype: dict
[ "return", "default", "options", "available", "options", ":", "-", "max_name_length", ":", "maximum", "length", "of", "name", "for", "additionalProperties", "-", "max_prop_count", ":", "maximum", "count", "of", "properties", "(", "count", "of", "fixed", "properties...
python
train
geophysics-ubonn/reda
lib/reda/plotters/plots2d.py
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/plotters/plots2d.py#L13-L31
def _pseudodepths_wenner(configs, spacing=1, grid=None): """Given distances between electrodes, compute Wenner pseudo depths for the provided configuration The pseudodepth is computed after Roy & Apparao, 1971, as 0.11 times the distance between the two outermost electrodes. It's not really clear why the Wenner depths are different from the Dipole-Dipole depths, given the fact that Wenner configurations are a complete subset of the Dipole-Dipole configurations. """ if grid is None: xpositions = (configs - 1) * spacing else: xpositions = grid.get_electrode_positions()[configs - 1, 0] z = np.abs(np.max(xpositions, axis=1) - np.min(xpositions, axis=1)) * -0.11 x = np.mean(xpositions, axis=1) return x, z
[ "def", "_pseudodepths_wenner", "(", "configs", ",", "spacing", "=", "1", ",", "grid", "=", "None", ")", ":", "if", "grid", "is", "None", ":", "xpositions", "=", "(", "configs", "-", "1", ")", "*", "spacing", "else", ":", "xpositions", "=", "grid", "....
Given distances between electrodes, compute Wenner pseudo depths for the provided configuration The pseudodepth is computed after Roy & Apparao, 1971, as 0.11 times the distance between the two outermost electrodes. It's not really clear why the Wenner depths are different from the Dipole-Dipole depths, given the fact that Wenner configurations are a complete subset of the Dipole-Dipole configurations.
[ "Given", "distances", "between", "electrodes", "compute", "Wenner", "pseudo", "depths", "for", "the", "provided", "configuration" ]
python
train
djordon/queueing-tool
queueing_tool/queues/queue_servers.py
https://github.com/djordon/queueing-tool/blob/ccd418cf647ac03a54f78ba5e3725903f541b808/queueing_tool/queues/queue_servers.py#L418-L465
def _current_color(self, which=0): """Returns a color for the queue. Parameters ---------- which : int (optional, default: ``0``) Specifies the type of color to return. Returns ------- color : list Returns a RGBA color that is represented as a list with 4 entries where each entry can be any floating point number between 0 and 1. * If ``which`` is 1 then it returns the color of the edge as if it were a self loop. This is specified in ``colors['edge_loop_color']``. * If ``which`` is 2 then it returns the color of the vertex pen color (defined as color/vertex_color in :meth:`.QueueNetworkDiGraph.graph_draw`). This is specified in ``colors['vertex_color']``. * If ``which`` is anything else, then it returns the a shade of the edge that is proportional to the number of agents in the system -- which includes those being servered and those waiting to be served. More agents correspond to darker edge colors. Uses ``colors['vertex_fill_color']`` if the queue sits on a loop, and ``colors['edge_color']`` otherwise. """ if which == 1: color = self.colors['edge_loop_color'] elif which == 2: color = self.colors['vertex_color'] else: div = self.coloring_sensitivity * self.num_servers + 1. tmp = 1. - min(self.num_system / div, 1) if self.edge[0] == self.edge[1]: color = [i * tmp for i in self.colors['vertex_fill_color']] color[3] = 1.0 else: color = [i * tmp for i in self.colors['edge_color']] color[3] = 1 / 2. return color
[ "def", "_current_color", "(", "self", ",", "which", "=", "0", ")", ":", "if", "which", "==", "1", ":", "color", "=", "self", ".", "colors", "[", "'edge_loop_color'", "]", "elif", "which", "==", "2", ":", "color", "=", "self", ".", "colors", "[", "'...
Returns a color for the queue. Parameters ---------- which : int (optional, default: ``0``) Specifies the type of color to return. Returns ------- color : list Returns a RGBA color that is represented as a list with 4 entries where each entry can be any floating point number between 0 and 1. * If ``which`` is 1 then it returns the color of the edge as if it were a self loop. This is specified in ``colors['edge_loop_color']``. * If ``which`` is 2 then it returns the color of the vertex pen color (defined as color/vertex_color in :meth:`.QueueNetworkDiGraph.graph_draw`). This is specified in ``colors['vertex_color']``. * If ``which`` is anything else, then it returns the a shade of the edge that is proportional to the number of agents in the system -- which includes those being servered and those waiting to be served. More agents correspond to darker edge colors. Uses ``colors['vertex_fill_color']`` if the queue sits on a loop, and ``colors['edge_color']`` otherwise.
[ "Returns", "a", "color", "for", "the", "queue", "." ]
python
valid
optimizely/python-sdk
optimizely/logger.py
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/logger.py#L95-L117
def adapt_logger(logger): """ Adapt our custom logger.BaseLogger object into a standard logging.Logger object. Adaptations are: - NoOpLogger turns into a logger with a single NullHandler. - SimpleLogger turns into a logger with a StreamHandler and level. Args: logger: Possibly a logger.BaseLogger, or a standard python logging.Logger. Returns: a standard python logging.Logger. """ if isinstance(logger, logging.Logger): return logger # Use the standard python logger created by these classes. if isinstance(logger, (SimpleLogger, NoOpLogger)): return logger.logger # Otherwise, return whatever we were given because we can't adapt. return logger
[ "def", "adapt_logger", "(", "logger", ")", ":", "if", "isinstance", "(", "logger", ",", "logging", ".", "Logger", ")", ":", "return", "logger", "# Use the standard python logger created by these classes.", "if", "isinstance", "(", "logger", ",", "(", "SimpleLogger",...
Adapt our custom logger.BaseLogger object into a standard logging.Logger object. Adaptations are: - NoOpLogger turns into a logger with a single NullHandler. - SimpleLogger turns into a logger with a StreamHandler and level. Args: logger: Possibly a logger.BaseLogger, or a standard python logging.Logger. Returns: a standard python logging.Logger.
[ "Adapt", "our", "custom", "logger", ".", "BaseLogger", "object", "into", "a", "standard", "logging", ".", "Logger", "object", "." ]
python
train
tensorlayer/tensorlayer
tensorlayer/prepro.py
https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/prepro.py#L587-L628
def affine_transform_keypoints(coords_list, transform_matrix): """Transform keypoint coordinates according to a given affine transform matrix. OpenCV format, x is width. Note that, for pose estimation task, flipping requires maintaining the left and right body information. We should not flip the left and right body, so please use ``tl.prepro.keypoint_random_flip``. Parameters ----------- coords_list : list of list of tuple/list The coordinates e.g., the keypoint coordinates of every person in an image. transform_matrix : numpy.array Transform matrix, OpenCV format. Examples --------- >>> # 1. get all affine transform matrices >>> M_rotate = tl.prepro.affine_rotation_matrix(angle=20) >>> M_flip = tl.prepro.affine_horizontal_flip_matrix(prob=1) >>> # 2. combine all affine transform matrices to one matrix >>> M_combined = dot(M_flip).dot(M_rotate) >>> # 3. transfrom the matrix from Cartesian coordinate (the origin in the middle of image) >>> # to Image coordinate (the origin on the top-left of image) >>> transform_matrix = tl.prepro.transform_matrix_offset_center(M_combined, x=w, y=h) >>> # 4. then we can transfrom the image once for all transformations >>> result = tl.prepro.affine_transform_cv2(image, transform_matrix) # 76 times faster >>> # 5. transform keypoint coordinates >>> coords = [[(50, 100), (100, 100), (100, 50), (200, 200)], [(250, 50), (200, 50), (200, 100)]] >>> coords_result = tl.prepro.affine_transform_keypoints(coords, transform_matrix) """ coords_result_list = [] for coords in coords_list: coords = np.asarray(coords) coords = coords.transpose([1, 0]) coords = np.insert(coords, 2, 1, axis=0) # print(coords) # print(transform_matrix) coords_result = np.matmul(transform_matrix, coords) coords_result = coords_result[0:2, :].transpose([1, 0]) coords_result_list.append(coords_result) return coords_result_list
[ "def", "affine_transform_keypoints", "(", "coords_list", ",", "transform_matrix", ")", ":", "coords_result_list", "=", "[", "]", "for", "coords", "in", "coords_list", ":", "coords", "=", "np", ".", "asarray", "(", "coords", ")", "coords", "=", "coords", ".", ...
Transform keypoint coordinates according to a given affine transform matrix. OpenCV format, x is width. Note that, for pose estimation task, flipping requires maintaining the left and right body information. We should not flip the left and right body, so please use ``tl.prepro.keypoint_random_flip``. Parameters ----------- coords_list : list of list of tuple/list The coordinates e.g., the keypoint coordinates of every person in an image. transform_matrix : numpy.array Transform matrix, OpenCV format. Examples --------- >>> # 1. get all affine transform matrices >>> M_rotate = tl.prepro.affine_rotation_matrix(angle=20) >>> M_flip = tl.prepro.affine_horizontal_flip_matrix(prob=1) >>> # 2. combine all affine transform matrices to one matrix >>> M_combined = dot(M_flip).dot(M_rotate) >>> # 3. transfrom the matrix from Cartesian coordinate (the origin in the middle of image) >>> # to Image coordinate (the origin on the top-left of image) >>> transform_matrix = tl.prepro.transform_matrix_offset_center(M_combined, x=w, y=h) >>> # 4. then we can transfrom the image once for all transformations >>> result = tl.prepro.affine_transform_cv2(image, transform_matrix) # 76 times faster >>> # 5. transform keypoint coordinates >>> coords = [[(50, 100), (100, 100), (100, 50), (200, 200)], [(250, 50), (200, 50), (200, 100)]] >>> coords_result = tl.prepro.affine_transform_keypoints(coords, transform_matrix)
[ "Transform", "keypoint", "coordinates", "according", "to", "a", "given", "affine", "transform", "matrix", ".", "OpenCV", "format", "x", "is", "width", "." ]
python
valid
ska-sa/montblanc
montblanc/impl/rime/tensorflow/sources/fits_beam_source_provider.py
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/impl/rime/tensorflow/sources/fits_beam_source_provider.py#L213-L226
def _open_fits_files(filenames): """ Given a {correlation: filename} mapping for filenames returns a {correlation: file handle} mapping """ kw = { 'mode' : 'update', 'memmap' : False } def _fh(fn): """ Returns a filehandle or None if file does not exist """ return fits.open(fn, **kw) if os.path.exists(fn) else None return collections.OrderedDict( (corr, tuple(_fh(fn) for fn in files)) for corr, files in filenames.iteritems() )
[ "def", "_open_fits_files", "(", "filenames", ")", ":", "kw", "=", "{", "'mode'", ":", "'update'", ",", "'memmap'", ":", "False", "}", "def", "_fh", "(", "fn", ")", ":", "\"\"\" Returns a filehandle or None if file does not exist \"\"\"", "return", "fits", ".", "...
Given a {correlation: filename} mapping for filenames returns a {correlation: file handle} mapping
[ "Given", "a", "{", "correlation", ":", "filename", "}", "mapping", "for", "filenames", "returns", "a", "{", "correlation", ":", "file", "handle", "}", "mapping" ]
python
train
SHDShim/pytheos
pytheos/eqn_electronic.py
https://github.com/SHDShim/pytheos/blob/be079624405e92fbec60c5ead253eb5917e55237/pytheos/eqn_electronic.py#L6-L30
def zharkov_pel(v, temp, v0, e0, g, n, z, t_ref=300., three_r=3. * constants.R): """ calculate electronic contributions in pressure for the Zharkov equation the equation can be found in Sokolova and Dorogokupets 2013 :param v: unit-cell volume in A^3 :param temp: temperature in K :param v0: unit-cell volume in A^3 at 1 bar :param e0: parameter in K-1 for the Zharkov equation :param g: parameter for the Zharkov equation :param n: number of atoms in a formula unit :param z: number of formula unit in a unit cell :param t_ref: reference temperature, 300 K :param three_r: 3 times gas constant :return: electronic contribution in GPa """ v_mol = vol_uc2mol(v, z) x = v / v0 # a = a0 * np.power(x, m) def f(t): return three_r * n / 2. * e0 * np.power(x, g) * np.power(t, 2.) * \ g / v_mol * 1.e-9 return f(temp) - f(t_ref)
[ "def", "zharkov_pel", "(", "v", ",", "temp", ",", "v0", ",", "e0", ",", "g", ",", "n", ",", "z", ",", "t_ref", "=", "300.", ",", "three_r", "=", "3.", "*", "constants", ".", "R", ")", ":", "v_mol", "=", "vol_uc2mol", "(", "v", ",", "z", ")", ...
calculate electronic contributions in pressure for the Zharkov equation the equation can be found in Sokolova and Dorogokupets 2013 :param v: unit-cell volume in A^3 :param temp: temperature in K :param v0: unit-cell volume in A^3 at 1 bar :param e0: parameter in K-1 for the Zharkov equation :param g: parameter for the Zharkov equation :param n: number of atoms in a formula unit :param z: number of formula unit in a unit cell :param t_ref: reference temperature, 300 K :param three_r: 3 times gas constant :return: electronic contribution in GPa
[ "calculate", "electronic", "contributions", "in", "pressure", "for", "the", "Zharkov", "equation", "the", "equation", "can", "be", "found", "in", "Sokolova", "and", "Dorogokupets", "2013" ]
python
train
worldcompany/djangoembed
oembed/consumer.py
https://github.com/worldcompany/djangoembed/blob/f3f2be283441d91d1f89db780444dc75f7b51902/oembed/consumer.py#L30-L37
def extract_oembeds(self, text, maxwidth=None, maxheight=None, resource_type=None): """ Scans a block of text and extracts oembed data on any urls, returning it in a list of dictionaries """ parser = text_parser() urls = parser.extract_urls(text) return self.handle_extracted_urls(urls, maxwidth, maxheight, resource_type)
[ "def", "extract_oembeds", "(", "self", ",", "text", ",", "maxwidth", "=", "None", ",", "maxheight", "=", "None", ",", "resource_type", "=", "None", ")", ":", "parser", "=", "text_parser", "(", ")", "urls", "=", "parser", ".", "extract_urls", "(", "text",...
Scans a block of text and extracts oembed data on any urls, returning it in a list of dictionaries
[ "Scans", "a", "block", "of", "text", "and", "extracts", "oembed", "data", "on", "any", "urls", "returning", "it", "in", "a", "list", "of", "dictionaries" ]
python
valid
ConsenSys/mythril-classic
mythril/laser/ethereum/state/world_state.py
https://github.com/ConsenSys/mythril-classic/blob/27af71c34b2ce94f4fae5613ec457f93df1a8f56/mythril/laser/ethereum/state/world_state.py#L119-L130
def _generate_new_address(self, creator=None) -> str: """Generates a new address for the global state. :return: """ if creator: # TODO: Use nounce return "0x" + str(mk_contract_address(creator, 0).hex()) while True: address = "0x" + "".join([str(hex(randint(0, 16)))[-1] for _ in range(40)]) if address not in self.accounts.keys(): return address
[ "def", "_generate_new_address", "(", "self", ",", "creator", "=", "None", ")", "->", "str", ":", "if", "creator", ":", "# TODO: Use nounce", "return", "\"0x\"", "+", "str", "(", "mk_contract_address", "(", "creator", ",", "0", ")", ".", "hex", "(", ")", ...
Generates a new address for the global state. :return:
[ "Generates", "a", "new", "address", "for", "the", "global", "state", "." ]
python
train
ssalentin/plip
plip/modules/pymolplip.py
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/modules/pymolplip.py#L168-L191
def show_cationpi(self): """Visualize cation-pi interactions.""" for i, p in enumerate(self.plcomplex.pication): cmd.pseudoatom('ps-picat-1-%i' % i, pos=p.ring_center) cmd.pseudoatom('ps-picat-2-%i' % i, pos=p.charge_center) if p.protcharged: cmd.pseudoatom('Chargecenter-P', pos=p.charge_center) cmd.pseudoatom('Centroids-L', pos=p.ring_center) pilig_ids = '+'.join(map(str, p.ring_atoms)) cmd.select('PiCatRing-L', 'PiCatRing-L or (id %s & %s)' % (pilig_ids, self.ligname)) for a in p.charge_atoms: cmd.select('PosCharge-P', 'PosCharge-P or (id %i & %s)' % (a, self.protname)) else: cmd.pseudoatom('Chargecenter-L', pos=p.charge_center) cmd.pseudoatom('Centroids-P', pos=p.ring_center) pires_ids = '+'.join(map(str, p.ring_atoms)) cmd.select('PiCatRing-P', 'PiCatRing-P or (id %s & %s)' % (pires_ids, self.protname)) for a in p.charge_atoms: cmd.select('PosCharge-L', 'PosCharge-L or (id %i & %s)' % (a, self.ligname)) cmd.distance('PiCation', 'ps-picat-1-%i' % i, 'ps-picat-2-%i' % i) if self.object_exists('PiCation'): cmd.set('dash_color', 'orange', 'PiCation') cmd.set('dash_gap', 0.3, 'PiCation') cmd.set('dash_length', 0.6, 'PiCation')
[ "def", "show_cationpi", "(", "self", ")", ":", "for", "i", ",", "p", "in", "enumerate", "(", "self", ".", "plcomplex", ".", "pication", ")", ":", "cmd", ".", "pseudoatom", "(", "'ps-picat-1-%i'", "%", "i", ",", "pos", "=", "p", ".", "ring_center", ")...
Visualize cation-pi interactions.
[ "Visualize", "cation", "-", "pi", "interactions", "." ]
python
train
hydraplatform/hydra-base
hydra_base/util/hdb.py
https://github.com/hydraplatform/hydra-base/blob/9251ff7946505f7a272c87837390acd1c435bc6e/hydra_base/util/hdb.py#L425-L433
def get_dimension_from_db_by_name(dimension_name): """ Gets a dimension from the DB table. """ try: dimension = db.DBSession.query(Dimension).filter(Dimension.name==dimension_name).one() return JSONObject(dimension) except NoResultFound: raise ResourceNotFoundError("Dimension %s not found"%(dimension_name))
[ "def", "get_dimension_from_db_by_name", "(", "dimension_name", ")", ":", "try", ":", "dimension", "=", "db", ".", "DBSession", ".", "query", "(", "Dimension", ")", ".", "filter", "(", "Dimension", ".", "name", "==", "dimension_name", ")", ".", "one", "(", ...
Gets a dimension from the DB table.
[ "Gets", "a", "dimension", "from", "the", "DB", "table", "." ]
python
train
jgillick/LendingClub
lendingclub/__init__.py
https://github.com/jgillick/LendingClub/blob/4495f99fd869810f39c00e02b0f4112c6b210384/lendingclub/__init__.py#L1134-L1183
def __get_strut_token(self): """ Move the staged loan notes to the order stage and get the struts token from the place order HTML. The order will not be placed until calling _confirm_order() Returns ------- dict A dict with the token name and value """ try: # Move to the place order page and get the struts token response = self.lc.session.get('/portfolio/placeOrder.action') soup = BeautifulSoup(response.text, "html5lib") # Example HTML with the stuts token: """ <input type="hidden" name="struts.token.name" value="token" /> <input type="hidden" name="token" value="C4MJZP39Q86KDX8KN8SBTVCP0WSFBXEL" /> """ # 'struts.token.name' defines the field name with the token value strut_tag = None strut_token_name = soup.find('input', {'name': 'struts.token.name'}) if strut_token_name and strut_token_name['value'].strip(): # Get form around the strut.token.name element form = soup.form # assumed for parent in strut_token_name.parents: if parent and parent.name == 'form': form = parent break # Get strut token value strut_token_name = strut_token_name['value'] strut_tag = soup.find('input', {'name': strut_token_name}) if strut_tag and strut_tag['value'].strip(): return {'name': strut_token_name, 'value': strut_tag['value'].strip()} # No strut token found self.__log('No struts token! HTML: {0}'.format(response.text)) raise LendingClubError('No struts token. Please report this error.', response) except Exception as e: self.__log('Could not get struts token. Error message: {0}'.format(str(e))) raise LendingClubError('Could not get struts token. Error message: {0}'.format(str(e)))
[ "def", "__get_strut_token", "(", "self", ")", ":", "try", ":", "# Move to the place order page and get the struts token", "response", "=", "self", ".", "lc", ".", "session", ".", "get", "(", "'/portfolio/placeOrder.action'", ")", "soup", "=", "BeautifulSoup", "(", "...
Move the staged loan notes to the order stage and get the struts token from the place order HTML. The order will not be placed until calling _confirm_order() Returns ------- dict A dict with the token name and value
[ "Move", "the", "staged", "loan", "notes", "to", "the", "order", "stage", "and", "get", "the", "struts", "token", "from", "the", "place", "order", "HTML", ".", "The", "order", "will", "not", "be", "placed", "until", "calling", "_confirm_order", "()" ]
python
train
deepmind/pysc2
pysc2/bin/agent_remote.py
https://github.com/deepmind/pysc2/blob/df4cc4b00f07a2242be9ba153d4a7f4ad2017897/pysc2/bin/agent_remote.py#L124-L152
def agent(): """Run the agent, connecting to a (remote) host started independently.""" agent_module, agent_name = FLAGS.agent.rsplit(".", 1) agent_cls = getattr(importlib.import_module(agent_module), agent_name) logging.info("Starting agent:") with remote_sc2_env.RemoteSC2Env( map_name=FLAGS.map, host=FLAGS.host, host_port=FLAGS.host_port, lan_port=FLAGS.lan_port, name=FLAGS.agent_name or agent_name, race=sc2_env.Race[FLAGS.agent_race], step_mul=FLAGS.step_mul, agent_interface_format=sc2_env.parse_agent_interface_format( feature_screen=FLAGS.feature_screen_size, feature_minimap=FLAGS.feature_minimap_size, rgb_screen=FLAGS.rgb_screen_size, rgb_minimap=FLAGS.rgb_minimap_size, action_space=FLAGS.action_space, use_feature_units=FLAGS.use_feature_units), visualize=FLAGS.render) as env: agents = [agent_cls()] logging.info("Connected, starting run_loop.") try: run_loop.run_loop(agents, env) except remote_sc2_env.RestartException: pass logging.info("Done.")
[ "def", "agent", "(", ")", ":", "agent_module", ",", "agent_name", "=", "FLAGS", ".", "agent", ".", "rsplit", "(", "\".\"", ",", "1", ")", "agent_cls", "=", "getattr", "(", "importlib", ".", "import_module", "(", "agent_module", ")", ",", "agent_name", ")...
Run the agent, connecting to a (remote) host started independently.
[ "Run", "the", "agent", "connecting", "to", "a", "(", "remote", ")", "host", "started", "independently", "." ]
python
train
laysakura/relshell
relshell/batch_command.py
https://github.com/laysakura/relshell/blob/9ca5c03a34c11cb763a4a75595f18bf4383aa8cc/relshell/batch_command.py#L86-L110
def _parse_out_batch(cmd_array): """Find patterns that match to `out_batch_pat` and replace them into `STDOUT` or `TMPFILE`. :param cmd_array: `shlex.split`-ed command :rtype: ([cmd_array], batch_from_file) :returns: Modified `cmd_array` and tuple to show how OUT_BATCH is instantiated (TMPFILE or STDOUT). Returned `cmd_array` drops OUT_BATCH related tokens. :raises: `IndexError` if multiple OUT_BATCH are found """ res_cmd_array = cmd_array[:] res_batch_from_file = None out_batch_cmdidx = BatchCommand._out_batch_cmdidx(cmd_array) if out_batch_cmdidx is None: return (res_cmd_array, res_batch_from_file) if out_batch_cmdidx > 0 and cmd_array[out_batch_cmdidx - 1] == '>': # e.g. `> OUT_BATCH` res_batch_from_file = BatchFromFile('STDOUT') del res_cmd_array[out_batch_cmdidx], res_cmd_array[out_batch_cmdidx - 1] else: # OUT_BATCH is TMPFILE res_batch_from_file = BatchFromFile('TMPFILE') res_cmd_array[out_batch_cmdidx] = res_batch_from_file.tmpfile_path() return (res_cmd_array, res_batch_from_file)
[ "def", "_parse_out_batch", "(", "cmd_array", ")", ":", "res_cmd_array", "=", "cmd_array", "[", ":", "]", "res_batch_from_file", "=", "None", "out_batch_cmdidx", "=", "BatchCommand", ".", "_out_batch_cmdidx", "(", "cmd_array", ")", "if", "out_batch_cmdidx", "is", "...
Find patterns that match to `out_batch_pat` and replace them into `STDOUT` or `TMPFILE`. :param cmd_array: `shlex.split`-ed command :rtype: ([cmd_array], batch_from_file) :returns: Modified `cmd_array` and tuple to show how OUT_BATCH is instantiated (TMPFILE or STDOUT). Returned `cmd_array` drops OUT_BATCH related tokens. :raises: `IndexError` if multiple OUT_BATCH are found
[ "Find", "patterns", "that", "match", "to", "out_batch_pat", "and", "replace", "them", "into", "STDOUT", "or", "TMPFILE", "." ]
python
train
mlperf/training
rnn_translator/pytorch/seq2seq/data/dataset.py
https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/rnn_translator/pytorch/seq2seq/data/dataset.py#L115-L123
def sort_by_length(self): """ Sorts dataset by the sequence length. """ self.lengths, indices = self.lengths.sort(descending=True) self.src = [self.src[idx] for idx in indices] self.indices = indices.tolist() self.sorted = True
[ "def", "sort_by_length", "(", "self", ")", ":", "self", ".", "lengths", ",", "indices", "=", "self", ".", "lengths", ".", "sort", "(", "descending", "=", "True", ")", "self", ".", "src", "=", "[", "self", ".", "src", "[", "idx", "]", "for", "idx", ...
Sorts dataset by the sequence length.
[ "Sorts", "dataset", "by", "the", "sequence", "length", "." ]
python
train
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/parallel/controller/mongodb.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/parallel/controller/mongodb.py#L63-L67
def add_record(self, msg_id, rec): """Add a new Task Record, by msg_id.""" # print rec rec = self._binary_buffers(rec) self._records.insert(rec)
[ "def", "add_record", "(", "self", ",", "msg_id", ",", "rec", ")", ":", "# print rec", "rec", "=", "self", ".", "_binary_buffers", "(", "rec", ")", "self", ".", "_records", ".", "insert", "(", "rec", ")" ]
Add a new Task Record, by msg_id.
[ "Add", "a", "new", "Task", "Record", "by", "msg_id", "." ]
python
test
Kane610/axis
axis/param_cgi.py
https://github.com/Kane610/axis/blob/b2b44ce595c7b722b5e13eabcab7b91f048e1808/axis/param_cgi.py#L27-L29
def update_brand(self) -> None: """Update brand group of parameters.""" self.update(path=URL_GET + GROUP.format(group=BRAND))
[ "def", "update_brand", "(", "self", ")", "->", "None", ":", "self", ".", "update", "(", "path", "=", "URL_GET", "+", "GROUP", ".", "format", "(", "group", "=", "BRAND", ")", ")" ]
Update brand group of parameters.
[ "Update", "brand", "group", "of", "parameters", "." ]
python
train
pgjones/quart
quart/app.py
https://github.com/pgjones/quart/blob/7cb2d3bd98e8746025764f2b933abc12041fa175/quart/app.py#L1722-L1753
async def postprocess_websocket( self, response: Optional[Response], websocket_context: Optional[WebsocketContext]=None, ) -> Response: """Postprocess the websocket acting on the response. Arguments: response: The response after the websocket is finalized. webcoket_context: The websocket context, optional as Flask omits this argument. """ websocket_ = (websocket_context or _websocket_ctx_stack.top).websocket functions = (websocket_context or _websocket_ctx_stack.top)._after_websocket_functions blueprint = websocket_.blueprint if blueprint is not None: functions = chain(functions, self.after_websocket_funcs[blueprint]) functions = chain(functions, self.after_websocket_funcs[None]) for function in functions: response = await function(response) session_ = (websocket_context or _request_ctx_stack.top).session if not self.session_interface.is_null_session(session_): if response is None and isinstance(session_, SecureCookieSession) and session_.modified: self.logger.exception( "Secure Cookie Session modified during websocket handling. " "These modifications will be lost as a cookie cannot be set." ) else: await self.save_session(session_, response) return response
[ "async", "def", "postprocess_websocket", "(", "self", ",", "response", ":", "Optional", "[", "Response", "]", ",", "websocket_context", ":", "Optional", "[", "WebsocketContext", "]", "=", "None", ",", ")", "->", "Response", ":", "websocket_", "=", "(", "webs...
Postprocess the websocket acting on the response. Arguments: response: The response after the websocket is finalized. webcoket_context: The websocket context, optional as Flask omits this argument.
[ "Postprocess", "the", "websocket", "acting", "on", "the", "response", "." ]
python
train
biolink/ontobio
ontobio/golr/golr_stats.py
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/golr/golr_stats.py#L17-L52
def get_counts(entities=None, object_category=None, min_count=1, **kwargs): """ given a set of entities (genes, diseases, etc), finds the number of entities associated with each descriptor in a given category. The result is a tuple (cmap, results), where cmap is a dict of TERM:COUNT """ if entities is None: entities = [] results = search_associations(subjects=entities, subject_direct=True, rows=0, facet_fields=[M.IS_DEFINED_BY, M.SUBJECT_TAXON, M.SUBJECT_CATEGORY], object_category=object_category, facet_mincount=3, # TODO facet_limit=-1, json_facet={ 'categories':{ 'limit':-1, 'type': 'terms', 'field' : M.OBJECT_CLOSURE, 'facet' : { 'uniq_subject': "unique(subject)" } } }, **kwargs) buckets = results['facets']['categories']['buckets'] cmap = {} for bucket in buckets: if bucket['uniq_subject'] >= min_count: cmap[bucket['val']] = bucket['uniq_subject'] return (cmap, results)
[ "def", "get_counts", "(", "entities", "=", "None", ",", "object_category", "=", "None", ",", "min_count", "=", "1", ",", "*", "*", "kwargs", ")", ":", "if", "entities", "is", "None", ":", "entities", "=", "[", "]", "results", "=", "search_associations", ...
given a set of entities (genes, diseases, etc), finds the number of entities associated with each descriptor in a given category. The result is a tuple (cmap, results), where cmap is a dict of TERM:COUNT
[ "given", "a", "set", "of", "entities", "(", "genes", "diseases", "etc", ")", "finds", "the", "number", "of", "entities", "associated", "with", "each", "descriptor", "in", "a", "given", "category", "." ]
python
train
rsmuc/health_monitoring_plugins
health_monitoring_plugins/idrac.py
https://github.com/rsmuc/health_monitoring_plugins/blob/7ac29dfb9fe46c055b018cb72ad0d7d8065589b9/health_monitoring_plugins/idrac.py#L177-L188
def probe_check(name, status, device_type): """if the status is "ok" in the PROBE_STATE dict, return ok + string if the status is not "ok", return critical + string""" status_string = PROBE_STATE.get(int(status), "unknown") if status_string == "ok": return ok, "{} '{}': {}".format(device_type, name, status_string) if status_string == "unknown": return unknown, "{} '{}': {}".format(device_type, name, status_string) return critical, "{} '{}': {}".format(device_type, name, status_string)
[ "def", "probe_check", "(", "name", ",", "status", ",", "device_type", ")", ":", "status_string", "=", "PROBE_STATE", ".", "get", "(", "int", "(", "status", ")", ",", "\"unknown\"", ")", "if", "status_string", "==", "\"ok\"", ":", "return", "ok", ",", "\"...
if the status is "ok" in the PROBE_STATE dict, return ok + string if the status is not "ok", return critical + string
[ "if", "the", "status", "is", "ok", "in", "the", "PROBE_STATE", "dict", "return", "ok", "+", "string", "if", "the", "status", "is", "not", "ok", "return", "critical", "+", "string" ]
python
train
denisenkom/pytds
src/pytds/tds.py
https://github.com/denisenkom/pytds/blob/7d875cab29134afdef719406831c1c6a0d7af48a/src/pytds/tds.py#L247-L254
def read_whole_packet(self): """ Reads single packet and returns bytes payload of the packet Can only be called when transport's read pointer is at the beginning of the packet. """ self._read_packet() return readall(self, self._size - _header.size)
[ "def", "read_whole_packet", "(", "self", ")", ":", "self", ".", "_read_packet", "(", ")", "return", "readall", "(", "self", ",", "self", ".", "_size", "-", "_header", ".", "size", ")" ]
Reads single packet and returns bytes payload of the packet Can only be called when transport's read pointer is at the beginning of the packet.
[ "Reads", "single", "packet", "and", "returns", "bytes", "payload", "of", "the", "packet" ]
python
train
apache/incubator-mxnet
python/mxnet/profiler.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/profiler.py#L89-L106
def set_state(state='stop', profile_process='worker'): """Set up the profiler state to 'run' or 'stop'. Parameters ---------- state : string, optional Indicates whether to run the profiler, can be 'stop' or 'run'. Default is `stop`. profile_process : string whether to profile kvstore `server` or `worker`. server can only be profiled when kvstore is of type dist. if this is not passed, defaults to `worker` """ state2int = {'stop': 0, 'run': 1} profile_process2int = {'worker': 0, 'server': 1} check_call(_LIB.MXSetProcessProfilerState(ctypes.c_int(state2int[state]), profile_process2int[profile_process], profiler_kvstore_handle))
[ "def", "set_state", "(", "state", "=", "'stop'", ",", "profile_process", "=", "'worker'", ")", ":", "state2int", "=", "{", "'stop'", ":", "0", ",", "'run'", ":", "1", "}", "profile_process2int", "=", "{", "'worker'", ":", "0", ",", "'server'", ":", "1"...
Set up the profiler state to 'run' or 'stop'. Parameters ---------- state : string, optional Indicates whether to run the profiler, can be 'stop' or 'run'. Default is `stop`. profile_process : string whether to profile kvstore `server` or `worker`. server can only be profiled when kvstore is of type dist. if this is not passed, defaults to `worker`
[ "Set", "up", "the", "profiler", "state", "to", "run", "or", "stop", "." ]
python
train
MacHu-GWU/pymongo_mate-project
pymongo_mate/mongomock_mate.py
https://github.com/MacHu-GWU/pymongo_mate-project/blob/be53170c2db54cb705b9e548d32ef26c773ff7f3/pymongo_mate/mongomock_mate.py#L12-L26
def _dump(db): """ Dump :class:`mongomock.database.Database` to dict data. """ db_data = {"name": db.name, "_collections": dict()} for col_name, collection in iteritems(db._collections): if col_name != "system.indexes": col_data = { "_documents": collection._documents, "_uniques": collection._uniques, } db_data["_collections"][col_name] = col_data return db_data
[ "def", "_dump", "(", "db", ")", ":", "db_data", "=", "{", "\"name\"", ":", "db", ".", "name", ",", "\"_collections\"", ":", "dict", "(", ")", "}", "for", "col_name", ",", "collection", "in", "iteritems", "(", "db", ".", "_collections", ")", ":", "if"...
Dump :class:`mongomock.database.Database` to dict data.
[ "Dump", ":", "class", ":", "mongomock", ".", "database", ".", "Database", "to", "dict", "data", "." ]
python
train
lowandrew/OLCTools
spadespipeline/typingclasses.py
https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/typingclasses.py#L958-L982
def runner(self): """ Run the necessary methods in the correct order """ vir_report = os.path.join(self.reportpath, 'virulence.csv') if os.path.isfile(vir_report): self.report_parse(vir_report) else: logging.info('Starting {} analysis pipeline'.format(self.analysistype)) if not self.pipeline: general = None for sample in self.runmetadata.samples: general = getattr(sample, 'general') if general is None: # Create the objects to be used in the analyses objects = Objectprep(self) objects.objectprep() self.runmetadata = objects.samples # Run the analyses Sippr(self, self.cutoff) # Create the reports self.reporter() # Print the metadata MetadataPrinter(self)
[ "def", "runner", "(", "self", ")", ":", "vir_report", "=", "os", ".", "path", ".", "join", "(", "self", ".", "reportpath", ",", "'virulence.csv'", ")", "if", "os", ".", "path", ".", "isfile", "(", "vir_report", ")", ":", "self", ".", "report_parse", ...
Run the necessary methods in the correct order
[ "Run", "the", "necessary", "methods", "in", "the", "correct", "order" ]
python
train
FNNDSC/pfmisc
pfmisc/C_snode.py
https://github.com/FNNDSC/pfmisc/blob/960b4d6135fcc50bed0a8e55db2ab1ddad9b99d8/pfmisc/C_snode.py#L1385-L1439
def treeExplore(self, **kwargs): """ Recursively walk through a C_stree, applying a passed function at each node. The actual "walk" uses individual nodes' internal child dictionaries. It is assumed that the start origin of exploration can in fact be reached by a 'cd()' call. Directories are added to the internal l_allPaths list variable as they are discovered. kwargs: startPath=<startPath> : The starting point in the tree func=<f> : The function to apply at each node Additional kwargs are passed to <f> <f> is a function that is called on a node path. It is of form: f(path, **kwargs) where path is a node in the tree space. <f> must return a dictionary containing at least one field: { "status": True | False } This same dictionary is also returned out to the caller of this function. """ str_recursePath = '' str_startPath = '/' f = None ret = {} for key,val in kwargs.items(): if key == 'startPath': str_startPath = val if key == 'f': f = val # print 'processing node: %s' % str_startPath if self.cd(str_startPath)['status']: ret = f(str_startPath, **kwargs) if ret['status']: for node in self.lstr_lsnode(str_startPath): if str_startPath == '/': str_recursePath = "/%s" % node else: str_recursePath = '%s/%s' % (str_startPath, node) l_recursePath = str_recursePath.split('/') l_recursePath[0] = '/' if not l_recursePath in self.l_allPaths: self.l_allPaths.append(l_recursePath) kwargs['startPath'] = str_recursePath self.treeExplore(**kwargs) else: ret['status'] = False return ret
[ "def", "treeExplore", "(", "self", ",", "*", "*", "kwargs", ")", ":", "str_recursePath", "=", "''", "str_startPath", "=", "'/'", "f", "=", "None", "ret", "=", "{", "}", "for", "key", ",", "val", "in", "kwargs", ".", "items", "(", ")", ":", "if", ...
Recursively walk through a C_stree, applying a passed function at each node. The actual "walk" uses individual nodes' internal child dictionaries. It is assumed that the start origin of exploration can in fact be reached by a 'cd()' call. Directories are added to the internal l_allPaths list variable as they are discovered. kwargs: startPath=<startPath> : The starting point in the tree func=<f> : The function to apply at each node Additional kwargs are passed to <f> <f> is a function that is called on a node path. It is of form: f(path, **kwargs) where path is a node in the tree space. <f> must return a dictionary containing at least one field: { "status": True | False } This same dictionary is also returned out to the caller of this function.
[ "Recursively", "walk", "through", "a", "C_stree", "applying", "a", "passed", "function", "at", "each", "node", ".", "The", "actual", "walk", "uses", "individual", "nodes", "internal", "child", "dictionaries", "." ]
python
train
koszullab/metaTOR
metator/scripts/network.py
https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/network.py#L438-L574
def alignment_to_reads( sam_merged, output_dir, parameters=DEFAULT_PARAMETERS, save_memory=True, *bin_fasta ): """Generate reads from ambiguous alignment file Extract reads found to be mapping an input FASTA bin. If one read maps, the whole pair is extracted and written to the output paired-end FASTQ files. Reads that mapped and weren't part of a pair are kept in a third 'single' file for people who need it (e.g. to get extra paired reads by fetching the opposite one from the original FASTQ library). Parameters ---------- sam_merged : file, str or pathlib.Path The input alignment file in SAM/BAM format to be processed. output_dir : str or pathlib.Path The output directory to write the network and chunk data into. parameters : dict, optional Parameters for the network to read conversion, similar to alignment_to_network. save_memory : bool, optional Whether to keep the read names into memory or write them in different files, which takes longer but may prevent out-of-memory crashes. Default is True. `*bin_fasta` : file, str or pathlib.Path The bin FASTA files with appropriately named records. Returns ------- A dictionary of files with read names for each bin if save_memory is True, and a dictionary of the read names lists themselves otherwise. Note ---- This will throw an IOError ('close failed in file object destructor') on exit with older versions of pysam for some reason. It's harmless but you may consider upgrading to a later version of pysam if it comes up in a pipeline. 
""" # Just in case file objects are sent as input def get_file_string(file_thing): try: file_string = file_thing.name except AttributeError: file_string = str(file_thing) return file_string # Global set of chunks against which reads are required to # map - we store them in a tuple that keeps track of the # original bin each chunk came from so we can reattribute the reads later bin_chunks = set() for bin_file in bin_fasta: for record in SeqIO.parse(bin_file, "fasta"): bin_chunks.add((get_file_string(bin_file), record.id)) chunk_size = int(parameters["chunk_size"]) mapq_threshold = int(parameters["mapq_threshold"]) def read_name(read): return read.query_name.split()[0] # Since reading a huge BAM file can take up a # lot of time and resources, we only do it once # but that requires opening fastq files for writing # as matching reads get detected along the # bam and keeping track of which ones are # currently open. def get_base_name(bin_file): base_name = ".".join(os.path.basename(bin_file).split(".")[:-1]) output_path = os.path.join( output_dir, "{}.readnames".format(base_name) ) return output_path if save_memory: opened_files = dict() else: read_names = dict() with pysam.AlignmentFile(sam_merged, "rb") as alignment_merged_handle: for (my_read_name, alignment_pool) in itertools.groupby( alignment_merged_handle, read_name ): for my_alignment in alignment_pool: relative_position = my_alignment.reference_start contig_name = my_alignment.reference_name chunk_position = relative_position // chunk_size # The 'chunk name' is used to detect macthing positions chunk_name = "{}_{}".format(contig_name, chunk_position) # But such matching positions have to map acceptably quality_test = my_alignment.mapping_quality > mapq_threshold for bin_file in bin_fasta: chunk_tuple = (bin_file, chunk_name) if chunk_tuple in bin_chunks and quality_test: if save_memory: output_path = get_base_name(bin_file) try: output_handle = opened_files[bin_file] except KeyError: output_handle = 
open(output_path, "w") opened_files[bin_file] = output_handle output_handle.write("@{}\n".format(my_read_name)) else: try: read_names[my_read_name].append(bin_file) except KeyError: read_names[my_read_name] = [bin_file] for file_handle in opened_files.values(): file_handle.close() # Return unpaired file names for pair_unpaired_reads() to process if save_memory: return opened_files.keys() else: return read_names
[ "def", "alignment_to_reads", "(", "sam_merged", ",", "output_dir", ",", "parameters", "=", "DEFAULT_PARAMETERS", ",", "save_memory", "=", "True", ",", "*", "bin_fasta", ")", ":", "# Just in case file objects are sent as input", "def", "get_file_string", "(", "file_thi...
Generate reads from ambiguous alignment file Extract reads found to be mapping an input FASTA bin. If one read maps, the whole pair is extracted and written to the output paired-end FASTQ files. Reads that mapped and weren't part of a pair are kept in a third 'single' file for people who need it (e.g. to get extra paired reads by fetching the opposite one from the original FASTQ library). Parameters ---------- sam_merged : file, str or pathlib.Path The input alignment file in SAM/BAM format to be processed. output_dir : str or pathlib.Path The output directory to write the network and chunk data into. parameters : dict, optional Parameters for the network to read conversion, similar to alignment_to_network. save_memory : bool, optional Whether to keep the read names into memory or write them in different files, which takes longer but may prevent out-of-memory crashes. Default is True. `*bin_fasta` : file, str or pathlib.Path The bin FASTA files with appropriately named records. Returns ------- A dictionary of files with read names for each bin if save_memory is True, and a dictionary of the read names lists themselves otherwise. Note ---- This will throw an IOError ('close failed in file object destructor') on exit with older versions of pysam for some reason. It's harmless but you may consider upgrading to a later version of pysam if it comes up in a pipeline.
[ "Generate", "reads", "from", "ambiguous", "alignment", "file" ]
python
train
cs01/gdbgui
gdbgui/SSLify.py
https://github.com/cs01/gdbgui/blob/5367f87554f8f7c671d1f4596c133bf1303154f0/gdbgui/SSLify.py#L90-L104
def get_ssl_context(private_key, certificate): """Get ssl context from private key and certificate paths. The return value is used when calling Flask. i.e. app.run(ssl_context=get_ssl_context(,,,)) """ if ( certificate and os.path.isfile(certificate) and private_key and os.path.isfile(private_key) ): context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2) context.load_cert_chain(certificate, private_key) return context return None
[ "def", "get_ssl_context", "(", "private_key", ",", "certificate", ")", ":", "if", "(", "certificate", "and", "os", ".", "path", ".", "isfile", "(", "certificate", ")", "and", "private_key", "and", "os", ".", "path", ".", "isfile", "(", "private_key", ")", ...
Get ssl context from private key and certificate paths. The return value is used when calling Flask. i.e. app.run(ssl_context=get_ssl_context(,,,))
[ "Get", "ssl", "context", "from", "private", "key", "and", "certificate", "paths", ".", "The", "return", "value", "is", "used", "when", "calling", "Flask", ".", "i", ".", "e", ".", "app", ".", "run", "(", "ssl_context", "=", "get_ssl_context", "(", "))" ]
python
train
KrzyHonk/bpmn-python
bpmn_python/bpmn_diagram_import.py
https://github.com/KrzyHonk/bpmn-python/blob/6e5e28e3d656dbf5bd3d85d78fe8e3f2fb462629/bpmn_python/bpmn_diagram_import.py#L95-L111
def import_participant_element(diagram_graph, participants_dictionary, participant_element): """ Adds 'participant' element to the collaboration dictionary. :param diagram_graph: NetworkX graph representing a BPMN process diagram, :param participants_dictionary: dictionary with participant element attributes. Key is participant ID, value is a dictionary of participant attributes, :param participant_element: object representing a BPMN XML 'participant' element. """ participant_id = participant_element.getAttribute(consts.Consts.id) name = participant_element.getAttribute(consts.Consts.name) process_ref = participant_element.getAttribute(consts.Consts.process_ref) if participant_element.getAttribute(consts.Consts.process_ref) == '': diagram_graph.add_node(participant_id) diagram_graph.node[participant_id][consts.Consts.type] = consts.Consts.participant diagram_graph.node[participant_id][consts.Consts.process] = participant_id participants_dictionary[participant_id] = {consts.Consts.name: name, consts.Consts.process_ref: process_ref}
[ "def", "import_participant_element", "(", "diagram_graph", ",", "participants_dictionary", ",", "participant_element", ")", ":", "participant_id", "=", "participant_element", ".", "getAttribute", "(", "consts", ".", "Consts", ".", "id", ")", "name", "=", "participant_...
Adds 'participant' element to the collaboration dictionary. :param diagram_graph: NetworkX graph representing a BPMN process diagram, :param participants_dictionary: dictionary with participant element attributes. Key is participant ID, value is a dictionary of participant attributes, :param participant_element: object representing a BPMN XML 'participant' element.
[ "Adds", "participant", "element", "to", "the", "collaboration", "dictionary", "." ]
python
train
firstprayer/monsql
monsql/sql.py
https://github.com/firstprayer/monsql/blob/6285c15b574c8664046eae2edfeb548c7b173efd/monsql/sql.py#L70-L84
def build_insert(table_name, attributes): """ Given the table_name and the data, return the sql to insert the data """ sql = "INSERT INTO %s" %(table_name) column_str = u"" value_str = u"" for index, (key, value) in enumerate(attributes.items()): if index > 0: column_str += u"," value_str += u"," column_str += key value_str += value_to_sql_str(value) sql = sql + u"(%s) VALUES(%s)" %(column_str, value_str) return sql
[ "def", "build_insert", "(", "table_name", ",", "attributes", ")", ":", "sql", "=", "\"INSERT INTO %s\"", "%", "(", "table_name", ")", "column_str", "=", "u\"\"", "value_str", "=", "u\"\"", "for", "index", ",", "(", "key", ",", "value", ")", "in", "enumerat...
Given the table_name and the data, return the sql to insert the data
[ "Given", "the", "table_name", "and", "the", "data", "return", "the", "sql", "to", "insert", "the", "data" ]
python
train
phoebe-project/phoebe2
phoebe/parameters/parameters.py
https://github.com/phoebe-project/phoebe2/blob/e64b8be683977064e2d55dd1b3ac400f64c3e379/phoebe/parameters/parameters.py#L407-L419
def tags(self): """Returns a dictionary that lists all available tags that can be used for further filtering """ ret = {} for typ in _meta_fields_twig: if typ in ['uniqueid', 'plugin', 'feedback', 'fitting', 'history', 'twig', 'uniquetwig']: continue k = '{}s'.format(typ) ret[k] = getattr(self, k) return ret
[ "def", "tags", "(", "self", ")", ":", "ret", "=", "{", "}", "for", "typ", "in", "_meta_fields_twig", ":", "if", "typ", "in", "[", "'uniqueid'", ",", "'plugin'", ",", "'feedback'", ",", "'fitting'", ",", "'history'", ",", "'twig'", ",", "'uniquetwig'", ...
Returns a dictionary that lists all available tags that can be used for further filtering
[ "Returns", "a", "dictionary", "that", "lists", "all", "available", "tags", "that", "can", "be", "used", "for", "further", "filtering" ]
python
train
thespacedoctor/tastic
tastic/workspace/workspace.py
https://github.com/thespacedoctor/tastic/blob/a0a16cf329a50057906ac3f696bb60b6fcee25e0/tastic/workspace/workspace.py#L158-L181
def _sort_tp_file( self, taskpaperPath): """*sort individual taskpaper documents* **Key Arguments:** - ``taskpaperPath`` -- path to a taskpaper file **Return:** - None """ self.log.info('starting the ``_sort_tp_file`` method') # OPEN TASKPAPER FILE self.log.info("sorting taskpaper file %(taskpaperPath)s" % locals()) doc = document(taskpaperPath) doc.tidy() doc.sort_tasks(self.settings["workflowTags"]) doc.sort_projects(self.settings["workflowTags"]) doc.save() self.log.info('completed the ``_sort_tp_file`` method') return None
[ "def", "_sort_tp_file", "(", "self", ",", "taskpaperPath", ")", ":", "self", ".", "log", ".", "info", "(", "'starting the ``_sort_tp_file`` method'", ")", "# OPEN TASKPAPER FILE", "self", ".", "log", ".", "info", "(", "\"sorting taskpaper file %(taskpaperPath)s\"", "%...
*sort individual taskpaper documents* **Key Arguments:** - ``taskpaperPath`` -- path to a taskpaper file **Return:** - None
[ "*", "sort", "individual", "taskpaper", "documents", "*" ]
python
train
RudolfCardinal/pythonlib
cardinal_pythonlib/rnc_text.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/rnc_text.py#L59-L68
def output_csv(filehandle: TextIO, values: Iterable[str]) -> None: """ Write a line of CSV. POOR; does not escape things properly. DEPRECATED. Args: filehandle: file to write to values: values """ line = ",".join(values) filehandle.write(line + "\n")
[ "def", "output_csv", "(", "filehandle", ":", "TextIO", ",", "values", ":", "Iterable", "[", "str", "]", ")", "->", "None", ":", "line", "=", "\",\"", ".", "join", "(", "values", ")", "filehandle", ".", "write", "(", "line", "+", "\"\\n\"", ")" ]
Write a line of CSV. POOR; does not escape things properly. DEPRECATED. Args: filehandle: file to write to values: values
[ "Write", "a", "line", "of", "CSV", ".", "POOR", ";", "does", "not", "escape", "things", "properly", ".", "DEPRECATED", "." ]
python
train
unt-libraries/edtf-validate
edtf_validate/valid_edtf.py
https://github.com/unt-libraries/edtf-validate/blob/d6d63141919a66aea4ff1c31fa0cb8ff744ef9d9/edtf_validate/valid_edtf.py#L326-L351
def replace_u(matchobj): """Break the interval into parts, and replace 'u's. pieces - [pos/neg, start_year, start_month, start_day, pos/neg, end_year, end_month, end_day] """ pieces = list(matchobj.groups('')) # Replace "u"s in start and end years. if 'u' in pieces[1]: pieces[1] = pieces[1].replace('u', '0') if 'u' in pieces[5]: pieces[5] = pieces[5].replace('u', '9') # Replace "u"s in start month. if 'u' in pieces[2]: pieces[2] = '-' + replace_u_start_month(pieces[2]) # Replace "u"s in end month. if 'u' in pieces[6]: pieces[6] = '-' + replace_u_end_month(pieces[6]) # Replace "u"s in start day. if 'u' in pieces[3]: pieces[3] = '-' + replace_u_start_day(pieces[3]) # Replace "u"s in end day. if 'u' in pieces[7]: pieces[7] = '-' + replace_u_end_day(pieces[7], year=pieces[5], month=pieces[6]) return ''.join((''.join(pieces[:4]), '/', ''.join(pieces[4:])))
[ "def", "replace_u", "(", "matchobj", ")", ":", "pieces", "=", "list", "(", "matchobj", ".", "groups", "(", "''", ")", ")", "# Replace \"u\"s in start and end years.", "if", "'u'", "in", "pieces", "[", "1", "]", ":", "pieces", "[", "1", "]", "=", "pieces"...
Break the interval into parts, and replace 'u's. pieces - [pos/neg, start_year, start_month, start_day, pos/neg, end_year, end_month, end_day]
[ "Break", "the", "interval", "into", "parts", "and", "replace", "u", "s", "." ]
python
train
sernst/cauldron
cauldron/session/reloading.py
https://github.com/sernst/cauldron/blob/4086aec9c038c402ea212c79fe8bd0d27104f9cf/cauldron/session/reloading.py#L146-L175
def refresh( *modules: typing.Union[str, types.ModuleType], recursive: bool = False, force: bool = False ) -> bool: """ Checks the specified module or modules for changes and reloads them if they have been changed since the module was first imported or last refreshed. :param modules: One or more module objects that should be refreshed if they the currently loaded versions are out of date. The package name for modules can also be used. :param recursive: When true, any imported sub-modules of this module will also be refreshed if they have been updated. :param force: When true, all modules will be refreshed even if it doesn't appear that they have been updated. :return: True or False depending on whether any modules were refreshed by this call. """ out = [] for module in modules: out.append(reload_module(module, recursive, force)) return any(out)
[ "def", "refresh", "(", "*", "modules", ":", "typing", ".", "Union", "[", "str", ",", "types", ".", "ModuleType", "]", ",", "recursive", ":", "bool", "=", "False", ",", "force", ":", "bool", "=", "False", ")", "->", "bool", ":", "out", "=", "[", "...
Checks the specified module or modules for changes and reloads them if they have been changed since the module was first imported or last refreshed. :param modules: One or more module objects that should be refreshed if they the currently loaded versions are out of date. The package name for modules can also be used. :param recursive: When true, any imported sub-modules of this module will also be refreshed if they have been updated. :param force: When true, all modules will be refreshed even if it doesn't appear that they have been updated. :return: True or False depending on whether any modules were refreshed by this call.
[ "Checks", "the", "specified", "module", "or", "modules", "for", "changes", "and", "reloads", "them", "if", "they", "have", "been", "changed", "since", "the", "module", "was", "first", "imported", "or", "last", "refreshed", "." ]
python
train
starling-lab/rnlp
rnlp/__init__.py
https://github.com/starling-lab/rnlp/blob/72054cc2c0cbaea1d281bf3d56b271d4da29fc4a/rnlp/__init__.py#L82-L94
def converter(input_string, block_size=2): """ The cli tool as a built-in function. :param input_string: A string that should be converted to a set of facts. :type input_string: str. :param blocks_size: Optional block size of sentences (Default: 2). :type block_size: int. """ sentences = textprocessing.getSentences(input_string) blocks = textprocessing.getBlocks(sentences, block_size) parse.makeIdentifiers(blocks)
[ "def", "converter", "(", "input_string", ",", "block_size", "=", "2", ")", ":", "sentences", "=", "textprocessing", ".", "getSentences", "(", "input_string", ")", "blocks", "=", "textprocessing", ".", "getBlocks", "(", "sentences", ",", "block_size", ")", "par...
The cli tool as a built-in function. :param input_string: A string that should be converted to a set of facts. :type input_string: str. :param blocks_size: Optional block size of sentences (Default: 2). :type block_size: int.
[ "The", "cli", "tool", "as", "a", "built", "-", "in", "function", "." ]
python
train
dswah/pyGAM
pygam/terms.py
https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/terms.py#L1340-L1368
def build_constraints(self, coef, constraint_lam, constraint_l2): """ builds the GAM block-diagonal constraint matrix in quadratic form out of constraint matrices specified for each feature. Parameters ---------- coefs : array-like containing the coefficients of a term constraint_lam : float, penalty to impose on the constraint. typically this is a very large number. constraint_l2 : float, loading to improve the numerical conditioning of the constraint matrix. typically this is a very small number. Returns ------- C : sparse CSC matrix containing the model constraints in quadratic form """ C = sp.sparse.csc_matrix(np.zeros((self.n_coefs, self.n_coefs))) for i in range(len(self._terms)): C += self._build_marginal_constraints(i, coef, constraint_lam, constraint_l2) return sp.sparse.csc_matrix(C)
[ "def", "build_constraints", "(", "self", ",", "coef", ",", "constraint_lam", ",", "constraint_l2", ")", ":", "C", "=", "sp", ".", "sparse", ".", "csc_matrix", "(", "np", ".", "zeros", "(", "(", "self", ".", "n_coefs", ",", "self", ".", "n_coefs", ")", ...
builds the GAM block-diagonal constraint matrix in quadratic form out of constraint matrices specified for each feature. Parameters ---------- coefs : array-like containing the coefficients of a term constraint_lam : float, penalty to impose on the constraint. typically this is a very large number. constraint_l2 : float, loading to improve the numerical conditioning of the constraint matrix. typically this is a very small number. Returns ------- C : sparse CSC matrix containing the model constraints in quadratic form
[ "builds", "the", "GAM", "block", "-", "diagonal", "constraint", "matrix", "in", "quadratic", "form", "out", "of", "constraint", "matrices", "specified", "for", "each", "feature", "." ]
python
train
batiste/django-page-cms
pages/placeholders.py
https://github.com/batiste/django-page-cms/blob/3c72111eb7c3997a63c462c1776ffd8ce8c50a5d/pages/placeholders.py#L162-L171
def get_field(self, page, language, initial=None): """The field that will be shown within the admin.""" if self.parsed: help_text = _('Note: This field is evaluated as template code.') else: help_text = '' widget = self.get_widget(page, language) return self.field( widget=widget, initial=initial, help_text=help_text, required=False)
[ "def", "get_field", "(", "self", ",", "page", ",", "language", ",", "initial", "=", "None", ")", ":", "if", "self", ".", "parsed", ":", "help_text", "=", "_", "(", "'Note: This field is evaluated as template code.'", ")", "else", ":", "help_text", "=", "''",...
The field that will be shown within the admin.
[ "The", "field", "that", "will", "be", "shown", "within", "the", "admin", "." ]
python
train
PyCQA/pylint
pylint/checkers/utils.py
https://github.com/PyCQA/pylint/blob/2bf5c61a3ff6ae90613b81679de42c0f19aea600/pylint/checkers/utils.py#L669-L681
def inherit_from_std_ex(node: astroid.node_classes.NodeNG) -> bool: """ Return true if the given class node is subclass of exceptions.Exception. """ ancestors = node.ancestors() if hasattr(node, "ancestors") else [] for ancestor in itertools.chain([node], ancestors): if ( ancestor.name in ("Exception", "BaseException") and ancestor.root().name == EXCEPTIONS_MODULE ): return True return False
[ "def", "inherit_from_std_ex", "(", "node", ":", "astroid", ".", "node_classes", ".", "NodeNG", ")", "->", "bool", ":", "ancestors", "=", "node", ".", "ancestors", "(", ")", "if", "hasattr", "(", "node", ",", "\"ancestors\"", ")", "else", "[", "]", "for",...
Return true if the given class node is subclass of exceptions.Exception.
[ "Return", "true", "if", "the", "given", "class", "node", "is", "subclass", "of", "exceptions", ".", "Exception", "." ]
python
test
elifesciences/elife-tools
elifetools/parseJATS.py
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L990-L997
def contrib_phone(contrib_tag): """ Given a contrib tag, look for an phone tag """ phone = None if raw_parser.phone(contrib_tag): phone = first(raw_parser.phone(contrib_tag)).text return phone
[ "def", "contrib_phone", "(", "contrib_tag", ")", ":", "phone", "=", "None", "if", "raw_parser", ".", "phone", "(", "contrib_tag", ")", ":", "phone", "=", "first", "(", "raw_parser", ".", "phone", "(", "contrib_tag", ")", ")", ".", "text", "return", "phon...
Given a contrib tag, look for an phone tag
[ "Given", "a", "contrib", "tag", "look", "for", "an", "phone", "tag" ]
python
train
maciejkula/glove-python
glove/glove.py
https://github.com/maciejkula/glove-python/blob/749494290fdfd24379dcc2e244c583ee61808634/glove/glove.py#L190-L209
def add_dictionary(self, dictionary): """ Supply a word-id dictionary to allow similarity queries. """ if self.word_vectors is None: raise Exception('Model must be fit before adding a dictionary') if len(dictionary) > self.word_vectors.shape[0]: raise Exception('Dictionary length must be smaller ' 'or equal to the number of word vectors') self.dictionary = dictionary if hasattr(self.dictionary, 'iteritems'): # Python 2 compat items_iterator = self.dictionary.iteritems() else: items_iterator = self.dictionary.items() self.inverse_dictionary = {v: k for k, v in items_iterator}
[ "def", "add_dictionary", "(", "self", ",", "dictionary", ")", ":", "if", "self", ".", "word_vectors", "is", "None", ":", "raise", "Exception", "(", "'Model must be fit before adding a dictionary'", ")", "if", "len", "(", "dictionary", ")", ">", "self", ".", "w...
Supply a word-id dictionary to allow similarity queries.
[ "Supply", "a", "word", "-", "id", "dictionary", "to", "allow", "similarity", "queries", "." ]
python
train
MrYsLab/pymata-aio
pymata_aio/pymata_iot.py
https://github.com/MrYsLab/pymata-aio/blob/015081a4628b9d47dfe3f8d6c698ff903f107810/pymata_aio/pymata_iot.py#L320-L333
async def get_protocol_version(self): """ This method retrieves the Firmata protocol version. JSON command: {"method": "get_protocol_version", "params": ["null"]} :returns: {"method": "protocol_version_reply", "params": [PROTOCOL_VERSION]} """ value = await self.core.get_protocol_version() if value: reply = json.dumps({"method": "protocol_version_reply", "params": value}) else: reply = json.dumps({"method": "protocol_version_reply", "params": "Unknown"}) await self.websocket.send(reply)
[ "async", "def", "get_protocol_version", "(", "self", ")", ":", "value", "=", "await", "self", ".", "core", ".", "get_protocol_version", "(", ")", "if", "value", ":", "reply", "=", "json", ".", "dumps", "(", "{", "\"method\"", ":", "\"protocol_version_reply\"...
This method retrieves the Firmata protocol version. JSON command: {"method": "get_protocol_version", "params": ["null"]} :returns: {"method": "protocol_version_reply", "params": [PROTOCOL_VERSION]}
[ "This", "method", "retrieves", "the", "Firmata", "protocol", "version", "." ]
python
train
nitmir/django-cas-server
cas_server/views.py
https://github.com/nitmir/django-cas-server/blob/d106181b94c444f1946269da5c20f6c904840ad3/cas_server/views.py#L543-L581
def process_post(self): """ Analyse the POST request: * check that the LoginTicket is valid * check that the user sumited credentials are valid :return: * :attr:`INVALID_LOGIN_TICKET` if the POSTed LoginTicket is not valid * :attr:`USER_ALREADY_LOGGED` if the user is already logged and do no request reauthentication. * :attr:`USER_LOGIN_FAILURE` if the user is not logged or request for reauthentication and his credentials are not valid * :attr:`USER_LOGIN_OK` if the user is not logged or request for reauthentication and his credentials are valid :rtype: int """ if not self.check_lt(): self.init_form(self.request.POST) logger.warning("Received an invalid login ticket") return self.INVALID_LOGIN_TICKET elif not self.request.session.get("authenticated") or self.renew: # authentication request receive, initialize the form to use self.init_form(self.request.POST) if self.form.is_valid(): self.request.session.set_expiry(0) self.request.session["username"] = self.form.cleaned_data['username'] self.request.session["warn"] = True if self.form.cleaned_data.get("warn") else False self.request.session["authenticated"] = True self.renewed = True self.warned = True logger.info("User %s successfully authenticated" % self.request.session["username"]) return self.USER_LOGIN_OK else: logger.warning("A login attempt failed") return self.USER_LOGIN_FAILURE else: logger.warning("Received a login attempt for an already-active user") return self.USER_ALREADY_LOGGED
[ "def", "process_post", "(", "self", ")", ":", "if", "not", "self", ".", "check_lt", "(", ")", ":", "self", ".", "init_form", "(", "self", ".", "request", ".", "POST", ")", "logger", ".", "warning", "(", "\"Received an invalid login ticket\"", ")", "return"...
Analyse the POST request: * check that the LoginTicket is valid * check that the user sumited credentials are valid :return: * :attr:`INVALID_LOGIN_TICKET` if the POSTed LoginTicket is not valid * :attr:`USER_ALREADY_LOGGED` if the user is already logged and do no request reauthentication. * :attr:`USER_LOGIN_FAILURE` if the user is not logged or request for reauthentication and his credentials are not valid * :attr:`USER_LOGIN_OK` if the user is not logged or request for reauthentication and his credentials are valid :rtype: int
[ "Analyse", "the", "POST", "request", ":" ]
python
train
coleifer/walrus
walrus/containers.py
https://github.com/coleifer/walrus/blob/82bf15a6613487b5b5fefeb488f186d7e0106547/walrus/containers.py#L984-L998
def pop(self, idx=None): """ Remove an item from the array. By default this will be the last item by index, but any index can be specified. """ if idx is not None: return self.database.run_script( 'array_remove', keys=[self.key], args=[idx]) else: return self.database.run_script( 'array_pop', keys=[self.key], args=[])
[ "def", "pop", "(", "self", ",", "idx", "=", "None", ")", ":", "if", "idx", "is", "not", "None", ":", "return", "self", ".", "database", ".", "run_script", "(", "'array_remove'", ",", "keys", "=", "[", "self", ".", "key", "]", ",", "args", "=", "[...
Remove an item from the array. By default this will be the last item by index, but any index can be specified.
[ "Remove", "an", "item", "from", "the", "array", ".", "By", "default", "this", "will", "be", "the", "last", "item", "by", "index", "but", "any", "index", "can", "be", "specified", "." ]
python
train
sassoftware/saspy
saspy/sasbase.py
https://github.com/sassoftware/saspy/blob/e433f71990f249d3a6c3db323ceb11cb2d462cf9/saspy/sasbase.py#L733-L753
def read_csv(self, file: str, table: str = '_csv', libref: str = '', results: str = '', opts: dict = None) -> 'SASdata': """ :param file: either the OS filesystem path of the file, or HTTP://... for a url accessible file :param table: the name of the SAS Data Set to create :param libref: the libref for the SAS Data Set being created. Defaults to WORK, or USER if assigned :param results: format of results, SASsession.results is default, PANDAS, HTML or TEXT are the alternatives :param opts: a dictionary containing any of the following Proc Import options(datarow, delimiter, getnames, guessingrows) :return: SASdata object """ opts = opts if opts is not None else {} if results == '': results = self.results self._io.read_csv(file, table, libref, self.nosub, opts) if self.exist(table, libref): return SASdata(self, libref, table, results) else: return None
[ "def", "read_csv", "(", "self", ",", "file", ":", "str", ",", "table", ":", "str", "=", "'_csv'", ",", "libref", ":", "str", "=", "''", ",", "results", ":", "str", "=", "''", ",", "opts", ":", "dict", "=", "None", ")", "->", "'SASdata'", ":", "...
:param file: either the OS filesystem path of the file, or HTTP://... for a url accessible file :param table: the name of the SAS Data Set to create :param libref: the libref for the SAS Data Set being created. Defaults to WORK, or USER if assigned :param results: format of results, SASsession.results is default, PANDAS, HTML or TEXT are the alternatives :param opts: a dictionary containing any of the following Proc Import options(datarow, delimiter, getnames, guessingrows) :return: SASdata object
[ ":", "param", "file", ":", "either", "the", "OS", "filesystem", "path", "of", "the", "file", "or", "HTTP", ":", "//", "...", "for", "a", "url", "accessible", "file", ":", "param", "table", ":", "the", "name", "of", "the", "SAS", "Data", "Set", "to", ...
python
train
habnabit/panglery
panglery/pangler.py
https://github.com/habnabit/panglery/blob/4d62e408c4bfaae126c93a6151ded1e8dc75bcc8/panglery/pangler.py#L113-L126
def combine(self, *others): """Combine other Panglers into this Pangler. Returns a copy of this Pangler with all of the hooks from the provided Panglers added to it as well. The new Pangler will be bound to the same instance and have the same `id`, but new hooks will not be shared with this Pangler or any provided Panglers. """ p = self.clone() for other in others: p.hooks.extend(other.hooks) return p
[ "def", "combine", "(", "self", ",", "*", "others", ")", ":", "p", "=", "self", ".", "clone", "(", ")", "for", "other", "in", "others", ":", "p", ".", "hooks", ".", "extend", "(", "other", ".", "hooks", ")", "return", "p" ]
Combine other Panglers into this Pangler. Returns a copy of this Pangler with all of the hooks from the provided Panglers added to it as well. The new Pangler will be bound to the same instance and have the same `id`, but new hooks will not be shared with this Pangler or any provided Panglers.
[ "Combine", "other", "Panglers", "into", "this", "Pangler", "." ]
python
train
mishbahr/django-connected
connected_accounts/views.py
https://github.com/mishbahr/django-connected/blob/7ec1f042786fef2eb6c00b1479ce47c90341ba81/connected_accounts/views.py#L119-L129
def get_login_redirect(self, provider, account): """Return url to redirect authenticated users.""" info = self.model._meta.app_label, self.model._meta.model_name # inline import to prevent circular imports. from .admin import PRESERVED_FILTERS_SESSION_KEY preserved_filters = self.request.session.get(PRESERVED_FILTERS_SESSION_KEY, None) redirect_url = reverse('admin:%s_%s_changelist' % info) if preserved_filters: redirect_url = add_preserved_filters( {'preserved_filters': preserved_filters, 'opts': self.model._meta}, redirect_url) return redirect_url
[ "def", "get_login_redirect", "(", "self", ",", "provider", ",", "account", ")", ":", "info", "=", "self", ".", "model", ".", "_meta", ".", "app_label", ",", "self", ".", "model", ".", "_meta", ".", "model_name", "# inline import to prevent circular imports.", ...
Return url to redirect authenticated users.
[ "Return", "url", "to", "redirect", "authenticated", "users", "." ]
python
train
pmichali/whodunit
whodunit/__init__.py
https://github.com/pmichali/whodunit/blob/eed9107533766d716469e35fbb647a39dfa07035/whodunit/__init__.py#L111-L118
def store_attribute(self, key, value): """Store blame info we are interested in.""" if key == 'summary' or key == 'filename' or key == 'previous': return attr = key.replace('-', '_') if key.endswith('-time'): value = int(value) setattr(self, attr, value)
[ "def", "store_attribute", "(", "self", ",", "key", ",", "value", ")", ":", "if", "key", "==", "'summary'", "or", "key", "==", "'filename'", "or", "key", "==", "'previous'", ":", "return", "attr", "=", "key", ".", "replace", "(", "'-'", ",", "'_'", ")...
Store blame info we are interested in.
[ "Store", "blame", "info", "we", "are", "interested", "in", "." ]
python
train
Nachtfeuer/pipeline
spline/tools/condition.py
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/condition.py#L144-L167
def match_tokens(ast_tokens, ast_types): """ Verify that each token in order does match the expected types. The list provided by `get_tokens` does have three more elements at the beginning of the list which should be always the same for a condition (Module and Expr). Those are automatically added first to the final list of expected types so you don't have to specify it yourself each time. >>> tokens = Condition.get_tokens('2 == 3') >>> Condition.match_tokens(tokens, [ast.Compare, ast.Num, ast.Eq, ast.Num]) True Args: ast_entries (list): list of AST tokens parsers previously. ast_types (list): list of expected AST types. Returns: bool: when all tokes match the expected types """ ast_final_types = [ast.Module, ast.Expr] + ast_types return all(isinstance(ast_token, ast_type) for ast_token, ast_type in zip(ast_tokens, ast_final_types))
[ "def", "match_tokens", "(", "ast_tokens", ",", "ast_types", ")", ":", "ast_final_types", "=", "[", "ast", ".", "Module", ",", "ast", ".", "Expr", "]", "+", "ast_types", "return", "all", "(", "isinstance", "(", "ast_token", ",", "ast_type", ")", "for", "a...
Verify that each token in order does match the expected types. The list provided by `get_tokens` does have three more elements at the beginning of the list which should be always the same for a condition (Module and Expr). Those are automatically added first to the final list of expected types so you don't have to specify it yourself each time. >>> tokens = Condition.get_tokens('2 == 3') >>> Condition.match_tokens(tokens, [ast.Compare, ast.Num, ast.Eq, ast.Num]) True Args: ast_entries (list): list of AST tokens parsers previously. ast_types (list): list of expected AST types. Returns: bool: when all tokes match the expected types
[ "Verify", "that", "each", "token", "in", "order", "does", "match", "the", "expected", "types", "." ]
python
train
drdoctr/doctr
doctr/__main__.py
https://github.com/drdoctr/doctr/blob/0f19ff78c8239efcc98d417f36b0a31d9be01ba5/doctr/__main__.py#L372-L568
def configure(args, parser): """ Color guide - red: Error and warning messages - green: Welcome messages (use sparingly) - blue: Default values - bold_magenta: Action items - bold_black: Parts of code to be run or copied that should be modified """ if not args.force and on_travis(): parser.error(red("doctr appears to be running on Travis. Use " "doctr configure --force to run anyway.")) if not args.authenticate: args.upload_key = False if args.travis_tld: if args.travis_tld in ['c', 'com', '.com', 'travis-ci.com']: args.travis_tld = 'travis-ci.com' else: args.travis_tld = 'travis-ci.org' print(green(dedent("""\ Welcome to Doctr. We need to ask you a few questions to get you on your way to automatically deploying from Travis CI to GitHub pages. """))) login_kwargs = {} if args.authenticate: while not login_kwargs: try: login_kwargs = GitHub_login() except AuthenticationFailed as e: print(red(e)) else: login_kwargs = {'auth': None, 'headers': None} GitHub_token = None get_build_repo = False default_repo = guess_github_repo() while not get_build_repo: try: if default_repo: build_repo = input("What repo do you want to build the docs for? [{default_repo}] ".format(default_repo=blue(default_repo))) if not build_repo: build_repo = default_repo else: build_repo = input("What repo do you want to build the docs for (org/reponame, like 'drdoctr/doctr')? 
") is_private = check_repo_exists(build_repo, service='github', **login_kwargs)['private'] if is_private and not args.authenticate: sys.exit(red("--no-authenticate is not supported for private repositories.")) headers = {} travis_token = None if is_private: if args.token: GitHub_token = generate_GitHub_token(note="Doctr token for pushing to gh-pages from Travis (for {build_repo}).".format(build_repo=build_repo), scopes=["read:org", "user:email", "repo"], **login_kwargs)['token'] travis_token = get_travis_token(GitHub_token=GitHub_token, **login_kwargs) headers['Authorization'] = "token {}".format(travis_token) service = args.travis_tld if args.travis_tld else 'travis' c = check_repo_exists(build_repo, service=service, ask=True, headers=headers) tld = c['service'][-4:] is_private = c['private'] or is_private if is_private and not args.authenticate: sys.exit(red("--no-authenticate is not supported for private repos.")) get_build_repo = True except GitHubError: raise except RuntimeError as e: print(red('\n{!s:-^{}}\n'.format(e, 70))) get_deploy_repo = False while not get_deploy_repo: try: deploy_repo = input("What repo do you want to deploy the docs to? [{build_repo}] ".format(build_repo=blue(build_repo))) if not deploy_repo: deploy_repo = build_repo if deploy_repo != build_repo: check_repo_exists(deploy_repo, service='github', **login_kwargs) get_deploy_repo = True except GitHubError: raise except RuntimeError as e: print(red('\n{!s:-^{}}\n'.format(e, 70))) N = IncrementingInt(1) header = green("\n================== You should now do the following ==================\n") if args.token: if not GitHub_token: GitHub_token = generate_GitHub_token(**login_kwargs)['token'] encrypted_variable = encrypt_variable("GH_TOKEN={GitHub_token}".format(GitHub_token=GitHub_token).encode('utf-8'), build_repo=build_repo, tld=tld, travis_token=travis_token, **login_kwargs) print(dedent(""" A personal access token for doctr has been created. 
You can go to https://github.com/settings/tokens to revoke it.""")) print(header) else: deploy_key_repo, env_name, keypath = get_deploy_key_repo(deploy_repo, args.key_path) private_ssh_key, public_ssh_key = generate_ssh_key() key = encrypt_to_file(private_ssh_key, keypath + '.enc') del private_ssh_key # Prevent accidental use below public_ssh_key = public_ssh_key.decode('ASCII') encrypted_variable = encrypt_variable(env_name.encode('utf-8') + b"=" + key, build_repo=build_repo, tld=tld, travis_token=travis_token, **login_kwargs) deploy_keys_url = 'https://github.com/{deploy_repo}/settings/keys'.format(deploy_repo=deploy_key_repo) if args.upload_key: upload_GitHub_deploy_key(deploy_key_repo, public_ssh_key, **login_kwargs) print(dedent(""" The deploy key has been added for {deploy_repo}. You can go to {deploy_keys_url} to revoke the deploy key.\ """.format(deploy_repo=deploy_key_repo, deploy_keys_url=deploy_keys_url, keypath=keypath))) print(header) else: print(header) print(dedent("""\ {N}. {BOLD_MAGENTA}Go to {deploy_keys_url} and add the following as a new key:{RESET} {ssh_key} {BOLD_MAGENTA}Be sure to allow write access for the key.{RESET} """.format(ssh_key=public_ssh_key, deploy_keys_url=deploy_keys_url, N=N, BOLD_MAGENTA=BOLD_MAGENTA, RESET=RESET))) print(dedent("""\ {N}. {BOLD_MAGENTA}Add the file {keypath}.enc to be staged for commit:{RESET} git add {keypath}.enc """.format(keypath=keypath, N=N, BOLD_MAGENTA=BOLD_MAGENTA, RESET=RESET))) options = '--built-docs ' + bold_black('<path/to/built/html/>') if args.key_path: options += ' --key-path {keypath}.enc'.format(keypath=keypath) if deploy_repo != build_repo: options += ' --deploy-repo {deploy_repo}'.format(deploy_repo=deploy_repo) key_type = "deploy key" if args.token: options += ' --token' key_type = "personal access token" print(dedent("""\ {N}. 
{BOLD_MAGENTA}Add these lines to your `.travis.yml` file:{RESET} env: global: # Doctr {key_type} for {deploy_repo} - secure: "{encrypted_variable}" script: - set -e - {BOLD_BLACK}<Command to build your docs>{RESET} - pip install doctr - doctr deploy {options} {BOLD_BLACK}<target-directory>{RESET} """.format(options=options, N=N, key_type=key_type, encrypted_variable=encrypted_variable.decode('utf-8'), deploy_repo=deploy_repo, BOLD_MAGENTA=BOLD_MAGENTA, BOLD_BLACK=BOLD_BLACK, RESET=RESET))) print(dedent("""\ Replace the text in {BOLD_BLACK}<angle brackets>{RESET} with the relevant things for your repository. """.format(BOLD_BLACK=BOLD_BLACK, RESET=RESET))) print(dedent("""\ Note: the `set -e` prevents doctr from running when the docs build fails. We put this code under `script:` so that if doctr fails it causes the build to fail. """)) print(dedent("""\ {N}. {BOLD_MAGENTA}Commit and push these changes to your GitHub repository.{RESET} The docs should now build automatically on Travis. """.format(N=N, BOLD_MAGENTA=BOLD_MAGENTA, RESET=RESET))) print("See the documentation at https://drdoctr.github.io/ for more information.")
[ "def", "configure", "(", "args", ",", "parser", ")", ":", "if", "not", "args", ".", "force", "and", "on_travis", "(", ")", ":", "parser", ".", "error", "(", "red", "(", "\"doctr appears to be running on Travis. Use \"", "\"doctr configure --force to run anyway.\"", ...
Color guide - red: Error and warning messages - green: Welcome messages (use sparingly) - blue: Default values - bold_magenta: Action items - bold_black: Parts of code to be run or copied that should be modified
[ "Color", "guide" ]
python
train
JukeboxPipeline/jukedj
src/jukedj/models.py
https://github.com/JukeboxPipeline/jukedj/blob/d4159961c819c26792a278981ee68106ee15f3f3/src/jukedj/models.py#L490-L508
def create_all_tasks(element):
    """Create one task per matching department for the given element.

    :param element: The shot or asset that needs tasks
    :type element: :class:`muke.models.Shot` | :class:`muke.models.Asset`
    :returns: None
    :rtype: None
    :raises: None
    """
    project = element.project
    # Departments are flagged as asset departments or shot departments;
    # only create tasks for the matching kind.
    is_asset = isinstance(element, Asset)
    for department in project.department_set.filter(assetflag=is_asset):
        task = Task(project=project, department=department, element=element)
        task.full_clean()
        task.save()
[ "def", "create_all_tasks", "(", "element", ")", ":", "prj", "=", "element", ".", "project", "if", "isinstance", "(", "element", ",", "Asset", ")", ":", "flag", "=", "True", "else", ":", "flag", "=", "False", "deps", "=", "prj", ".", "department_set", "...
Create all tasks for the element :param element: The shot or asset that needs tasks :type element: :class:`muke.models.Shot` | :class:`muke.models.Asset` :returns: None :rtype: None :raises: None
[ "Create", "all", "tasks", "for", "the", "element" ]
python
test
taxpon/openpyscad
openpyscad/custom3dshapes.py
https://github.com/taxpon/openpyscad/blob/d487b409fcef429da7585dcc50180af3e9203236/openpyscad/custom3dshapes.py#L7-L36
def dice(edge=15, fn=32):
    """Build a six-sided die as a CSG object.

    :param edge: edge length of the die body
    :param fn: circle resolution ($fn) used for the pips
    :return: the die with all pips subtracted, mirrored about the XY plane
    """
    edge = float(edge)

    # Body: a cube intersected with a sphere to round off the corners.
    body = ops.Cube(edge, center=True) & ops.Sphere(edge * 3 / 4, center=True)

    # A single pip: a short extruded circle.
    pip_height = 0.7
    pip = ops.Circle(edge / 12, _fn=fn).linear_extrude(height=pip_height)

    # Face "1": a single pip centred on the top face.
    face1 = pip.translate([0, 0, edge / 2 - pip_height / 2])

    # Face "2": two pips on a side face, mirrored about the face diagonal.
    pip2_a = face1.rotate(a=90, v=[1, 0, 0]).translate([edge / 6, 0, edge / 6])
    pip2_b = pip2_a.mirror([-edge / 6, 0, -edge / 6])
    face2 = pip2_a + pip2_b

    # Face "3": the two-pip diagonal rotated, plus a centre pip rotated onto the face.
    face3 = face2.rotate(a=90, v=[0, 0, 1]) + face1.rotate(a=90, v=[0, 1, 0])

    # Face "4": the rotated two-pip diagonal plus its mirror image.
    pips4 = face2.rotate(a=-90, v=[0, 0, 1])
    face4 = pips4 + pips4.mirror([0, 1, 0])

    # Face "5": the three-pip arrangement rotated, plus its mirror image.
    pips5 = face3.rotate(a=90, v=[0, 0, 1])
    face5 = pips5 + pips5.mirror([1, 0, 0])

    # Face "6": a row of three pips on the bottom face, plus its mirrored row.
    pip6_a = pip.translate([0, 0, -(edge / 2 + pip_height / 2)]).translate([0, edge / 6, 0])
    pip6_b = pip6_a.translate([edge / 4, 0, 0])
    pip6_c = pip6_a.translate([-edge / 4, 0, 0])
    row6 = pip6_a + pip6_b + pip6_c
    face6 = row6 + row6.mirror([0, 1, 0])

    # Subtract every pip from the body and flip the result upright.
    result = body - face1 - face2 - face3 - face4 - face5 - face6
    return result.mirror([0, 0, 1])
[ "def", "dice", "(", "edge", "=", "15", ",", "fn", "=", "32", ")", ":", "edge", "=", "float", "(", "edge", ")", "# dice", "c", "=", "ops", ".", "Cube", "(", "edge", ",", "center", "=", "True", ")", "s", "=", "ops", ".", "Sphere", "(", "edge", ...
dice
[ "dice" ]
python
train
mmp2/megaman
megaman/utils/k_means_clustering.py
https://github.com/mmp2/megaman/blob/faccaf267aad0a8b18ec8a705735fd9dd838ca1e/megaman/utils/k_means_clustering.py#L121-L145
def get_centroids(data,k,labels,centroids,data_norms):
    """ Update each centroid (in place) to the mean of the points assigned to it.

    Parameters
    ------------
    data: array-like, shape= (m_samples,n_samples)
    k: integer, number of K clusters
    labels: array-like, shape (1,n_samples)
        cluster assignment for each data point
    centroids: array-like, shape=(K, n_samples)
        updated in place and also returned
    data_norms: passed through to ``new_orthogonal_center`` when a cluster
        is empty (defined elsewhere in this module)

    Returns
    -------------
    centroids: array-like, shape (K,n_samples)

    Notes
    -----
    Fixes a bug in the original implementation: ``np.where`` returns a
    1-tuple, so ``len(np.where(...))`` was always 1 and the empty-cluster
    branch was unreachable (an empty cluster produced a NaN mean instead
    of being re-seeded).
    """
    for j in range(k):
        # Indices of the points currently assigned to cluster j.
        members = np.where(labels == j)[0]
        if members.size == 0:
            # Empty cluster: re-seed it with a fresh orthogonal center.
            _, temp = new_orthogonal_center(data, data_norms, centroids)
        else:
            temp = np.mean(data[members, :], axis=0)
        centroids[j, :] = temp
    return centroids
[ "def", "get_centroids", "(", "data", ",", "k", ",", "labels", ",", "centroids", ",", "data_norms", ")", ":", "D", "=", "data", ".", "shape", "[", "1", "]", "for", "j", "in", "range", "(", "k", ")", ":", "cluster_points", "=", "np", ".", "where", ...
For each element in the dataset, choose the closest centroid Parameters ------------ data: array-like, shape= (m_samples,n_samples) K: integer, number of K clusters centroids: array-like, shape=(K, n_samples) labels: array-like, shape (1,n_samples) returns ------------- centroids: array-like, shape (K,n_samples)
[ "For", "each", "element", "in", "the", "dataset", "choose", "the", "closest", "centroid", "Parameters", "------------", "data", ":", "array", "-", "like", "shape", "=", "(", "m_samples", "n_samples", ")", "K", ":", "integer", "number", "of", "K", "clusters",...
python
train
saltstack/salt
salt/modules/mac_power.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_power.py#L360-L376
def get_restart_power_failure():
    '''
    Check whether 'restart on power failure' is enabled, if supported

    :return: True if the setting is on, otherwise False
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' power.get_restart_power_failure
    '''
    result = salt.utils.mac_utils.execute_return_result(
        'systemsetup -getrestartpowerfailure')
    setting = salt.utils.mac_utils.parse_return(result)
    # validate_enabled normalizes the raw value to 'on'/'off'.
    return salt.utils.mac_utils.validate_enabled(setting) == 'on'
[ "def", "get_restart_power_failure", "(", ")", ":", "ret", "=", "salt", ".", "utils", ".", "mac_utils", ".", "execute_return_result", "(", "'systemsetup -getrestartpowerfailure'", ")", "return", "salt", ".", "utils", ".", "mac_utils", ".", "validate_enabled", "(", ...
Displays whether 'restart on power failure' is on or off if supported :return: A string value representing the "restart on power failure" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_restart_power_failure
[ "Displays", "whether", "restart", "on", "power", "failure", "is", "on", "or", "off", "if", "supported" ]
python
train
rytilahti/python-eq3bt
eq3bt/eq3cli.py
https://github.com/rytilahti/python-eq3bt/blob/595459d9885920cf13b7059a1edd2cf38cede1f0/eq3bt/eq3cli.py#L157-L168
def state(ctx):
    """ Prints out all available information. """
    device = ctx.obj
    click.echo(device)
    # Forward to each sub-command so every piece of state prints its own
    # line. (presets is intentionally skipped, as in the original.)
    for command in (locked, low_battery, window_open, boost,
                    temp, mode, valve_state):
        ctx.forward(command)
[ "def", "state", "(", "ctx", ")", ":", "dev", "=", "ctx", ".", "obj", "click", ".", "echo", "(", "dev", ")", "ctx", ".", "forward", "(", "locked", ")", "ctx", ".", "forward", "(", "low_battery", ")", "ctx", ".", "forward", "(", "window_open", ")", ...
Prints out all available information.
[ "Prints", "out", "all", "available", "information", "." ]
python
train
WhyNotHugo/django-afip
django_afip/models.py
https://github.com/WhyNotHugo/django-afip/blob/5fb73213f1fe86ca52b501ffd0737911ef26ddb3/django_afip/models.py#L1230-L1255
def save_pdf(self, save_model=True):
    """
    Save the receipt as a PDF related to this model.

    The related :class:`~.Receipt` should be validated first, of course.

    :param bool save_model: If True, immediately save this model instance.
    """
    from django_afip.views import ReceiptPDFView

    # Refuse to render a PDF for a receipt AFIP has not authorized yet.
    if not self.receipt.is_validated:
        raise exceptions.DjangoAfipException(
            _('Cannot generate pdf for non-authorized receipt')
        )

    filename = '{}.pdf'.format(uuid.uuid4().hex)
    self.pdf_file = File(BytesIO(), name=filename)
    template_name = 'receipts/code_{}.html'.format(
        self.receipt.receipt_type.code,
    )
    render_pdf(
        template=template_name,
        file_=self.pdf_file,
        context=ReceiptPDFView.get_context_for_pk(self.receipt_id),
    )

    if save_model:
        self.save()
[ "def", "save_pdf", "(", "self", ",", "save_model", "=", "True", ")", ":", "from", "django_afip", ".", "views", "import", "ReceiptPDFView", "if", "not", "self", ".", "receipt", ".", "is_validated", ":", "raise", "exceptions", ".", "DjangoAfipException", "(", ...
Save the receipt as a PDF related to this model. The related :class:`~.Receipt` should be validated first, of course. :param bool save_model: If True, immediately save this model instance.
[ "Save", "the", "receipt", "as", "a", "PDF", "related", "to", "this", "model", "." ]
python
train
zeroSteiner/smoke-zephyr
smoke_zephyr/job.py
https://github.com/zeroSteiner/smoke-zephyr/blob/a6d2498aeacc72ee52e7806f783a4d83d537ffb2/smoke_zephyr/job.py#L219-L241
def stop(self):
    """
    Stop the JobManager thread.
    """
    self.logger.debug('stopping the job manager')
    self._thread_running.clear()
    # Wait for the manager thread to acknowledge the shutdown request.
    self._thread_shutdown.wait()

    self._job_lock.acquire()
    self.logger.debug('waiting on {0} job threads'.format(len(self._jobs)))
    for job_desc in self._jobs.values():
        job_thread = job_desc['job']
        if job_thread is not None and job_thread.is_alive():
            job_thread.join()
    # the job lock must be released before the thread can be joined because
    # the thread routine acquires it before checking if it should exit, see
    # https://github.com/zeroSteiner/smoke-zephyr/issues/4 for more details
    self._job_lock.release()
    self._thread.join()
    self.logger.info('the job manager has been stopped')
    return
[ "def", "stop", "(", "self", ")", ":", "self", ".", "logger", ".", "debug", "(", "'stopping the job manager'", ")", "self", ".", "_thread_running", ".", "clear", "(", ")", "self", ".", "_thread_shutdown", ".", "wait", "(", ")", "self", ".", "_job_lock", "...
Stop the JobManager thread.
[ "Stop", "the", "JobManager", "thread", "." ]
python
train
log2timeline/plaso
plaso/parsers/mac_keychain.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/parsers/mac_keychain.py#L759-L798
def _ParseApplicationPasswordRecord(self, parser_mediator, record):
    """Extracts the information from an application password record.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      record (dict[str, object]): database record.

    Raises:
      ParseError: if the application password record cannot be parsed.
    """
    key = record.get('_key_', None)
    if not key or not key.startswith(b'ssgp'):
        raise errors.ParseError((
            'Unsupported application password record key value does not start '
            'with: "ssgp".'))

    event_data = KeychainApplicationRecordEventData()
    event_data.account_name = self._ParseBinaryDataAsString(
        parser_mediator, record['acct'])
    event_data.comments = self._ParseBinaryDataAsString(
        parser_mediator, record['crtr'])
    event_data.entry_name = self._ParseBinaryDataAsString(
        parser_mediator, record['PrintName'])
    # Store the hex digest of the encrypted payload (everything after 'ssgp').
    event_data.ssgp_hash = codecs.decode(
        codecs.encode(key[4:], 'hex'), 'utf-8')
    event_data.text_description = self._ParseBinaryDataAsString(
        parser_mediator, record['desc'])

    # Emit one event per available timestamp, creation first.
    for field_name, timestamp_description in (
            ('cdat', definitions.TIME_DESCRIPTION_CREATION),
            ('mdat', definitions.TIME_DESCRIPTION_MODIFICATION)):
        date_time = self._ParseDateTimeValue(parser_mediator, record[field_name])
        if date_time:
            event = time_events.DateTimeValuesEvent(
                date_time, timestamp_description)
            parser_mediator.ProduceEventWithEventData(event, event_data)
[ "def", "_ParseApplicationPasswordRecord", "(", "self", ",", "parser_mediator", ",", "record", ")", ":", "key", "=", "record", ".", "get", "(", "'_key_'", ",", "None", ")", "if", "not", "key", "or", "not", "key", ".", "startswith", "(", "b'ssgp'", ")", ":...
Extracts the information from an application password record. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. record (dict[str, object]): database record. Raises: ParseError: if Internet password record cannot be parsed.
[ "Extracts", "the", "information", "from", "an", "application", "password", "record", "." ]
python
train
Jammy2211/PyAutoLens
autolens/model/galaxy/galaxy.py
https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/model/galaxy/galaxy.py#L259-L279
def mass_within_ellipse_in_units(self, major_axis, unit_mass='angular',
                                 kpc_per_arcsec=None,
                                 critical_surface_density=None):
    """Compute the total mass of the galaxy's mass profiles within an
    ellipse of the given major-axis, or None if the galaxy has no mass
    profiles.

    See *profiles.mass_profiles.angualr_mass_within_ellipse* for details of
    how this is performed.

    Parameters
    ----------
    major_axis : float
        The major-axis radius of the ellipse.
    unit_mass : str
        The units the mass is returned in (angular | solMass).
    kpc_per_arcsec : float
        Conversion factor between arc-seconds and kiloparsecs.
    critical_surface_density : float
        Critical surface mass density used for unit conversion.
    """
    if not self.has_mass_profile:
        return None
    # Sum the per-profile masses; each profile handles its own conversion.
    return sum(
        profile.mass_within_ellipse_in_units(
            major_axis=major_axis,
            unit_mass=unit_mass,
            kpc_per_arcsec=kpc_per_arcsec,
            critical_surface_density=critical_surface_density)
        for profile in self.mass_profiles)
[ "def", "mass_within_ellipse_in_units", "(", "self", ",", "major_axis", ",", "unit_mass", "=", "'angular'", ",", "kpc_per_arcsec", "=", "None", ",", "critical_surface_density", "=", "None", ")", ":", "if", "self", ".", "has_mass_profile", ":", "return", "sum", "(...
Compute the total angular mass of the galaxy's mass profiles within an ellipse of specified major_axis. See *profiles.mass_profiles.angualr_mass_within_ellipse* for details of how this is performed. Parameters ---------- major_axis : float The major-axis radius of the ellipse. units_luminosity : str The units the luminosity is returned in (eps | counts). exposure_time : float The exposure time of the observation, which converts luminosity from electrons per second units to counts.
[ "Compute", "the", "total", "angular", "mass", "of", "the", "galaxy", "s", "mass", "profiles", "within", "an", "ellipse", "of", "specified", "major_axis", "." ]
python
valid
danielhrisca/asammdf
asammdf/blocks/mdf_v3.py
https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/blocks/mdf_v3.py#L2406-L2451
def get_channel_comment(self, name=None, group=None, index=None):
    """Gets channel comment.

    Channel can be specified in two ways:

    * using the first positional argument *name*

        * if there are multiple occurances for this channel then the
          *group* and *index* arguments can be used to select a specific
          group.
        * if there are multiple occurances for this channel and either the
          *group* or *index* arguments is None then a warning is issued

    * using the group number (keyword argument *group*) and the channel
      number (keyword argument *index*). Use *info* method for group and
      channel numbers

    Parameters
    ----------
    name : string
        name of channel
    group : int
        0-based group index
    index : int
        0-based channel index

    Returns
    -------
    comment : str
        found channel comment

    """
    gp_nr, ch_nr = self._validate_channel_selection(name, group, index)

    # The comment lives on the in-memory channel object, so no stream
    # access is needed here (the previous implementation selected a
    # stream based on the group's data location but never used it).
    channel = self.groups[gp_nr].channels[ch_nr]
    return channel.comment
[ "def", "get_channel_comment", "(", "self", ",", "name", "=", "None", ",", "group", "=", "None", ",", "index", "=", "None", ")", ":", "gp_nr", ",", "ch_nr", "=", "self", ".", "_validate_channel_selection", "(", "name", ",", "group", ",", "index", ")", "...
Gets channel comment. Channel can be specified in two ways: * using the first positional argument *name* * if there are multiple occurances for this channel then the *group* and *index* arguments can be used to select a specific group. * if there are multiple occurances for this channel and either the *group* or *index* arguments is None then a warning is issued * using the group number (keyword argument *group*) and the channel number (keyword argument *index*). Use *info* method for group and channel numbers If the *raster* keyword argument is not *None* the output is interpolated accordingly. Parameters ---------- name : string name of channel group : int 0-based group index index : int 0-based channel index Returns ------- comment : str found channel comment
[ "Gets", "channel", "comment", ".", "Channel", "can", "be", "specified", "in", "two", "ways", ":" ]
python
train
python-rope/rope
rope/base/pycore.py
https://github.com/python-rope/rope/blob/1c9f9cd5964b099a99a9111e998f0dc728860688/rope/base/pycore.py#L326-L334
def is_changed(self, start, end):
    """Tell whether any of start till end lines have changed

    The end points are inclusive and indices start from 1.
    """
    # A non-empty changed region means at least one line in range changed.
    left, right = self._get_changed(start, end)
    return left < right
[ "def", "is_changed", "(", "self", ",", "start", ",", "end", ")", ":", "left", ",", "right", "=", "self", ".", "_get_changed", "(", "start", ",", "end", ")", "if", "left", "<", "right", ":", "return", "True", "return", "False" ]
Tell whether any of start till end lines have changed The end points are inclusive and indices start from 1.
[ "Tell", "whether", "any", "of", "start", "till", "end", "lines", "have", "changed" ]
python
train
bitesofcode/projexui
projexui/widgets/xtreewidget/xtreewidget.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xtreewidget/xtreewidget.py#L1875-L1885
def setVisibleColumns(self, visible):
    """
    Sets the list of visible columns for this widget.  Any column in this
    tree's list NOT found within the inputed column list is hidden.

    :param      visible | [<str>, ..]
    """
    # Hide every column whose name is absent from the requested list.
    for index, name in enumerate(self.columns()):
        self.setColumnHidden(index, name not in visible)
[ "def", "setVisibleColumns", "(", "self", ",", "visible", ")", ":", "colnames", "=", "self", ".", "columns", "(", ")", "for", "c", ",", "column", "in", "enumerate", "(", "colnames", ")", ":", "self", ".", "setColumnHidden", "(", "c", ",", "column", "not...
Sets the list of visible columns for this widget. This method will take any column in this tree's list NOT found within the inputed column list and hide them. :param columns | [<str>, ..]
[ "Sets", "the", "list", "of", "visible", "columns", "for", "this", "widget", ".", "This", "method", "will", "take", "any", "column", "in", "this", "tree", "s", "list", "NOT", "found", "within", "the", "inputed", "column", "list", "and", "hide", "them", "....
python
train
HydrelioxGitHub/pybbox
pybbox/__init__.py
https://github.com/HydrelioxGitHub/pybbox/blob/bedcdccab5d18d36890ef8bf414845f2dec18b5c/pybbox/__init__.py#L115-L127
def is_device_connected(self, ip):
    """
    Check if a device identified by its IP is connected to the box

    :param ip: IP of the device you want to test
    :type ip: str
    :return: True if the device is connected, False if it's not
    :rtype: bool
    """
    for device in self.get_all_connected_devices():
        if device['ipaddress'] == ip:
            # 'active' is 1 when the device currently holds a connection.
            return device['active'] == 1
    return False
[ "def", "is_device_connected", "(", "self", ",", "ip", ")", ":", "all_devices", "=", "self", ".", "get_all_connected_devices", "(", ")", "for", "device", "in", "all_devices", ":", "if", "ip", "==", "device", "[", "'ipaddress'", "]", ":", "return", "device", ...
Check if a device identified by it IP is connected to the box :param ip: IP of the device you want to test :type ip: str :return: True is the device is connected, False if it's not :rtype: bool
[ "Check", "if", "a", "device", "identified", "by", "it", "IP", "is", "connected", "to", "the", "box", ":", "param", "ip", ":", "IP", "of", "the", "device", "you", "want", "to", "test", ":", "type", "ip", ":", "str", ":", "return", ":", "True", "is",...
python
train
tchellomello/python-arlo
pyarlo/base_station.py
https://github.com/tchellomello/python-arlo/blob/db70aeb81705309c56ad32bbab1094f6cd146524/pyarlo/base_station.py#L362-L369
def is_in_schedule_mode(self):
    """Returns True if base_station is currently on a scheduled mode."""
    event = self.publish_and_get_event("schedule")
    if not event or event.get("resource", None) != "schedule":
        return False
    # The 'active' property carries the actual schedule on/off state.
    return event.get('properties').get("active", False)
[ "def", "is_in_schedule_mode", "(", "self", ")", ":", "resource", "=", "\"schedule\"", "mode_event", "=", "self", ".", "publish_and_get_event", "(", "resource", ")", "if", "mode_event", "and", "mode_event", ".", "get", "(", "\"resource\"", ",", "None", ")", "==...
Returns True if base_station is currently on a scheduled mode.
[ "Returns", "True", "if", "base_station", "is", "currently", "on", "a", "scheduled", "mode", "." ]
python
train
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_mac_access_list.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_mac_access_list.py#L446-L460
def get_mac_acl_for_intf_output_interface_interface_name(self, **kwargs):
    """Auto Generated Code
    """
    # Build the RPC request element tree for get-mac-acl-for-intf.
    get_mac_acl_for_intf = ET.Element("get_mac_acl_for_intf")
    config = get_mac_acl_for_intf
    output = ET.SubElement(get_mac_acl_for_intf, "output")
    interface = ET.SubElement(output, "interface")
    ET.SubElement(interface, "interface-type").text = kwargs.pop('interface_type')
    ET.SubElement(interface, "interface-name").text = kwargs.pop('interface_name')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
[ "def", "get_mac_acl_for_intf_output_interface_interface_name", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_mac_acl_for_intf", "=", "ET", ".", "Element", "(", "\"get_mac_acl_for_intf\"", ")", "co...
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
materialsproject/pymatgen
pymatgen/analysis/structure_analyzer.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/structure_analyzer.py#L350-L363
def get_sitej(self, site_index, image_index):
    """
    Assuming there is some value in the connectivity array at indices
    (1, 3, 12). sitei can be obtained directly from the input structure
    (structure[1]). sitej can be obtained by passing 3, 12 to this function

    Args:
        site_index (int): index of the site (3 in the example)
        image_index (int): index of the image (12 in the example)
    """
    # Shift the site's fractional coordinates by the periodic image offset.
    shifted_coords = self.s[site_index].frac_coords + self.offsets[image_index]
    return PeriodicSite(
        self.s[site_index].species, shifted_coords, self.s.lattice)
[ "def", "get_sitej", "(", "self", ",", "site_index", ",", "image_index", ")", ":", "atoms_n_occu", "=", "self", ".", "s", "[", "site_index", "]", ".", "species", "lattice", "=", "self", ".", "s", ".", "lattice", "coords", "=", "self", ".", "s", "[", "...
Assuming there is some value in the connectivity array at indices (1, 3, 12). sitei can be obtained directly from the input structure (structure[1]). sitej can be obtained by passing 3, 12 to this function Args: site_index (int): index of the site (3 in the example) image_index (int): index of the image (12 in the example)
[ "Assuming", "there", "is", "some", "value", "in", "the", "connectivity", "array", "at", "indices", "(", "1", "3", "12", ")", ".", "sitei", "can", "be", "obtained", "directly", "from", "the", "input", "structure", "(", "structure", "[", "1", "]", ")", "...
python
train
cltl/KafNafParserPy
KafNafParserPy/KafNafParserMod.py
https://github.com/cltl/KafNafParserPy/blob/9bc32e803c176404b255ba317479b8780ed5f569/KafNafParserPy/KafNafParserMod.py#L734-L744
def remove_dependency_layer(self):
    """
    Removes the dependency layer (if exists) of the object (in memory)
    """
    layer = self.dependency_layer
    if layer is not None:
        # Detach the layer's XML node from the document root and drop the
        # cached extractor alongside the layer itself.
        self.root.remove(layer.get_node())
        self.dependency_layer = None
        self.my_dependency_extractor = None

    if self.header is not None:
        # Also drop the 'deps' linguistic-processor entry from the header.
        self.header.remove_lp('deps')
[ "def", "remove_dependency_layer", "(", "self", ")", ":", "if", "self", ".", "dependency_layer", "is", "not", "None", ":", "this_node", "=", "self", ".", "dependency_layer", ".", "get_node", "(", ")", "self", ".", "root", ".", "remove", "(", "this_node", ")...
Removes the dependency layer (if exists) of the object (in memory)
[ "Removes", "the", "dependency", "layer", "(", "if", "exists", ")", "of", "the", "object", "(", "in", "memory", ")" ]
python
train
cons3rt/pycons3rt
pycons3rt/deployment.py
https://github.com/cons3rt/pycons3rt/blob/f004ab3a35c5bff2f698131fef3b2a8ed5a7596d/pycons3rt/deployment.py#L638-L665
def get_scenario_host_ip_on_network(self, scenario_role_name, network_name):
    """Given a network name, returns the IP address

    :param network_name: (str) Name of the network to search for
    :param scenario_role_name: (str) role name to return the IP address for
    :return: (str) IP address on the specified network or None
    """
    log = logging.getLogger(self.cls_logger + '.get_scenario_host_ip_on_network')

    # Locate the network info list for the requested role name
    # (as in the original, a later matching entry wins).
    matched_info = None
    for host_entry in self.scenario_network_info:
        if host_entry['scenario_role_name'] == scenario_role_name:
            matched_info = host_entry['network_info']
    if not matched_info:
        log.warn('Unable to find network info for this host')
        return

    # Pick the internal IP matching the requested network name.
    internal_ip = None
    for network_entry in matched_info:
        if network_entry['network_name'] == network_name:
            internal_ip = network_entry['internal_ip']
    if not internal_ip:
        log.warn('Unable to find an internal IP for network: {n}'.format(n=network_name))
        return
    log.debug('Found IP address [{i}] for network name: {n}'.format(i=internal_ip, n=network_name))
    return internal_ip
[ "def", "get_scenario_host_ip_on_network", "(", "self", ",", "scenario_role_name", ",", "network_name", ")", ":", "log", "=", "logging", ".", "getLogger", "(", "self", ".", "cls_logger", "+", "'.get_scenario_host_ip_on_network'", ")", "# Determine the network info for this...
Given a network name, returns the IP address :param network_name: (str) Name of the network to search for :param scenario_role_name: (str) role name to return the IP address for :return: (str) IP address on the specified network or None
[ "Given", "a", "network", "name", "returns", "the", "IP", "address" ]
python
train
hyperledger/sawtooth-core
validator/sawtooth_validator/execution/scheduler_parallel.py
https://github.com/hyperledger/sawtooth-core/blob/8cf473bc2207e51f02bd182d825158a57d72b098/validator/sawtooth_validator/execution/scheduler_parallel.py#L812-L848
def _set_least_batch_id(self, txn_signature):
    """Set the first batch id that doesn't have all results.

    Args:
        txn_signature (str): The txn identifier of the transaction with
            results being set.
    """
    # The batch containing the transaction whose results were just set.
    batch = self._batches_by_txn_id[txn_signature]

    # Position of the current "least batch without results" in the
    # ordered batch list.
    least_index = self._index_of_batch(
        self._batches_by_id[self._least_batch_id_wo_results].batch)

    current_index = self._index_of_batch(batch)
    all_prior = False

    if current_index <= least_index:
        # The updated batch is at or before the least pointer, so the
        # pointer cannot move forward yet.
        return
    # Test to see if all batches from the least_batch to
    # the prior batch to the current batch have results.
    if all(
            all(t.header_signature in self._txn_results
                for t in b.transactions)
            for b in self._batches[least_index:current_index]):
        all_prior = True
    if not all_prior:
        # A gap remains before the current batch; keep the pointer put.
        return
    possible_least = self._batches[current_index].header_signature
    # Find the first batch from the current batch on, that doesn't have
    # all results.
    for b in self._batches[current_index:]:
        if not all(t.header_signature in self._txn_results
                   for t in b.transactions):
            possible_least = b.header_signature
            break
    self._least_batch_id_wo_results = possible_least
[ "def", "_set_least_batch_id", "(", "self", ",", "txn_signature", ")", ":", "batch", "=", "self", ".", "_batches_by_txn_id", "[", "txn_signature", "]", "least_index", "=", "self", ".", "_index_of_batch", "(", "self", ".", "_batches_by_id", "[", "self", ".", "_l...
Set the first batch id that doesn't have all results. Args: txn_signature (str): The txn identifier of the transaction with results being set.
[ "Set", "the", "first", "batch", "id", "that", "doesn", "t", "have", "all", "results", "." ]
python
train
senaite/senaite.core.supermodel
src/senaite/core/supermodel/model.py
https://github.com/senaite/senaite.core.supermodel/blob/1819154332b8776f187aa98a2e299701983a0119/src/senaite/core/supermodel/model.py#L77-L83
def init_with_brain(self, brain):
    """Initialize with a catalog brain

    :param brain: catalog brain to wrap; the full object behind it is
        only resolved on demand (``self._instance`` stays None here).
    """
    self._uid = api.get_uid(brain)
    self._brain = brain
    # Remember which catalog the brain came from for later lookups.
    self._catalog = self.get_catalog_for(brain)
    self._instance = None
[ "def", "init_with_brain", "(", "self", ",", "brain", ")", ":", "self", ".", "_uid", "=", "api", ".", "get_uid", "(", "brain", ")", "self", ".", "_brain", "=", "brain", "self", ".", "_catalog", "=", "self", ".", "get_catalog_for", "(", "brain", ")", "...
Initialize with a catalog brain
[ "Initialize", "with", "a", "catalog", "brain" ]
python
train
tensorflow/tensor2tensor
tensor2tensor/trax/trax.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/trax.py#L307-L333
def _jit_update_fun(predict_fun, loss_fun, optimizer, lr_fun, num_devices):
  """Get jit-ed update function for loss, optimizer, learning rate function."""
  if num_devices == 1:  # TODO(lukaszkaiser): remove branch when not needed.
    # Single-device path: plain jit; note the rng is passed and returned
    # as a 1-element list to keep the calling convention uniform with the
    # pmap path below.
    def single_update(i, opt_state, batch, rng):
      rng, subrng = jax_random.split(rng[0])
      _, opt_update = optimizer(lr_fun)
      params = trax_opt.get_params(opt_state)
      return opt_update(i, backend.grad(loss_fun)(
          params, batch, predict_fun, rng), opt_state), [subrng]
    return backend.jit(single_update)

  @functools.partial(backend.pmap, axis_name="batch")
  def mapped_update(i, opt_state, batch, rng):
    """This is a multi-device version of the update function above."""
    # We assume all tensors have the first dimension = num_devices.
    rng, subrng = jax_random.split(rng)
    _, opt_update = optimizer(lr_fun)
    params = trax_opt.get_params(opt_state)
    grads = backend.grad(loss_fun)(params, batch, predict_fun, rng)
    # Average/accumulate gradients across devices with a cross-replica sum.
    grads = jax.tree_util.tree_map(
        lambda g: lax.psum(g, "batch"), grads)
    return opt_update(i, grads, opt_state), subrng

  def update(i, opt_state, batch, rng):
    # The step counter is a host scalar; replicate it so every device in
    # the pmap receives its own copy.
    return mapped_update(jax.replicate(i), opt_state, batch, rng)

  return update
[ "def", "_jit_update_fun", "(", "predict_fun", ",", "loss_fun", ",", "optimizer", ",", "lr_fun", ",", "num_devices", ")", ":", "if", "num_devices", "==", "1", ":", "# TODO(lukaszkaiser): remove branch when not needed.", "def", "single_update", "(", "i", ",", "opt_sta...
Get jit-ed update function for loss, optimizer, learning rate function.
[ "Get", "jit", "-", "ed", "update", "function", "for", "loss", "optimizer", "learning", "rate", "function", "." ]
python
train
doraemonext/wechat-python-sdk
wechat_sdk/core/conf.py
https://github.com/doraemonext/wechat-python-sdk/blob/bf6f6f3d4a5440feb73a51937059d7feddc335a0/wechat_sdk/core/conf.py#L286-L295
def _update_crypto(self): """ 根据当前配置内容更新 Crypto 类 """ if self.__encrypt_mode in ['compatible', 'safe'] and self.__encoding_aes_key is not None: if self.__token is None or self.__appid is None: raise NeedParamError('Please provide token and appid parameters in the construction of class.') self.__crypto = BasicCrypto(self.__token, self.__encoding_aes_key, self.__appid) else: self.__crypto = None
[ "def", "_update_crypto", "(", "self", ")", ":", "if", "self", ".", "__encrypt_mode", "in", "[", "'compatible'", ",", "'safe'", "]", "and", "self", ".", "__encoding_aes_key", "is", "not", "None", ":", "if", "self", ".", "__token", "is", "None", "or", "sel...
根据当前配置内容更新 Crypto 类
[ "根据当前配置内容更新", "Crypto", "类" ]
python
valid
saltstack/salt
salt/modules/btrfs.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/btrfs.py#L514-L580
def _restripe(mountpoint, direction, *devices, **kwargs):
    '''
    Restripe BTRFS: add or remove devices from the particular mounted filesystem.

    mountpoint -- path where a BTRFS filesystem is mounted (must not be a
        raw device path).
    direction -- subcommand passed to ``btrfs device`` (e.g. 'add';
        presumably 'delete' for removal — confirm against callers).
    devices -- one or more device paths; each must appear in the output of
        ``btrfs.devices``.
    kwargs -- for 'add': ``nodiscard`` (-K), ``force`` (-f), and optional
        ``dc``/``mc`` data/metadata balance conversion profiles.

    Returns a dict from ``btrfs.info`` for the mountpoint, plus a 'log' key
    with any command output.  Raises CommandExecutionError on bad arguments
    or unmounted/unknown devices.
    '''
    fs_log = []

    # A device path here almost certainly means the caller mixed up the
    # argument order — fail early.
    if salt.utils.fsutils._is_device(mountpoint):
        raise CommandExecutionError(
            "Mountpount expected, while device \"{0}\" specified".format(mountpoint))

    # Verify a BTRFS filesystem is actually mounted at the given point.
    # NOTE(review): the ``break`` only exits the inner loop; the outer scan
    # continues harmlessly once ``mounted`` is set.
    mounted = False
    for device, mntpoints in six.iteritems(salt.utils.fsutils._get_mounts("btrfs")):
        for mntdata in mntpoints:
            if mntdata['mount_point'] == mountpoint:
                mounted = True
                break

    if not mounted:
        raise CommandExecutionError(
            "No BTRFS device mounted on \"{0}\" mountpoint".format(mountpoint))

    if not devices:
        raise CommandExecutionError("No devices specified.")

    # Reject devices the btrfs module does not know about.
    available_devices = __salt__['btrfs.devices']()
    for device in devices:
        if device not in six.iterkeys(available_devices):
            raise CommandExecutionError("Device \"{0}\" is not recognized".format(device))

    # Build: btrfs device <direction> <dev...> [-K] [-f] <mountpoint>
    # NOTE(review): the -K/-f flags are appended AFTER the device list;
    # the btrfs CLI presumably permutes options so this works — confirm
    # against btrfs-device(8) before relying on it.
    cmd = ['btrfs device {0}'.format(direction)]
    for device in devices:
        cmd.append(device)

    if direction == 'add':
        if kwargs.get("nodiscard"):
            cmd.append("-K")
        if kwargs.get("force"):
            cmd.append("-f")

    cmd.append(mountpoint)

    out = __salt__['cmd.run_all'](' '.join(cmd))
    salt.utils.fsutils._verify_run(out)
    if out['stdout']:
        fs_log.append(out['stdout'])

    # After adding devices, rebalance so data is striped onto them; with
    # both dc and mc given, convert the data/metadata profiles as well.
    if direction == 'add':
        out = None
        data_conversion = kwargs.get("dc")
        meta_conversion = kwargs.get("mc")
        if data_conversion and meta_conversion:
            out = __salt__['cmd.run_all'](
                "btrfs balance start -dconvert={0} -mconvert={1} {2}".format(
                    data_conversion, meta_conversion, mountpoint))
        else:
            out = __salt__['cmd.run_all']("btrfs filesystem balance {0}".format(mountpoint))
        salt.utils.fsutils._verify_run(out)
        if out['stdout']:
            fs_log.append(out['stdout'])

    # Summarize the result
    ret = {}
    if fs_log:
        ret.update({'log': '\n'.join(fs_log)})
    ret.update(__salt__['btrfs.info'](mountpoint))

    return ret
[ "def", "_restripe", "(", "mountpoint", ",", "direction", ",", "*", "devices", ",", "*", "*", "kwargs", ")", ":", "fs_log", "=", "[", "]", "if", "salt", ".", "utils", ".", "fsutils", ".", "_is_device", "(", "mountpoint", ")", ":", "raise", "CommandExecu...
Restripe BTRFS: add or remove devices from the particular mounted filesystem.
[ "Restripe", "BTRFS", ":", "add", "or", "remove", "devices", "from", "the", "particular", "mounted", "filesystem", "." ]
python
train
tmontaigu/pylas
pylas/point/dims.py
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/point/dims.py#L29-L37
def _build_point_formats_dtypes(point_format_dimensions, dimensions_dict):
    """Map every point format id to its composed numpy.dtype.

    The resulting dtypes keep bit fields packed; callers must unpack
    them whenever those sub-byte fields are accessed.

    Args:
        point_format_dimensions: mapping of format id -> dimension names.
        dimensions_dict: mapping of dimension name -> dtype description,
            forwarded to ``_point_format_to_dtype``.

    Returns:
        dict of format id -> numpy.dtype.
    """
    dtypes_by_format = {}
    for fmt_id, point_fmt in point_format_dimensions.items():
        dtypes_by_format[fmt_id] = _point_format_to_dtype(point_fmt, dimensions_dict)
    return dtypes_by_format
[ "def", "_build_point_formats_dtypes", "(", "point_format_dimensions", ",", "dimensions_dict", ")", ":", "return", "{", "fmt_id", ":", "_point_format_to_dtype", "(", "point_fmt", ",", "dimensions_dict", ")", "for", "fmt_id", ",", "point_fmt", "in", "point_format_dimensio...
Builds the dict mapping point format id to numpy.dtype In the dtypes, bit fields are still packed, and need to be unpacked each time you want to access them
[ "Builds", "the", "dict", "mapping", "point", "format", "id", "to", "numpy", ".", "dtype", "In", "the", "dtypes", "bit", "fields", "are", "still", "packed", "and", "need", "to", "be", "unpacked", "each", "time", "you", "want", "to", "access", "them" ]
python
test