repo
stringlengths
7
55
path
stringlengths
4
223
url
stringlengths
87
315
code
stringlengths
75
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
avg_line_len
float64
7.91
980
saltstack/salt
salt/loader.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/loader.py#L1067-L1076
def cache(opts, serial): ''' Returns the returner modules ''' return LazyLoader( _module_dirs(opts, 'cache', 'cache'), opts, tag='cache', pack={'__opts__': opts, '__context__': {'serial': serial}}, )
[ "def", "cache", "(", "opts", ",", "serial", ")", ":", "return", "LazyLoader", "(", "_module_dirs", "(", "opts", ",", "'cache'", ",", "'cache'", ")", ",", "opts", ",", "tag", "=", "'cache'", ",", "pack", "=", "{", "'__opts__'", ":", "opts", ",", "'__c...
Returns the returner modules
[ "Returns", "the", "returner", "modules" ]
python
train
24.2
devassistant/devassistant
devassistant/gui/gui_helper.py
https://github.com/devassistant/devassistant/blob/2dbfeaa666a64127263664d18969c55d19ecc83e/devassistant/gui/gui_helper.py#L138-L144
def create_button(self, style=Gtk.ReliefStyle.NORMAL): """ This is generalized method for creating Gtk.Button """ btn = Gtk.Button() btn.set_relief(style) return btn
[ "def", "create_button", "(", "self", ",", "style", "=", "Gtk", ".", "ReliefStyle", ".", "NORMAL", ")", ":", "btn", "=", "Gtk", ".", "Button", "(", ")", "btn", ".", "set_relief", "(", "style", ")", "return", "btn" ]
This is generalized method for creating Gtk.Button
[ "This", "is", "generalized", "method", "for", "creating", "Gtk", ".", "Button" ]
python
train
29.571429
coded-by-hand/mass
env/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/util.py
https://github.com/coded-by-hand/mass/blob/59005479efed3cd8598a8f0c66791a4482071899/env/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/util.py#L321-L332
def dist_location(dist): """ Get the site-packages location of this distribution. Generally this is dist.location, except in the case of develop-installed packages, where dist.location is the source code location, and we want to know where the egg-link file is. """ egg_link = egg_link_path(dist) if os.path.exists(egg_link): return egg_link return dist.location
[ "def", "dist_location", "(", "dist", ")", ":", "egg_link", "=", "egg_link_path", "(", "dist", ")", "if", "os", ".", "path", ".", "exists", "(", "egg_link", ")", ":", "return", "egg_link", "return", "dist", ".", "location" ]
Get the site-packages location of this distribution. Generally this is dist.location, except in the case of develop-installed packages, where dist.location is the source code location, and we want to know where the egg-link file is.
[ "Get", "the", "site", "-", "packages", "location", "of", "this", "distribution", ".", "Generally", "this", "is", "dist", ".", "location", "except", "in", "the", "case", "of", "develop", "-", "installed", "packages", "where", "dist", ".", "location", "is", ...
python
train
33
google/mobly
mobly/controllers/monsoon.py
https://github.com/google/mobly/blob/38ba2cf7d29a20e6a2fca1718eecb337df38db26/mobly/controllers/monsoon.py#L290-L298
def SetMaxCurrent(self, i): """Set the max output current. """ if i < 0 or i > 8: raise MonsoonError(("Target max current %sA, is out of acceptable " "range [0, 8].") % i) val = 1023 - int((i / 8) * 1023) self._SendStruct("BBB", 0x01, 0x0a, val & 0xff) self._SendStruct("BBB", 0x01, 0x0b, val >> 8)
[ "def", "SetMaxCurrent", "(", "self", ",", "i", ")", ":", "if", "i", "<", "0", "or", "i", ">", "8", ":", "raise", "MonsoonError", "(", "(", "\"Target max current %sA, is out of acceptable \"", "\"range [0, 8].\"", ")", "%", "i", ")", "val", "=", "1023", "-"...
Set the max output current.
[ "Set", "the", "max", "output", "current", "." ]
python
train
42.444444
linkhub-sdk/popbill.py
popbill/cashbillService.py
https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/cashbillService.py#L304-L351
def search(self, CorpNum, DType, SDate, EDate, State, TradeType, TradeUsage, TaxationType, Page, PerPage, Order, UserID=None, QString=None, TradeOpt=None): """ λͺ©λ‘ 쑰회 args CorpNum : νŒλΉŒνšŒμ› μ‚¬μ—…μžλ²ˆν˜Έ DType : μΌμžμœ ν˜•, R-λ“±λ‘μΌμž, T-거래일자, I-λ°œν–‰μΌμž 쀑 택 1 SDate : μ‹œμž‘μΌμž, ν‘œμ‹œν˜•μ‹(yyyyMMdd) EDate : μ’…λ£ŒμΌμž, ν‘œμ‹œν˜•μ‹(yyyyMMdd) State : μƒνƒœμ½”λ“œ λ°°μ—΄, 2,3번째 μžλ¦¬μ— μ™€μΌλ“œμΉ΄λ“œ(*) μ‚¬μš©κ°€λŠ₯ TradeType : λ¬Έμ„œν˜•νƒœ λ°°μ—΄, N-μΌλ°˜ν˜„κΈˆμ˜μˆ˜μ¦, C-μ·¨μ†Œν˜„κΈˆμ˜μˆ˜μ¦ TradeUsage : κ±°λž˜κ΅¬λΆ„ λ°°μ—΄, P-μ†Œλ“κ³΅μ œμš©, C-μ§€μΆœμ¦λΉ™μš© TaxationType : κ³Όμ„Έν˜•νƒœ λ°°μ—΄, T-κ³Όμ„Έ, N-λΉ„κ³Όμ„Έ Page : νŽ˜μ΄μ§€λ²ˆν˜Έ PerPage : νŽ˜μ΄μ§€λ‹Ή κ²€μƒ‰κ°œμˆ˜ Order : μ •λ ¬λ°©ν–₯, D-λ‚΄λ¦Όμ°¨μˆœ, A-μ˜€λ¦„μ°¨μˆœ UserID : 팝빌 νšŒμ›μ•„μ΄λ”” QString : ν˜„κΈˆμ˜μˆ˜μ¦ μ‹λ³„λ²ˆν˜Έ, λ―ΈκΈ°μž¬μ‹œ μ „μ²΄μ‘°νšŒ TradeOpt : κ±°λž˜μœ ν˜•, N-일반, B-λ„μ„œκ³΅μ—°, T-λŒ€μ€‘κ΅ν†΅ """ if DType == None or DType == '': raise PopbillException(-99999999, "μΌμžμœ ν˜•μ΄ μž…λ ₯λ˜μ§€ μ•Šμ•˜μŠ΅λ‹ˆλ‹€.") if SDate == None or SDate == '': raise PopbillException(-99999999, "μ‹œμž‘μΌμžκ°€ μž…λ ₯λ˜μ§€ μ•Šμ•˜μŠ΅λ‹ˆλ‹€.") if EDate == None or EDate == '': raise PopbillException(-99999999, "μ’…λ£ŒμΌμžκ°€ μž…λ ₯λ˜μ§€ μ•Šμ•˜μŠ΅λ‹ˆλ‹€.") uri = '/Cashbill/Search' uri += '?DType=' + DType uri += '&SDate=' + SDate uri += '&EDate=' + EDate uri += '&State=' + ','.join(State) uri += '&TradeUsage=' + ','.join(TradeUsage) uri += '&TradeType=' + ','.join(TradeType) uri += '&TaxationType=' + ','.join(TaxationType) uri += '&Page=' + str(Page) uri += '&PerPage=' + str(PerPage) uri += '&Order=' + Order if QString is not None: uri += '&QString=' + QString if TradeOpt is not None: uri += '&TradeOpt=' + ','.join(TradeOpt) return self._httpget(uri, CorpNum, UserID)
[ "def", "search", "(", "self", ",", "CorpNum", ",", "DType", ",", "SDate", ",", "EDate", ",", "State", ",", "TradeType", ",", "TradeUsage", ",", "TaxationType", ",", "Page", ",", "PerPage", ",", "Order", ",", "UserID", "=", "None", ",", "QString", "=", ...
λͺ©λ‘ 쑰회 args CorpNum : νŒλΉŒνšŒμ› μ‚¬μ—…μžλ²ˆν˜Έ DType : μΌμžμœ ν˜•, R-λ“±λ‘μΌμž, T-거래일자, I-λ°œν–‰μΌμž 쀑 택 1 SDate : μ‹œμž‘μΌμž, ν‘œμ‹œν˜•μ‹(yyyyMMdd) EDate : μ’…λ£ŒμΌμž, ν‘œμ‹œν˜•μ‹(yyyyMMdd) State : μƒνƒœμ½”λ“œ λ°°μ—΄, 2,3번째 μžλ¦¬μ— μ™€μΌλ“œμΉ΄λ“œ(*) μ‚¬μš©κ°€λŠ₯ TradeType : λ¬Έμ„œν˜•νƒœ λ°°μ—΄, N-μΌλ°˜ν˜„κΈˆμ˜μˆ˜μ¦, C-μ·¨μ†Œν˜„κΈˆμ˜μˆ˜μ¦ TradeUsage : κ±°λž˜κ΅¬λΆ„ λ°°μ—΄, P-μ†Œλ“κ³΅μ œμš©, C-μ§€μΆœμ¦λΉ™μš© TaxationType : κ³Όμ„Έν˜•νƒœ λ°°μ—΄, T-κ³Όμ„Έ, N-λΉ„κ³Όμ„Έ Page : νŽ˜μ΄μ§€λ²ˆν˜Έ PerPage : νŽ˜μ΄μ§€λ‹Ή κ²€μƒ‰κ°œμˆ˜ Order : μ •λ ¬λ°©ν–₯, D-λ‚΄λ¦Όμ°¨μˆœ, A-μ˜€λ¦„μ°¨μˆœ UserID : 팝빌 νšŒμ›μ•„μ΄λ”” QString : ν˜„κΈˆμ˜μˆ˜μ¦ μ‹λ³„λ²ˆν˜Έ, λ―ΈκΈ°μž¬μ‹œ μ „μ²΄μ‘°νšŒ TradeOpt : κ±°λž˜μœ ν˜•, N-일반, B-λ„μ„œκ³΅μ—°, T-λŒ€μ€‘κ΅ν†΅
[ "λͺ©λ‘", "쑰회", "args", "CorpNum", ":", "νŒλΉŒνšŒμ›", "μ‚¬μ—…μžλ²ˆν˜Έ", "DType", ":", "μΌμžμœ ν˜•", "R", "-", "λ“±λ‘μΌμž", "T", "-", "거래일자", "I", "-", "λ°œν–‰μΌμž", "쀑", "택", "1", "SDate", ":", "μ‹œμž‘μΌμž", "ν‘œμ‹œν˜•μ‹", "(", "yyyyMMdd", ")", "EDate", ":", "μ’…λ£ŒμΌμž", "ν‘œμ‹œν˜•μ‹", "(", "yyyyMMdd", ")"...
python
train
38.729167
apache/spark
python/pyspark/sql/streaming.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/streaming.py#L811-L825
def queryName(self, queryName): """Specifies the name of the :class:`StreamingQuery` that can be started with :func:`start`. This name must be unique among all the currently active queries in the associated SparkSession. .. note:: Evolving. :param queryName: unique name for the query >>> writer = sdf.writeStream.queryName('streaming_query') """ if not queryName or type(queryName) != str or len(queryName.strip()) == 0: raise ValueError('The queryName must be a non-empty string. Got: %s' % queryName) self._jwrite = self._jwrite.queryName(queryName) return self
[ "def", "queryName", "(", "self", ",", "queryName", ")", ":", "if", "not", "queryName", "or", "type", "(", "queryName", ")", "!=", "str", "or", "len", "(", "queryName", ".", "strip", "(", ")", ")", "==", "0", ":", "raise", "ValueError", "(", "'The que...
Specifies the name of the :class:`StreamingQuery` that can be started with :func:`start`. This name must be unique among all the currently active queries in the associated SparkSession. .. note:: Evolving. :param queryName: unique name for the query >>> writer = sdf.writeStream.queryName('streaming_query')
[ "Specifies", "the", "name", "of", "the", ":", "class", ":", "StreamingQuery", "that", "can", "be", "started", "with", ":", "func", ":", "start", ".", "This", "name", "must", "be", "unique", "among", "all", "the", "currently", "active", "queries", "in", "...
python
train
43
DistrictDataLabs/yellowbrick
yellowbrick/base.py
https://github.com/DistrictDataLabs/yellowbrick/blob/59b67236a3862c73363e8edad7cd86da5b69e3b2/yellowbrick/base.py#L236-L247
def set_title(self, title=None): """ Sets the title on the current axes. Parameters ---------- title: string, default: None Add title to figure or if None leave untitled. """ title = self.title or title if title is not None: self.ax.set_title(title)
[ "def", "set_title", "(", "self", ",", "title", "=", "None", ")", ":", "title", "=", "self", ".", "title", "or", "title", "if", "title", "is", "not", "None", ":", "self", ".", "ax", ".", "set_title", "(", "title", ")" ]
Sets the title on the current axes. Parameters ---------- title: string, default: None Add title to figure or if None leave untitled.
[ "Sets", "the", "title", "on", "the", "current", "axes", "." ]
python
train
27.25
andreikop/qutepart
qutepart/syntax/__init__.py
https://github.com/andreikop/qutepart/blob/109d76b239751318bcef06f39b2fbbf18687a40b/qutepart/syntax/__init__.py#L205-L212
def _getSyntaxByFirstLine(self, firstLine): """Get syntax by first line of the file """ for pattern, xmlFileName in self._firstLineToXmlFileName.items(): if fnmatch.fnmatch(firstLine, pattern): return self._getSyntaxByXmlFileName(xmlFileName) else: raise KeyError("No syntax for " + firstLine)
[ "def", "_getSyntaxByFirstLine", "(", "self", ",", "firstLine", ")", ":", "for", "pattern", ",", "xmlFileName", "in", "self", ".", "_firstLineToXmlFileName", ".", "items", "(", ")", ":", "if", "fnmatch", ".", "fnmatch", "(", "firstLine", ",", "pattern", ")", ...
Get syntax by first line of the file
[ "Get", "syntax", "by", "first", "line", "of", "the", "file" ]
python
train
44.75
merll/docker-map
dockermap/map/runner/base.py
https://github.com/merll/docker-map/blob/e14fe86a6ff5c33d121eb2f9157e9359cb80dd02/dockermap/map/runner/base.py#L450-L465
def get_network_remove_kwargs(self, action, network_name, kwargs=None): """ Generates keyword arguments for the Docker client to remove a network. :param action: Action configuration. :type action: ActionConfig :param network_name: Network name or id. :type network_name: unicode | str :param kwargs: Additional keyword arguments to complement or override the configuration-based values. :type kwargs: dict :return: Resulting keyword arguments. :rtype: dict """ c_kwargs = dict(net_id=network_name) update_kwargs(c_kwargs, kwargs) return c_kwargs
[ "def", "get_network_remove_kwargs", "(", "self", ",", "action", ",", "network_name", ",", "kwargs", "=", "None", ")", ":", "c_kwargs", "=", "dict", "(", "net_id", "=", "network_name", ")", "update_kwargs", "(", "c_kwargs", ",", "kwargs", ")", "return", "c_kw...
Generates keyword arguments for the Docker client to remove a network. :param action: Action configuration. :type action: ActionConfig :param network_name: Network name or id. :type network_name: unicode | str :param kwargs: Additional keyword arguments to complement or override the configuration-based values. :type kwargs: dict :return: Resulting keyword arguments. :rtype: dict
[ "Generates", "keyword", "arguments", "for", "the", "Docker", "client", "to", "remove", "a", "network", "." ]
python
train
40.25
basho/riak-python-client
riak/codecs/pbuf.py
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/codecs/pbuf.py#L758-L787
def encode_timeseries_put(self, tsobj): """ Fills an TsPutReq message with the appropriate data and metadata from a TsObject. :param tsobj: a TsObject :type tsobj: TsObject :param req: the protobuf message to fill :type req: riak.pb.riak_ts_pb2.TsPutReq """ req = riak.pb.riak_ts_pb2.TsPutReq() req.table = str_to_bytes(tsobj.table.name) if tsobj.columns: raise NotImplementedError("columns are not implemented yet") if tsobj.rows and isinstance(tsobj.rows, list): for row in tsobj.rows: tsr = req.rows.add() # NB: type TsRow if not isinstance(row, list): raise ValueError("TsObject row must be a list of values") for cell in row: tsc = tsr.cells.add() # NB: type TsCell self.encode_to_ts_cell(cell, tsc) else: raise RiakError("TsObject requires a list of rows") mc = riak.pb.messages.MSG_CODE_TS_PUT_REQ rc = riak.pb.messages.MSG_CODE_TS_PUT_RESP return Msg(mc, req.SerializeToString(), rc)
[ "def", "encode_timeseries_put", "(", "self", ",", "tsobj", ")", ":", "req", "=", "riak", ".", "pb", ".", "riak_ts_pb2", ".", "TsPutReq", "(", ")", "req", ".", "table", "=", "str_to_bytes", "(", "tsobj", ".", "table", ".", "name", ")", "if", "tsobj", ...
Fills an TsPutReq message with the appropriate data and metadata from a TsObject. :param tsobj: a TsObject :type tsobj: TsObject :param req: the protobuf message to fill :type req: riak.pb.riak_ts_pb2.TsPutReq
[ "Fills", "an", "TsPutReq", "message", "with", "the", "appropriate", "data", "and", "metadata", "from", "a", "TsObject", "." ]
python
train
38
bhmm/bhmm
bhmm/_external/sklearn/mixture/gmm.py
https://github.com/bhmm/bhmm/blob/9804d18c2ddb684fb4d90b544cc209617a89ca9a/bhmm/_external/sklearn/mixture/gmm.py#L285-L322
def score_samples(self, X): """Return the per-sample likelihood of the data under the model. Compute the log probability of X under the model and return the posterior distribution (responsibilities) of each mixture component for each element of X. Parameters ---------- X: array_like, shape (n_samples, n_features) List of n_features-dimensional data points. Each row corresponds to a single data point. Returns ------- logprob : array_like, shape (n_samples,) Log probabilities of each data point in X. responsibilities : array_like, shape (n_samples, n_components) Posterior probabilities of each mixture component for each observation """ check_is_fitted(self, 'means_') X = check_array(X) if X.ndim == 1: X = X[:, np.newaxis] if X.size == 0: return np.array([]), np.empty((0, self.n_components)) if X.shape[1] != self.means_.shape[1]: raise ValueError('The shape of X is not compatible with self') lpr = (log_multivariate_normal_density(X, self.means_, self.covars_, self.covariance_type) + np.log(self.weights_)) logprob = logsumexp(lpr, axis=1) responsibilities = np.exp(lpr - logprob[:, np.newaxis]) return logprob, responsibilities
[ "def", "score_samples", "(", "self", ",", "X", ")", ":", "check_is_fitted", "(", "self", ",", "'means_'", ")", "X", "=", "check_array", "(", "X", ")", "if", "X", ".", "ndim", "==", "1", ":", "X", "=", "X", "[", ":", ",", "np", ".", "newaxis", "...
Return the per-sample likelihood of the data under the model. Compute the log probability of X under the model and return the posterior distribution (responsibilities) of each mixture component for each element of X. Parameters ---------- X: array_like, shape (n_samples, n_features) List of n_features-dimensional data points. Each row corresponds to a single data point. Returns ------- logprob : array_like, shape (n_samples,) Log probabilities of each data point in X. responsibilities : array_like, shape (n_samples, n_components) Posterior probabilities of each mixture component for each observation
[ "Return", "the", "per", "-", "sample", "likelihood", "of", "the", "data", "under", "the", "model", "." ]
python
train
37.763158
secdev/scapy
scapy/layers/tls/record.py
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/layers/tls/record.py#L543-L555
def _tls_auth_encrypt(self, s): """ Return the TLSCiphertext.fragment for AEAD ciphers, i.e. the whole GenericAEADCipher. Also, the additional data is computed right here. """ write_seq_num = struct.pack("!Q", self.tls_session.wcs.seq_num) self.tls_session.wcs.seq_num += 1 add_data = (write_seq_num + pkcs_i2osp(self.type, 1) + pkcs_i2osp(self.version, 2) + pkcs_i2osp(len(s), 2)) return self.tls_session.wcs.cipher.auth_encrypt(s, add_data, write_seq_num)
[ "def", "_tls_auth_encrypt", "(", "self", ",", "s", ")", ":", "write_seq_num", "=", "struct", ".", "pack", "(", "\"!Q\"", ",", "self", ".", "tls_session", ".", "wcs", ".", "seq_num", ")", "self", ".", "tls_session", ".", "wcs", ".", "seq_num", "+=", "1"...
Return the TLSCiphertext.fragment for AEAD ciphers, i.e. the whole GenericAEADCipher. Also, the additional data is computed right here.
[ "Return", "the", "TLSCiphertext", ".", "fragment", "for", "AEAD", "ciphers", "i", ".", "e", ".", "the", "whole", "GenericAEADCipher", ".", "Also", "the", "additional", "data", "is", "computed", "right", "here", "." ]
python
train
48.076923
pypa/pipenv
pipenv/vendor/cerberus/errors.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/cerberus/errors.py#L407-L431
def encode_unicode(f): """Cerberus error messages expect regular binary strings. If unicode is used in a ValidationError message can't be printed. This decorator ensures that if legacy Python is used unicode strings are encoded before passing to a function. """ @wraps(f) def wrapped(obj, error): def _encode(value): """Helper encoding unicode strings into binary utf-8""" if isinstance(value, unicode): # noqa: F821 return value.encode('utf-8') return value error = copy(error) error.document_path = _encode(error.document_path) error.schema_path = _encode(error.schema_path) error.constraint = _encode(error.constraint) error.value = _encode(error.value) error.info = _encode(error.info) return f(obj, error) return wrapped if PYTHON_VERSION < 3 else f
[ "def", "encode_unicode", "(", "f", ")", ":", "@", "wraps", "(", "f", ")", "def", "wrapped", "(", "obj", ",", "error", ")", ":", "def", "_encode", "(", "value", ")", ":", "\"\"\"Helper encoding unicode strings into binary utf-8\"\"\"", "if", "isinstance", "(", ...
Cerberus error messages expect regular binary strings. If unicode is used in a ValidationError message can't be printed. This decorator ensures that if legacy Python is used unicode strings are encoded before passing to a function.
[ "Cerberus", "error", "messages", "expect", "regular", "binary", "strings", ".", "If", "unicode", "is", "used", "in", "a", "ValidationError", "message", "can", "t", "be", "printed", "." ]
python
train
35.32
BernardFW/bernard
src/bernard/storage/register/redis.py
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/storage/register/redis.py#L82-L88
async def _replace(self, key: Text, data: Dict[Text, Any]) -> None: """ Replace the register with a new value. """ with await self.pool as r: await r.set(self.register_key(key), ujson.dumps(data))
[ "async", "def", "_replace", "(", "self", ",", "key", ":", "Text", ",", "data", ":", "Dict", "[", "Text", ",", "Any", "]", ")", "->", "None", ":", "with", "await", "self", ".", "pool", "as", "r", ":", "await", "r", ".", "set", "(", "self", ".", ...
Replace the register with a new value.
[ "Replace", "the", "register", "with", "a", "new", "value", "." ]
python
train
33.571429
Jaymon/prom
prom/cli/generate.py
https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/cli/generate.py#L50-L84
def main_generate(table_names, stream): """This will print out valid prom python code for given tables that already exist in a database. This is really handy when you want to bootstrap an existing database to work with prom and don't want to manually create Orm objects for the tables you want to use, let `generate` do it for you """ with stream.open() as fp: fp.write_line("from datetime import datetime, date") fp.write_line("from decimal import Decimal") fp.write_line("from prom import Orm, Field") fp.write_newlines() for table_name, inter, fields in get_table_info(*table_names): fp.write_line("class {}(Orm):".format(table_name.title().replace("_", ""))) fp.write_line(" table_name = '{}'".format(table_name)) if inter.connection_config.name: fp.write_line(" connection_name = '{}'".format(inter.connection_config.name)) fp.write_newlines() magic_field_names = set(["_id", "_created", "_updated"]) if "_id" in fields: fp.write_line(get_field_def("_id", fields.pop("_id"))) magic_field_names.discard("_id") for field_name, field_d in fields.items(): fp.write_line(get_field_def(field_name, field_d)) for magic_field_name in magic_field_names: if magic_field_name not in fields: fp.write_line(" {} = None".format(magic_field_name)) fp.write_newlines(2)
[ "def", "main_generate", "(", "table_names", ",", "stream", ")", ":", "with", "stream", ".", "open", "(", ")", "as", "fp", ":", "fp", ".", "write_line", "(", "\"from datetime import datetime, date\"", ")", "fp", ".", "write_line", "(", "\"from decimal import Deci...
This will print out valid prom python code for given tables that already exist in a database. This is really handy when you want to bootstrap an existing database to work with prom and don't want to manually create Orm objects for the tables you want to use, let `generate` do it for you
[ "This", "will", "print", "out", "valid", "prom", "python", "code", "for", "given", "tables", "that", "already", "exist", "in", "a", "database", "." ]
python
train
43.314286
Karaage-Cluster/python-tldap
tldap/database/__init__.py
https://github.com/Karaage-Cluster/python-tldap/blob/61f1af74a3648cb6491e7eeb1ee2eb395d67bf59/tldap/database/__init__.py#L448-L475
def search(table: LdapObjectClass, query: Optional[Q] = None, database: Optional[Database] = None, base_dn: Optional[str] = None) -> Iterator[LdapObject]: """ Search for a object of given type in the database. """ fields = table.get_fields() db_fields = { name: field for name, field in fields.items() if field.db_field } database = get_database(database) connection = database.connection search_options = table.get_search_options(database) iterator = tldap.query.search( connection=connection, query=query, fields=db_fields, base_dn=base_dn or search_options.base_dn, object_classes=search_options.object_class, pk=search_options.pk_field, ) for dn, data in iterator: python_data = _db_to_python(data, table, dn) python_data = table.on_load(python_data, database) yield python_data
[ "def", "search", "(", "table", ":", "LdapObjectClass", ",", "query", ":", "Optional", "[", "Q", "]", "=", "None", ",", "database", ":", "Optional", "[", "Database", "]", "=", "None", ",", "base_dn", ":", "Optional", "[", "str", "]", "=", "None", ")",...
Search for a object of given type in the database.
[ "Search", "for", "a", "object", "of", "given", "type", "in", "the", "database", "." ]
python
train
32.357143
Neurita/boyle
boyle/dicom/convert.py
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/dicom/convert.py#L44-L102
def add_meta_to_nii(nii_file, dicom_file, dcm_tags=''): """ Add slice duration and acquisition times to the headers of the nifit1 files in `nii_file`. It will add the repetition time of the DICOM file (field: {0x0018, 0x0080, DS, Repetition Time}) to the NifTI file as well as any other tag in `dcm_tags`. All selected DICOM tags values are set in the `descrip` nifti header field. Note that this will modify the header content of `nii_file`. Parameters ---------- nii_files: str Path to the NifTI file to modify. dicom_file: str Paths to the DICOM file from where to get the meta data. dcm_tags: list of str List of tags from the DICOM file to read and store in the nifti file. """ # Load a dicom image dcmimage = dicom.read_file(dicom_file) # Load the nifti1 image image = nibabel.load(nii_file) # Check the we have a nifti1 format image if not isinstance(image, nibabel.nifti1.Nifti1Image): raise Exception( "Only Nifti1 image are supported not '{0}'.".format( type(image))) # check if dcm_tags is one string, if yes put it in a list: if isinstance(dcm_tags, str): dcm_tags = [dcm_tags] # Fill the nifti1 header header = image.get_header() # slice_duration: Time for 1 slice repetition_time = float(dcmimage[("0x0018", "0x0080")].value) header.set_dim_info(slice=2) nb_slices = header.get_n_slices() # Force round to 0 digit after coma. If more, nibabel completes to # 6 digits with random numbers... slice_duration = round(repetition_time / nb_slices, 0) header.set_slice_duration(slice_duration) # add free dicom fields if dcm_tags: content = ["{0}={1}".format(name, dcmimage[tag].value) for name, tag in dcm_tags] free_field = numpy.array(";".join(content), dtype=header["descrip"].dtype) image.get_header()["descrip"] = free_field # Update the image header image.update_header() # Save the filled image nibabel.save(image, nii_file)
[ "def", "add_meta_to_nii", "(", "nii_file", ",", "dicom_file", ",", "dcm_tags", "=", "''", ")", ":", "# Load a dicom image", "dcmimage", "=", "dicom", ".", "read_file", "(", "dicom_file", ")", "# Load the nifti1 image", "image", "=", "nibabel", ".", "load", "(", ...
Add slice duration and acquisition times to the headers of the nifit1 files in `nii_file`. It will add the repetition time of the DICOM file (field: {0x0018, 0x0080, DS, Repetition Time}) to the NifTI file as well as any other tag in `dcm_tags`. All selected DICOM tags values are set in the `descrip` nifti header field. Note that this will modify the header content of `nii_file`. Parameters ---------- nii_files: str Path to the NifTI file to modify. dicom_file: str Paths to the DICOM file from where to get the meta data. dcm_tags: list of str List of tags from the DICOM file to read and store in the nifti file.
[ "Add", "slice", "duration", "and", "acquisition", "times", "to", "the", "headers", "of", "the", "nifit1", "files", "in", "nii_file", ".", "It", "will", "add", "the", "repetition", "time", "of", "the", "DICOM", "file", "(", "field", ":", "{", "0x0018", "0...
python
valid
35.186441
saltstack/salt
salt/modules/pushbullet.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/pushbullet.py#L65-L84
def push_note(device=None, title=None, body=None): ''' Pushing a text note. :param device: Pushbullet target device :param title: Note title :param body: Note body :return: Boolean if message was sent successfully. CLI Example: .. code-block:: bash salt "*" pushbullet.push_note device="Chrome" title="Example title" body="Example body." ''' spb = _SaltPushbullet(device) res = spb.push_note(title, body) return res
[ "def", "push_note", "(", "device", "=", "None", ",", "title", "=", "None", ",", "body", "=", "None", ")", ":", "spb", "=", "_SaltPushbullet", "(", "device", ")", "res", "=", "spb", ".", "push_note", "(", "title", ",", "body", ")", "return", "res" ]
Pushing a text note. :param device: Pushbullet target device :param title: Note title :param body: Note body :return: Boolean if message was sent successfully. CLI Example: .. code-block:: bash salt "*" pushbullet.push_note device="Chrome" title="Example title" body="Example body."
[ "Pushing", "a", "text", "note", "." ]
python
train
23.9
raphaelm/django-hierarkey
hierarkey/forms.py
https://github.com/raphaelm/django-hierarkey/blob/3ca822f94fa633c9a6d5abe9c80cb1551299ae46/hierarkey/forms.py#L30-L64
def save(self) -> None: """ Saves all changed values to the database. """ for name, field in self.fields.items(): value = self.cleaned_data[name] if isinstance(value, UploadedFile): # Delete old file fname = self._s.get(name, as_type=File) if fname: try: default_storage.delete(fname.name) except OSError: # pragma: no cover logger.error('Deleting file %s failed.' % fname.name) # Create new file newname = default_storage.save(self.get_new_filename(value.name), value) value._name = newname self._s.set(name, value) elif isinstance(value, File): # file is unchanged continue elif isinstance(field, forms.FileField): # file is deleted fname = self._s.get(name, as_type=File) if fname: try: default_storage.delete(fname.name) except OSError: # pragma: no cover logger.error('Deleting file %s failed.' % fname.name) del self._s[name] elif value is None: del self._s[name] elif self._s.get(name, as_type=type(value)) != value: self._s.set(name, value)
[ "def", "save", "(", "self", ")", "->", "None", ":", "for", "name", ",", "field", "in", "self", ".", "fields", ".", "items", "(", ")", ":", "value", "=", "self", ".", "cleaned_data", "[", "name", "]", "if", "isinstance", "(", "value", ",", "Uploaded...
Saves all changed values to the database.
[ "Saves", "all", "changed", "values", "to", "the", "database", "." ]
python
train
41.057143
ktbyers/netmiko
netmiko/base_connection.py
https://github.com/ktbyers/netmiko/blob/54e6116c0b4664de2123081937e0a9a27bdfdfea/netmiko/base_connection.py#L996-L1016
def set_terminal_width(self, command="", delay_factor=1): """CLI terminals try to automatically adjust the line based on the width of the terminal. This causes the output to get distorted when accessed programmatically. Set terminal width to 511 which works on a broad set of devices. :param command: Command string to send to the device :type command: str :param delay_factor: See __init__: global_delay_factor :type delay_factor: int """ if not command: return "" delay_factor = self.select_delay_factor(delay_factor) command = self.normalize_cmd(command) self.write_channel(command) output = self.read_until_prompt() if self.ansi_escape_codes: output = self.strip_ansi_escape_codes(output) return output
[ "def", "set_terminal_width", "(", "self", ",", "command", "=", "\"\"", ",", "delay_factor", "=", "1", ")", ":", "if", "not", "command", ":", "return", "\"\"", "delay_factor", "=", "self", ".", "select_delay_factor", "(", "delay_factor", ")", "command", "=", ...
CLI terminals try to automatically adjust the line based on the width of the terminal. This causes the output to get distorted when accessed programmatically. Set terminal width to 511 which works on a broad set of devices. :param command: Command string to send to the device :type command: str :param delay_factor: See __init__: global_delay_factor :type delay_factor: int
[ "CLI", "terminals", "try", "to", "automatically", "adjust", "the", "line", "based", "on", "the", "width", "of", "the", "terminal", ".", "This", "causes", "the", "output", "to", "get", "distorted", "when", "accessed", "programmatically", "." ]
python
train
39.666667
saltstack/salt
salt/utils/args.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/args.py#L72-L88
def condition_input(args, kwargs): ''' Return a single arg structure for the publisher to safely use ''' ret = [] for arg in args: if (six.PY3 and isinstance(arg, six.integer_types) and salt.utils.jid.is_jid(six.text_type(arg))) or \ (six.PY2 and isinstance(arg, long)): # pylint: disable=incompatible-py3-code,undefined-variable ret.append(six.text_type(arg)) else: ret.append(arg) if isinstance(kwargs, dict) and kwargs: kw_ = {'__kwarg__': True} for key, val in six.iteritems(kwargs): kw_[key] = val return ret + [kw_] return ret
[ "def", "condition_input", "(", "args", ",", "kwargs", ")", ":", "ret", "=", "[", "]", "for", "arg", "in", "args", ":", "if", "(", "six", ".", "PY3", "and", "isinstance", "(", "arg", ",", "six", ".", "integer_types", ")", "and", "salt", ".", "utils"...
Return a single arg structure for the publisher to safely use
[ "Return", "a", "single", "arg", "structure", "for", "the", "publisher", "to", "safely", "use" ]
python
train
37
twilio/twilio-python
twilio/rest/taskrouter/v1/workspace/workflow/workflow_cumulative_statistics.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/taskrouter/v1/workspace/workflow/workflow_cumulative_statistics.py#L93-L107
def get_instance(self, payload): """ Build an instance of WorkflowCumulativeStatisticsInstance :param dict payload: Payload response from the API :returns: twilio.rest.taskrouter.v1.workspace.workflow.workflow_cumulative_statistics.WorkflowCumulativeStatisticsInstance :rtype: twilio.rest.taskrouter.v1.workspace.workflow.workflow_cumulative_statistics.WorkflowCumulativeStatisticsInstance """ return WorkflowCumulativeStatisticsInstance( self._version, payload, workspace_sid=self._solution['workspace_sid'], workflow_sid=self._solution['workflow_sid'], )
[ "def", "get_instance", "(", "self", ",", "payload", ")", ":", "return", "WorkflowCumulativeStatisticsInstance", "(", "self", ".", "_version", ",", "payload", ",", "workspace_sid", "=", "self", ".", "_solution", "[", "'workspace_sid'", "]", ",", "workflow_sid", "...
Build an instance of WorkflowCumulativeStatisticsInstance :param dict payload: Payload response from the API :returns: twilio.rest.taskrouter.v1.workspace.workflow.workflow_cumulative_statistics.WorkflowCumulativeStatisticsInstance :rtype: twilio.rest.taskrouter.v1.workspace.workflow.workflow_cumulative_statistics.WorkflowCumulativeStatisticsInstance
[ "Build", "an", "instance", "of", "WorkflowCumulativeStatisticsInstance" ]
python
train
43.733333
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/tailf_confd_monitoring.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/tailf_confd_monitoring.py#L698-L710
def confd_state_internal_callpoints_validationpoint_id(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") confd_state = ET.SubElement(config, "confd-state", xmlns="http://tail-f.com/yang/confd-monitoring") internal = ET.SubElement(confd_state, "internal") callpoints = ET.SubElement(internal, "callpoints") validationpoint = ET.SubElement(callpoints, "validationpoint") id = ET.SubElement(validationpoint, "id") id.text = kwargs.pop('id') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "confd_state_internal_callpoints_validationpoint_id", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "confd_state", "=", "ET", ".", "SubElement", "(", "config", ",", "\"confd-state\"", ",", "...
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
47.076923
pyQode/pyqode.core
pyqode/core/modes/right_margin.py
https://github.com/pyQode/pyqode.core/blob/a99ec6cd22d519394f613309412f8329dc4e90cb/pyqode/core/modes/right_margin.py#L72-L83
def _paint_margin(self, event): """ Paints the right margin after editor paint event. """ font = QtGui.QFont(self.editor.font_name, self.editor.font_size + self.editor.zoom_level) metrics = QtGui.QFontMetricsF(font) pos = self._margin_pos offset = self.editor.contentOffset().x() + \ self.editor.document().documentMargin() x80 = round(metrics.width(' ') * pos) + offset painter = QtGui.QPainter(self.editor.viewport()) painter.setPen(self._pen) painter.drawLine(x80, 0, x80, 2 ** 16)
[ "def", "_paint_margin", "(", "self", ",", "event", ")", ":", "font", "=", "QtGui", ".", "QFont", "(", "self", ".", "editor", ".", "font_name", ",", "self", ".", "editor", ".", "font_size", "+", "self", ".", "editor", ".", "zoom_level", ")", "metrics", ...
Paints the right margin after editor paint event.
[ "Paints", "the", "right", "margin", "after", "editor", "paint", "event", "." ]
python
train
48.666667
twitterdev/twitter-python-ads-sdk
twitter_ads/audience.py
https://github.com/twitterdev/twitter-python-ads-sdk/blob/b4488333ac2a794b85b7f16ded71e98b60e51c74/twitter_ads/audience.py#L116-L136
def save(self): """ Saves or updates the current tailored audience permission. """ if self.id: method = 'put' resource = self.RESOURCE.format( account_id=self.account.id, tailored_audience_id=self.tailored_audience_id, id=self.id) else: method = 'post' resource = self.RESOURCE_COLLECTION.format( account_id=self.account.id, tailored_audience_id=self.tailored_audience_id) response = Request( self.account.client, method, resource, params=self.to_params()).perform() return self.from_response(response.body['data'])
[ "def", "save", "(", "self", ")", ":", "if", "self", ".", "id", ":", "method", "=", "'put'", "resource", "=", "self", ".", "RESOURCE", ".", "format", "(", "account_id", "=", "self", ".", "account", ".", "id", ",", "tailored_audience_id", "=", "self", ...
Saves or updates the current tailored audience permission.
[ "Saves", "or", "updates", "the", "current", "tailored", "audience", "permission", "." ]
python
train
33.571429
gcrahay/python-wer
src/wer/schema.py
https://github.com/gcrahay/python-wer/blob/fad6bc4e379ec96a9483d32079098f19dfff1be5/src/wer/schema.py#L186-L199
def id(self): """ Computes the signature of the record, a SHA-512 of significant values :return: SHa-512 Hex string """ h = hashlib.new('sha512') for value in (self.machine.name, self.machine.os, self.user, self.application.name, self.application.path, self.event.report_type, self.event.type, self.event.time.isoformat()): h.update(str(value).encode('utf-8')) for parameter in sorted(self.parameters, key=lambda k: getattr(k, 'id')): h.update(parameter.value.encode('utf-8')) return h.hexdigest()
[ "def", "id", "(", "self", ")", ":", "h", "=", "hashlib", ".", "new", "(", "'sha512'", ")", "for", "value", "in", "(", "self", ".", "machine", ".", "name", ",", "self", ".", "machine", ".", "os", ",", "self", ".", "user", ",", "self", ".", "appl...
Computes the signature of the record, a SHA-512 of significant values :return: SHa-512 Hex string
[ "Computes", "the", "signature", "of", "the", "record", "a", "SHA", "-", "512", "of", "significant", "values" ]
python
train
44.071429
openstack/horizon
openstack_dashboard/management/commands/migrate_settings.py
https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_dashboard/management/commands/migrate_settings.py#L164-L220
def patch(self, force=False): """Patch local_settings.py.example with local_settings.diff. The patch application generates the local_settings.py file (the local_settings.py.example remains unchanged). http://github.com/sitkatech/pypatch fails if the local_settings.py.example file is not 100% identical to the one used to generate the first diff so we use the patch command instead. """ with DirContext(self.local_settings_dir) as dircontext: if os.path.exists(self.local_settings_diff): if not os.path.exists(self.local_settings_file) or force: local_settings_reject = \ self.local_settings_reject_pattern % ( time.strftime(self.file_time_fmt, time.localtime()) ) patch_cmd = shlex.split( 'patch %s %s -o %s -r %s' % ( self.local_settings_example, self.local_settings_diff, self.local_settings_file, local_settings_reject ) ) try: subprocess.check_call(patch_cmd) except subprocess.CalledProcessError: if os.path.exists(local_settings_reject): sys.exit( 'Some conflict(s) occurred. Please check "%s" ' 'to find unapplied parts of the diff.\n' 'Once conflicts are solved, it is safer to ' 'regenerate a newer diff with the "--gendiff" ' 'option.' % os.path.join( dircontext.curdir, local_settings_reject) ) else: sys.exit('An unhandled error occurred.') print('Generation of "%s" successful.' % os.path.join( dircontext.curdir, self.local_settings_file) ) sys.exit(0) else: sys.exit( '"%s" already exists.' % os.path.join(dircontext.curdir, self.local_settings_file) ) else: sys.exit('No diff file found, please generate one with the ' '"--gendiff" option.')
[ "def", "patch", "(", "self", ",", "force", "=", "False", ")", ":", "with", "DirContext", "(", "self", ".", "local_settings_dir", ")", "as", "dircontext", ":", "if", "os", ".", "path", ".", "exists", "(", "self", ".", "local_settings_diff", ")", ":", "i...
Patch local_settings.py.example with local_settings.diff. The patch application generates the local_settings.py file (the local_settings.py.example remains unchanged). http://github.com/sitkatech/pypatch fails if the local_settings.py.example file is not 100% identical to the one used to generate the first diff so we use the patch command instead.
[ "Patch", "local_settings", ".", "py", ".", "example", "with", "local_settings", ".", "diff", "." ]
python
train
46.596491
bpsmith/tia
tia/analysis/model/ins.py
https://github.com/bpsmith/tia/blob/a7043b6383e557aeea8fc7112bbffd6e36a230e9/tia/analysis/model/ins.py#L30-L34
def total_return(self): """http://en.wikipedia.org/wiki/Total_shareholder_return - mimics bloomberg total return""" pxend = self.close pxstart = pxend.shift(1).bfill() return (1. + (pxend - pxstart + self.dvds.fillna(0)) / pxstart).cumprod() - 1
[ "def", "total_return", "(", "self", ")", ":", "pxend", "=", "self", ".", "close", "pxstart", "=", "pxend", ".", "shift", "(", "1", ")", ".", "bfill", "(", ")", "return", "(", "1.", "+", "(", "pxend", "-", "pxstart", "+", "self", ".", "dvds", ".",...
http://en.wikipedia.org/wiki/Total_shareholder_return - mimics bloomberg total return
[ "http", ":", "//", "en", ".", "wikipedia", ".", "org", "/", "wiki", "/", "Total_shareholder_return", "-", "mimics", "bloomberg", "total", "return" ]
python
train
54.6
welbornprod/colr
colr/colr.py
https://github.com/welbornprod/colr/blob/417117fdbddbc53142096685ac2af006b2bd0220/colr/colr.py#L400-L405
def get_all_names() -> Tuple[str]: """ Retrieve a tuple of all known color names, basic and 'known names'. """ names = list(basic_names) names.extend(name_data) return tuple(sorted(set(names)))
[ "def", "get_all_names", "(", ")", "->", "Tuple", "[", "str", "]", ":", "names", "=", "list", "(", "basic_names", ")", "names", ".", "extend", "(", "name_data", ")", "return", "tuple", "(", "sorted", "(", "set", "(", "names", ")", ")", ")" ]
Retrieve a tuple of all known color names, basic and 'known names'.
[ "Retrieve", "a", "tuple", "of", "all", "known", "color", "names", "basic", "and", "known", "names", "." ]
python
train
34.666667
googleads/googleads-python-lib
examples/adwords/v201809/shopping/add_smart_shopping_ad.py
https://github.com/googleads/googleads-python-lib/blob/aa3b1b474b0f9789ca55ca46f4b2b57aeae38874/examples/adwords/v201809/shopping/add_smart_shopping_ad.py#L132-L162
def CreateSmartShoppingAdGroup(client, campaign_id): """Adds a new Smart Shopping ad group. Args: client: an AdWordsClient instance. campaign_id: the str ID of a Smart Shopping campaign. Returns: An ad group ID. """ ad_group_service = client.GetService('AdGroupService', version='v201809') # Create the ad group. ad_group = { 'campaignId': campaign_id, 'name': 'Smart Shopping ad group #%s' % uuid.uuid4(), # Set the ad group type to SHOPPING_GOAL_OPTIMIZED_ADS. 'adGroupType': 'SHOPPING_GOAL_OPTIMIZED_ADS' } adgroup_operations = { 'operator': 'ADD', 'operand': ad_group } # Make the mutate request to add the AdGroup to the Smart Shopping campaign. ad_group = ad_group_service.mutate(adgroup_operations)['value'][0] ad_group_id = ad_group['id'] print ('AdGroup with name "%s" and ID "%s" was added.' % (ad_group['name'], ad_group_id)) return ad_group_id
[ "def", "CreateSmartShoppingAdGroup", "(", "client", ",", "campaign_id", ")", ":", "ad_group_service", "=", "client", ".", "GetService", "(", "'AdGroupService'", ",", "version", "=", "'v201809'", ")", "# Create the ad group.", "ad_group", "=", "{", "'campaignId'", ":...
Adds a new Smart Shopping ad group. Args: client: an AdWordsClient instance. campaign_id: the str ID of a Smart Shopping campaign. Returns: An ad group ID.
[ "Adds", "a", "new", "Smart", "Shopping", "ad", "group", "." ]
python
train
29.580645
MatiasSM/fcb
fcb/processing/transformations/ToImage.py
https://github.com/MatiasSM/fcb/blob/92a6c535287ea1c1ef986954a5d66e7905fb6221/fcb/processing/transformations/ToImage.py#L83-L93
def do_heavy_work(self, block): """ Note: Expects Compressor Block like objects """ src_file_path = block.latest_file_info.path img_path = src_file_path + self.get_extension() self.log.debug("Converting file '%s' to image '%s'", src_file_path, img_path) from_file_to_image(src_file_path, img_path) block.image_converted_file_info = FileInfo(img_path) block.latest_file_info = block.image_converted_file_info return block
[ "def", "do_heavy_work", "(", "self", ",", "block", ")", ":", "src_file_path", "=", "block", ".", "latest_file_info", ".", "path", "img_path", "=", "src_file_path", "+", "self", ".", "get_extension", "(", ")", "self", ".", "log", ".", "debug", "(", "\"Conve...
Note: Expects Compressor Block like objects
[ "Note", ":", "Expects", "Compressor", "Block", "like", "objects" ]
python
train
44.545455
chorsley/python-Wappalyzer
Wappalyzer/Wappalyzer.py
https://github.com/chorsley/python-Wappalyzer/blob/b785e29f12c8032c54279cfa9ce01ead702a386c/Wappalyzer/Wappalyzer.py#L244-L252
def get_categories(self, app_name): """ Returns a list of the categories for an app name. """ cat_nums = self.apps.get(app_name, {}).get("cats", []) cat_names = [self.categories.get("%s" % cat_num, "") for cat_num in cat_nums] return cat_names
[ "def", "get_categories", "(", "self", ",", "app_name", ")", ":", "cat_nums", "=", "self", ".", "apps", ".", "get", "(", "app_name", ",", "{", "}", ")", ".", "get", "(", "\"cats\"", ",", "[", "]", ")", "cat_names", "=", "[", "self", ".", "categories...
Returns a list of the categories for an app name.
[ "Returns", "a", "list", "of", "the", "categories", "for", "an", "app", "name", "." ]
python
train
33.888889
HttpRunner/HttpRunner
httprunner/parser.py
https://github.com/HttpRunner/HttpRunner/blob/f259551bf9c8ba905eae5c1afcf2efea20ae0871/httprunner/parser.py#L757-L838
def _extend_with_api(test_dict, api_def_dict): """ extend test with api definition, test will merge and override api definition. Args: test_dict (dict): test block, this will override api_def_dict api_def_dict (dict): api definition Examples: >>> api_def_dict = { "name": "get token 1", "request": {...}, "validate": [{'eq': ['status_code', 200]}] } >>> test_dict = { "name": "get token 2", "extract": {"token": "content.token"}, "validate": [{'eq': ['status_code', 201]}, {'len_eq': ['content.token', 16]}] } >>> _extend_with_api(test_dict, api_def_dict) >>> print(test_dict) { "name": "get token 2", "request": {...}, "extract": {"token": "content.token"}, "validate": [{'eq': ['status_code', 201]}, {'len_eq': ['content.token', 16]}] } """ # override api name test_dict.setdefault("name", api_def_dict.pop("name", "api name undefined")) # override variables def_variables = api_def_dict.pop("variables", []) test_dict["variables"] = utils.extend_variables( def_variables, test_dict.get("variables", {}) ) # merge & override validators TODO: relocate def_raw_validators = api_def_dict.pop("validate", []) def_validators = [ validator.uniform_validator(_validator) for _validator in def_raw_validators ] ref_validators = test_dict.pop("validate", []) test_dict["validate"] = validator.extend_validators( def_validators, ref_validators ) # merge & override extractors def_extrators = api_def_dict.pop("extract", {}) test_dict["extract"] = utils.extend_variables( def_extrators, test_dict.get("extract", {}) ) # merge & override request test_dict["request"] = api_def_dict.pop("request", {}) # base_url & verify: priority api_def_dict > test_dict if api_def_dict.get("base_url"): test_dict["base_url"] = api_def_dict["base_url"] if "verify" in api_def_dict: test_dict["request"]["verify"] = api_def_dict["verify"] # merge & override setup_hooks def_setup_hooks = api_def_dict.pop("setup_hooks", []) ref_setup_hooks = test_dict.get("setup_hooks", []) extended_setup_hooks = list(set(def_setup_hooks + 
ref_setup_hooks)) if extended_setup_hooks: test_dict["setup_hooks"] = extended_setup_hooks # merge & override teardown_hooks def_teardown_hooks = api_def_dict.pop("teardown_hooks", []) ref_teardown_hooks = test_dict.get("teardown_hooks", []) extended_teardown_hooks = list(set(def_teardown_hooks + ref_teardown_hooks)) if extended_teardown_hooks: test_dict["teardown_hooks"] = extended_teardown_hooks # TODO: extend with other api definition items, e.g. times test_dict.update(api_def_dict)
[ "def", "_extend_with_api", "(", "test_dict", ",", "api_def_dict", ")", ":", "# override api name", "test_dict", ".", "setdefault", "(", "\"name\"", ",", "api_def_dict", ".", "pop", "(", "\"name\"", ",", "\"api name undefined\"", ")", ")", "# override variables", "de...
extend test with api definition, test will merge and override api definition. Args: test_dict (dict): test block, this will override api_def_dict api_def_dict (dict): api definition Examples: >>> api_def_dict = { "name": "get token 1", "request": {...}, "validate": [{'eq': ['status_code', 200]}] } >>> test_dict = { "name": "get token 2", "extract": {"token": "content.token"}, "validate": [{'eq': ['status_code', 201]}, {'len_eq': ['content.token', 16]}] } >>> _extend_with_api(test_dict, api_def_dict) >>> print(test_dict) { "name": "get token 2", "request": {...}, "extract": {"token": "content.token"}, "validate": [{'eq': ['status_code', 201]}, {'len_eq': ['content.token', 16]}] }
[ "extend", "test", "with", "api", "definition", "test", "will", "merge", "and", "override", "api", "definition", "." ]
python
train
35.646341
shad7/tvrenamer
tvrenamer/services/tvdb.py
https://github.com/shad7/tvrenamer/blob/7fb59cb02669357e73b7acb92dcb6d74fdff4654/tvrenamer/services/tvdb.py#L81-L108
def get_episode_name(self, series, episode_numbers, season_number): """Perform lookup for name of episode numbers for a given series. :param object series: instance of a series :param list episode_numbers: the episode sequence number :param int season_number: numeric season of series :returns: list of episode name :rtype: list(str) """ try: episodes = self.api.get_episodes(series.get('id'), airedSeason=season_number) except exceptions.TVDBRequestException as err: LOG.exception('episodes for series %s season no %s failed', series.get('id'), season_number) return None, _as_str(err) epnames = [] for epno in episode_numbers: epname = _get_epname(episodes, epno) if epname is None: epname = _get_epname(episodes, epno, absolute=True) if epname is None: return None, None epnames.append(epname) return epnames, None
[ "def", "get_episode_name", "(", "self", ",", "series", ",", "episode_numbers", ",", "season_number", ")", ":", "try", ":", "episodes", "=", "self", ".", "api", ".", "get_episodes", "(", "series", ".", "get", "(", "'id'", ")", ",", "airedSeason", "=", "se...
Perform lookup for name of episode numbers for a given series. :param object series: instance of a series :param list episode_numbers: the episode sequence number :param int season_number: numeric season of series :returns: list of episode name :rtype: list(str)
[ "Perform", "lookup", "for", "name", "of", "episode", "numbers", "for", "a", "given", "series", "." ]
python
train
38.75
obriencj/python-javatools
javatools/distinfo.py
https://github.com/obriencj/python-javatools/blob/9e2332b452ddc508bed0615937dddcb2cf051557/javatools/distinfo.py#L179-L188
def close(self): """ if this was a zip'd distribution, any introspection may have resulted in opening or creating temporary files. Call close in order to clean up. """ if self.tmpdir: rmtree(self.tmpdir) self.tmpdir = None self._contents = None
[ "def", "close", "(", "self", ")", ":", "if", "self", ".", "tmpdir", ":", "rmtree", "(", "self", ".", "tmpdir", ")", "self", ".", "tmpdir", "=", "None", "self", ".", "_contents", "=", "None" ]
if this was a zip'd distribution, any introspection may have resulted in opening or creating temporary files. Call close in order to clean up.
[ "if", "this", "was", "a", "zip", "d", "distribution", "any", "introspection", "may", "have", "resulted", "in", "opening", "or", "creating", "temporary", "files", ".", "Call", "close", "in", "order", "to", "clean", "up", "." ]
python
train
30.1
bradmontgomery/django-redis-metrics
redis_metrics/models.py
https://github.com/bradmontgomery/django-redis-metrics/blob/2c92332920113d28c39234b949aa496b39a091d1/redis_metrics/models.py#L620-L624
def delete_gauge(self, slug): """Removes all gauges with the given ``slug``.""" key = self._gauge_key(slug) self.r.delete(key) # Remove the Gauge self.r.srem(self._gauge_slugs_key, slug)
[ "def", "delete_gauge", "(", "self", ",", "slug", ")", ":", "key", "=", "self", ".", "_gauge_key", "(", "slug", ")", "self", ".", "r", ".", "delete", "(", "key", ")", "# Remove the Gauge", "self", ".", "r", ".", "srem", "(", "self", ".", "_gauge_slugs...
Removes all gauges with the given ``slug``.
[ "Removes", "all", "gauges", "with", "the", "given", "slug", "." ]
python
train
43
google/google-visualization-python
gviz_api.py
https://github.com/google/google-visualization-python/blob/cbfb4d69ad2f4ca30dc55791629280aa3214c8e3/gviz_api.py#L1011-L1049
def ToJSonResponse(self, columns_order=None, order_by=(), req_id=0, response_handler="google.visualization.Query.setResponse"): """Writes a table as a JSON response that can be returned as-is to a client. This method writes a JSON response to return to a client in response to a Google Visualization API query. This string can be processed by the calling page, and is used to deliver a data table to a visualization hosted on a different page. Args: columns_order: Optional. Passed straight to self.ToJSon(). order_by: Optional. Passed straight to self.ToJSon(). req_id: Optional. The response id, as retrieved by the request. response_handler: Optional. The response handler, as retrieved by the request. Returns: A JSON response string to be received by JS the visualization Query object. This response would be translated into a DataTable on the client side. Example result (newlines added for readability): google.visualization.Query.setResponse({ 'version':'0.6', 'reqId':'0', 'status':'OK', 'table': {cols: [...], rows: [...]}}); Note: The URL returning this string can be used as a data source by Google Visualization Gadgets or from JS code. """ response_obj = { "version": "0.6", "reqId": str(req_id), "table": self._ToJSonObj(columns_order, order_by), "status": "ok" } encoded_response_str = DataTableJSONEncoder().encode(response_obj) if not isinstance(encoded_response_str, str): encoded_response_str = encoded_response_str.encode("utf-8") return "%s(%s);" % (response_handler, encoded_response_str)
[ "def", "ToJSonResponse", "(", "self", ",", "columns_order", "=", "None", ",", "order_by", "=", "(", ")", ",", "req_id", "=", "0", ",", "response_handler", "=", "\"google.visualization.Query.setResponse\"", ")", ":", "response_obj", "=", "{", "\"version\"", ":", ...
Writes a table as a JSON response that can be returned as-is to a client. This method writes a JSON response to return to a client in response to a Google Visualization API query. This string can be processed by the calling page, and is used to deliver a data table to a visualization hosted on a different page. Args: columns_order: Optional. Passed straight to self.ToJSon(). order_by: Optional. Passed straight to self.ToJSon(). req_id: Optional. The response id, as retrieved by the request. response_handler: Optional. The response handler, as retrieved by the request. Returns: A JSON response string to be received by JS the visualization Query object. This response would be translated into a DataTable on the client side. Example result (newlines added for readability): google.visualization.Query.setResponse({ 'version':'0.6', 'reqId':'0', 'status':'OK', 'table': {cols: [...], rows: [...]}}); Note: The URL returning this string can be used as a data source by Google Visualization Gadgets or from JS code.
[ "Writes", "a", "table", "as", "a", "JSON", "response", "that", "can", "be", "returned", "as", "-", "is", "to", "a", "client", "." ]
python
train
43.282051
ulule/django-badgify
badgify/utils.py
https://github.com/ulule/django-badgify/blob/1bf233ffeb6293ee659454de7b3794682128b6ca/badgify/utils.py#L115-L120
def chunks(l, n): """ Yields successive n-sized chunks from l. """ for i in _range(0, len(l), n): yield l[i:i + n]
[ "def", "chunks", "(", "l", ",", "n", ")", ":", "for", "i", "in", "_range", "(", "0", ",", "len", "(", "l", ")", ",", "n", ")", ":", "yield", "l", "[", "i", ":", "i", "+", "n", "]" ]
Yields successive n-sized chunks from l.
[ "Yields", "successive", "n", "-", "sized", "chunks", "from", "l", "." ]
python
train
22.166667
wbond/asn1crypto
asn1crypto/core.py
https://github.com/wbond/asn1crypto/blob/ecda20176f55d37021cbca1f6da9083a8e491197/asn1crypto/core.py#L1482-L1491
def debug(self, nest_level=1): """ Show the binary data and parsed data in a tree structure """ prefix = ' ' * nest_level print('%s%s Object #%s' % (prefix, type_name(self), id(self))) print('%s Children:' % (prefix,)) for child in self._children: child.debug(nest_level + 2)
[ "def", "debug", "(", "self", ",", "nest_level", "=", "1", ")", ":", "prefix", "=", "' '", "*", "nest_level", "print", "(", "'%s%s Object #%s'", "%", "(", "prefix", ",", "type_name", "(", "self", ")", ",", "id", "(", "self", ")", ")", ")", "print", ...
Show the binary data and parsed data in a tree structure
[ "Show", "the", "binary", "data", "and", "parsed", "data", "in", "a", "tree", "structure" ]
python
train
33.7
echinopsii/net.echinopsii.ariane.community.cli.python3
ariane_clip3/injector.py
https://github.com/echinopsii/net.echinopsii.ariane.community.cli.python3/blob/0a7feddebf66fee4bef38d64f456d93a7e9fcd68/ariane_clip3/injector.py#L950-L963
def injector_gear_2_json(self): """ transform this local object to JSON. :return: the JSON from this local object """ LOGGER.debug("InjectorCachedGear.injector_gear_2_json") json_obj = { 'gearId': self.id, 'gearName': self.name, 'gearAdminQueue': self.admin_queue, 'gearDescription': self.description, 'running': 'true' if self.running else 'false' } return json_obj
[ "def", "injector_gear_2_json", "(", "self", ")", ":", "LOGGER", ".", "debug", "(", "\"InjectorCachedGear.injector_gear_2_json\"", ")", "json_obj", "=", "{", "'gearId'", ":", "self", ".", "id", ",", "'gearName'", ":", "self", ".", "name", ",", "'gearAdminQueue'",...
transform this local object to JSON. :return: the JSON from this local object
[ "transform", "this", "local", "object", "to", "JSON", ".", ":", "return", ":", "the", "JSON", "from", "this", "local", "object" ]
python
train
34.071429
h2oai/h2o-3
h2o-py/h2o/model/regression.py
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/model/regression.py#L56-L66
def h2o_mean_squared_error(y_actual, y_predicted, weights=None): """ Mean squared error regression loss :param y_actual: H2OFrame of actual response. :param y_predicted: H2OFrame of predicted response. :param weights: (Optional) sample weights :returns: mean squared error loss (best is 0.0). """ ModelBase._check_targets(y_actual, y_predicted) return _colmean((y_predicted - y_actual) ** 2)
[ "def", "h2o_mean_squared_error", "(", "y_actual", ",", "y_predicted", ",", "weights", "=", "None", ")", ":", "ModelBase", ".", "_check_targets", "(", "y_actual", ",", "y_predicted", ")", "return", "_colmean", "(", "(", "y_predicted", "-", "y_actual", ")", "**"...
Mean squared error regression loss :param y_actual: H2OFrame of actual response. :param y_predicted: H2OFrame of predicted response. :param weights: (Optional) sample weights :returns: mean squared error loss (best is 0.0).
[ "Mean", "squared", "error", "regression", "loss" ]
python
test
38
pymc-devs/pymc
pymc/distributions.py
https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L2467-L2501
def truncated_normal_expval(mu, tau, a, b): """Expected value of the truncated normal distribution. .. math:: E(X) =\mu + \frac{\sigma(\varphi_1-\varphi_2)}{T} where .. math:: T & =\Phi\left(\frac{B-\mu}{\sigma}\right)-\Phi \left(\frac{A-\mu}{\sigma}\right)\text \\ \varphi_1 &= \varphi\left(\frac{A-\mu}{\sigma}\right) \\ \varphi_2 &= \varphi\left(\frac{B-\mu}{\sigma}\right) \\ and :math:`\varphi = N(0,1)` and :math:`tau & 1/sigma**2`. :Parameters: - `mu` : Mean of the distribution. - `tau` : Precision of the distribution, which corresponds to 1/sigma**2 (tau > 0). - `a` : Left bound of the distribution. - `b` : Right bound of the distribution. """ phia = np.exp(normal_like(a, mu, tau)) phib = np.exp(normal_like(b, mu, tau)) sigma = 1. / np.sqrt(tau) Phia = utils.normcdf((a - mu) / sigma) if b == np.inf: Phib = 1.0 else: Phib = utils.normcdf((b - mu) / sigma) return (mu + (phia - phib) / (Phib - Phia))[0]
[ "def", "truncated_normal_expval", "(", "mu", ",", "tau", ",", "a", ",", "b", ")", ":", "phia", "=", "np", ".", "exp", "(", "normal_like", "(", "a", ",", "mu", ",", "tau", ")", ")", "phib", "=", "np", ".", "exp", "(", "normal_like", "(", "b", ",...
Expected value of the truncated normal distribution. .. math:: E(X) =\mu + \frac{\sigma(\varphi_1-\varphi_2)}{T} where .. math:: T & =\Phi\left(\frac{B-\mu}{\sigma}\right)-\Phi \left(\frac{A-\mu}{\sigma}\right)\text \\ \varphi_1 &= \varphi\left(\frac{A-\mu}{\sigma}\right) \\ \varphi_2 &= \varphi\left(\frac{B-\mu}{\sigma}\right) \\ and :math:`\varphi = N(0,1)` and :math:`tau & 1/sigma**2`. :Parameters: - `mu` : Mean of the distribution. - `tau` : Precision of the distribution, which corresponds to 1/sigma**2 (tau > 0). - `a` : Left bound of the distribution. - `b` : Right bound of the distribution.
[ "Expected", "value", "of", "the", "truncated", "normal", "distribution", "." ]
python
train
29.571429
nicolargo/glances
glances/plugins/glances_ports.py
https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/plugins/glances_ports.py#L104-L128
def get_ports_alert(self, port, header="", log=False): """Return the alert status relative to the port scan return value.""" ret = 'OK' if port['status'] is None: ret = 'CAREFUL' elif port['status'] == 0: ret = 'CRITICAL' elif (isinstance(port['status'], (float, int)) and port['rtt_warning'] is not None and port['status'] > port['rtt_warning']): ret = 'WARNING' # Get stat name stat_name = self.get_stat_name(header=header) # Manage threshold self.manage_threshold(stat_name, ret) # Manage action self.manage_action(stat_name, ret.lower(), header, port[self.get_key()]) return ret
[ "def", "get_ports_alert", "(", "self", ",", "port", ",", "header", "=", "\"\"", ",", "log", "=", "False", ")", ":", "ret", "=", "'OK'", "if", "port", "[", "'status'", "]", "is", "None", ":", "ret", "=", "'CAREFUL'", "elif", "port", "[", "'status'", ...
Return the alert status relative to the port scan return value.
[ "Return", "the", "alert", "status", "relative", "to", "the", "port", "scan", "return", "value", "." ]
python
train
32.12
jaraco/jaraco.functools
jaraco/functools.py
https://github.com/jaraco/jaraco.functools/blob/cc972095e5aa2ae80d1d69d7ca84ee94178e869a/jaraco/functools.py#L184-L211
def _special_method_cache(method, cache_wrapper): """ Because Python treats special methods differently, it's not possible to use instance attributes to implement the cached methods. Instead, install the wrapper method under a different name and return a simple proxy to that wrapper. https://github.com/jaraco/jaraco.functools/issues/5 """ name = method.__name__ special_names = '__getattr__', '__getitem__' if name not in special_names: return wrapper_name = '__cached' + name def proxy(self, *args, **kwargs): if wrapper_name not in vars(self): bound = types.MethodType(method, self) cache = cache_wrapper(bound) setattr(self, wrapper_name, cache) else: cache = getattr(self, wrapper_name) return cache(*args, **kwargs) return proxy
[ "def", "_special_method_cache", "(", "method", ",", "cache_wrapper", ")", ":", "name", "=", "method", ".", "__name__", "special_names", "=", "'__getattr__'", ",", "'__getitem__'", "if", "name", "not", "in", "special_names", ":", "return", "wrapper_name", "=", "'...
Because Python treats special methods differently, it's not possible to use instance attributes to implement the cached methods. Instead, install the wrapper method under a different name and return a simple proxy to that wrapper. https://github.com/jaraco/jaraco.functools/issues/5
[ "Because", "Python", "treats", "special", "methods", "differently", "it", "s", "not", "possible", "to", "use", "instance", "attributes", "to", "implement", "the", "cached", "methods", "." ]
python
train
26.678571
mahmoudimus/nose-timer
nosetimer/plugin.py
https://github.com/mahmoudimus/nose-timer/blob/3d8ff21ce3a68efd6cd018ea67c32f1da27ea3f9/nosetimer/plugin.py#L147-L165
def configure(self, options, config): """Configures the test timer plugin.""" super(TimerPlugin, self).configure(options, config) self.config = config if self.enabled: self.timer_top_n = int(options.timer_top_n) self.timer_ok = self._parse_time(options.timer_ok) self.timer_warning = self._parse_time(options.timer_warning) self.timer_filter = self._parse_filter(options.timer_filter) self.timer_fail = options.timer_fail self.timer_no_color = True self.json_file = options.json_file # Windows + nosetests does not support colors (even with colorama). if not IS_NT: self.timer_no_color = options.timer_no_color # determine if multiprocessing plugin enabled self.multiprocessing_enabled = bool(getattr(options, 'multiprocess_workers', False))
[ "def", "configure", "(", "self", ",", "options", ",", "config", ")", ":", "super", "(", "TimerPlugin", ",", "self", ")", ".", "configure", "(", "options", ",", "config", ")", "self", ".", "config", "=", "config", "if", "self", ".", "enabled", ":", "s...
Configures the test timer plugin.
[ "Configures", "the", "test", "timer", "plugin", "." ]
python
train
47.631579
fabioz/PyDev.Debugger
third_party/pep8/pycodestyle.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/third_party/pep8/pycodestyle.py#L1585-L1590
def run_check(self, check, argument_names): """Run a check plugin.""" arguments = [] for name in argument_names: arguments.append(getattr(self, name)) return check(*arguments)
[ "def", "run_check", "(", "self", ",", "check", ",", "argument_names", ")", ":", "arguments", "=", "[", "]", "for", "name", "in", "argument_names", ":", "arguments", ".", "append", "(", "getattr", "(", "self", ",", "name", ")", ")", "return", "check", "...
Run a check plugin.
[ "Run", "a", "check", "plugin", "." ]
python
train
35.666667
xolox/python-verboselogs
verboselogs/__init__.py
https://github.com/xolox/python-verboselogs/blob/3cebc69e03588bb6c3726c38c324b12732989292/verboselogs/__init__.py#L163-L166
def verbose(self, msg, *args, **kw): """Log a message with level :data:`VERBOSE`. The arguments are interpreted as for :func:`logging.debug()`.""" if self.isEnabledFor(VERBOSE): self._log(VERBOSE, msg, args, **kw)
[ "def", "verbose", "(", "self", ",", "msg", ",", "*", "args", ",", "*", "*", "kw", ")", ":", "if", "self", ".", "isEnabledFor", "(", "VERBOSE", ")", ":", "self", ".", "_log", "(", "VERBOSE", ",", "msg", ",", "args", ",", "*", "*", "kw", ")" ]
Log a message with level :data:`VERBOSE`. The arguments are interpreted as for :func:`logging.debug()`.
[ "Log", "a", "message", "with", "level", ":", "data", ":", "VERBOSE", ".", "The", "arguments", "are", "interpreted", "as", "for", ":", "func", ":", "logging", ".", "debug", "()", "." ]
python
train
59.5
Parisson/TimeSide
timeside/plugins/grapher/utils.py
https://github.com/Parisson/TimeSide/blob/0618d75cd2f16021afcfd3d5b77f692adad76ea5/timeside/plugins/grapher/utils.py#L168-L179
def im_watermark(im, inputtext, font=None, color=None, opacity=.6, margin=(30, 30)): """imprints a PIL image with the indicated text in lower-right corner""" if im.mode != "RGBA": im = im.convert("RGBA") textlayer = Image.new("RGBA", im.size, (0, 0, 0, 0)) textdraw = ImageDraw.Draw(textlayer) textsize = textdraw.textsize(inputtext, font=font) textpos = [im.size[i] - textsize[i] - margin[i] for i in [0, 1]] textdraw.text(textpos, inputtext, font=font, fill=color) if opacity != 1: textlayer = reduce_opacity(textlayer, opacity) return Image.composite(textlayer, im, textlayer)
[ "def", "im_watermark", "(", "im", ",", "inputtext", ",", "font", "=", "None", ",", "color", "=", "None", ",", "opacity", "=", ".6", ",", "margin", "=", "(", "30", ",", "30", ")", ")", ":", "if", "im", ".", "mode", "!=", "\"RGBA\"", ":", "im", "...
imprints a PIL image with the indicated text in lower-right corner
[ "imprints", "a", "PIL", "image", "with", "the", "indicated", "text", "in", "lower", "-", "right", "corner" ]
python
train
51.666667
bcb/jsonrpcserver
jsonrpcserver/dispatcher.py
https://github.com/bcb/jsonrpcserver/blob/26bb70e868f81691816cabfc4b60a83428842b2f/jsonrpcserver/dispatcher.py#L84-L96
def validate(request: Union[Dict, List], schema: dict) -> Union[Dict, List]: """ Wraps jsonschema.validate, returning the same object passed in. Args: request: The deserialized-from-json request. schema: The jsonschema schema to validate against. Raises: jsonschema.ValidationError """ jsonschema_validate(request, schema) return request
[ "def", "validate", "(", "request", ":", "Union", "[", "Dict", ",", "List", "]", ",", "schema", ":", "dict", ")", "->", "Union", "[", "Dict", ",", "List", "]", ":", "jsonschema_validate", "(", "request", ",", "schema", ")", "return", "request" ]
Wraps jsonschema.validate, returning the same object passed in. Args: request: The deserialized-from-json request. schema: The jsonschema schema to validate against. Raises: jsonschema.ValidationError
[ "Wraps", "jsonschema", ".", "validate", "returning", "the", "same", "object", "passed", "in", "." ]
python
train
29.153846
programa-stic/barf-project
barf/core/reil/emulator/cpu.py
https://github.com/programa-stic/barf-project/blob/18ed9e5eace55f7bf6015ec57f037c364099021c/barf/core/reil/emulator/cpu.py#L459-L468
def __execute_bisz(self, instr): """Execute BISZ instruction. """ op0_val = self.read_operand(instr.operands[0]) op2_val = 1 if op0_val == 0 else 0 self.write_operand(instr.operands[2], op2_val) return None
[ "def", "__execute_bisz", "(", "self", ",", "instr", ")", ":", "op0_val", "=", "self", ".", "read_operand", "(", "instr", ".", "operands", "[", "0", "]", ")", "op2_val", "=", "1", "if", "op0_val", "==", "0", "else", "0", "self", ".", "write_operand", ...
Execute BISZ instruction.
[ "Execute", "BISZ", "instruction", "." ]
python
train
24.8
Jajcus/pyxmpp2
pyxmpp2/transport.py
https://github.com/Jajcus/pyxmpp2/blob/14a40a3950910a9cd008b55f0d8905aa0186ce18/pyxmpp2/transport.py#L201-L206
def _set_state(self, state): """Set `_state` and notify any threads waiting for the change. """ logger.debug(" _set_state({0!r})".format(state)) self._state = state self._state_cond.notify()
[ "def", "_set_state", "(", "self", ",", "state", ")", ":", "logger", ".", "debug", "(", "\" _set_state({0!r})\"", ".", "format", "(", "state", ")", ")", "self", ".", "_state", "=", "state", "self", ".", "_state_cond", ".", "notify", "(", ")" ]
Set `_state` and notify any threads waiting for the change.
[ "Set", "_state", "and", "notify", "any", "threads", "waiting", "for", "the", "change", "." ]
python
valid
37.5
RedHatInsights/insights-core
insights/util/subproc.py
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/util/subproc.py#L165-L214
def call(cmd, timeout=None, signum=signal.SIGKILL, keep_rc=False, encoding="utf-8", env=os.environ): """ Execute a cmd or list of commands with an optional timeout in seconds. If `timeout` is supplied and expires, the process is killed with SIGKILL (kill -9) and an exception is raised. Otherwise, the command output is returned. Parameters ---------- cmd: str or [[str]] The command(s) to execute timeout: int Seconds before kill is issued to the process signum: int The signal number to issue to the process on timeout keep_rc: bool Whether to return the exit code along with the output encoding: str unicode decoding scheme to use. Default is "utf-8" env: dict The environment in which to execute commands. Default is os.environ Returns ------- str Content of stdout of cmd on success. Raises ------ CalledProcessError Raised when cmd fails """ if not isinstance(cmd, list): cmd = [cmd] p = Pipeline(*cmd, timeout=timeout, signum=signum, env=env) res = p(keep_rc=keep_rc) if keep_rc: rc, output = res output = output.decode(encoding, 'ignore') return rc, output return res.decode(encoding, "ignore")
[ "def", "call", "(", "cmd", ",", "timeout", "=", "None", ",", "signum", "=", "signal", ".", "SIGKILL", ",", "keep_rc", "=", "False", ",", "encoding", "=", "\"utf-8\"", ",", "env", "=", "os", ".", "environ", ")", ":", "if", "not", "isinstance", "(", ...
Execute a cmd or list of commands with an optional timeout in seconds. If `timeout` is supplied and expires, the process is killed with SIGKILL (kill -9) and an exception is raised. Otherwise, the command output is returned. Parameters ---------- cmd: str or [[str]] The command(s) to execute timeout: int Seconds before kill is issued to the process signum: int The signal number to issue to the process on timeout keep_rc: bool Whether to return the exit code along with the output encoding: str unicode decoding scheme to use. Default is "utf-8" env: dict The environment in which to execute commands. Default is os.environ Returns ------- str Content of stdout of cmd on success. Raises ------ CalledProcessError Raised when cmd fails
[ "Execute", "a", "cmd", "or", "list", "of", "commands", "with", "an", "optional", "timeout", "in", "seconds", "." ]
python
train
26.18
scraperwiki/data-services-helpers
dshelpers.py
https://github.com/scraperwiki/data-services-helpers/blob/a31ea2f40d20fd99d4c0938b87466330679db2c9/dshelpers.py#L109-L117
def download_url(url, back_off=True, **kwargs): """ Get the content of a URL and return a file-like object. back_off=True provides retry """ if back_off: return _download_with_backoff(url, as_file=True, **kwargs) else: return _download_without_backoff(url, as_file=True, **kwargs)
[ "def", "download_url", "(", "url", ",", "back_off", "=", "True", ",", "*", "*", "kwargs", ")", ":", "if", "back_off", ":", "return", "_download_with_backoff", "(", "url", ",", "as_file", "=", "True", ",", "*", "*", "kwargs", ")", "else", ":", "return",...
Get the content of a URL and return a file-like object. back_off=True provides retry
[ "Get", "the", "content", "of", "a", "URL", "and", "return", "a", "file", "-", "like", "object", ".", "back_off", "=", "True", "provides", "retry" ]
python
train
34.666667
pantsbuild/pants
src/python/pants/engine/rules.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/engine/rules.py#L417-L465
def create(cls, rule_entries, union_rules=None): """Creates a RuleIndex with tasks indexed by their output type.""" serializable_rules = OrderedDict() serializable_roots = OrderedSet() union_rules = OrderedDict(union_rules or ()) def add_task(product_type, rule): # TODO(#7311): make a defaultdict-like wrapper for OrderedDict if more widely used. if product_type not in serializable_rules: serializable_rules[product_type] = OrderedSet() serializable_rules[product_type].add(rule) def add_root_rule(root_rule): serializable_roots.add(root_rule) def add_rule(rule): if isinstance(rule, RootRule): add_root_rule(rule) else: add_task(rule.output_type, rule) for dep_rule in rule.dependency_rules: add_rule(dep_rule) def add_type_transition_rule(union_rule): # NB: This does not require that union bases be supplied to `def rules():`, as the union type # is never instantiated! union_base = union_rule.union_base assert union_base._is_union union_member = union_rule.union_member if union_base not in union_rules: union_rules[union_base] = OrderedSet() union_rules[union_base].add(union_member) for entry in rule_entries: if isinstance(entry, Rule): add_rule(entry) elif isinstance(entry, UnionRule): add_type_transition_rule(entry) elif hasattr(entry, '__call__'): rule = getattr(entry, 'rule', None) if rule is None: raise TypeError("Expected callable {} to be decorated with @rule.".format(entry)) add_rule(rule) else: raise TypeError("""\ Rule entry {} had an unexpected type: {}. Rules either extend Rule or UnionRule, or are static \ functions decorated with @rule.""".format(entry, type(entry))) return cls(serializable_rules, serializable_roots, union_rules)
[ "def", "create", "(", "cls", ",", "rule_entries", ",", "union_rules", "=", "None", ")", ":", "serializable_rules", "=", "OrderedDict", "(", ")", "serializable_roots", "=", "OrderedSet", "(", ")", "union_rules", "=", "OrderedDict", "(", "union_rules", "or", "("...
Creates a RuleIndex with tasks indexed by their output type.
[ "Creates", "a", "RuleIndex", "with", "tasks", "indexed", "by", "their", "output", "type", "." ]
python
train
38.102041
pallets/werkzeug
src/werkzeug/debug/__init__.py
https://github.com/pallets/werkzeug/blob/a220671d66755a94630a212378754bb432811158/src/werkzeug/debug/__init__.py#L357-L367
def get_resource(self, request, filename): """Return a static resource from the shared folder.""" filename = join("shared", basename(filename)) try: data = pkgutil.get_data(__package__, filename) except OSError: data = None if data is not None: mimetype = mimetypes.guess_type(filename)[0] or "application/octet-stream" return Response(data, mimetype=mimetype) return Response("Not Found", status=404)
[ "def", "get_resource", "(", "self", ",", "request", ",", "filename", ")", ":", "filename", "=", "join", "(", "\"shared\"", ",", "basename", "(", "filename", ")", ")", "try", ":", "data", "=", "pkgutil", ".", "get_data", "(", "__package__", ",", "filename...
Return a static resource from the shared folder.
[ "Return", "a", "static", "resource", "from", "the", "shared", "folder", "." ]
python
train
44.272727
astropy/photutils
photutils/segmentation/properties.py
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/segmentation/properties.py#L987-L1000
def covariance(self): """ The covariance matrix of the 2D Gaussian function that has the same second-order moments as the source. """ mu = self.moments_central if mu[0, 0] != 0: m = mu / mu[0, 0] covariance = self._check_covariance( np.array([[m[0, 2], m[1, 1]], [m[1, 1], m[2, 0]]])) return covariance * u.pix**2 else: return np.empty((2, 2)) * np.nan * u.pix**2
[ "def", "covariance", "(", "self", ")", ":", "mu", "=", "self", ".", "moments_central", "if", "mu", "[", "0", ",", "0", "]", "!=", "0", ":", "m", "=", "mu", "/", "mu", "[", "0", ",", "0", "]", "covariance", "=", "self", ".", "_check_covariance", ...
The covariance matrix of the 2D Gaussian function that has the same second-order moments as the source.
[ "The", "covariance", "matrix", "of", "the", "2D", "Gaussian", "function", "that", "has", "the", "same", "second", "-", "order", "moments", "as", "the", "source", "." ]
python
train
33.642857
rohankapoorcom/zm-py
zoneminder/monitor.py
https://github.com/rohankapoorcom/zm-py/blob/bd3a9f6b2f7b84b37589e2939f628b479a5531bf/zoneminder/monitor.py#L163-L192
def get_events(self, time_period, include_archived=False) -> Optional[int]: """Get the number of events that have occurred on this Monitor. Specifically only gets events that have occurred within the TimePeriod provided. """ date_filter = '1%20{}'.format(time_period.period) if time_period == TimePeriod.ALL: # The consoleEvents API uses DATE_SUB, so give it # something large date_filter = '100%20year' archived_filter = '/Archived=:0' if include_archived: archived_filter = '' event = self._client.get_state( 'api/events/consoleEvents/{}{}.json'.format( date_filter, archived_filter ) ) try: events_by_monitor = event['results'] if isinstance(events_by_monitor, list): return 0 return events_by_monitor.get(str(self._monitor_id), 0) except (TypeError, KeyError, AttributeError): return None
[ "def", "get_events", "(", "self", ",", "time_period", ",", "include_archived", "=", "False", ")", "->", "Optional", "[", "int", "]", ":", "date_filter", "=", "'1%20{}'", ".", "format", "(", "time_period", ".", "period", ")", "if", "time_period", "==", "Tim...
Get the number of events that have occurred on this Monitor. Specifically only gets events that have occurred within the TimePeriod provided.
[ "Get", "the", "number", "of", "events", "that", "have", "occurred", "on", "this", "Monitor", "." ]
python
train
34.366667
vpelletier/python-functionfs
functionfs/__init__.py
https://github.com/vpelletier/python-functionfs/blob/e19f729bb47a7d1edd2488531af24551bb86726f/functionfs/__init__.py#L574-L584
def getRealInterfaceNumber(self, interface): """ Returns the host-visible interface number, or None if there is no such interface. """ try: return self._ioctl(INTERFACE_REVMAP, interface) except IOError as exc: if exc.errno == errno.EDOM: return None raise
[ "def", "getRealInterfaceNumber", "(", "self", ",", "interface", ")", ":", "try", ":", "return", "self", ".", "_ioctl", "(", "INTERFACE_REVMAP", ",", "interface", ")", "except", "IOError", "as", "exc", ":", "if", "exc", ".", "errno", "==", "errno", ".", "...
Returns the host-visible interface number, or None if there is no such interface.
[ "Returns", "the", "host", "-", "visible", "interface", "number", "or", "None", "if", "there", "is", "no", "such", "interface", "." ]
python
train
31.454545
weblyzard/inscriptis
src/inscriptis/__init__.py
https://github.com/weblyzard/inscriptis/blob/0d04f81e69d643bb5f470f33b4ca67b62fc1037c/src/inscriptis/__init__.py#L18-L34
def get_text(html_content, display_images=False, deduplicate_captions=False, display_links=False): ''' ::param: html_content ::returns: a text representation of the html content. ''' html_content = html_content.strip() if not html_content: return "" # strip XML declaration, if necessary if html_content.startswith('<?xml '): html_content = RE_STRIP_XML_DECLARATION.sub('', html_content, count=1) html_tree = fromstring(html_content) parser = Inscriptis(html_tree, display_images=display_images, deduplicate_captions=deduplicate_captions, display_links=display_links) return parser.get_text()
[ "def", "get_text", "(", "html_content", ",", "display_images", "=", "False", ",", "deduplicate_captions", "=", "False", ",", "display_links", "=", "False", ")", ":", "html_content", "=", "html_content", ".", "strip", "(", ")", "if", "not", "html_content", ":",...
::param: html_content ::returns: a text representation of the html content.
[ "::", "param", ":", "html_content", "::", "returns", ":", "a", "text", "representation", "of", "the", "html", "content", "." ]
python
train
38
bitesofcode/projexui
projexui/widgets/xnavigationedit.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xnavigationedit.py#L319-L331
def cancelEdit( self ): """ Rejects the current edit and shows the parts widget. """ if ( self._partsWidget.isVisible() ): return False self._completerTree.hide() self.completer().popup().hide() self.setText(self._originalText) return True
[ "def", "cancelEdit", "(", "self", ")", ":", "if", "(", "self", ".", "_partsWidget", ".", "isVisible", "(", ")", ")", ":", "return", "False", "self", ".", "_completerTree", ".", "hide", "(", ")", "self", ".", "completer", "(", ")", ".", "popup", "(", ...
Rejects the current edit and shows the parts widget.
[ "Rejects", "the", "current", "edit", "and", "shows", "the", "parts", "widget", "." ]
python
train
25.692308
googleapis/google-cloud-python
firestore/google/cloud/firestore_v1beta1/gapic/firestore_client.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/firestore/google/cloud/firestore_v1beta1/gapic/firestore_client.py#L744-L864
def batch_get_documents( self, database, documents, mask=None, transaction=None, new_transaction=None, read_time=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Gets multiple documents. Documents returned by this method are not guaranteed to be returned in the same order that they were requested. Example: >>> from google.cloud import firestore_v1beta1 >>> >>> client = firestore_v1beta1.FirestoreClient() >>> >>> database = client.database_root_path('[PROJECT]', '[DATABASE]') >>> >>> # TODO: Initialize `documents`: >>> documents = [] >>> >>> for element in client.batch_get_documents(database, documents): ... # process element ... pass Args: database (str): The database name. In the format: ``projects/{project_id}/databases/{database_id}``. documents (list[str]): The names of the documents to retrieve. In the format: ``projects/{project_id}/databases/{database_id}/documents/{document_path}``. The request will fail if any of the document is not a child resource of the given ``database``. Duplicate names will be elided. mask (Union[dict, ~google.cloud.firestore_v1beta1.types.DocumentMask]): The fields to return. If not set, returns all fields. If a document has a field that is not present in this mask, that field will not be returned in the response. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.firestore_v1beta1.types.DocumentMask` transaction (bytes): Reads documents in a transaction. new_transaction (Union[dict, ~google.cloud.firestore_v1beta1.types.TransactionOptions]): Starts a new transaction and reads the documents. Defaults to a read-only transaction. The new transaction ID will be returned as the first response in the stream. 
If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.firestore_v1beta1.types.TransactionOptions` read_time (Union[dict, ~google.cloud.firestore_v1beta1.types.Timestamp]): Reads documents as they were at the given time. This may not be older than 60 seconds. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.firestore_v1beta1.types.Timestamp` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: Iterable[~google.cloud.firestore_v1beta1.types.BatchGetDocumentsResponse]. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if "batch_get_documents" not in self._inner_api_calls: self._inner_api_calls[ "batch_get_documents" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.batch_get_documents, default_retry=self._method_configs["BatchGetDocuments"].retry, default_timeout=self._method_configs["BatchGetDocuments"].timeout, client_info=self._client_info, ) # Sanity check: We have some fields which are mutually exclusive; # raise ValueError if more than one is sent. 
google.api_core.protobuf_helpers.check_oneof( transaction=transaction, new_transaction=new_transaction, read_time=read_time, ) request = firestore_pb2.BatchGetDocumentsRequest( database=database, documents=documents, mask=mask, transaction=transaction, new_transaction=new_transaction, read_time=read_time, ) if metadata is None: metadata = [] metadata = list(metadata) try: routing_header = [("database", database)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( routing_header ) metadata.append(routing_metadata) return self._inner_api_calls["batch_get_documents"]( request, retry=retry, timeout=timeout, metadata=metadata )
[ "def", "batch_get_documents", "(", "self", ",", "database", ",", "documents", ",", "mask", "=", "None", ",", "transaction", "=", "None", ",", "new_transaction", "=", "None", ",", "read_time", "=", "None", ",", "retry", "=", "google", ".", "api_core", ".", ...
Gets multiple documents. Documents returned by this method are not guaranteed to be returned in the same order that they were requested. Example: >>> from google.cloud import firestore_v1beta1 >>> >>> client = firestore_v1beta1.FirestoreClient() >>> >>> database = client.database_root_path('[PROJECT]', '[DATABASE]') >>> >>> # TODO: Initialize `documents`: >>> documents = [] >>> >>> for element in client.batch_get_documents(database, documents): ... # process element ... pass Args: database (str): The database name. In the format: ``projects/{project_id}/databases/{database_id}``. documents (list[str]): The names of the documents to retrieve. In the format: ``projects/{project_id}/databases/{database_id}/documents/{document_path}``. The request will fail if any of the document is not a child resource of the given ``database``. Duplicate names will be elided. mask (Union[dict, ~google.cloud.firestore_v1beta1.types.DocumentMask]): The fields to return. If not set, returns all fields. If a document has a field that is not present in this mask, that field will not be returned in the response. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.firestore_v1beta1.types.DocumentMask` transaction (bytes): Reads documents in a transaction. new_transaction (Union[dict, ~google.cloud.firestore_v1beta1.types.TransactionOptions]): Starts a new transaction and reads the documents. Defaults to a read-only transaction. The new transaction ID will be returned as the first response in the stream. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.firestore_v1beta1.types.TransactionOptions` read_time (Union[dict, ~google.cloud.firestore_v1beta1.types.Timestamp]): Reads documents as they were at the given time. This may not be older than 60 seconds. 
If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.firestore_v1beta1.types.Timestamp` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: Iterable[~google.cloud.firestore_v1beta1.types.BatchGetDocumentsResponse]. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid.
[ "Gets", "multiple", "documents", "." ]
python
train
44.338843
spotify/luigi
luigi/tools/luigi_grep.py
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/tools/luigi_grep.py#L46-L53
def status_search(self, status): """Searches for jobs matching the given ``status``.""" json = self._fetch_json() jobs = json['response'] for job in jobs: job_info = jobs[job] if job_info['status'].lower() == status.lower(): yield self._build_results(jobs, job)
[ "def", "status_search", "(", "self", ",", "status", ")", ":", "json", "=", "self", ".", "_fetch_json", "(", ")", "jobs", "=", "json", "[", "'response'", "]", "for", "job", "in", "jobs", ":", "job_info", "=", "jobs", "[", "job", "]", "if", "job_info",...
Searches for jobs matching the given ``status``.
[ "Searches", "for", "jobs", "matching", "the", "given", "status", "." ]
python
train
40.75
ocaballeror/LyricFetch
lyricfetch/scraping.py
https://github.com/ocaballeror/LyricFetch/blob/86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb/lyricfetch/scraping.py#L170-L185
def azlyrics(song): """ Returns the lyrics found in azlyrics for the specified mp3 file or an empty string if not found. """ artist = song.artist.lower() if artist[0:2] == 'a ': artist = artist[2:] artist = normalize(artist, URLESCAPES, '') title = song.title.lower() title = normalize(title, URLESCAPES, '') url = 'https://www.azlyrics.com/lyrics/{}/{}.html'.format(artist, title) soup = get_url(url) body = soup.find_all('div', class_='')[-1] return body.get_text().strip()
[ "def", "azlyrics", "(", "song", ")", ":", "artist", "=", "song", ".", "artist", ".", "lower", "(", ")", "if", "artist", "[", "0", ":", "2", "]", "==", "'a '", ":", "artist", "=", "artist", "[", "2", ":", "]", "artist", "=", "normalize", "(", "a...
Returns the lyrics found in azlyrics for the specified mp3 file or an empty string if not found.
[ "Returns", "the", "lyrics", "found", "in", "azlyrics", "for", "the", "specified", "mp3", "file", "or", "an", "empty", "string", "if", "not", "found", "." ]
python
train
32.5625
PSPC-SPAC-buyandsell/von_anchor
von_anchor/wallet/wallet.py
https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/wallet/wallet.py#L818-L834
async def delete_pairwise(self, their_did: str) -> None: """ Remove a pairwise DID record by its remote DID. Silently return if no such record is present. Raise WalletState for closed wallet, or BadIdentifier for invalid pairwise DID. :param their_did: remote DID marking pairwise DID to remove """ LOGGER.debug('Wallet.delete_pairwise >>> their_did: %s', their_did) if not ok_did(their_did): LOGGER.debug('Wallet.delete_pairwise <!< Bad DID %s', their_did) raise BadIdentifier('Bad DID {}'.format(their_did)) await self.delete_non_secret(TYPE_PAIRWISE, their_did) LOGGER.debug('Wallet.delete_pairwise <<<')
[ "async", "def", "delete_pairwise", "(", "self", ",", "their_did", ":", "str", ")", "->", "None", ":", "LOGGER", ".", "debug", "(", "'Wallet.delete_pairwise >>> their_did: %s'", ",", "their_did", ")", "if", "not", "ok_did", "(", "their_did", ")", ":", "LOGGER",...
Remove a pairwise DID record by its remote DID. Silently return if no such record is present. Raise WalletState for closed wallet, or BadIdentifier for invalid pairwise DID. :param their_did: remote DID marking pairwise DID to remove
[ "Remove", "a", "pairwise", "DID", "record", "by", "its", "remote", "DID", ".", "Silently", "return", "if", "no", "such", "record", "is", "present", ".", "Raise", "WalletState", "for", "closed", "wallet", "or", "BadIdentifier", "for", "invalid", "pairwise", "...
python
train
40.705882
archman/pyrpn
pyrpn/rpn.py
https://github.com/archman/pyrpn/blob/fee706fccee1aa5c527f3f4221cc6b0d69bdde2c/pyrpn/rpn.py#L144-L171
def solve(self): """Solve rpn expression, return None if not valid.""" popflag = True self.tmpopslist = [] while True: while self.opslist and popflag: op = self.opslist.pop() if self.is_variable(op): op = self.variables.get(op) if self.is_operator(op): popflag = False break self.tmpopslist.append(op) # operations tmpr = self._get_temp_result(op) if tmpr == 'ERROR': return None if tmpr is not None: self.opslist.append('{r:.20f}'.format(r=tmpr)) if len(self.tmpopslist) > 0 or len(self.opslist) > 1: popflag = True else: break return float(self.opslist[0])
[ "def", "solve", "(", "self", ")", ":", "popflag", "=", "True", "self", ".", "tmpopslist", "=", "[", "]", "while", "True", ":", "while", "self", ".", "opslist", "and", "popflag", ":", "op", "=", "self", ".", "opslist", ".", "pop", "(", ")", "if", ...
Solve rpn expression, return None if not valid.
[ "Solve", "rpn", "expression", "return", "None", "if", "not", "valid", "." ]
python
train
30.25
log2timeline/plaso
plaso/engine/extractors.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/engine/extractors.py#L548-L571
def _ExtractPathSpecsFromFile(self, file_entry): """Extracts path specification from a file. Args: file_entry (dfvfs.FileEntry): file entry that refers to the file. Yields: dfvfs.PathSpec: path specification of a file entry found in the file. """ produced_main_path_spec = False for data_stream in file_entry.data_streams: # Make a copy so we don't make the changes on a path specification # directly. Otherwise already produced path specifications can be # altered in the process. path_spec = copy.deepcopy(file_entry.path_spec) if data_stream.name: setattr(path_spec, 'data_stream', data_stream.name) yield path_spec if not data_stream.name: produced_main_path_spec = True if not produced_main_path_spec: yield file_entry.path_spec
[ "def", "_ExtractPathSpecsFromFile", "(", "self", ",", "file_entry", ")", ":", "produced_main_path_spec", "=", "False", "for", "data_stream", "in", "file_entry", ".", "data_streams", ":", "# Make a copy so we don't make the changes on a path specification", "# directly. Otherwis...
Extracts path specification from a file. Args: file_entry (dfvfs.FileEntry): file entry that refers to the file. Yields: dfvfs.PathSpec: path specification of a file entry found in the file.
[ "Extracts", "path", "specification", "from", "a", "file", "." ]
python
train
34.083333
Erotemic/utool
utool/util_path.py
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_path.py#L308-L368
def augpath(path, augsuf='', augext='', augpref='', augdir=None, newext=None, newfname=None, ensure=False, prefix=None, suffix=None): """ augments end of path before the extension. augpath Args: path (str): augsuf (str): augment filename before extension Returns: str: newpath Example: >>> # DISABLE_DOCTEST >>> from utool.util_path import * # NOQA >>> path = 'somefile.txt' >>> augsuf = '_aug' >>> newpath = augpath(path, augsuf) >>> result = str(newpath) >>> print(result) somefile_aug.txt Example: >>> # DISABLE_DOCTEST >>> from utool.util_path import * # NOQA >>> path = 'somefile.txt' >>> augsuf = '_aug2' >>> newext = '.bak' >>> augdir = 'backup' >>> newpath = augpath(path, augsuf, newext=newext, augdir=augdir) >>> result = str(newpath) >>> print(result) backup/somefile_aug2.bak """ if prefix is not None: augpref = prefix if suffix is not None: augsuf = suffix # Breakup path dpath, fname = split(path) fname_noext, ext = splitext(fname) if newfname is not None: fname_noext = newfname # Augment ext if newext is None: newext = ext # Augment fname new_fname = ''.join((augpref, fname_noext, augsuf, newext, augext)) # Augment dpath if augdir is not None: new_dpath = join(dpath, augdir) if ensure: # create new dir if needebe ensuredir(new_dpath) else: new_dpath = dpath # Recombine into new path newpath = join(new_dpath, new_fname) return newpath
[ "def", "augpath", "(", "path", ",", "augsuf", "=", "''", ",", "augext", "=", "''", ",", "augpref", "=", "''", ",", "augdir", "=", "None", ",", "newext", "=", "None", ",", "newfname", "=", "None", ",", "ensure", "=", "False", ",", "prefix", "=", "...
augments end of path before the extension. augpath Args: path (str): augsuf (str): augment filename before extension Returns: str: newpath Example: >>> # DISABLE_DOCTEST >>> from utool.util_path import * # NOQA >>> path = 'somefile.txt' >>> augsuf = '_aug' >>> newpath = augpath(path, augsuf) >>> result = str(newpath) >>> print(result) somefile_aug.txt Example: >>> # DISABLE_DOCTEST >>> from utool.util_path import * # NOQA >>> path = 'somefile.txt' >>> augsuf = '_aug2' >>> newext = '.bak' >>> augdir = 'backup' >>> newpath = augpath(path, augsuf, newext=newext, augdir=augdir) >>> result = str(newpath) >>> print(result) backup/somefile_aug2.bak
[ "augments", "end", "of", "path", "before", "the", "extension", "." ]
python
train
27.229508
thespacedoctor/rockAtlas
rockAtlas/positions/pyephemPositions.py
https://github.com/thespacedoctor/rockAtlas/blob/062ecaa95ab547efda535aa33165944f13c621de/rockAtlas/positions/pyephemPositions.py#L93-L141
def get(self, singleSnapshot=False): """ *geneate the pyephem positions* **Key Arguments:** - ``singleSnapshot`` -- just extract positions for a single pyephem snapshot (used for unit testing) **Return:** - ``None`` """ self.log.info('starting the ``get`` method') global xephemOE global tileSide global magLimit # GRAB PARAMETERS FROM SETTINGS FILE tileSide = float(self.settings["pyephem"]["atlas exposure match side"]) magLimit = float(self.settings["pyephem"]["magnitude limit"]) snapshotsRequired = 1 while snapshotsRequired > 0: nextMjds, exposures, snapshotsRequired = self._get_exposures_requiring_pyephem_positions( concurrentSnapshots=int(self.settings["pyephem"]["batch size"])) print "There are currently %(snapshotsRequired)s more pyephem snapshots required " % locals() if snapshotsRequired == 0: return if len(xephemOE) == 0: xephemOE = self._get_xephem_orbital_elements() # DEFINE AN INPUT ARRAY magLimit = self.settings["pyephem"]["magnitude limit"] pyephemDB = fmultiprocess(log=self.log, function=_generate_pyephem_snapshot, timeout=300, inputArray=nextMjds, magLimit=magLimit) matchedObjects = [] for p, e, m in zip(pyephemDB, exposures, nextMjds): matchedObjects.append( self._match_pyephem_snapshot_to_atlas_exposures(p, e, m)) self._add_matched_objects_to_database(matchedObjects) self._update_database_flag(exposures) if singleSnapshot: snapshotsRequired = 0 self.log.info('completed the ``get`` method') return None
[ "def", "get", "(", "self", ",", "singleSnapshot", "=", "False", ")", ":", "self", ".", "log", ".", "info", "(", "'starting the ``get`` method'", ")", "global", "xephemOE", "global", "tileSide", "global", "magLimit", "# GRAB PARAMETERS FROM SETTINGS FILE", "tileSide"...
*geneate the pyephem positions* **Key Arguments:** - ``singleSnapshot`` -- just extract positions for a single pyephem snapshot (used for unit testing) **Return:** - ``None``
[ "*", "geneate", "the", "pyephem", "positions", "*" ]
python
train
37.510204
nicolargo/glances
glances/main.py
https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/main.py#L252-L417
def parse_args(self): """Parse command line arguments.""" args = self.init_args().parse_args() # Load the configuration file, if it exists self.config = Config(args.conf_file) # Debug mode if args.debug: from logging import DEBUG logger.setLevel(DEBUG) else: from warnings import simplefilter simplefilter("ignore") # Plugins disable/enable if args.disable_plugin is not None: for p in args.disable_plugin.split(','): disable(args, p) else: # Allow users to disable plugins from the glances.conf (issue #1378) for s in self.config.sections(): if self.config.has_section(s) \ and (self.config.get_bool_value(s, 'disable', False)): disable(args, s) logger.debug('{} disabled by the configuration file'.format(s)) # Exporters activation if args.export is not None: for p in args.export.split(','): setattr(args, 'export_' + p, True) # Client/server Port if args.port is None: if args.webserver: args.port = self.web_server_port else: args.port = self.server_port # Port in the -c URI #996 if args.client is not None: args.client, args.port = (x if x else y for (x, y) in zip(args.client.partition(':')[::2], (args.client, args.port))) # Autodiscover if args.disable_autodiscover: logger.info("Auto discover mode is disabled") # By default Windows is started in Web mode if WINDOWS: args.webserver = True # In web server mode, default refresh time: 5 sec if args.webserver: args.time = 5 args.process_short_name = True # Server or client login/password if args.username_prompt: # Every username needs a password args.password_prompt = True # Prompt username if args.server: args.username = self.__get_username( description='Define the Glances server username: ') elif args.webserver: args.username = self.__get_username( description='Define the Glances webserver username: ') elif args.client: args.username = self.__get_username( description='Enter the Glances server username: ') else: if args.username_used: # A username has been set using the -u option ? 
args.username = args.username_used else: # Default user name is 'glances' args.username = self.username if args.password_prompt or args.username_used: # Interactive or file password if args.server: args.password = self.__get_password( description='Define the Glances server password ({} username): '.format( args.username), confirm=True, username=args.username) elif args.webserver: args.password = self.__get_password( description='Define the Glances webserver password ({} username): '.format( args.username), confirm=True, username=args.username) elif args.client: args.password = self.__get_password( description='Enter the Glances server password ({} username): '.format( args.username), clear=True, username=args.username) else: # Default is no password args.password = self.password # By default help is hidden args.help_tag = False # Display Rx and Tx, not the sum for the network args.network_sum = False args.network_cumul = False # Manage light mode if args.enable_light: logger.info("Light mode is on") args.disable_left_sidebar = True disable(args, 'process') disable(args, 'alert') disable(args, 'amps') disable(args, 'docker') # Manage full quicklook option if args.full_quicklook: logger.info("Full quicklook mode") enable(args, 'quicklook') disable(args, 'cpu') disable(args, 'mem') disable(args, 'memswap') enable(args, 'load') # Manage disable_top option if args.disable_top: logger.info("Disable top menu") disable(args, 'quicklook') disable(args, 'cpu') disable(args, 'mem') disable(args, 'memswap') disable(args, 'load') # Init the generate_graph tag # Should be set to True to generate graphs args.generate_graph = False # Control parameter and exit if it is not OK self.args = args # Export is only available in standalone or client mode (issue #614) export_tag = self.args.export is not None and any(self.args.export) if WINDOWS and export_tag: # On Windows, export is possible but only in quiet mode # See issue #1038 logger.info("On Windows OS, export disable the Web 
interface") self.args.quiet = True self.args.webserver = False elif not (self.is_standalone() or self.is_client()) and export_tag: logger.critical("Export is only available in standalone or client mode") sys.exit(2) # Filter is only available in standalone mode if args.process_filter is not None and not self.is_standalone(): logger.critical( "Process filter is only available in standalone mode") sys.exit(2) # Disable HDDTemp if sensors are disabled if getattr(args, 'disable_sensors', False): disable(args, 'hddtemp') logger.debug("Sensors and HDDTemp are disabled") return args
[ "def", "parse_args", "(", "self", ")", ":", "args", "=", "self", ".", "init_args", "(", ")", ".", "parse_args", "(", ")", "# Load the configuration file, if it exists", "self", ".", "config", "=", "Config", "(", "args", ".", "conf_file", ")", "# Debug mode", ...
Parse command line arguments.
[ "Parse", "command", "line", "arguments", "." ]
python
train
36.909639
KnowledgeLinks/rdfframework
rdfframework/datasets/jsonquery.py
https://github.com/KnowledgeLinks/rdfframework/blob/9ec32dcc4bed51650a4b392cc5c15100fef7923a/rdfframework/datasets/jsonquery.py#L14-L63
def parse_json_qry(qry_str): """ Parses a json query string into its parts args: qry_str: query string params: variables passed into the string """ def param_analyzer(param_list): rtn_list = [] for param in param_list: parts = param.strip().split("=") try: rtn_list.append(\ JsonQryProcessor[parts[0].strip().lower()](parts[1])) except IndexError: rtn_list.append(\ JsonQryProcessor[parts[0].strip().lower()]()) return rtn_list def part_analyzer(part, idx): nonlocal dallor, asterick, question_mark if part == "$": dallor = idx return part elif part == "*": asterick = idx return part elif part == "?": question_mark = idx return part elif part.startswith("="): return part return cssparse(part)[0] # pdb.set_trace() main_parts = qry_str.split("|") or_parts = main_parts.pop(0).strip() params = param_analyzer(main_parts) rtn_list = [] for or_part in [item.strip() for item in or_parts.split(",") if item.strip()]: dallor, asterick, question_mark = None, None, None dot_parts = or_part.split(".") rtn_list.append(([part_analyzer(part, i) \ for i, part in enumerate(dot_parts)], dallor, asterick, question_mark)) return {"qry_parts": rtn_list, "params": params}
[ "def", "parse_json_qry", "(", "qry_str", ")", ":", "def", "param_analyzer", "(", "param_list", ")", ":", "rtn_list", "=", "[", "]", "for", "param", "in", "param_list", ":", "parts", "=", "param", ".", "strip", "(", ")", ".", "split", "(", "\"=\"", ")",...
Parses a json query string into its parts args: qry_str: query string params: variables passed into the string
[ "Parses", "a", "json", "query", "string", "into", "its", "parts" ]
python
train
32.4
gitpython-developers/GitPython
git/util.py
https://github.com/gitpython-developers/GitPython/blob/1f66e25c25cde2423917ee18c4704fff83b837d1/git/util.py#L573-L590
def _from_string(cls, string): """Create an Actor from a string. :param string: is the string, which is expected to be in regular git format John Doe <jdoe@example.com> :return: Actor """ m = cls.name_email_regex.search(string) if m: name, email = m.groups() return Actor(name, email) else: m = cls.name_only_regex.search(string) if m: return Actor(m.group(1), None) else: # assume best and use the whole string as name return Actor(string, None)
[ "def", "_from_string", "(", "cls", ",", "string", ")", ":", "m", "=", "cls", ".", "name_email_regex", ".", "search", "(", "string", ")", "if", "m", ":", "name", ",", "email", "=", "m", ".", "groups", "(", ")", "return", "Actor", "(", "name", ",", ...
Create an Actor from a string. :param string: is the string, which is expected to be in regular git format John Doe <jdoe@example.com> :return: Actor
[ "Create", "an", "Actor", "from", "a", "string", ".", ":", "param", "string", ":", "is", "the", "string", "which", "is", "expected", "to", "be", "in", "regular", "git", "format" ]
python
train
33.5
saltstack/salt
salt/utils/kickstart.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/kickstart.py#L739-L745
def parse_updates(rule): ''' Parse the updates line ''' rules = shlex.split(rule) rules.pop(0) return {'url': rules[0]} if rules else True
[ "def", "parse_updates", "(", "rule", ")", ":", "rules", "=", "shlex", ".", "split", "(", "rule", ")", "rules", ".", "pop", "(", "0", ")", "return", "{", "'url'", ":", "rules", "[", "0", "]", "}", "if", "rules", "else", "True" ]
Parse the updates line
[ "Parse", "the", "updates", "line" ]
python
train
22.285714
instaloader/instaloader
instaloader/structures.py
https://github.com/instaloader/instaloader/blob/87d877e650cd8020b04b8b51be120599a441fd5b/instaloader/structures.py#L75-L80
def from_shortcode(cls, context: InstaloaderContext, shortcode: str): """Create a post object from a given shortcode""" # pylint:disable=protected-access post = cls(context, {'shortcode': shortcode}) post._node = post._full_metadata return post
[ "def", "from_shortcode", "(", "cls", ",", "context", ":", "InstaloaderContext", ",", "shortcode", ":", "str", ")", ":", "# pylint:disable=protected-access", "post", "=", "cls", "(", "context", ",", "{", "'shortcode'", ":", "shortcode", "}", ")", "post", ".", ...
Create a post object from a given shortcode
[ "Create", "a", "post", "object", "from", "a", "given", "shortcode" ]
python
train
46.5
closeio/tasktiger
tasktiger/worker.py
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/worker.py#L573-L632
def _process_queue_tasks(self, queue, queue_lock, task_ids, now, log): """Process tasks in queue.""" processed_count = 0 # Get all tasks serialized_tasks = self.connection.mget([ self._key('task', task_id) for task_id in task_ids ]) # Parse tasks tasks = [] for task_id, serialized_task in zip(task_ids, serialized_tasks): if serialized_task: task_data = json.loads(serialized_task) else: # In the rare case where we don't find the task which is # queued (see ReliabilityTestCase.test_task_disappears), # we log an error and remove the task below. We need to # at least initialize the Task object with an ID so we can # remove it. task_data = {'id': task_id} task = Task(self.tiger, queue=queue, _data=task_data, _state=ACTIVE, _ts=now) if not serialized_task: # Remove task as per comment above log.error('not found', task_id=task_id) task._move() elif task.id != task_id: log.error('task ID mismatch', task_id=task_id) # Remove task task._move() else: tasks.append(task) # List of task IDs that exist and we will update the heartbeat on. valid_task_ids = set(task.id for task in tasks) # Group by task func tasks_by_func = OrderedDict() for task in tasks: func = task.serialized_func if func in tasks_by_func: tasks_by_func[func].append(task) else: tasks_by_func[func] = [task] # Execute tasks for each task func for tasks in tasks_by_func.values(): success, processed_tasks = self._execute_task_group(queue, tasks, valid_task_ids, queue_lock) processed_count = processed_count + len(processed_tasks) log.debug('processed', attempted=len(tasks), processed=processed_count) for task in processed_tasks: self._finish_task_processing(queue, task, success) return processed_count
[ "def", "_process_queue_tasks", "(", "self", ",", "queue", ",", "queue_lock", ",", "task_ids", ",", "now", ",", "log", ")", ":", "processed_count", "=", "0", "# Get all tasks", "serialized_tasks", "=", "self", ".", "connection", ".", "mget", "(", "[", "self",...
Process tasks in queue.
[ "Process", "tasks", "in", "queue", "." ]
python
train
37.683333
pyhys/minimalmodbus
minimalmodbus.py
https://github.com/pyhys/minimalmodbus/blob/e99f4d74c83258c6039073082955ac9bed3f2155/minimalmodbus.py#L261-L296
def write_register(self, registeraddress, value, numberOfDecimals=0, functioncode=16, signed=False): """Write an integer to one 16-bit register in the slave, possibly scaling it. The slave register can hold integer values in the range 0 to 65535 ("Unsigned INT16"). Args: * registeraddress (int): The slave register address (use decimal numbers, not hex). * value (int or float): The value to store in the slave register (might be scaled before sending). * numberOfDecimals (int): The number of decimals for content conversion. * functioncode (int): Modbus function code. Can be 6 or 16. * signed (bool): Whether the data should be interpreted as unsigned or signed. To store for example ``value=77.0``, use ``numberOfDecimals=1`` if the slave register will hold it as 770 internally. This will multiply ``value`` by 10 before sending it to the slave register. Similarly ``numberOfDecimals=2`` will multiply ``value`` by 100 before sending it to the slave register. For discussion on negative values, the range and on alternative names, see :meth:`.read_register`. Use the parameter ``signed=True`` if writing to a register that can hold negative values. Then negative input will be automatically converted into upper range data (two's complement). Returns: None Raises: ValueError, TypeError, IOError """ _checkFunctioncode(functioncode, [6, 16]) _checkInt(numberOfDecimals, minvalue=0, maxvalue=10, description='number of decimals') _checkBool(signed, description='signed') _checkNumerical(value, description='input value') self._genericCommand(functioncode, registeraddress, value, numberOfDecimals, signed=signed)
[ "def", "write_register", "(", "self", ",", "registeraddress", ",", "value", ",", "numberOfDecimals", "=", "0", ",", "functioncode", "=", "16", ",", "signed", "=", "False", ")", ":", "_checkFunctioncode", "(", "functioncode", ",", "[", "6", ",", "16", "]", ...
Write an integer to one 16-bit register in the slave, possibly scaling it. The slave register can hold integer values in the range 0 to 65535 ("Unsigned INT16"). Args: * registeraddress (int): The slave register address (use decimal numbers, not hex). * value (int or float): The value to store in the slave register (might be scaled before sending). * numberOfDecimals (int): The number of decimals for content conversion. * functioncode (int): Modbus function code. Can be 6 or 16. * signed (bool): Whether the data should be interpreted as unsigned or signed. To store for example ``value=77.0``, use ``numberOfDecimals=1`` if the slave register will hold it as 770 internally. This will multiply ``value`` by 10 before sending it to the slave register. Similarly ``numberOfDecimals=2`` will multiply ``value`` by 100 before sending it to the slave register. For discussion on negative values, the range and on alternative names, see :meth:`.read_register`. Use the parameter ``signed=True`` if writing to a register that can hold negative values. Then negative input will be automatically converted into upper range data (two's complement). Returns: None Raises: ValueError, TypeError, IOError
[ "Write", "an", "integer", "to", "one", "16", "-", "bit", "register", "in", "the", "slave", "possibly", "scaling", "it", "." ]
python
train
50.583333
sdispater/cleo
cleo/application.py
https://github.com/sdispater/cleo/blob/cf44ac2eba2d6435516501e47e5521ee2da9115a/cleo/application.py#L38-L45
def add(self, command): # type: (BaseCommand) -> Application """ Adds a command object. """ self.add_command(command.config) command.set_application(self) return self
[ "def", "add", "(", "self", ",", "command", ")", ":", "# type: (BaseCommand) -> Application", "self", ".", "add_command", "(", "command", ".", "config", ")", "command", ".", "set_application", "(", "self", ")", "return", "self" ]
Adds a command object.
[ "Adds", "a", "command", "object", "." ]
python
train
26.125
googledatalab/pydatalab
google/datalab/bigquery/_table.py
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_table.py#L603-L635
def to_dataframe(self, start_row=0, max_rows=None): """ Exports the table to a Pandas dataframe. Args: start_row: the row of the table at which to start the export (default 0) max_rows: an upper limit on the number of rows to export (default None) Returns: A Pandas dataframe containing the table data. """ fetcher = self._get_row_fetcher(start_row=start_row, max_rows=max_rows, page_size=self._MAX_PAGE_SIZE) count = 0 page_token = None # Collect results of page fetcher in separate dataframe objects, then # concatenate them to reduce the amount of copying df_list = [] df = None while True: page_rows, page_token = fetcher(page_token, count) if len(page_rows): count += len(page_rows) df_list.append(pandas.DataFrame.from_records(page_rows)) if not page_token: break if df_list: df = pandas.concat(df_list, ignore_index=True, copy=False) # Need to reorder the dataframe to preserve column ordering ordered_fields = [field.name for field in self.schema] return df[ordered_fields] if df is not None else pandas.DataFrame()
[ "def", "to_dataframe", "(", "self", ",", "start_row", "=", "0", ",", "max_rows", "=", "None", ")", ":", "fetcher", "=", "self", ".", "_get_row_fetcher", "(", "start_row", "=", "start_row", ",", "max_rows", "=", "max_rows", ",", "page_size", "=", "self", ...
Exports the table to a Pandas dataframe. Args: start_row: the row of the table at which to start the export (default 0) max_rows: an upper limit on the number of rows to export (default None) Returns: A Pandas dataframe containing the table data.
[ "Exports", "the", "table", "to", "a", "Pandas", "dataframe", "." ]
python
train
36.272727
pantsbuild/pants
src/python/pants/option/global_options.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/option/global_options.py#L91-L377
def register_bootstrap_options(cls, register): """Register bootstrap options. "Bootstrap options" are a small set of options whose values are useful when registering other options. Therefore we must bootstrap them early, before other options are registered, let alone parsed. Bootstrap option values can be interpolated into the config file, and can be referenced programatically in registration code, e.g., as register.bootstrap.pants_workdir. Note that regular code can also access these options as normal global-scope options. Their status as "bootstrap options" is only pertinent during option registration. """ buildroot = get_buildroot() default_distdir_name = 'dist' default_distdir = os.path.join(buildroot, default_distdir_name) default_rel_distdir = '/{}/'.format(default_distdir_name) register('-l', '--level', choices=['trace', 'debug', 'info', 'warn'], default='info', recursive=True, help='Set the logging level.') register('-q', '--quiet', type=bool, recursive=True, daemon=False, help='Squelches most console output. NOTE: Some tasks default to behaving quietly: ' 'inverting this option supports making them noisier than they would be otherwise.') register('--log-show-rust-3rdparty', type=bool, default=False, advanced=True, help='Whether to show/hide logging done by 3rdparty rust crates used by the pants ' 'engine.') # Not really needed in bootstrap options, but putting it here means it displays right # after -l and -q in help output, which is conveniently contextual. register('--colors', type=bool, default=sys.stdout.isatty(), recursive=True, daemon=False, help='Set whether log messages are displayed in color.') # TODO(#7203): make a regexp option type! register('--ignore-pants-warnings', type=list, member_type=str, default=[], help='Regexps matching warning strings to ignore, e.g. ' '["DEPRECATED: scope some_scope will be removed"]. The regexps will be matched ' 'from the start of the warning string, and will always be case-insensitive. 
' 'See the `warnings` module documentation for more background on these are used.') register('--pants-version', advanced=True, default=pants_version(), help='Use this pants version. Note Pants code only uses this to verify that you are ' 'using the requested version, as Pants cannot dynamically change the version it ' 'is using once the program is already running. This option is useful to set in ' 'your pants.ini, however, and then you can grep the value to select which ' 'version to use for setup scripts (e.g. `./pants`), runner scripts, IDE plugins, ' 'etc. For example, the setup script we distribute at https://www.pantsbuild.org/install.html#recommended-installation ' 'uses this value to determine which Python version to run with. You may find the ' 'version of the pants instance you are running using -v, -V, or --version.') register('--pants-runtime-python-version', advanced=True, removal_version='1.19.0.dev0', deprecation_start_version='1.17.0.dev0', removal_hint=dedent(""" This option was only used to help with Pants' migration to run on Python 3. \ Pants will now correctly default to whichever Python versions are supported for \ the current `pants_version` you are using. Please make sure you are using the \ most up-to-date version of the `./pants` script with: curl -L -O https://pantsbuild.github.io/setup/pants and then unset this option."""), help='Use this Python version to run Pants. The option expects the major and minor ' 'version, e.g. 2.7 or 3.6. Note Pants code only uses this to verify that you are ' 'using the requested interpreter, as Pants cannot dynamically change the ' 'interpreter it is using once the program is already running. This option is ' 'useful to set in your pants.ini, however, and then you can grep the value to ' 'select which interpreter to use for setup scripts (e.g. `./pants`), runner ' 'scripts, IDE plugins, etc. 
For example, the setup script we distribute at ' 'https://www.pantsbuild.org/install.html#recommended-installation uses this ' 'value to determine which Python version to run with. Also note this does not mean ' 'your own code must use this Python version. See ' 'https://www.pantsbuild.org/python_readme.html#configure-the-python-version ' 'for how to configure your code\'s compatibility.') register('--plugins', advanced=True, type=list, help='Load these plugins.') register('--plugin-cache-dir', advanced=True, default=os.path.join(get_pants_cachedir(), 'plugins'), help='Cache resolved plugin requirements here.') register('--backend-packages', advanced=True, type=list, default=['pants.backend.graph_info', 'pants.backend.python', 'pants.backend.jvm', 'pants.backend.native', # TODO: Move into the graph_info backend. 'pants.rules.core', 'pants.backend.codegen.antlr.java', 'pants.backend.codegen.antlr.python', 'pants.backend.codegen.jaxb', 'pants.backend.codegen.protobuf.java', 'pants.backend.codegen.ragel.java', 'pants.backend.codegen.thrift.java', 'pants.backend.codegen.thrift.python', 'pants.backend.codegen.grpcio.python', 'pants.backend.codegen.wire.java', 'pants.backend.project_info'], help='Load backends from these packages that are already on the path. 
' 'Add contrib and custom backends to this list.') register('--pants-bootstrapdir', advanced=True, metavar='<dir>', default=get_pants_cachedir(), help='Use this dir for global cache.') register('--pants-configdir', advanced=True, metavar='<dir>', default=get_pants_configdir(), help='Use this dir for global config files.') register('--pants-workdir', advanced=True, metavar='<dir>', default=os.path.join(buildroot, '.pants.d'), help='Write intermediate output files to this dir.') register('--pants-supportdir', advanced=True, metavar='<dir>', default=os.path.join(buildroot, 'build-support'), help='Use support files from this dir.') register('--pants-distdir', advanced=True, metavar='<dir>', default=default_distdir, help='Write end-product artifacts to this dir. If you modify this path, you ' 'should also update --build-ignore and --pants-ignore to include the ' 'custom dist dir path as well.') register('--pants-subprocessdir', advanced=True, default=os.path.join(buildroot, '.pids'), help='The directory to use for tracking subprocess metadata, if any. This should ' 'live outside of the dir used by `--pants-workdir` to allow for tracking ' 'subprocesses that outlive the workdir data (e.g. `./pants server`).') register('--pants-config-files', advanced=True, type=list, daemon=False, default=[get_default_pants_config_file()], help='Paths to Pants config files.') # TODO: Deprecate the --pantsrc/--pantsrc-files options? This would require being able # to set extra config file locations in an initial bootstrap config file. register('--pantsrc', advanced=True, type=bool, default=True, help='Use pantsrc files.') register('--pantsrc-files', advanced=True, type=list, metavar='<path>', daemon=False, default=['/etc/pantsrc', '~/.pants.rc'], help='Override config with values from these files. 
' 'Later files override earlier ones.') register('--pythonpath', advanced=True, type=list, help='Add these directories to PYTHONPATH to search for plugins.') register('--target-spec-file', type=list, dest='target_spec_files', daemon=False, help='Read additional specs from this file, one per line') register('--verify-config', type=bool, default=True, daemon=False, advanced=True, help='Verify that all config file values correspond to known options.') register('--build-ignore', advanced=True, type=list, default=['.*/', default_rel_distdir, 'bower_components/', 'node_modules/', '*.egg-info/'], help='Paths to ignore when identifying BUILD files. ' 'This does not affect any other filesystem operations. ' 'Patterns use the gitignore pattern syntax (https://git-scm.com/docs/gitignore).') register('--pants-ignore', advanced=True, type=list, default=['.*/', default_rel_distdir], help='Paths to ignore for all filesystem operations performed by pants ' '(e.g. BUILD file scanning, glob matching, etc). ' 'Patterns use the gitignore syntax (https://git-scm.com/docs/gitignore).') register('--glob-expansion-failure', advanced=True, default=GlobMatchErrorBehavior.warn, type=GlobMatchErrorBehavior, help="Raise an exception if any targets declaring source files " "fail to match any glob provided in the 'sources' argument.") # TODO(#7203): make a regexp option type! register('--exclude-target-regexp', advanced=True, type=list, default=[], daemon=False, metavar='<regexp>', help='Exclude target roots that match these regexes.') register('--subproject-roots', type=list, advanced=True, default=[], help='Paths that correspond with build roots for any subproject that this ' 'project depends on.') register('--owner-of', type=list, member_type=file_option, default=[], daemon=False, metavar='<path>', help='Select the targets that own these files. ' 'This is the third target calculation strategy along with the --changed-* ' 'options and specifying the targets directly. 
These three types of target ' 'selection are mutually exclusive.') # These logging options are registered in the bootstrap phase so that plugins can log during # registration and not so that their values can be interpolated in configs. register('-d', '--logdir', advanced=True, metavar='<dir>', help='Write logs to files under this directory.') # This facilitates bootstrap-time configuration of pantsd usage such that we can # determine whether or not to use the Pailgun client to invoke a given pants run # without resorting to heavier options parsing. register('--enable-pantsd', advanced=True, type=bool, default=False, help='Enables use of the pants daemon (and implicitly, the v2 engine). (Beta)') # Shutdown pantsd after the current run. # This needs to be accessed at the same time as enable_pantsd, # so we register it at bootstrap time. register('--shutdown-pantsd-after-run', advanced=True, type=bool, default=False, help='Create a new pantsd server, and use it, and shut it down immediately after. ' 'If pantsd is already running, it will shut it down and spawn a new instance (Beta)') # These facilitate configuring the native engine. register('--native-engine-visualize-to', advanced=True, default=None, type=dir_option, daemon=False, help='A directory to write execution and rule graphs to as `dot` files. The contents ' 'of the directory will be overwritten if any filenames collide.') register('--print-exception-stacktrace', advanced=True, type=bool, help='Print to console the full exception stack trace if encountered.') # BinaryUtil options. register('--binaries-baseurls', type=list, advanced=True, default=['https://binaries.pantsbuild.org'], help='List of URLs from which binary tools are downloaded. 
URLs are ' 'searched in order until the requested path is found.') register('--binaries-fetch-timeout-secs', type=int, default=30, advanced=True, daemon=False, help='Timeout in seconds for URL reads when fetching binary tools from the ' 'repos specified by --baseurls.') register('--binaries-path-by-id', type=dict, advanced=True, help=("Maps output of uname for a machine to a binary search path: " "(sysname, id) -> (os, arch), e.g. {('darwin', '15'): ('mac', '10.11'), " "('linux', 'arm32'): ('linux', 'arm32')}.")) register('--allow-external-binary-tool-downloads', type=bool, default=True, advanced=True, help="If False, require BinaryTool subclasses to download their contents from urls " "generated from --binaries-baseurls, even if the tool has an external url " "generator. This can be necessary if using Pants in an environment which cannot " "contact the wider Internet.") # Pants Daemon options. register('--pantsd-pailgun-host', advanced=True, default='127.0.0.1', help='The host to bind the pants nailgun server to.') register('--pantsd-pailgun-port', advanced=True, type=int, default=0, help='The port to bind the pants nailgun server to. Defaults to a random port.') # TODO(#7514): Make this default to 1.0 seconds if stdin is a tty! register('--pantsd-pailgun-quit-timeout', advanced=True, type=float, default=5.0, help='The length of time (in seconds) to wait for further output after sending a ' 'signal to the remote pantsd-runner process before killing it.') register('--pantsd-log-dir', advanced=True, default=None, help='The directory to log pantsd output to.') register('--pantsd-invalidation-globs', advanced=True, type=list, default=[], help='Filesystem events matching any of these globs will trigger a daemon restart.') # Watchman options. register('--watchman-version', advanced=True, default='4.9.0-pants1', help='Watchman version.') register('--watchman-supportdir', advanced=True, default='bin/watchman', help='Find watchman binaries under this dir. 
Used as part of the path to lookup ' 'the binary with --binaries-baseurls and --pants-bootstrapdir.') register('--watchman-startup-timeout', type=float, advanced=True, default=30.0, help='The watchman socket timeout (in seconds) for the initial `watch-project` command. ' 'This may need to be set higher for larger repos due to watchman startup cost.') register('--watchman-socket-timeout', type=float, advanced=True, default=0.1, help='The watchman client socket timeout in seconds. Setting this to too high a ' 'value can negatively impact the latency of runs forked by pantsd.') register('--watchman-socket-path', type=str, advanced=True, default=None, help='The path to the watchman UNIX socket. This can be overridden if the default ' 'absolute path length exceeds the maximum allowed by the OS.') # This option changes the parser behavior in a fundamental way (which currently invalidates # all caches), and needs to be parsed out early, so we make it a bootstrap option. register('--build-file-imports', choices=['allow', 'warn', 'error'], default='warn', advanced=True, help='Whether to allow import statements in BUILD files') register('--local-store-dir', advanced=True, help="Directory to use for engine's local file store.", # This default is also hard-coded into the engine's rust code in # fs::Store::default_path default=os.path.expanduser('~/.cache/pants/lmdb_store')) register('--remote-store-server', advanced=True, type=list, default=[], help='host:port of grpc server to use as remote execution file store.') register('--remote-store-thread-count', type=int, advanced=True, default=DEFAULT_EXECUTION_OPTIONS.remote_store_thread_count, help='Thread count to use for the pool that interacts with the remote file store.') register('--remote-execution-server', advanced=True, help='host:port of grpc server to use as remote execution scheduler.') register('--remote-store-chunk-bytes', type=int, advanced=True, default=DEFAULT_EXECUTION_OPTIONS.remote_store_chunk_bytes, help='Size 
in bytes of chunks transferred to/from the remote file store.') register('--remote-store-chunk-upload-timeout-seconds', type=int, advanced=True, default=DEFAULT_EXECUTION_OPTIONS.remote_store_chunk_upload_timeout_seconds, help='Timeout (in seconds) for uploads of individual chunks to the remote file store.') register('--remote-store-rpc-retries', type=int, advanced=True, default=DEFAULT_EXECUTION_OPTIONS.remote_store_rpc_retries, help='Number of times to retry any RPC to the remote store before giving up.') register('--remote-execution-process-cache-namespace', advanced=True, help="The cache namespace for remote process execution. " "Bump this to invalidate every artifact's remote execution. " "This is the remote execution equivalent of the legacy cache-key-gen-version " "flag.") register('--remote-instance-name', advanced=True, help='Name of the remote execution instance to use. Used for routing within ' '--remote-execution-server and --remote-store-server.') register('--remote-ca-certs-path', advanced=True, help='Path to a PEM file containing CA certificates used for verifying secure ' 'connections to --remote-execution-server and --remote-store-server. ' 'If not specified, TLS will not be used.') register('--remote-oauth-bearer-token-path', advanced=True, help='Path to a file containing an oauth token to use for grpc connections to ' '--remote-execution-server and --remote-store-server. If not specified, no ' 'authorization will be performed.') # This should eventually deprecate the RunTracker worker count, which is used for legacy cache # lookups via CacheSetup in TaskBase. register('--process-execution-parallelism', type=int, default=multiprocessing.cpu_count(), advanced=True, help='Number of concurrent processes that may be executed either locally and remotely.') register('--process-execution-cleanup-local-dirs', type=bool, default=True, advanced=True, help='Whether or not to cleanup directories used for local process execution ' '(primarily useful for e.g. 
debugging).')
[ "def", "register_bootstrap_options", "(", "cls", ",", "register", ")", ":", "buildroot", "=", "get_buildroot", "(", ")", "default_distdir_name", "=", "'dist'", "default_distdir", "=", "os", ".", "path", ".", "join", "(", "buildroot", ",", "default_distdir_name", ...
Register bootstrap options. "Bootstrap options" are a small set of options whose values are useful when registering other options. Therefore we must bootstrap them early, before other options are registered, let alone parsed. Bootstrap option values can be interpolated into the config file, and can be referenced programmatically in registration code, e.g., as register.bootstrap.pants_workdir. Note that regular code can also access these options as normal global-scope options. Their status as "bootstrap options" is only pertinent during option registration.
[ "Register", "bootstrap", "options", "." ]
python
train
67.463415
rackerlabs/fleece
fleece/xray.py
https://github.com/rackerlabs/fleece/blob/42d79dfa0777e99dbb09bc46105449a9be5dbaa9/fleece/xray.py#L322-L334
def extract_function_metadata(wrapped, instance, args, kwargs, return_value):
    """Capture the wrapped call's `args` and `kwargs` as subsegment metadata.

    The (wrapped, instance, return_value) parameters are part of the wrapper
    callback signature and are intentionally unused here.
    """
    LOGGER.debug(
        'Extracting function call metadata',
        args=args,
        kwargs=kwargs,
    )
    metadata = {'args': args, 'kwargs': kwargs}
    return {'metadata': metadata}
[ "def", "extract_function_metadata", "(", "wrapped", ",", "instance", ",", "args", ",", "kwargs", ",", "return_value", ")", ":", "LOGGER", ".", "debug", "(", "'Extracting function call metadata'", ",", "args", "=", "args", ",", "kwargs", "=", "kwargs", ",", ")"...
Stash the `args` and `kwargs` into the metadata of the subsegment.
[ "Stash", "the", "args", "and", "kwargs", "into", "the", "metadata", "of", "the", "subsegment", "." ]
python
train
27.769231
mattlong/hermes
hermes/chatroom.py
https://github.com/mattlong/hermes/blob/63a5afcafe90ca99aeb44edeee9ed6f90baae431/hermes/chatroom.py#L65-L76
def is_member(self, m):
    """Check whether a user is an active or invited member of the chatroom.

    Args:
        m: either a bare JID string or a member dict with a 'JID' key;
           falsy values (None, '', {}) are treated as non-members.

    Returns:
        bool: True if a member with the same JID has STATUS 'ACTIVE' or
        'INVITED', False otherwise.
    """
    if not m:
        return False
    # Accept either a raw JID string or a member record.
    # NOTE(review): `basestring` is Python 2 only; this module appears to
    # target py2 -- confirm before porting.
    jid = m if isinstance(m, basestring) else m['JID']
    # any() short-circuits and works on both Python 2 and 3; the previous
    # len(filter(...)) > 0 raised TypeError on Python 3 because filter()
    # returns a lazy iterator with no len().  The generator variable is also
    # renamed so it no longer shadows the `m` parameter.
    return any(member['JID'] == jid and
               member.get('STATUS') in ('ACTIVE', 'INVITED')
               for member in self.params['MEMBERS'])
[ "def", "is_member", "(", "self", ",", "m", ")", ":", "if", "not", "m", ":", "return", "False", "elif", "isinstance", "(", "m", ",", "basestring", ")", ":", "jid", "=", "m", "else", ":", "jid", "=", "m", "[", "'JID'", "]", "is_member", "=", "len",...
Check if a user is a member of the chatroom
[ "Check", "if", "a", "user", "is", "a", "member", "of", "the", "chatroom" ]
python
train
31.083333
marrow/mongo
marrow/mongo/query/query.py
https://github.com/marrow/mongo/blob/2066dc73e281b8a46cb5fc965267d6b8e1b18467/marrow/mongo/query/query.py#L505-L526
def of_type(self, *kinds):
    """Select documents whose field value has one of the given BSON types.

    Document.field.of_type()
    Document.field.of_type('string')

    Element operator: {$type: self.__foreign__}
    Documentation: https://docs.mongodb.org/manual/reference/operator/query/type/#op._S_type
    """
    if self._combining:
        # Field-compound query fragment, e.g. (Foo.bar & Foo.baz): apply the
        # operator to each component, then fold them back together.
        fragments = (component.of_type(*kinds) for component in self._field)
        return reduce(self._combining, fragments)

    # Explicit kinds win; otherwise fall back to the field's foreign types.
    type_names = set(kinds) if kinds else self._field.__foreign__
    if not type_names:
        return Filter()

    if len(type_names) == 1:
        # A single type is expressed as a scalar rather than a set.
        (type_names,) = type_names

    return Filter({self._name: {'$type': type_names}})
[ "def", "of_type", "(", "self", ",", "*", "kinds", ")", ":", "if", "self", ".", "_combining", ":", "# We are a field-compound query fragment, e.g. (Foo.bar & Foo.baz).", "return", "reduce", "(", "self", ".", "_combining", ",", "(", "q", ".", "of_type", "(", "*", ...
Selects documents if a field is of the correct type. Document.field.of_type() Document.field.of_type('string') Element operator: {$type: self.__foreign__} Documentation: https://docs.mongodb.org/manual/reference/operator/query/type/#op._S_type
[ "Selects", "documents", "if", "a", "field", "is", "of", "the", "correct", "type", ".", "Document", ".", "field", ".", "of_type", "()", "Document", ".", "field", ".", "of_type", "(", "string", ")", "Element", "operator", ":", "{", "$type", ":", "self", ...
python
train
31.863636
biolink/ontobio
ontobio/sparql/sparql_ontology.py
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/sparql/sparql_ontology.py#L18-L36
def extract_subset(self, subset):
    """
    Find all nodes belonging to a named subset.

    Assumes the oboInOwl encoding of subsets, where subset IDs are IRIs;
    the match is a case-insensitive regex against the IRI fragment.
    """
    # Subsets are stored as oboInOwl:inSubset IRIs, so anchor the match on
    # the '#<subset>' fragment at the end of the IRI.
    query = """
        prefix oboInOwl: <http://www.geneontology.org/formats/oboInOwl#>
        SELECT ?c WHERE {{
        GRAPH <{g}> {{
        ?c oboInOwl:inSubset ?s
        FILTER regex(?s,'#{s}$','i')
        }}
        }}
        """.format(s=subset, g=self.graph_name)
    rows = run_sparql(query)
    return [row['c']['value'] for row in rows]
[ "def", "extract_subset", "(", "self", ",", "subset", ")", ":", "# note subsets have an unusual encoding", "query", "=", "\"\"\"\n prefix oboInOwl: <http://www.geneontology.org/formats/oboInOwl#>\n SELECT ?c WHERE {{\n GRAPH <{g}> {{\n ?c oboInOwl:inSubset ?s\n ...
Find all nodes in a subset. We assume the oboInOwl encoding of subsets, and subset IDs are IRIs
[ "Find", "all", "nodes", "in", "a", "subset", ".", "We", "assume", "the", "oboInOwl", "encoding", "of", "subsets", "and", "subset", "IDs", "are", "IRIs" ]
python
train
30.526316
pywbem/pywbem
pywbem/_statistics.py
https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/pywbem/_statistics.py#L676-L722
def formatted(self):
    """Return a human-readable, multi-line summary of the statistics.

    Operations are sorted by decreasing average (client) time.  The three
    ServerTime columns appear only when the WBEM server reported server
    response times for at least one operation.  When statistics are
    disabled, the summary simply reads "Disabled".
    """
    header = "Statistics (times in seconds, lengths in Bytes):\n"
    if not self.enabled:
        return (header + "Disabled").strip()

    # Slowest operations (by average client time) first.
    ordered = sorted(self.snapshot(),
                     key=lambda entry: entry[1].avg_time,
                     reverse=True)

    # Include the server-time columns only if any operation recorded one.
    # pylint: disable=protected-access
    include_svr = any(stats._server_time_stored for _name, stats in ordered)

    if include_svr:
        body = OperationStatistic._formatted_header_w_svr
    else:
        body = OperationStatistic._formatted_header
    for _name, stats in ordered:
        body += stats.formatted(include_svr)
    return (header + body).strip()
[ "def", "formatted", "(", "self", ")", ":", "# pylint: disable=line-too-long", "# noqa: E501", "# pylint: enable=line-too-long", "ret", "=", "\"Statistics (times in seconds, lengths in Bytes):\\n\"", "if", "self", ".", "enabled", ":", "snapshot", "=", "sorted", "(", "self", ...
Return a human readable string with the statistics for this container. The operations are sorted by decreasing average time. The three columns for `ServerTime` are included only if the WBEM server has returned WBEM server response times. Example if statistics are enabled:: Statistics (times in seconds, lengths in Bytes): Count Excep ClientTime ServerTime RequestLen ReplyLen Operation Cnt Avg Min Max Avg Min Max Avg Min Max Avg Min Max 3 0 0.234 0.100 0.401 0.204 0.080 0.361 1233 1000 1500 26667 20000 35000 EnumerateInstances 1 0 0.100 0.100 0.100 0.080 0.080 0.080 1200 1200 1200 22000 22000 22000 EnumerateInstanceNames . . . Example if statistics are disabled:: Statistics (times in seconds, lengths in Bytes): Disabled
[ "Return", "a", "human", "readable", "string", "with", "the", "statistics", "for", "this", "container", ".", "The", "operations", "are", "sorted", "by", "decreasing", "average", "time", "." ]
python
train
45.12766
JasonKessler/scattertext
scattertext/ScatterChart.py
https://github.com/JasonKessler/scattertext/blob/cacf1f687d218ee8cae3fc05cc901db824bb1b81/scattertext/ScatterChart.py#L145-L181
def inject_coordinates(self,
                       x_coords,
                       y_coords,
                       rescale_x=None,
                       rescale_y=None,
                       original_x=None,
                       original_y=None):
    '''
    Inject custom x and y coordinates for each term into chart.

    Parameters
    ----------
    x_coords: array-like
        positions on x-axis \in [0,1]
    y_coords: array-like
        positions on y-axis \in [0,1]
    rescale_x: lambda list[0,1]: list[0,1], default identity
        Rescales x-axis after filtering
    rescale_y: lambda list[0,1]: list[0,1], default identity
        Rescales y-axis after filtering
    original_x : array-like, optional
        Original, unscaled x-values.  Defaults to x_coords
    original_y : array-like, optional
        Original, unscaled y-values.  Defaults to y_coords

    Returns
    -------
    self: ScatterChart
    '''
    # Validate both axes before mutating any state, so a bad axis leaves
    # the chart untouched.
    self._verify_coordinates(x_coords, 'x')
    self._verify_coordinates(y_coords, 'y')
    self.x_coords = x_coords
    self.y_coords = y_coords
    self._rescale_x = rescale_x
    self._rescale_y = rescale_y
    self.original_x = x_coords if original_x is None else original_x
    self.original_y = y_coords if original_y is None else original_y
    # Fix: the docstring documents "Returns self" but no value was
    # returned; return self so calls can be chained as documented.
    return self
[ "def", "inject_coordinates", "(", "self", ",", "x_coords", ",", "y_coords", ",", "rescale_x", "=", "None", ",", "rescale_y", "=", "None", ",", "original_x", "=", "None", ",", "original_y", "=", "None", ")", ":", "self", ".", "_verify_coordinates", "(", "x_...
Inject custom x and y coordinates for each term into chart. Parameters ---------- x_coords: array-like positions on x-axis \in [0,1] y_coords: array-like positions on y-axis \in [0,1] rescale_x: lambda list[0,1]: list[0,1], default identity Rescales x-axis after filtering rescale_y: lambda list[0,1]: list[0,1], default identity Rescales y-axis after filtering original_x : array-like, optional Original, unscaled x-values. Defaults to x_coords original_y : array-like, optional Original, unscaled y-values. Defaults to y_coords Returns ------- self: ScatterChart
[ "Inject", "custom", "x", "and", "y", "coordinates", "for", "each", "term", "into", "chart", "." ]
python
train
37.351351
jtwhite79/pyemu
pyemu/pst/pst_utils.py
https://github.com/jtwhite79/pyemu/blob/c504d8e7a4097cec07655a6318d275739bd8148a/pyemu/pst/pst_utils.py#L151-L187
def res_from_en(pst, enfile):
    """load ensemble file for residual into a pandas.DataFrame

    Parameters
    ----------
    pst : pyemu.Pst
        control file instance whose observation data supplies the group,
        measured value and weight columns
    enfile : str or pandas.DataFrame
        ensemble file name (CSV with realizations as rows), or an
        already-loaded ensemble DataFrame

    Returns
    -------
    pandas.DataFrame : pandas.DataFrame
        residual-style frame indexed by observation name with columns
        modelled, std, group, measured, weight and residual
    """
    converters = {"name": str_con, "group": str_con}
    try:
        #substitute ensemble for res, 'base' if there, otherwise mean
        obs = pst.observation_data
        if isinstance(enfile, str):
            # Load the realizations-as-rows CSV, then transpose so that
            # observations become the rows and realizations the columns.
            df = pd.read_csv(enfile, converters=converters)
            df.columns = df.columns.str.lower()
            # NOTE(review): rename_axis(None, 1) uses a positional axis
            # argument, which newer pandas versions reject -- verify the
            # pandas version pin.
            df = df.set_index('real_name').T.rename_axis('name').rename_axis(None, 1)
        else:
            # Already a DataFrame of realizations; just transpose.
            df = enfile.T
        if 'base' in df.columns:
            # Use the 'base' realization as the modelled value.
            df['modelled'] = df['base']
            # NOTE(review): std is computed after the 'modelled' column is
            # inserted, so the spread includes that duplicate column --
            # confirm this is intended.
            df['std'] = df.std(axis=1)
        else:
            # No 'base' realization: fall back to the ensemble mean.
            df['modelled'] = df.mean(axis=1)
            df['std'] = df.std(axis=1)
        #probably a more pandastic way to do this
        res_df = df[['modelled', 'std']].copy()
        res_df['group'] = obs.loc[:, 'obgnme'].copy()
        res_df['measured'] = obs['obsval'].copy()
        res_df['weight'] = obs['weight'].copy()
        res_df['residual'] = res_df['measured'] - res_df['modelled']
    except Exception as e:
        # NOTE(review): re-raising a bare Exception drops the original
        # traceback; chaining with `from e` would preserve it.
        raise Exception("Pst.res_from_en:{0}".format(str(e)))
    return res_df
[ "def", "res_from_en", "(", "pst", ",", "enfile", ")", ":", "converters", "=", "{", "\"name\"", ":", "str_con", ",", "\"group\"", ":", "str_con", "}", "try", ":", "#substitute ensemble for res, 'base' if there, otherwise mean", "obs", "=", "pst", ".", "observation_...
load ensemble file for residual into a pandas.DataFrame Parameters ---------- enfile : str ensemble file name Returns ------- pandas.DataFrame : pandas.DataFrame
[ "load", "ensemble", "file", "for", "residual", "into", "a", "pandas", ".", "DataFrame" ]
python
train
34.351351
Azure/msrest-for-python
msrest/serialization.py
https://github.com/Azure/msrest-for-python/blob/0732bc90bdb290e5f58c675ffdd7dbfa9acefc93/msrest/serialization.py#L857-L899
def serialize_object(self, attr, **kwargs):
    """Serialize a generic object.
    This will be handled as a dictionary. If object passed in is not
    a basic type (str, int, float, dict, list) it will simply be
    cast to str.

    :param dict attr: Object to be serialized.
    :rtype: dict or str
    """
    if attr is None:
        return None
    # XML elements pass through untouched.
    if isinstance(attr, ET.Element):
        return attr
    obj_type = type(attr)
    # Basic types (str, int, float, ...) get the dedicated serializer.
    if obj_type in self.basic_types:
        return self.serialize_basic(attr, self.basic_types[obj_type], **kwargs)
    if obj_type is _long_type:
        return self.serialize_long(attr)

    # If it's a model or I know this dependency, serialize as a Model
    # NOTE(review): isinstance(obj_type, Model) tests whether the *type
    # object* itself is a Model instance, which only matches when the class
    # is built from a Model metaclass; issubclass(obj_type, Model) may have
    # been intended -- confirm against msrest's model machinery.
    elif obj_type in self.dependencies.values() or isinstance(obj_type, Model):
        return self._serialize(attr)

    if obj_type == dict:
        serialized = {}
        for key, value in attr.items():
            try:
                serialized[self.serialize_unicode(key)] = self.serialize_object(
                    value, **kwargs)
            except ValueError:
                # Unserializable dict values are kept but nulled out.
                serialized[self.serialize_unicode(key)] = None
        return serialized

    if obj_type == list:
        serialized = []
        for obj in attr:
            try:
                serialized.append(self.serialize_object(
                    obj, **kwargs))
            except ValueError:
                # Unserializable list items are silently dropped (unlike
                # dict values above, which become None).
                pass
        return serialized
    # Fallback: anything unrecognized is stringified.
    return str(attr)
[ "def", "serialize_object", "(", "self", ",", "attr", ",", "*", "*", "kwargs", ")", ":", "if", "attr", "is", "None", ":", "return", "None", "if", "isinstance", "(", "attr", ",", "ET", ".", "Element", ")", ":", "return", "attr", "obj_type", "=", "type"...
Serialize a generic object. This will be handled as a dictionary. If object passed in is not a basic type (str, int, float, dict, list) it will simply be cast to str. :param dict attr: Object to be serialized. :rtype: dict or str
[ "Serialize", "a", "generic", "object", ".", "This", "will", "be", "handled", "as", "a", "dictionary", ".", "If", "object", "passed", "in", "is", "not", "a", "basic", "type", "(", "str", "int", "float", "dict", "list", ")", "it", "will", "simply", "be",...
python
train
36.023256
kivy/python-for-android
pythonforandroid/bootstraps/common/build/build.py
https://github.com/kivy/python-for-android/blob/8e0e8056bc22e4d5bd3398a6b0301f38ff167933/pythonforandroid/bootstraps/common/build/build.py#L201-L247
def make_tar(tfn, source_dirs, ignore_path=[], optimize_python=True): ''' Make a zip file `fn` from the contents of source_dis. ''' # selector function def select(fn): rfn = realpath(fn) for p in ignore_path: if p.endswith('/'): p = p[:-1] if rfn.startswith(p): return False if rfn in python_files: return False return not is_blacklist(fn) # get the files and relpath file of all the directory we asked for files = [] for sd in source_dirs: sd = realpath(sd) compile_dir(sd, optimize_python=optimize_python) files += [(x, relpath(realpath(x), sd)) for x in listfiles(sd) if select(x)] # create tar.gz of thoses files tf = tarfile.open(tfn, 'w:gz', format=tarfile.USTAR_FORMAT) dirs = [] for fn, afn in files: dn = dirname(afn) if dn not in dirs: # create every dirs first if not exist yet d = '' for component in split(dn): d = join(d, component) if d.startswith('/'): d = d[1:] if d == '' or d in dirs: continue dirs.append(d) tinfo = tarfile.TarInfo(d) tinfo.type = tarfile.DIRTYPE tf.addfile(tinfo) # put the file tf.add(fn, afn) tf.close()
[ "def", "make_tar", "(", "tfn", ",", "source_dirs", ",", "ignore_path", "=", "[", "]", ",", "optimize_python", "=", "True", ")", ":", "# selector function", "def", "select", "(", "fn", ")", ":", "rfn", "=", "realpath", "(", "fn", ")", "for", "p", "in", ...
Make a zip file `fn` from the contents of source_dis.
[ "Make", "a", "zip", "file", "fn", "from", "the", "contents", "of", "source_dis", "." ]
python
train
30.106383
UCBerkeleySETI/blimpy
blimpy/utils.py
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/utils.py#L24-L51
def rebin(d, n_x, n_y=None): """ Rebin data by averaging bins together Args: d (np.array): data n_x (int): number of bins in x dir to rebin into one n_y (int): number of bins in y dir to rebin into one Returns: d: rebinned data with shape (n_x, n_y) """ if d.ndim == 2: if n_y is None: n_y = 1 if n_x is None: n_x = 1 d = d[:int(d.shape[0] // n_x) * n_x, :int(d.shape[1] // n_y) * n_y] d = d.reshape((d.shape[0] // n_x, n_x, d.shape[1] // n_y, n_y)) d = d.mean(axis=3) d = d.mean(axis=1) elif d.ndim == 1: d = d[:int(d.shape[0] // n_x) * n_x] d = d.reshape((d.shape[0] // n_x, n_x)) d = d.mean(axis=1) else: raise RuntimeError("Only NDIM <= 2 supported") return d
[ "def", "rebin", "(", "d", ",", "n_x", ",", "n_y", "=", "None", ")", ":", "if", "d", ".", "ndim", "==", "2", ":", "if", "n_y", "is", "None", ":", "n_y", "=", "1", "if", "n_x", "is", "None", ":", "n_x", "=", "1", "d", "=", "d", "[", ":", ...
Rebin data by averaging bins together Args: d (np.array): data n_x (int): number of bins in x dir to rebin into one n_y (int): number of bins in y dir to rebin into one Returns: d: rebinned data with shape (n_x, n_y)
[ "Rebin", "data", "by", "averaging", "bins", "together" ]
python
test
28.25
glue-viz/glue-vispy-viewers
glue_vispy_viewers/extern/vispy/gloo/context.py
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/gloo/context.py#L79-L85
def forget_canvas(canvas): """ Forget about the given canvas. Used by the canvas when closed. """ cc = [c() for c in canvasses if c() is not None] while canvas in cc: cc.remove(canvas) canvasses[:] = [weakref.ref(c) for c in cc]
[ "def", "forget_canvas", "(", "canvas", ")", ":", "cc", "=", "[", "c", "(", ")", "for", "c", "in", "canvasses", "if", "c", "(", ")", "is", "not", "None", "]", "while", "canvas", "in", "cc", ":", "cc", ".", "remove", "(", "canvas", ")", "canvasses"...
Forget about the given canvas. Used by the canvas when closed.
[ "Forget", "about", "the", "given", "canvas", ".", "Used", "by", "the", "canvas", "when", "closed", "." ]
python
train
35.714286
Alignak-monitoring/alignak
alignak/objects/schedulingitem.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/schedulingitem.py#L1316-L1332
def disable_active_checks(self, checks): """Disable active checks for this host/service Update check in progress with current object information :param checks: Checks object, to change all checks in progress :type checks: alignak.objects.check.Checks :return: None """ self.active_checks_enabled = False for chk_id in self.checks_in_progress: chk = checks[chk_id] chk.status = ACT_STATUS_WAIT_CONSUME chk.exit_status = self.state_id chk.output = self.output chk.check_time = time.time() chk.execution_time = 0 chk.perf_data = self.perf_data
[ "def", "disable_active_checks", "(", "self", ",", "checks", ")", ":", "self", ".", "active_checks_enabled", "=", "False", "for", "chk_id", "in", "self", ".", "checks_in_progress", ":", "chk", "=", "checks", "[", "chk_id", "]", "chk", ".", "status", "=", "A...
Disable active checks for this host/service Update check in progress with current object information :param checks: Checks object, to change all checks in progress :type checks: alignak.objects.check.Checks :return: None
[ "Disable", "active", "checks", "for", "this", "host", "/", "service", "Update", "check", "in", "progress", "with", "current", "object", "information" ]
python
train
39.588235
refenv/cijoe
modules/cij/runner.py
https://github.com/refenv/cijoe/blob/21d7b2ed4ff68e0a1457e7df2db27f6334f1a379/modules/cij/runner.py#L199-L230
def hooks_setup(trun, parent, hnames=None): """ Setup test-hooks @returns dict of hook filepaths {"enter": [], "exit": []} """ hooks = { "enter": [], "exit": [] } if hnames is None: # Nothing to do, just return the struct return hooks for hname in hnames: # Fill out paths for med in HOOK_PATTERNS: for ptn in HOOK_PATTERNS[med]: fpath = os.sep.join([trun["conf"]["HOOKS"], ptn % hname]) if not os.path.exists(fpath): continue hook = hook_setup(parent, fpath) if not hook: continue hooks[med].append(hook) if not hooks["enter"] + hooks["exit"]: cij.err("rnr:hooks_setup:FAIL { hname: %r has no files }" % hname) return None return hooks
[ "def", "hooks_setup", "(", "trun", ",", "parent", ",", "hnames", "=", "None", ")", ":", "hooks", "=", "{", "\"enter\"", ":", "[", "]", ",", "\"exit\"", ":", "[", "]", "}", "if", "hnames", "is", "None", ":", "# Nothing to do, just return the struct", "ret...
Setup test-hooks @returns dict of hook filepaths {"enter": [], "exit": []}
[ "Setup", "test", "-", "hooks" ]
python
valid
26.71875
senseobservationsystems/commonsense-python-lib
senseapi.py
https://github.com/senseobservationsystems/commonsense-python-lib/blob/aac59a1751ef79eb830b3ca1fab6ef2c83931f87/senseapi.py#L744-L761
def GroupSensorsFind(self, group_id, parameters, filters, namespace = None): """ Find sensors in a group based on a number of filters on metatags @param group_id (int) - Id of the group in which to find sensors @param namespace (string) - Namespace to use in filtering on metatags @param parameters (dictionary) - Dictionary containing additional parameters @param filters (dictionary) - Dictioanry containing the filters on metatags @return (bool) - Boolean indicating whether GroupSensorsFind was successful """ ns = "default" if namespace is None else namespace parameters['namespace'] = ns if self.__SenseApiCall__("/groups/{0}/sensors/find.json?{1}".format(group_id, urllib.urlencode(parameters, True)), "POST", parameters = filters): return True else: self.__error__ = "api call unsuccessful" return False
[ "def", "GroupSensorsFind", "(", "self", ",", "group_id", ",", "parameters", ",", "filters", ",", "namespace", "=", "None", ")", ":", "ns", "=", "\"default\"", "if", "namespace", "is", "None", "else", "namespace", "parameters", "[", "'namespace'", "]", "=", ...
Find sensors in a group based on a number of filters on metatags @param group_id (int) - Id of the group in which to find sensors @param namespace (string) - Namespace to use in filtering on metatags @param parameters (dictionary) - Dictionary containing additional parameters @param filters (dictionary) - Dictioanry containing the filters on metatags @return (bool) - Boolean indicating whether GroupSensorsFind was successful
[ "Find", "sensors", "in", "a", "group", "based", "on", "a", "number", "of", "filters", "on", "metatags" ]
python
train
55.166667
HDI-Project/BTB
btb/tuning/tuner.py
https://github.com/HDI-Project/BTB/blob/7f489ebc5591bd0886652ef743098c022d7f7460/btb/tuning/tuner.py#L44-L50
def _generate_grid(self): """Get the all possible values for each of the tunables.""" grid_axes = [] for _, param in self.tunables: grid_axes.append(param.get_grid_axis(self.grid_width)) return grid_axes
[ "def", "_generate_grid", "(", "self", ")", ":", "grid_axes", "=", "[", "]", "for", "_", ",", "param", "in", "self", ".", "tunables", ":", "grid_axes", ".", "append", "(", "param", ".", "get_grid_axis", "(", "self", ".", "grid_width", ")", ")", "return"...
Get the all possible values for each of the tunables.
[ "Get", "the", "all", "possible", "values", "for", "each", "of", "the", "tunables", "." ]
python
train
34.571429
dhylands/rshell
rshell/main.py
https://github.com/dhylands/rshell/blob/a92a8fa8074ac792241c83c640a51b394667c324/rshell/main.py#L386-L404
def column_print(fmt, rows, print_func): """Prints a formatted list, adjusting the width so everything fits. fmt contains a single character for each column. < indicates that the column should be left justified, > indicates that the column should be right justified. The last column may be a space which implies left justification and no padding. """ # Figure out the max width of each column num_cols = len(fmt) width = [max(0 if isinstance(row, str) else len(row[i]) for row in rows) for i in range(num_cols)] for row in rows: if isinstance(row, str): # Print a separator line print_func(' '.join([row * width[i] for i in range(num_cols)])) else: print_func(' '.join([align_cell(fmt[i], row[i], width[i]) for i in range(num_cols)]))
[ "def", "column_print", "(", "fmt", ",", "rows", ",", "print_func", ")", ":", "# Figure out the max width of each column", "num_cols", "=", "len", "(", "fmt", ")", "width", "=", "[", "max", "(", "0", "if", "isinstance", "(", "row", ",", "str", ")", "else", ...
Prints a formatted list, adjusting the width so everything fits. fmt contains a single character for each column. < indicates that the column should be left justified, > indicates that the column should be right justified. The last column may be a space which implies left justification and no padding.
[ "Prints", "a", "formatted", "list", "adjusting", "the", "width", "so", "everything", "fits", ".", "fmt", "contains", "a", "single", "character", "for", "each", "column", ".", "<", "indicates", "that", "the", "column", "should", "be", "left", "justified", ">"...
python
train
45
chaoss/grimoirelab-perceval
perceval/utils.py
https://github.com/chaoss/grimoirelab-perceval/blob/41c908605e88b7ebc3a536c643fa0f212eaf9e0e/perceval/utils.py#L224-L270
def xml_to_dict(raw_xml): """Convert a XML stream into a dictionary. This function transforms a xml stream into a dictionary. The attributes are stored as single elements while child nodes are stored into lists. The text node is stored using the special key '__text__'. This code is based on Winston Ewert's solution to this problem. See http://codereview.stackexchange.com/questions/10400/convert-elementtree-to-dict for more info. The code was licensed as cc by-sa 3.0. :param raw_xml: XML stream :returns: a dict with the XML data :raises ParseError: raised when an error occurs parsing the given XML stream """ def node_to_dict(node): d = {} d.update(node.items()) text = getattr(node, 'text', None) if text is not None: d['__text__'] = text childs = {} for child in node: childs.setdefault(child.tag, []).append(node_to_dict(child)) d.update(childs.items()) return d purged_xml = remove_invalid_xml_chars(raw_xml) try: tree = xml.etree.ElementTree.fromstring(purged_xml) except xml.etree.ElementTree.ParseError as e: cause = "XML stream %s" % (str(e)) raise ParseError(cause=cause) d = node_to_dict(tree) return d
[ "def", "xml_to_dict", "(", "raw_xml", ")", ":", "def", "node_to_dict", "(", "node", ")", ":", "d", "=", "{", "}", "d", ".", "update", "(", "node", ".", "items", "(", ")", ")", "text", "=", "getattr", "(", "node", ",", "'text'", ",", "None", ")", ...
Convert a XML stream into a dictionary. This function transforms a xml stream into a dictionary. The attributes are stored as single elements while child nodes are stored into lists. The text node is stored using the special key '__text__'. This code is based on Winston Ewert's solution to this problem. See http://codereview.stackexchange.com/questions/10400/convert-elementtree-to-dict for more info. The code was licensed as cc by-sa 3.0. :param raw_xml: XML stream :returns: a dict with the XML data :raises ParseError: raised when an error occurs parsing the given XML stream
[ "Convert", "a", "XML", "stream", "into", "a", "dictionary", "." ]
python
test
27.191489
ktdreyer/treq-kerberos
treq_kerberos/__init__.py
https://github.com/ktdreyer/treq-kerberos/blob/8331867cf2bade6b4f9d6d7035b72679c4eafc28/treq_kerberos/__init__.py#L73-L83
def negotiate_header(url): """ Return the "Authorization" HTTP header value to use for this URL. """ hostname = urlparse(url).hostname _, krb_context = kerberos.authGSSClientInit('HTTP@%s' % hostname) # authGSSClientStep goes over the network to the KDC (ie blocking). yield threads.deferToThread(kerberos.authGSSClientStep, krb_context, '') negotiate_details = kerberos.authGSSClientResponse(krb_context) defer.returnValue('Negotiate ' + negotiate_details)
[ "def", "negotiate_header", "(", "url", ")", ":", "hostname", "=", "urlparse", "(", "url", ")", ".", "hostname", "_", ",", "krb_context", "=", "kerberos", ".", "authGSSClientInit", "(", "'HTTP@%s'", "%", "hostname", ")", "# authGSSClientStep goes over the network t...
Return the "Authorization" HTTP header value to use for this URL.
[ "Return", "the", "Authorization", "HTTP", "header", "value", "to", "use", "for", "this", "URL", "." ]
python
train
46.818182
Phylliade/ikpy
src/ikpy/geometry_utils.py
https://github.com/Phylliade/ikpy/blob/60e36d6163136942bf520d952db17123c658d0b6/src/ikpy/geometry_utils.py#L10-L16
def Rx_matrix(theta): """Rotation matrix around the X axis""" return np.array([ [1, 0, 0], [0, np.cos(theta), -np.sin(theta)], [0, np.sin(theta), np.cos(theta)] ])
[ "def", "Rx_matrix", "(", "theta", ")", ":", "return", "np", ".", "array", "(", "[", "[", "1", ",", "0", ",", "0", "]", ",", "[", "0", ",", "np", ".", "cos", "(", "theta", ")", ",", "-", "np", ".", "sin", "(", "theta", ")", "]", ",", "[", ...
Rotation matrix around the X axis
[ "Rotation", "matrix", "around", "the", "X", "axis" ]
python
train
27.571429
aheadley/python-crunchyroll
crunchyroll/subtitles.py
https://github.com/aheadley/python-crunchyroll/blob/9bf2eb644f0d0f3e9dc21b95b8e355c6e2050178/crunchyroll/subtitles.py#L61-L72
def decrypt(self, encryption_key, iv, encrypted_data): """Decrypt encrypted subtitle data @param int subtitle_id @param str iv @param str encrypted_data @return str """ logger.info('Decrypting subtitles with length (%d bytes), key=%r', len(encrypted_data), encryption_key) return zlib.decompress(aes_decrypt(encryption_key, iv, encrypted_data))
[ "def", "decrypt", "(", "self", ",", "encryption_key", ",", "iv", ",", "encrypted_data", ")", ":", "logger", ".", "info", "(", "'Decrypting subtitles with length (%d bytes), key=%r'", ",", "len", "(", "encrypted_data", ")", ",", "encryption_key", ")", "return", "zl...
Decrypt encrypted subtitle data @param int subtitle_id @param str iv @param str encrypted_data @return str
[ "Decrypt", "encrypted", "subtitle", "data" ]
python
train
34.25
kkinder/NdbSearchableBase
NdbSearchableBase/SearchableModel.py
https://github.com/kkinder/NdbSearchableBase/blob/4f999336b464704a0929cec135c1f09fb1ddfb7c/NdbSearchableBase/SearchableModel.py#L188-L195
def _pre_delete_hook(cls, key): """ Removes instance from index. """ if cls.searching_enabled: doc_id = cls.search_get_document_id(key) index = cls.search_get_index() index.delete(doc_id)
[ "def", "_pre_delete_hook", "(", "cls", ",", "key", ")", ":", "if", "cls", ".", "searching_enabled", ":", "doc_id", "=", "cls", ".", "search_get_document_id", "(", "key", ")", "index", "=", "cls", ".", "search_get_index", "(", ")", "index", ".", "delete", ...
Removes instance from index.
[ "Removes", "instance", "from", "index", "." ]
python
train
31
titusjan/argos
argos/inspector/registry.py
https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/inspector/registry.py#L55-L63
def create(self, collector, tryImport=True): """ Creates an inspector of the registered and passes the collector to the constructor. Tries to import the class if tryImport is True. Raises ImportError if the class could not be imported. """ cls = self.getClass(tryImport=tryImport) if not self.successfullyImported: raise ImportError("Class not successfully imported: {}".format(self.exception)) return cls(collector)
[ "def", "create", "(", "self", ",", "collector", ",", "tryImport", "=", "True", ")", ":", "cls", "=", "self", ".", "getClass", "(", "tryImport", "=", "tryImport", ")", "if", "not", "self", ".", "successfullyImported", ":", "raise", "ImportError", "(", "\"...
Creates an inspector of the registered and passes the collector to the constructor. Tries to import the class if tryImport is True. Raises ImportError if the class could not be imported.
[ "Creates", "an", "inspector", "of", "the", "registered", "and", "passes", "the", "collector", "to", "the", "constructor", ".", "Tries", "to", "import", "the", "class", "if", "tryImport", "is", "True", ".", "Raises", "ImportError", "if", "the", "class", "coul...
python
train
53.777778