repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
ceph/ceph-deploy
ceph_deploy/conf/ceph.py
https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/conf/ceph.py#L34-L46
def safe_get(self, section, key):
    """Fetch ``section``/``key`` from the config, returning None when absent.

    Wraps the parent ``RawConfigParser.get`` so callers do not need a
    try/except around every lookup for missing sections or options.
    """
    # Bind the parent implementation explicitly so a subclass overriding
    # ``get`` does not change what this helper calls.
    getter = configparser.RawConfigParser.get
    try:
        return getter(self, section, key)
    except (configparser.NoOptionError, configparser.NoSectionError):
        return None
[ "def", "safe_get", "(", "self", ",", "section", ",", "key", ")", ":", "try", ":", "#Use full parent function so we can replace it in the class", "# if desired", "return", "configparser", ".", "RawConfigParser", ".", "get", "(", "self", ",", "section", ",", "key", ...
Attempt to get a configuration value from a certain section in a ``cfg`` object but returning None if not found. Avoids the need to be doing try/except {ConfigParser Exceptions} every time.
[ "Attempt", "to", "get", "a", "configuration", "value", "from", "a", "certain", "section", "in", "a", "cfg", "object", "but", "returning", "None", "if", "not", "found", ".", "Avoids", "the", "need", "to", "be", "doing", "try", "/", "except", "{", "ConfigP...
python
train
zhanglab/psamm
psamm/datasource/sbml.py
https://github.com/zhanglab/psamm/blob/dc427848c4f9d109ca590f0afa024c63b685b3f4/psamm/datasource/sbml.py#L217-L234
def properties(self):
    """Return all species properties as a dict.

    Always contains 'id' and 'boundary'; 'name', 'compartment', 'charge'
    and 'formula' are included only when defined for this species.
    """
    props = {'id': self._id, 'boundary': self._boundary}
    # XML attributes that map straight through when present.
    for attr in ('name', 'compartment'):
        if attr in self._root.attrib:
            props[attr] = self._root.get(attr)
    # Derived values are included only when they resolve to a value.
    for key, value in (('charge', self.charge), ('formula', self.formula)):
        if value is not None:
            props[key] = value
    return props
[ "def", "properties", "(", "self", ")", ":", "properties", "=", "{", "'id'", ":", "self", ".", "_id", ",", "'boundary'", ":", "self", ".", "_boundary", "}", "if", "'name'", "in", "self", ".", "_root", ".", "attrib", ":", "properties", "[", "'name'", "...
All species properties as a dict
[ "All", "species", "properties", "as", "a", "dict" ]
python
train
pantsbuild/pex
pex/link.py
https://github.com/pantsbuild/pex/blob/87b2129d860250d3b9edce75b9cb62f9789ee521/pex/link.py#L19-L30
def wrap(cls, url):
    """Coerce *url* into a :class:`Link`.

    :param url: A string-like or :class:`Link` object to wrap.
    :returns: A :class:`Link` object wrapping the url.
    :raises ValueError: if *url* is neither a string nor a Link.
    """
    # Already wrapped: pass through untouched.
    if isinstance(url, cls):
        return url
    if isinstance(url, compatible_string):
        return cls(url)
    raise ValueError('url must be either a string or Link.')
[ "def", "wrap", "(", "cls", ",", "url", ")", ":", "if", "isinstance", "(", "url", ",", "cls", ")", ":", "return", "url", "elif", "isinstance", "(", "url", ",", "compatible_string", ")", ":", "return", "cls", "(", "url", ")", "else", ":", "raise", "V...
Given a url that is either a string or :class:`Link`, return a :class:`Link`. :param url: A string-like or :class:`Link` object to wrap. :returns: A :class:`Link` object wrapping the url.
[ "Given", "a", "url", "that", "is", "either", "a", "string", "or", ":", "class", ":", "Link", "return", "a", ":", "class", ":", "Link", "." ]
python
train
python-rope/rope
rope/contrib/autoimport.py
https://github.com/python-rope/rope/blob/1c9f9cd5964b099a99a9111e998f0dc728860688/rope/contrib/autoimport.py#L107-L121
def generate_modules_cache(self, modules, underlined=None,
                           task_handle=taskhandle.NullTaskHandle()):
    """Generate global name cache for modules listed in `modules`.

    Each entry may be a plain module name or a ``pkg.*`` pattern; the
    latter caches every submodule of ``pkg`` individually.

    :param modules: iterable of module names (``'foo'`` or ``'foo.*'``)
    :param underlined: whether underlined (private) names are included
    :param task_handle: progress-reporting handle
    """
    # Fixed typo in the user-visible progress message ('Generatig').
    job_set = task_handle.create_jobset(
        'Generating autoimport cache for modules', len(modules))
    for modname in modules:
        job_set.started_job('Working on <%s>' % modname)
        if modname.endswith('.*'):
            # Wildcard entry: cache the package and all of its submodules.
            mod = self.project.find_module(modname[:-2])
            if mod:
                for sub in submodules(mod):
                    self.update_resource(sub, underlined)
        else:
            self.update_module(modname, underlined)
        job_set.finished_job()
[ "def", "generate_modules_cache", "(", "self", ",", "modules", ",", "underlined", "=", "None", ",", "task_handle", "=", "taskhandle", ".", "NullTaskHandle", "(", ")", ")", ":", "job_set", "=", "task_handle", ".", "create_jobset", "(", "'Generatig autoimport cache f...
Generate global name cache for modules listed in `modules`
[ "Generate", "global", "name", "cache", "for", "modules", "listed", "in", "modules" ]
python
train
DataDog/integrations-core
tokumx/datadog_checks/tokumx/vendor/pymongo/topology.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/tokumx/datadog_checks/tokumx/vendor/pymongo/topology.py#L405-L411
def _request_check(self, address): """Wake one monitor. Hold the lock when calling this.""" server = self._servers.get(address) # "server" is None if another thread removed it from the topology. if server: server.request_check()
[ "def", "_request_check", "(", "self", ",", "address", ")", ":", "server", "=", "self", ".", "_servers", ".", "get", "(", "address", ")", "# \"server\" is None if another thread removed it from the topology.", "if", "server", ":", "server", ".", "request_check", "(",...
Wake one monitor. Hold the lock when calling this.
[ "Wake", "one", "monitor", ".", "Hold", "the", "lock", "when", "calling", "this", "." ]
python
train
MaxHalford/starboost
starboost/boosting.py
https://github.com/MaxHalford/starboost/blob/59d96dcc983404cbc326878facd8171fd2655ce1/starboost/boosting.py#L49-L164
def fit(self, X, y, eval_set=None):
    """Fit a gradient boosting procedure to a dataset.

    Args:
        X (array-like or sparse matrix of shape (n_samples, n_features)):
            The training input samples. Sparse matrices are accepted only
            if they are supported by the weak model.
        y (array-like of shape (n_samples,)): The training target values
            (strings or integers in classification, real numbers in
            regression).
        eval_set (tuple of length 2, optional, default=None): The
            evaluation set is a tuple ``(X_val, y_val)``. It has to respect
            the same conventions as ``X`` and ``y``.

    Returns:
        self
    """
    # Verify the input parameters
    base_estimator = self.base_estimator
    base_estimator_is_tree = self.base_estimator_is_tree
    # Default weak learner: a depth-1 regression tree (a decision stump).
    if base_estimator is None:
        base_estimator = tree.DecisionTreeRegressor(max_depth=1, random_state=self.random_state)
        base_estimator_is_tree = True
    if not isinstance(base_estimator, base.RegressorMixin):
        raise ValueError('base_estimator must be a RegressorMixin')
    loss = self.loss or self._default_loss
    self.init_estimator_ = base.clone(self.init_estimator or loss.default_init_estimator)
    eval_metric = self.eval_metric or loss
    line_searcher = self.line_searcher
    # Tree learners get the loss's dedicated tree line searcher by default.
    if line_searcher is None and base_estimator_is_tree:
        line_searcher = loss.tree_line_searcher
    self._rng = utils.check_random_state(self.random_state)

    # At this point we assume the input data has been checked
    if y.ndim == 1:
        y = y.reshape(-1, 1)

    # Instantiate some training variables
    self.estimators_ = []
    self.line_searchers_ = []
    self.columns_ = []
    self.eval_scores_ = [] if eval_set else None

    # Use init_estimator for the first fit
    self.init_estimator_.fit(X, y)
    y_pred = self.init_estimator_.predict(X)

    # We keep training weak learners until we reach n_estimators or early stopping occurs
    for _ in range(self.n_estimators):

        # If row_sampling is lower than 1 then we're doing stochastic gradient descent
        rows = None
        if self.row_sampling < 1:
            n_rows = int(X.shape[0] * self.row_sampling)
            rows = self._rng.choice(X.shape[0], n_rows, replace=False)

        # If col_sampling is lower than 1 then we only use a subset of the features
        cols = None
        if self.col_sampling < 1:
            n_cols = int(X.shape[1] * self.col_sampling)
            cols = self._rng.choice(X.shape[1], n_cols, replace=False)

        # Subset X
        X_fit = X
        if rows is not None:
            X_fit = X_fit[rows, :]
        if cols is not None:
            X_fit = X_fit[:, cols]

        # Compute the gradients of the loss for the current prediction
        gradients = loss.gradient(y, self._transform_y_pred(y_pred))

        # We will train one weak model per column in y
        estimators = []
        line_searchers = []

        for i, gradient in enumerate(gradients.T):

            # Fit a weak learner to the negative gradient of the loss function
            estimator = base.clone(base_estimator)
            estimator = estimator.fit(X_fit, -gradient if rows is None else -gradient[rows])
            estimators.append(estimator)

            # Estimate the descent direction using the weak learner.
            # NOTE(review): prediction covers all rows of X even when the
            # learner was fit on a row subsample — intentional for SGD.
            direction = estimator.predict(X if cols is None else X[:, cols])

            # Apply line search if a line searcher has been provided
            if line_searcher:
                ls = copy.copy(line_searcher)
                ls = ls.fit(y[:, i], y_pred[:, i], gradient, direction)
                line_searchers.append(ls)
                direction = ls.update(direction)

            # Move the predictions along the estimated descent direction
            y_pred[:, i] += self.learning_rate * direction

        # Store the estimator and the step
        self.estimators_.append(estimators)
        if line_searchers:
            self.line_searchers_.append(line_searchers)
        self.columns_.append(cols)

        # We're now at the end of a round so we can evaluate the model on the validation set
        if not eval_set:
            continue
        X_val, y_val = eval_set
        self.eval_scores_.append(eval_metric(y_val, self.predict(X_val)))

        # Check for early stopping: assumes a lower eval_metric is better
        # — TODO confirm against the metric convention used by `loss`.
        if self.early_stopping_rounds and len(self.eval_scores_) > self.early_stopping_rounds:
            if self.eval_scores_[-1] > self.eval_scores_[-(self.early_stopping_rounds + 1)]:
                break

    return self
[ "def", "fit", "(", "self", ",", "X", ",", "y", ",", "eval_set", "=", "None", ")", ":", "# Verify the input parameters", "base_estimator", "=", "self", ".", "base_estimator", "base_estimator_is_tree", "=", "self", ".", "base_estimator_is_tree", "if", "base_estimato...
Fit a gradient boosting procedure to a dataset. Args: X (array-like or sparse matrix of shape (n_samples, n_features)): The training input samples. Sparse matrices are accepted only if they are supported by the weak model. y (array-like of shape (n_samples,)): The training target values (strings or integers in classification, real numbers in regression). eval_set (tuple of length 2, optional, default=None): The evaluation set is a tuple ``(X_val, y_val)``. It has to respect the same conventions as ``X`` and ``y``. Returns: self
[ "Fit", "a", "gradient", "boosting", "procedure", "to", "a", "dataset", "." ]
python
train
ArchiveTeam/wpull
wpull/protocol/http/web.py
https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/protocol/http/web.py#L192-L196
def _add_cookies(self, request: Request):
    '''Add the cookie headers to the Request.'''
    # The referrer host scopes which cookies apply to this request.
    referrer_host = self._get_cookie_referrer_host()
    self._cookie_jar.add_cookie_header(request, referrer_host)
[ "def", "_add_cookies", "(", "self", ",", "request", ":", "Request", ")", ":", "self", ".", "_cookie_jar", ".", "add_cookie_header", "(", "request", ",", "self", ".", "_get_cookie_referrer_host", "(", ")", ")" ]
Add the cookie headers to the Request.
[ "Add", "the", "cookie", "headers", "to", "the", "Request", "." ]
python
train
codelv/enaml-native
src/enamlnative/ios/app.py
https://github.com/codelv/enaml-native/blob/c33986e9eda468c508806e0a3e73c771401e5718/src/enamlnative/ios/app.py#L34-L41
def _default_objc(self):
    """ Load the objc library using ctypes. """
    objc = ctypes.cdll.LoadLibrary(find_library('objc'))
    # Declare return types up front: ctypes defaults to c_int, which
    # would truncate pointers on 64-bit platforms.
    for symbol in ('objc_getClass', 'sel_registerName', 'objc_msgSend'):
        getattr(objc, symbol).restype = ctypes.c_void_p
    objc.objc_msgSend.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
    return objc
[ "def", "_default_objc", "(", "self", ")", ":", "objc", "=", "ctypes", ".", "cdll", ".", "LoadLibrary", "(", "find_library", "(", "'objc'", ")", ")", "objc", ".", "objc_getClass", ".", "restype", "=", "ctypes", ".", "c_void_p", "objc", ".", "sel_registerNam...
Load the objc library using ctypes.
[ "Load", "the", "objc", "library", "using", "ctypes", "." ]
python
train
chaoss/grimoirelab-perceval
perceval/backends/core/jira.py
https://github.com/chaoss/grimoirelab-perceval/blob/41c908605e88b7ebc3a536c643fa0f212eaf9e0e/perceval/backends/core/jira.py#L65-L80
def filter_custom_fields(fields):
    """Filter custom fields from a given set of fields.

    :param fields: set of fields
    :returns: an object with the filtered custom fields
    """
    # Keep only entries explicitly flagged as custom, keyed by field id.
    return {
        field['id']: field
        for field in fields
        if field['custom'] is True
    }
[ "def", "filter_custom_fields", "(", "fields", ")", ":", "custom_fields", "=", "{", "}", "sorted_fields", "=", "[", "field", "for", "field", "in", "fields", "if", "field", "[", "'custom'", "]", "is", "True", "]", "for", "custom_field", "in", "sorted_fields", ...
Filter custom fields from a given set of fields. :param fields: set of fields :returns: an object with the filtered custom fields
[ "Filter", "custom", "fields", "from", "a", "given", "set", "of", "fields", "." ]
python
test
NoviceLive/intellicoder
intellicoder/msbuild/locators.py
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/msbuild/locators.py#L287-L302
def get_sdk_version(self):
    """Get the version of Windows SDK from VCVarsQueryRegistry.bat."""
    batch = read_file(os.path.join(self.tool_dir, 'VCVarsQueryRegistry.bat'))
    if not batch:
        raise RuntimeError(_('failed to find the SDK version'))
    # The version sits between the registry path prefix and a closing quote.
    match = re.search(r'(?<=\\Microsoft SDKs\\Windows\\).+?(?=")', batch)
    if match is None:
        return ''
    version = match.group()
    logging.debug(_('SDK version: %s'), version)
    return version
[ "def", "get_sdk_version", "(", "self", ")", ":", "name", "=", "'VCVarsQueryRegistry.bat'", "path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "tool_dir", ",", "name", ")", "batch", "=", "read_file", "(", "path", ")", "if", "not", "batch", "...
Get the version of Windows SDK from VCVarsQueryRegistry.bat.
[ "Get", "the", "version", "of", "Windows", "SDK", "from", "VCVarsQueryRegistry", ".", "bat", "." ]
python
train
dagwieers/vmguestlib
vmguestlib.py
https://github.com/dagwieers/vmguestlib/blob/2ba9333a745628cf9e6b4c767427a5bd997a71ad/vmguestlib.py#L315-L322
def GetMemBalloonedMB(self):
    '''Retrieves the amount of memory that has been reclaimed from this
       virtual machine by the vSphere memory balloon driver (also referred
       to as the "vmmemctl" driver).'''
    ballooned = c_uint()
    status = vmGuestLib.VMGuestLib_GetMemBalloonedMB(
        self.handle.value, byref(ballooned))
    if status != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(status)
    return ballooned.value
[ "def", "GetMemBalloonedMB", "(", "self", ")", ":", "counter", "=", "c_uint", "(", ")", "ret", "=", "vmGuestLib", ".", "VMGuestLib_GetMemBalloonedMB", "(", "self", ".", "handle", ".", "value", ",", "byref", "(", "counter", ")", ")", "if", "ret", "!=", "VM...
Retrieves the amount of memory that has been reclaimed from this virtual machine by the vSphere memory balloon driver (also referred to as the "vmmemctl" driver).
[ "Retrieves", "the", "amount", "of", "memory", "that", "has", "been", "reclaimed", "from", "this", "virtual", "machine", "by", "the", "vSphere", "memory", "balloon", "driver", "(", "also", "referred", "to", "as", "the", "vmmemctl", "driver", ")", "." ]
python
train
horazont/aioxmpp
aioxmpp/hashes.py
https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/hashes.py#L141-L175
def hash_from_algo(algo):
    """
    Return a :mod:`hashlib` hash given the :xep:`300` `algo`.

    :param algo: The algorithm identifier as defined in :xep:`300`.
    :type algo: :class:`str`
    :raises NotImplementedError: if the hash algorithm is not supported
        by :mod:`hashlib`.
    :raises ValueError: if the hash algorithm MUST NOT be supported.
    :return: A hash object from :mod:`hashlib` or compatible.
    """
    # Unknown identifier: not a forbidden algorithm, just one we have
    # no mapping for.
    if algo not in _HASH_ALGO_MAP:
        raise NotImplementedError(
            "hash algorithm {!r} unknown".format(algo)
        ) from None
    enabled, (fun_name, fun_args, fun_kwargs) = _HASH_ALGO_MAP[algo]
    # Some algorithms are mapped but explicitly banned for XMPP use.
    if not enabled:
        raise ValueError(
            "support of {} in XMPP is forbidden".format(algo)
        )
    try:
        fun = getattr(hashlib, fun_name)
    except AttributeError as exc:
        raise NotImplementedError(
            "{} not supported by hashlib".format(algo)
        ) from exc
    return fun(*fun_args, **fun_kwargs)
[ "def", "hash_from_algo", "(", "algo", ")", ":", "try", ":", "enabled", ",", "(", "fun_name", ",", "fun_args", ",", "fun_kwargs", ")", "=", "_HASH_ALGO_MAP", "[", "algo", "]", "except", "KeyError", ":", "raise", "NotImplementedError", "(", "\"hash algorithm {!r...
Return a :mod:`hashlib` hash given the :xep:`300` `algo`. :param algo: The algorithm identifier as defined in :xep:`300`. :type algo: :class:`str` :raises NotImplementedError: if the hash algorithm is not supported by :mod:`hashlib`. :raises ValueError: if the hash algorithm MUST NOT be supported. :return: A hash object from :mod:`hashlib` or compatible. If the `algo` is not supported by the :mod:`hashlib` module, :class:`NotImplementedError` is raised.
[ "Return", "a", ":", "mod", ":", "hashlib", "hash", "given", "the", ":", "xep", ":", "300", "algo", "." ]
python
train
DataONEorg/d1_python
lib_common/src/d1_common/wrap/access_policy.py
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/wrap/access_policy.py#L220-L237
def wrap(access_pyxb, read_only=False):
    """Work with the AccessPolicy in a SystemMetadata PyXB object.

    Args:
      access_pyxb : AccessPolicy PyXB object
        The AccessPolicy to modify.

      read_only: bool
        Do not update the wrapped AccessPolicy.

    When only a single AccessPolicy operation is needed, there's no need to
    use this context manager. Instead, use the generated context manager
    wrappers.
    """
    wrapper = AccessPolicyWrapper(access_pyxb)
    yield wrapper
    if read_only:
        return
    # Write the (normalized) policy back into the wrapped object on exit.
    wrapper.get_normalized_pyxb()
[ "def", "wrap", "(", "access_pyxb", ",", "read_only", "=", "False", ")", ":", "w", "=", "AccessPolicyWrapper", "(", "access_pyxb", ")", "yield", "w", "if", "not", "read_only", ":", "w", ".", "get_normalized_pyxb", "(", ")" ]
Work with the AccessPolicy in a SystemMetadata PyXB object. Args: access_pyxb : AccessPolicy PyXB object The AccessPolicy to modify. read_only: bool Do not update the wrapped AccessPolicy. When only a single AccessPolicy operation is needed, there's no need to use this context manager. Instead, use the generated context manager wrappers.
[ "Work", "with", "the", "AccessPolicy", "in", "a", "SystemMetadata", "PyXB", "object", "." ]
python
train
amzn/ion-python
amazon/ion/writer.py
https://github.com/amzn/ion-python/blob/0b21fa3ba7755f55f745e4aa970d86343b82449d/amazon/ion/writer.py#L79-L111
def writer_trampoline(start):
    """Provides the co-routine trampoline for a writer state machine.

    The given co-routine is a state machine that yields :class:`Transition`
    and takes a :class:`Transition` with a
    :class:`amazon.ion.core.IonEvent` and the co-routine itself.

    Notes:
        A writer delimits its logical flush points with
        ``WriteEventType.COMPLETE``, depending on the configuration, a user
        may need to send an ``IonEventType.STREAM_END`` to force this to
        occur.

    Args:
        start: The writer co-routine to initially delegate to.

    Yields:
        DataEvent: the result of serialization.

        Receives :class:`amazon.ion.core.IonEvent` to serialize into
        :class:`DataEvent`.
    """
    trans = Transition(None, start)
    while True:
        ion_event = (yield trans.event)
        if trans.event is None:
            # First resume: the caller must prime the trampoline with a
            # real event.
            if ion_event is None:
                raise TypeError('Cannot start Writer with no event')
        else:
            # HAS_PENDING means buffered output is still being drained, so
            # the caller must send None; in every other state an event is
            # required.
            if trans.event.type is WriteEventType.HAS_PENDING and ion_event is not None:
                raise TypeError('Writer expected to receive no event: %r' % (ion_event,))
            if trans.event.type is not WriteEventType.HAS_PENDING and ion_event is None:
                raise TypeError('Writer expected to receive event')
            # INCOMPLETE is a reader-side event type; writers never accept it.
            if ion_event is not None and ion_event.event_type is IonEventType.INCOMPLETE:
                raise TypeError('Writer cannot receive INCOMPLETE event')
        # Delegate to the current state co-routine; it hands back the next
        # Transition (possibly switching delegates).
        trans = trans.delegate.send(Transition(ion_event, trans.delegate))
[ "def", "writer_trampoline", "(", "start", ")", ":", "trans", "=", "Transition", "(", "None", ",", "start", ")", "while", "True", ":", "ion_event", "=", "(", "yield", "trans", ".", "event", ")", "if", "trans", ".", "event", "is", "None", ":", "if", "i...
Provides the co-routine trampoline for a writer state machine. The given co-routine is a state machine that yields :class:`Transition` and takes a :class:`Transition` with a :class:`amazon.ion.core.IonEvent` and the co-routine itself. Notes: A writer delimits its logical flush points with ``WriteEventType.COMPLETE``, depending on the configuration, a user may need to send an ``IonEventType.STREAM_END`` to force this to occur. Args: start: The writer co-routine to initially delegate to. Yields: DataEvent: the result of serialization. Receives :class:`amazon.ion.core.IonEvent` to serialize into :class:`DataEvent`.
[ "Provides", "the", "co", "-", "routine", "trampoline", "for", "a", "writer", "state", "machine", "." ]
python
train
gem/oq-engine
openquake/hazardlib/gsim/toro_2002.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/toro_2002.py#L76-L97
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
    """
    See :meth:`superclass method
    <.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
    for spec of input and result values.
    """
    assert all(stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
               for stddev_type in stddev_types)

    coeffs = self.COEFFS[imt]
    mean = self._compute_mean(coeffs, rup.mag, dists.rjb)
    stddevs = self._compute_stddevs(coeffs, rup.mag, dists.rjb, imt,
                                    stddev_types)

    # apply decay factor for 3 and 4 seconds (not originally supported
    # by the equations)
    decay = {3.0: 0.612, 4.0: 0.559}.get(imt.period)
    if decay is not None:
        mean /= decay

    return mean, stddevs
[ "def", "get_mean_and_stddevs", "(", "self", ",", "sites", ",", "rup", ",", "dists", ",", "imt", ",", "stddev_types", ")", ":", "assert", "all", "(", "stddev_type", "in", "self", ".", "DEFINED_FOR_STANDARD_DEVIATION_TYPES", "for", "stddev_type", "in", "stddev_typ...
See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values.
[ "See", ":", "meth", ":", "superclass", "method", "<", ".", "base", ".", "GroundShakingIntensityModel", ".", "get_mean_and_stddevs", ">", "for", "spec", "of", "input", "and", "result", "values", "." ]
python
train
koebi/potypo
potypo/check.py
https://github.com/koebi/potypo/blob/5ae58b28dbe3925d687906d71a145a3671e1f31d/potypo/check.py#L42-L89
def get_wordlist(lang, wl_dir, po_path):
    """Locate the spellcheck wordlist for *lang*.

    Search order:
      1. ``<wl_dir>/<lang>.txt`` when *wl_dir* is given.
      2. ``wordlist.txt`` next to the .po file (when *po_path* ends in
         "po" — a translated language).
      3. ``wordlist.txt`` one level above a ``LC_MESSAGES`` directory.
      4. ``wordlist.txt`` inside *po_path* when it is a directory
         (the default language).

    :returns: path of the first wordlist found, or None.

    Note: the original used bare triple-quoted string literals as
    comments (no-op expression statements) and carried commented-out
    debug prints; both replaced with real comments/docstring. Behavior
    is unchanged.
    """
    po_path = os.path.abspath(po_path)

    # A wordlist directory may provide one file per language.
    if wl_dir is not None:
        wl_path = os.path.join(wl_dir, lang + '.txt')
        if os.path.isfile(wl_path):
            return wl_path

    if po_path.endswith("po"):  # translated language
        po_dir = os.path.dirname(po_path)
        for entry in os.scandir(po_dir):
            if entry.name == "wordlist.txt":
                return entry.path

        # .po files may live in <lang>/LC_MESSAGES; look one level up.
        if os.path.basename(po_dir) == "LC_MESSAGES":
            for entry in os.scandir(os.path.join(po_dir, "..")):
                if entry.name == "wordlist.txt":
                    return entry.path

    if os.path.isdir(po_path):  # default language
        for entry in os.scandir(po_path):
            if entry.name == "wordlist.txt":
                return entry.path

    return None
[ "def", "get_wordlist", "(", "lang", ",", "wl_dir", ",", "po_path", ")", ":", "#print(\"Looking for Wordlist in:\\nlang {}\\nwl_dir {}\\npo_path {}\".format(lang, wl_dir, po_path))", "po_path", "=", "os", ".", "path", ".", "abspath", "(", "po_path", ")", "if", "wl_dir", ...
If wl_dir is given, there may be a file called "<lang>.txt". If this is the case, this should be the wordlist we are looking for.
[ "If", "wl_dir", "is", "given", "there", "may", "be", "a", "file", "called", "<lang", ">", ".", "txt", ".", "If", "this", "is", "the", "case", "this", "should", "be", "the", "wordlist", "we", "are", "looking", "for", "." ]
python
train
DaveMcEwan/ndim
ndim_base.py
https://github.com/DaveMcEwan/ndim/blob/f1ea023d3e597160fc1e9e11921de07af659f9d2/ndim_base.py#L361-L377
def pts_scale(pts=None, f=1.0):
    '''Return given points scaled by factor f from origin.

    pts must be a non-empty list of equal-length tuples of floats
    (each with at least 2 coordinates); f must be a float.
    '''
    # Fix: the original default was a shared mutable list (pts=[]).
    # Use None as the sentinel; an omitted pts still fails the same
    # non-empty-list assertion as before.
    if pts is None:
        pts = []
    assert isinstance(pts, list) and len(pts) > 0
    expected_len = None
    for pt in pts:
        assert isinstance(pt, tuple)
        l_pt = len(pt)
        assert l_pt > 1
        for coord in pt:
            assert isinstance(coord, float)
        # All points must share the same dimensionality.
        if expected_len is not None:
            assert l_pt == expected_len
        expected_len = l_pt
    assert isinstance(f, float)
    return [pt_scale(pt, f) for pt in pts]
[ "def", "pts_scale", "(", "pts", "=", "[", "]", ",", "f", "=", "1.0", ")", ":", "assert", "isinstance", "(", "pts", ",", "list", ")", "and", "len", "(", "pts", ")", ">", "0", "l_pt_prev", "=", "None", "for", "pt", "in", "pts", ":", "assert", "is...
Return given points scaled by factor f from origin.
[ "Return", "given", "points", "scaled", "by", "factor", "f", "from", "origin", "." ]
python
train
cqparts/cqparts
src/cqparts/params/utils.py
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/src/cqparts/params/utils.py#L6-L54
def as_parameter(nullable=True, strict=True):
    """
    Decorate a container class as a functional :class:`Parameter` class
    for a :class:`ParametricObject`.

    :param nullable: if set, parameter's value may be Null
    :type nullable: :class:`bool`

    .. doctest::

        >>> from cqparts.params import as_parameter, ParametricObject
        >>> @as_parameter(nullable=True)
        ... class Stuff(object):
        ...     def __init__(self, a=1, b=2, c=3):
        ...         self.a = a
        ...         self.b = b
        ...         self.c = c
        ...     @property
        ...     def abc(self):
        ...         return (self.a, self.b, self.c)
        >>> class Thing(ParametricObject):
        ...     foo = Stuff({'a': 10, 'b': 100}, doc="controls stuff")
        >>> thing = Thing(foo={'a': 20})
        >>> thing.foo.a
        20
        >>> thing.foo.abc
        (20, 2, 3)
    """
    def decorator(cls):
        parent = NonNullParameter if not nullable else Parameter
        # Sphinx doc type string
        doc_type = ":class:`{class_name} <{module}.{class_name}>`".format(
            class_name=cls.__name__, module=__name__
        )
        namespace = {
            # Preserve text for documentation
            '__name__': cls.__name__,
            '__doc__': cls.__doc__,
            '__module__': cls.__module__,
            '_doc_type': doc_type,
            # The wrapped class itself performs the value conversion.
            'type': lambda self, value: cls(**value),
        }
        return type(cls.__name__, (parent,), namespace)

    return decorator
[ "def", "as_parameter", "(", "nullable", "=", "True", ",", "strict", "=", "True", ")", ":", "def", "decorator", "(", "cls", ")", ":", "base_class", "=", "Parameter", "if", "nullable", "else", "NonNullParameter", "return", "type", "(", "cls", ".", "__name__"...
Decorate a container class as a functional :class:`Parameter` class for a :class:`ParametricObject`. :param nullable: if set, parameter's value may be Null :type nullable: :class:`bool` .. doctest:: >>> from cqparts.params import as_parameter, ParametricObject >>> @as_parameter(nullable=True) ... class Stuff(object): ... def __init__(self, a=1, b=2, c=3): ... self.a = a ... self.b = b ... self.c = c ... @property ... def abc(self): ... return (self.a, self.b, self.c) >>> class Thing(ParametricObject): ... foo = Stuff({'a': 10, 'b': 100}, doc="controls stuff") >>> thing = Thing(foo={'a': 20}) >>> thing.foo.a 20 >>> thing.foo.abc (20, 2, 3)
[ "Decorate", "a", "container", "class", "as", "a", "functional", ":", "class", ":", "Parameter", "class", "for", "a", ":", "class", ":", "ParametricObject", "." ]
python
train
junzis/pyModeS
pyModeS/decoder/bds/bds44.py
https://github.com/junzis/pyModeS/blob/8cd5655a04b08171a9ad5f1ffd232b7e0178ea53/pyModeS/decoder/bds/bds44.py#L70-L89
def wind44(msg):
    """Wind speed and direction.

    Args:
        msg (String): 28 bytes hexadecimal message string

    Returns:
        (int, float): speed (kt), direction (degree)
    """
    bits = hex2bin(data(msg))

    # The status bit must be set for the wind fields to be meaningful.
    if not int(bits[4]):
        return None

    speed = bin2int(bits[5:14])                       # knots
    direction = bin2int(bits[14:23]) * 180.0 / 256.0  # degree

    return round(speed, 0), round(direction, 1)
[ "def", "wind44", "(", "msg", ")", ":", "d", "=", "hex2bin", "(", "data", "(", "msg", ")", ")", "status", "=", "int", "(", "d", "[", "4", "]", ")", "if", "not", "status", ":", "return", "None", "speed", "=", "bin2int", "(", "d", "[", "5", ":",...
Wind speed and direction. Args: msg (String): 28 bytes hexadecimal message string Returns: (int, float): speed (kt), direction (degree)
[ "Wind", "speed", "and", "direction", "." ]
python
train
herrjemand/flask-fido-u2f
flask_fido_u2f.py
https://github.com/herrjemand/flask-fido-u2f/blob/23acac4cfe285a33411e8a6bf980b3c345b04feb/flask_fido_u2f.py#L257-L293
def verify_enroll(self, response):
    """Verifies and saves U2F enroll"""
    seed = session.pop('_u2f_enroll_')

    try:
        new_device, cert = complete_register(seed, response,
                                             self.__facets_list)
    except Exception as e:
        if self.__call_fail_enroll:
            self.__call_fail_enroll(e)
        return {
            'status': 'failed',
            'error': 'Invalid key handle!'
        }

    devices = self.__get_u2f_devices()

    # A new device starts with a zeroed usage counter and the next
    # free index after all existing devices.
    new_device['counter'] = 0
    new_device['index'] = 0
    for device in devices:
        if new_device['index'] <= device['index']:
            new_device['index'] = device['index'] + 1

    devices.append(new_device)
    self.__save_u2f_devices(devices)

    self.__call_success_enroll()

    return {'status': 'ok', 'message': 'Successfully enrolled new U2F device!'}
[ "def", "verify_enroll", "(", "self", ",", "response", ")", ":", "seed", "=", "session", ".", "pop", "(", "'_u2f_enroll_'", ")", "try", ":", "new_device", ",", "cert", "=", "complete_register", "(", "seed", ",", "response", ",", "self", ".", "__facets_list"...
Verifies and saves U2F enroll
[ "Verifies", "and", "saves", "U2F", "enroll" ]
python
train
mdgoldberg/sportsref
sportsref/nfl/teams.py
https://github.com/mdgoldberg/sportsref/blob/09f11ac856a23c96d666d1d510bb35d6f050b5c3/sportsref/nfl/teams.py#L367-L377
def opp_stats(self, year):
    """Returns a Series (dict-like) of the team's opponent's stats from the
    team-season page.

    :year: Int representing the season.
    :returns: A Series of team stats.
    """
    year_doc = self.get_year_doc(year)
    stats_table = year_doc('table#team_stats')
    parsed = sportsref.utils.parse_table(stats_table)
    # The opponent row is labeled 'Opp. Stats' in the player_id column.
    opp_rows = parsed.loc[parsed.player_id == 'Opp. Stats']
    return opp_rows.iloc[0]
[ "def", "opp_stats", "(", "self", ",", "year", ")", ":", "doc", "=", "self", ".", "get_year_doc", "(", "year", ")", "table", "=", "doc", "(", "'table#team_stats'", ")", "df", "=", "sportsref", ".", "utils", ".", "parse_table", "(", "table", ")", "return...
Returns a Series (dict-like) of the team's opponent's stats from the team-season page. :year: Int representing the season. :returns: A Series of team stats.
[ "Returns", "a", "Series", "(", "dict", "-", "like", ")", "of", "the", "team", "s", "opponent", "s", "stats", "from", "the", "team", "-", "season", "page", "." ]
python
test
andrewsnowden/dota2py
dota2py/summary.py
https://github.com/andrewsnowden/dota2py/blob/67637f4b9c160ea90c11b7e81545baf350affa7a/dota2py/summary.py#L220-L263
def calculate_kills(self): """ At the end of the game calculate kills/deaths, taking aegis into account This has to be done at the end when we have a playerid : hero map """ aegises = deque(self.aegis) next_aegis = aegises.popleft() if aegises else None aegis_expires = None active_aegis = None real_kills = [] for kill in self.kills: if next_aegis and next_aegis[0] < kill["tick"]: active_aegis = next_aegis[1] aegis_expires = next_aegis[0] + 1800 * 10 # 10 minutes self.indexed_players[active_aegis].aegises += 1 next_aegis = aegises.popleft() if aegises else None elif aegis_expires and kill["tick"] > aegis_expires: active_aegis = None aegis_expires = None source = kill["source"] target = kill["target"] timestamp = kill["timestamp"] if active_aegis == self.heroes[target].index: #This was an aegis kill active_aegis = None aegis_expires = None self.heroes[target].aegis_deaths += 1 else: real_kills.append(kill) self.heroes[target].add_death(source, timestamp) if target != source: #Don't count a deny as a kill self.heroes[source].add_kill(target, timestamp) self.kills = real_kills
[ "def", "calculate_kills", "(", "self", ")", ":", "aegises", "=", "deque", "(", "self", ".", "aegis", ")", "next_aegis", "=", "aegises", ".", "popleft", "(", ")", "if", "aegises", "else", "None", "aegis_expires", "=", "None", "active_aegis", "=", "None", ...
At the end of the game calculate kills/deaths, taking aegis into account This has to be done at the end when we have a playerid : hero map
[ "At", "the", "end", "of", "the", "game", "calculate", "kills", "/", "deaths", "taking", "aegis", "into", "account", "This", "has", "to", "be", "done", "at", "the", "end", "when", "we", "have", "a", "playerid", ":", "hero", "map" ]
python
train
liampauling/betfair
betfairlightweight/endpoints/account.py
https://github.com/liampauling/betfair/blob/8479392eb4849c525d78d43497c32c0bb108e977/betfairlightweight/endpoints/account.py#L30-L43
def get_account_details(self, session=None, lightweight=None): """ Returns the details relating your account, including your discount rate and Betfair point balance. :param requests.session session: Requests session object :param bool lightweight: If True will return dict not a resource :rtype: resources.AccountDetails """ params = clean_locals(locals()) method = '%s%s' % (self.URI, 'getAccountDetails') (response, elapsed_time) = self.request(method, params, session) return self.process_response(response, resources.AccountDetails, elapsed_time, lightweight)
[ "def", "get_account_details", "(", "self", ",", "session", "=", "None", ",", "lightweight", "=", "None", ")", ":", "params", "=", "clean_locals", "(", "locals", "(", ")", ")", "method", "=", "'%s%s'", "%", "(", "self", ".", "URI", ",", "'getAccountDetail...
Returns the details relating your account, including your discount rate and Betfair point balance. :param requests.session session: Requests session object :param bool lightweight: If True will return dict not a resource :rtype: resources.AccountDetails
[ "Returns", "the", "details", "relating", "your", "account", "including", "your", "discount", "rate", "and", "Betfair", "point", "balance", "." ]
python
train
DLR-RM/RAFCON
source/rafcon/gui/controllers/execution_history.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/controllers/execution_history.py#L251-L265
def get_history_item_for_tree_iter(self, child_tree_iter): """Hands history item for tree iter and compensate if tree item is a dummy item :param Gtk.TreeIter child_tree_iter: Tree iter of row :rtype rafcon.core.execution.execution_history.HistoryItem: :return history tree item: """ history_item = self.history_tree_store[child_tree_iter][self.HISTORY_ITEM_STORAGE_ID] if history_item is None: # is dummy item if self.history_tree_store.iter_n_children(child_tree_iter) > 0: child_iter = self.history_tree_store.iter_nth_child(child_tree_iter, 0) history_item = self.history_tree_store[child_iter][self.HISTORY_ITEM_STORAGE_ID] else: logger.debug("In a dummy history should be respective real call element.") return history_item
[ "def", "get_history_item_for_tree_iter", "(", "self", ",", "child_tree_iter", ")", ":", "history_item", "=", "self", ".", "history_tree_store", "[", "child_tree_iter", "]", "[", "self", ".", "HISTORY_ITEM_STORAGE_ID", "]", "if", "history_item", "is", "None", ":", ...
Hands history item for tree iter and compensate if tree item is a dummy item :param Gtk.TreeIter child_tree_iter: Tree iter of row :rtype rafcon.core.execution.execution_history.HistoryItem: :return history tree item:
[ "Hands", "history", "item", "for", "tree", "iter", "and", "compensate", "if", "tree", "item", "is", "a", "dummy", "item" ]
python
train
hatemile/hatemile-for-python
hatemile/util/html/bs/bshtmldomparser.py
https://github.com/hatemile/hatemile-for-python/blob/1e914f9aa09f6f8d78282af131311546ecba9fb8/hatemile/util/html/bs/bshtmldomparser.py#L94-L120
def _fix_data_select(self): """ Replace all hyphens of data attributes for 'aaaaa', to avoid error in search. """ elements = self.document.select('*') for element in elements: attributes = element.attrs.keys() data_attributes = list() for attribute in attributes: if bool(re.findall('^data-', attribute)): data_attributes.append({ 'original': attribute, 'modified': re.sub('data-', 'dataaaaaa', attribute), 'value': element[attribute] }) if data_attributes: auxiliar_element = BeautifulSoupHTMLDOMElement(element) for data_attribute in data_attributes: auxiliar_element.remove_attribute( data_attribute['original'] ) auxiliar_element.set_attribute( data_attribute['modified'], data_attribute['value'] )
[ "def", "_fix_data_select", "(", "self", ")", ":", "elements", "=", "self", ".", "document", ".", "select", "(", "'*'", ")", "for", "element", "in", "elements", ":", "attributes", "=", "element", ".", "attrs", ".", "keys", "(", ")", "data_attributes", "="...
Replace all hyphens of data attributes for 'aaaaa', to avoid error in search.
[ "Replace", "all", "hyphens", "of", "data", "attributes", "for", "aaaaa", "to", "avoid", "error", "in", "search", "." ]
python
train
tchellomello/python-arlo
pyarlo/base_station.py
https://github.com/tchellomello/python-arlo/blob/db70aeb81705309c56ad32bbab1094f6cd146524/pyarlo/base_station.py#L618-L625
def play_track(self, track_id=DEFAULT_TRACK_ID, position=0): """Plays a track at the given position.""" self.publish( action='playTrack', resource='audioPlayback/player', publish_response=False, properties={'trackId': track_id, 'position': position} )
[ "def", "play_track", "(", "self", ",", "track_id", "=", "DEFAULT_TRACK_ID", ",", "position", "=", "0", ")", ":", "self", ".", "publish", "(", "action", "=", "'playTrack'", ",", "resource", "=", "'audioPlayback/player'", ",", "publish_response", "=", "False", ...
Plays a track at the given position.
[ "Plays", "a", "track", "at", "the", "given", "position", "." ]
python
train
thespacedoctor/sherlock
sherlock/database_cleaner.py
https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/database_cleaner.py#L264-L346
def _clean_up_columns( self): """clean up columns .. todo :: - update key arguments values and definitions with defaults - update return values and definitions - update usage examples and text - update docstring text - check sublime snippet exists - clip any useful text to docs mindmap - regenerate the docs and check redendering of this docstring """ self.log.debug('starting the ``_clean_up_columns`` method') sqlQueries = [ "update tcs_helper_catalogue_tables_info set old_table_name = table_name where old_table_name is null;", "update tcs_helper_catalogue_tables_info set version_number = 'stream' where table_name like '%%stream' and version_number is null;", """update tcs_helper_catalogue_tables_info set in_ned = 0 where table_name like '%%stream' and in_ned is null;""", """update tcs_helper_catalogue_tables_info set vizier_link = 0 where table_name like '%%stream' and vizier_link is null;""", "update tcs_helper_catalogue_views_info set old_view_name = view_name where old_view_name is null;", ] for sqlQuery in sqlQueries: writequery( log=self.log, sqlQuery=sqlQuery, dbConn=self.cataloguesDbConn, ) # VIEW OBJECT TYPES sqlQuery = u""" SELECT view_name FROM tcs_helper_catalogue_views_info where legacy_view = 0 and object_type is null; """ % locals() rows = readquery( log=self.log, sqlQuery=sqlQuery, dbConn=self.cataloguesDbConn, quiet=False ) for row in rows: view_name = row["view_name"] object_type = view_name.replace("tcs_view_", "").split("_")[0] sqlQuery = u""" update tcs_helper_catalogue_views_info set object_type = "%(object_type)s" where view_name = "%(view_name)s" """ % locals() writequery( log=self.log, sqlQuery=sqlQuery, dbConn=self.cataloguesDbConn, ) # MASTER TABLE ID FOR VIEWS sqlQuery = u""" SELECT view_name FROM tcs_helper_catalogue_views_info where legacy_view = 0 and table_id is null; """ % locals() rows = readquery( log=self.log, sqlQuery=sqlQuery, dbConn=self.cataloguesDbConn, quiet=False ) for row in rows: view_name = row["view_name"] table_name = 
view_name.replace("tcs_view_", "").split("_")[1:] table_name = ("_").join(table_name) table_name = "tcs_cat_%(table_name)s" % locals() sqlQuery = u""" update tcs_helper_catalogue_views_info set table_id = (select id from tcs_helper_catalogue_tables_info where table_name = "%(table_name)s") where view_name = "%(view_name)s" """ % locals() writequery( log=self.log, sqlQuery=sqlQuery, dbConn=self.cataloguesDbConn, ) self.log.debug('completed the ``_clean_up_columns`` method') return None
[ "def", "_clean_up_columns", "(", "self", ")", ":", "self", ".", "log", ".", "debug", "(", "'starting the ``_clean_up_columns`` method'", ")", "sqlQueries", "=", "[", "\"update tcs_helper_catalogue_tables_info set old_table_name = table_name where old_table_name is null;\"", ",", ...
clean up columns .. todo :: - update key arguments values and definitions with defaults - update return values and definitions - update usage examples and text - update docstring text - check sublime snippet exists - clip any useful text to docs mindmap - regenerate the docs and check redendering of this docstring
[ "clean", "up", "columns" ]
python
train
DataBiosphere/dsub
dsub/commands/dstat.py
https://github.com/DataBiosphere/dsub/blob/443ce31daa6023dc2fd65ef2051796e19d18d5a7/dsub/commands/dstat.py#L345-L450
def _parse_arguments(): """Parses command line arguments. Returns: A Namespace of parsed arguments. """ # Handle version flag and exit if it was passed. param_util.handle_version_flag() parser = provider_base.create_parser(sys.argv[0]) parser.add_argument( '--version', '-v', default=False, help='Print the dsub version and exit.') parser.add_argument( '--jobs', '-j', nargs='*', help='A list of jobs IDs on which to check status') parser.add_argument( '--names', '-n', nargs='*', help='A list of job names on which to check status') parser.add_argument( '--tasks', '-t', nargs='*', help='A list of task IDs on which to check status') parser.add_argument( '--attempts', nargs='*', help='A list of task attempts on which to check status') parser.add_argument( '--users', '-u', nargs='*', default=[], help="""Lists only those jobs which were submitted by the list of users. Use "*" to list jobs of any user.""") parser.add_argument( '--status', '-s', nargs='*', default=['RUNNING'], choices=['RUNNING', 'SUCCESS', 'FAILURE', 'CANCELED', '*'], help="""Lists only those jobs which match the specified status(es). Choose from {'RUNNING', 'SUCCESS', 'FAILURE', 'CANCELED'}. Use "*" to list jobs of any status.""", metavar='STATUS') parser.add_argument( '--age', help="""List only those jobs newer than the specified age. Ages can be listed using a number followed by a unit. Supported units are s (seconds), m (minutes), h (hours), d (days), w (weeks). For example: '7d' (7 days). Bare numbers are treated as UTC.""") parser.add_argument( '--label', nargs='*', action=param_util.ListParamAction, default=[], help='User labels to match. 
Tasks returned must match all labels.', metavar='KEY=VALUE') parser.add_argument( '--poll-interval', default=10, type=int, help='Polling interval (in seconds) for checking job status ' 'when --wait is set.') parser.add_argument( '--wait', action='store_true', help='Wait until jobs have all completed.') parser.add_argument( '--limit', default=0, type=int, help='The maximum number of tasks to list. The default is unlimited.') parser.add_argument( '--format', choices=['text', 'json', 'yaml', 'provider-json'], help='Set the output format.') output_style = parser.add_mutually_exclusive_group() output_style.add_argument( '--full', '-f', action='store_true', help='Display output with full task information' ' and input parameters.') output_style.add_argument( '--summary', action='store_true', help='Display a summary of the results, grouped by (job, status).') # Shared arguments between the "google" and "google-v2" providers google_common = parser.add_argument_group( title='google-common', description='Options common to the "google" and "google-v2" providers') google_common.add_argument( '--project', help='Cloud project ID in which to find and delete the job(s)') return provider_base.parse_args( parser, { 'google': ['project'], 'google-v2': ['project'], 'test-fails': [], 'local': [], }, sys.argv[1:])
[ "def", "_parse_arguments", "(", ")", ":", "# Handle version flag and exit if it was passed.", "param_util", ".", "handle_version_flag", "(", ")", "parser", "=", "provider_base", ".", "create_parser", "(", "sys", ".", "argv", "[", "0", "]", ")", "parser", ".", "add...
Parses command line arguments. Returns: A Namespace of parsed arguments.
[ "Parses", "command", "line", "arguments", "." ]
python
valid
ArduPilot/MAVProxy
MAVProxy/modules/mavproxy_link.py
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_link.py#L169-L179
def parse_link_descriptor(self, descriptor): '''parse e.g. 'udpin:127.0.0.1:9877:{"foo":"bar"}' into python structure ("udpin:127.0.0.1:9877", {"foo":"bar"})''' optional_attributes = {} link_components = descriptor.split(":{", 1) device = link_components[0] if (len(link_components) == 2 and link_components[1].endswith("}")): # assume json some_json = "{" + link_components[1] optional_attributes = self.parse_link_attributes(some_json) return (device, optional_attributes)
[ "def", "parse_link_descriptor", "(", "self", ",", "descriptor", ")", ":", "optional_attributes", "=", "{", "}", "link_components", "=", "descriptor", ".", "split", "(", "\":{\"", ",", "1", ")", "device", "=", "link_components", "[", "0", "]", "if", "(", "l...
parse e.g. 'udpin:127.0.0.1:9877:{"foo":"bar"}' into python structure ("udpin:127.0.0.1:9877", {"foo":"bar"})
[ "parse", "e", ".", "g", ".", "udpin", ":", "127", ".", "0", ".", "0", ".", "1", ":", "9877", ":", "{", "foo", ":", "bar", "}", "into", "python", "structure", "(", "udpin", ":", "127", ".", "0", ".", "0", ".", "1", ":", "9877", "{", "foo", ...
python
train
googlefonts/fontbakery
snippets/fontbakery-check-upstream.py
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/snippets/fontbakery-check-upstream.py#L40-L50
def download_fonts(gh_url, dst): """Download fonts from a github dir""" font_paths = [] r = requests.get(gh_url) for item in r.json(): if item['name'].endswith(".ttf"): f = item['download_url'] dl_path = os.path.join(dst, os.path.basename(f)) download_file(f, dl_path) font_paths.append(dl_path) return font_paths
[ "def", "download_fonts", "(", "gh_url", ",", "dst", ")", ":", "font_paths", "=", "[", "]", "r", "=", "requests", ".", "get", "(", "gh_url", ")", "for", "item", "in", "r", ".", "json", "(", ")", ":", "if", "item", "[", "'name'", "]", ".", "endswit...
Download fonts from a github dir
[ "Download", "fonts", "from", "a", "github", "dir" ]
python
train
Titan-C/slaveparticles
examples/spins/plot_deg_2orb_fill.py
https://github.com/Titan-C/slaveparticles/blob/e4c2f5afb1a7b195517ef2f1b5cc758965036aab/examples/spins/plot_deg_2orb_fill.py#L29-L31
def restriction(lam, mu, orbitals, U, beta): """Equation that determines the restriction on lagrange multipier""" return 2*orbitals*fermi_dist(-(mu + lam), beta) - expected_filling(-1*lam, orbitals, U, beta)
[ "def", "restriction", "(", "lam", ",", "mu", ",", "orbitals", ",", "U", ",", "beta", ")", ":", "return", "2", "*", "orbitals", "*", "fermi_dist", "(", "-", "(", "mu", "+", "lam", ")", ",", "beta", ")", "-", "expected_filling", "(", "-", "1", "*",...
Equation that determines the restriction on lagrange multipier
[ "Equation", "that", "determines", "the", "restriction", "on", "lagrange", "multipier" ]
python
train
ariebovenberg/snug
examples/github/query.py
https://github.com/ariebovenberg/snug/blob/4f5cd30e6b7b2c3f0ad3cc10be865bd8900b38ef/examples/github/query.py#L68-L76
def parse(response): """check for errors""" if response.status_code == 400: try: msg = json.loads(response.content)['message'] except (KeyError, ValueError): msg = '' raise ApiError(msg) return response
[ "def", "parse", "(", "response", ")", ":", "if", "response", ".", "status_code", "==", "400", ":", "try", ":", "msg", "=", "json", ".", "loads", "(", "response", ".", "content", ")", "[", "'message'", "]", "except", "(", "KeyError", ",", "ValueError", ...
check for errors
[ "check", "for", "errors" ]
python
train
gwpy/gwpy
gwpy/table/filter.py
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/table/filter.py#L58-L66
def _float_or_str(value): """Internal method to attempt `float(value)` handling a `ValueError` """ # remove any surrounding quotes value = QUOTE_REGEX.sub('', value) try: # attempt `float()` conversion return float(value) except ValueError: # just return the input return value
[ "def", "_float_or_str", "(", "value", ")", ":", "# remove any surrounding quotes", "value", "=", "QUOTE_REGEX", ".", "sub", "(", "''", ",", "value", ")", "try", ":", "# attempt `float()` conversion", "return", "float", "(", "value", ")", "except", "ValueError", ...
Internal method to attempt `float(value)` handling a `ValueError`
[ "Internal", "method", "to", "attempt", "float", "(", "value", ")", "handling", "a", "ValueError" ]
python
train
saltstack/salt
salt/modules/rabbitmq.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/rabbitmq.py#L259-L271
def user_exists(name, runas=None): ''' Return whether the user exists based on rabbitmqctl list_users. CLI Example: .. code-block:: bash salt '*' rabbitmq.user_exists rabbit_user ''' if runas is None and not salt.utils.platform.is_windows(): runas = salt.utils.user.get_user() return name in list_users(runas=runas)
[ "def", "user_exists", "(", "name", ",", "runas", "=", "None", ")", ":", "if", "runas", "is", "None", "and", "not", "salt", ".", "utils", ".", "platform", ".", "is_windows", "(", ")", ":", "runas", "=", "salt", ".", "utils", ".", "user", ".", "get_u...
Return whether the user exists based on rabbitmqctl list_users. CLI Example: .. code-block:: bash salt '*' rabbitmq.user_exists rabbit_user
[ "Return", "whether", "the", "user", "exists", "based", "on", "rabbitmqctl", "list_users", "." ]
python
train
MisterWil/abodepy
abodepy/devices/binary_sensor.py
https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/devices/binary_sensor.py#L11-L20
def is_on(self): """ Get sensor state. Assume offline or open (worst case). """ if self._type == 'Occupancy': return self.status not in CONST.STATUS_ONLINE return self.status not in (CONST.STATUS_OFF, CONST.STATUS_OFFLINE, CONST.STATUS_CLOSED)
[ "def", "is_on", "(", "self", ")", ":", "if", "self", ".", "_type", "==", "'Occupancy'", ":", "return", "self", ".", "status", "not", "in", "CONST", ".", "STATUS_ONLINE", "return", "self", ".", "status", "not", "in", "(", "CONST", ".", "STATUS_OFF", ","...
Get sensor state. Assume offline or open (worst case).
[ "Get", "sensor", "state", "." ]
python
train
CalebBell/fluids
fluids/fittings.py
https://github.com/CalebBell/fluids/blob/57f556752e039f1d3e5a822f408c184783db2828/fluids/fittings.py#L2632-L2679
def diffuser_curved(Di1, Di2, l): r'''Returns loss coefficient for any curved wall pipe expansion as shown in [1]_. .. math:: K_1 = \phi(1.43-1.3\beta^2)(1-\beta^2)^2 .. math:: \phi = 1.01 - 0.624\frac{l}{d_1} + 0.30\left(\frac{l}{d_1}\right)^2 - 0.074\left(\frac{l}{d_1}\right)^3 + 0.0070\left(\frac{l}{d_1}\right)^4 .. figure:: fittings/curved_wall_diffuser.png :scale: 25 % :alt: diffuser curved; after [1]_ Parameters ---------- Di1 : float Inside diameter of original pipe (smaller), [m] Di2 : float Inside diameter of following pipe (larger), [m] l : float Length of the curve along the pipe axis, [m] Returns ------- K : float Loss coefficient [-] Notes ----- Beta^2 should be between 0.1 and 0.9. A small mismatch between tabulated values of this function in table 11.3 is observed with the equation presented. Examples -------- >>> diffuser_curved(Di1=.25**0.5, Di2=1., l=2.) 0.2299781250000002 References ---------- .. [1] Rennels, Donald C., and Hobart M. Hudson. Pipe Flow: A Practical and Comprehensive Guide. 1st edition. Hoboken, N.J: Wiley, 2012. ''' beta = Di1/Di2 phi = 1.01 - 0.624*l/Di1 + 0.30*(l/Di1)**2 - 0.074*(l/Di1)**3 + 0.0070*(l/Di1)**4 return phi*(1.43 - 1.3*beta**2)*(1 - beta**2)**2
[ "def", "diffuser_curved", "(", "Di1", ",", "Di2", ",", "l", ")", ":", "beta", "=", "Di1", "/", "Di2", "phi", "=", "1.01", "-", "0.624", "*", "l", "/", "Di1", "+", "0.30", "*", "(", "l", "/", "Di1", ")", "**", "2", "-", "0.074", "*", "(", "l...
r'''Returns loss coefficient for any curved wall pipe expansion as shown in [1]_. .. math:: K_1 = \phi(1.43-1.3\beta^2)(1-\beta^2)^2 .. math:: \phi = 1.01 - 0.624\frac{l}{d_1} + 0.30\left(\frac{l}{d_1}\right)^2 - 0.074\left(\frac{l}{d_1}\right)^3 + 0.0070\left(\frac{l}{d_1}\right)^4 .. figure:: fittings/curved_wall_diffuser.png :scale: 25 % :alt: diffuser curved; after [1]_ Parameters ---------- Di1 : float Inside diameter of original pipe (smaller), [m] Di2 : float Inside diameter of following pipe (larger), [m] l : float Length of the curve along the pipe axis, [m] Returns ------- K : float Loss coefficient [-] Notes ----- Beta^2 should be between 0.1 and 0.9. A small mismatch between tabulated values of this function in table 11.3 is observed with the equation presented. Examples -------- >>> diffuser_curved(Di1=.25**0.5, Di2=1., l=2.) 0.2299781250000002 References ---------- .. [1] Rennels, Donald C., and Hobart M. Hudson. Pipe Flow: A Practical and Comprehensive Guide. 1st edition. Hoboken, N.J: Wiley, 2012.
[ "r", "Returns", "loss", "coefficient", "for", "any", "curved", "wall", "pipe", "expansion", "as", "shown", "in", "[", "1", "]", "_", "." ]
python
train
Stranger6667/pyoffers
pyoffers/utils.py
https://github.com/Stranger6667/pyoffers/blob/9575d6cdc878096242268311a22cc5fdd4f64b37/pyoffers/utils.py#L49-L65
def prepend_model(self, value, model): """ Prepends model name if it is not already prepended. For example model is "Offer": key -> Offer.key -key -> -Offer.key Offer.key -> Offer.key -Offer.key -> -Offer.key """ if '.' not in value: direction = '' if value.startswith('-'): value = value[1:] direction = '-' value = '%s%s.%s' % (direction, model, value) return value
[ "def", "prepend_model", "(", "self", ",", "value", ",", "model", ")", ":", "if", "'.'", "not", "in", "value", ":", "direction", "=", "''", "if", "value", ".", "startswith", "(", "'-'", ")", ":", "value", "=", "value", "[", "1", ":", "]", "direction...
Prepends model name if it is not already prepended. For example model is "Offer": key -> Offer.key -key -> -Offer.key Offer.key -> Offer.key -Offer.key -> -Offer.key
[ "Prepends", "model", "name", "if", "it", "is", "not", "already", "prepended", ".", "For", "example", "model", "is", "Offer", ":" ]
python
train
lambdalisue/django-roughpages
src/roughpages/backends/auth.py
https://github.com/lambdalisue/django-roughpages/blob/f6a2724ece729c5deced2c2546d172561ef785ec/src/roughpages/backends/auth.py#L16-L70
def prepare_filenames(self, normalized_url, request): """ Prepare template filename list based on the user authenticated state If user is authenticated user, it use '_authenticated' as a suffix. Otherwise it use '_anonymous' as a suffix to produce the template filename list. The list include original filename at the end of the list. Args: normalized_url (str): A normalized url request (instance): An instance of HttpRequest Returns: list Examples: >>> from mock import MagicMock >>> request = MagicMock() >>> backend = AuthTemplateFilenameBackend() >>> request.user.is_authenticated.return_value = True >>> filenames = backend.prepare_filenames('foo/bar/hogehoge', ... request) >>> assert filenames == [ ... 'foo/bar/hogehoge_authenticated.html', ... 'foo/bar/hogehoge.html' ... ] >>> request.user.is_authenticated.return_value = False >>> filenames = backend.prepare_filenames('foo/bar/hogehoge', ... request) >>> assert filenames == [ ... 'foo/bar/hogehoge_anonymous.html', ... 'foo/bar/hogehoge.html' ... ] >>> request.user.is_authenticated.return_value = True >>> filenames = backend.prepare_filenames('', ... request) >>> assert filenames == [ ... 'index_authenticated.html', ... 'index.html' ... ] >>> request.user.is_authenticated.return_value = False >>> filenames = backend.prepare_filenames('', ... request) >>> assert filenames == [ ... 'index_anonymous.html', ... 'index.html' ... ] """ filenames = [normalized_url] if request.user.is_authenticated(): filenames.insert(0, normalized_url + ".authenticated") else: filenames.insert(0, normalized_url + ".anonymous") return filenames
[ "def", "prepare_filenames", "(", "self", ",", "normalized_url", ",", "request", ")", ":", "filenames", "=", "[", "normalized_url", "]", "if", "request", ".", "user", ".", "is_authenticated", "(", ")", ":", "filenames", ".", "insert", "(", "0", ",", "normal...
Prepare template filename list based on the user authenticated state If user is authenticated user, it use '_authenticated' as a suffix. Otherwise it use '_anonymous' as a suffix to produce the template filename list. The list include original filename at the end of the list. Args: normalized_url (str): A normalized url request (instance): An instance of HttpRequest Returns: list Examples: >>> from mock import MagicMock >>> request = MagicMock() >>> backend = AuthTemplateFilenameBackend() >>> request.user.is_authenticated.return_value = True >>> filenames = backend.prepare_filenames('foo/bar/hogehoge', ... request) >>> assert filenames == [ ... 'foo/bar/hogehoge_authenticated.html', ... 'foo/bar/hogehoge.html' ... ] >>> request.user.is_authenticated.return_value = False >>> filenames = backend.prepare_filenames('foo/bar/hogehoge', ... request) >>> assert filenames == [ ... 'foo/bar/hogehoge_anonymous.html', ... 'foo/bar/hogehoge.html' ... ] >>> request.user.is_authenticated.return_value = True >>> filenames = backend.prepare_filenames('', ... request) >>> assert filenames == [ ... 'index_authenticated.html', ... 'index.html' ... ] >>> request.user.is_authenticated.return_value = False >>> filenames = backend.prepare_filenames('', ... request) >>> assert filenames == [ ... 'index_anonymous.html', ... 'index.html' ... ]
[ "Prepare", "template", "filename", "list", "based", "on", "the", "user", "authenticated", "state" ]
python
train
istresearch/scrapy-cluster
utils/scutils/redis_queue.py
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/utils/scutils/redis_queue.py#L35-L44
def _encode_item(self, item): ''' Encode an item object @requires: The object be serializable ''' if self.encoding.__name__ == 'pickle': return self.encoding.dumps(item, protocol=-1) else: return self.encoding.dumps(item)
[ "def", "_encode_item", "(", "self", ",", "item", ")", ":", "if", "self", ".", "encoding", ".", "__name__", "==", "'pickle'", ":", "return", "self", ".", "encoding", ".", "dumps", "(", "item", ",", "protocol", "=", "-", "1", ")", "else", ":", "return"...
Encode an item object @requires: The object be serializable
[ "Encode", "an", "item", "object" ]
python
train
google/grr
grr/server/grr_response_server/databases/mysql_users.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/databases/mysql_users.py#L137-L143
def DeleteGRRUser(self, username, cursor=None): """Deletes the user and all related metadata with the given username.""" cursor.execute("DELETE FROM grr_users WHERE username_hash = %s", (mysql_utils.Hash(username),)) if cursor.rowcount == 0: raise db.UnknownGRRUserError(username)
[ "def", "DeleteGRRUser", "(", "self", ",", "username", ",", "cursor", "=", "None", ")", ":", "cursor", ".", "execute", "(", "\"DELETE FROM grr_users WHERE username_hash = %s\"", ",", "(", "mysql_utils", ".", "Hash", "(", "username", ")", ",", ")", ")", "if", ...
Deletes the user and all related metadata with the given username.
[ "Deletes", "the", "user", "and", "all", "related", "metadata", "with", "the", "given", "username", "." ]
python
train
LonamiWebs/Telethon
telethon/client/messages.py
https://github.com/LonamiWebs/Telethon/blob/1ead9757d366b58c1e0567cddb0196e20f1a445f/telethon/client/messages.py#L459-L610
async def send_message( self, entity, message='', *, reply_to=None, parse_mode=(), link_preview=True, file=None, force_document=False, clear_draft=False, buttons=None, silent=None): """ Sends the given message to the specified entity (user/chat/channel). The default parse mode is the same as the official applications (a custom flavour of markdown). ``**bold**, `code` or __italic__`` are available. In addition you can send ``[links](https://example.com)`` and ``[mentions](@username)`` (or using IDs like in the Bot API: ``[mention](tg://user?id=123456789)``) and ``pre`` blocks with three backticks. Sending a ``/start`` command with a parameter (like ``?start=data``) is also done through this method. Simply send ``'/start data'`` to the bot. Args: entity (`entity`): To who will it be sent. message (`str` | `Message <telethon.tl.custom.message.Message>`): The message to be sent, or another message object to resend. The maximum length for a message is 35,000 bytes or 4,096 characters. Longer messages will not be sliced automatically, and you should slice them manually if the text to send is longer than said length. reply_to (`int` | `Message <telethon.tl.custom.message.Message>`, optional): Whether to reply to a message or not. If an integer is provided, it should be the ID of the message that it should reply to. parse_mode (`object`, optional): See the `TelegramClient.parse_mode <telethon.client.messageparse.MessageParseMethods.parse_mode>` property for allowed values. Markdown parsing will be used by default. link_preview (`bool`, optional): Should the link preview be shown? file (`file`, optional): Sends a message with a file attached (e.g. a photo, video, audio or document). The ``message`` may be empty. force_document (`bool`, optional): Whether to send the given file as a document or not. clear_draft (`bool`, optional): Whether the existing draft should be cleared or not. Has no effect when sending a file. 
buttons (`list`, `custom.Button <telethon.tl.custom.button.Button>`, :tl:`KeyboardButton`): The matrix (list of lists), row list or button to be shown after sending the message. This parameter will only work if you have signed in as a bot. You can also pass your own :tl:`ReplyMarkup` here. All the following limits apply together: * There can be 100 buttons at most (any more are ignored). * There can be 8 buttons per row at most (more are ignored). * The maximum callback data per button is 64 bytes. * The maximum data that can be embedded in total is just over 4KB, shared between inline callback data and text. silent (`bool`, optional): Whether the message should notify people in a broadcast channel or not. Defaults to ``False``, which means it will notify them. Set it to ``True`` to alter this behaviour. Returns: The sent `custom.Message <telethon.tl.custom.message.Message>`. """ if file is not None: return await self.send_file( entity, file, caption=message, reply_to=reply_to, parse_mode=parse_mode, force_document=force_document, buttons=buttons ) elif not message: raise ValueError( 'The message cannot be empty unless a file is provided' ) entity = await self.get_input_entity(entity) if isinstance(message, types.Message): if buttons is None: markup = message.reply_markup else: markup = self.build_reply_markup(buttons) if silent is None: silent = message.silent if (message.media and not isinstance( message.media, types.MessageMediaWebPage)): return await self.send_file( entity, message.media, caption=message.message, silent=silent, reply_to=reply_to, buttons=markup, entities=message.entities ) request = functions.messages.SendMessageRequest( peer=entity, message=message.message or '', silent=silent, reply_to_msg_id=utils.get_message_id(reply_to), reply_markup=markup, entities=message.entities, clear_draft=clear_draft, no_webpage=not isinstance( message.media, types.MessageMediaWebPage) ) message = message.message else: message, msg_ent = await 
self._parse_message_text(message, parse_mode) request = functions.messages.SendMessageRequest( peer=entity, message=message, entities=msg_ent, no_webpage=not link_preview, reply_to_msg_id=utils.get_message_id(reply_to), clear_draft=clear_draft, silent=silent, reply_markup=self.build_reply_markup(buttons) ) result = await self(request) if isinstance(result, types.UpdateShortSentMessage): message = types.Message( id=result.id, to_id=utils.get_peer(entity), message=message, date=result.date, out=result.out, media=result.media, entities=result.entities, reply_markup=request.reply_markup ) message._finish_init(self, {}, entity) return message return self._get_response_message(request, result, entity)
[ "async", "def", "send_message", "(", "self", ",", "entity", ",", "message", "=", "''", ",", "*", ",", "reply_to", "=", "None", ",", "parse_mode", "=", "(", ")", ",", "link_preview", "=", "True", ",", "file", "=", "None", ",", "force_document", "=", "...
Sends the given message to the specified entity (user/chat/channel). The default parse mode is the same as the official applications (a custom flavour of markdown). ``**bold**, `code` or __italic__`` are available. In addition you can send ``[links](https://example.com)`` and ``[mentions](@username)`` (or using IDs like in the Bot API: ``[mention](tg://user?id=123456789)``) and ``pre`` blocks with three backticks. Sending a ``/start`` command with a parameter (like ``?start=data``) is also done through this method. Simply send ``'/start data'`` to the bot. Args: entity (`entity`): To who will it be sent. message (`str` | `Message <telethon.tl.custom.message.Message>`): The message to be sent, or another message object to resend. The maximum length for a message is 35,000 bytes or 4,096 characters. Longer messages will not be sliced automatically, and you should slice them manually if the text to send is longer than said length. reply_to (`int` | `Message <telethon.tl.custom.message.Message>`, optional): Whether to reply to a message or not. If an integer is provided, it should be the ID of the message that it should reply to. parse_mode (`object`, optional): See the `TelegramClient.parse_mode <telethon.client.messageparse.MessageParseMethods.parse_mode>` property for allowed values. Markdown parsing will be used by default. link_preview (`bool`, optional): Should the link preview be shown? file (`file`, optional): Sends a message with a file attached (e.g. a photo, video, audio or document). The ``message`` may be empty. force_document (`bool`, optional): Whether to send the given file as a document or not. clear_draft (`bool`, optional): Whether the existing draft should be cleared or not. Has no effect when sending a file. buttons (`list`, `custom.Button <telethon.tl.custom.button.Button>`, :tl:`KeyboardButton`): The matrix (list of lists), row list or button to be shown after sending the message. This parameter will only work if you have signed in as a bot. 
You can also pass your own :tl:`ReplyMarkup` here. All the following limits apply together: * There can be 100 buttons at most (any more are ignored). * There can be 8 buttons per row at most (more are ignored). * The maximum callback data per button is 64 bytes. * The maximum data that can be embedded in total is just over 4KB, shared between inline callback data and text. silent (`bool`, optional): Whether the message should notify people in a broadcast channel or not. Defaults to ``False``, which means it will notify them. Set it to ``True`` to alter this behaviour. Returns: The sent `custom.Message <telethon.tl.custom.message.Message>`.
[ "Sends", "the", "given", "message", "to", "the", "specified", "entity", "(", "user", "/", "chat", "/", "channel", ")", "." ]
python
train
CalebBell/ht
ht/hx.py
https://github.com/CalebBell/ht/blob/3097ef9524c4cf0068ad453c17b10ec9ce551eee/ht/hx.py#L1212-L1350
def temperature_effectiveness_basic(R1, NTU1, subtype='crossflow'): r'''Returns temperature effectiveness `P1` of a heat exchanger with a specified heat capacity ratio, number of transfer units `NTU1`, and of type `subtype`. This function performs the calculations for the basic cases, not actual shell-and-tube exchangers. The supported cases are as follows: * Counterflow (ex. double-pipe) * Parallel (ex. double pipe inefficient configuration) * Crossflow, single pass, fluids unmixed * Crossflow, single pass, fluid 1 mixed, fluid 2 unmixed * Crossflow, single pass, fluid 2 mixed, fluid 1 unmixed * Crossflow, single pass, both fluids mixed For parallel flow heat exchangers (this configuration is symmetric): .. math:: P_1 = \frac{1 - \exp[-NTU_1(1+R_1)]}{1 + R_1} For counterflow heat exchangers (this configuration is symmetric): .. math:: P_1 = \frac{1 - \exp[-NTU_1(1-R_1)]}{1 - R_1 \exp[-NTU_1(1-R_1)]} For cross-flow (single-pass) heat exchangers with both fluids unmixed (this configuration is symmetric), there are two solutions available; a frequently cited approximation and an exact solution which uses a numerical integration developed in [4]_. The approximate solution is: .. math:: P_1 \approx 1 - \exp\left[\frac{NTU_1^{0.22}}{R_1} (\exp(-R_1 NTU_1^{0.78})-1)\right] The exact solution for crossflow (single pass, fluids unmixed) is: .. math:: \epsilon = \frac{1}{R_1} - \frac{\exp(-R_1 \cdot NTU_1)}{2(R_1 NTU_1)^2} \int_0^{2 NTU_1\sqrt{R_1}} \left(1 + NTU_1 - \frac{v^2}{4R_1 NTU_1} \right)\exp\left(-\frac{v^2}{4R_1 NTU_1}\right)v I_0(v) dv For cross-flow (single-pass) heat exchangers with fluid 1 mixed, fluid 2 unmixed: .. math:: P_1 = 1 - \exp\left(-\frac{K}{R_1}\right) K = 1 - \exp(-R_1 NTU_1) For cross-flow (single-pass) heat exchangers with fluid 2 mixed, fluid 1 unmixed: .. math:: P_1 = \frac{1 - \exp(-K R_1)}{R_1} K = 1 - \exp(-NTU_1) For cross-flow (single-pass) heat exchangers with both fluids mixed (this configuration is symmetric): .. 
math:: P_1 = \left(\frac{1}{K_1} + \frac{R_1}{K_2} - \frac{1}{NTU_1}\right)^{-1} K_1 = 1 - \exp(-NTU_1) K_2 = 1 - \exp(-R_1 NTU_1) Parameters ---------- R1 : float Heat capacity ratio of the heat exchanger in the P-NTU method, calculated with respect to stream 1 [-] NTU1 : float Thermal number of transfer units of the heat exchanger in the P-NTU method, calculated with respect to stream 1 [-] subtype : float The type of heat exchanger; one of 'counterflow', 'parallel', 'crossflow', 'crossflow approximate', 'crossflow, mixed 1', 'crossflow, mixed 2', 'crossflow, mixed 1&2'. Returns ------- P1 : float Thermal effectiveness of the heat exchanger in the P-NTU method, calculated with respect to stream 1 [-] Notes ----- The crossflow case is an approximation only. There is an actual solution involving an infinite sum. This was implemented, but found to differ substantially so the approximation is used instead. Examples -------- >>> temperature_effectiveness_basic(R1=.1, NTU1=4, subtype='counterflow') 0.9753412729761263 References ---------- .. [1] Shah, Ramesh K., and Dusan P. Sekulic. Fundamentals of Heat Exchanger Design. 1st edition. Hoboken, NJ: Wiley, 2002. .. [2] Thulukkanam, Kuppan. Heat Exchanger Design Handbook, Second Edition. CRC Press, 2013. .. [3] Rohsenow, Warren and James Hartnett and Young Cho. Handbook of Heat Transfer, 3E. New York: McGraw-Hill, 1998. .. [4] Triboix, Alain. "Exact and Approximate Formulas for Cross Flow Heat Exchangers with Unmixed Fluids." International Communications in Heat and Mass Transfer 36, no. 2 (February 1, 2009): 121-24. doi:10.1016/j.icheatmasstransfer.2008.10.012. 
''' if subtype == 'counterflow': # Same as TEMA 1 pass P1 = (1 - exp(-NTU1*(1 - R1)))/(1 - R1*exp(-NTU1*(1-R1))) elif subtype == 'parallel': P1 = (1 - exp(-NTU1*(1 + R1)))/(1 + R1) elif subtype == 'crossflow approximate': # This isn't technically accurate, an infinite sum is required # It has been computed from two different sources # but is found not to be within the 1% claimed of this equation P1 = 1 - exp(NTU1**0.22/R1*(exp(-R1*NTU1**0.78) - 1.)) elif subtype == 'crossflow': def to_int(v, NTU1, R1): return (1. + NTU1 - v*v/(4.*R1*NTU1))*exp(-v*v/(4.*R1*NTU1))*v*iv(0, v) int_term = quad(to_int, 0, 2.*NTU1*R1**0.5, args=(NTU1, R1))[0] P1 = 1./R1 - exp(-R1*NTU1)/(2.*(R1*NTU1)**2)*int_term elif subtype == 'crossflow, mixed 1': # Not symmetric K = 1 - exp(-R1*NTU1) P1 = 1 - exp(-K/R1) elif subtype == 'crossflow, mixed 2': # Not symmetric K = 1 - exp(-NTU1) P1 = (1 - exp(-K*R1))/R1 elif subtype == 'crossflow, mixed 1&2': K1 = 1. - exp(-NTU1) K2 = 1. - exp(-R1*NTU1) P1 = (1./K1 + R1/K2 - 1./NTU1)**-1 else: raise Exception('Subtype not recognized.') return P1
[ "def", "temperature_effectiveness_basic", "(", "R1", ",", "NTU1", ",", "subtype", "=", "'crossflow'", ")", ":", "if", "subtype", "==", "'counterflow'", ":", "# Same as TEMA 1 pass", "P1", "=", "(", "1", "-", "exp", "(", "-", "NTU1", "*", "(", "1", "-", "...
r'''Returns temperature effectiveness `P1` of a heat exchanger with a specified heat capacity ratio, number of transfer units `NTU1`, and of type `subtype`. This function performs the calculations for the basic cases, not actual shell-and-tube exchangers. The supported cases are as follows: * Counterflow (ex. double-pipe) * Parallel (ex. double pipe inefficient configuration) * Crossflow, single pass, fluids unmixed * Crossflow, single pass, fluid 1 mixed, fluid 2 unmixed * Crossflow, single pass, fluid 2 mixed, fluid 1 unmixed * Crossflow, single pass, both fluids mixed For parallel flow heat exchangers (this configuration is symmetric): .. math:: P_1 = \frac{1 - \exp[-NTU_1(1+R_1)]}{1 + R_1} For counterflow heat exchangers (this configuration is symmetric): .. math:: P_1 = \frac{1 - \exp[-NTU_1(1-R_1)]}{1 - R_1 \exp[-NTU_1(1-R_1)]} For cross-flow (single-pass) heat exchangers with both fluids unmixed (this configuration is symmetric), there are two solutions available; a frequently cited approximation and an exact solution which uses a numerical integration developed in [4]_. The approximate solution is: .. math:: P_1 \approx 1 - \exp\left[\frac{NTU_1^{0.22}}{R_1} (\exp(-R_1 NTU_1^{0.78})-1)\right] The exact solution for crossflow (single pass, fluids unmixed) is: .. math:: \epsilon = \frac{1}{R_1} - \frac{\exp(-R_1 \cdot NTU_1)}{2(R_1 NTU_1)^2} \int_0^{2 NTU_1\sqrt{R_1}} \left(1 + NTU_1 - \frac{v^2}{4R_1 NTU_1} \right)\exp\left(-\frac{v^2}{4R_1 NTU_1}\right)v I_0(v) dv For cross-flow (single-pass) heat exchangers with fluid 1 mixed, fluid 2 unmixed: .. math:: P_1 = 1 - \exp\left(-\frac{K}{R_1}\right) K = 1 - \exp(-R_1 NTU_1) For cross-flow (single-pass) heat exchangers with fluid 2 mixed, fluid 1 unmixed: .. math:: P_1 = \frac{1 - \exp(-K R_1)}{R_1} K = 1 - \exp(-NTU_1) For cross-flow (single-pass) heat exchangers with both fluids mixed (this configuration is symmetric): .. 
math:: P_1 = \left(\frac{1}{K_1} + \frac{R_1}{K_2} - \frac{1}{NTU_1}\right)^{-1} K_1 = 1 - \exp(-NTU_1) K_2 = 1 - \exp(-R_1 NTU_1) Parameters ---------- R1 : float Heat capacity ratio of the heat exchanger in the P-NTU method, calculated with respect to stream 1 [-] NTU1 : float Thermal number of transfer units of the heat exchanger in the P-NTU method, calculated with respect to stream 1 [-] subtype : float The type of heat exchanger; one of 'counterflow', 'parallel', 'crossflow', 'crossflow approximate', 'crossflow, mixed 1', 'crossflow, mixed 2', 'crossflow, mixed 1&2'. Returns ------- P1 : float Thermal effectiveness of the heat exchanger in the P-NTU method, calculated with respect to stream 1 [-] Notes ----- The crossflow case is an approximation only. There is an actual solution involving an infinite sum. This was implemented, but found to differ substantially so the approximation is used instead. Examples -------- >>> temperature_effectiveness_basic(R1=.1, NTU1=4, subtype='counterflow') 0.9753412729761263 References ---------- .. [1] Shah, Ramesh K., and Dusan P. Sekulic. Fundamentals of Heat Exchanger Design. 1st edition. Hoboken, NJ: Wiley, 2002. .. [2] Thulukkanam, Kuppan. Heat Exchanger Design Handbook, Second Edition. CRC Press, 2013. .. [3] Rohsenow, Warren and James Hartnett and Young Cho. Handbook of Heat Transfer, 3E. New York: McGraw-Hill, 1998. .. [4] Triboix, Alain. "Exact and Approximate Formulas for Cross Flow Heat Exchangers with Unmixed Fluids." International Communications in Heat and Mass Transfer 36, no. 2 (February 1, 2009): 121-24. doi:10.1016/j.icheatmasstransfer.2008.10.012.
[ "r", "Returns", "temperature", "effectiveness", "P1", "of", "a", "heat", "exchanger", "with", "a", "specified", "heat", "capacity", "ratio", "number", "of", "transfer", "units", "NTU1", "and", "of", "type", "subtype", ".", "This", "function", "performs", "the"...
python
train
Qiskit/qiskit-terra
qiskit/dagcircuit/dagcircuit.py
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/dagcircuit/dagcircuit.py#L995-L1005
def gate_nodes(self): """Get the list of gate nodes in the dag. Returns: list: the list of node ids that represent gates. """ nodes = [] for node in self.op_nodes(): if isinstance(node.op, Gate): nodes.append(node) return nodes
[ "def", "gate_nodes", "(", "self", ")", ":", "nodes", "=", "[", "]", "for", "node", "in", "self", ".", "op_nodes", "(", ")", ":", "if", "isinstance", "(", "node", ".", "op", ",", "Gate", ")", ":", "nodes", ".", "append", "(", "node", ")", "return"...
Get the list of gate nodes in the dag. Returns: list: the list of node ids that represent gates.
[ "Get", "the", "list", "of", "gate", "nodes", "in", "the", "dag", "." ]
python
test
saltstack/salt
salt/beacons/http_status.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/beacons/http_status.py#L88-L158
def beacon(config): ''' Check on different service status reported by the django-server-status library. .. code-block:: yaml beacons: http_status: - sites: example-site-1: url: "https://example.com/status" timeout: 30 content-type: json status: - value: 400 comp: < - value: 300 comp: '>=' content: - path: 'certificate:status' value: down comp: '==' - path: 'status_all' value: down comp: '==' - interval: 10 ''' ret = [] _config = {} list(map(_config.update, config)) for site, site_config in _config.get('sites', {}).items(): url = site_config.pop('url') content_type = site_config.pop('content_type', 'json') try: r = requests.get(url, timeout=site_config.pop('timeout', 30)) except requests.exceptions.RequestException as e: log.info("Request failed: %s", e) if r.raise_for_status: log.info('[-] Response from status endpoint was invalid: ' '%s', r.status_code) _failed = {'status_code': r.status_code, 'url': url} ret.append(_failed) continue for attr, checks in site_config.items(): for check in checks: log.debug('[+] response_item: %s', attr) attr_path = check.get('path', '') comp = comparisons[check['comp']] expected_value = check['value'] if attr_path: received_value = salt.utils.data.traverse_dict_and_list(attr_func_map[attr](r), attr_path) else: received_value = attr_func_map[attr](r) if received_value is None: log.info('[-] No data found at location %s for url %s', attr_path, url) continue log.debug('[+] expected_value: %s', expected_value) log.debug('[+] received_value: %s', received_value) if not comp(expected_value, received_value): _failed = {'expected': expected_value, 'received': received_value, 'url': url, 'path': attr_path } ret.append(_failed) return ret
[ "def", "beacon", "(", "config", ")", ":", "ret", "=", "[", "]", "_config", "=", "{", "}", "list", "(", "map", "(", "_config", ".", "update", ",", "config", ")", ")", "for", "site", ",", "site_config", "in", "_config", ".", "get", "(", "'sites'", ...
Check on different service status reported by the django-server-status library. .. code-block:: yaml beacons: http_status: - sites: example-site-1: url: "https://example.com/status" timeout: 30 content-type: json status: - value: 400 comp: < - value: 300 comp: '>=' content: - path: 'certificate:status' value: down comp: '==' - path: 'status_all' value: down comp: '==' - interval: 10
[ "Check", "on", "different", "service", "status", "reported", "by", "the", "django", "-", "server", "-", "status", "library", "." ]
python
train
calmjs/calmjs.parse
src/calmjs/parse/handlers/obfuscation.py
https://github.com/calmjs/calmjs.parse/blob/369f0ee346c5a84c4d5c35a7733a0e63b02eac59/src/calmjs/parse/handlers/obfuscation.py#L240-L251
def nest(self, node, cls=None): """ Create a new nested scope that is within this instance, binding the provided node to it. """ if cls is None: cls = type(self) nested_scope = cls(node, self) self.children.append(nested_scope) return nested_scope
[ "def", "nest", "(", "self", ",", "node", ",", "cls", "=", "None", ")", ":", "if", "cls", "is", "None", ":", "cls", "=", "type", "(", "self", ")", "nested_scope", "=", "cls", "(", "node", ",", "self", ")", "self", ".", "children", ".", "append", ...
Create a new nested scope that is within this instance, binding the provided node to it.
[ "Create", "a", "new", "nested", "scope", "that", "is", "within", "this", "instance", "binding", "the", "provided", "node", "to", "it", "." ]
python
train
cortical-io/retina-sdk.py
retinasdk/client/expressions_api.py
https://github.com/cortical-io/retina-sdk.py/blob/474c13ad399fe1e974d2650335537608f4456b07/retinasdk/client/expressions_api.py#L21-L41
def resolveExpression(self, retina_name, body, sparsity=1.0): """Resolve an expression Args: retina_name, str: The retina name (required) body, ExpressionOperation: The JSON formatted encoded to be evaluated (required) sparsity, float: Sparsify the resulting expression to this percentage (optional) Returns: Fingerprint """ resourcePath = '/expressions' method = 'POST' queryParams = {} headerParams = {'Accept': 'Application/json', 'Content-Type': 'application/json'} postData = None queryParams['retina_name'] = retina_name queryParams['sparsity'] = sparsity postData = body response = self.apiClient._callAPI(resourcePath, method, queryParams, postData, headerParams) return fingerprint.Fingerprint(**response.json())
[ "def", "resolveExpression", "(", "self", ",", "retina_name", ",", "body", ",", "sparsity", "=", "1.0", ")", ":", "resourcePath", "=", "'/expressions'", "method", "=", "'POST'", "queryParams", "=", "{", "}", "headerParams", "=", "{", "'Accept'", ":", "'Applic...
Resolve an expression Args: retina_name, str: The retina name (required) body, ExpressionOperation: The JSON formatted encoded to be evaluated (required) sparsity, float: Sparsify the resulting expression to this percentage (optional) Returns: Fingerprint
[ "Resolve", "an", "expression", "Args", ":", "retina_name", "str", ":", "The", "retina", "name", "(", "required", ")", "body", "ExpressionOperation", ":", "The", "JSON", "formatted", "encoded", "to", "be", "evaluated", "(", "required", ")", "sparsity", "float",...
python
train
ntoll/uflash
uflash.py
https://github.com/ntoll/uflash/blob/867468d386da0aa20212b69a152ce8bfc0972366/uflash.py#L171-L202
def extract_script(embedded_hex): """ Given a hex file containing the MicroPython runtime and an embedded Python script, will extract the original Python script. Returns a string containing the original embedded script. """ hex_lines = embedded_hex.split('\n') script_addr_high = hex((_SCRIPT_ADDR >> 16) & 0xffff)[2:].upper().zfill(4) script_addr_low = hex(_SCRIPT_ADDR & 0xffff)[2:].upper().zfill(4) start_script = None within_range = False # Look for the script start address for loc, val in enumerate(hex_lines): if val[0:9] == ':02000004': # Reached an extended address record, check if within script range within_range = val[9:13].upper() == script_addr_high elif within_range and val[0:3] == ':10' and \ val[3:7].upper() == script_addr_low: start_script = loc break if start_script: # Find the end of the script end_script = None for loc, val in enumerate(hex_lines[start_script:]): if val[9:41] == 'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF': end_script = loc + start_script break # Pass the extracted hex through unhexlify return unhexlify('\n'.join( hex_lines[start_script - 1:end_script if end_script else -6])) return ''
[ "def", "extract_script", "(", "embedded_hex", ")", ":", "hex_lines", "=", "embedded_hex", ".", "split", "(", "'\\n'", ")", "script_addr_high", "=", "hex", "(", "(", "_SCRIPT_ADDR", ">>", "16", ")", "&", "0xffff", ")", "[", "2", ":", "]", ".", "upper", ...
Given a hex file containing the MicroPython runtime and an embedded Python script, will extract the original Python script. Returns a string containing the original embedded script.
[ "Given", "a", "hex", "file", "containing", "the", "MicroPython", "runtime", "and", "an", "embedded", "Python", "script", "will", "extract", "the", "original", "Python", "script", "." ]
python
train
pyviz/holoviews
holoviews/core/util.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/core/util.py#L1351-L1360
def layer_groups(ordering, length=2): """ Splits a global ordering of Layers into groups based on a slice of the spec. The grouping behavior can be modified by changing the length of spec the entries are grouped by. """ group_orderings = defaultdict(list) for el in ordering: group_orderings[el[:length]].append(el) return group_orderings
[ "def", "layer_groups", "(", "ordering", ",", "length", "=", "2", ")", ":", "group_orderings", "=", "defaultdict", "(", "list", ")", "for", "el", "in", "ordering", ":", "group_orderings", "[", "el", "[", ":", "length", "]", "]", ".", "append", "(", "el"...
Splits a global ordering of Layers into groups based on a slice of the spec. The grouping behavior can be modified by changing the length of spec the entries are grouped by.
[ "Splits", "a", "global", "ordering", "of", "Layers", "into", "groups", "based", "on", "a", "slice", "of", "the", "spec", ".", "The", "grouping", "behavior", "can", "be", "modified", "by", "changing", "the", "length", "of", "spec", "the", "entries", "are", ...
python
train
arne-cl/discoursegraphs
src/discoursegraphs/discoursegraph.py
https://github.com/arne-cl/discoursegraphs/blob/842f0068a3190be2c75905754521b176b25a54fb/src/discoursegraphs/discoursegraph.py#L941-L959
def get_span_offsets(docgraph, node_id): """ returns the character start and end position of the span of text that the given node spans or dominates. Returns ------- offsets : tuple(int, int) character onset and offset of the span """ try: span = get_span(docgraph, node_id) # workaround for issue #138 # TODO: when #138 is fixed, just take the first onset / last offset onsets, offsets = zip(*[docgraph.get_offsets(tok_node) for tok_node in span]) return (min(onsets), max(offsets)) except KeyError as _: raise KeyError("Node '{}' doesn't span any tokens.".format(node_id))
[ "def", "get_span_offsets", "(", "docgraph", ",", "node_id", ")", ":", "try", ":", "span", "=", "get_span", "(", "docgraph", ",", "node_id", ")", "# workaround for issue #138", "# TODO: when #138 is fixed, just take the first onset / last offset", "onsets", ",", "offsets",...
returns the character start and end position of the span of text that the given node spans or dominates. Returns ------- offsets : tuple(int, int) character onset and offset of the span
[ "returns", "the", "character", "start", "and", "end", "position", "of", "the", "span", "of", "text", "that", "the", "given", "node", "spans", "or", "dominates", "." ]
python
train
agusmakmun/djipsum
djipsum/faker.py
https://github.com/agusmakmun/djipsum/blob/e7950556422b4039092db2083db7a83728230977/djipsum/faker.py#L326-L366
def create(self, fields): """ Create the object only once. So, you need loop to usage. :param `fields` is dictionary fields. """ try: # Cleaning the fields, and check if has `ForeignKey` type. cleaned_fields = {} for key, value in fields.items(): if type(value) is dict: try: if value['type'] == 'fk': fake_fk = self.fake_fk(value['field_name']) cleaned_fields.update({key: fake_fk}) except: pass else: cleaned_fields.update({key: value}) # Creating the object from dictionary fields. model_class = self.model_class() obj = model_class.objects.create(**cleaned_fields) # The `ManyToManyField` need specific object, # so i handle it after created the object. for key, value in fields.items(): if type(value) is dict: try: if value['type'] == 'm2m': self.fake_m2m(obj, value['field_name']) except: pass try: obj.save_m2m() except: obj.save() return obj except Exception as e: raise e
[ "def", "create", "(", "self", ",", "fields", ")", ":", "try", ":", "# Cleaning the fields, and check if has `ForeignKey` type.", "cleaned_fields", "=", "{", "}", "for", "key", ",", "value", "in", "fields", ".", "items", "(", ")", ":", "if", "type", "(", "val...
Create the object only once. So, you need loop to usage. :param `fields` is dictionary fields.
[ "Create", "the", "object", "only", "once", ".", "So", "you", "need", "loop", "to", "usage", "." ]
python
train
materials-data-facility/toolbox
mdf_toolbox/toolbox.py
https://github.com/materials-data-facility/toolbox/blob/2a4ac2b6a892238263008efa6a5f3923d9a83505/mdf_toolbox/toolbox.py#L577-L598
def translate_index(index_name): """Translate a known Globus Search index into the index UUID. The UUID is the proper way to access indices, and will eventually be the only way. This method will return names it cannot disambiguate. Arguments: index_name (str): The name of the index. Returns: str: The UUID of the index. If the index is not known and is not unambiguous, this will be the ``index_name`` unchanged instead. """ uuid = SEARCH_INDEX_UUIDS.get(index_name.strip().lower()) if not uuid: try: index_info = globus_sdk.SearchClient().get_index(index_name).data if not isinstance(index_info, dict): raise ValueError("Multiple UUIDs possible") uuid = index_info.get("id", index_name) except Exception: uuid = index_name return uuid
[ "def", "translate_index", "(", "index_name", ")", ":", "uuid", "=", "SEARCH_INDEX_UUIDS", ".", "get", "(", "index_name", ".", "strip", "(", ")", ".", "lower", "(", ")", ")", "if", "not", "uuid", ":", "try", ":", "index_info", "=", "globus_sdk", ".", "S...
Translate a known Globus Search index into the index UUID. The UUID is the proper way to access indices, and will eventually be the only way. This method will return names it cannot disambiguate. Arguments: index_name (str): The name of the index. Returns: str: The UUID of the index. If the index is not known and is not unambiguous, this will be the ``index_name`` unchanged instead.
[ "Translate", "a", "known", "Globus", "Search", "index", "into", "the", "index", "UUID", ".", "The", "UUID", "is", "the", "proper", "way", "to", "access", "indices", "and", "will", "eventually", "be", "the", "only", "way", ".", "This", "method", "will", "...
python
train
NICTA/revrand
revrand/likelihoods.py
https://github.com/NICTA/revrand/blob/4c1881b6c1772d2b988518e49dde954f165acfb6/revrand/likelihoods.py#L171-L192
def loglike(self, y, f, n): r""" Binomial log likelihood. Parameters ---------- y: ndarray array of 0, 1 valued integers of targets f: ndarray latent function from the GLM prior (:math:`\mathbf{f} = \boldsymbol\Phi \mathbf{w}`) n: ndarray the total number of observations Returns ------- logp: ndarray the log likelihood of each y given each f under this likelihood. """ ll = binom.logpmf(y, n=n, p=expit(f)) return ll
[ "def", "loglike", "(", "self", ",", "y", ",", "f", ",", "n", ")", ":", "ll", "=", "binom", ".", "logpmf", "(", "y", ",", "n", "=", "n", ",", "p", "=", "expit", "(", "f", ")", ")", "return", "ll" ]
r""" Binomial log likelihood. Parameters ---------- y: ndarray array of 0, 1 valued integers of targets f: ndarray latent function from the GLM prior (:math:`\mathbf{f} = \boldsymbol\Phi \mathbf{w}`) n: ndarray the total number of observations Returns ------- logp: ndarray the log likelihood of each y given each f under this likelihood.
[ "r", "Binomial", "log", "likelihood", "." ]
python
train
JanHendrikDolling/configvalidator
configvalidator/tools/parser.py
https://github.com/JanHendrikDolling/configvalidator/blob/efde23a9352ae1fd6702b04ad964783ce11cbca5/configvalidator/tools/parser.py#L132-L148
def _resolve_dep(self, key): """ this method resolves dependencies for the given key. call the method afther the item "key" was added to the list of avalable items """ if key in self.future_values_key_dep: # there are some dependencies that can be resoled dep_list = self.future_values_key_dep[key] del self.future_values_key_dep[key] # remove dependencies also_finish = [] # iterate over the dependencies that can now be resoled for dep in dep_list: if self.__resolve_dep_helper(dep, key) is True: also_finish.append(dep) # maybe the resolving process leed to new deps that can be resolved for dep in also_finish: self._resolve_dep(dep)
[ "def", "_resolve_dep", "(", "self", ",", "key", ")", ":", "if", "key", "in", "self", ".", "future_values_key_dep", ":", "# there are some dependencies that can be resoled", "dep_list", "=", "self", ".", "future_values_key_dep", "[", "key", "]", "del", "self", ".",...
this method resolves dependencies for the given key. call the method afther the item "key" was added to the list of avalable items
[ "this", "method", "resolves", "dependencies", "for", "the", "given", "key", ".", "call", "the", "method", "afther", "the", "item", "key", "was", "added", "to", "the", "list", "of", "avalable", "items" ]
python
train
wummel/dosage
scripts/comicfury.py
https://github.com/wummel/dosage/blob/a0109c3a46219f280e6e5e77183674e40da0f304/scripts/comicfury.py#L289-L299
def get_results(): """Parse all search result pages.""" # store info in a dictionary {name -> shortname} res = {} session = requests.Session() baseUrl = 'http://comicfury.com/search.php?search=1&webcomics=Search+for+webcomics&query=&worder=5&asc=1&incvi=1&incse=1&incnu=1&incla=1&all_ge=1&all_st=1&all_la=1&page=' pages = 382 for i in range(1, pages+1): url = baseUrl + str(i) handle_url(url, session, res) save_result(res, json_file)
[ "def", "get_results", "(", ")", ":", "# store info in a dictionary {name -> shortname}", "res", "=", "{", "}", "session", "=", "requests", ".", "Session", "(", ")", "baseUrl", "=", "'http://comicfury.com/search.php?search=1&webcomics=Search+for+webcomics&query=&worder=5&asc=1&i...
Parse all search result pages.
[ "Parse", "all", "search", "result", "pages", "." ]
python
train
ggaughan/pipe2py
pipe2py/modules/pipenumberinput.py
https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/modules/pipenumberinput.py#L13-L40
def pipe_numberinput(context=None, _INPUT=None, conf=None, **kwargs): """An input that prompts the user for a number and yields it forever. Not loopable. Parameters ---------- context : pipe2py.Context object _INPUT : not used conf : { 'name': {'value': 'parameter name'}, 'prompt': {'value': 'User prompt'}, 'default': {'value': 'default value'}, 'debug': {'value': 'debug value'} } Yields ------ _OUTPUT : text """ value = utils.get_input(context, conf) try: value = int(value) except: value = 0 while True: yield value
[ "def", "pipe_numberinput", "(", "context", "=", "None", ",", "_INPUT", "=", "None", ",", "conf", "=", "None", ",", "*", "*", "kwargs", ")", ":", "value", "=", "utils", ".", "get_input", "(", "context", ",", "conf", ")", "try", ":", "value", "=", "i...
An input that prompts the user for a number and yields it forever. Not loopable. Parameters ---------- context : pipe2py.Context object _INPUT : not used conf : { 'name': {'value': 'parameter name'}, 'prompt': {'value': 'User prompt'}, 'default': {'value': 'default value'}, 'debug': {'value': 'debug value'} } Yields ------ _OUTPUT : text
[ "An", "input", "that", "prompts", "the", "user", "for", "a", "number", "and", "yields", "it", "forever", ".", "Not", "loopable", "." ]
python
train
go-macaroon-bakery/py-macaroon-bakery
macaroonbakery/checkers/_time.py
https://github.com/go-macaroon-bakery/py-macaroon-bakery/blob/63ce1ef1dabe816eb8aaec48fbb46761c34ddf77/macaroonbakery/checkers/_time.py#L40-L67
def expiry_time(ns, cavs): ''' Returns the minimum time of any time-before caveats found in the given list or None if no such caveats were found. The ns parameter is :param ns: used to determine the standard namespace prefix - if the standard namespace is not found, the empty prefix is assumed. :param cavs: a list of pymacaroons.Caveat :return: datetime.DateTime or None. ''' prefix = ns.resolve(STD_NAMESPACE) time_before_cond = condition_with_prefix( prefix, COND_TIME_BEFORE) t = None for cav in cavs: if not cav.first_party(): continue cav = cav.caveat_id_bytes.decode('utf-8') name, rest = parse_caveat(cav) if name != time_before_cond: continue try: et = pyrfc3339.parse(rest, utc=True).replace(tzinfo=None) if t is None or et < t: t = et except ValueError: continue return t
[ "def", "expiry_time", "(", "ns", ",", "cavs", ")", ":", "prefix", "=", "ns", ".", "resolve", "(", "STD_NAMESPACE", ")", "time_before_cond", "=", "condition_with_prefix", "(", "prefix", ",", "COND_TIME_BEFORE", ")", "t", "=", "None", "for", "cav", "in", "ca...
Returns the minimum time of any time-before caveats found in the given list or None if no such caveats were found. The ns parameter is :param ns: used to determine the standard namespace prefix - if the standard namespace is not found, the empty prefix is assumed. :param cavs: a list of pymacaroons.Caveat :return: datetime.DateTime or None.
[ "Returns", "the", "minimum", "time", "of", "any", "time", "-", "before", "caveats", "found", "in", "the", "given", "list", "or", "None", "if", "no", "such", "caveats", "were", "found", "." ]
python
train
Opentrons/opentrons
update-server/otupdate/buildroot/config.py
https://github.com/Opentrons/opentrons/blob/a7c15cc2636ecb64ab56c7edc1d8a57163aaeadf/update-server/otupdate/buildroot/config.py#L122-L129
def save_to_path(path: str, config: Config) -> None: """ Save the config file to a specific path (not what's in the config) """ LOG.debug(f"Saving config to {path}") with open(path, 'w') as cf: cf.write(json.dumps({k: v for k, v in config._asdict().items() if k != 'path'}))
[ "def", "save_to_path", "(", "path", ":", "str", ",", "config", ":", "Config", ")", "->", "None", ":", "LOG", ".", "debug", "(", "f\"Saving config to {path}\"", ")", "with", "open", "(", "path", ",", "'w'", ")", "as", "cf", ":", "cf", ".", "write", "(...
Save the config file to a specific path (not what's in the config)
[ "Save", "the", "config", "file", "to", "a", "specific", "path", "(", "not", "what", "s", "in", "the", "config", ")" ]
python
train
tanghaibao/goatools
goatools/cli/wr_hierarchy.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/cli/wr_hierarchy.py#L116-L121
def _get_fout_go(self): """Get the name of an output file based on the top GO term.""" assert self.goids, "NO VALID GO IDs WERE PROVIDED AS STARTING POINTS FOR HIERARCHY REPORT" base = next(iter(self.goids)).replace(':', '') upstr = '_up' if 'up' in self.kws else '' return "hier_{BASE}{UP}.{EXT}".format(BASE=base, UP=upstr, EXT='txt')
[ "def", "_get_fout_go", "(", "self", ")", ":", "assert", "self", ".", "goids", ",", "\"NO VALID GO IDs WERE PROVIDED AS STARTING POINTS FOR HIERARCHY REPORT\"", "base", "=", "next", "(", "iter", "(", "self", ".", "goids", ")", ")", ".", "replace", "(", "':'", ","...
Get the name of an output file based on the top GO term.
[ "Get", "the", "name", "of", "an", "output", "file", "based", "on", "the", "top", "GO", "term", "." ]
python
train
textbook/aslack
aslack/slack_bot/bot.py
https://github.com/textbook/aslack/blob/9ac6a44e4464180109fa4be130ad7a980a9d1acc/aslack/slack_bot/bot.py#L104-L136
async def handle_message(self, message, filters): """Handle an incoming message appropriately. Arguments: message (:py:class:`aiohttp.websocket.Message`): The incoming message to handle. filters (:py:class:`list`): The filters to apply to incoming messages. """ data = self._unpack_message(message) logger.debug(data) if data.get('type') == 'error': raise SlackApiError( data.get('error', {}).get('msg', str(data)) ) elif self.message_is_to_me(data): text = data['text'][len(self.address_as):].strip() if text == 'help': return self._respond( channel=data['channel'], text=self._instruction_list(filters), ) elif text == 'version': return self._respond( channel=data['channel'], text=self.VERSION, ) for _filter in filters: if _filter.matches(data): logger.debug('Response triggered') async for response in _filter: self._respond(channel=data['channel'], text=response)
[ "async", "def", "handle_message", "(", "self", ",", "message", ",", "filters", ")", ":", "data", "=", "self", ".", "_unpack_message", "(", "message", ")", "logger", ".", "debug", "(", "data", ")", "if", "data", ".", "get", "(", "'type'", ")", "==", "...
Handle an incoming message appropriately. Arguments: message (:py:class:`aiohttp.websocket.Message`): The incoming message to handle. filters (:py:class:`list`): The filters to apply to incoming messages.
[ "Handle", "an", "incoming", "message", "appropriately", "." ]
python
valid
MrYsLab/pymata-aio
pymata_aio/pymata_core.py
https://github.com/MrYsLab/pymata-aio/blob/015081a4628b9d47dfe3f8d6c698ff903f107810/pymata_aio/pymata_core.py#L1474-L1501
async def _analog_message(self, data): """ This is a private message handler method. It is a message handler for analog messages. :param data: message data :returns: None - but saves the data in the pins structure """ pin = data[0] value = (data[PrivateConstants.MSB] << 7) + data[PrivateConstants.LSB] # if self.analog_pins[pin].current_value != value: self.analog_pins[pin].current_value = value # append pin number, pin value, and pin type to return value and return as a list message = [pin, value, Constants.ANALOG] if self.analog_pins[pin].cb: if self.analog_pins[pin].cb_type: await self.analog_pins[pin].cb(message) else: loop = self.loop loop.call_soon(self.analog_pins[pin].cb, message) # is there a latch entry for this pin? key = 'A' + str(pin) if key in self.latch_map: await self._check_latch_data(key, message[1])
[ "async", "def", "_analog_message", "(", "self", ",", "data", ")", ":", "pin", "=", "data", "[", "0", "]", "value", "=", "(", "data", "[", "PrivateConstants", ".", "MSB", "]", "<<", "7", ")", "+", "data", "[", "PrivateConstants", ".", "LSB", "]", "#...
This is a private message handler method. It is a message handler for analog messages. :param data: message data :returns: None - but saves the data in the pins structure
[ "This", "is", "a", "private", "message", "handler", "method", ".", "It", "is", "a", "message", "handler", "for", "analog", "messages", "." ]
python
train
splunk/splunk-sdk-python
splunklib/client.py
https://github.com/splunk/splunk-sdk-python/blob/a245a4eeb93b3621730418008e31715912bcdcd8/splunklib/client.py#L1390-L1440
def iter(self, offset=0, count=None, pagesize=None, **kwargs): """Iterates over the collection. This method is equivalent to the :meth:`list` method, but it returns an iterator and can load a certain number of entities at a time from the server. :param offset: The index of the first entity to return (optional). :type offset: ``integer`` :param count: The maximum number of entities to return (optional). :type count: ``integer`` :param pagesize: The number of entities to load (optional). :type pagesize: ``integer`` :param kwargs: Additional arguments (optional): - "search" (``string``): The search query to filter responses. - "sort_dir" (``string``): The direction to sort returned items: "asc" or "desc". - "sort_key" (``string``): The field to use for sorting (optional). - "sort_mode" (``string``): The collating sequence for sorting returned items: "auto", "alpha", "alpha_case", or "num". :type kwargs: ``dict`` **Example**:: import splunklib.client as client s = client.connect(...) for saved_search in s.saved_searches.iter(pagesize=10): # Loads 10 saved searches at a time from the # server. ... """ assert pagesize is None or pagesize > 0 if count is None: count = self.null_count fetched = 0 while count == self.null_count or fetched < count: response = self.get(count=pagesize or count, offset=offset, **kwargs) items = self._load_list(response) N = len(items) fetched += N for item in items: yield item if pagesize is None or N < pagesize: break offset += N logging.debug("pagesize=%d, fetched=%d, offset=%d, N=%d, kwargs=%s", pagesize, fetched, offset, N, kwargs)
[ "def", "iter", "(", "self", ",", "offset", "=", "0", ",", "count", "=", "None", ",", "pagesize", "=", "None", ",", "*", "*", "kwargs", ")", ":", "assert", "pagesize", "is", "None", "or", "pagesize", ">", "0", "if", "count", "is", "None", ":", "co...
Iterates over the collection. This method is equivalent to the :meth:`list` method, but it returns an iterator and can load a certain number of entities at a time from the server. :param offset: The index of the first entity to return (optional). :type offset: ``integer`` :param count: The maximum number of entities to return (optional). :type count: ``integer`` :param pagesize: The number of entities to load (optional). :type pagesize: ``integer`` :param kwargs: Additional arguments (optional): - "search" (``string``): The search query to filter responses. - "sort_dir" (``string``): The direction to sort returned items: "asc" or "desc". - "sort_key" (``string``): The field to use for sorting (optional). - "sort_mode" (``string``): The collating sequence for sorting returned items: "auto", "alpha", "alpha_case", or "num". :type kwargs: ``dict`` **Example**:: import splunklib.client as client s = client.connect(...) for saved_search in s.saved_searches.iter(pagesize=10): # Loads 10 saved searches at a time from the # server. ...
[ "Iterates", "over", "the", "collection", "." ]
python
train
RudolfCardinal/pythonlib
cardinal_pythonlib/stringfunc.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/stringfunc.py#L145-L150
def strseq(prefix: str, first: int, last: int, suffix: str = "") -> List[str]: """ Makes a string of the format ``<prefix><number><suffix>`` for every number from ``first`` to ``last`` inclusive, and returns them as a list. """ return [strnum(prefix, n, suffix) for n in range(first, last + 1)]
[ "def", "strseq", "(", "prefix", ":", "str", ",", "first", ":", "int", ",", "last", ":", "int", ",", "suffix", ":", "str", "=", "\"\"", ")", "->", "List", "[", "str", "]", ":", "return", "[", "strnum", "(", "prefix", ",", "n", ",", "suffix", ")"...
Makes a string of the format ``<prefix><number><suffix>`` for every number from ``first`` to ``last`` inclusive, and returns them as a list.
[ "Makes", "a", "string", "of", "the", "format", "<prefix", ">", "<number", ">", "<suffix", ">", "for", "every", "number", "from", "first", "to", "last", "inclusive", "and", "returns", "them", "as", "a", "list", "." ]
python
train
ivanprjcts/sdklib
sdklib/util/urls.py
https://github.com/ivanprjcts/sdklib/blob/7ba4273a05c40e2e338f49f2dd564920ed98fcab/sdklib/util/urls.py#L60-L82
def generate_url(scheme=None, host=None, port=None, path=None, query=None): """ Generate URI from parameters. :param str scheme: :param str host: :param int port: :param str path: :param dict query: :return: """ url = "" if scheme is not None: url += "%s://" % scheme if host is not None: url += host if port is not None: url += ":%s" % str(port) if path is not None: url += ensure_url_path_starts_with_slash(path) if query is not None: url += "?%s" % (urlencode(query)) return url
[ "def", "generate_url", "(", "scheme", "=", "None", ",", "host", "=", "None", ",", "port", "=", "None", ",", "path", "=", "None", ",", "query", "=", "None", ")", ":", "url", "=", "\"\"", "if", "scheme", "is", "not", "None", ":", "url", "+=", "\"%s...
Generate URI from parameters. :param str scheme: :param str host: :param int port: :param str path: :param dict query: :return:
[ "Generate", "URI", "from", "parameters", "." ]
python
train
SiLab-Bonn/pyBAR
pybar/analysis/analysis_utils.py
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/analysis/analysis_utils.py#L1160-L1320
def data_aligned_at_events(table, start_event_number=None, stop_event_number=None, start_index=None, stop_index=None, chunk_size=10000000, try_speedup=False, first_event_aligned=True, fail_on_missing_events=True): '''Takes the table with a event_number column and returns chunks with the size up to chunk_size. The chunks are chosen in a way that the events are not splitted. Additional parameters can be set to increase the readout speed. Events between a certain range can be selected. Also the start and the stop indices limiting the table size can be specified to improve performance. The event_number column must be sorted. In case of try_speedup is True, it is important to create an index of event_number column with pytables before using this function. Otherwise the queries are slowed down. Parameters ---------- table : pytables.table The data. start_event_number : int The retruned data contains events with event number >= start_event_number. If None, no limit is set. stop_event_number : int The retruned data contains events with event number < stop_event_number. If None, no limit is set. start_index : int Start index of data. If None, no limit is set. stop_index : int Stop index of data. If None, no limit is set. chunk_size : int Maximum chunk size per read. try_speedup : bool If True, try to reduce the index range to read by searching for the indices of start and stop event number. If these event numbers are usually not in the data this speedup can even slow down the function! The following parameters are not used when try_speedup is True: first_event_aligned : bool If True, assuming that the first event is aligned to the data chunk and will be added. If False, the lowest event number of the first chunk will not be read out. fail_on_missing_events : bool If True, an error is given when start_event_number or stop_event_number is not part of the data. Returns ------- Iterator of tuples Data of the actual data chunk and start index for the next chunk. 
Example ------- start_index = 0 for scan_parameter in scan_parameter_range: start_event_number, stop_event_number = event_select_function(scan_parameter) for data, start_index in data_aligned_at_events(table, start_event_number=start_event_number, stop_event_number=stop_event_number, start_index=start_index): do_something(data) for data, index in data_aligned_at_events(table): do_something(data) ''' # initialize variables start_index_known = False stop_index_known = False start_index = 0 if start_index is None else start_index stop_index = table.nrows if stop_index is None else stop_index if stop_index < start_index: raise InvalidInputError('Invalid start/stop index') table_max_rows = table.nrows if stop_event_number is not None and start_event_number is not None and stop_event_number < start_event_number: raise InvalidInputError('Invalid start/stop event number') # set start stop indices from the event numbers for fast read if possible; not possible if the given event number does not exist in the data stream if try_speedup and table.colindexed["event_number"]: if start_event_number is not None: start_condition = 'event_number==' + str(start_event_number) start_indices = table.get_where_list(start_condition, start=start_index, stop=stop_index) if start_indices.shape[0] != 0: # set start index if possible start_index = start_indices[0] start_index_known = True if stop_event_number is not None: stop_condition = 'event_number==' + str(stop_event_number) stop_indices = table.get_where_list(stop_condition, start=start_index, stop=stop_index) if stop_indices.shape[0] != 0: # set the stop index if possible, stop index is excluded stop_index = stop_indices[0] stop_index_known = True if start_index_known and stop_index_known and start_index + chunk_size >= stop_index: # special case, one read is enough, data not bigger than one chunk and the indices are known yield table.read(start=start_index, stop=stop_index), stop_index else: # read data in chunks, chunks do not divide 
events, abort if stop_event_number is reached # search for begin current_start_index = start_index if start_event_number is not None: while current_start_index < stop_index: current_stop_index = min(current_start_index + chunk_size, stop_index) array_chunk = table.read(start=current_start_index, stop=current_stop_index) # stop index is exclusive, so add 1 last_event_in_chunk = array_chunk["event_number"][-1] if last_event_in_chunk < start_event_number: current_start_index = current_start_index + chunk_size # not there yet, continue to next read (assuming sorted events) else: first_event_in_chunk = array_chunk["event_number"][0] # if stop_event_number is not None and first_event_in_chunk >= stop_event_number and start_index != 0 and start_index == current_start_index: # raise InvalidInputError('The stop event %d is missing. Change stop_event_number.' % stop_event_number) if array_chunk.shape[0] == chunk_size and first_event_in_chunk == last_event_in_chunk: raise InvalidInputError('Chunk size too small. Increase chunk size to fit full event.') if not first_event_aligned and first_event_in_chunk == start_event_number and start_index != 0 and start_index == current_start_index: # first event in first chunk not aligned at index 0, so take next event if fail_on_missing_events: raise InvalidInputError('The start event %d is missing. Change start_event_number.' % start_event_number) chunk_start_index = np.searchsorted(array_chunk["event_number"], start_event_number + 1, side='left') elif fail_on_missing_events and first_event_in_chunk > start_event_number and start_index == current_start_index: raise InvalidInputError('The start event %d is missing. Change start_event_number.' 
% start_event_number) elif first_event_aligned and first_event_in_chunk == start_event_number and start_index == current_start_index: chunk_start_index = 0 else: chunk_start_index = np.searchsorted(array_chunk["event_number"], start_event_number, side='left') if fail_on_missing_events and array_chunk["event_number"][chunk_start_index] != start_event_number and start_index == current_start_index: raise InvalidInputError('The start event %d is missing. Change start_event_number.' % start_event_number) # if fail_on_missing_events and ((start_index == current_start_index and chunk_start_index == 0 and start_index != 0 and not first_event_aligned) or array_chunk["event_number"][chunk_start_index] != start_event_number): # raise InvalidInputError('The start event %d is missing. Change start_event_number.' % start_event_number) current_start_index = current_start_index + chunk_start_index # calculate index for next loop break elif not first_event_aligned and start_index != 0: while current_start_index < stop_index: current_stop_index = min(current_start_index + chunk_size, stop_index) array_chunk = table.read(start=current_start_index, stop=current_stop_index) # stop index is exclusive, so add 1 first_event_in_chunk = array_chunk["event_number"][0] last_event_in_chunk = array_chunk["event_number"][-1] if array_chunk.shape[0] == chunk_size and first_event_in_chunk == last_event_in_chunk: raise InvalidInputError('Chunk size too small. 
Increase chunk size to fit full event.') chunk_start_index = np.searchsorted(array_chunk["event_number"], first_event_in_chunk + 1, side='left') current_start_index = current_start_index + chunk_start_index if not first_event_in_chunk == last_event_in_chunk: break # data loop while current_start_index < stop_index: current_stop_index = min(current_start_index + chunk_size, stop_index) array_chunk = table.read(start=current_start_index, stop=current_stop_index) # stop index is exclusive, so add 1 first_event_in_chunk = array_chunk["event_number"][0] last_event_in_chunk = array_chunk["event_number"][-1] chunk_start_index = 0 if stop_event_number is None: if current_stop_index == table_max_rows: chunk_stop_index = array_chunk.shape[0] else: chunk_stop_index = np.searchsorted(array_chunk["event_number"], last_event_in_chunk, side='left') else: if last_event_in_chunk >= stop_event_number: chunk_stop_index = np.searchsorted(array_chunk["event_number"], stop_event_number, side='left') elif current_stop_index == table_max_rows: # this will also add the last event of the table chunk_stop_index = array_chunk.shape[0] else: chunk_stop_index = np.searchsorted(array_chunk["event_number"], last_event_in_chunk, side='left') nrows = chunk_stop_index - chunk_start_index if nrows == 0: if array_chunk.shape[0] == chunk_size and first_event_in_chunk == last_event_in_chunk: raise InvalidInputError('Chunk size too small to fit event. Data corruption possible. Increase chunk size to read full event.') elif chunk_start_index == 0: # not increasing current_start_index return elif stop_event_number is not None and last_event_in_chunk >= stop_event_number: return else: yield array_chunk[chunk_start_index:chunk_stop_index], current_start_index + nrows + chunk_start_index current_start_index = current_start_index + nrows + chunk_start_index
[ "def", "data_aligned_at_events", "(", "table", ",", "start_event_number", "=", "None", ",", "stop_event_number", "=", "None", ",", "start_index", "=", "None", ",", "stop_index", "=", "None", ",", "chunk_size", "=", "10000000", ",", "try_speedup", "=", "False", ...
Takes the table with a event_number column and returns chunks with the size up to chunk_size. The chunks are chosen in a way that the events are not splitted. Additional parameters can be set to increase the readout speed. Events between a certain range can be selected. Also the start and the stop indices limiting the table size can be specified to improve performance. The event_number column must be sorted. In case of try_speedup is True, it is important to create an index of event_number column with pytables before using this function. Otherwise the queries are slowed down. Parameters ---------- table : pytables.table The data. start_event_number : int The retruned data contains events with event number >= start_event_number. If None, no limit is set. stop_event_number : int The retruned data contains events with event number < stop_event_number. If None, no limit is set. start_index : int Start index of data. If None, no limit is set. stop_index : int Stop index of data. If None, no limit is set. chunk_size : int Maximum chunk size per read. try_speedup : bool If True, try to reduce the index range to read by searching for the indices of start and stop event number. If these event numbers are usually not in the data this speedup can even slow down the function! The following parameters are not used when try_speedup is True: first_event_aligned : bool If True, assuming that the first event is aligned to the data chunk and will be added. If False, the lowest event number of the first chunk will not be read out. fail_on_missing_events : bool If True, an error is given when start_event_number or stop_event_number is not part of the data. Returns ------- Iterator of tuples Data of the actual data chunk and start index for the next chunk. 
Example ------- start_index = 0 for scan_parameter in scan_parameter_range: start_event_number, stop_event_number = event_select_function(scan_parameter) for data, start_index in data_aligned_at_events(table, start_event_number=start_event_number, stop_event_number=stop_event_number, start_index=start_index): do_something(data) for data, index in data_aligned_at_events(table): do_something(data)
[ "Takes", "the", "table", "with", "a", "event_number", "column", "and", "returns", "chunks", "with", "the", "size", "up", "to", "chunk_size", ".", "The", "chunks", "are", "chosen", "in", "a", "way", "that", "the", "events", "are", "not", "splitted", ".", ...
python
train
pysal/giddy
giddy/util.py
https://github.com/pysal/giddy/blob/13fae6c18933614be78e91a6b5060693bea33a04/giddy/util.py#L9-L40
def shuffle_matrix(X, ids): """ Random permutation of rows and columns of a matrix Parameters ---------- X : array (k, k), array to be permutated. ids : array range (k, ). Returns ------- X : array (k, k) with rows and columns randomly shuffled. Examples -------- >>> import numpy as np >>> from giddy.util import shuffle_matrix >>> X=np.arange(16) >>> X.shape=(4,4) >>> np.random.seed(10) >>> shuffle_matrix(X,list(range(4))) array([[10, 8, 11, 9], [ 2, 0, 3, 1], [14, 12, 15, 13], [ 6, 4, 7, 5]]) """ np.random.shuffle(ids) return X[ids, :][:, ids]
[ "def", "shuffle_matrix", "(", "X", ",", "ids", ")", ":", "np", ".", "random", ".", "shuffle", "(", "ids", ")", "return", "X", "[", "ids", ",", ":", "]", "[", ":", ",", "ids", "]" ]
Random permutation of rows and columns of a matrix Parameters ---------- X : array (k, k), array to be permutated. ids : array range (k, ). Returns ------- X : array (k, k) with rows and columns randomly shuffled. Examples -------- >>> import numpy as np >>> from giddy.util import shuffle_matrix >>> X=np.arange(16) >>> X.shape=(4,4) >>> np.random.seed(10) >>> shuffle_matrix(X,list(range(4))) array([[10, 8, 11, 9], [ 2, 0, 3, 1], [14, 12, 15, 13], [ 6, 4, 7, 5]])
[ "Random", "permutation", "of", "rows", "and", "columns", "of", "a", "matrix" ]
python
train
praekeltfoundation/molo
molo/core/api/importers.py
https://github.com/praekeltfoundation/molo/blob/57702fda4fab261d67591415f7d46bc98fa38525/molo/core/api/importers.py#L757-L767
def get_foreign_page_id_from_type(self, page_type): ''' Get the foreign page id based on type Only works for index pages ''' # TODO: log this response = requests.get("{}pages/?type={}".format( self.api_url, page_type)) content = json.loads(response.content) return content["items"][0]["id"]
[ "def", "get_foreign_page_id_from_type", "(", "self", ",", "page_type", ")", ":", "# TODO: log this", "response", "=", "requests", ".", "get", "(", "\"{}pages/?type={}\"", ".", "format", "(", "self", ".", "api_url", ",", "page_type", ")", ")", "content", "=", "...
Get the foreign page id based on type Only works for index pages
[ "Get", "the", "foreign", "page", "id", "based", "on", "type" ]
python
train
yyuu/botornado
boto/iam/connection.py
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/iam/connection.py#L720-L743
def update_server_cert(self, cert_name, new_cert_name=None, new_path=None): """ Updates the name and/or the path of the specified server certificate. :type cert_name: string :param cert_name: The name of the server certificate that you want to update. :type new_cert_name: string :param new_cert_name: The new name for the server certificate. Include this only if you are updating the server certificate's name. :type new_path: string :param new_path: If provided, the path of the certificate will be changed to this path. """ params = {'ServerCertificateName' : cert_name} if new_cert_name: params['NewServerCertificateName'] = new_cert_name if new_path: params['NewPath'] = new_path return self.get_response('UpdateServerCertificate', params)
[ "def", "update_server_cert", "(", "self", ",", "cert_name", ",", "new_cert_name", "=", "None", ",", "new_path", "=", "None", ")", ":", "params", "=", "{", "'ServerCertificateName'", ":", "cert_name", "}", "if", "new_cert_name", ":", "params", "[", "'NewServerC...
Updates the name and/or the path of the specified server certificate. :type cert_name: string :param cert_name: The name of the server certificate that you want to update. :type new_cert_name: string :param new_cert_name: The new name for the server certificate. Include this only if you are updating the server certificate's name. :type new_path: string :param new_path: If provided, the path of the certificate will be changed to this path.
[ "Updates", "the", "name", "and", "/", "or", "the", "path", "of", "the", "specified", "server", "certificate", "." ]
python
train
hubo1016/vlcp
vlcp/event/runnable.py
https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/event/runnable.py#L411-L427
async def wait_for_send(self, event, *, until=None): ''' Send an event to the main event queue. Can call without delegate. :param until: if the callback returns True, stop sending and return :return: the last True value the callback returns, or None ''' while True: if until: r = until() if r: return r waiter = self.scheduler.send(event) if waiter is None: break await waiter
[ "async", "def", "wait_for_send", "(", "self", ",", "event", ",", "*", ",", "until", "=", "None", ")", ":", "while", "True", ":", "if", "until", ":", "r", "=", "until", "(", ")", "if", "r", ":", "return", "r", "waiter", "=", "self", ".", "schedule...
Send an event to the main event queue. Can call without delegate. :param until: if the callback returns True, stop sending and return :return: the last True value the callback returns, or None
[ "Send", "an", "event", "to", "the", "main", "event", "queue", ".", "Can", "call", "without", "delegate", ".", ":", "param", "until", ":", "if", "the", "callback", "returns", "True", "stop", "sending", "and", "return", ":", "return", ":", "the", "last", ...
python
train
arogozhnikov/einops
einops/einops.py
https://github.com/arogozhnikov/einops/blob/9698f0f5efa6c5a79daa75253137ba5d79a95615/einops/einops.py#L412-L457
def rearrange(tensor, pattern, **axes_lengths): """ einops.rearrange is a reader-friendly smart element reordering for multidimensional tensors. This operation includes functionality of transpose (axes permutation), reshape (view), squeeze, unsqueeze, stack, concatenate and other operations. Examples for rearrange operation: >>> # suppose we have a set of images in "h w c" format (height-width-channel) >>> images = [np.random.randn(30, 40, 3) for _ in range(32)] >>> # stack along first (batch) axis, output is a single array >>> rearrange(images, 'b h w c -> b h w c').shape (32, 30, 40, 3) >>> # concatenate images along height (vertical axis), 960 = 32 * 30 >>> rearrange(images, 'b h w c -> (b h) w c').shape (960, 40, 3) >>> # concatenated images along horizontal axis, 1280 = 32 * 40 >>> rearrange(images, 'b h w c -> h (b w) c').shape (30, 1280, 3) >>> # reordered axes to "b c h w" format for deep learning >>> rearrange(images, 'b h w c -> b c h w').shape (32, 3, 30, 40) >>> # flattened each image into a vector, 3600 = 30 * 40 * 3 >>> rearrange(images, 'b h w c -> b (c h w)').shape (32, 3600) >>> # split each image into 4 smaller (top-left, top-right, bottom-left, bottom-right), 128 = 32 * 2 * 2 >>> rearrange(images, 'b (h1 h) (w1 w) c -> (b h1 w1) h w c', h1=2, w1=2).shape (128, 15, 20, 3) >>> # space-to-depth operation >>> rearrange(images, 'b (h h1) (w w1) c -> b h w (c h1 w1)', h1=2, w1=2).shape (32, 15, 20, 12) :param tensor: tensor of any supported library (e.g. numpy.ndarray, tensorflow, pytorch, mxnet.ndarray). list of tensors is also accepted, those should be of the same type and shape :param pattern: string, rearrangement pattern :param axes_lengths: any additional specifications for dimensions :return: tensor of the same type as input. If possible, a view to the original tensor is returned. When composing axes, C-order enumeration used (consecutive elements have different last axis) More source_examples and explanations can be found in the einops guide. 
""" if isinstance(tensor, list): if len(tensor) == 0: raise TypeError("Rearrange can't be applied to an empty list") tensor = get_backend(tensor[0]).stack_on_zeroth_dimension(tensor) return reduce(tensor, pattern, reduction='rearrange', **axes_lengths)
[ "def", "rearrange", "(", "tensor", ",", "pattern", ",", "*", "*", "axes_lengths", ")", ":", "if", "isinstance", "(", "tensor", ",", "list", ")", ":", "if", "len", "(", "tensor", ")", "==", "0", ":", "raise", "TypeError", "(", "\"Rearrange can't be applie...
einops.rearrange is a reader-friendly smart element reordering for multidimensional tensors. This operation includes functionality of transpose (axes permutation), reshape (view), squeeze, unsqueeze, stack, concatenate and other operations. Examples for rearrange operation: >>> # suppose we have a set of images in "h w c" format (height-width-channel) >>> images = [np.random.randn(30, 40, 3) for _ in range(32)] >>> # stack along first (batch) axis, output is a single array >>> rearrange(images, 'b h w c -> b h w c').shape (32, 30, 40, 3) >>> # concatenate images along height (vertical axis), 960 = 32 * 30 >>> rearrange(images, 'b h w c -> (b h) w c').shape (960, 40, 3) >>> # concatenated images along horizontal axis, 1280 = 32 * 40 >>> rearrange(images, 'b h w c -> h (b w) c').shape (30, 1280, 3) >>> # reordered axes to "b c h w" format for deep learning >>> rearrange(images, 'b h w c -> b c h w').shape (32, 3, 30, 40) >>> # flattened each image into a vector, 3600 = 30 * 40 * 3 >>> rearrange(images, 'b h w c -> b (c h w)').shape (32, 3600) >>> # split each image into 4 smaller (top-left, top-right, bottom-left, bottom-right), 128 = 32 * 2 * 2 >>> rearrange(images, 'b (h1 h) (w1 w) c -> (b h1 w1) h w c', h1=2, w1=2).shape (128, 15, 20, 3) >>> # space-to-depth operation >>> rearrange(images, 'b (h h1) (w w1) c -> b h w (c h1 w1)', h1=2, w1=2).shape (32, 15, 20, 12) :param tensor: tensor of any supported library (e.g. numpy.ndarray, tensorflow, pytorch, mxnet.ndarray). list of tensors is also accepted, those should be of the same type and shape :param pattern: string, rearrangement pattern :param axes_lengths: any additional specifications for dimensions :return: tensor of the same type as input. If possible, a view to the original tensor is returned. When composing axes, C-order enumeration used (consecutive elements have different last axis) More source_examples and explanations can be found in the einops guide.
[ "einops", ".", "rearrange", "is", "a", "reader", "-", "friendly", "smart", "element", "reordering", "for", "multidimensional", "tensors", ".", "This", "operation", "includes", "functionality", "of", "transpose", "(", "axes", "permutation", ")", "reshape", "(", "...
python
train
keon/algorithms
algorithms/arrays/merge_intervals.py
https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/arrays/merge_intervals.py#L58-L63
def print_intervals(intervals): """ Print out the intervals. """ res = [] for i in intervals: res.append(repr(i)) print("".join(res))
[ "def", "print_intervals", "(", "intervals", ")", ":", "res", "=", "[", "]", "for", "i", "in", "intervals", ":", "res", ".", "append", "(", "repr", "(", "i", ")", ")", "print", "(", "\"\"", ".", "join", "(", "res", ")", ")" ]
Print out the intervals.
[ "Print", "out", "the", "intervals", "." ]
python
train
ARMmbed/icetea
icetea_lib/DeviceConnectors/Dut.py
https://github.com/ARMmbed/icetea/blob/b2b97ac607429830cf7d62dae2e3903692c7c778/icetea_lib/DeviceConnectors/Dut.py#L329-L340
def set_default_init_cli_human_cmds(self): # pylint: disable=no-self-use """ Default commands to restore cli to human readable state are echo on, set --vt100 on, set --retcode false. :return: List of default commands to restore cli to human readable format """ post_cli_cmds = [] post_cli_cmds.append("echo on") post_cli_cmds.append("set --vt100 on") post_cli_cmds.append(["set --retcode false", False, False]) # last True is wait=<Boolean> return post_cli_cmds
[ "def", "set_default_init_cli_human_cmds", "(", "self", ")", ":", "# pylint: disable=no-self-use", "post_cli_cmds", "=", "[", "]", "post_cli_cmds", ".", "append", "(", "\"echo on\"", ")", "post_cli_cmds", ".", "append", "(", "\"set --vt100 on\"", ")", "post_cli_cmds", ...
Default commands to restore cli to human readable state are echo on, set --vt100 on, set --retcode false. :return: List of default commands to restore cli to human readable format
[ "Default", "commands", "to", "restore", "cli", "to", "human", "readable", "state", "are", "echo", "on", "set", "--", "vt100", "on", "set", "--", "retcode", "false", "." ]
python
train
wright-group/WrightTools
WrightTools/data/_data.py
https://github.com/wright-group/WrightTools/blob/80d3ddd5074d8d5c1bc03fd5a0e0f10d4b424aeb/WrightTools/data/_data.py#L132-L137
def kind(self): """Kind.""" if "kind" not in self.attrs.keys(): self.attrs["kind"] = "None" value = self.attrs["kind"] return value if not value == "None" else None
[ "def", "kind", "(", "self", ")", ":", "if", "\"kind\"", "not", "in", "self", ".", "attrs", ".", "keys", "(", ")", ":", "self", ".", "attrs", "[", "\"kind\"", "]", "=", "\"None\"", "value", "=", "self", ".", "attrs", "[", "\"kind\"", "]", "return", ...
Kind.
[ "Kind", "." ]
python
train
chemlab/chemlab
chemlab/mviewer/api/selections.py
https://github.com/chemlab/chemlab/blob/c8730966316d101e24f39ac3b96b51282aba0abe/chemlab/mviewer/api/selections.py#L20-L32
def select_atoms(indices): '''Select atoms by their indices. You can select the first 3 atoms as follows:: select_atoms([0, 1, 2]) Return the current selection dictionary. ''' rep = current_representation() rep.select({'atoms': Selection(indices, current_system().n_atoms)}) return rep.selection_state
[ "def", "select_atoms", "(", "indices", ")", ":", "rep", "=", "current_representation", "(", ")", "rep", ".", "select", "(", "{", "'atoms'", ":", "Selection", "(", "indices", ",", "current_system", "(", ")", ".", "n_atoms", ")", "}", ")", "return", "rep",...
Select atoms by their indices. You can select the first 3 atoms as follows:: select_atoms([0, 1, 2]) Return the current selection dictionary.
[ "Select", "atoms", "by", "their", "indices", "." ]
python
train
wonambi-python/wonambi
wonambi/ioeeg/lyonrri.py
https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/ioeeg/lyonrri.py#L100-L117
def return_rri(self, begsam, endsam): """Return raw, irregularly-timed RRI.""" interval = endsam - begsam dat = empty(interval) k = 0 with open(self.filename, 'rt') as f: [next(f) for x in range(12)] for j, datum in enumerate(f): if begsam <= j < endsam: dat[k] = float64(datum[:datum.index('\t')]) k += 1 if k == interval: break return dat
[ "def", "return_rri", "(", "self", ",", "begsam", ",", "endsam", ")", ":", "interval", "=", "endsam", "-", "begsam", "dat", "=", "empty", "(", "interval", ")", "k", "=", "0", "with", "open", "(", "self", ".", "filename", ",", "'rt'", ")", "as", "f",...
Return raw, irregularly-timed RRI.
[ "Return", "raw", "irregularly", "-", "timed", "RRI", "." ]
python
train
saltstack/salt
salt/utils/versions.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/versions.py#L297-L364
def check_boto_reqs(boto_ver=None, boto3_ver=None, botocore_ver=None, check_boto=True, check_boto3=True): ''' Checks for the version of various required boto libs in one central location. Most boto states and modules rely on a single version of the boto, boto3, or botocore libs. However, some require newer versions of any of these dependencies. This function allows the module to pass in a version to override the default minimum required version. This function is useful in centralizing checks for ``__virtual__()`` functions in the various, and many, boto modules and states. boto_ver The minimum required version of the boto library. Defaults to ``2.0.0``. boto3_ver The minimum required version of the boto3 library. Defaults to ``1.2.6``. botocore_ver The minimum required version of the botocore library. Defaults to ``1.3.23``. check_boto Boolean defining whether or not to check for boto deps. This defaults to ``True`` as most boto modules/states rely on boto, but some do not. check_boto3 Boolean defining whether or not to check for boto3 (and therefore botocore) deps. This defaults to ``True`` as most boto modules/states rely on boto3/botocore, but some do not. 
''' if check_boto is True: try: # Late import so we can only load these for this function import boto has_boto = True except ImportError: has_boto = False if boto_ver is None: boto_ver = '2.0.0' if not has_boto or version_cmp(boto.__version__, boto_ver) == -1: return False, 'A minimum version of boto {0} is required.'.format(boto_ver) if check_boto3 is True: try: # Late import so we can only load these for this function import boto3 import botocore has_boto3 = True except ImportError: has_boto3 = False # boto_s3_bucket module requires boto3 1.2.6 and botocore 1.3.23 for # idempotent ACL operations via the fix in https://github.com/boto/boto3/issues/390 if boto3_ver is None: boto3_ver = '1.2.6' if botocore_ver is None: botocore_ver = '1.3.23' if not has_boto3 or version_cmp(boto3.__version__, boto3_ver) == -1: return False, 'A minimum version of boto3 {0} is required.'.format(boto3_ver) elif version_cmp(botocore.__version__, botocore_ver) == -1: return False, 'A minimum version of botocore {0} is required'.format(botocore_ver) return True
[ "def", "check_boto_reqs", "(", "boto_ver", "=", "None", ",", "boto3_ver", "=", "None", ",", "botocore_ver", "=", "None", ",", "check_boto", "=", "True", ",", "check_boto3", "=", "True", ")", ":", "if", "check_boto", "is", "True", ":", "try", ":", "# Late...
Checks for the version of various required boto libs in one central location. Most boto states and modules rely on a single version of the boto, boto3, or botocore libs. However, some require newer versions of any of these dependencies. This function allows the module to pass in a version to override the default minimum required version. This function is useful in centralizing checks for ``__virtual__()`` functions in the various, and many, boto modules and states. boto_ver The minimum required version of the boto library. Defaults to ``2.0.0``. boto3_ver The minimum required version of the boto3 library. Defaults to ``1.2.6``. botocore_ver The minimum required version of the botocore library. Defaults to ``1.3.23``. check_boto Boolean defining whether or not to check for boto deps. This defaults to ``True`` as most boto modules/states rely on boto, but some do not. check_boto3 Boolean defining whether or not to check for boto3 (and therefore botocore) deps. This defaults to ``True`` as most boto modules/states rely on boto3/botocore, but some do not.
[ "Checks", "for", "the", "version", "of", "various", "required", "boto", "libs", "in", "one", "central", "location", ".", "Most", "boto", "states", "and", "modules", "rely", "on", "a", "single", "version", "of", "the", "boto", "boto3", "or", "botocore", "li...
python
train
adrn/gala
gala/coordinates/pm_cov_transform.py
https://github.com/adrn/gala/blob/ea95575a0df1581bb4b0986aebd6eea8438ab7eb/gala/coordinates/pm_cov_transform.py#L21-L59
def get_transform_matrix(from_frame, to_frame): """Compose sequential matrix transformations (static or dynamic) to get a single transformation matrix from a given path through the Astropy transformation machinery. Parameters ---------- from_frame : `~astropy.coordinates.BaseCoordinateFrame` subclass The *class* of the frame you're transforming from. to_frame : `~astropy.coordinates.BaseCoordinateFrame` subclass The *class* of the frame you're transfrorming to. """ path, distance = coord.frame_transform_graph.find_shortest_path( from_frame, to_frame) matrices = [] currsys = from_frame for p in path[1:]: # first element is fromsys so we skip it trans = coord.frame_transform_graph._graph[currsys][p] if isinstance(trans, coord.DynamicMatrixTransform): M = trans.matrix_func(currsys(), p) elif isinstance(trans, coord.StaticMatrixTransform): M = trans.matrix else: raise ValueError("Transform path contains a '{0}': cannot " "be composed into a single transformation " "matrix.".format(trans.__class__.__name__)) matrices.append(M) currsys = p M = None for Mi in reversed(matrices): if M is None: M = Mi else: M = matrix_product(M, Mi) return M
[ "def", "get_transform_matrix", "(", "from_frame", ",", "to_frame", ")", ":", "path", ",", "distance", "=", "coord", ".", "frame_transform_graph", ".", "find_shortest_path", "(", "from_frame", ",", "to_frame", ")", "matrices", "=", "[", "]", "currsys", "=", "fr...
Compose sequential matrix transformations (static or dynamic) to get a single transformation matrix from a given path through the Astropy transformation machinery. Parameters ---------- from_frame : `~astropy.coordinates.BaseCoordinateFrame` subclass The *class* of the frame you're transforming from. to_frame : `~astropy.coordinates.BaseCoordinateFrame` subclass The *class* of the frame you're transfrorming to.
[ "Compose", "sequential", "matrix", "transformations", "(", "static", "or", "dynamic", ")", "to", "get", "a", "single", "transformation", "matrix", "from", "a", "given", "path", "through", "the", "Astropy", "transformation", "machinery", "." ]
python
train
lemieuxl/pyGenClean
pyGenClean/Ethnicity/check_ethnicity.py
https://github.com/lemieuxl/pyGenClean/blob/6173a48ccc0cf3a3bd711b1f2a1fa16248b8bf55/pyGenClean/Ethnicity/check_ethnicity.py#L544-L557
def allFileExists(fileList): """Check that all file exists. :param fileList: the list of file to check. :type fileList: list Check if all the files in ``fileList`` exists. """ allExists = True for fileName in fileList: allExists = allExists and os.path.isfile(fileName) return allExists
[ "def", "allFileExists", "(", "fileList", ")", ":", "allExists", "=", "True", "for", "fileName", "in", "fileList", ":", "allExists", "=", "allExists", "and", "os", ".", "path", ".", "isfile", "(", "fileName", ")", "return", "allExists" ]
Check that all file exists. :param fileList: the list of file to check. :type fileList: list Check if all the files in ``fileList`` exists.
[ "Check", "that", "all", "file", "exists", "." ]
python
train
dereneaton/ipyrad
ipyrad/core/assembly.py
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/core/assembly.py#L945-L988
def _step1func(self, force, ipyclient): """ hidden wrapped function to start step 1 """ ## check input data files sfiles = self.paramsdict["sorted_fastq_path"] rfiles = self.paramsdict["raw_fastq_path"] ## do not allow both a sorted_fastq_path and a raw_fastq if sfiles and rfiles: raise IPyradWarningExit(NOT_TWO_PATHS) ## but also require that at least one exists if not (sfiles or rfiles): raise IPyradWarningExit(NO_SEQ_PATH_FOUND) ## print headers if self._headers: if sfiles: print("\n{}Step 1: Loading sorted fastq data to Samples"\ .format(self._spacer)) else: print("\n{}Step 1: Demultiplexing fastq data to Samples"\ .format(self._spacer)) ## if Samples already exist then no demultiplexing if self.samples: if not force: print(SAMPLES_EXIST.format(len(self.samples), self.name)) else: ## overwrite existing data else do demux if glob.glob(sfiles): self._link_fastqs(ipyclient=ipyclient, force=force) else: assemble.demultiplex.run2(self, ipyclient, force) ## Creating new Samples else: ## first check if demultiplexed files exist in sorted path if glob.glob(sfiles): self._link_fastqs(ipyclient=ipyclient) ## otherwise do the demultiplexing else: assemble.demultiplex.run2(self, ipyclient, force)
[ "def", "_step1func", "(", "self", ",", "force", ",", "ipyclient", ")", ":", "## check input data files", "sfiles", "=", "self", ".", "paramsdict", "[", "\"sorted_fastq_path\"", "]", "rfiles", "=", "self", ".", "paramsdict", "[", "\"raw_fastq_path\"", "]", "## do...
hidden wrapped function to start step 1
[ "hidden", "wrapped", "function", "to", "start", "step", "1" ]
python
valid
spacetelescope/drizzlepac
drizzlepac/hlautils/astrometric_utils.py
https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/hlautils/astrometric_utils.py#L935-L984
def create_image_footprint(image, refwcs, border=0.): """ Create the footprint of the image in the reference WCS frame. Parameters ---------- image : `astropy.io.fits.HDUList` or str Image to extract sources for matching to the external astrometric catalog. refwcs : `stwcs.wcsutil.HSTWCS` Reference WCS for coordinate frame of image. border : float Buffer (in arcseconds) around edge of image to exclude astrometric sources. """ # Interpret input image to generate initial source catalog and WCS if isinstance(image, str): image = pf.open(image) numSci = countExtn(image, extname='SCI') ref_x = refwcs._naxis1 ref_y = refwcs._naxis2 # convert border value into pixels border_pixels = int(border / refwcs.pscale) mask_arr = np.zeros((ref_y, ref_x), dtype=int) for chip in range(numSci): chip += 1 # Build arrays of pixel positions for all edges of chip chip_y, chip_x = image['sci', chip].data.shape chipwcs = wcsutil.HSTWCS(image, ext=('sci', chip)) xpix = np.arange(chip_x) + 1 ypix = np.arange(chip_y) + 1 edge_x = np.hstack([[1] * chip_y, xpix, [chip_x] * chip_y, xpix]) edge_y = np.hstack([ypix, [1] * chip_x, ypix, [chip_y] * chip_x]) edge_ra, edge_dec = chipwcs.all_pix2world(edge_x, edge_y, 1) edge_x_out, edge_y_out = refwcs.all_world2pix(edge_ra, edge_dec, 0) edge_x_out = np.clip(edge_x_out.astype(np.int32), 0, ref_x - 1) edge_y_out = np.clip(edge_y_out.astype(np.int32), 0, ref_y - 1) mask_arr[edge_y_out, edge_x_out] = 1 # Fill in outline of each chip mask_arr = ndimage.binary_fill_holes(ndimage.binary_dilation(mask_arr, iterations=2)) if border > 0.: mask_arr = ndimage.binary_erosion(mask_arr, iterations=border_pixels) return mask_arr
[ "def", "create_image_footprint", "(", "image", ",", "refwcs", ",", "border", "=", "0.", ")", ":", "# Interpret input image to generate initial source catalog and WCS", "if", "isinstance", "(", "image", ",", "str", ")", ":", "image", "=", "pf", ".", "open", "(", ...
Create the footprint of the image in the reference WCS frame. Parameters ---------- image : `astropy.io.fits.HDUList` or str Image to extract sources for matching to the external astrometric catalog. refwcs : `stwcs.wcsutil.HSTWCS` Reference WCS for coordinate frame of image. border : float Buffer (in arcseconds) around edge of image to exclude astrometric sources.
[ "Create", "the", "footprint", "of", "the", "image", "in", "the", "reference", "WCS", "frame", "." ]
python
train
edibledinos/pwnypack
pwnypack/shellcode/base.py
https://github.com/edibledinos/pwnypack/blob/e0a5a8e6ef3f4f1f7e1b91ee379711f4a49cb0e6/pwnypack/shellcode/base.py#L314-L325
def assemble(self, ops): """ Assemble a list of operations into executable code. Arguments: ops(list): A list of shellcode operations. Returns: bytes: The executable code that implements the shellcode. """ return pwnypack.asm.asm(self.compile(ops), target=self.target)
[ "def", "assemble", "(", "self", ",", "ops", ")", ":", "return", "pwnypack", ".", "asm", ".", "asm", "(", "self", ".", "compile", "(", "ops", ")", ",", "target", "=", "self", ".", "target", ")" ]
Assemble a list of operations into executable code. Arguments: ops(list): A list of shellcode operations. Returns: bytes: The executable code that implements the shellcode.
[ "Assemble", "a", "list", "of", "operations", "into", "executable", "code", "." ]
python
train
fermiPy/fermipy
fermipy/skymap.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/skymap.py#L351-L382
def get_map_values(self, lons, lats, ibin=None): """Return the map values corresponding to a set of coordinates. Parameters ---------- lons : array-like 'Longitudes' (RA or GLON) lats : array-like 'Latitidues' (DEC or GLAT) ibin : int or array-like Extract data only for a given energy bin. None -> extract data for all bins Returns ---------- vals : numpy.ndarray((n)) Values of pixels in the flattened map, np.nan used to flag coords outside of map """ pix_idxs = self.get_pixel_indices(lons, lats, ibin) idxs = copy.copy(pix_idxs) m = np.empty_like(idxs[0], dtype=bool) m.fill(True) for i, p in enumerate(pix_idxs): m &= (pix_idxs[i] >= 0) & (pix_idxs[i] < self._npix[i]) idxs[i][~m] = 0 vals = self.counts.T[idxs] vals[~m] = np.nan return vals
[ "def", "get_map_values", "(", "self", ",", "lons", ",", "lats", ",", "ibin", "=", "None", ")", ":", "pix_idxs", "=", "self", ".", "get_pixel_indices", "(", "lons", ",", "lats", ",", "ibin", ")", "idxs", "=", "copy", ".", "copy", "(", "pix_idxs", ")",...
Return the map values corresponding to a set of coordinates. Parameters ---------- lons : array-like 'Longitudes' (RA or GLON) lats : array-like 'Latitidues' (DEC or GLAT) ibin : int or array-like Extract data only for a given energy bin. None -> extract data for all bins Returns ---------- vals : numpy.ndarray((n)) Values of pixels in the flattened map, np.nan used to flag coords outside of map
[ "Return", "the", "map", "values", "corresponding", "to", "a", "set", "of", "coordinates", "." ]
python
train
luckydonald/pytgbot
code_generation/output/pytgbot/api_types/receivable/media.py
https://github.com/luckydonald/pytgbot/blob/67f4b5a1510d4583d40b5477e876b1ef0eb8971b/code_generation/output/pytgbot/api_types/receivable/media.py#L429-L452
def from_array(array): """ Deserialize a new Audio from a given dictionary. :return: new Audio instance. :rtype: Audio """ if array is None or not array: return None # end if assert_type_or_raise(array, dict, parameter_name="array") from pytgbot.api_types.receivable.media import PhotoSize data = {} data['file_id'] = u(array.get('file_id')) data['duration'] = int(array.get('duration')) data['performer'] = u(array.get('performer')) if array.get('performer') is not None else None data['title'] = u(array.get('title')) if array.get('title') is not None else None data['mime_type'] = u(array.get('mime_type')) if array.get('mime_type') is not None else None data['file_size'] = int(array.get('file_size')) if array.get('file_size') is not None else None data['thumb'] = PhotoSize.from_array(array.get('thumb')) if array.get('thumb') is not None else None data['_raw'] = array return Audio(**data)
[ "def", "from_array", "(", "array", ")", ":", "if", "array", "is", "None", "or", "not", "array", ":", "return", "None", "# end if", "assert_type_or_raise", "(", "array", ",", "dict", ",", "parameter_name", "=", "\"array\"", ")", "from", "pytgbot", ".", "api...
Deserialize a new Audio from a given dictionary. :return: new Audio instance. :rtype: Audio
[ "Deserialize", "a", "new", "Audio", "from", "a", "given", "dictionary", "." ]
python
train
Workiva/furious
furious/async.py
https://github.com/Workiva/furious/blob/c29823ec8b98549e7439d7273aa064d1e5830632/furious/async.py#L239-L249
def check_recursion_depth(self): """Check recursion depth, raise AsyncRecursionError if too deep.""" from furious.async import MAX_DEPTH recursion_options = self._options.get('_recursion', {}) max_depth = recursion_options.get('max', MAX_DEPTH) # Check if recursion check has been disabled, then check depth. if (max_depth != DISABLE_RECURSION_CHECK and self.recursion_depth > max_depth): raise errors.AsyncRecursionError('Max recursion depth reached.')
[ "def", "check_recursion_depth", "(", "self", ")", ":", "from", "furious", ".", "async", "import", "MAX_DEPTH", "recursion_options", "=", "self", ".", "_options", ".", "get", "(", "'_recursion'", ",", "{", "}", ")", "max_depth", "=", "recursion_options", ".", ...
Check recursion depth, raise AsyncRecursionError if too deep.
[ "Check", "recursion", "depth", "raise", "AsyncRecursionError", "if", "too", "deep", "." ]
python
train
mongodb/mongo-python-driver
pymongo/uri_parser.py
https://github.com/mongodb/mongo-python-driver/blob/c29c21449e3aae74154207058cf85fd94018d4cd/pymongo/uri_parser.py#L256-L264
def _normalize_options(options): """Renames keys in the options dictionary to their internally-used names.""" normalized_options = {} for key, value in iteritems(options): optname = str(key).lower() intname = INTERNAL_URI_OPTION_NAME_MAP.get(optname, key) normalized_options[intname] = options[key] return normalized_options
[ "def", "_normalize_options", "(", "options", ")", ":", "normalized_options", "=", "{", "}", "for", "key", ",", "value", "in", "iteritems", "(", "options", ")", ":", "optname", "=", "str", "(", "key", ")", ".", "lower", "(", ")", "intname", "=", "INTERN...
Renames keys in the options dictionary to their internally-used names.
[ "Renames", "keys", "in", "the", "options", "dictionary", "to", "their", "internally", "-", "used", "names", "." ]
python
train
spyder-ide/spyder
spyder/app/mainwindow.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/app/mainwindow.py#L2804-L2809
def pythonpath_changed(self): """Projects PYTHONPATH contribution has changed""" self.remove_path_from_sys_path() self.project_path = self.projects.get_pythonpath() self.add_path_to_sys_path() self.sig_pythonpath_changed.emit()
[ "def", "pythonpath_changed", "(", "self", ")", ":", "self", ".", "remove_path_from_sys_path", "(", ")", "self", ".", "project_path", "=", "self", ".", "projects", ".", "get_pythonpath", "(", ")", "self", ".", "add_path_to_sys_path", "(", ")", "self", ".", "s...
Projects PYTHONPATH contribution has changed
[ "Projects", "PYTHONPATH", "contribution", "has", "changed" ]
python
train
spyder-ide/spyder
spyder/plugins/variableexplorer/widgets/arrayeditor.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/variableexplorer/widgets/arrayeditor.py#L835-L839
def error(self, message): """An error occured, closing the dialog box""" QMessageBox.critical(self, _("Array editor"), message) self.setAttribute(Qt.WA_DeleteOnClose) self.reject()
[ "def", "error", "(", "self", ",", "message", ")", ":", "QMessageBox", ".", "critical", "(", "self", ",", "_", "(", "\"Array editor\"", ")", ",", "message", ")", "self", ".", "setAttribute", "(", "Qt", ".", "WA_DeleteOnClose", ")", "self", ".", "reject", ...
An error occured, closing the dialog box
[ "An", "error", "occured", "closing", "the", "dialog", "box" ]
python
train
dbcli/athenacli
athenacli/completer.py
https://github.com/dbcli/athenacli/blob/bcab59e4953145866430083e902ed4d042d4ebba/athenacli/completer.py#L338-L348
def find_files(self, word): """Yield matching directory or file names. :param word: :return: iterable """ base_path, last_path, position = parse_path(word) paths = suggest_path(word) for name in sorted(paths): suggestion = complete_path(name, last_path) if suggestion: yield Completion(suggestion, position)
[ "def", "find_files", "(", "self", ",", "word", ")", ":", "base_path", ",", "last_path", ",", "position", "=", "parse_path", "(", "word", ")", "paths", "=", "suggest_path", "(", "word", ")", "for", "name", "in", "sorted", "(", "paths", ")", ":", "sugges...
Yield matching directory or file names. :param word: :return: iterable
[ "Yield", "matching", "directory", "or", "file", "names", ".", ":", "param", "word", ":", ":", "return", ":", "iterable" ]
python
train
programa-stic/barf-project
barf/core/reil/emulator/cpu.py
https://github.com/programa-stic/barf-project/blob/18ed9e5eace55f7bf6015ec57f037c364099021c/barf/core/reil/emulator/cpu.py#L301-L317
def __execute_bsh(self, instr): """Execute BSH instruction. """ op0_val = self.read_operand(instr.operands[0]) op1_val = self.read_operand(instr.operands[1]) op1_size = instr.operands[1].size # Check sign bit. if extract_sign_bit(op1_val, op1_size) == 0: op2_val = op0_val << op1_val else: op2_val = op0_val >> twos_complement(op1_val, op1_size) self.write_operand(instr.operands[2], op2_val) return None
[ "def", "__execute_bsh", "(", "self", ",", "instr", ")", ":", "op0_val", "=", "self", ".", "read_operand", "(", "instr", ".", "operands", "[", "0", "]", ")", "op1_val", "=", "self", ".", "read_operand", "(", "instr", ".", "operands", "[", "1", "]", ")...
Execute BSH instruction.
[ "Execute", "BSH", "instruction", "." ]
python
train
apple/turicreate
src/unity/python/turicreate/data_structures/sarray.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/data_structures/sarray.py#L431-L475
def date_range(cls,start_time,end_time,freq): ''' Returns a new SArray that represents a fixed frequency datetime index. Parameters ---------- start_time : datetime.datetime Left bound for generating dates. end_time : datetime.datetime Right bound for generating dates. freq : datetime.timedelta Fixed frequency between two consecutive data points. Returns ------- out : SArray Examples -------- >>> import datetime as dt >>> start = dt.datetime(2013, 5, 7, 10, 4, 10) >>> end = dt.datetime(2013, 5, 10, 10, 4, 10) >>> sa = tc.SArray.date_range(start,end,dt.timedelta(1)) >>> print sa dtype: datetime Rows: 4 [datetime.datetime(2013, 5, 7, 10, 4, 10), datetime.datetime(2013, 5, 8, 10, 4, 10), datetime.datetime(2013, 5, 9, 10, 4, 10), datetime.datetime(2013, 5, 10, 10, 4, 10)] ''' if not isinstance(start_time,datetime.datetime): raise TypeError("The ``start_time`` argument must be from type datetime.datetime.") if not isinstance(end_time,datetime.datetime): raise TypeError("The ``end_time`` argument must be from type datetime.datetime.") if not isinstance(freq,datetime.timedelta): raise TypeError("The ``freq`` argument must be from type datetime.timedelta.") from .. import extensions return extensions.date_range(start_time,end_time,freq.total_seconds())
[ "def", "date_range", "(", "cls", ",", "start_time", ",", "end_time", ",", "freq", ")", ":", "if", "not", "isinstance", "(", "start_time", ",", "datetime", ".", "datetime", ")", ":", "raise", "TypeError", "(", "\"The ``start_time`` argument must be from type dateti...
Returns a new SArray that represents a fixed frequency datetime index. Parameters ---------- start_time : datetime.datetime Left bound for generating dates. end_time : datetime.datetime Right bound for generating dates. freq : datetime.timedelta Fixed frequency between two consecutive data points. Returns ------- out : SArray Examples -------- >>> import datetime as dt >>> start = dt.datetime(2013, 5, 7, 10, 4, 10) >>> end = dt.datetime(2013, 5, 10, 10, 4, 10) >>> sa = tc.SArray.date_range(start,end,dt.timedelta(1)) >>> print sa dtype: datetime Rows: 4 [datetime.datetime(2013, 5, 7, 10, 4, 10), datetime.datetime(2013, 5, 8, 10, 4, 10), datetime.datetime(2013, 5, 9, 10, 4, 10), datetime.datetime(2013, 5, 10, 10, 4, 10)]
[ "Returns", "a", "new", "SArray", "that", "represents", "a", "fixed", "frequency", "datetime", "index", "." ]
python
train
bokeh/bokeh
bokeh/io/notebook.py
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/io/notebook.py#L308-L325
def destroy_server(server_id): ''' Given a UUID id of a div removed or replaced in the Jupyter notebook, destroy the corresponding server sessions and stop it. ''' server = curstate().uuid_to_server.get(server_id, None) if server is None: log.debug("No server instance found for uuid: %r" % server_id) return try: for session in server.get_sessions(): session.destroy() server.stop() del curstate().uuid_to_server[server_id] except Exception as e: log.debug("Could not destroy server for id %r: %s" % (server_id, e))
[ "def", "destroy_server", "(", "server_id", ")", ":", "server", "=", "curstate", "(", ")", ".", "uuid_to_server", ".", "get", "(", "server_id", ",", "None", ")", "if", "server", "is", "None", ":", "log", ".", "debug", "(", "\"No server instance found for uuid...
Given a UUID id of a div removed or replaced in the Jupyter notebook, destroy the corresponding server sessions and stop it.
[ "Given", "a", "UUID", "id", "of", "a", "div", "removed", "or", "replaced", "in", "the", "Jupyter", "notebook", "destroy", "the", "corresponding", "server", "sessions", "and", "stop", "it", "." ]
python
train
tensorflow/cleverhans
examples/nips17_adversarial_competition/eval_infra/validation_tool/validate_and_copy_submissions.py
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/validation_tool/validate_and_copy_submissions.py#L119-L133
def copy_submission_locally(self, cloud_path): """Copies submission from Google Cloud Storage to local directory. Args: cloud_path: path of the submission in Google Cloud Storage Returns: name of the local file where submission is copied to """ local_path = os.path.join(self.download_dir, os.path.basename(cloud_path)) cmd = ['gsutil', 'cp', cloud_path, local_path] if subprocess.call(cmd) != 0: logging.error('Can\'t copy submission locally') return None return local_path
[ "def", "copy_submission_locally", "(", "self", ",", "cloud_path", ")", ":", "local_path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "download_dir", ",", "os", ".", "path", ".", "basename", "(", "cloud_path", ")", ")", "cmd", "=", "[", "'gs...
Copies submission from Google Cloud Storage to local directory. Args: cloud_path: path of the submission in Google Cloud Storage Returns: name of the local file where submission is copied to
[ "Copies", "submission", "from", "Google", "Cloud", "Storage", "to", "local", "directory", "." ]
python
train
AntagonistHQ/openprovider.py
openprovider/modules/ssl.py
https://github.com/AntagonistHQ/openprovider.py/blob/5871c3d5b3661e23667f147f49f20389c817a0a4/openprovider/modules/ssl.py#L59-L66
def retrieve_order(self, order_id): """Retrieve details on a single order.""" response = self.request(E.retrieveOrderSslCertRequest( E.id(order_id) )) return response.as_model(SSLOrder)
[ "def", "retrieve_order", "(", "self", ",", "order_id", ")", ":", "response", "=", "self", ".", "request", "(", "E", ".", "retrieveOrderSslCertRequest", "(", "E", ".", "id", "(", "order_id", ")", ")", ")", "return", "response", ".", "as_model", "(", "SSLO...
Retrieve details on a single order.
[ "Retrieve", "details", "on", "a", "single", "order", "." ]
python
train
mgedmin/check-manifest
check_manifest.py
https://github.com/mgedmin/check-manifest/blob/7f787e8272f56c5750670bfb3223509e0df72708/check_manifest.py#L781-L783
def file_matches_regexps(filename, patterns): """Does this filename match any of the regular expressions?""" return any(re.match(pat, filename) for pat in patterns)
[ "def", "file_matches_regexps", "(", "filename", ",", "patterns", ")", ":", "return", "any", "(", "re", ".", "match", "(", "pat", ",", "filename", ")", "for", "pat", "in", "patterns", ")" ]
Does this filename match any of the regular expressions?
[ "Does", "this", "filename", "match", "any", "of", "the", "regular", "expressions?" ]
python
train
pzs741/TEDT
TEDT/candidate_corpus.py
https://github.com/pzs741/TEDT/blob/6b6663227b755005fe1a1e3e807a05bdb521e066/TEDT/candidate_corpus.py#L96-L125
def list_mapping(html_cleaned): """将预处理后的网页文档映射成列表和字典,并提取虚假标题 Keyword arguments: html_cleaned -- 预处理后的网页源代码,字符串类型 Return: unit_raw -- 网页文本行 init_dict -- 字典的key是索引,value是网页文本行,并按照网页文本行长度降序排序 fake_title -- 虚假标题,即网页源代码<title>中的文本行 """ unit_raw = html_cleaned.split('\n') for i in unit_raw: c = CDM(i) if c.PTN is not 0: fake_title = i break init_list = [] init_dict = {} for i in unit_raw: init_list.append(len(i)) for i in range(0, len(init_list)): init_dict[i] = init_list[i] init_dict = sorted(init_dict.items(), key=lambda item: item[1], reverse=True) try: log('debug', '映射成功,提取的虚假标题为:【{}】'.format(fake_title)) except UnboundLocalError: fake_title = '' log('err', '虚假标题提取失败') return unit_raw, init_dict, fake_title
[ "def", "list_mapping", "(", "html_cleaned", ")", ":", "unit_raw", "=", "html_cleaned", ".", "split", "(", "'\\n'", ")", "for", "i", "in", "unit_raw", ":", "c", "=", "CDM", "(", "i", ")", "if", "c", ".", "PTN", "is", "not", "0", ":", "fake_title", "...
将预处理后的网页文档映射成列表和字典,并提取虚假标题 Keyword arguments: html_cleaned -- 预处理后的网页源代码,字符串类型 Return: unit_raw -- 网页文本行 init_dict -- 字典的key是索引,value是网页文本行,并按照网页文本行长度降序排序 fake_title -- 虚假标题,即网页源代码<title>中的文本行
[ "将预处理后的网页文档映射成列表和字典,并提取虚假标题" ]
python
train
ladybug-tools/ladybug
ladybug/psychrometrics.py
https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/psychrometrics.py#L176-L182
def dew_point_from_db_hr(db_temp, hr, b_press=101325): """Dew Point Temperature (C) at Temperature db_temp (C), Humidity Ratio hr (kg water/kg air) and Pressure b_press (Pa). """ rh = rel_humid_from_db_hr(db_temp, hr, b_press) td = dew_point_from_db_rh(db_temp, rh) return td
[ "def", "dew_point_from_db_hr", "(", "db_temp", ",", "hr", ",", "b_press", "=", "101325", ")", ":", "rh", "=", "rel_humid_from_db_hr", "(", "db_temp", ",", "hr", ",", "b_press", ")", "td", "=", "dew_point_from_db_rh", "(", "db_temp", ",", "rh", ")", "return...
Dew Point Temperature (C) at Temperature db_temp (C), Humidity Ratio hr (kg water/kg air) and Pressure b_press (Pa).
[ "Dew", "Point", "Temperature", "(", "C", ")", "at", "Temperature", "db_temp", "(", "C", ")", "Humidity", "Ratio", "hr", "(", "kg", "water", "/", "kg", "air", ")", "and", "Pressure", "b_press", "(", "Pa", ")", "." ]
python
train
nir0s/serv
serv/serv.py
https://github.com/nir0s/serv/blob/7af724ed49c0eb766c37c4b5287b043a8cf99e9c/serv/serv.py#L191-L202
def restart(self, name): """Restart a service """ init = self._get_implementation(name) self._assert_service_installed(init, name) logger.info('Restarting service: %s...', name) init.stop() # Here we would use status to verify that the service stopped # before restarting. If only status was stable. eh.. # The arbitrarity of this sleep time is making me sick... time.sleep(3) init.start()
[ "def", "restart", "(", "self", ",", "name", ")", ":", "init", "=", "self", ".", "_get_implementation", "(", "name", ")", "self", ".", "_assert_service_installed", "(", "init", ",", "name", ")", "logger", ".", "info", "(", "'Restarting service: %s...'", ",", ...
Restart a service
[ "Restart", "a", "service" ]
python
train
raphaelm/django-hierarkey
hierarkey/proxy.py
https://github.com/raphaelm/django-hierarkey/blob/3ca822f94fa633c9a6d5abe9c80cb1551299ae46/hierarkey/proxy.py#L217-L231
def delete(self, key: str) -> None: """ Deletes a setting from this object's storage. The write to the database is performed immediately and the cache in the cache backend is flushed. The cache within this object will be updated correctly. """ if key in self._write_cache(): self._write_cache()[key].delete() del self._write_cache()[key] if key in self._cache(): del self._cache()[key] self._flush_external_cache()
[ "def", "delete", "(", "self", ",", "key", ":", "str", ")", "->", "None", ":", "if", "key", "in", "self", ".", "_write_cache", "(", ")", ":", "self", ".", "_write_cache", "(", ")", "[", "key", "]", ".", "delete", "(", ")", "del", "self", ".", "_...
Deletes a setting from this object's storage. The write to the database is performed immediately and the cache in the cache backend is flushed. The cache within this object will be updated correctly.
[ "Deletes", "a", "setting", "from", "this", "object", "s", "storage", ".", "The", "write", "to", "the", "database", "is", "performed", "immediately", "and", "the", "cache", "in", "the", "cache", "backend", "is", "flushed", ".", "The", "cache", "within", "th...
python
train
chriso/gauged
gauged/structures/sparse_map.py
https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/structures/sparse_map.py#L73-L77
def append(self, position, array): """Append an array to the end of the map. The position must be greater than any positions in the map""" if not Gauged.map_append(self.ptr, position, array.ptr): raise MemoryError
[ "def", "append", "(", "self", ",", "position", ",", "array", ")", ":", "if", "not", "Gauged", ".", "map_append", "(", "self", ".", "ptr", ",", "position", ",", "array", ".", "ptr", ")", ":", "raise", "MemoryError" ]
Append an array to the end of the map. The position must be greater than any positions in the map
[ "Append", "an", "array", "to", "the", "end", "of", "the", "map", ".", "The", "position", "must", "be", "greater", "than", "any", "positions", "in", "the", "map" ]
python
train
odlgroup/odl
odl/solvers/functional/functional.py
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/solvers/functional/functional.py#L1045-L1049
def _call(self, x): """Apply the functional to the given point.""" return (self.functional(x) + self.quadratic_coeff * x.inner(x) + x.inner(self.linear_term) + self.constant)
[ "def", "_call", "(", "self", ",", "x", ")", ":", "return", "(", "self", ".", "functional", "(", "x", ")", "+", "self", ".", "quadratic_coeff", "*", "x", ".", "inner", "(", "x", ")", "+", "x", ".", "inner", "(", "self", ".", "linear_term", ")", ...
Apply the functional to the given point.
[ "Apply", "the", "functional", "to", "the", "given", "point", "." ]
python
train
manahl/arctic
arctic/decorators.py
https://github.com/manahl/arctic/blob/57e110b6e182dbab00e7e214dc26f7d9ec47c120/arctic/decorators.py#L34-L65
def mongo_retry(f): """ Catch-all decorator that handles AutoReconnect and OperationFailure errors from PyMongo """ log_all_exceptions = 'arctic' in f.__module__ if f.__module__ else False @wraps(f) def f_retry(*args, **kwargs): global _retry_count, _in_retry top_level = not _in_retry _in_retry = True try: while True: try: return f(*args, **kwargs) except (DuplicateKeyError, ServerSelectionTimeoutError) as e: # Re-raise errors that won't go away. _handle_error(f, e, _retry_count, **_get_host(args)) raise except (OperationFailure, AutoReconnect) as e: _retry_count += 1 _handle_error(f, e, _retry_count, **_get_host(args)) except Exception as e: if log_all_exceptions: _log_exception(f.__name__, e, _retry_count, **_get_host(args)) raise finally: if top_level: _in_retry = False _retry_count = 0 return f_retry
[ "def", "mongo_retry", "(", "f", ")", ":", "log_all_exceptions", "=", "'arctic'", "in", "f", ".", "__module__", "if", "f", ".", "__module__", "else", "False", "@", "wraps", "(", "f", ")", "def", "f_retry", "(", "*", "args", ",", "*", "*", "kwargs", ")...
Catch-all decorator that handles AutoReconnect and OperationFailure errors from PyMongo
[ "Catch", "-", "all", "decorator", "that", "handles", "AutoReconnect", "and", "OperationFailure", "errors", "from", "PyMongo" ]
python
train