repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
gwastro/pycbc
pycbc/fft/parser_support.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/fft/parser_support.py#L96-L128
def from_cli(opt):
    """Parses the command line options and sets the FFT backend
    for each (available) scheme. Aside from setting the default
    backend for this context, this function will also call
    (if it exists) the from_cli function of the specified
    backends in the *current* scheme; typically one would only
    call this function once inside of a scheme context manager,
    but if it is desired to perform FFTs both inside and outside
    of a context, then this function would need to be called again.

    Parameters
    ----------
    opt: object
        Result of parsing the CLI with OptionParser, or any object with
        the required attributes.

    Returns
    -------
    None
    """
    set_backend(opt.fft_backends)
    # Eventually, we need to be able to parse command lines
    # from more than just the current scheme's preference.  But
    # the big problem is that calling from_cli for more than one
    # backend could cause interference; apparently, FFTW and MKL
    # don't play nice unless FFTW has been compiled and linked
    # with icc (and possibly numpy, scipy, and/or Python as well?)
    backend = get_backend()
    try:
        backend.from_cli(opt)
    except AttributeError:
        # Backend has no CLI hook of its own; nothing more to do.
        pass
[ "def", "from_cli", "(", "opt", ")", ":", "set_backend", "(", "opt", ".", "fft_backends", ")", "# Eventually, we need to be able to parse command lines", "# from more than just the current scheme's preference. But", "# the big problem is that calling from_cli for more than one", "# back...
Parses the command line options and sets the FFT backend for each (available) scheme. Aside from setting the default backed for this context, this function will also call (if it exists) the from_cli function of the specified backends in the *current* scheme; typically one would only call this function once inside of a scheme context manager, but if it is desired to perform FFTs both inside and outside of a context, then this function would need to be called again. Parameters ---------- opt: object Result of parsing the CLI with OptionParser, or any object with the required attributes. Returns
[ "Parses", "the", "command", "line", "options", "and", "sets", "the", "FFT", "backend", "for", "each", "(", "available", ")", "scheme", ".", "Aside", "from", "setting", "the", "default", "backed", "for", "this", "context", "this", "function", "will", "also", ...
python
train
IdentityPython/pysaml2
src/saml2/mcache.py
https://github.com/IdentityPython/pysaml2/blob/d3aa78eeb7d37c12688f783cb4db1c7263a14ad6/src/saml2/mcache.py#L94-L122
def set(self, subject_id, entity_id, info, timestamp=0):
    """Store session information in the cache.

    Assumes that the subject_id is unique within the context of the
    Service Provider.

    :param subject_id: The subject identifier
    :param entity_id: The identifier of the entity_id/receiver of an
        assertion
    :param info: The session info, the assertion is part of this
    :param timestamp: A time after which the assertion is not valid.
    """
    # Missing entries come back falsy from the cache; normalize to lists.
    entities = self._cache.get(subject_id) or []
    subjects = self._cache.get("subjects") or []

    if subject_id not in subjects:
        subjects.append(subject_id)
        if not self._cache.set("subjects", subjects):
            raise CacheError("set failed")

    if entity_id not in entities:
        entities.append(entity_id)
        if not self._cache.set(subject_id, entities):
            raise CacheError("set failed")

    # Should use memcache's expire
    if not self._cache.set(_key(subject_id, entity_id), (timestamp, info)):
        raise CacheError("set failed")
[ "def", "set", "(", "self", ",", "subject_id", ",", "entity_id", ",", "info", ",", "timestamp", "=", "0", ")", ":", "entities", "=", "self", ".", "_cache", ".", "get", "(", "subject_id", ")", "if", "not", "entities", ":", "entities", "=", "[", "]", ...
Stores session information in the cache. Assumes that the subject_id is unique within the context of the Service Provider. :param subject_id: The subject identifier :param entity_id: The identifier of the entity_id/receiver of an assertion :param info: The session info, the assertion is part of this :param timestamp: A time after which the assertion is not valid.
[ "Stores", "session", "information", "in", "the", "cache", ".", "Assumes", "that", "the", "subject_id", "is", "unique", "within", "the", "context", "of", "the", "Service", "Provider", "." ]
python
train
sdispater/poetry
poetry/config.py
https://github.com/sdispater/poetry/blob/2d27acd76c165dd49f11934520a7973de7a3762a/poetry/config.py#L36-L49
def setting(self, setting_name, default=None):  # type: (str) -> Any
    """
    Retrieve a setting value by dotted name.

    ``setting_name`` is a dot-separated path into the config mapping
    (e.g. ``"repositories.foo.url"``).  Returns ``default`` when any
    path component is missing, or when an intermediate value is not a
    mapping (the original raised ``TypeError`` in that case).
    """
    keys = setting_name.split(".")

    config = self._content
    for key in keys:
        # Guard against non-dict intermediates ("a.b.c" where a.b is a
        # scalar) which would otherwise raise TypeError on `in`.
        if not isinstance(config, dict) or key not in config:
            return default

        config = config[key]

    return config
[ "def", "setting", "(", "self", ",", "setting_name", ",", "default", "=", "None", ")", ":", "# type: (str) -> Any", "keys", "=", "setting_name", ".", "split", "(", "\".\"", ")", "config", "=", "self", ".", "_content", "for", "key", "in", "keys", ":", "if"...
Retrieve a setting value.
[ "Retrieve", "a", "setting", "value", "." ]
python
train
dhermes/bezier
src/bezier/_curve_helpers.py
https://github.com/dhermes/bezier/blob/4f941f82637a8e70a5b159a9203132192e23406b/src/bezier/_curve_helpers.py#L346-L385
def _elevate_nodes(nodes):
    r"""Degree-elevate a B |eacute| zier curves.

    Converts the current nodes :math:`v_0, \ldots, v_n` to new nodes
    :math:`w_0, \ldots, w_{n + 1}` where

    .. math::

       \begin{align*}
       w_0 &= v_0 \\
       w_j &= \frac{j}{n + 1} v_{j - 1} + \frac{n + 1 - j}{n + 1} v_j \\
       w_{n + 1} &= v_n
       \end{align*}

    .. note::

       There is also a Fortran implementation of this function, which
       will be used if it can be built.

    Args:
        nodes (numpy.ndarray): The nodes defining a curve.

    Returns:
        numpy.ndarray: The nodes of the degree-elevated curve.
    """
    dimension, num_nodes = np.shape(nodes)
    elevated = np.empty((dimension, num_nodes + 1), order="F")
    # The boundary nodes are copied verbatim; only the interior nodes
    # are convex combinations of neighbors.
    elevated[:, 0] = nodes[:, 0]
    elevated[:, -1] = nodes[:, -1]
    weights = np.arange(1, num_nodes, dtype=_FLOAT64)[np.newaxis, :]
    denominator = float(num_nodes)
    elevated[:, 1:-1] = (
        weights * nodes[:, :-1] + (denominator - weights) * nodes[:, 1:]
    )
    # Hold off on division until the end, to (attempt to) avoid round-off.
    elevated[:, 1:-1] /= denominator
    return elevated
[ "def", "_elevate_nodes", "(", "nodes", ")", ":", "dimension", ",", "num_nodes", "=", "np", ".", "shape", "(", "nodes", ")", "new_nodes", "=", "np", ".", "empty", "(", "(", "dimension", ",", "num_nodes", "+", "1", ")", ",", "order", "=", "\"F\"", ")",...
r"""Degree-elevate a B |eacute| zier curves. Does this by converting the current nodes :math:`v_0, \ldots, v_n` to new nodes :math:`w_0, \ldots, w_{n + 1}` where .. math:: \begin{align*} w_0 &= v_0 \\ w_j &= \frac{j}{n + 1} v_{j - 1} + \frac{n + 1 - j}{n + 1} v_j \\ w_{n + 1} &= v_n \end{align*} .. note:: There is also a Fortran implementation of this function, which will be used if it can be built. Args: nodes (numpy.ndarray): The nodes defining a curve. Returns: numpy.ndarray: The nodes of the degree-elevated curve.
[ "r", "Degree", "-", "elevate", "a", "B", "|eacute|", "zier", "curves", "." ]
python
train
SpriteLink/NIPAP
nipap/nipap/backend.py
https://github.com/SpriteLink/NIPAP/blob/f96069f11ab952d80b13cab06e0528f2d24b3de9/nipap/nipap/backend.py#L1254-L1299
def add_vrf(self, auth, attr):
    """ Add a new VRF.

        * `auth` [BaseAuth]
            AAA options.
        * `attr` [vrf_attr]
            The new VRF's attributes.

        Add a VRF based on the values stored in the `attr` dict.  The
        `attr` dict must contain at least the 'rt' and 'name' keys.

        Returns a dict describing the VRF which was added.

        This is the documentation of the internal backend function. It's
        exposed over XML-RPC, please also see the XML-RPC documentation
        for :py:func:`nipap.xmlrpc.NipapXMLRPC.add_vrf` for full
        understanding.
    """
    self._logger.debug("add_vrf called; attr: %s" % unicode(attr))

    # sanity check - do we have all attributes?
    req_attr = [ 'rt', 'name' ]
    self._check_attr(attr, req_attr, _vrf_attrs)

    # Build and run the INSERT from the attribute dict.
    insert, params = self._sql_expand_insert(attr)
    sql = "INSERT INTO ip_net_vrf " + insert

    self._execute(sql, params)
    # Re-read the row we just inserted so the returned dict reflects any
    # database-side defaults.
    vrf_id = self._lastrowid()
    vrf = self.list_vrf(auth, { 'id': vrf_id })[0]

    # write to audit table
    audit_params = {
        'vrf_id': vrf['id'],
        'vrf_rt': vrf['rt'],
        'vrf_name': vrf['name'],
        'username': auth.username,
        'authenticated_as': auth.authenticated_as,
        'full_name': auth.full_name,
        'authoritative_source': auth.authoritative_source,
        'description': 'Added VRF %s with attr: %s' % (vrf['rt'], unicode(vrf))
    }
    sql, params = self._sql_expand_insert(audit_params)
    self._execute('INSERT INTO ip_net_log %s' % sql, params)

    return vrf
[ "def", "add_vrf", "(", "self", ",", "auth", ",", "attr", ")", ":", "self", ".", "_logger", ".", "debug", "(", "\"add_vrf called; attr: %s\"", "%", "unicode", "(", "attr", ")", ")", "# sanity check - do we have all attributes?", "req_attr", "=", "[", "'rt'", ",...
Add a new VRF. * `auth` [BaseAuth] AAA options. * `attr` [vrf_attr] The news VRF's attributes. Add a VRF based on the values stored in the `attr` dict. Returns a dict describing the VRF which was added. This is the documentation of the internal backend function. It's exposed over XML-RPC, please also see the XML-RPC documentation for :py:func:`nipap.xmlrpc.NipapXMLRPC.add_vrf` for full understanding.
[ "Add", "a", "new", "VRF", "." ]
python
train
richardkiss/pycoin
pycoin/vm/annotate.py
https://github.com/richardkiss/pycoin/blob/1e8d0d9fe20ce0347b97847bb529cd1bd84c7442/pycoin/vm/annotate.py#L89-L133
def annotate_scripts(self, tx, tx_in_idx): "return list of pre_annotations, pc, opcode, instruction, post_annotations" # input_annotations_f, output_annotations_f = annotation_f_for_scripts(tx, tx_in_idx) data_annotations = collections.defaultdict(list) def traceback_f(opcode, data, pc, vmc): if opcode in (self.OP_CHECKSIG, self.OP_CHECKSIGVERIFY): self.annotate_checksig(vmc, data_annotations) if opcode in (self.OP_CHECKMULTISIG, self.OP_CHECKMULTISIGVERIFY): self.annotate_checkmultisig(vmc, data_annotations) return try: tx.check_solution(tx_in_idx, traceback_f=traceback_f) except ScriptError: pass r = [] def traceback_f(opcode, data, pc, vmc): a0 = [] if vmc.pc == 0: if vmc.is_solution_script: a0.append("--- SIGNATURE SCRIPT START") else: a0.append("--- PUBLIC KEY SCRIPT START") r.append((a0, vmc.pc, opcode, self.instruction_for_opcode(opcode, data), data_annotations[data])) try: tx.check_solution(tx_in_idx, traceback_f=traceback_f) except ScriptError: pass # the script may have ended early, so let's just double-check try: for idx, (opcode, data, pc, new_pc) in enumerate(itertools.chain( self._script_tools.get_opcodes(tx.unspents[tx_in_idx].script), self._script_tools.get_opcodes(tx.txs_in[tx_in_idx].script))): if idx >= len(r): r.append(([], pc, opcode, self.instruction_for_opcode(opcode, data), [])) except IndexError: pass return r
[ "def", "annotate_scripts", "(", "self", ",", "tx", ",", "tx_in_idx", ")", ":", "# input_annotations_f, output_annotations_f = annotation_f_for_scripts(tx, tx_in_idx)", "data_annotations", "=", "collections", ".", "defaultdict", "(", "list", ")", "def", "traceback_f", "(", ...
return list of pre_annotations, pc, opcode, instruction, post_annotations
[ "return", "list", "of", "pre_annotations", "pc", "opcode", "instruction", "post_annotations" ]
python
train
junaruga/rpm-py-installer
install.py
https://github.com/junaruga/rpm-py-installer/blob/12f45feb0ba533dec8d0d16ef1e9b7fb8cfbd4ed/install.py#L1325-L1332
def python_lib_rpm_dirs(self):
    """Both arch and non-arch site-packages 'rpm' directories.

    Returns a lazy map over the two site-packages roots, each with the
    'rpm' package directory appended.
    """
    site_dirs = [self.python_lib_arch_dir, self.python_lib_non_arch_dir]
    # Keep the map object (not a list) to preserve the original's
    # lazy return type.
    return map(lambda directory: os.path.join(directory, 'rpm'), site_dirs)
[ "def", "python_lib_rpm_dirs", "(", "self", ")", ":", "libs", "=", "[", "self", ".", "python_lib_arch_dir", ",", "self", ".", "python_lib_non_arch_dir", "]", "def", "append_rpm", "(", "path", ")", ":", "return", "os", ".", "path", ".", "join", "(", "path", ...
Both arch and non-arch site-packages directories.
[ "Both", "arch", "and", "non", "-", "arch", "site", "-", "packages", "directories", "." ]
python
train
LonamiWebs/Telethon
telethon_examples/gui.py
https://github.com/LonamiWebs/Telethon/blob/1ead9757d366b58c1e0567cddb0196e20f1a445f/telethon_examples/gui.py#L150-L178
async def on_message(self, event): """ Event handler that will add new messages to the message log. """ # We want to show only messages sent to this chat if event.chat_id != self.chat_id: return # Save the message ID so we know which to reply to self.message_ids.append(event.id) # Decide a prefix (">> " for our messages, "<user>" otherwise) if event.out: text = '>> ' else: sender = await event.get_sender() text = '<{}> '.format(sanitize_str( utils.get_display_name(sender))) # If the message has media show "(MediaType) " if event.media: text += '({}) '.format(event.media.__class__.__name__) text += sanitize_str(event.text) text += '\n' # Append the text to the end with a newline, and scroll to the end self.log.insert(tkinter.END, text) self.log.yview(tkinter.END)
[ "async", "def", "on_message", "(", "self", ",", "event", ")", ":", "# We want to show only messages sent to this chat", "if", "event", ".", "chat_id", "!=", "self", ".", "chat_id", ":", "return", "# Save the message ID so we know which to reply to", "self", ".", "messag...
Event handler that will add new messages to the message log.
[ "Event", "handler", "that", "will", "add", "new", "messages", "to", "the", "message", "log", "." ]
python
train
twilio/twilio-python
twilio/rest/api/v2010/__init__.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/api/v2010/__init__.py#L38-L45
def account(self):
    """
    :returns: Account provided as the authenticating account
    :rtype: AccountContext

    The context is built lazily on first access and memoized on
    ``self._account``.
    """
    if self._account is not None:
        return self._account
    self._account = AccountContext(self, self.domain.twilio.account_sid)
    return self._account
[ "def", "account", "(", "self", ")", ":", "if", "self", ".", "_account", "is", "None", ":", "self", ".", "_account", "=", "AccountContext", "(", "self", ",", "self", ".", "domain", ".", "twilio", ".", "account_sid", ")", "return", "self", ".", "_account...
:returns: Account provided as the authenticating account :rtype: AccountContext
[ ":", "returns", ":", "Account", "provided", "as", "the", "authenticating", "account", ":", "rtype", ":", "AccountContext" ]
python
train
saltstack/salt
salt/netapi/rest_tornado/saltnado.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/netapi/rest_tornado/saltnado.py#L302-L316
def clean_by_request(self, request):
    '''
    Remove all futures that were waiting for request `request`
    since it is done waiting
    '''
    if request not in self.request_map:
        return
    for tag, matcher, future in self.request_map[request]:
        # timeout the future
        self._timeout_future(tag, matcher, future)
        # drop any pending IOLoop timeout tied to this future
        if future in self.timeout_map:
            handle = self.timeout_map.pop(future)
            tornado.ioloop.IOLoop.current().remove_timeout(handle)

    del self.request_map[request]
[ "def", "clean_by_request", "(", "self", ",", "request", ")", ":", "if", "request", "not", "in", "self", ".", "request_map", ":", "return", "for", "tag", ",", "matcher", ",", "future", "in", "self", ".", "request_map", "[", "request", "]", ":", "# timeout...
Remove all futures that were waiting for request `request` since it is done waiting
[ "Remove", "all", "futures", "that", "were", "waiting", "for", "request", "request", "since", "it", "is", "done", "waiting" ]
python
train
log2timeline/plaso
plaso/cli/helpers/windows_services_analysis.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/cli/helpers/windows_services_analysis.py#L41-L59
def ParseOptions(cls, options, analysis_plugin):
    """Parses and validates options.

    Args:
      options (argparse.Namespace): parser options.
      analysis_plugin (WindowsServicePlugin): analysis plugin to configure.

    Raises:
      BadConfigObject: when the output module object is of the wrong type.
    """
    expected_type = windows_services.WindowsServicesAnalysisPlugin
    if not isinstance(analysis_plugin, expected_type):
        raise errors.BadConfigObject((
            'Analysis plugin is not an instance of '
            'WindowsServicesAnalysisPlugin'))

    output_format = cls._ParseStringOption(
        options, 'windows_services_output',
        default_value=cls._DEFAULT_OUTPUT)
    analysis_plugin.SetOutputFormat(output_format)
[ "def", "ParseOptions", "(", "cls", ",", "options", ",", "analysis_plugin", ")", ":", "if", "not", "isinstance", "(", "analysis_plugin", ",", "windows_services", ".", "WindowsServicesAnalysisPlugin", ")", ":", "raise", "errors", ".", "BadConfigObject", "(", "(", ...
Parses and validates options. Args: options (argparse.Namespace): parser options. analysis_plugin (WindowsServicePlugin): analysis plugin to configure. Raises: BadConfigObject: when the output module object is of the wrong type.
[ "Parses", "and", "validates", "options", "." ]
python
train
kata198/AdvancedHTMLParser
AdvancedHTMLParser/constants.py
https://github.com/kata198/AdvancedHTMLParser/blob/06aeea5d8e2ea86e155aae0fc237623d3e9b7f9d/AdvancedHTMLParser/constants.py#L277-L286
def _special_value_size(em): ''' handle "size" property, which has different behaviour for input vs everything else ''' if em.tagName == 'input': # TODO: "size" on an input is implemented very weirdly. Negative values are treated as invalid, # A value of "0" raises an exception (and does not set HTML attribute) # No upper limit. return convertToPositiveInt(em.getAttribute('size', 20), invalidDefault=20) return em.getAttribute('size', '')
[ "def", "_special_value_size", "(", "em", ")", ":", "if", "em", ".", "tagName", "==", "'input'", ":", "# TODO: \"size\" on an input is implemented very weirdly. Negative values are treated as invalid,", "# A value of \"0\" raises an exception (and does not set HTML attribute)", ...
handle "size" property, which has different behaviour for input vs everything else
[ "handle", "size", "property", "which", "has", "different", "behaviour", "for", "input", "vs", "everything", "else" ]
python
train
ajenhl/tacl
tacl/__main__.py
https://github.com/ajenhl/tacl/blob/b8a343248e77f1c07a5a4ac133a9ad6e0b4781c2/tacl/__main__.py#L606-L615
def search_texts(args, parser):
    """Searches texts for presence of n-grams.

    Loads the data store, corpus and catalogue from the CLI arguments,
    validates them, then searches for every n-gram listed in the
    supplied n-gram files, writing results to stdout.
    """
    store = utils.get_data_store(args)
    corpus = utils.get_corpus(args)
    catalogue = utils.get_catalogue(args)
    store.validate(corpus, catalogue)
    # Flatten the n-grams from every input file into one list.
    ngrams = [ngram for ngram_file in args.ngrams
              for ngram in utils.get_ngrams(ngram_file)]
    store.search(catalogue, ngrams, sys.stdout)
[ "def", "search_texts", "(", "args", ",", "parser", ")", ":", "store", "=", "utils", ".", "get_data_store", "(", "args", ")", "corpus", "=", "utils", ".", "get_corpus", "(", "args", ")", "catalogue", "=", "utils", ".", "get_catalogue", "(", "args", ")", ...
Searches texts for presence of n-grams.
[ "Searches", "texts", "for", "presence", "of", "n", "-", "grams", "." ]
python
train
kobejohn/PQHelper
pqhelper/capture.py
https://github.com/kobejohn/PQHelper/blob/d2b78a22dcb631794295e6a159b06f39c3f10db6/pqhelper/capture.py#L99-L127
def find_or_graft(self, board):
    """Build a tree with each level corresponding to a fixed position
    on board.

    A path of tiles is stored for each board. If any two boards have the
    same path, then they are the same board. If there is any difference,
    a new branch will be created to store that path.

    Return: True if board already exists in the tree; False otherwise
    """
    exact_match = True  # assume duplicate until a difference is found
    node = self
    for _position, tile in board.positions_with_tile():
        # Look for an existing child holding the same tile.
        for child in node.children:
            if child.tile == tile:
                node = child
                break
        else:
            # No child matched --> graft a new branch; board is new.
            branch = _DuplicateTree(tile)
            node.graft_child(branch)
            node = branch
            exact_match = False  # may be set repeatedly; that's fine
    return exact_match
[ "def", "find_or_graft", "(", "self", ",", "board", ")", ":", "is_duplicate_board", "=", "True", "# assume same until find a difference", "# compare each position", "node", "=", "self", "for", "p", ",", "new_tile", "in", "board", ".", "positions_with_tile", "(", ")",...
Build a tree with each level corresponding to a fixed position on board. A path of tiles is stored for each board. If any two boards have the same path, then they are the same board. If there is any difference, a new branch will be created to store that path. Return: True if board already exists in the tree; False otherwise
[ "Build", "a", "tree", "with", "each", "level", "corresponding", "to", "a", "fixed", "position", "on", "board", ".", "A", "path", "of", "tiles", "is", "stored", "for", "each", "board", ".", "If", "any", "two", "boards", "have", "the", "same", "path", "t...
python
train
jonathf/chaospy
chaospy/distributions/operators/logarithm.py
https://github.com/jonathf/chaospy/blob/25ecfa7bf5608dc10c0b31d142ded0e3755f5d74/chaospy/distributions/operators/logarithm.py#L53-L56
def _bnd(self, xloc, dist, cache):
    """Distribution bounds.

    Maps the locations through exp, evaluates the underlying
    distribution's bounds, and maps the result back with log.
    """
    exponentiated = numpy.e**xloc
    bounds = evaluation.evaluate_bound(dist, exponentiated, cache=cache)
    return numpy.log(bounds)
[ "def", "_bnd", "(", "self", ",", "xloc", ",", "dist", ",", "cache", ")", ":", "return", "numpy", ".", "log", "(", "evaluation", ".", "evaluate_bound", "(", "dist", ",", "numpy", ".", "e", "**", "xloc", ",", "cache", "=", "cache", ")", ")" ]
Distribution bounds.
[ "Distribution", "bounds", "." ]
python
train
secdev/scapy
scapy/layers/tls/crypto/suites.py
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/layers/tls/crypto/suites.py#L21-L67
def get_algs_from_ciphersuite_name(ciphersuite_name):
    """
    Parse a ciphersuite name and return the 5-tuple
    (kx_alg, cipher_alg, hmac_alg, hash_alg, tls1_3) made of the Key
    Exchange Algorithm class, the Cipher class, the HMAC class, the Hash
    class and a boolean flagging a TLS 1.3 suite.
    """
    tls1_3 = False
    if ciphersuite_name.startswith("TLS"):
        s = ciphersuite_name[4:]
        if s.endswith("CCM") or s.endswith("CCM_8"):
            # CCM suites are AEAD: no HMAC, hash fixed to SHA256.
            kx_name, s = s.split("_WITH_")
            kx_alg = _tls_kx_algs.get(kx_name)
            hash_alg = _tls_hash_algs.get("SHA256")
            cipher_alg = _tls_cipher_algs.get(s)
            hmac_alg = None
        else:
            if "WITH" in s:
                kx_name, s = s.split("_WITH_")
                kx_alg = _tls_kx_algs.get(kx_name)
            else:
                # TLS 1.3 suites carry no key-exchange component.
                tls1_3 = True
                kx_alg = _tls_kx_algs.get("TLS13")
            hash_name = s.split('_')[-1]
            hash_alg = _tls_hash_algs.get(hash_name)
            cipher_name = s[:-(len(hash_name) + 1)]
            if tls1_3:
                cipher_name += "_TLS13"
            cipher_alg = _tls_cipher_algs.get(cipher_name)
            hmac_alg = None
            if cipher_alg is not None and cipher_alg.type != "aead":
                hmac_name = "HMAC-%s" % hash_name
                hmac_alg = _tls_hmac_algs.get(hmac_name)
    elif ciphersuite_name.startswith("SSL"):
        s = ciphersuite_name[7:]
        kx_alg = _tls_kx_algs.get("SSLv2")
        cipher_name, hash_name = s.split("_WITH_")
        # BUGFIX: str.rstrip("_EXPORT40") strips any trailing characters
        # from the set {_,E,X,P,O,R,T,4,0}, not the literal suffix, so it
        # could mangle cipher names; strip the exact suffix instead.
        is_export = cipher_name.endswith("_EXPORT40")
        if is_export:
            cipher_name = cipher_name[:-len("_EXPORT40")]
        cipher_alg = _tls_cipher_algs.get(cipher_name)
        kx_alg.export = is_export
        hmac_alg = _tls_hmac_algs.get("HMAC-NULL")
        hash_alg = _tls_hash_algs.get(hash_name)
    return kx_alg, cipher_alg, hmac_alg, hash_alg, tls1_3
[ "def", "get_algs_from_ciphersuite_name", "(", "ciphersuite_name", ")", ":", "tls1_3", "=", "False", "if", "ciphersuite_name", ".", "startswith", "(", "\"TLS\"", ")", ":", "s", "=", "ciphersuite_name", "[", "4", ":", "]", "if", "s", ".", "endswith", "(", "\"C...
Return the 3-tuple made of the Key Exchange Algorithm class, the Cipher class and the HMAC class, through the parsing of the ciphersuite name.
[ "Return", "the", "3", "-", "tuple", "made", "of", "the", "Key", "Exchange", "Algorithm", "class", "the", "Cipher", "class", "and", "the", "HMAC", "class", "through", "the", "parsing", "of", "the", "ciphersuite", "name", "." ]
python
train
PostmonAPI/postmon-python
postmon.py
https://github.com/PostmonAPI/postmon-python/blob/bfd3a38a8aec5e069536a0bc36440946d1981e37/postmon.py#L31-L42
def user_agent(self):
    """
    User-Agent for requests made to Postmon.

    Returns ``base_user_agent`` concatenated with the default
    ``User-Agent`` of the requests library.  Built once and memoized.
    """
    if self._user_agent:
        return self._user_agent
    default_agent = requests.Session().headers['User-Agent']
    self._user_agent = '%s %s' % (self.base_user_agent, default_agent)
    return self._user_agent
[ "def", "user_agent", "(", "self", ")", ":", "if", "not", "self", ".", "_user_agent", ":", "session", "=", "requests", ".", "Session", "(", ")", "user_agent", "=", "session", ".", "headers", "[", "'User-Agent'", "]", "self", ".", "_user_agent", "=", "'%s ...
User-Agent para as requisições feitas para o Postmon. Retorna o ``base_user_agent`` concatenado com o ``User-Agent`` padrão do requests.
[ "User", "-", "Agent", "para", "as", "requisições", "feitas", "para", "o", "Postmon", "." ]
python
train
CybOXProject/mixbox
mixbox/namespaces.py
https://github.com/CybOXProject/mixbox/blob/9097dae7a433f5b98c18171c4a5598f69a7d30af/mixbox/namespaces.py#L294-L322
def set_preferred_prefix_for_namespace(self, ns_uri, prefix, add_if_not_exist=False):
    """Sets the preferred prefix for ns_uri. If add_if_not_exist is True,
    the prefix is added if it's not already registered. Otherwise,
    setting an unknown prefix as preferred is an error. The default is
    False. Setting to None always works, and indicates a preference to
    use the namespace as a default. The given namespace must already be
    in this set.

    Args:
        ns_uri (str): the namespace URI whose prefix is to be set
        prefix (str): the preferred prefix to set
        add_if_not_exist (bool): Whether to add the prefix if it is not
            already set as a prefix of ``ns_uri``.

    Raises:
        NamespaceNotFoundError: If namespace ``ns_uri`` isn't in this set.
        PrefixNotFoundError: If ``prefix`` isn't already registered for
            ``ns_uri`` and ``add_if_not_exist`` is False.
    """
    ni = self.__lookup_uri(ns_uri)
    if not prefix:
        ni.preferred_prefix = None
    elif prefix in ni.prefixes:
        ni.preferred_prefix = prefix
    elif add_if_not_exist:
        # add_prefix also records the preference.
        self.add_prefix(ns_uri, prefix, set_as_preferred=True)
    else:
        raise PrefixNotFoundError(prefix)
[ "def", "set_preferred_prefix_for_namespace", "(", "self", ",", "ns_uri", ",", "prefix", ",", "add_if_not_exist", "=", "False", ")", ":", "ni", "=", "self", ".", "__lookup_uri", "(", "ns_uri", ")", "if", "not", "prefix", ":", "ni", ".", "preferred_prefix", "=...
Sets the preferred prefix for ns_uri. If add_if_not_exist is True, the prefix is added if it's not already registered. Otherwise, setting an unknown prefix as preferred is an error. The default is False. Setting to None always works, and indicates a preference to use the namespace as a default. The given namespace must already be in this set. Args: ns_uri (str): the namespace URI whose prefix is to be set prefix (str): the preferred prefix to set add_if_not_exist (bool): Whether to add the prefix if it is not already set as a prefix of ``ns_uri``. Raises: NamespaceNotFoundError: If namespace ``ns_uri`` isn't in this set. DuplicatePrefixError: If ``prefix`` already maps to a different namespace.
[ "Sets", "the", "preferred", "prefix", "for", "ns_uri", ".", "If", "add_if_not_exist", "is", "True", "the", "prefix", "is", "added", "if", "it", "s", "not", "already", "registered", ".", "Otherwise", "setting", "an", "unknown", "prefix", "as", "preferred", "i...
python
train
manns/pyspread
pyspread/src/gui/_chart_dialog.py
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/gui/_chart_dialog.py#L639-L645
def OnSecondaryCheckbox(self, event):
    """Secondary axis checkbox event handler.

    Mirrors the checkbox state into both the "top" and "right" chart
    attributes and triggers a chart redraw.  (The old docstring said
    "Top Checkbox" but the handler has always set both attributes.)
    """
    # Read the checkbox state once so both attributes cannot diverge.
    checked = event.IsChecked()
    self.attrs["top"] = checked
    self.attrs["right"] = checked
    post_command_event(self, self.DrawChartMsg)
[ "def", "OnSecondaryCheckbox", "(", "self", ",", "event", ")", ":", "self", ".", "attrs", "[", "\"top\"", "]", "=", "event", ".", "IsChecked", "(", ")", "self", ".", "attrs", "[", "\"right\"", "]", "=", "event", ".", "IsChecked", "(", ")", "post_command...
Top Checkbox event handler
[ "Top", "Checkbox", "event", "handler" ]
python
train
juju/juju-bundlelib
jujubundlelib/validation.py
https://github.com/juju/juju-bundlelib/blob/c2efa614f53675ed9526027776448bfbb0454ca6/jujubundlelib/validation.py#L65-L100
def _validate_sections(bundle, add_error):
    """Check that the base bundle sections are valid.

    The bundle argument is a YAML decoded bundle content.  A bundle is
    composed of series, services, machines and relations; only the
    services section is mandatory.  Validation errors are reported via
    the given add_error callable.

    Return the four sections (series, services, machines, relations).
    """
    # Check that the bundle itself is well formed.
    if not isdict(bundle):
        add_error('bundle does not appear to be a bundle')
        return None, None, None, None
    # Validate the services section.
    services = bundle.get('services', {})
    if not services:
        add_error('bundle does not define any services')
    elif not isdict(services):
        add_error('services spec does not appear to be well-formed')
    # Validate the machines section.
    machines = bundle.get('machines')
    if machines is not None:
        if not isdict(machines):
            add_error('machines spec does not appear to be well-formed')
        else:
            try:
                machines = {int(key): value
                            for key, value in machines.items()}
            except (TypeError, ValueError):
                add_error('machines spec identifiers must be digits')
    # Validate the relations section.
    relations = bundle.get('relations')
    if relations is not None and not islist(relations):
        add_error('relations spec does not appear to be well-formed')
    return bundle.get('series'), services, machines, relations
[ "def", "_validate_sections", "(", "bundle", ",", "add_error", ")", ":", "# Check that the bundle itself is well formed.", "if", "not", "isdict", "(", "bundle", ")", ":", "add_error", "(", "'bundle does not appear to be a bundle'", ")", "return", "None", ",", "None", "...
Check that the base bundle sections are valid. The bundle argument is a YAML decoded bundle content. A bundle is composed of series, services, machines and relations. Only the services section is mandatory. Use the given add_error callable to register validation error. Return the four sections
[ "Check", "that", "the", "base", "bundle", "sections", "are", "valid", "." ]
python
train
pypa/pipenv
pipenv/vendor/distlib/manifest.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/manifest.py#L103-L123
def sorted(self, wantdirs=False):
    """
    Return sorted files in directory order, optionally including the
    directories themselves when ``wantdirs`` is True.
    """
    def collect_dirs(dirs, directory):
        # Recursively record the directory and all ancestors up to base.
        dirs.add(directory)
        logger.debug('add_dir added %s', directory)
        if directory != self.base:
            parent, _ = os.path.split(directory)
            assert parent not in ('', '/')
            collect_dirs(dirs, parent)

    result = set(self.files)    # make a copy!
    if wantdirs:
        dirs = set()
        for name in result:
            collect_dirs(dirs, os.path.dirname(name))
        result |= dirs
    ordered = sorted(os.path.split(path) for path in result)
    return [os.path.join(*parts) for parts in ordered]
[ "def", "sorted", "(", "self", ",", "wantdirs", "=", "False", ")", ":", "def", "add_dir", "(", "dirs", ",", "d", ")", ":", "dirs", ".", "add", "(", "d", ")", "logger", ".", "debug", "(", "'add_dir added %s'", ",", "d", ")", "if", "d", "!=", "self"...
Return sorted files in directory order
[ "Return", "sorted", "files", "in", "directory", "order" ]
python
train
johnnoone/facts
facts/grafts/system_grafts.py
https://github.com/johnnoone/facts/blob/82d38a46c15d9c01200445526f4c0d1825fc1e51/facts/grafts/system_grafts.py#L26-L33
def os_info(): """Returns os data. """ return { 'uname': dict(platform.uname()._asdict()), 'path': os.environ.get('PATH', '').split(':'), 'shell': os.environ.get('SHELL', '/bin/sh'), }
[ "def", "os_info", "(", ")", ":", "return", "{", "'uname'", ":", "dict", "(", "platform", ".", "uname", "(", ")", ".", "_asdict", "(", ")", ")", ",", "'path'", ":", "os", ".", "environ", ".", "get", "(", "'PATH'", ",", "''", ")", ".", "split", "...
Returns os data.
[ "Returns", "os", "data", "." ]
python
train
bcbio/bcbio-nextgen
bcbio/upload/filesystem.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/upload/filesystem.py#L19-L30
def get_upload_path(finfo, sample_info, config): """"Dry" update the file: only return the upload path """ try: storage_dir = _get_storage_dir(finfo, config) except ValueError: return None if finfo.get("type") == "directory": return _get_dir_upload_path(finfo, storage_dir) else: return _get_file_upload_path(finfo, storage_dir)
[ "def", "get_upload_path", "(", "finfo", ",", "sample_info", ",", "config", ")", ":", "try", ":", "storage_dir", "=", "_get_storage_dir", "(", "finfo", ",", "config", ")", "except", "ValueError", ":", "return", "None", "if", "finfo", ".", "get", "(", "\"typ...
Dry" update the file: only return the upload path
[ "Dry", "update", "the", "file", ":", "only", "return", "the", "upload", "path" ]
python
train
senaite/senaite.core
bika/lims/browser/methodfolder.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/methodfolder.py#L100-L110
def before_render(self): """Before template render hook """ # Render the Add button if the user has the AddClient permission if check_permission(AddMethod, self.context): self.context_actions[_("Add")] = { "url": "createObject?type_name=Method", "icon": "++resource++bika.lims.images/add.png" } # Don't allow any context actions on the Methods folder self.request.set("disable_border", 1)
[ "def", "before_render", "(", "self", ")", ":", "# Render the Add button if the user has the AddClient permission", "if", "check_permission", "(", "AddMethod", ",", "self", ".", "context", ")", ":", "self", ".", "context_actions", "[", "_", "(", "\"Add\"", ")", "]", ...
Before template render hook
[ "Before", "template", "render", "hook" ]
python
train
tomnor/channelpack
channelpack/pulltxt.py
https://github.com/tomnor/channelpack/blob/9ad3cd11c698aed4c0fc178385b2ba38a7d0efae/channelpack/pulltxt.py#L345-L363
def loadtxt_asdict(fn, **kwargs): """Return what is returned from loadtxt as a dict. The 'unpack' keyword is enforced to True. The keys in the dict is the column numbers loaded. It is the integers 0...N-1 for N loaded columns, or the numbers in usecols.""" kwargs.update(unpack=True) d = loadtxt(fn, **kwargs) if len(np.shape(d)) == 2: keys = kwargs.get('usecols', None) or range(len(d)) D = dict([(k, v) for k, v in zip(keys, d)]) elif len(np.shape(d)) == 1: keys = kwargs.get('usecols', None) or [0] D = dict([(keys[0], d)]) else: raise Exception('Unknown dimension of loaded data.') return D
[ "def", "loadtxt_asdict", "(", "fn", ",", "*", "*", "kwargs", ")", ":", "kwargs", ".", "update", "(", "unpack", "=", "True", ")", "d", "=", "loadtxt", "(", "fn", ",", "*", "*", "kwargs", ")", "if", "len", "(", "np", ".", "shape", "(", "d", ")", ...
Return what is returned from loadtxt as a dict. The 'unpack' keyword is enforced to True. The keys in the dict is the column numbers loaded. It is the integers 0...N-1 for N loaded columns, or the numbers in usecols.
[ "Return", "what", "is", "returned", "from", "loadtxt", "as", "a", "dict", "." ]
python
train
MacHu-GWU/single_file_module-project
sfm/binarysearch.py
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/binarysearch.py#L50-L71
def find_lt(array, x): """ Find rightmost value less than x. :type array: list :param array: an iterable object that support inex :param x: a comparable value Example:: >>> find_lt([0, 1, 2, 3], 2.5) 2 **中文文档** 寻找最大的小于x的数。 """ i = bisect.bisect_left(array, x) if i: return array[i - 1] raise ValueError
[ "def", "find_lt", "(", "array", ",", "x", ")", ":", "i", "=", "bisect", ".", "bisect_left", "(", "array", ",", "x", ")", "if", "i", ":", "return", "array", "[", "i", "-", "1", "]", "raise", "ValueError" ]
Find rightmost value less than x. :type array: list :param array: an iterable object that support inex :param x: a comparable value Example:: >>> find_lt([0, 1, 2, 3], 2.5) 2 **中文文档** 寻找最大的小于x的数。
[ "Find", "rightmost", "value", "less", "than", "x", "." ]
python
train
PyGithub/PyGithub
github/BranchProtection.py
https://github.com/PyGithub/PyGithub/blob/f716df86bbe7dc276c6596699fa9712b61ef974c/github/BranchProtection.py#L73-L84
def get_user_push_restrictions(self): """ :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser` """ if self._user_push_restrictions is github.GithubObject.NotSet: return None return github.PaginatedList.PaginatedList( github.NamedUser.NamedUser, self._requester, self._user_push_restrictions, None )
[ "def", "get_user_push_restrictions", "(", "self", ")", ":", "if", "self", ".", "_user_push_restrictions", "is", "github", ".", "GithubObject", ".", "NotSet", ":", "return", "None", "return", "github", ".", "PaginatedList", ".", "PaginatedList", "(", "github", "....
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser`
[ ":", "rtype", ":", ":", "class", ":", "github", ".", "PaginatedList", ".", "PaginatedList", "of", ":", "class", ":", "github", ".", "NamedUser", ".", "NamedUser" ]
python
train
pastpages/archiveis
archiveis/api.py
https://github.com/pastpages/archiveis/blob/1268066c0e4ef1b82a32a5fafd2e136113e63576/archiveis/api.py#L100-L108
def cli(url, user_agent): """ Archives the provided URL using archive.is. """ kwargs = {} if user_agent: kwargs['user_agent'] = user_agent archive_url = capture(url, **kwargs) click.echo(archive_url)
[ "def", "cli", "(", "url", ",", "user_agent", ")", ":", "kwargs", "=", "{", "}", "if", "user_agent", ":", "kwargs", "[", "'user_agent'", "]", "=", "user_agent", "archive_url", "=", "capture", "(", "url", ",", "*", "*", "kwargs", ")", "click", ".", "ec...
Archives the provided URL using archive.is.
[ "Archives", "the", "provided", "URL", "using", "archive", ".", "is", "." ]
python
valid
pypa/pipenv
pipenv/vendor/pexpect/ANSI.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/pexpect/ANSI.py#L291-L298
def write (self, s): """Process text, writing it to the virtual screen while handling ANSI escape codes. """ if isinstance(s, bytes): s = self._decode(s) for c in s: self.process(c)
[ "def", "write", "(", "self", ",", "s", ")", ":", "if", "isinstance", "(", "s", ",", "bytes", ")", ":", "s", "=", "self", ".", "_decode", "(", "s", ")", "for", "c", "in", "s", ":", "self", ".", "process", "(", "c", ")" ]
Process text, writing it to the virtual screen while handling ANSI escape codes.
[ "Process", "text", "writing", "it", "to", "the", "virtual", "screen", "while", "handling", "ANSI", "escape", "codes", "." ]
python
train
ponty/pyscreenshot
pyscreenshot/plugins/gdk3pixbuf.py
https://github.com/ponty/pyscreenshot/blob/51010195cbb5361dcd4b414ff132b87244c9e1cb/pyscreenshot/plugins/gdk3pixbuf.py#L33-L63
def grab(self, bbox=None): """Grabs an image directly to a buffer. :param bbox: Optional tuple or list containing (x1, y1, x2, y2) coordinates of sub-region to capture. :return: PIL RGB image :raises: ValueError, if image data does not have 3 channels (RGB), each with 8 bits. :rtype: Image """ w = Gdk.get_default_root_window() if bbox is not None: g = [bbox[0], bbox[1], bbox[2] - bbox[0], bbox[3] - bbox[1]] else: g = w.get_geometry() pb = Gdk.pixbuf_get_from_window(w, *g) if pb.get_bits_per_sample() != 8: raise ValueError('Expected 8 bits per pixel.') elif pb.get_n_channels() != 3: raise ValueError('Expected RGB image.') # Read the entire buffer into a python bytes object. # read_pixel_bytes: New in version 2.32. pixel_bytes = pb.read_pixel_bytes().get_data() # type: bytes width, height = g[2], g[3] # Probably for SSE alignment reasons, the pixbuf has extra data in each line. # The args after "raw" help handle this; see # http://effbot.org/imagingbook/decoder.htm#the-raw-decoder return Image.frombytes( 'RGB', (width, height), pixel_bytes, 'raw', 'RGB', pb.get_rowstride(), 1)
[ "def", "grab", "(", "self", ",", "bbox", "=", "None", ")", ":", "w", "=", "Gdk", ".", "get_default_root_window", "(", ")", "if", "bbox", "is", "not", "None", ":", "g", "=", "[", "bbox", "[", "0", "]", ",", "bbox", "[", "1", "]", ",", "bbox", ...
Grabs an image directly to a buffer. :param bbox: Optional tuple or list containing (x1, y1, x2, y2) coordinates of sub-region to capture. :return: PIL RGB image :raises: ValueError, if image data does not have 3 channels (RGB), each with 8 bits. :rtype: Image
[ "Grabs", "an", "image", "directly", "to", "a", "buffer", "." ]
python
valid
spyder-ide/spyder
spyder/plugins/editor/api/decoration.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/editor/api/decoration.py#L211-L220
def set_as_error(self, color=Qt.red): """ Highlights text as a syntax error. :param color: Underline color :type color: QtGui.QColor """ self.format.setUnderlineStyle( QTextCharFormat.WaveUnderline) self.format.setUnderlineColor(color)
[ "def", "set_as_error", "(", "self", ",", "color", "=", "Qt", ".", "red", ")", ":", "self", ".", "format", ".", "setUnderlineStyle", "(", "QTextCharFormat", ".", "WaveUnderline", ")", "self", ".", "format", ".", "setUnderlineColor", "(", "color", ")" ]
Highlights text as a syntax error. :param color: Underline color :type color: QtGui.QColor
[ "Highlights", "text", "as", "a", "syntax", "error", "." ]
python
train
sarugaku/vistir
src/vistir/compat.py
https://github.com/sarugaku/vistir/blob/96c008ee62a43608d1e70797f74634cb66a004c1/src/vistir/compat.py#L205-L216
def is_bytes(string): """Check if a string is a bytes instance :param Union[str, bytes] string: A string that may be string or bytes like :return: Whether the provided string is a bytes type or not :rtype: bool """ if six.PY3 and isinstance(string, (bytes, memoryview, bytearray)): # noqa return True elif six.PY2 and isinstance(string, (buffer, bytearray)): # noqa return True return False
[ "def", "is_bytes", "(", "string", ")", ":", "if", "six", ".", "PY3", "and", "isinstance", "(", "string", ",", "(", "bytes", ",", "memoryview", ",", "bytearray", ")", ")", ":", "# noqa", "return", "True", "elif", "six", ".", "PY2", "and", "isinstance", ...
Check if a string is a bytes instance :param Union[str, bytes] string: A string that may be string or bytes like :return: Whether the provided string is a bytes type or not :rtype: bool
[ "Check", "if", "a", "string", "is", "a", "bytes", "instance" ]
python
train
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L8211-L8221
def read_uic4tag(fh, byteorder, dtype, planecount, offsetsize): """Read MetaMorph STK UIC4Tag from file and return as dict.""" assert dtype == '1I' and byteorder == '<' result = {} while True: tagid = struct.unpack('<H', fh.read(2))[0] if tagid == 0: break name, value = read_uic_tag(fh, tagid, planecount, offset=False) result[name] = value return result
[ "def", "read_uic4tag", "(", "fh", ",", "byteorder", ",", "dtype", ",", "planecount", ",", "offsetsize", ")", ":", "assert", "dtype", "==", "'1I'", "and", "byteorder", "==", "'<'", "result", "=", "{", "}", "while", "True", ":", "tagid", "=", "struct", "...
Read MetaMorph STK UIC4Tag from file and return as dict.
[ "Read", "MetaMorph", "STK", "UIC4Tag", "from", "file", "and", "return", "as", "dict", "." ]
python
train
DLR-RM/RAFCON
source/rafcon/core/library_manager.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/core/library_manager.py#L377-L403
def get_library_state_copy_instance(self, lib_os_path): """ A method to get a state copy of the library specified via the lib_os_path. :param lib_os_path: the location of the library to get a copy for :return: """ # originally libraries were called like this; DO NOT DELETE; interesting for performance tests # state_machine = storage.load_state_machine_from_path(lib_os_path) # return state_machine.version, state_machine.root_state # TODO observe changes on file system and update data if lib_os_path in self._loaded_libraries: # this list can also be taken to open library state machines TODO -> implement it -> because faster state_machine = self._loaded_libraries[lib_os_path] # logger.info("Take copy of {0}".format(lib_os_path)) # as long as the a library state root state is never edited so the state first has to be copied here state_copy = copy.deepcopy(state_machine.root_state) return state_machine.version, state_copy else: state_machine = storage.load_state_machine_from_path(lib_os_path) self._loaded_libraries[lib_os_path] = state_machine if config.global_config.get_config_value("NO_PROGRAMMATIC_CHANGE_OF_LIBRARY_STATES_PERFORMED", False): return state_machine.version, state_machine.root_state else: state_copy = copy.deepcopy(state_machine.root_state) return state_machine.version, state_copy
[ "def", "get_library_state_copy_instance", "(", "self", ",", "lib_os_path", ")", ":", "# originally libraries were called like this; DO NOT DELETE; interesting for performance tests", "# state_machine = storage.load_state_machine_from_path(lib_os_path)", "# return state_machine.version, state_mac...
A method to get a state copy of the library specified via the lib_os_path. :param lib_os_path: the location of the library to get a copy for :return:
[ "A", "method", "to", "get", "a", "state", "copy", "of", "the", "library", "specified", "via", "the", "lib_os_path", "." ]
python
train
bunq/sdk_python
bunq/sdk/model/core.py
https://github.com/bunq/sdk_python/blob/da6c9b83e6d83ee8062617f53c6eb7293c0d863d/bunq/sdk/model/core.py#L40-L51
def _from_json_array_nested(cls, response_raw): """ :type response_raw: client.BunqResponseRaw :rtype: bunq.sdk.client.BunqResponse[cls] """ json = response_raw.body_bytes.decode() obj = converter.json_to_class(dict, json) value = converter.deserialize(cls, obj[cls._FIELD_RESPONSE]) return client.BunqResponse(value, response_raw.headers)
[ "def", "_from_json_array_nested", "(", "cls", ",", "response_raw", ")", ":", "json", "=", "response_raw", ".", "body_bytes", ".", "decode", "(", ")", "obj", "=", "converter", ".", "json_to_class", "(", "dict", ",", "json", ")", "value", "=", "converter", "...
:type response_raw: client.BunqResponseRaw :rtype: bunq.sdk.client.BunqResponse[cls]
[ ":", "type", "response_raw", ":", "client", ".", "BunqResponseRaw" ]
python
train
openstack/horizon
horizon/tabs/views.py
https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/horizon/tabs/views.py#L40-L44
def get_tabs(self, request, **kwargs): """Returns the initialized tab group for this view.""" if self._tab_group is None: self._tab_group = self.tab_group_class(request, **kwargs) return self._tab_group
[ "def", "get_tabs", "(", "self", ",", "request", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "_tab_group", "is", "None", ":", "self", ".", "_tab_group", "=", "self", ".", "tab_group_class", "(", "request", ",", "*", "*", "kwargs", ")", "retu...
Returns the initialized tab group for this view.
[ "Returns", "the", "initialized", "tab", "group", "for", "this", "view", "." ]
python
train
bwhite/hadoopy
hadoopy/thirdparty/pyinstaller/PyInstaller/lib/altgraph/Graph.py
https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/lib/altgraph/Graph.py#L64-L84
def add_node(self, node, node_data=None): """ Adds a new node to the graph. Arbitrary data can be attached to the node via the node_data parameter. Adding the same node twice will be silently ignored. The node must be a hashable value. """ # # the nodes will contain tuples that will store incoming edges, # outgoing edges and data # # index 0 -> incoming edges # index 1 -> outgoing edges if node in self.hidden_nodes: # Node is present, but hidden return if node not in self.nodes: self.nodes[node] = ([], [], node_data)
[ "def", "add_node", "(", "self", ",", "node", ",", "node_data", "=", "None", ")", ":", "#", "# the nodes will contain tuples that will store incoming edges,", "# outgoing edges and data", "#", "# index 0 -> incoming edges", "# index 1 -> outgoing edges", "if", "node", "in", ...
Adds a new node to the graph. Arbitrary data can be attached to the node via the node_data parameter. Adding the same node twice will be silently ignored. The node must be a hashable value.
[ "Adds", "a", "new", "node", "to", "the", "graph", ".", "Arbitrary", "data", "can", "be", "attached", "to", "the", "node", "via", "the", "node_data", "parameter", ".", "Adding", "the", "same", "node", "twice", "will", "be", "silently", "ignored", "." ]
python
train
totalgood/pugnlp
src/pugnlp/segmentation.py
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/segmentation.py#L137-L186
def generate_sentences(text='', train_path=None, case_sensitive=True, ext=['.md', '.txt', '.asc', '.asciidoc'], normalize_ordinals=1, normalize_newlines=1, normalize_sentence_boundaries=1, epochs=20, classifier=nlup.BinaryAveragedPerceptron, re_eol=r'\r\n|\r|\n', **kwargs): """Generate sentences from a sequence of characters (text) Wrapped text (newlines at column 80, for instance) will break this, breaking up sentences. Wrapper and preprocessor for Kyle Gorman's "DetectorMorse" module Arguments: preprocess (bool): whether to assume common sentence delimitters in markdown and asciidoc formatting using r'[.?!][ \t]*\n\n|[.?!][ \t]*\r\n\r\n|[.?!][ \t]*\r\r|[.?!][ ][ ][A-Z]' case_sensitive (int): whether to consider case to make decisions about sentence boundaries epochs (int): number of epochs (iterations for classifier training) """ ext = [ext] if isinstance(ext, basestring) else ext if isinstance(text, basestring) and len(text) <= 256: if os.path.isfile(text) and os.path.splitext(text)[-1].lower() in ext: text = open(text) elif os.path.isdir(text): return chain.from_iterable(( generate_sentences(text=stat['path'], train_path=train_path, ext=ext, normalize_ordinals=normalize_ordinals, normalize_newlines=normalize_ordinals, normalize_sentence_boundaries=normalize_sentence_boundaries, epochs=epochs, classifier=classifier, re_eol=re_eol, **kwargs) for stat in find_files(text, ext=ext))) if isinstance(text, basestring): texts = Split(text=text, re_delim=re_eol) else: texts = chain.from_iterable(Split(text=doc, re_delm=re_eol) for doc in text) if normalize_newlines: re_eol = re.compile(r'\r\n|\r') texts = (re_eol.sub(r'\n', doc) for doc in texts) if normalize_ordinals: re_ord = re.compile(r'\b([0-9]+|[A-Za-z])[.?!][ \t]{1,4}([A-Za-z])') texts = (re_ord.sub(r'\1) \2', doc) for doc in texts) if normalize_sentence_boundaries: re_eos = re.compile(r'([.?!])([ ][ ])[\n]?([A-Z])') texts = (re_eos.sub(r'\1\n\3', doc) for doc in texts) if train_path: generate_sentences.detector = 
Detector(slurp(train_path), epochs=epochs, nocase=not case_sensitive) elif not isinstance(getattr(generate_sentences, 'detector', None), Detector): generate_sentences.detector = Detector.load( os.path.join(DATA_PATH, 'wsj_pugnlp.detector_morse.Detector.json.gz')) # generate_sentences.detector = SentenceDetector(text=text, nocase=not case_sensitive, # epochs=epochs, classifier=classifier) return iter(chain.from_iterable((s.lstrip() for s in generate_sentences.detector.segments(text)) for text in texts))
[ "def", "generate_sentences", "(", "text", "=", "''", ",", "train_path", "=", "None", ",", "case_sensitive", "=", "True", ",", "ext", "=", "[", "'.md'", ",", "'.txt'", ",", "'.asc'", ",", "'.asciidoc'", "]", ",", "normalize_ordinals", "=", "1", ",", "norm...
Generate sentences from a sequence of characters (text) Wrapped text (newlines at column 80, for instance) will break this, breaking up sentences. Wrapper and preprocessor for Kyle Gorman's "DetectorMorse" module Arguments: preprocess (bool): whether to assume common sentence delimitters in markdown and asciidoc formatting using r'[.?!][ \t]*\n\n|[.?!][ \t]*\r\n\r\n|[.?!][ \t]*\r\r|[.?!][ ][ ][A-Z]' case_sensitive (int): whether to consider case to make decisions about sentence boundaries epochs (int): number of epochs (iterations for classifier training)
[ "Generate", "sentences", "from", "a", "sequence", "of", "characters", "(", "text", ")" ]
python
train
rosenbrockc/acorn
acorn/logging/analysis.py
https://github.com/rosenbrockc/acorn/blob/9a44d1a1ad8bfc2c54a6b56d9efe54433a797820/acorn/logging/analysis.py#L32-L46
def analyze(fqdn, result, argl, argd): """Analyzes the result from calling the method with the specified FQDN. Args: fqdn (str): full-qualified name of the method that was called. result: result of calling the method with `fqdn`. argl (tuple): positional arguments passed to the method call. argd (dict): keyword arguments passed to the method call. """ package = fqdn.split('.')[0] if package not in _methods: _load_methods(package) if _methods[package] is not None and fqdn in _methods[package]: return _methods[package][fqdn](fqdn, result, *argl, **argd)
[ "def", "analyze", "(", "fqdn", ",", "result", ",", "argl", ",", "argd", ")", ":", "package", "=", "fqdn", ".", "split", "(", "'.'", ")", "[", "0", "]", "if", "package", "not", "in", "_methods", ":", "_load_methods", "(", "package", ")", "if", "_met...
Analyzes the result from calling the method with the specified FQDN. Args: fqdn (str): full-qualified name of the method that was called. result: result of calling the method with `fqdn`. argl (tuple): positional arguments passed to the method call. argd (dict): keyword arguments passed to the method call.
[ "Analyzes", "the", "result", "from", "calling", "the", "method", "with", "the", "specified", "FQDN", "." ]
python
train
cnschema/cdata
cdata/table.py
https://github.com/cnschema/cdata/blob/893e2e1e27b61c8551c8b5f5f9bf05ec61490e23/cdata/table.py#L22-L53
def json2excel(items, keys, filename, page_size=60000): """ max_page_size is 65000 because we output old excel .xls format """ wb = xlwt.Workbook() rowindex = 0 sheetindex = 0 for item in items: if rowindex % page_size == 0: sheetname = "%02d" % sheetindex ws = wb.add_sheet(sheetname) rowindex = 0 sheetindex += 1 colindex = 0 for key in keys: ws.write(rowindex, colindex, key) colindex += 1 rowindex += 1 colindex = 0 for key in keys: v = item.get(key, "") if type(v) == list: v = ','.join(v) if type(v) == set: v = ','.join(v) ws.write(rowindex, colindex, v) colindex += 1 rowindex += 1 logging.debug(filename) wb.save(filename)
[ "def", "json2excel", "(", "items", ",", "keys", ",", "filename", ",", "page_size", "=", "60000", ")", ":", "wb", "=", "xlwt", ".", "Workbook", "(", ")", "rowindex", "=", "0", "sheetindex", "=", "0", "for", "item", "in", "items", ":", "if", "rowindex"...
max_page_size is 65000 because we output old excel .xls format
[ "max_page_size", "is", "65000", "because", "we", "output", "old", "excel", ".", "xls", "format" ]
python
train
stephrdev/django-tapeforms
tapeforms/utils.py
https://github.com/stephrdev/django-tapeforms/blob/255602de43777141f18afaf30669d7bdd4f7c323/tapeforms/utils.py#L4-L11
def join_css_class(css_class, *additional_css_classes): """ Returns the union of one or more CSS classes as a space-separated string. Note that the order will not be preserved. """ css_set = set(chain.from_iterable( c.split(' ') for c in [css_class, *additional_css_classes] if c)) return ' '.join(css_set)
[ "def", "join_css_class", "(", "css_class", ",", "*", "additional_css_classes", ")", ":", "css_set", "=", "set", "(", "chain", ".", "from_iterable", "(", "c", ".", "split", "(", "' '", ")", "for", "c", "in", "[", "css_class", ",", "*", "additional_css_class...
Returns the union of one or more CSS classes as a space-separated string. Note that the order will not be preserved.
[ "Returns", "the", "union", "of", "one", "or", "more", "CSS", "classes", "as", "a", "space", "-", "separated", "string", ".", "Note", "that", "the", "order", "will", "not", "be", "preserved", "." ]
python
train
openstax/cnx-publishing
cnxpublishing/db.py
https://github.com/openstax/cnx-publishing/blob/f55b4a2c45d8618737288f1b74b4139d5ac74154/cnxpublishing/db.py#L292-L309
def _validate_roles(model): """Given the model, check that all the metadata role values have valid information in them and any required metadata fields contain values. """ required_roles = (ATTRIBUTED_ROLE_KEYS[0], ATTRIBUTED_ROLE_KEYS[4],) for role_key in ATTRIBUTED_ROLE_KEYS: try: roles = model.metadata[role_key] except KeyError: if role_key in required_roles: raise exceptions.MissingRequiredMetadata(role_key) else: if role_key in required_roles and len(roles) == 0: raise exceptions.MissingRequiredMetadata(role_key) for role in roles: if role.get('type') != 'cnx-id': raise exceptions.InvalidRole(role_key, role)
[ "def", "_validate_roles", "(", "model", ")", ":", "required_roles", "=", "(", "ATTRIBUTED_ROLE_KEYS", "[", "0", "]", ",", "ATTRIBUTED_ROLE_KEYS", "[", "4", "]", ",", ")", "for", "role_key", "in", "ATTRIBUTED_ROLE_KEYS", ":", "try", ":", "roles", "=", "model"...
Given the model, check that all the metadata role values have valid information in them and any required metadata fields contain values.
[ "Given", "the", "model", "check", "that", "all", "the", "metadata", "role", "values", "have", "valid", "information", "in", "them", "and", "any", "required", "metadata", "fields", "contain", "values", "." ]
python
valid
mcieslik-mctp/papy
src/numap/NuMap.py
https://github.com/mcieslik-mctp/papy/blob/708e50827b5db46bbea081982cb74b9b0e464064/src/numap/NuMap.py#L449-L469
def _start_workers(self): """ (internal) starts **worker pool** threads or processes. """ # creating the pool of worker process or threads log.debug('%s starts a %s-pool of %s workers.' % \ (self, self.worker_type, self.worker_num)) self.pool = [] for host, worker_num in \ [(None, self.worker_num)] + list(self.worker_remote): for _worker in range(worker_num): __worker = Thread(target=_pool_worker, args=\ (self._inqueue, self._outqueue, host)) \ if self.worker_type == 'thread' else \ Process(target=_pool_worker, args=\ (self._inqueue, self._outqueue, host)) self.pool.append(__worker) for __worker in self.pool: __worker.daemon = True __worker.start() log.debug('%s started the pool' % self)
[ "def", "_start_workers", "(", "self", ")", ":", "# creating the pool of worker process or threads", "log", ".", "debug", "(", "'%s starts a %s-pool of %s workers.'", "%", "(", "self", ",", "self", ".", "worker_type", ",", "self", ".", "worker_num", ")", ")", "self",...
(internal) starts **worker pool** threads or processes.
[ "(", "internal", ")", "starts", "**", "worker", "pool", "**", "threads", "or", "processes", "." ]
python
train
QInfer/python-qinfer
src/qinfer/parallel.py
https://github.com/QInfer/python-qinfer/blob/8170c84a0be1723f8c6b09e0d3c7a40a886f1fe3/src/qinfer/parallel.py#L183-L224
def likelihood(self, outcomes, modelparams, expparams): """ Returns the likelihood for the underlying (serial) model, distributing the model parameter array across the engines controlled by this parallelized model. Returns what the serial model would return, see :attr:`~Model.likelihood` """ # By calling the superclass implementation, we can consolidate # call counting there. super(DirectViewParallelizedModel, self).likelihood(outcomes, modelparams, expparams) # If there's less models than some threshold, just use the serial model. # By default, we'll set that threshold to be the number of engines * 10. if modelparams.shape[0] <= self._serial_threshold: return self.underlying_model.likelihood(outcomes, modelparams, expparams) if self._dv is None: raise RuntimeError( "No direct view provided; this may be because the instance was " "loaded from a pickle or NumPy saved array without providing a " "new direct view." ) # Need to decorate with interactive to overcome namespace issues with # remote engines. @interactive def serial_likelihood(mps, sm, os, eps): return sm.likelihood(os, mps, eps) # TODO: check whether there's a better way to pass the extra parameters # that doesn't use so much memory. # The trick is that serial_likelihood will be pickled, so we need to be # careful about closures. L = self._dv.map_sync( serial_likelihood, np.array_split(modelparams, self.n_engines, axis=0), [self.underlying_model] * self.n_engines, [outcomes] * self.n_engines, [expparams] * self.n_engines ) return np.concatenate(L, axis=1)
[ "def", "likelihood", "(", "self", ",", "outcomes", ",", "modelparams", ",", "expparams", ")", ":", "# By calling the superclass implementation, we can consolidate", "# call counting there.", "super", "(", "DirectViewParallelizedModel", ",", "self", ")", ".", "likelihood", ...
Returns the likelihood for the underlying (serial) model, distributing the model parameter array across the engines controlled by this parallelized model. Returns what the serial model would return, see :attr:`~Model.likelihood`
[ "Returns", "the", "likelihood", "for", "the", "underlying", "(", "serial", ")", "model", "distributing", "the", "model", "parameter", "array", "across", "the", "engines", "controlled", "by", "this", "parallelized", "model", ".", "Returns", "what", "the", "serial...
python
train
svenkreiss/databench
databench/app.py
https://github.com/svenkreiss/databench/blob/99d4adad494b60a42af6b8bfba94dd0c41ba0786/databench/app.py#L345-L352
def build(self): """Run the build command specified in index.yaml.""" for cmd in self.build_cmds: log.info('building command: {}'.format(cmd)) full_cmd = 'cd {}; {}'.format(self.analyses_path, cmd) log.debug('full command: {}'.format(full_cmd)) subprocess.call(full_cmd, shell=True) log.info('build done')
[ "def", "build", "(", "self", ")", ":", "for", "cmd", "in", "self", ".", "build_cmds", ":", "log", ".", "info", "(", "'building command: {}'", ".", "format", "(", "cmd", ")", ")", "full_cmd", "=", "'cd {}; {}'", ".", "format", "(", "self", ".", "analyse...
Run the build command specified in index.yaml.
[ "Run", "the", "build", "command", "specified", "in", "index", ".", "yaml", "." ]
python
train
galactics/beyond
beyond/orbits/tle.py
https://github.com/galactics/beyond/blob/7a7590ff0fd4c0bac3e8e383ecca03caa98e5742/beyond/orbits/tle.py#L199-L211
def orbit(self): """Convert TLE to Orbit object, in order to make computations on it Return: ~beyond.orbits.orbit.Orbit: """ data = { 'bstar': self.bstar, 'ndot': self.ndot, 'ndotdot': self.ndotdot, 'tle': self.text } return Orbit(self.epoch, self.to_list(), "TLE", "TEME", 'Sgp4', **data)
[ "def", "orbit", "(", "self", ")", ":", "data", "=", "{", "'bstar'", ":", "self", ".", "bstar", ",", "'ndot'", ":", "self", ".", "ndot", ",", "'ndotdot'", ":", "self", ".", "ndotdot", ",", "'tle'", ":", "self", ".", "text", "}", "return", "Orbit", ...
Convert TLE to Orbit object, in order to make computations on it Return: ~beyond.orbits.orbit.Orbit:
[ "Convert", "TLE", "to", "Orbit", "object", "in", "order", "to", "make", "computations", "on", "it" ]
python
train
utiasSTARS/pykitti
pykitti/odometry.py
https://github.com/utiasSTARS/pykitti/blob/d3e1bb81676e831886726cc5ed79ce1f049aef2c/pykitti/odometry.py#L201-L214
def _load_timestamps(self): """Load timestamps from file.""" timestamp_file = os.path.join(self.sequence_path, 'times.txt') # Read and parse the timestamps self.timestamps = [] with open(timestamp_file, 'r') as f: for line in f.readlines(): t = dt.timedelta(seconds=float(line)) self.timestamps.append(t) # Subselect the chosen range of frames, if any if self.frames is not None: self.timestamps = [self.timestamps[i] for i in self.frames]
[ "def", "_load_timestamps", "(", "self", ")", ":", "timestamp_file", "=", "os", ".", "path", ".", "join", "(", "self", ".", "sequence_path", ",", "'times.txt'", ")", "# Read and parse the timestamps", "self", ".", "timestamps", "=", "[", "]", "with", "open", ...
Load timestamps from file.
[ "Load", "timestamps", "from", "file", "." ]
python
train
pyca/pynacl
src/nacl/bindings/crypto_scalarmult.py
https://github.com/pyca/pynacl/blob/0df0c2c7693fa5d316846111ce510702756f5feb/src/nacl/bindings/crypto_scalarmult.py#L29-L44
def crypto_scalarmult_base(n): """ Computes and returns the scalar product of a standard group element and an integer ``n``. :param n: bytes :rtype: bytes """ q = ffi.new("unsigned char[]", crypto_scalarmult_BYTES) rc = lib.crypto_scalarmult_base(q, n) ensure(rc == 0, 'Unexpected library error', raising=exc.RuntimeError) return ffi.buffer(q, crypto_scalarmult_SCALARBYTES)[:]
[ "def", "crypto_scalarmult_base", "(", "n", ")", ":", "q", "=", "ffi", ".", "new", "(", "\"unsigned char[]\"", ",", "crypto_scalarmult_BYTES", ")", "rc", "=", "lib", ".", "crypto_scalarmult_base", "(", "q", ",", "n", ")", "ensure", "(", "rc", "==", "0", "...
Computes and returns the scalar product of a standard group element and an integer ``n``. :param n: bytes :rtype: bytes
[ "Computes", "and", "returns", "the", "scalar", "product", "of", "a", "standard", "group", "element", "and", "an", "integer", "n", "." ]
python
train
pmacosta/pexdoc
docs/support/trace_my_module_1.py
https://github.com/pmacosta/pexdoc/blob/201ac243e5781347feb75896a4231429fe6da4b1/docs/support/trace_my_module_1.py#L14-L29
def trace_module(no_print=True): """Trace my_module exceptions.""" pwd = os.path.dirname(__file__) script_name = os.path.join(pwd, "test_my_module.py") with pexdoc.ExDocCxt() as exdoc_obj: if pytest.main(["-s", "-vv", "-x", "{0}".format(script_name)]): raise RuntimeError("Tracing did not complete successfully") if not no_print: module_prefix = "docs.support.my_module." callable_names = ["func", "MyClass.value"] for callable_name in callable_names: callable_name = module_prefix + callable_name print("\nCallable: {0}".format(callable_name)) print(exdoc_obj.get_sphinx_doc(callable_name, width=70)) print("\n") return copy.copy(exdoc_obj)
[ "def", "trace_module", "(", "no_print", "=", "True", ")", ":", "pwd", "=", "os", ".", "path", ".", "dirname", "(", "__file__", ")", "script_name", "=", "os", ".", "path", ".", "join", "(", "pwd", ",", "\"test_my_module.py\"", ")", "with", "pexdoc", "."...
Trace my_module exceptions.
[ "Trace", "my_module", "exceptions", "." ]
python
train
pysathq/pysat
pysat/solvers.py
https://github.com/pysathq/pysat/blob/522742e8f2d4c6ac50ecd9087f7a346206774c67/pysat/solvers.py#L2464-L2470
def prop_budget(self, budget): """ Set limit on the number of propagations. """ if self.maplesat: pysolvers.maplesat_pbudget(self.maplesat, budget)
[ "def", "prop_budget", "(", "self", ",", "budget", ")", ":", "if", "self", ".", "maplesat", ":", "pysolvers", ".", "maplesat_pbudget", "(", "self", ".", "maplesat", ",", "budget", ")" ]
Set limit on the number of propagations.
[ "Set", "limit", "on", "the", "number", "of", "propagations", "." ]
python
train
Dentosal/python-sc2
sc2/bot_ai.py
https://github.com/Dentosal/python-sc2/blob/608bd25f04e89d39cef68b40101d8e9a8a7f1634/sc2/bot_ai.py#L331-L364
async def find_placement(self, building: UnitTypeId, near: Union[Unit, Point2, Point3], max_distance: int=20, random_alternative: bool=True, placement_step: int=2) -> Optional[Point2]: """Finds a placement location for building.""" assert isinstance(building, (AbilityId, UnitTypeId)) assert isinstance(near, Point2) if isinstance(building, UnitTypeId): building = self._game_data.units[building.value].creation_ability else: # AbilityId building = self._game_data.abilities[building.value] if await self.can_place(building, near): return near if max_distance == 0: return None for distance in range(placement_step, max_distance, placement_step): possible_positions = [Point2(p).offset(near).to2 for p in ( [(dx, -distance) for dx in range(-distance, distance + 1, placement_step)] + [(dx, distance) for dx in range(-distance, distance + 1, placement_step)] + [(-distance, dy) for dy in range(-distance, distance + 1, placement_step)] + [(distance, dy) for dy in range(-distance, distance + 1, placement_step)] )] res = await self._client.query_building_placement(building, possible_positions) possible = [p for r, p in zip(res, possible_positions) if r == ActionResult.Success] if not possible: continue if random_alternative: return random.choice(possible) else: return min(possible, key=lambda p: p.distance_to(near)) return None
[ "async", "def", "find_placement", "(", "self", ",", "building", ":", "UnitTypeId", ",", "near", ":", "Union", "[", "Unit", ",", "Point2", ",", "Point3", "]", ",", "max_distance", ":", "int", "=", "20", ",", "random_alternative", ":", "bool", "=", "True",...
Finds a placement location for building.
[ "Finds", "a", "placement", "location", "for", "building", "." ]
python
train
observermedia/django-wordpress-rest
wordpress/loading.py
https://github.com/observermedia/django-wordpress-rest/blob/f0d96891d8ac5a69c8ba90e044876e756fad1bfe/wordpress/loading.py#L807-L816
def get_or_create_media(self, api_media): """ Find or create a Media object given API data. :param api_media: the API data for the Media :return: a tuple of an Media instance and a boolean indicating whether the Media was created or not """ return Media.objects.get_or_create(site_id=self.site_id, wp_id=api_media["ID"], defaults=self.api_object_data("media", api_media))
[ "def", "get_or_create_media", "(", "self", ",", "api_media", ")", ":", "return", "Media", ".", "objects", ".", "get_or_create", "(", "site_id", "=", "self", ".", "site_id", ",", "wp_id", "=", "api_media", "[", "\"ID\"", "]", ",", "defaults", "=", "self", ...
Find or create a Media object given API data. :param api_media: the API data for the Media :return: a tuple of an Media instance and a boolean indicating whether the Media was created or not
[ "Find", "or", "create", "a", "Media", "object", "given", "API", "data", "." ]
python
train
Phylliade/ikpy
src/ikpy/URDF_utils.py
https://github.com/Phylliade/ikpy/blob/60e36d6163136942bf520d952db17123c658d0b6/src/ikpy/URDF_utils.py#L261-L271
def _convert_angle_limit(angle, joint, **kwargs): """Converts the limit angle of the PyPot JSON file to the internal format""" angle_pypot = angle # No need to take care of orientation if joint["orientation"] == "indirect": angle_pypot = 1 * angle_pypot # angle_pypot = angle_pypot + offset return angle_pypot * np.pi / 180
[ "def", "_convert_angle_limit", "(", "angle", ",", "joint", ",", "*", "*", "kwargs", ")", ":", "angle_pypot", "=", "angle", "# No need to take care of orientation", "if", "joint", "[", "\"orientation\"", "]", "==", "\"indirect\"", ":", "angle_pypot", "=", "1", "*...
Converts the limit angle of the PyPot JSON file to the internal format
[ "Converts", "the", "limit", "angle", "of", "the", "PyPot", "JSON", "file", "to", "the", "internal", "format" ]
python
train
Robpol86/colorclass
colorclass/codes.py
https://github.com/Robpol86/colorclass/blob/692e2d6f5ad470b6221c8cb9641970dc5563a572/colorclass/codes.py#L196-L229
def list_tags(): """List the available tags. :return: List of 4-item tuples: opening tag, closing tag, main ansi value, closing ansi value. :rtype: list """ # Build reverse dictionary. Keys are closing tags, values are [closing ansi, opening tag, opening ansi]. reverse_dict = dict() for tag, ansi in sorted(BASE_CODES.items()): if tag.startswith('/'): reverse_dict[tag] = [ansi, None, None] else: reverse_dict['/' + tag][1:] = [tag, ansi] # Collapse four_item_tuples = [(v[1], k, v[2], v[0]) for k, v in reverse_dict.items()] # Sort. def sorter(four_item): """Sort /all /fg /bg first, then b i u flash, then auto colors, then dark colors, finally light colors. :param iter four_item: [opening tag, closing tag, main ansi value, closing ansi value] :return Sorting weight. :rtype: int """ if not four_item[2]: # /all /fg /bg return four_item[3] - 200 if four_item[2] < 10 or four_item[0].startswith('auto'): # b f i u or auto colors return four_item[2] - 100 return four_item[2] four_item_tuples.sort(key=sorter) return four_item_tuples
[ "def", "list_tags", "(", ")", ":", "# Build reverse dictionary. Keys are closing tags, values are [closing ansi, opening tag, opening ansi].", "reverse_dict", "=", "dict", "(", ")", "for", "tag", ",", "ansi", "in", "sorted", "(", "BASE_CODES", ".", "items", "(", ")", ")...
List the available tags. :return: List of 4-item tuples: opening tag, closing tag, main ansi value, closing ansi value. :rtype: list
[ "List", "the", "available", "tags", "." ]
python
train
Metatab/tableintuit
tableintuit/types.py
https://github.com/Metatab/tableintuit/blob/9a3d500d5d90e44e6637dd17ca4c8dae474d6d4c/tableintuit/types.py#L423-L450
def promote_type(orig_type, new_type): """Given a table with an original type, decide whether a new determination of a new applicable type should overide the existing one""" if not new_type: return orig_type if not orig_type: return new_type try: orig_type = orig_type.__name__ except AttributeError: pass try: new_type = new_type.__name__ except AttributeError: pass type_precidence = ['unknown', 'int', 'float', 'date', 'time', 'datetime', 'str', 'bytes', 'unicode'] # TODO This will fail for dates and times. if type_precidence.index(new_type) > type_precidence.index(orig_type): return new_type else: return orig_type
[ "def", "promote_type", "(", "orig_type", ",", "new_type", ")", ":", "if", "not", "new_type", ":", "return", "orig_type", "if", "not", "orig_type", ":", "return", "new_type", "try", ":", "orig_type", "=", "orig_type", ".", "__name__", "except", "AttributeError"...
Given a table with an original type, decide whether a new determination of a new applicable type should overide the existing one
[ "Given", "a", "table", "with", "an", "original", "type", "decide", "whether", "a", "new", "determination", "of", "a", "new", "applicable", "type", "should", "overide", "the", "existing", "one" ]
python
train
boundlessgeo/gsconfig
src/geoserver/catalog.py
https://github.com/boundlessgeo/gsconfig/blob/532f561f32b91ea8debea0573c503dd20988bf40/src/geoserver/catalog.py#L932-L976
def get_layergroups(self, names=None, workspaces=None): ''' names and workspaces can be provided as a comma delimited strings or as arrays, and are used for filtering. If no workspaces are provided, will return all layer groups in the catalog (global and workspace specific). Will always return an array. ''' layergroups = [] if workspaces is None or len(workspaces) == 0: # Add global layergroups url = "{}/layergroups.xml".format(self.service_url) groups = self.get_xml(url) layergroups.extend([LayerGroup(self, g.find("name").text, None) for g in groups.findall("layerGroup")]) workspaces = [] elif isinstance(workspaces, basestring): workspaces = [s.strip() for s in workspaces.split(',') if s.strip()] elif isinstance(workspaces, Workspace): workspaces = [workspaces] if not workspaces: workspaces = self.get_workspaces() for ws in workspaces: ws_name = _name(ws) url = "{}/workspaces/{}/layergroups.xml".format(self.service_url, ws_name) try: groups = self.get_xml(url) except FailedRequestError as e: if "no such workspace" in str(e).lower(): continue else: raise FailedRequestError("Failed to get layergroups: {}".format(e)) layergroups.extend([LayerGroup(self, g.find("name").text, ws_name) for g in groups.findall("layerGroup")]) if names is None: names = [] elif isinstance(names, basestring): names = [s.strip() for s in names.split(',') if s.strip()] if layergroups and names: return ([lg for lg in layergroups if lg.name in names]) return layergroups
[ "def", "get_layergroups", "(", "self", ",", "names", "=", "None", ",", "workspaces", "=", "None", ")", ":", "layergroups", "=", "[", "]", "if", "workspaces", "is", "None", "or", "len", "(", "workspaces", ")", "==", "0", ":", "# Add global layergroups", "...
names and workspaces can be provided as a comma delimited strings or as arrays, and are used for filtering. If no workspaces are provided, will return all layer groups in the catalog (global and workspace specific). Will always return an array.
[ "names", "and", "workspaces", "can", "be", "provided", "as", "a", "comma", "delimited", "strings", "or", "as", "arrays", "and", "are", "used", "for", "filtering", ".", "If", "no", "workspaces", "are", "provided", "will", "return", "all", "layer", "groups", ...
python
valid
i3visio/osrframework
osrframework/utils/general.py
https://github.com/i3visio/osrframework/blob/83437f4c14c9c08cb80a896bd9834c77f6567871/osrframework/utils/general.py#L294-L342
def usufyToTextExport(d, fPath=None): """ Workaround to export to a .txt file or to show the information. Args: ----- d: Data to export. fPath: File path for the output file. If None was provided, it will assume that it has to print it. Returns: -------- unicode: It sometimes returns a unicode representation of the Sheet received. """ # Manual check... if d == []: return "+------------------+\n| No data found... |\n+------------------+" import pyexcel as pe import pyexcel.ext.text as text if fPath == None: isTerminal = True else: isTerminal = False try: oldData = get_data(fPath) except: # No information has been recovered oldData = {"OSRFramework":[]} # Generating the new tabular data tabularData = _generateTabularData(d, {"OSRFramework":[[]]}, True, canUnicode=False) # The tabular data contains a dict representing the whole book and we need only the sheet!! sheet = pe.Sheet(tabularData["OSRFramework"]) sheet.name = "Profiles recovered (" + getCurrentStrDatetime() +")." # Defining the headers sheet.name_columns_by_row(0) text.TABLEFMT = "grid" try: with open(fPath, "w") as oF: oF.write(str(sheet)) except Exception as e: # If a fPath was not provided... We will only print the info: return unicode(sheet)
[ "def", "usufyToTextExport", "(", "d", ",", "fPath", "=", "None", ")", ":", "# Manual check...", "if", "d", "==", "[", "]", ":", "return", "\"+------------------+\\n| No data found... |\\n+------------------+\"", "import", "pyexcel", "as", "pe", "import", "pyexcel", ...
Workaround to export to a .txt file or to show the information. Args: ----- d: Data to export. fPath: File path for the output file. If None was provided, it will assume that it has to print it. Returns: -------- unicode: It sometimes returns a unicode representation of the Sheet received.
[ "Workaround", "to", "export", "to", "a", ".", "txt", "file", "or", "to", "show", "the", "information", "." ]
python
train
saltstack/salt
salt/utils/pbm.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/pbm.py#L98-L120
def get_placement_solver(service_instance): ''' Returns a placement solver service_instance Service instance to the host or vCenter ''' stub = salt.utils.vmware.get_new_service_instance_stub( service_instance, ns='pbm/2.0', path='/pbm/sdk') pbm_si = pbm.ServiceInstance('ServiceInstance', stub) try: profile_manager = pbm_si.RetrieveContent().placementSolver except vim.fault.NoPermission as exc: log.exception(exc) raise VMwareApiError('Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise VMwareRuntimeError(exc.msg) return profile_manager
[ "def", "get_placement_solver", "(", "service_instance", ")", ":", "stub", "=", "salt", ".", "utils", ".", "vmware", ".", "get_new_service_instance_stub", "(", "service_instance", ",", "ns", "=", "'pbm/2.0'", ",", "path", "=", "'/pbm/sdk'", ")", "pbm_si", "=", ...
Returns a placement solver service_instance Service instance to the host or vCenter
[ "Returns", "a", "placement", "solver" ]
python
train
manrajgrover/py-spinners
examples/examples.py
https://github.com/manrajgrover/py-spinners/blob/2400b5f355049a691202671cb2ccf2b269eef4a3/examples/examples.py#L75-L96
def animate(frames, interval, name, iterations=2): """Animate given frame for set number of iterations. Parameters ---------- frames : list Frames for animating interval : float Interval between two frames name : str Name of animation iterations : int, optional Number of loops for animations """ for i in range(iterations): for frame in frames: frame = get_coded_text(frame) output = "\r{0} {1}".format(frame, name) sys.stdout.write(output) sys.stdout.write(CLEAR_LINE) sys.stdout.flush() time.sleep(0.001 * interval)
[ "def", "animate", "(", "frames", ",", "interval", ",", "name", ",", "iterations", "=", "2", ")", ":", "for", "i", "in", "range", "(", "iterations", ")", ":", "for", "frame", "in", "frames", ":", "frame", "=", "get_coded_text", "(", "frame", ")", "out...
Animate given frame for set number of iterations. Parameters ---------- frames : list Frames for animating interval : float Interval between two frames name : str Name of animation iterations : int, optional Number of loops for animations
[ "Animate", "given", "frame", "for", "set", "number", "of", "iterations", "." ]
python
test
SpriteLink/NIPAP
nipap-www/nipapwww/controllers/xhr.py
https://github.com/SpriteLink/NIPAP/blob/f96069f11ab952d80b13cab06e0528f2d24b3de9/nipap-www/nipapwww/controllers/xhr.py#L522-L625
def add_prefix(self): """ Add prefix according to the specification. The following keys can be used: vrf ID of VRF to place the prefix in prefix the prefix to add if already known family address family (4 or 6) description A short description expires Expiry time of assignment comment Longer comment node Hostname of node type Type of prefix; reservation, assignment, host status Status of prefix; assigned, reserved, quarantine pool ID of pool country Country where the prefix is used order_id Order identifier customer_id Customer identifier vlan VLAN ID alarm_priority Alarm priority of prefix monitor If the prefix should be monitored or not from-prefix A prefix the prefix is to be allocated from from-pool A pool (ID) the prefix is to be allocated from prefix_length Prefix length of allocated prefix """ p = Prefix() # Sanitize input parameters if 'vrf' in request.json: try: if request.json['vrf'] is None or len(unicode(request.json['vrf'])) == 0: p.vrf = None else: p.vrf = VRF.get(int(request.json['vrf'])) except ValueError: return json.dumps({'error': 1, 'message': "Invalid VRF ID '%s'" % request.json['vrf']}) except NipapError, e: return json.dumps({'error': 1, 'message': e.args, 'type': type(e).__name__}) if 'description' in request.json: p.description = validate_string(request.json, 'description') if 'expires' in request.json: p.expires = validate_string(request.json, 'expires') if 'comment' in request.json: p.comment = validate_string(request.json, 'comment') if 'node' in request.json: p.node = validate_string(request.json, 'node') if 'status' in request.json: p.status = validate_string(request.json, 'status') if 'type' in request.json: p.type = validate_string(request.json, 'type') if 'pool' in request.json: if request.json['pool'] is not None: try: p.pool = Pool.get(int(request.json['pool'])) except NipapError, e: return json.dumps({'error': 1, 'message': e.args, 'type': type(e).__name__}) if 'country' in request.json: p.country = validate_string(request.json, 
'country') if 'order_id' in request.json: p.order_id = validate_string(request.json, 'order_id') if 'customer_id' in request.json: p.customer_id = validate_string(request.json, 'customer_id') if 'alarm_priority' in request.json: p.alarm_priority = validate_string(request.json, 'alarm_priority') if 'monitor' in request.json: p.monitor = request.json['monitor'] if 'vlan' in request.json: p.vlan = request.json['vlan'] if 'tags' in request.json: p.tags = request.json['tags'] if 'avps' in request.json: p.avps = request.json['avps'] # arguments args = {} if 'from_prefix' in request.json: args['from-prefix'] = request.json['from_prefix'] if 'from_pool' in request.json: try: args['from-pool'] = Pool.get(int(request.json['from_pool'])) except NipapError, e: return json.dumps({'error': 1, 'message': e.args, 'type': type(e).__name__}) if 'family' in request.json: args['family'] = request.json['family'] if 'prefix_length' in request.json: args['prefix_length'] = request.json['prefix_length'] # manual allocation? if args == {}: if 'prefix' in request.json: p.prefix = request.json['prefix'] try: p.save(args) except NipapError, e: return json.dumps({'error': 1, 'message': e.args, 'type': type(e).__name__}) return json.dumps(p, cls=NipapJSONEncoder)
[ "def", "add_prefix", "(", "self", ")", ":", "p", "=", "Prefix", "(", ")", "# Sanitize input parameters", "if", "'vrf'", "in", "request", ".", "json", ":", "try", ":", "if", "request", ".", "json", "[", "'vrf'", "]", "is", "None", "or", "len", "(", "u...
Add prefix according to the specification. The following keys can be used: vrf ID of VRF to place the prefix in prefix the prefix to add if already known family address family (4 or 6) description A short description expires Expiry time of assignment comment Longer comment node Hostname of node type Type of prefix; reservation, assignment, host status Status of prefix; assigned, reserved, quarantine pool ID of pool country Country where the prefix is used order_id Order identifier customer_id Customer identifier vlan VLAN ID alarm_priority Alarm priority of prefix monitor If the prefix should be monitored or not from-prefix A prefix the prefix is to be allocated from from-pool A pool (ID) the prefix is to be allocated from prefix_length Prefix length of allocated prefix
[ "Add", "prefix", "according", "to", "the", "specification", "." ]
python
train
OSSOS/MOP
src/ossos/core/ossos/cameras.py
https://github.com/OSSOS/MOP/blob/94f91d32ad5ec081d5a1ebd67604a838003465af/src/ossos/core/ossos/cameras.py#L307-L311
def separation(self, ra, dec): """Compute the separation between self and (ra,dec)""" if self.coord is None: return None return self.coord.separation(SkyCoord(ra, dec, unit=('degree', 'degree')))
[ "def", "separation", "(", "self", ",", "ra", ",", "dec", ")", ":", "if", "self", ".", "coord", "is", "None", ":", "return", "None", "return", "self", ".", "coord", ".", "separation", "(", "SkyCoord", "(", "ra", ",", "dec", ",", "unit", "=", "(", ...
Compute the separation between self and (ra,dec)
[ "Compute", "the", "separation", "between", "self", "and", "(", "ra", "dec", ")" ]
python
train
CZ-NIC/yangson
yangson/datamodel.py
https://github.com/CZ-NIC/yangson/blob/a4b9464041fa8b28f6020a420ababf18fddf5d4a/yangson/datamodel.py#L169-L177
def schema_digest(self) -> str: """Generate schema digest (to be used primarily by clients). Returns: Condensed information about the schema in JSON format. """ res = self.schema._node_digest() res["config"] = True return json.dumps(res)
[ "def", "schema_digest", "(", "self", ")", "->", "str", ":", "res", "=", "self", ".", "schema", ".", "_node_digest", "(", ")", "res", "[", "\"config\"", "]", "=", "True", "return", "json", ".", "dumps", "(", "res", ")" ]
Generate schema digest (to be used primarily by clients). Returns: Condensed information about the schema in JSON format.
[ "Generate", "schema", "digest", "(", "to", "be", "used", "primarily", "by", "clients", ")", "." ]
python
train
aarongarrett/inspyred
inspyred/ec/selectors.py
https://github.com/aarongarrett/inspyred/blob/d5976ab503cc9d51c6f586cbb7bb601a38c01128/inspyred/ec/selectors.py#L63-L83
def truncation_selection(random, population, args): """Selects the best individuals from the population. This function performs truncation selection, which means that only the best individuals from the current population are selected. This is a completely deterministic selection mechanism. .. Arguments: random -- the random number generator object population -- the population of individuals args -- a dictionary of keyword arguments Optional keyword arguments in args: - *num_selected* -- the number of individuals to be selected (default len(population)) """ num_selected = args.setdefault('num_selected', len(population)) population.sort(reverse=True) return population[:num_selected]
[ "def", "truncation_selection", "(", "random", ",", "population", ",", "args", ")", ":", "num_selected", "=", "args", ".", "setdefault", "(", "'num_selected'", ",", "len", "(", "population", ")", ")", "population", ".", "sort", "(", "reverse", "=", "True", ...
Selects the best individuals from the population. This function performs truncation selection, which means that only the best individuals from the current population are selected. This is a completely deterministic selection mechanism. .. Arguments: random -- the random number generator object population -- the population of individuals args -- a dictionary of keyword arguments Optional keyword arguments in args: - *num_selected* -- the number of individuals to be selected (default len(population))
[ "Selects", "the", "best", "individuals", "from", "the", "population", ".", "This", "function", "performs", "truncation", "selection", "which", "means", "that", "only", "the", "best", "individuals", "from", "the", "current", "population", "are", "selected", ".", ...
python
train
DataONEorg/d1_python
gmn/src/d1_gmn/app/views/external.py
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/views/external.py#L403-L413
def get_is_authorized(request, pid): """MNAuthorization.isAuthorized(did, action) -> Boolean.""" if 'action' not in request.GET: raise d1_common.types.exceptions.InvalidRequest( 0, 'Missing required parameter. required="action"' ) # Convert action string to action level. Raises InvalidRequest if the # action string is not valid. level = d1_gmn.app.auth.action_to_level(request.GET['action']) d1_gmn.app.auth.assert_allowed(request, level, pid) return d1_gmn.app.views.util.http_response_with_boolean_true_type()
[ "def", "get_is_authorized", "(", "request", ",", "pid", ")", ":", "if", "'action'", "not", "in", "request", ".", "GET", ":", "raise", "d1_common", ".", "types", ".", "exceptions", ".", "InvalidRequest", "(", "0", ",", "'Missing required parameter. required=\"act...
MNAuthorization.isAuthorized(did, action) -> Boolean.
[ "MNAuthorization", ".", "isAuthorized", "(", "did", "action", ")", "-", ">", "Boolean", "." ]
python
train
tensorflow/datasets
tensorflow_datasets/core/utils/gcs_utils.py
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/utils/gcs_utils.py#L52-L57
def gcs_files(prefix_filter=None): """List all files in GCS bucket.""" top_level_xml_str = download_gcs_file("", prefix_filter=prefix_filter) xml_root = ElementTree.fromstring(top_level_xml_str) filenames = [el[0].text for el in xml_root if el.tag.endswith("Contents")] return filenames
[ "def", "gcs_files", "(", "prefix_filter", "=", "None", ")", ":", "top_level_xml_str", "=", "download_gcs_file", "(", "\"\"", ",", "prefix_filter", "=", "prefix_filter", ")", "xml_root", "=", "ElementTree", ".", "fromstring", "(", "top_level_xml_str", ")", "filenam...
List all files in GCS bucket.
[ "List", "all", "files", "in", "GCS", "bucket", "." ]
python
train
juju/charm-helpers
charmhelpers/contrib/network/ovs/__init__.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/network/ovs/__init__.py#L200-L209
def check_for_eni_source(): ''' Juju removes the source line when setting up interfaces, replace if missing ''' with open('/etc/network/interfaces', 'r') as eni: for line in eni: if line == 'source /etc/network/interfaces.d/*': return with open('/etc/network/interfaces', 'a') as eni: eni.write('\nsource /etc/network/interfaces.d/*')
[ "def", "check_for_eni_source", "(", ")", ":", "with", "open", "(", "'/etc/network/interfaces'", ",", "'r'", ")", "as", "eni", ":", "for", "line", "in", "eni", ":", "if", "line", "==", "'source /etc/network/interfaces.d/*'", ":", "return", "with", "open", "(", ...
Juju removes the source line when setting up interfaces, replace if missing
[ "Juju", "removes", "the", "source", "line", "when", "setting", "up", "interfaces", "replace", "if", "missing" ]
python
train
MacHu-GWU/macro-project
macro/bot.py
https://github.com/MacHu-GWU/macro-project/blob/dae909d2d28acbfa2be623aa2dffe988f3882d4d/macro/bot.py#L339-L348
def tab(self, n=1, interval=0, pre_dl=None, post_dl=None): """Tap ``tab`` key for ``n`` times, with ``interval`` seconds of interval. **中文文档** 以 ``interval`` 中定义的频率按下某个tab键 ``n`` 次。 """ self.delay(pre_dl) self.k.tap_key(self.k.tab_key, n, interval) self.delay(post_dl)
[ "def", "tab", "(", "self", ",", "n", "=", "1", ",", "interval", "=", "0", ",", "pre_dl", "=", "None", ",", "post_dl", "=", "None", ")", ":", "self", ".", "delay", "(", "pre_dl", ")", "self", ".", "k", ".", "tap_key", "(", "self", ".", "k", "....
Tap ``tab`` key for ``n`` times, with ``interval`` seconds of interval. **中文文档** 以 ``interval`` 中定义的频率按下某个tab键 ``n`` 次。
[ "Tap", "tab", "key", "for", "n", "times", "with", "interval", "seconds", "of", "interval", "." ]
python
train
freelancer/freelancer-sdk-python
freelancersdk/resources/projects/projects.py
https://github.com/freelancer/freelancer-sdk-python/blob/e09034936d6f13b3909a9464ee329c81c1834941/freelancersdk/resources/projects/projects.py#L170-L190
def get_project_by_id(session, project_id, project_details=None, user_details=None): """ Get a single project by ID """ # GET /api/projects/0.1/projects/<int:project_id> query = {} if project_details: query.update(project_details) if user_details: query.update(user_details) response = make_get_request( session, 'projects/{}'.format(project_id), params_data=query) json_data = response.json() if response.status_code == 200: return json_data['result'] else: raise ProjectsNotFoundException( message=json_data['message'], error_code=json_data['error_code'], request_id=json_data['request_id'] )
[ "def", "get_project_by_id", "(", "session", ",", "project_id", ",", "project_details", "=", "None", ",", "user_details", "=", "None", ")", ":", "# GET /api/projects/0.1/projects/<int:project_id>", "query", "=", "{", "}", "if", "project_details", ":", "query", ".", ...
Get a single project by ID
[ "Get", "a", "single", "project", "by", "ID" ]
python
valid
fhcrc/taxtastic
taxtastic/taxonomy.py
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/taxonomy.py#L487-L541
def add_node(self, tax_id, parent_id, rank, names, source_name, children=None, is_valid=True, execute=True, **ignored): """Add a node to the taxonomy. ``source_name`` is added to table "source" if necessary. """ if ignored: log.info('some arguments were ignored: {} '.format(str(ignored))) children = children or [] self.verify_rank_integrity(tax_id, rank, parent_id, children) source_id, __ = self.add_source(source_name) assert isinstance(is_valid, bool) statements = [] # add node statements.append( self.nodes.insert().values( tax_id=tax_id, parent_id=parent_id, rank=rank, source_id=source_id)) # add names. Since this is a new node, at least one name must # be provided; if only one is provided, it is the primary # name. If more than one is primary, an error will be raised # from add_names() if len(names) == 1: names[0]['is_primary'] = True else: primary_names = [n['tax_name'] for n in names if n.get('is_primary')] if len(primary_names) != 1: raise ValueError( '`is_primary` must be True for exactly one name in `names`') for namedict in names: namedict['source_id'] = source_id if 'source_name' in namedict: del namedict['source_name'] statements.extend(self.add_names(tax_id, names, execute=False)) # add children and update source_id for child in children: statements.append(self.nodes.update( whereclause=self.nodes.c.tax_id == child, values={'parent_id': tax_id, 'source_id': source_id})) if execute: self.execute(statements) else: return statements
[ "def", "add_node", "(", "self", ",", "tax_id", ",", "parent_id", ",", "rank", ",", "names", ",", "source_name", ",", "children", "=", "None", ",", "is_valid", "=", "True", ",", "execute", "=", "True", ",", "*", "*", "ignored", ")", ":", "if", "ignore...
Add a node to the taxonomy. ``source_name`` is added to table "source" if necessary.
[ "Add", "a", "node", "to", "the", "taxonomy", "." ]
python
train
European-XFEL/karabo-bridge-py
karabo_bridge/client.py
https://github.com/European-XFEL/karabo-bridge-py/blob/ca20d72b8beb0039649d10cb01d027db42efd91c/karabo_bridge/client.py#L90-L122
def next(self): """Request next data container. This function call is blocking. Returns ------- data : dict The data for this train, keyed by source name. meta : dict The metadata for this train, keyed by source name. This dictionary is populated for protocol version 1.0 and 2.2. For other protocol versions, metadata information is available in `data` dict. Raises ------ TimeoutError If timeout is reached before receiving data. """ if self._pattern == zmq.REQ and not self._recv_ready: self._socket.send(b'next') self._recv_ready = True try: msg = self._socket.recv_multipart(copy=False) except zmq.error.Again: raise TimeoutError( 'No data received from {} in the last {} ms'.format( self._socket.getsockopt_string(zmq.LAST_ENDPOINT), self._socket.getsockopt(zmq.RCVTIMEO))) self._recv_ready = False return self._deserialize(msg)
[ "def", "next", "(", "self", ")", ":", "if", "self", ".", "_pattern", "==", "zmq", ".", "REQ", "and", "not", "self", ".", "_recv_ready", ":", "self", ".", "_socket", ".", "send", "(", "b'next'", ")", "self", ".", "_recv_ready", "=", "True", "try", "...
Request next data container. This function call is blocking. Returns ------- data : dict The data for this train, keyed by source name. meta : dict The metadata for this train, keyed by source name. This dictionary is populated for protocol version 1.0 and 2.2. For other protocol versions, metadata information is available in `data` dict. Raises ------ TimeoutError If timeout is reached before receiving data.
[ "Request", "next", "data", "container", "." ]
python
train
bitesofcode/projexui
projexui/widgets/xtoolbar.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xtoolbar.py#L213-L228
def setCollapsed(self, state): """ Sets whether or not this toolbar is in a collapsed state. :return <bool> changed """ if state == self._collapsed: return False self._collapsed = state self.refreshButton() if not self.signalsBlocked(): self.collapseToggled.emit(state) return True
[ "def", "setCollapsed", "(", "self", ",", "state", ")", ":", "if", "state", "==", "self", ".", "_collapsed", ":", "return", "False", "self", ".", "_collapsed", "=", "state", "self", ".", "refreshButton", "(", ")", "if", "not", "self", ".", "signalsBlocked...
Sets whether or not this toolbar is in a collapsed state. :return <bool> changed
[ "Sets", "whether", "or", "not", "this", "toolbar", "is", "in", "a", "collapsed", "state", ".", ":", "return", "<bool", ">", "changed" ]
python
train
Visgean/urljects
urljects/urljects.py
https://github.com/Visgean/urljects/blob/29a3ca03f639ea7a9ee2f795ed17941c86b278ba/urljects/urljects.py#L111-L148
def view_include(view_module, namespace=None, app_name=None): """ Includes view in the url, works similar to django include function. Auto imports all class based views that are subclass of ``URLView`` and all functional views that have been decorated with ``url_view``. :param view_module: object of the module or string with importable path :param namespace: name of the namespaces, it will be guessed otherwise :param app_name: application name :return: result of urls.include """ # since Django 1.8 patterns() are deprecated, list should be used instead # {priority:[views,]} view_dict = defaultdict(list) if isinstance(view_module, six.string_types): view_module = importlib.import_module(view_module) # pylint:disable=unused-variable for member_name, member in inspect.getmembers(view_module): is_class_view = inspect.isclass(member) and issubclass(member, URLView) is_func_view = (inspect.isfunction(member) and hasattr(member, 'urljects_view') and member.urljects_view) if (is_class_view and member is not URLView) or is_func_view: view_dict[member.url_priority].append( url(member.url, member, name=member.url_name)) view_patterns = list(*[ view_dict[priority] for priority in sorted(view_dict) ]) return urls.include( arg=view_patterns, namespace=namespace, app_name=app_name)
[ "def", "view_include", "(", "view_module", ",", "namespace", "=", "None", ",", "app_name", "=", "None", ")", ":", "# since Django 1.8 patterns() are deprecated, list should be used instead", "# {priority:[views,]}", "view_dict", "=", "defaultdict", "(", "list", ")", "if",...
Includes view in the url, works similar to django include function. Auto imports all class based views that are subclass of ``URLView`` and all functional views that have been decorated with ``url_view``. :param view_module: object of the module or string with importable path :param namespace: name of the namespaces, it will be guessed otherwise :param app_name: application name :return: result of urls.include
[ "Includes", "view", "in", "the", "url", "works", "similar", "to", "django", "include", "function", ".", "Auto", "imports", "all", "class", "based", "views", "that", "are", "subclass", "of", "URLView", "and", "all", "functional", "views", "that", "have", "bee...
python
train
g2p/rfc6266
rfc6266.py
https://github.com/g2p/rfc6266/blob/cad58963ed13f5e1068fcc9e4326123b6b2bdcf8/rfc6266.py#L244-L249
def parse_requests_response(response, **kwargs): """Build a ContentDisposition from a requests (PyPI) response. """ return parse_headers( response.headers.get('content-disposition'), response.url, **kwargs)
[ "def", "parse_requests_response", "(", "response", ",", "*", "*", "kwargs", ")", ":", "return", "parse_headers", "(", "response", ".", "headers", ".", "get", "(", "'content-disposition'", ")", ",", "response", ".", "url", ",", "*", "*", "kwargs", ")" ]
Build a ContentDisposition from a requests (PyPI) response.
[ "Build", "a", "ContentDisposition", "from", "a", "requests", "(", "PyPI", ")", "response", "." ]
python
train
apache/incubator-mxnet
python/mxnet/module/base_module.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/module/base_module.py#L278-L316
def iter_predict(self, eval_data, num_batch=None, reset=True, sparse_row_id_fn=None): """Iterates over predictions. Examples -------- >>> for pred, i_batch, batch in module.iter_predict(eval_data): ... # pred is a list of outputs from the module ... # i_batch is a integer ... # batch is the data batch from the data iterator Parameters ---------- eval_data : DataIter Evaluation data to run prediction on. num_batch : int Default is ``None``, indicating running all the batches in the data iterator. reset : bool Default is ``True``, indicating whether we should reset the data iter before start doing prediction. sparse_row_id_fn : A callback function The function takes `data_batch` as an input and returns a dict of str -> NDArray. The resulting dict is used for pulling row_sparse parameters from the kvstore, where the str key is the name of the param, and the value is the row id of the param to pull. """ assert self.binded and self.params_initialized if reset: eval_data.reset() for nbatch, eval_batch in enumerate(eval_data): if num_batch is not None and nbatch == num_batch: break self.prepare(eval_batch, sparse_row_id_fn=sparse_row_id_fn) self.forward(eval_batch, is_train=False) pad = eval_batch.pad outputs = [out[0:out.shape[0]-pad] for out in self.get_outputs()] yield (outputs, nbatch, eval_batch)
[ "def", "iter_predict", "(", "self", ",", "eval_data", ",", "num_batch", "=", "None", ",", "reset", "=", "True", ",", "sparse_row_id_fn", "=", "None", ")", ":", "assert", "self", ".", "binded", "and", "self", ".", "params_initialized", "if", "reset", ":", ...
Iterates over predictions. Examples -------- >>> for pred, i_batch, batch in module.iter_predict(eval_data): ... # pred is a list of outputs from the module ... # i_batch is a integer ... # batch is the data batch from the data iterator Parameters ---------- eval_data : DataIter Evaluation data to run prediction on. num_batch : int Default is ``None``, indicating running all the batches in the data iterator. reset : bool Default is ``True``, indicating whether we should reset the data iter before start doing prediction. sparse_row_id_fn : A callback function The function takes `data_batch` as an input and returns a dict of str -> NDArray. The resulting dict is used for pulling row_sparse parameters from the kvstore, where the str key is the name of the param, and the value is the row id of the param to pull.
[ "Iterates", "over", "predictions", "." ]
python
train
ArduPilot/MAVProxy
MAVProxy/modules/lib/grapher.py
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/lib/grapher.py#L155-L159
def flightmode_colour(self, flightmode): '''return colour to be used for rendering a flight mode background''' if flightmode not in self.flightmode_colourmap: self.flightmode_colourmap[flightmode] = self.next_flightmode_colour() return self.flightmode_colourmap[flightmode]
[ "def", "flightmode_colour", "(", "self", ",", "flightmode", ")", ":", "if", "flightmode", "not", "in", "self", ".", "flightmode_colourmap", ":", "self", ".", "flightmode_colourmap", "[", "flightmode", "]", "=", "self", ".", "next_flightmode_colour", "(", ")", ...
return colour to be used for rendering a flight mode background
[ "return", "colour", "to", "be", "used", "for", "rendering", "a", "flight", "mode", "background" ]
python
train
juju/charm-helpers
charmhelpers/fetch/snap.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/fetch/snap.py#L48-L75
def _snap_exec(commands): """ Execute snap commands. :param commands: List commands :return: Integer exit code """ assert type(commands) == list retry_count = 0 return_code = None while return_code is None or return_code == SNAP_NO_LOCK: try: return_code = subprocess.check_call(['snap'] + commands, env=os.environ) except subprocess.CalledProcessError as e: retry_count += + 1 if retry_count > SNAP_NO_LOCK_RETRY_COUNT: raise CouldNotAcquireLockException( 'Could not aquire lock after {} attempts' .format(SNAP_NO_LOCK_RETRY_COUNT)) return_code = e.returncode log('Snap failed to acquire lock, trying again in {} seconds.' .format(SNAP_NO_LOCK_RETRY_DELAY, level='WARN')) sleep(SNAP_NO_LOCK_RETRY_DELAY) return return_code
[ "def", "_snap_exec", "(", "commands", ")", ":", "assert", "type", "(", "commands", ")", "==", "list", "retry_count", "=", "0", "return_code", "=", "None", "while", "return_code", "is", "None", "or", "return_code", "==", "SNAP_NO_LOCK", ":", "try", ":", "re...
Execute snap commands. :param commands: List commands :return: Integer exit code
[ "Execute", "snap", "commands", "." ]
python
train
cloud-custodian/cloud-custodian
tools/c7n_azure/c7n_azure/query.py
https://github.com/cloud-custodian/cloud-custodian/blob/52ef732eb3d7bc939d1579faf519314814695c08/tools/c7n_azure/c7n_azure/query.py#L81-L106
def filter(self, resource_manager, **params): """Query a set of resources.""" m = self.resolve(resource_manager.resource_type) client = resource_manager.get_client() enum_op, list_op, extra_args = m.enum_spec parent_type, annotate_parent = m.parent_spec parents = self.manager.get_resource_manager(parent_type) # Have to query separately for each parent's children. results = [] for parent in parents.resources(): if extra_args: params.update({key: parent[extra_args[key]] for key in extra_args.keys()}) op = getattr(getattr(client, enum_op), list_op) subset = [r.serialize(True) for r in op(**params)] if annotate_parent: for r in subset: r[self.parent_key] = parent[parents.resource_type.id] if subset: results.extend(subset) return results
[ "def", "filter", "(", "self", ",", "resource_manager", ",", "*", "*", "params", ")", ":", "m", "=", "self", ".", "resolve", "(", "resource_manager", ".", "resource_type", ")", "client", "=", "resource_manager", ".", "get_client", "(", ")", "enum_op", ",", ...
Query a set of resources.
[ "Query", "a", "set", "of", "resources", "." ]
python
train
nchopin/particles
particles/mcmc.py
https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/mcmc.py#L161-L172
def update(self, v): """Adds point v""" self.t += 1 g = self.gamma() self.mu = (1. - g) * self.mu + g * v mv = v - self.mu self.Sigma = ((1. - g) * self.Sigma + g * np.dot(mv[:, np.newaxis], mv[np.newaxis, :])) try: self.L = cholesky(self.Sigma, lower=True) except LinAlgError: self.L = self.L0
[ "def", "update", "(", "self", ",", "v", ")", ":", "self", ".", "t", "+=", "1", "g", "=", "self", ".", "gamma", "(", ")", "self", ".", "mu", "=", "(", "1.", "-", "g", ")", "*", "self", ".", "mu", "+", "g", "*", "v", "mv", "=", "v", "-", ...
Adds point v
[ "Adds", "point", "v" ]
python
train
xolox/python-coloredlogs
coloredlogs/__init__.py
https://github.com/xolox/python-coloredlogs/blob/1cbf0c6bbee400c6ddbc43008143809934ec3e79/coloredlogs/__init__.py#L552-L562
def increase_verbosity(): """ Increase the verbosity of the root handler by one defined level. Understands custom logging levels like defined by my ``verboselogs`` module. """ defined_levels = sorted(set(find_defined_levels().values())) current_index = defined_levels.index(get_level()) selected_index = max(0, current_index - 1) set_level(defined_levels[selected_index])
[ "def", "increase_verbosity", "(", ")", ":", "defined_levels", "=", "sorted", "(", "set", "(", "find_defined_levels", "(", ")", ".", "values", "(", ")", ")", ")", "current_index", "=", "defined_levels", ".", "index", "(", "get_level", "(", ")", ")", "select...
Increase the verbosity of the root handler by one defined level. Understands custom logging levels like defined by my ``verboselogs`` module.
[ "Increase", "the", "verbosity", "of", "the", "root", "handler", "by", "one", "defined", "level", "." ]
python
train
pixelogik/NearPy
nearpy/experiments/distanceratioexperiment.py
https://github.com/pixelogik/NearPy/blob/1b534b864d320d875508e95cd2b76b6d8c07a90b/nearpy/experiments/distanceratioexperiment.py#L127-L214
def perform_experiment(self, engine_list): """ Performs nearest neighbour experiments with custom vector data for all engines in the specified list. Returns self.result contains list of (distance_ratio, search_time) tuple. All are the averaged values over all request vectors. search_time is the average retrieval/search time compared to the average exact search time. """ # We will fill this array with measures for all the engines. result = [] # For each engine, first index vectors and then retrieve neighbours for engine in engine_list: print('Engine %d / %d' % (engine_list.index(engine), len(engine_list))) # Clean storage engine.clean_all_buckets() # Use this to compute average distance_ratio avg_distance_ratio = 0.0 # Use this to compute average result set size avg_result_size = 0.0 # Use this to compute average search time avg_search_time = 0.0 # Index all vectors and store them for index in range(self.vectors.shape[1]): engine.store_vector(self.vectors[:, index], 'data_%d' % index) # Look for N nearest neighbours for query vectors for index in self.query_indices: # We have to time the search search_time_start = time.time() # Get nearest N according to engine nearest = engine.neighbours(self.vectors[:, index]) # Get search time search_time = time.time() - search_time_start # Get average distance ratio (with respect to radius # of real N closest neighbours) distance_ratio = 0.0 for n in nearest: # If the vector is outside the real neighbour radius if n[2] > self.nearest_radius[index]: # Compute distance to real neighbour radius d = (n[2] - self.nearest_radius[index]) # And normalize it. 
1.0 means: distance to # real neighbour radius is identical to radius d /= self.nearest_radius[index] # If all neighbours are in the radius, the # distance ratio is 0.0 distance_ratio += d # Normalize distance ratio over all neighbours distance_ratio /= len(nearest) # Add to accumulator avg_distance_ratio += distance_ratio # Add to accumulator avg_result_size += len(nearest) # Add to accumulator avg_search_time += search_time # Normalize distance ratio over query set avg_distance_ratio /= float(len(self.query_indices)) # Normalize avg result size avg_result_size /= float(len(self.query_indices)) # Normalize search time over query set avg_search_time = avg_search_time / float(len(self.query_indices)) # Normalize search time with respect to exact search avg_search_time /= self.exact_search_time_per_vector print(' distance_ratio=%f, result_size=%f, time=%f' % (avg_distance_ratio, avg_result_size, avg_search_time)) result.append((avg_distance_ratio, avg_result_size, avg_search_time)) return result
[ "def", "perform_experiment", "(", "self", ",", "engine_list", ")", ":", "# We will fill this array with measures for all the engines.", "result", "=", "[", "]", "# For each engine, first index vectors and then retrieve neighbours", "for", "engine", "in", "engine_list", ":", "pr...
Performs nearest neighbour experiments with custom vector data for all engines in the specified list. Returns self.result contains list of (distance_ratio, search_time) tuple. All are the averaged values over all request vectors. search_time is the average retrieval/search time compared to the average exact search time.
[ "Performs", "nearest", "neighbour", "experiments", "with", "custom", "vector", "data", "for", "all", "engines", "in", "the", "specified", "list", "." ]
python
train
baliame/http-hmac-python
httphmac/request.py
https://github.com/baliame/http-hmac-python/blob/9884c0cbfdb712f9f37080a8efbfdce82850785f/httphmac/request.py#L141-L150
def with_header(self, key, value): """Sets a header on the request and returns the request itself. The header key will be canonicalized before use. (see also: canonicalize_header) Keyword arguments: key -- the header's name value -- the string value for the header """ self.header[canonicalize_header(key)] = value return self
[ "def", "with_header", "(", "self", ",", "key", ",", "value", ")", ":", "self", ".", "header", "[", "canonicalize_header", "(", "key", ")", "]", "=", "value", "return", "self" ]
Sets a header on the request and returns the request itself. The header key will be canonicalized before use. (see also: canonicalize_header) Keyword arguments: key -- the header's name value -- the string value for the header
[ "Sets", "a", "header", "on", "the", "request", "and", "returns", "the", "request", "itself", ".", "The", "header", "key", "will", "be", "canonicalized", "before", "use", ".", "(", "see", "also", ":", "canonicalize_header", ")" ]
python
train
saltstack/salt
salt/returners/local_cache.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/local_cache.py#L413-L466
def clean_old_jobs(): ''' Clean out the old jobs from the job cache ''' if __opts__['keep_jobs'] != 0: jid_root = _job_dir() if not os.path.exists(jid_root): return # Keep track of any empty t_path dirs that need to be removed later dirs_to_remove = set() for top in os.listdir(jid_root): t_path = os.path.join(jid_root, top) if not os.path.exists(t_path): continue # Check if there are any stray/empty JID t_path dirs t_path_dirs = os.listdir(t_path) if not t_path_dirs and t_path not in dirs_to_remove: dirs_to_remove.add(t_path) continue for final in t_path_dirs: f_path = os.path.join(t_path, final) jid_file = os.path.join(f_path, 'jid') if not os.path.isfile(jid_file) and os.path.exists(f_path): # No jid file means corrupted cache entry, scrub it # by removing the entire f_path directory shutil.rmtree(f_path) elif os.path.isfile(jid_file): jid_ctime = os.stat(jid_file).st_ctime hours_difference = (time.time() - jid_ctime) / 3600.0 if hours_difference > __opts__['keep_jobs'] and os.path.exists(t_path): # Remove the entire f_path from the original JID dir try: shutil.rmtree(f_path) except OSError as err: log.error('Unable to remove %s: %s', f_path, err) # Remove empty JID dirs from job cache, if they're old enough. # JID dirs may be empty either from a previous cache-clean with the bug # Listed in #29286 still present, or the JID dir was only recently made # And the jid file hasn't been created yet. if dirs_to_remove: for t_path in dirs_to_remove: # Checking the time again prevents a possible race condition where # t_path JID dirs were created, but not yet populated by a jid file. t_path_ctime = os.stat(t_path).st_ctime hours_difference = (time.time() - t_path_ctime) / 3600.0 if hours_difference > __opts__['keep_jobs']: shutil.rmtree(t_path)
[ "def", "clean_old_jobs", "(", ")", ":", "if", "__opts__", "[", "'keep_jobs'", "]", "!=", "0", ":", "jid_root", "=", "_job_dir", "(", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "jid_root", ")", ":", "return", "# Keep track of any empty t_path ...
Clean out the old jobs from the job cache
[ "Clean", "out", "the", "old", "jobs", "from", "the", "job", "cache" ]
python
train
oscarbranson/latools
latools/filtering/filt_obj.py
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/filtering/filt_obj.py#L379-L407
def make_keydict(self, analyte=None): """ Make logical expressions describing the filter(s) for specified analyte(s). Parameters ---------- analyte : optional, str or array_like Name or list of names of analytes. Defaults to all analytes. Returns ------- dict containing the logical filter expression for each analyte. """ if analyte is None: analyte = self.analytes elif isinstance(analyte, str): analyte = [analyte] out = {} for a in analyte: key = [] for f in self.components.keys(): if self.switches[a][f]: key.append(f) out[a] = ' & '.join(sorted(key)) self.keydict = out return out
[ "def", "make_keydict", "(", "self", ",", "analyte", "=", "None", ")", ":", "if", "analyte", "is", "None", ":", "analyte", "=", "self", ".", "analytes", "elif", "isinstance", "(", "analyte", ",", "str", ")", ":", "analyte", "=", "[", "analyte", "]", "...
Make logical expressions describing the filter(s) for specified analyte(s). Parameters ---------- analyte : optional, str or array_like Name or list of names of analytes. Defaults to all analytes. Returns ------- dict containing the logical filter expression for each analyte.
[ "Make", "logical", "expressions", "describing", "the", "filter", "(", "s", ")", "for", "specified", "analyte", "(", "s", ")", "." ]
python
test
mitsei/dlkit
dlkit/json_/assessment_authoring/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/assessment_authoring/sessions.py#L1341-L1357
def get_banks_by_assessment_part(self, assessment_part_id): """Gets the ``Banks`` mapped to an ``AssessmentPart``. arg: assessment_part_id (osid.id.Id): ``Id`` of an ``AssessmentPart`` return: (osid.assessment.BankList) - list of banks raise: NotFound - ``assessment_part_id`` is not found raise: NullArgument - ``assessment_part_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ mgr = self._get_provider_manager('ASSESSMENT', local=True) lookup_session = mgr.get_bank_lookup_session(proxy=self._proxy) return lookup_session.get_banks_by_ids( self.get_bank_ids_by_assessment_part(assessment_part_id))
[ "def", "get_banks_by_assessment_part", "(", "self", ",", "assessment_part_id", ")", ":", "mgr", "=", "self", ".", "_get_provider_manager", "(", "'ASSESSMENT'", ",", "local", "=", "True", ")", "lookup_session", "=", "mgr", ".", "get_bank_lookup_session", "(", "prox...
Gets the ``Banks`` mapped to an ``AssessmentPart``. arg: assessment_part_id (osid.id.Id): ``Id`` of an ``AssessmentPart`` return: (osid.assessment.BankList) - list of banks raise: NotFound - ``assessment_part_id`` is not found raise: NullArgument - ``assessment_part_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
[ "Gets", "the", "Banks", "mapped", "to", "an", "AssessmentPart", "." ]
python
train
mathiasertl/xmpp-backends
xmpp_backends/base.py
https://github.com/mathiasertl/xmpp-backends/blob/214ef0664dbf90fa300c2483b9b3416559e5d171/xmpp_backends/base.py#L416-L427
def block_user(self, username, domain): """Block the specified user. The default implementation calls :py:func:`~xmpp_backends.base.XmppBackendBase.set_password` with a random password. :param username: The username of the user. :type username: str :param domain: The domain of the user. :type domain: str """ self.set_password(username, domain, self.get_random_password())
[ "def", "block_user", "(", "self", ",", "username", ",", "domain", ")", ":", "self", ".", "set_password", "(", "username", ",", "domain", ",", "self", ".", "get_random_password", "(", ")", ")" ]
Block the specified user. The default implementation calls :py:func:`~xmpp_backends.base.XmppBackendBase.set_password` with a random password. :param username: The username of the user. :type username: str :param domain: The domain of the user. :type domain: str
[ "Block", "the", "specified", "user", "." ]
python
train
materialsproject/pymatgen
pymatgen/analysis/local_env.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/local_env.py#L2111-L2218
def get_q4(self, thetas=None, phis=None): """ Calculates the value of the bond orientational order parameter of weight l=4. If the function is called with non-empty lists of polar and azimuthal angles the corresponding trigonometric terms are computed afresh. Otherwise, it is expected that the compute_trigonometric_terms function has been just called. Args: thetas ([float]): polar angles of all neighbors in radians. phis ([float]): azimuth angles of all neighbors in radians. Returns: float: bond orientational order parameter of weight l=4 corresponding to the input angles thetas and phis. """ if thetas is not None and phis is not None: self.compute_trigonometric_terms(thetas, phis) nnn = len(self._pow_sin_t[1]) nnn_range = range(nnn) i16_3 = 3.0 / 16.0 i8_3 = 3.0 / 8.0 sqrt_35_pi = sqrt(35.0 / pi) sqrt_35_2pi = sqrt(35.0 / (2.0 * pi)) sqrt_5_pi = sqrt(5.0 / pi) sqrt_5_2pi = sqrt(5.0 / (2.0 * pi)) sqrt_1_pi = sqrt(1.0 / pi) pre_y_4_4 = [i16_3 * sqrt_35_2pi * val for val in self._pow_sin_t[4]] pre_y_4_3 = [i8_3 * sqrt_35_pi * val[0] * val[1] \ for val in zip(self._pow_sin_t[3], self._pow_cos_t[1])] pre_y_4_2 = [i8_3 * sqrt_5_2pi * val[0] * (7.0 * val[1] - 1.0) \ for val in zip(self._pow_sin_t[2], self._pow_cos_t[2])] pre_y_4_1 = [i8_3 * sqrt_5_pi * val[0] * (7.0 * val[1] - 3.0 * val[2]) \ for val in zip(self._pow_sin_t[1], self._pow_cos_t[3], \ self._pow_cos_t[1])] acc = 0.0 # Y_4_-4 real = imag = 0.0 for i in nnn_range: real += pre_y_4_4[i] * self._cos_n_p[4][i] imag -= pre_y_4_4[i] * self._sin_n_p[4][i] acc += (real * real + imag * imag) # Y_4_-3 real = imag = 0.0 for i in nnn_range: real += pre_y_4_3[i] * self._cos_n_p[3][i] imag -= pre_y_4_3[i] * self._sin_n_p[3][i] acc += (real * real + imag * imag) # Y_4_-2 real = imag = 0.0 for i in nnn_range: real += pre_y_4_2[i] * self._cos_n_p[2][i] imag -= pre_y_4_2[i] * self._sin_n_p[2][i] acc += (real * real + imag * imag) # Y_4_-1 real = imag = 0.0 for i in nnn_range: real += pre_y_4_1[i] * self._cos_n_p[1][i] imag -= 
pre_y_4_1[i] * self._sin_n_p[1][i] acc += (real * real + imag * imag) # Y_4_0 real = imag = 0.0 for i in nnn_range: real += i16_3 * sqrt_1_pi * (35.0 * self._pow_cos_t[4][i] - \ 30.0 * self._pow_cos_t[2][i] + 3.0) acc += (real * real) # Y_4_1 real = imag = 0.0 for i in nnn_range: real -= pre_y_4_1[i] * self._cos_n_p[1][i] imag -= pre_y_4_1[i] * self._sin_n_p[1][i] acc += (real * real + imag * imag) # Y_4_2 real = imag = 0.0 for i in nnn_range: real += pre_y_4_2[i] * self._cos_n_p[2][i] imag += pre_y_4_2[i] * self._sin_n_p[2][i] acc += (real * real + imag * imag) # Y_4_3 real = imag = 0.0 for i in nnn_range: real -= pre_y_4_3[i] * self._cos_n_p[3][i] imag -= pre_y_4_3[i] * self._sin_n_p[3][i] acc += (real * real + imag * imag) # Y_4_4 real = imag = 0.0 for i in nnn_range: real += pre_y_4_4[i] * self._cos_n_p[4][i] imag += pre_y_4_4[i] * self._sin_n_p[4][i] acc += (real * real + imag * imag) q4 = sqrt(4.0 * pi * acc / (9.0 * float(nnn * nnn))) return q4
[ "def", "get_q4", "(", "self", ",", "thetas", "=", "None", ",", "phis", "=", "None", ")", ":", "if", "thetas", "is", "not", "None", "and", "phis", "is", "not", "None", ":", "self", ".", "compute_trigonometric_terms", "(", "thetas", ",", "phis", ")", "...
Calculates the value of the bond orientational order parameter of weight l=4. If the function is called with non-empty lists of polar and azimuthal angles the corresponding trigonometric terms are computed afresh. Otherwise, it is expected that the compute_trigonometric_terms function has been just called. Args: thetas ([float]): polar angles of all neighbors in radians. phis ([float]): azimuth angles of all neighbors in radians. Returns: float: bond orientational order parameter of weight l=4 corresponding to the input angles thetas and phis.
[ "Calculates", "the", "value", "of", "the", "bond", "orientational", "order", "parameter", "of", "weight", "l", "=", "4", ".", "If", "the", "function", "is", "called", "with", "non", "-", "empty", "lists", "of", "polar", "and", "azimuthal", "angles", "the",...
python
train
numenta/nupic
src/nupic/swarming/hypersearch/permutation_helpers.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/hypersearch/permutation_helpers.py#L346-L350
def setState(self, state): """See comments in base class.""" self._positionIdx = self.choices.index(state['_position']) self._bestPositionIdx = self.choices.index(state['bestPosition']) self._bestResult = state['bestResult']
[ "def", "setState", "(", "self", ",", "state", ")", ":", "self", ".", "_positionIdx", "=", "self", ".", "choices", ".", "index", "(", "state", "[", "'_position'", "]", ")", "self", ".", "_bestPositionIdx", "=", "self", ".", "choices", ".", "index", "(",...
See comments in base class.
[ "See", "comments", "in", "base", "class", "." ]
python
valid
pyparsing/pyparsing
examples/pymicko.py
https://github.com/pyparsing/pyparsing/blob/f0264bd8d1a548a50b3e5f7d99cfefd577942d14/examples/pymicko.py#L428-L431
def insert_local_var(self, vname, vtype, position): "Inserts a new local variable" index = self.insert_id(vname, SharedData.KINDS.LOCAL_VAR, [SharedData.KINDS.LOCAL_VAR, SharedData.KINDS.PARAMETER], vtype) self.table[index].attribute = position
[ "def", "insert_local_var", "(", "self", ",", "vname", ",", "vtype", ",", "position", ")", ":", "index", "=", "self", ".", "insert_id", "(", "vname", ",", "SharedData", ".", "KINDS", ".", "LOCAL_VAR", ",", "[", "SharedData", ".", "KINDS", ".", "LOCAL_VAR"...
Inserts a new local variable
[ "Inserts", "a", "new", "local", "variable" ]
python
train
knipknap/exscript
Exscript/util/buffer.py
https://github.com/knipknap/exscript/blob/72718eee3e87b345d5a5255be9824e867e42927b/Exscript/util/buffer.py#L137-L144
def clear(self): """ Removes all data from the buffer. """ self.io.seek(0) self.io.truncate() for item in self.monitors: item[2] = 0
[ "def", "clear", "(", "self", ")", ":", "self", ".", "io", ".", "seek", "(", "0", ")", "self", ".", "io", ".", "truncate", "(", ")", "for", "item", "in", "self", ".", "monitors", ":", "item", "[", "2", "]", "=", "0" ]
Removes all data from the buffer.
[ "Removes", "all", "data", "from", "the", "buffer", "." ]
python
train
sci-bots/dmf-device-ui
dmf_device_ui/canvas.py
https://github.com/sci-bots/dmf-device-ui/blob/05b480683c9fa43f91ce5a58de2fa90cdf363fc8/dmf_device_ui/canvas.py#L946-L1002
def on_widget__button_release_event(self, widget, event): ''' Called when any mouse button is released. .. versionchanged:: 0.11.3 Always reset pending route, regardless of whether a route was completed. This includes a) removing temporary routes from routes table, and b) resetting the state of the current route electrode queue. This fixes https://github.com/sci-bots/microdrop/issues/256. ''' event = event.copy() if self.mode == 'register_video' and (event.button == 1 and self.start_event is not None): self.emit('point-pair-selected', {'start_event': self.start_event, 'end_event': event.copy()}) self.start_event = None return elif self.mode == 'control': # XXX Negative `route_i` corresponds to temporary route being # drawn. Since release of mouse button terminates route drawing, # clear any rows corresponding to negative `route_i` values from # the routes table. self.df_routes = self.df_routes.loc[self.df_routes.route_i >= 0].copy() shape = self.canvas.find_shape(event.x, event.y) if shape is not None: electrode_data = {'electrode_id': shape, 'event': event.copy()} if event.button == 1: if gtk.gdk.BUTTON1_MASK == event.get_state(): if self._route.append(shape): self.emit('route-electrode-added', shape) if len(self._route.electrode_ids) == 1: # Single electrode, so select electrode. self.emit('electrode-selected', electrode_data) else: # Multiple electrodes, so select route. route = self._route self.emit('route-selected', route) elif (event.get_state() == (gtk.gdk.MOD1_MASK | gtk.gdk.BUTTON1_MASK) and self.last_pressed != shape): # `<Alt>` key was held down. self.emit('electrode-pair-selected', {'source_id': self.last_pressed, 'target_id': shape, 'event': event.copy()}) self.last_pressed = None elif event.button == 3: # Create right-click pop-up menu. menu = self.create_context_menu(event, shape) # Display menu popup menu.popup(None, None, None, event.button, event.time) # Clear route. self._route = None
[ "def", "on_widget__button_release_event", "(", "self", ",", "widget", ",", "event", ")", ":", "event", "=", "event", ".", "copy", "(", ")", "if", "self", ".", "mode", "==", "'register_video'", "and", "(", "event", ".", "button", "==", "1", "and", "self",...
Called when any mouse button is released. .. versionchanged:: 0.11.3 Always reset pending route, regardless of whether a route was completed. This includes a) removing temporary routes from routes table, and b) resetting the state of the current route electrode queue. This fixes https://github.com/sci-bots/microdrop/issues/256.
[ "Called", "when", "any", "mouse", "button", "is", "released", "." ]
python
train
galaxy-genome-annotation/python-apollo
apollo/users/__init__.py
https://github.com/galaxy-genome-annotation/python-apollo/blob/2bc9991302abe4402ec2885dcaac35915475b387/apollo/users/__init__.py#L137-L151
def add_to_group(self, group, user): """ Add a user to a group :type user: str :param user: User's email :type group: str :param group: Group name :rtype: dict :return: an empty dictionary """ data = {'group': group, 'user': user} return self.post('addUserToGroup', data)
[ "def", "add_to_group", "(", "self", ",", "group", ",", "user", ")", ":", "data", "=", "{", "'group'", ":", "group", ",", "'user'", ":", "user", "}", "return", "self", ".", "post", "(", "'addUserToGroup'", ",", "data", ")" ]
Add a user to a group :type user: str :param user: User's email :type group: str :param group: Group name :rtype: dict :return: an empty dictionary
[ "Add", "a", "user", "to", "a", "group" ]
python
train
aleju/imgaug
imgaug/augmentables/lines.py
https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmentables/lines.py#L941-L1041
def draw_on_image(self, image, color=(0, 255, 0), color_lines=None, color_points=None, alpha=1.0, alpha_lines=None, alpha_points=None, size=1, size_lines=None, size_points=None, antialiased=True, raise_if_out_of_image=False): """ Draw the line string on an image. Parameters ---------- image : ndarray The `(H,W,C)` `uint8` image onto which to draw the line string. color : iterable of int, optional Color to use as RGB, i.e. three values. The color of the line and points are derived from this value, unless they are set. color_lines : None or iterable of int Color to use for the line segments as RGB, i.e. three values. If ``None``, this value is derived from `color`. color_points : None or iterable of int Color to use for the points as RGB, i.e. three values. If ``None``, this value is derived from ``0.5 * color``. alpha : float, optional Opacity of the line string. Higher values denote more visible points. The alphas of the line and points are derived from this value, unless they are set. alpha_lines : None or float, optional Opacity of the line string. Higher values denote more visible line string. If ``None``, this value is derived from `alpha`. alpha_points : None or float, optional Opacity of the line string points. Higher values denote more visible points. If ``None``, this value is derived from `alpha`. size : int, optional Size of the line string. The sizes of the line and points are derived from this value, unless they are set. size_lines : None or int, optional Thickness of the line segments. If ``None``, this value is derived from `size`. size_points : None or int, optional Size of the points in pixels. If ``None``, this value is derived from ``3 * size``. antialiased : bool, optional Whether to draw the line with anti-aliasing activated. This does currently not affect the point drawing. raise_if_out_of_image : bool, optional Whether to raise an error if the line string is fully outside of the image. 
If set to False, no error will be raised and only the parts inside the image will be drawn. Returns ------- ndarray Image with line string drawn on it. """ assert color is not None assert alpha is not None assert size is not None color_lines = color_lines if color_lines is not None \ else np.float32(color) color_points = color_points if color_points is not None \ else np.float32(color) * 0.5 alpha_lines = alpha_lines if alpha_lines is not None \ else np.float32(alpha) alpha_points = alpha_points if alpha_points is not None \ else np.float32(alpha) size_lines = size_lines if size_lines is not None else size size_points = size_points if size_points is not None else size * 3 image = self.draw_lines_on_image( image, color=np.array(color_lines).astype(np.uint8), alpha=alpha_lines, size=size_lines, antialiased=antialiased, raise_if_out_of_image=raise_if_out_of_image) image = self.draw_points_on_image( image, color=np.array(color_points).astype(np.uint8), alpha=alpha_points, size=size_points, copy=False, raise_if_out_of_image=raise_if_out_of_image) return image
[ "def", "draw_on_image", "(", "self", ",", "image", ",", "color", "=", "(", "0", ",", "255", ",", "0", ")", ",", "color_lines", "=", "None", ",", "color_points", "=", "None", ",", "alpha", "=", "1.0", ",", "alpha_lines", "=", "None", ",", "alpha_point...
Draw the line string on an image. Parameters ---------- image : ndarray The `(H,W,C)` `uint8` image onto which to draw the line string. color : iterable of int, optional Color to use as RGB, i.e. three values. The color of the line and points are derived from this value, unless they are set. color_lines : None or iterable of int Color to use for the line segments as RGB, i.e. three values. If ``None``, this value is derived from `color`. color_points : None or iterable of int Color to use for the points as RGB, i.e. three values. If ``None``, this value is derived from ``0.5 * color``. alpha : float, optional Opacity of the line string. Higher values denote more visible points. The alphas of the line and points are derived from this value, unless they are set. alpha_lines : None or float, optional Opacity of the line string. Higher values denote more visible line string. If ``None``, this value is derived from `alpha`. alpha_points : None or float, optional Opacity of the line string points. Higher values denote more visible points. If ``None``, this value is derived from `alpha`. size : int, optional Size of the line string. The sizes of the line and points are derived from this value, unless they are set. size_lines : None or int, optional Thickness of the line segments. If ``None``, this value is derived from `size`. size_points : None or int, optional Size of the points in pixels. If ``None``, this value is derived from ``3 * size``. antialiased : bool, optional Whether to draw the line with anti-aliasing activated. This does currently not affect the point drawing. raise_if_out_of_image : bool, optional Whether to raise an error if the line string is fully outside of the image. If set to False, no error will be raised and only the parts inside the image will be drawn. Returns ------- ndarray Image with line string drawn on it.
[ "Draw", "the", "line", "string", "on", "an", "image", "." ]
python
valid
SheffieldML/GPyOpt
GPyOpt/interface/driver.py
https://github.com/SheffieldML/GPyOpt/blob/255539dc5927819ca701e44fe3d76cd4864222fa/GPyOpt/interface/driver.py#L58-L71
def _get_acquisition(self, model, space): """ Imports the acquisition """ from copy import deepcopy acqOpt_config = deepcopy(self.config['acquisition']['optimizer']) acqOpt_name = acqOpt_config['name'] del acqOpt_config['name'] from ..optimization import AcquisitionOptimizer acqOpt = AcquisitionOptimizer(space, acqOpt_name, **acqOpt_config) from ..acquisitions import select_acquisition return select_acquisition(self.config['acquisition']['type']).fromConfig(model, space, acqOpt, None, self.config['acquisition'])
[ "def", "_get_acquisition", "(", "self", ",", "model", ",", "space", ")", ":", "from", "copy", "import", "deepcopy", "acqOpt_config", "=", "deepcopy", "(", "self", ".", "config", "[", "'acquisition'", "]", "[", "'optimizer'", "]", ")", "acqOpt_name", "=", "...
Imports the acquisition
[ "Imports", "the", "acquisition" ]
python
train
VorskiImagineering/C3PO
c3po/converters/po_ods.py
https://github.com/VorskiImagineering/C3PO/blob/e3e35835e5ac24158848afed4f905ca44ac3ae00/c3po/converters/po_ods.py#L70-L81
def _write_row_into_ods(ods, sheet_no, row_no, row): """ Write row with translations to ods file into specified sheet and row_no. """ ods.content.getSheet(sheet_no) for j, col in enumerate(row): cell = ods.content.getCell(j, row_no+1) cell.stringValue(_escape_apostrophe(col)) if j % 2 == 1: cell.setCellColor(settings.EVEN_COLUMN_BG_COLOR) else: cell.setCellColor(settings.ODD_COLUMN_BG_COLOR)
[ "def", "_write_row_into_ods", "(", "ods", ",", "sheet_no", ",", "row_no", ",", "row", ")", ":", "ods", ".", "content", ".", "getSheet", "(", "sheet_no", ")", "for", "j", ",", "col", "in", "enumerate", "(", "row", ")", ":", "cell", "=", "ods", ".", ...
Write row with translations to ods file into specified sheet and row_no.
[ "Write", "row", "with", "translations", "to", "ods", "file", "into", "specified", "sheet", "and", "row_no", "." ]
python
test
etcher-be/emiz
emiz/avwx/core.py
https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/avwx/core.py#L710-L719
def _get_digit_list(alist: [str], from_index: int) -> ([str], [str]): # type: ignore """ Returns a list of items removed from a given list of strings that are all digits from 'from_index' until hitting a non-digit item """ ret = [] alist.pop(from_index) while len(alist) > from_index and alist[from_index].isdigit(): ret.append(alist.pop(from_index)) return alist, ret
[ "def", "_get_digit_list", "(", "alist", ":", "[", "str", "]", ",", "from_index", ":", "int", ")", "->", "(", "[", "str", "]", ",", "[", "str", "]", ")", ":", "# type: ignore", "ret", "=", "[", "]", "alist", ".", "pop", "(", "from_index", ")", "wh...
Returns a list of items removed from a given list of strings that are all digits from 'from_index' until hitting a non-digit item
[ "Returns", "a", "list", "of", "items", "removed", "from", "a", "given", "list", "of", "strings", "that", "are", "all", "digits", "from", "from_index", "until", "hitting", "a", "non", "-", "digit", "item" ]
python
train
sio2project/filetracker
filetracker/scripts/recover.py
https://github.com/sio2project/filetracker/blob/359b474850622e3d0c25ee2596d7242c02f84efb/filetracker/scripts/recover.py#L150-L158
def _read_stream_for_size(stream, buf_size=65536): """Reads a stream discarding the data read and returns its size.""" size = 0 while True: buf = stream.read(buf_size) size += len(buf) if not buf: break return size
[ "def", "_read_stream_for_size", "(", "stream", ",", "buf_size", "=", "65536", ")", ":", "size", "=", "0", "while", "True", ":", "buf", "=", "stream", ".", "read", "(", "buf_size", ")", "size", "+=", "len", "(", "buf", ")", "if", "not", "buf", ":", ...
Reads a stream discarding the data read and returns its size.
[ "Reads", "a", "stream", "discarding", "the", "data", "read", "and", "returns", "its", "size", "." ]
python
train
sryza/spark-timeseries
python/sparkts/models/RegressionARIMA.py
https://github.com/sryza/spark-timeseries/blob/280aa887dc08ab114411245268f230fdabb76eec/python/sparkts/models/RegressionARIMA.py#L20-L42
def fit_model(ts, regressors, method="cochrane-orcutt", optimizationArgs=None, sc=None): """ Parameters ---------- ts: time series to which to fit an ARIMA model as a Numpy array regressors: regression matrix as a Numpy array method: Regression method. Currently, only "cochrane-orcutt" is supported. optimizationArgs: sc: The SparkContext, required. returns an RegressionARIMAModel """ assert sc != None, "Missing SparkContext" jvm = sc._jvm jmodel = jvm.com.cloudera.sparkts.models.RegressionARIMA.fitModel(_nparray2breezevector(sc, ts), _nparray2breezematrix(sc, regressors), method, _py2scala_seq(sc, optimizationArgs)) return RegressionARIMAModel(jmodel=jmodel, sc=sc)
[ "def", "fit_model", "(", "ts", ",", "regressors", ",", "method", "=", "\"cochrane-orcutt\"", ",", "optimizationArgs", "=", "None", ",", "sc", "=", "None", ")", ":", "assert", "sc", "!=", "None", ",", "\"Missing SparkContext\"", "jvm", "=", "sc", ".", "_jvm...
Parameters ---------- ts: time series to which to fit an ARIMA model as a Numpy array regressors: regression matrix as a Numpy array method: Regression method. Currently, only "cochrane-orcutt" is supported. optimizationArgs: sc: The SparkContext, required. returns an RegressionARIMAModel
[ "Parameters", "----------", "ts", ":", "time", "series", "to", "which", "to", "fit", "an", "ARIMA", "model", "as", "a", "Numpy", "array", "regressors", ":", "regression", "matrix", "as", "a", "Numpy", "array", "method", ":", "Regression", "method", ".", "C...
python
train
WGBH/wagtail-streamfieldtools
streamfield_tools/registry.py
https://github.com/WGBH/wagtail-streamfieldtools/blob/192f86845532742b0b7d432bef3987357833b8ed/streamfield_tools/registry.py#L32-L49
def _verify_block(self, block_type, block): """ Verifies a block prior to registration. """ if block_type in self._registry: raise AlreadyRegistered( "A block has already been registered to the {} `block_type` " "in the registry. Either unregister that block before trying " "to register this block under a different `block_type`".format( block_type ) ) if not isinstance(block, Block): raise InvalidBlock( "The block you tried register to {} is invalid. Only " "instances of `wagtail.wagtailcore.blocks.Block` may be " "registered with the the block_registry.".format(block_type) )
[ "def", "_verify_block", "(", "self", ",", "block_type", ",", "block", ")", ":", "if", "block_type", "in", "self", ".", "_registry", ":", "raise", "AlreadyRegistered", "(", "\"A block has already been registered to the {} `block_type` \"", "\"in the registry. Either unregist...
Verifies a block prior to registration.
[ "Verifies", "a", "block", "prior", "to", "registration", "." ]
python
test
dask/dask-ml
dask_ml/decomposition/pca.py
https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/decomposition/pca.py#L375-L406
def inverse_transform(self, X): """Transform data back to its original space. Returns an array X_original whose transform would be X. Parameters ---------- X : array-like, shape (n_samples, n_components) New data, where n_samples in the number of samples and n_components is the number of components. Returns ------- X_original array-like, shape (n_samples, n_features) Notes ----- If whitening is enabled, inverse_transform does not compute the exact inverse operation of transform. """ check_is_fitted(self, "mean_") if self.whiten: return ( da.dot( X, np.sqrt(self.explained_variance_[:, np.newaxis]) * self.components_, ) + self.mean_ ) else: return da.dot(X, self.components_) + self.mean_
[ "def", "inverse_transform", "(", "self", ",", "X", ")", ":", "check_is_fitted", "(", "self", ",", "\"mean_\"", ")", "if", "self", ".", "whiten", ":", "return", "(", "da", ".", "dot", "(", "X", ",", "np", ".", "sqrt", "(", "self", ".", "explained_vari...
Transform data back to its original space. Returns an array X_original whose transform would be X. Parameters ---------- X : array-like, shape (n_samples, n_components) New data, where n_samples in the number of samples and n_components is the number of components. Returns ------- X_original array-like, shape (n_samples, n_features) Notes ----- If whitening is enabled, inverse_transform does not compute the exact inverse operation of transform.
[ "Transform", "data", "back", "to", "its", "original", "space", "." ]
python
train
ergoithz/browsepy
browsepy/manager.py
https://github.com/ergoithz/browsepy/blob/1612a930ef220fae507e1b152c531707e555bd92/browsepy/manager.py#L71-L79
def init_app(self, app): ''' Initialize this Flask extension for given app. ''' self.app = app if not hasattr(app, 'extensions'): app.extensions = {} app.extensions['plugin_manager'] = self self.reload()
[ "def", "init_app", "(", "self", ",", "app", ")", ":", "self", ".", "app", "=", "app", "if", "not", "hasattr", "(", "app", ",", "'extensions'", ")", ":", "app", ".", "extensions", "=", "{", "}", "app", ".", "extensions", "[", "'plugin_manager'", "]", ...
Initialize this Flask extension for given app.
[ "Initialize", "this", "Flask", "extension", "for", "given", "app", "." ]
python
train