repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
tomasbasham/dominos
dominos/api.py
https://github.com/tomasbasham/dominos/blob/59729a8bdca0ae30a84115a0e93e9b1f259faf0e/dominos/api.py#L173-L190
def add_side_to_basket(self, item, quantity=1):
    '''
    Add a side to the current basket.

    :param Item item: Item from menu.
    :param int quantity: The quantity of side to be added.
    :return: A response having added a side to the current basket.
    :rtype: requests.Response
    '''
    # Sides only come in a single (personal) variant.
    variant = item[VARIANT.PERSONAL]
    payload = {
        'productSkuId': variant['productSkuId'],
        'quantity': quantity,
        'ComplimentaryItems': [],
    }
    return self.__post('/Basket/AddProduct', json=payload)
[ "def", "add_side_to_basket", "(", "self", ",", "item", ",", "quantity", "=", "1", ")", ":", "item_variant", "=", "item", "[", "VARIANT", ".", "PERSONAL", "]", "params", "=", "{", "'productSkuId'", ":", "item_variant", "[", "'productSkuId'", "]", ",", "'qua...
Add a side to the current basket. :param Item item: Item from menu. :param int quantity: The quantity of side to be added. :return: A response having added a side to the current basket. :rtype: requests.Response
[ "Add", "a", "side", "to", "the", "current", "basket", "." ]
python
test
kislyuk/aegea
aegea/packages/github3/repos/repo.py
https://github.com/kislyuk/aegea/blob/94957e9dba036eae3052e2662c208b259c08399a/aegea/packages/github3/repos/repo.py#L512-L539
def create_deployment(self, ref, force=False, payload='', auto_merge=False,
                      description='', environment=None):
    """Create a deployment.

    :param str ref: (required), The ref to deploy. This can be a branch,
        tag, or sha.
    :param bool force: Optional parameter to bypass any ahead/behind
        checks or commit status checks. Default: False
    :param str payload: Optional JSON payload with extra information
        about the deployment. Default: ""
    :param bool auto_merge: Optional parameter to merge the default
        branch into the requested deployment branch if necessary.
        Default: False
    :param str description: Optional short description. Default: ""
    :param str environment: Optional name for the target deployment
        environment (e.g., production, staging, qa). Default: "production"
    :returns: :class:`Deployment <github3.repos.deployment.Deployment>`
    """
    # A deployment cannot be created without a ref.
    if not ref:
        return None
    url = self._build_url('deployments', base_url=self._api)
    data = {
        'ref': ref,
        'force': force,
        'payload': payload,
        'auto_merge': auto_merge,
        'description': description,
        'environment': environment,
    }
    # Drop keys whose value is None before posting.
    self._remove_none(data)
    response = self._json(
        self._post(url, data=data, headers=Deployment.CUSTOM_HEADERS), 201)
    return Deployment(response, self) if response else None
[ "def", "create_deployment", "(", "self", ",", "ref", ",", "force", "=", "False", ",", "payload", "=", "''", ",", "auto_merge", "=", "False", ",", "description", "=", "''", ",", "environment", "=", "None", ")", ":", "json", "=", "None", "if", "ref", "...
Create a deployment. :param str ref: (required), The ref to deploy. This can be a branch, tag, or sha. :param bool force: Optional parameter to bypass any ahead/behind checks or commit status checks. Default: False :param str payload: Optional JSON payload with extra information about the deployment. Default: "" :param bool auto_merge: Optional parameter to merge the default branch into the requested deployment branch if necessary. Default: False :param str description: Optional short description. Default: "" :param str environment: Optional name for the target deployment environment (e.g., production, staging, qa). Default: "production" :returns: :class:`Deployment <github3.repos.deployment.Deployment>`
[ "Create", "a", "deployment", "." ]
python
train
logston/py3s3
py3s3/storage.py
https://github.com/logston/py3s3/blob/1910ca60c53a53d839d6f7b09c05b555f3bfccf4/py3s3/storage.py#L194-L203
def _prepend_name_prefix(self, name):
    """Return file name (ie. path) with the prefix directory prepended.

    Both the prefix and the name are normalized so exactly one slash
    joins them.  Uses ``str.startswith`` instead of indexing ``[0]`` so
    an empty ``name`` (or prefix) no longer raises ``IndexError``.
    """
    if not self.name_prefix:
        return name
    base = self.name_prefix
    if not base.startswith('/'):
        base = '/' + base
    if not name.startswith('/'):
        name = '/' + name
    return base + name
[ "def", "_prepend_name_prefix", "(", "self", ",", "name", ")", ":", "if", "not", "self", ".", "name_prefix", ":", "return", "name", "base", "=", "self", ".", "name_prefix", "if", "base", "[", "0", "]", "!=", "'/'", ":", "base", "=", "'/'", "+", "base"...
Return file name (ie. path) with the prefix directory prepended
[ "Return", "file", "name", "(", "ie", ".", "path", ")", "with", "the", "prefix", "directory", "prepended" ]
python
train
openstack/proliantutils
proliantutils/ilo/ris.py
https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/ris.py#L199-L213
def _get_host_details(self):
    """Get the system details."""
    # Assuming only one system present as part of collection,
    # as we are dealing with iLO's here.
    status, headers, system = self._rest_get('/rest/v1/Systems/1')
    if status >= 300:
        # REST call failed: surface the extended error information.
        raise exception.IloError(self._get_extended_error(system))
    stype = self._get_type(system)
    if stype not in ('ComputerSystem.0', 'ComputerSystem.1'):
        raise exception.IloError("%s is not a valid system type " % stype)
    return system
[ "def", "_get_host_details", "(", "self", ")", ":", "# Assuming only one system present as part of collection,", "# as we are dealing with iLO's here.", "status", ",", "headers", ",", "system", "=", "self", ".", "_rest_get", "(", "'/rest/v1/Systems/1'", ")", "if", "status", ...
Get the system details.
[ "Get", "the", "system", "details", "." ]
python
train
dsoprea/NsqSpinner
nsq/master.py
https://github.com/dsoprea/NsqSpinner/blob/972237b8ddce737983bfed001fde52e5236be695/nsq/master.py#L44-L76
def __start_connection(self, context, node, ccallbacks=None):
    """Start a new connection, and manage it from a new greenlet."""
    _logger.debug("Creating connection object: CONTEXT=[%s] NODE=[%s]",
                  context, node)

    connection = nsq.connection.Connection(
        context,
        node,
        self.__identify,
        self.__message_handler,
        self.__quit_ev,
        ccallbacks,
        ignore_quit=self.__connection_ignore_quit)

    greenlet = gevent.spawn(connection.run)

    # Wait (bounded) for the greenlet to finish negotiating the connection.
    timeout_s = nsq.config.client.NEW_CONNECTION_NEGOTIATE_TIMEOUT_S
    if connection.connected_ev.wait(timeout_s) is False:
        _logger.error("New connection to server [%s] timed-out. Cleaning-"
                      "up thread.", node)
        # Tear the greenlet down; the next audit will retry this node.
        greenlet.kill()
        greenlet.join()
        raise EnvironmentError("Connection to server [%s] failed." % (node,))

    self.__connections.append((node, connection, greenlet))
[ "def", "__start_connection", "(", "self", ",", "context", ",", "node", ",", "ccallbacks", "=", "None", ")", ":", "_logger", ".", "debug", "(", "\"Creating connection object: CONTEXT=[%s] NODE=[%s]\"", ",", "context", ",", "node", ")", "c", "=", "nsq", ".", "co...
Start a new connection, and manage it from a new greenlet.
[ "Start", "a", "new", "connection", "and", "manage", "it", "from", "a", "new", "greenlet", "." ]
python
train
istresearch/scrapy-cluster
utils/scutils/log_factory.py
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/utils/scutils/log_factory.py#L62-L66
def is_subdict(self, a, b):
    '''
    Return True if a is a subdict of b

    :param dict a: candidate sub-dictionary
    :param dict b: dictionary to test against
    :return: True when every key/value pair of ``a`` is present in ``b``
    '''
    # dict.iteritems() was removed in Python 3; items() works on both
    # Python 2 and 3 with identical semantics here.
    return all(k in b and b[k] == v for k, v in a.items())
[ "def", "is_subdict", "(", "self", ",", "a", ",", "b", ")", ":", "return", "all", "(", "(", "k", "in", "b", "and", "b", "[", "k", "]", "==", "v", ")", "for", "k", ",", "v", "in", "a", ".", "iteritems", "(", ")", ")" ]
Return True if a is a subdict of b
[ "Return", "True", "if", "a", "is", "a", "subdict", "of", "b" ]
python
train
rocky/python-spark
example/python2/py2_scan.py
https://github.com/rocky/python-spark/blob/8899954bcf0e166726841a43e87c23790eb3441f/example/python2/py2_scan.py#L170-L186
def t_whitespace_or_comment(self, s):
    r'([ \t]*[#].*[^\x04][\n]?)|([ \t]+)'
    # NOTE: the raw string above is NOT documentation -- SPARK-style
    # scanners take each rule's token regex from the method docstring,
    # so it is load-bearing and must not be edited.
    if '#' in s:
        # We have a comment
        # Split any leading whitespace from the comment text so the
        # whitespace can feed indent/dedent tracking.
        matches = re.match('(\s+)(.*[\n]?)', s)
        if matches and self.is_newline:
            self.handle_indent_dedent(matches.group(1))
            s = matches.group(2)
        if s.endswith("\n"):
            # Emit the comment without its trailing newline, then the
            # newline as its own token.
            self.add_token('COMMENT', s[:-1])
            self.add_token('NEWLINE', "\n")
        else:
            self.add_token('COMMENT', s)
    elif self.is_newline:
        # Pure whitespace at the start of a line: indentation change.
        self.handle_indent_dedent(s)
        pass
    return
[ "def", "t_whitespace_or_comment", "(", "self", ",", "s", ")", ":", "if", "'#'", "in", "s", ":", "# We have a comment", "matches", "=", "re", ".", "match", "(", "'(\\s+)(.*[\\n]?)'", ",", "s", ")", "if", "matches", "and", "self", ".", "is_newline", ":", "...
r'([ \t]*[#].*[^\x04][\n]?)|([ \t]+)
[ "r", "(", "[", "\\", "t", "]", "*", "[", "#", "]", ".", "*", "[", "^", "\\", "x04", "]", "[", "\\", "n", "]", "?", ")", "|", "(", "[", "\\", "t", "]", "+", ")" ]
python
train
geophysics-ubonn/reda
lib/reda/configs/configManager.py
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/configs/configManager.py#L675-L753
def gen_configs_permutate(self, injections_raw,
                          only_same_dipole_length=False,
                          ignore_crossed_dipoles=False,
                          silent=False):
    """
    Create measurement configurations out of a pool of current
    injections. Use only the provided dipoles for potential dipole
    selection. This means that we have always reciprocal measurements.

    Remove quadpoles where electrodes are used both as current and
    voltage dipoles.

    Parameters
    ----------
    injections_raw : Nx2 array
        current injections
    only_same_dipole_length : bool, optional
        if True, only generate permutations for the same dipole length
    ignore_crossed_dipoles : bool, optional
        If True, potential dipoles will be ignored that lie between
        current dipoles, e.g. 1-4 3-5. In this case it is possible to
        not have full normal-reciprocal coverage.
    silent: bool, optional
        if True, do not print information on ignored configs (default:
        False)

    Returns
    -------
    configs : Nx4 array
        quadrupoles generated out of the current injections
    """
    injections = np.atleast_2d(injections_raw).astype(int)
    N = injections.shape[0]

    measurements = []

    for injection in range(0, N):
        # length (in electrode numbers) of the current injection dipole
        dipole_length = np.abs(
            injections[injection][1] -
            injections[injection][0]
        )

        # select all dipole EXCEPT for the injection dipole
        for i in set(range(0, N)) - set([injection]):
            test_dipole_length = np.abs(
                injections[i, :][1] - injections[i, :][0]
            )
            if (only_same_dipole_length and
                    test_dipole_length != dipole_length):
                continue
            # quadpole layout: [A, B, M, N] (current pair, voltage pair)
            quadpole = np.array(
                [injections[injection, :], injections[i, :]]
            ).flatten()
            if ignore_crossed_dipoles is True:
                # check if we need to ignore this dipole
                # Note: this could be wrong if electrode number are not
                # ascending!
                if (quadpole[2] > quadpole[0] and
                        quadpole[2] < quadpole[1]):
                    if not silent:
                        print('A - ignoring', quadpole)
                elif (quadpole[3] > quadpole[0] and
                        quadpole[3] < quadpole[1]):
                    if not silent:
                        print('B - ignoring', quadpole)
                else:
                    measurements.append(quadpole)
            else:
                # add every quadpole
                measurements.append(quadpole)

    # check and remove double use of electrodes (same electrode in both
    # the current pair and the voltage pair)
    filtered = []
    for quadpole in measurements:
        if (not set(quadpole[0:2]).isdisjoint(set(quadpole[2:4]))):
            if not silent:
                print('Ignoring quadrupole because of ',
                      'repeated electrode use:', quadpole)
        else:
            filtered.append(quadpole)
    self.add_to_configs(filtered)
    return np.array(filtered)
[ "def", "gen_configs_permutate", "(", "self", ",", "injections_raw", ",", "only_same_dipole_length", "=", "False", ",", "ignore_crossed_dipoles", "=", "False", ",", "silent", "=", "False", ")", ":", "injections", "=", "np", ".", "atleast_2d", "(", "injections_raw",...
Create measurement configurations out of a pool of current injections. Use only the provided dipoles for potential dipole selection. This means that we have always reciprocal measurements. Remove quadpoles where electrodes are used both as current and voltage dipoles. Parameters ---------- injections_raw : Nx2 array current injections only_same_dipole_length : bool, optional if True, only generate permutations for the same dipole length ignore_crossed_dipoles : bool, optional If True, potential dipoles will be ignored that lie between current dipoles, e.g. 1-4 3-5. In this case it is possible to not have full normal-reciprocal coverage. silent: bool, optional if True, do not print information on ignored configs (default: False) Returns ------- configs : Nx4 array quadrupoles generated out of the current injections
[ "Create", "measurement", "configurations", "out", "of", "a", "pool", "of", "current", "injections", ".", "Use", "only", "the", "provided", "dipoles", "for", "potential", "dipole", "selection", ".", "This", "means", "that", "we", "have", "always", "reciprocal", ...
python
train
hydpy-dev/hydpy
hydpy/core/sequencetools.py
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/core/sequencetools.py#L1158-L1237
def average_series(self, *args, **kwargs) -> InfoArray:
    """Average the actual time series of the |Variable| object for all
    time points.

    Method |IOSequence.average_series| works similarly as method
    |Variable.average_values| of class |Variable|, from which we borrow
    some examples. However, firstly, we have to prepare a |Timegrids|
    object to define the |IOSequence.series| length:

    >>> from hydpy import pub
    >>> pub.timegrids = '2000-01-01', '2000-01-04', '1d'

    As shown for method |Variable.average_values|, for 0-dimensional
    |IOSequence| objects the result of |IOSequence.average_series|
    equals |IOSequence.series| itself:

    >>> from hydpy.core.sequencetools import IOSequence
    >>> class SoilMoisture(IOSequence):
    ...     NDIM = 0
    >>> sm = SoilMoisture(None)
    >>> sm.activate_ram()
    >>> import numpy
    >>> sm.series = numpy.array([190.0, 200.0, 210.0])
    >>> sm.average_series()
    InfoArray([ 190., 200., 210.])

    For |IOSequence| objects with an increased dimensionality, a
    weighting parameter is required, again:

    >>> SoilMoisture.NDIM = 1
    >>> sm.shape = 3
    >>> sm.activate_ram()
    >>> sm.series = (
    ...     [190.0, 390.0, 490.0],
    ...     [200.0, 400.0, 500.0],
    ...     [210.0, 410.0, 510.0])
    >>> from hydpy.core.parametertools import Parameter
    >>> class Area(Parameter):
    ...     NDIM = 1
    ...     shape = (3,)
    ...     value = numpy.array([1.0, 1.0, 2.0])
    >>> area = Area(None)
    >>> SoilMoisture.refweights = property(lambda self: area)
    >>> sm.average_series()
    InfoArray([ 390., 400., 410.])

    The documentation on method |Variable.average_values| provides many
    examples on how to use different masks in different ways. Here we
    restrict ourselves to the first example, where a new mask enforces
    that |IOSequence.average_series| takes only the first two columns
    of the `series` into account:

    >>> from hydpy.core.masktools import DefaultMask
    >>> class Soil(DefaultMask):
    ...     @classmethod
    ...     def new(cls, variable, **kwargs):
    ...         return cls.array2mask([True, True, False])
    >>> SoilMoisture.mask = Soil()
    >>> sm.average_series()
    InfoArray([ 290., 300., 310.])
    """
    try:
        if not self.NDIM:
            # 0-dimensional: each time point already holds a scalar, so
            # the series itself is the "average".
            array = self.series
        else:
            mask = self.get_submask(*args, **kwargs)
            if numpy.any(mask):
                # Normalise the reference weights of the selected
                # entries so they sum to one.
                weights = self.refweights[mask]
                weights /= numpy.sum(weights)
                series = self.series[:, mask]
                # Sum the weighted values over all non-time axes.
                axes = tuple(range(1, self.NDIM+1))
                array = numpy.sum(weights*series, axis=axes)
            else:
                # Empty mask: no entries selected, no average defined.
                return numpy.nan
        return InfoArray(array, info={'type': 'mean'})
    except BaseException:
        # Re-raise with additional context about the failing sequence.
        objecttools.augment_excmessage(
            'While trying to calculate the mean value of '
            'the internal time series of sequence %s'
            % objecttools.devicephrase(self))
[ "def", "average_series", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "->", "InfoArray", ":", "try", ":", "if", "not", "self", ".", "NDIM", ":", "array", "=", "self", ".", "series", "else", ":", "mask", "=", "self", ".", "get_submas...
Average the actual time series of the |Variable| object for all time points. Method |IOSequence.average_series| works similarly as method |Variable.average_values| of class |Variable|, from which we borrow some examples. However, firstly, we have to prepare a |Timegrids| object to define the |IOSequence.series| length: >>> from hydpy import pub >>> pub.timegrids = '2000-01-01', '2000-01-04', '1d' As shown for method |Variable.average_values|, for 0-dimensional |IOSequence| objects the result of |IOSequence.average_series| equals |IOSequence.series| itself: >>> from hydpy.core.sequencetools import IOSequence >>> class SoilMoisture(IOSequence): ... NDIM = 0 >>> sm = SoilMoisture(None) >>> sm.activate_ram() >>> import numpy >>> sm.series = numpy.array([190.0, 200.0, 210.0]) >>> sm.average_series() InfoArray([ 190., 200., 210.]) For |IOSequence| objects with an increased dimensionality, a weighting parameter is required, again: >>> SoilMoisture.NDIM = 1 >>> sm.shape = 3 >>> sm.activate_ram() >>> sm.series = ( ... [190.0, 390.0, 490.0], ... [200.0, 400.0, 500.0], ... [210.0, 410.0, 510.0]) >>> from hydpy.core.parametertools import Parameter >>> class Area(Parameter): ... NDIM = 1 ... shape = (3,) ... value = numpy.array([1.0, 1.0, 2.0]) >>> area = Area(None) >>> SoilMoisture.refweights = property(lambda self: area) >>> sm.average_series() InfoArray([ 390., 400., 410.]) The documentation on method |Variable.average_values| provides many examples on how to use different masks in different ways. Here we restrict ourselves to the first example, where a new mask enforces that |IOSequence.average_series| takes only the first two columns of the `series` into account: >>> from hydpy.core.masktools import DefaultMask >>> class Soil(DefaultMask): ... @classmethod ... def new(cls, variable, **kwargs): ... return cls.array2mask([True, True, False]) >>> SoilMoisture.mask = Soil() >>> sm.average_series() InfoArray([ 290., 300., 310.])
[ "Average", "the", "actual", "time", "series", "of", "the", "|Variable|", "object", "for", "all", "time", "points", "." ]
python
train
saltstack/salt
salt/utils/thin.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/thin.py#L184-L233
def get_ext_tops(config):
    '''
    Get top directories for the dependencies, based on external
    configuration.

    :param dict config: mapping of namespace -> alternative-version
        configuration (``py-version``, ``path``, ``dependencies``).
    :return: dict of namespace -> configuration, where each entry's
        ``dependencies`` value has been replaced by the list of
        resolved module paths for that namespace.
    :raises salt.exceptions.SaltSystemExit: on a missing/invalid locked
        Python version or on missing required dependencies.
    '''
    config = copy.deepcopy(config)
    alternatives = {}
    for ns, cfg in (config or {}).items():
        alternatives[ns] = cfg
        locked_py_version = cfg.get('py-version')
        err_msg = None
        if not locked_py_version:
            err_msg = 'Alternative Salt library: missing specific locked Python version'
        elif not isinstance(locked_py_version, (tuple, list)):
            err_msg = ('Alternative Salt library: specific locked Python version '
                       'should be a list of major/minor version')
        if err_msg:
            raise salt.exceptions.SaltSystemExit(err_msg)

        if cfg.get('dependencies') == 'inherit':
            # TODO: implement inheritance of the modules from _here_
            raise NotImplementedError('This feature is not yet implemented')

        # BUG FIX: `tops` and `required` used to be initialised once,
        # outside this loop, so module paths leaked between namespaces,
        # the `required` check only worked for the first namespace, and
        # only the last namespace's 'dependencies' entry was updated.
        tops = []
        required = ['jinja2', 'yaml', 'tornado', 'msgpack']
        # `or {}` keeps a missing 'dependencies' key from raising
        # TypeError; the `required` check below then reports it cleanly.
        for dep in cfg.get('dependencies') or {}:
            mod = cfg['dependencies'][dep] or ''
            if not mod:
                log.warning('Module %s has missing configuration', dep)
                continue
            elif mod.endswith('.py') and not os.path.isfile(mod):
                log.warning('Module %s configured with not a file or does not exist: %s',
                            dep, mod)
                continue
            elif not mod.endswith('.py') and not os.path.isfile(os.path.join(mod, '__init__.py')):
                log.warning('Module %s is not a Python importable module with %s',
                            dep, mod)
                continue
            tops.append(mod)
            if dep in required:
                required.remove(dep)

        if required:
            msg = ('Missing dependencies for the alternative version'
                   ' in the external configuration: {}'.format(', '.join(required)))
            log.error(msg)
            raise salt.exceptions.SaltSystemExit(msg)
        alternatives[ns]['dependencies'] = tops

    return alternatives
[ "def", "get_ext_tops", "(", "config", ")", ":", "config", "=", "copy", ".", "deepcopy", "(", "config", ")", "alternatives", "=", "{", "}", "required", "=", "[", "'jinja2'", ",", "'yaml'", ",", "'tornado'", ",", "'msgpack'", "]", "tops", "=", "[", "]", ...
Get top directories for the dependencies, based on external configuration. :return:
[ "Get", "top", "directories", "for", "the", "dependencies", "based", "on", "external", "configuration", "." ]
python
train
adewes/blitzdb
blitzdb/backends/file/backend.py
https://github.com/adewes/blitzdb/blob/4b459e0bcde9e1f6224dd4e3bea74194586864b0/blitzdb/backends/file/backend.py#L567-L589
def _canonicalize_query(self, query):
    """
    Transform the query dictionary to replace e.g. documents with
    __ref__ fields.
    """

    def _convert(node):
        # Recurse into mappings and sequences; rewrite Document
        # instances as "collection:pk" reference strings.
        if isinstance(node, dict):
            return {key: _convert(value) for key, value in node.items()}
        if isinstance(node, (list, QuerySet, tuple)):
            return [_convert(element) for element in node]
        if isinstance(node, Document):
            collection = self.get_collection_for_obj(node)
            return "%s:%s" % (collection, node.pk)
        return node

    return _convert(query)
[ "def", "_canonicalize_query", "(", "self", ",", "query", ")", ":", "def", "transform_query", "(", "q", ")", ":", "if", "isinstance", "(", "q", ",", "dict", ")", ":", "nq", "=", "{", "}", "for", "key", ",", "value", "in", "q", ".", "items", "(", "...
Transform the query dictionary to replace e.g. documents with __ref__ fields.
[ "Transform", "the", "query", "dictionary", "to", "replace", "e", ".", "g", ".", "documents", "with", "__ref__", "fields", "." ]
python
train
hickeroar/simplebayes
simplebayes/__init__.py
https://github.com/hickeroar/simplebayes/blob/b8da72c50d20b6f8c0df2c2f39620715b08ddd32/simplebayes/__init__.py#L60-L75
def count_token_occurrences(cls, words):
    """
    Creates a key/value set of word/count for a given sample of text

    :param words: full list of all tokens, non-unique
    :type words: list
    :return: key/value pairs of words and their counts in the list
    :rtype: dict
    """
    # collections.Counter is the stdlib implementation of exactly this
    # hand-rolled counting loop; convert back to a plain dict so the
    # documented return type is unchanged.
    from collections import Counter
    return dict(Counter(words))
[ "def", "count_token_occurrences", "(", "cls", ",", "words", ")", ":", "counts", "=", "{", "}", "for", "word", "in", "words", ":", "if", "word", "in", "counts", ":", "counts", "[", "word", "]", "+=", "1", "else", ":", "counts", "[", "word", "]", "="...
Creates a key/value set of word/count for a given sample of text :param words: full list of all tokens, non-unique :type words: list :return: key/value pairs of words and their counts in the list :rtype: dict
[ "Creates", "a", "key", "/", "value", "set", "of", "word", "/", "count", "for", "a", "given", "sample", "of", "text" ]
python
train
MillionIntegrals/vel
vel/util/math.py
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/util/math.py#L3-L11
def divide_ceiling(numerator, denominator):
    """ Determine the smallest number k such, that denominator * k >= numerator """
    # Single divmod gives quotient and remainder at once; bump the
    # quotient by one whenever a positive remainder is left over.
    quotient, remainder = divmod(numerator, denominator)
    return quotient + 1 if remainder > 0 else quotient
[ "def", "divide_ceiling", "(", "numerator", ",", "denominator", ")", ":", "split_val", "=", "numerator", "//", "denominator", "rest", "=", "numerator", "%", "denominator", "if", "rest", ">", "0", ":", "return", "split_val", "+", "1", "else", ":", "return", ...
Determine the smallest number k such, that denominator * k >= numerator
[ "Determine", "the", "smallest", "number", "k", "such", "that", "denominator", "*", "k", ">", "=", "numerator" ]
python
train
argaen/aiocache
aiocache/base.py
https://github.com/argaen/aiocache/blob/fdd282f37283ca04e22209f4d2ae4900f29e1688/aiocache/base.py#L440-L459
async def raw(self, command, *args, _conn=None, **kwargs):
    """
    Send the raw command to the underlying client. Note that by using
    this CMD you will lose compatibility with other backends. Due to
    limitations with aiomcache client, args have to be provided as
    bytes. For rest of backends, str.

    :param command: str with the command.
    :param timeout: int or float in seconds specifying maximum timeout
        for the operations to last
    :returns: whatever the underlying client returns
    :raises: :class:`asyncio.TimeoutError` if it lasts more than self.timeout
    """
    started_at = time.monotonic()
    result = await self._raw(
        command,
        *args,
        encoding=self.serializer.encoding,
        _conn=_conn,
        **kwargs,
    )
    # Log the command together with its wall-clock duration.
    logger.debug("%s (%.4f)s", command, time.monotonic() - started_at)
    return result
[ "async", "def", "raw", "(", "self", ",", "command", ",", "*", "args", ",", "_conn", "=", "None", ",", "*", "*", "kwargs", ")", ":", "start", "=", "time", ".", "monotonic", "(", ")", "ret", "=", "await", "self", ".", "_raw", "(", "command", ",", ...
Send the raw command to the underlying client. Note that by using this CMD you will lose compatibility with other backends. Due to limitations with aiomcache client, args have to be provided as bytes. For rest of backends, str. :param command: str with the command. :param timeout: int or float in seconds specifying maximum timeout for the operations to last :returns: whatever the underlying client returns :raises: :class:`asyncio.TimeoutError` if it lasts more than self.timeout
[ "Send", "the", "raw", "command", "to", "the", "underlying", "client", ".", "Note", "that", "by", "using", "this", "CMD", "you", "will", "lose", "compatibility", "with", "other", "backends", "." ]
python
train
sprockets/sprockets-influxdb
sprockets_influxdb.py
https://github.com/sprockets/sprockets-influxdb/blob/cce73481b8f26b02e65e3f9914a9a22eceff3063/sprockets_influxdb.py#L896-L906
def set_tags(self, tags):
    """Set multiple tags for the measurement.

    :param dict tags: Tag key/value pairs to assign

    This will overwrite the current value assigned to a tag if one
    exists with the same name.

    """
    # Delegate each pair to set_tag so single-tag semantics apply.
    for name in tags:
        self.set_tag(name, tags[name])
[ "def", "set_tags", "(", "self", ",", "tags", ")", ":", "for", "key", ",", "value", "in", "tags", ".", "items", "(", ")", ":", "self", ".", "set_tag", "(", "key", ",", "value", ")" ]
Set multiple tags for the measurement. :param dict tags: Tag key/value pairs to assign This will overwrite the current value assigned to a tag if one exists with the same name.
[ "Set", "multiple", "tags", "for", "the", "measurement", "." ]
python
train
apache/incubator-heron
heron/tools/cli/src/python/args.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/tools/cli/src/python/args.py#L180-L211
def add_dry_run(parser):
    '''
    Add the ``--dry-run`` and ``--dry-run-format`` options.

    :param parser: argparse parser to extend
    :return: the same parser, with both options added
    '''
    resp_formats = ['raw', 'table', 'colored_table', 'json']
    available_options = ', '.join(resp_formats)
    # BUG FIX: the help text previously always claimed default=table
    # while the actual default was tty-conditional. Compute the
    # effective default once and use it in both places so the help
    # message matches reality.
    default_format = 'colored_table' if sys.stdout.isatty() else 'table'

    def dry_run_resp_format(value):
        # argparse `type=` callable: validate the requested format.
        if value not in resp_formats:
            raise argparse.ArgumentTypeError(
                'Invalid dry-run response format: %s. Available formats: %s'
                % (value, available_options))
        return value

    parser.add_argument(
        '--dry-run',
        default=False,
        action='store_true',
        help='Enable dry-run mode. Information about '
             'the command will print but no action will be taken on the topology')
    parser.add_argument(
        '--dry-run-format',
        metavar='DRY_RUN_FORMAT',
        default=default_format,
        type=dry_run_resp_format,
        help='The format of the dry-run output ([%s], default=%s). '
             'Ignored when dry-run mode is not enabled'
             % ('|'.join(resp_formats), default_format))
    return parser
[ "def", "add_dry_run", "(", "parser", ")", ":", "default_format", "=", "'table'", "resp_formats", "=", "[", "'raw'", ",", "'table'", ",", "'colored_table'", ",", "'json'", "]", "available_options", "=", "', '", ".", "join", "(", "[", "'%s'", "%", "opt", "fo...
:param parser: :return:
[ ":", "param", "parser", ":", ":", "return", ":" ]
python
valid
funilrys/PyFunceble
PyFunceble/syntax.py
https://github.com/funilrys/PyFunceble/blob/cdf69cbde120199171f7158e1c33635753e6e2f5/PyFunceble/syntax.py#L74-L104
def get(cls):
    """
    Execute the logic behind the Syntax handling.

    :return: The syntax status.
    :rtype: str
    """

    if PyFunceble.INTERN["to_test_type"] == "domain":
        # We are testing for domain or ip.

        if Check().is_domain_valid() or Check().is_ip_valid():
            # * The domain is valid.
            # or
            # * The IP is valid.

            # We handle and return the valid status.
            return SyntaxStatus(PyFunceble.STATUS["official"]["valid"]).handle()
    elif PyFunceble.INTERN["to_test_type"] == "url":
        # We are testing for URL.

        if Check().is_url_valid():
            # * The url is valid.

            # We handle and return the valid status.
            return SyntaxStatus(PyFunceble.STATUS["official"]["valid"]).handle()
    else:
        # BUG FIX: error message typo ("Unknow" -> "Unknown").
        raise Exception("Unknown test type.")

    # We handle and return the invalid status.
    return SyntaxStatus(PyFunceble.STATUS["official"]["invalid"]).handle()
[ "def", "get", "(", "cls", ")", ":", "if", "PyFunceble", ".", "INTERN", "[", "\"to_test_type\"", "]", "==", "\"domain\"", ":", "# We are testing for domain or ip.", "if", "Check", "(", ")", ".", "is_domain_valid", "(", ")", "or", "Check", "(", ")", ".", "is...
Execute the logic behind the Syntax handling. :return: The syntax status. :rtype: str
[ "Execute", "the", "logic", "behind", "the", "Syntax", "handling", "." ]
python
test
markchil/gptools
gptools/utils.py
https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/utils.py#L1767-L1899
def compute_stats(vals, check_nan=False, robust=False, axis=1, plot_QQ=False, bins=15, name=''): """Compute the average statistics (mean, std dev) for the given values. Parameters ---------- vals : array-like, (`M`, `D`) Values to compute the average statistics along the specified axis of. check_nan : bool, optional Whether or not to check for (and exclude) NaN's. Default is False (do not attempt to handle NaN's). robust : bool, optional Whether or not to use robust estimators (median for mean, IQR for standard deviation). Default is False (use non-robust estimators). axis : int, optional Axis to compute the statistics along. Presently only supported if `robust` is False. Default is 1. plot_QQ : bool, optional Whether or not a QQ plot and histogram should be drawn for each channel. Default is False (do not draw QQ plots). bins : int, optional Number of bins to use when plotting histogram (for plot_QQ=True). Default is 15 name : str, optional Name to put in the title of the QQ/histogram plot. Returns ------- mean : ndarray, (`M`,) Estimator for the mean of `vals`. std : ndarray, (`M`,) Estimator for the standard deviation of `vals`. Raises ------ NotImplementedError If `axis` != 1 when `robust` is True. NotImplementedError If `plot_QQ` is True. """ if axis != 1 and robust: raise NotImplementedError("Values of axis other than 1 are not supported " "with the robust keyword at this time!") if robust: # TODO: This stuff should really be vectorized if there is something that allows it! if check_nan: mean = scipy.stats.nanmedian(vals, axis=axis) # TODO: HANDLE AXIS PROPERLY! 
std = scipy.zeros(vals.shape[0], dtype=float) for k in xrange(0, len(vals)): ch = vals[k] ok_idxs = ~scipy.isnan(ch) if ok_idxs.any(): std[k] = (scipy.stats.scoreatpercentile(ch[ok_idxs], 75) - scipy.stats.scoreatpercentile(ch[ok_idxs], 25)) else: # Leave a nan where there are no non-nan values: std[k] = scipy.nan std /= IQR_TO_STD else: mean = scipy.median(vals, axis=axis) # TODO: HANDLE AXIS PROPERLY! std = scipy.asarray([scipy.stats.scoreatpercentile(ch, 75.0) - scipy.stats.scoreatpercentile(ch, 25.0) for ch in vals]) / IQR_TO_STD else: if check_nan: mean = scipy.stats.nanmean(vals, axis=axis) std = scipy.stats.nanstd(vals, axis=axis) else: mean = scipy.mean(vals, axis=axis) std = scipy.std(vals, axis=axis) if plot_QQ: f = plt.figure() gs = mplgs.GridSpec(2, 2, height_ratios=[8, 1]) a_QQ = f.add_subplot(gs[0, 0]) a_hist = f.add_subplot(gs[0, 1]) a_slider = f.add_subplot(gs[1, :]) title = f.suptitle("") def update(val): """Update the index from the results to be displayed. """ a_QQ.clear() a_hist.clear() idx = slider.val title.set_text("%s, n=%d" % (name, idx)) nan_idxs = scipy.isnan(vals[idx, :]) if not nan_idxs.all(): osm, osr = scipy.stats.probplot(vals[idx, ~nan_idxs], dist='norm', plot=None, fit=False) a_QQ.plot(osm, osr, 'bo', markersize=10) a_QQ.set_title('QQ plot') a_QQ.set_xlabel('quantiles of $\mathcal{N}(0,1)$') a_QQ.set_ylabel('quantiles of data') a_hist.hist(vals[idx, ~nan_idxs], bins=bins, normed=True) locs = scipy.linspace(vals[idx, ~nan_idxs].min(), vals[idx, ~nan_idxs].max()) a_hist.plot(locs, scipy.stats.norm.pdf(locs, loc=mean[idx], scale=std[idx])) a_hist.set_title('Normalized histogram and reported PDF') a_hist.set_xlabel('value') a_hist.set_ylabel('density') f.canvas.draw() def arrow_respond(slider, event): """Event handler for arrow key events in plot windows. 
Pass the slider object to update as a masked argument using a lambda function:: lambda evt: arrow_respond(my_slider, evt) Parameters ---------- slider : Slider instance associated with this handler. event : Event to be handled. """ if event.key == 'right': slider.set_val(min(slider.val + 1, slider.valmax)) elif event.key == 'left': slider.set_val(max(slider.val - 1, slider.valmin)) slider = mplw.Slider(a_slider, 'index', 0, len(vals) - 1, valinit=0, valfmt='%d') slider.on_changed(update) update(0) f.canvas.mpl_connect('key_press_event', lambda evt: arrow_respond(slider, evt)) return (mean, std)
[ "def", "compute_stats", "(", "vals", ",", "check_nan", "=", "False", ",", "robust", "=", "False", ",", "axis", "=", "1", ",", "plot_QQ", "=", "False", ",", "bins", "=", "15", ",", "name", "=", "''", ")", ":", "if", "axis", "!=", "1", "and", "robu...
Compute the average statistics (mean, std dev) for the given values. Parameters ---------- vals : array-like, (`M`, `D`) Values to compute the average statistics along the specified axis of. check_nan : bool, optional Whether or not to check for (and exclude) NaN's. Default is False (do not attempt to handle NaN's). robust : bool, optional Whether or not to use robust estimators (median for mean, IQR for standard deviation). Default is False (use non-robust estimators). axis : int, optional Axis to compute the statistics along. Presently only supported if `robust` is False. Default is 1. plot_QQ : bool, optional Whether or not a QQ plot and histogram should be drawn for each channel. Default is False (do not draw QQ plots). bins : int, optional Number of bins to use when plotting histogram (for plot_QQ=True). Default is 15 name : str, optional Name to put in the title of the QQ/histogram plot. Returns ------- mean : ndarray, (`M`,) Estimator for the mean of `vals`. std : ndarray, (`M`,) Estimator for the standard deviation of `vals`. Raises ------ NotImplementedError If `axis` != 1 when `robust` is True. NotImplementedError If `plot_QQ` is True.
[ "Compute", "the", "average", "statistics", "(", "mean", "std", "dev", ")", "for", "the", "given", "values", ".", "Parameters", "----------", "vals", ":", "array", "-", "like", "(", "M", "D", ")", "Values", "to", "compute", "the", "average", "statistics", ...
python
train
googledatalab/pydatalab
google/datalab/stackdriver/commands/_monitoring.py
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/stackdriver/commands/_monitoring.py#L28-L42
def sd(line, cell=None): """Implements the stackdriver cell magic for ipython notebooks. Args: line: the contents of the storage line. Returns: The results of executing the cell. """ parser = google.datalab.utils.commands.CommandParser(prog='%sd', description=( 'Execute various Stackdriver related operations. Use "%sd ' '<stackdriver_product> -h" for help on a specific Stackdriver product.')) # %%sd monitoring _create_monitoring_subparser(parser) return google.datalab.utils.commands.handle_magic_line(line, cell, parser)
[ "def", "sd", "(", "line", ",", "cell", "=", "None", ")", ":", "parser", "=", "google", ".", "datalab", ".", "utils", ".", "commands", ".", "CommandParser", "(", "prog", "=", "'%sd'", ",", "description", "=", "(", "'Execute various Stackdriver related operati...
Implements the stackdriver cell magic for ipython notebooks. Args: line: the contents of the storage line. Returns: The results of executing the cell.
[ "Implements", "the", "stackdriver", "cell", "magic", "for", "ipython", "notebooks", "." ]
python
train
dbcli/cli_helpers
cli_helpers/tabular_output/tsv_output_adapter.py
https://github.com/dbcli/cli_helpers/blob/3ebd891ac0c02bad061182dbcb54a47fb21980ae/cli_helpers/tabular_output/tsv_output_adapter.py#L13-L16
def adapter(data, headers, **kwargs): """Wrap the formatting inside a function for TabularOutputFormatter.""" for row in chain((headers,), data): yield "\t".join((replace(r, (('\n', r'\n'), ('\t', r'\t'))) for r in row))
[ "def", "adapter", "(", "data", ",", "headers", ",", "*", "*", "kwargs", ")", ":", "for", "row", "in", "chain", "(", "(", "headers", ",", ")", ",", "data", ")", ":", "yield", "\"\\t\"", ".", "join", "(", "(", "replace", "(", "r", ",", "(", "(", ...
Wrap the formatting inside a function for TabularOutputFormatter.
[ "Wrap", "the", "formatting", "inside", "a", "function", "for", "TabularOutputFormatter", "." ]
python
test
materialsproject/pymatgen
pymatgen/core/structure.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/core/structure.py#L3199-L3215
def append(self, species, coords, validate_proximity=True, properties=None): """ Appends a site to the molecule. Args: species: Species of inserted site coords: Coordinates of inserted site validate_proximity (bool): Whether to check if inserted site is too close to an existing site. Defaults to True. properties (dict): A dict of properties for the Site. Returns: New molecule with inserted site. """ return self.insert(len(self), species, coords, validate_proximity=validate_proximity, properties=properties)
[ "def", "append", "(", "self", ",", "species", ",", "coords", ",", "validate_proximity", "=", "True", ",", "properties", "=", "None", ")", ":", "return", "self", ".", "insert", "(", "len", "(", "self", ")", ",", "species", ",", "coords", ",", "validate_...
Appends a site to the molecule. Args: species: Species of inserted site coords: Coordinates of inserted site validate_proximity (bool): Whether to check if inserted site is too close to an existing site. Defaults to True. properties (dict): A dict of properties for the Site. Returns: New molecule with inserted site.
[ "Appends", "a", "site", "to", "the", "molecule", "." ]
python
train
praekelt/panya
panya/templatetags/panya_template_tags.py
https://github.com/praekelt/panya/blob/0fd621e15a7c11a2716a9554a2f820d6259818e5/panya/templatetags/panya_template_tags.py#L12-L25
def smart_query_string(parser, token): """ Outputs current GET query string with additions appended. Additions are provided in token pairs. """ args = token.split_contents() additions = args[1:] addition_pairs = [] while additions: addition_pairs.append(additions[0:2]) additions = additions[2:] return SmartQueryStringNode(addition_pairs)
[ "def", "smart_query_string", "(", "parser", ",", "token", ")", ":", "args", "=", "token", ".", "split_contents", "(", ")", "additions", "=", "args", "[", "1", ":", "]", "addition_pairs", "=", "[", "]", "while", "additions", ":", "addition_pairs", ".", "a...
Outputs current GET query string with additions appended. Additions are provided in token pairs.
[ "Outputs", "current", "GET", "query", "string", "with", "additions", "appended", ".", "Additions", "are", "provided", "in", "token", "pairs", "." ]
python
train
DLR-RM/RAFCON
source/rafcon/core/storage/storage.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/core/storage/storage.py#L374-L467
def load_state_recursively(parent, state_path=None, dirty_states=[]): """Recursively loads the state It calls this method on each sub-state of a container state. :param parent: the root state of the last load call to which the loaded state will be added :param state_path: the path on the filesystem where to find the meta file for the state :param dirty_states: a dict of states which changed during loading :return: """ from rafcon.core.states.execution_state import ExecutionState from rafcon.core.states.container_state import ContainerState from rafcon.core.states.hierarchy_state import HierarchyState path_core_data = os.path.join(state_path, FILE_NAME_CORE_DATA) logger.debug("Load state recursively: {0}".format(str(state_path))) # TODO: Should be removed with next minor release if not os.path.exists(path_core_data): path_core_data = os.path.join(state_path, FILE_NAME_CORE_DATA_OLD) try: state_info = load_data_file(path_core_data) except ValueError as e: logger.exception("Error while loading state data: {0}".format(e)) return except LibraryNotFoundException as e: logger.error("Library could not be loaded: {0}\n" "Skipping library and continuing loading the state machine".format(e)) state_info = storage_utils.load_objects_from_json(path_core_data, as_dict=True) state_id = state_info["state_id"] dummy_state = HierarchyState(LIBRARY_NOT_FOUND_DUMMY_STATE_NAME, state_id=state_id) # set parent of dummy state if isinstance(parent, ContainerState): parent.add_state(dummy_state, storage_load=True) else: dummy_state.parent = parent return dummy_state # Transitions and data flows are not added when loading a state, as also states are not added. 
# We have to wait until the child states are loaded, before adding transitions and data flows, as otherwise the # validity checks for transitions and data flows would fail if not isinstance(state_info, tuple): state = state_info else: state = state_info[0] transitions = state_info[1] data_flows = state_info[2] # set parent of state if parent is not None and isinstance(parent, ContainerState): parent.add_state(state, storage_load=True) else: state.parent = parent # read script file if an execution state if isinstance(state, ExecutionState): script_text = read_file(state_path, state.script.filename) state.script_text = script_text # load semantic data try: semantic_data = load_data_file(os.path.join(state_path, SEMANTIC_DATA_FILE)) state.semantic_data = semantic_data except Exception as e: # semantic data file does not have to be there pass one_of_my_child_states_not_found = False # load child states for p in os.listdir(state_path): child_state_path = os.path.join(state_path, p) if os.path.isdir(child_state_path): child_state = load_state_recursively(state, child_state_path, dirty_states) if child_state.name is LIBRARY_NOT_FOUND_DUMMY_STATE_NAME: one_of_my_child_states_not_found = True if one_of_my_child_states_not_found: # omit adding transitions and data flows in this case pass else: # Now we can add transitions and data flows, as all child states were added if isinstance(state_info, tuple): state.transitions = transitions state.data_flows = data_flows state.file_system_path = state_path if state.marked_dirty: dirty_states.append(state) return state
[ "def", "load_state_recursively", "(", "parent", ",", "state_path", "=", "None", ",", "dirty_states", "=", "[", "]", ")", ":", "from", "rafcon", ".", "core", ".", "states", ".", "execution_state", "import", "ExecutionState", "from", "rafcon", ".", "core", "."...
Recursively loads the state It calls this method on each sub-state of a container state. :param parent: the root state of the last load call to which the loaded state will be added :param state_path: the path on the filesystem where to find the meta file for the state :param dirty_states: a dict of states which changed during loading :return:
[ "Recursively", "loads", "the", "state" ]
python
train
crs4/hl7apy
hl7apy/core.py
https://github.com/crs4/hl7apy/blob/91be488e9274f6ec975519a1d9c17045bc91bf74/hl7apy/core.py#L788-L809
def encoding_chars(self): """ A ``dict`` with the encoding chars of the :class:`Element <hl7apy.core.Element>`. If the :class:`Element <hl7apy.core.Element>` has a parent it is the parent's ``encoding_chars`` otherwise the ones returned by :func:`get_default_encoding_chars <hl7apy.get_default_encoding_chars>` The structure of the ``dict`` is: .. code-block:: python {'SEGMENT' : '\\r', 'GROUP': '\\r', 'FIELD' : '|', 'COMPONENT' : '^', 'SUBCOMPONENT' : '&', 'REPETITION' : '~', 'ESCAPE' : '\\'} """ if self.parent is not None: return self.parent.encoding_chars return get_default_encoding_chars(self.version)
[ "def", "encoding_chars", "(", "self", ")", ":", "if", "self", ".", "parent", "is", "not", "None", ":", "return", "self", ".", "parent", ".", "encoding_chars", "return", "get_default_encoding_chars", "(", "self", ".", "version", ")" ]
A ``dict`` with the encoding chars of the :class:`Element <hl7apy.core.Element>`. If the :class:`Element <hl7apy.core.Element>` has a parent it is the parent's ``encoding_chars`` otherwise the ones returned by :func:`get_default_encoding_chars <hl7apy.get_default_encoding_chars>` The structure of the ``dict`` is: .. code-block:: python {'SEGMENT' : '\\r', 'GROUP': '\\r', 'FIELD' : '|', 'COMPONENT' : '^', 'SUBCOMPONENT' : '&', 'REPETITION' : '~', 'ESCAPE' : '\\'}
[ "A", "dict", "with", "the", "encoding", "chars", "of", "the", ":", "class", ":", "Element", "<hl7apy", ".", "core", ".", "Element", ">", ".", "If", "the", ":", "class", ":", "Element", "<hl7apy", ".", "core", ".", "Element", ">", "has", "a", "parent"...
python
train
senaite/senaite.core
bika/lims/browser/referencesample.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/referencesample.py#L209-L237
def folderitem(self, obj, item, index): """Service triggered each time an item is iterated in folderitems. The use of this service prevents the extra-loops in child objects. :obj: the instance of the class to be foldered :item: dict containing the properties of the object to be used by the template :index: current index of the item """ item = super(ReferenceAnalysesView, self).folderitem(obj, item, index) if not item: return None item["Category"] = obj.getCategoryTitle ref_analysis = api.get_object(obj) ws = ref_analysis.getWorksheet() if not ws: logger.warn( "No Worksheet found for ReferenceAnalysis {}" .format(obj.getId)) else: item["Worksheet"] = ws.Title() anchor = "<a href='%s'>%s</a>" % (ws.absolute_url(), ws.Title()) item["replace"]["Worksheet"] = anchor # Add the analysis to the QC Chart self.chart.add_analysis(obj) return item
[ "def", "folderitem", "(", "self", ",", "obj", ",", "item", ",", "index", ")", ":", "item", "=", "super", "(", "ReferenceAnalysesView", ",", "self", ")", ".", "folderitem", "(", "obj", ",", "item", ",", "index", ")", "if", "not", "item", ":", "return"...
Service triggered each time an item is iterated in folderitems. The use of this service prevents the extra-loops in child objects. :obj: the instance of the class to be foldered :item: dict containing the properties of the object to be used by the template :index: current index of the item
[ "Service", "triggered", "each", "time", "an", "item", "is", "iterated", "in", "folderitems", "." ]
python
train
UDST/orca
orca/orca.py
https://github.com/UDST/orca/blob/07b34aeef13cc87c966b2e30cbe7e76cc9d3622c/orca/orca.py#L1146-L1169
def column(table_name, column_name=None, cache=False, cache_scope=_CS_FOREVER): """ Decorates functions that return a Series. Decorator version of `add_column`. Series index must match the named table. Column name defaults to name of function. The function's argument names and keyword argument values will be matched to registered variables when the function needs to be evaluated by Orca. The argument name "iter_var" may be used to have the current iteration variable injected. The index of the returned Series must match the named table. """ def decorator(func): if column_name: name = column_name else: name = func.__name__ add_column( table_name, name, func, cache=cache, cache_scope=cache_scope) return func return decorator
[ "def", "column", "(", "table_name", ",", "column_name", "=", "None", ",", "cache", "=", "False", ",", "cache_scope", "=", "_CS_FOREVER", ")", ":", "def", "decorator", "(", "func", ")", ":", "if", "column_name", ":", "name", "=", "column_name", "else", ":...
Decorates functions that return a Series. Decorator version of `add_column`. Series index must match the named table. Column name defaults to name of function. The function's argument names and keyword argument values will be matched to registered variables when the function needs to be evaluated by Orca. The argument name "iter_var" may be used to have the current iteration variable injected. The index of the returned Series must match the named table.
[ "Decorates", "functions", "that", "return", "a", "Series", "." ]
python
train
saltstack/salt
salt/states/pyenv.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/pyenv.py#L104-L139
def installed(name, default=False, user=None): ''' Verify that the specified python is installed with pyenv. pyenv is installed if necessary. name The version of python to install default : False Whether to make this python the default. user: None The user to run pyenv as. .. versionadded:: 0.17.0 .. versionadded:: 0.16.0 ''' ret = {'name': name, 'result': None, 'comment': '', 'changes': {}} if name.startswith('python-'): name = re.sub(r'^python-', '', name) if __opts__['test']: ret['comment'] = 'python {0} is set to be installed'.format(name) return ret ret = _check_pyenv(ret, user) if ret['result'] is False: if not __salt__['pyenv.install'](user): ret['comment'] = 'pyenv failed to install' return ret else: return _check_and_install_python(ret, name, default, user=user) else: return _check_and_install_python(ret, name, default, user=user)
[ "def", "installed", "(", "name", ",", "default", "=", "False", ",", "user", "=", "None", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'result'", ":", "None", ",", "'comment'", ":", "''", ",", "'changes'", ":", "{", "}", "}", "if", "nam...
Verify that the specified python is installed with pyenv. pyenv is installed if necessary. name The version of python to install default : False Whether to make this python the default. user: None The user to run pyenv as. .. versionadded:: 0.17.0 .. versionadded:: 0.16.0
[ "Verify", "that", "the", "specified", "python", "is", "installed", "with", "pyenv", ".", "pyenv", "is", "installed", "if", "necessary", "." ]
python
train
JoelBender/bacpypes
py27/bacpypes/primitivedata.py
https://github.com/JoelBender/bacpypes/blob/4111b8604a16fa2b7f80d8104a43b9f3e28dfc78/py27/bacpypes/primitivedata.py#L1731-L1746
def get_tuple(self): """Return the unsigned integer tuple of the identifier.""" objType, objInstance = self.value if isinstance(objType, int): pass elif isinstance(objType, long): objType = int(objType) elif isinstance(objType, basestring): # turn it back into an integer objType = self.objectTypeClass()[objType] else: raise TypeError("invalid datatype for objType") # pack the components together return (objType, objInstance)
[ "def", "get_tuple", "(", "self", ")", ":", "objType", ",", "objInstance", "=", "self", ".", "value", "if", "isinstance", "(", "objType", ",", "int", ")", ":", "pass", "elif", "isinstance", "(", "objType", ",", "long", ")", ":", "objType", "=", "int", ...
Return the unsigned integer tuple of the identifier.
[ "Return", "the", "unsigned", "integer", "tuple", "of", "the", "identifier", "." ]
python
train
TissueMAPS/TmClient
src/python/tmclient/base.py
https://github.com/TissueMAPS/TmClient/blob/6fb40622af19142cb5169a64b8c2965993a25ab1/src/python/tmclient/base.py#L63-L91
def _init_session(self): ''' Delayed initialization of Requests Session object. This is done in order *not* to share the Session object across a multiprocessing pool. ''' self._real_session = requests.Session() # FIXME: this fails when one runs HTTPS on non-standard ports, # e.g. https://tissuemaps.example.org:8443/ if self._port == 443: logger.debug('initializing HTTPS session') self._real_base_url = 'https://{host}:{port}'.format(host=self._host, port=self._port) self._real_adapter = self._real_session.adapters['https://'] if self._ca_bundle is not None: logger.debug('use CA bundle: %s', self._ca_bundle) ca_bundle = os.path.expanduser(os.path.expandvars(self._ca_bundle)) if not os.path.exists(ca_bundle): raise OSError( 'CA bundle file does not exist: {0}'.format(ca_bundle) ) self._real_session.verify = ca_bundle else: logger.debug('initializing HTTP session') self._real_base_url = 'http://{host}:{port}'.format(host=self._host, port=self._port) self._real_adapter = self._real_session.adapters['http://'] self._real_session.get(self._real_base_url) self._real_session.headers.update({'Host': self._host}) self._login(self._username, self._password)
[ "def", "_init_session", "(", "self", ")", ":", "self", ".", "_real_session", "=", "requests", ".", "Session", "(", ")", "# FIXME: this fails when one runs HTTPS on non-standard ports,", "# e.g. https://tissuemaps.example.org:8443/", "if", "self", ".", "_port", "==", "443"...
Delayed initialization of Requests Session object. This is done in order *not* to share the Session object across a multiprocessing pool.
[ "Delayed", "initialization", "of", "Requests", "Session", "object", "." ]
python
train
zalando/patroni
patroni/scripts/aws.py
https://github.com/zalando/patroni/blob/f6d29081c90af52064b981cdd877a07338d86038/patroni/scripts/aws.py#L43-L47
def _tag_ebs(self, conn, role): """ set tags, carrying the cluster name, instance role and instance id for the EBS storage """ tags = {'Name': 'spilo_' + self.cluster_name, 'Role': role, 'Instance': self.instance_id} volumes = conn.get_all_volumes(filters={'attachment.instance-id': self.instance_id}) conn.create_tags([v.id for v in volumes], tags)
[ "def", "_tag_ebs", "(", "self", ",", "conn", ",", "role", ")", ":", "tags", "=", "{", "'Name'", ":", "'spilo_'", "+", "self", ".", "cluster_name", ",", "'Role'", ":", "role", ",", "'Instance'", ":", "self", ".", "instance_id", "}", "volumes", "=", "c...
set tags, carrying the cluster name, instance role and instance id for the EBS storage
[ "set", "tags", "carrying", "the", "cluster", "name", "instance", "role", "and", "instance", "id", "for", "the", "EBS", "storage" ]
python
train
log2timeline/dfdatetime
dfdatetime/precisions.py
https://github.com/log2timeline/dfdatetime/blob/141ca4ef1eff3d354b5deaac3d81cb08506f98d6/dfdatetime/precisions.py#L131-L157
def CopyToDateTimeString(cls, time_elements_tuple, fraction_of_second): """Copies the time elements and fraction of second to a string. Args: time_elements_tuple (tuple[int, int, int, int, int, int]): time elements, contains year, month, day of month, hours, minutes and seconds. fraction_of_second (decimal.Decimal): fraction of second, which must be a value between 0.0 and 1.0. Returns: str: date and time value formatted as: YYYY-MM-DD hh:mm:ss.### Raises: ValueError: if the fraction of second is out of bounds. """ if fraction_of_second < 0.0 or fraction_of_second >= 1.0: raise ValueError('Fraction of second value: {0:f} out of bounds.'.format( fraction_of_second)) milliseconds = int(fraction_of_second * definitions.MILLISECONDS_PER_SECOND) return '{0:04d}-{1:02d}-{2:02d} {3:02d}:{4:02d}:{5:02d}.{6:03d}'.format( time_elements_tuple[0], time_elements_tuple[1], time_elements_tuple[2], time_elements_tuple[3], time_elements_tuple[4], time_elements_tuple[5], milliseconds)
[ "def", "CopyToDateTimeString", "(", "cls", ",", "time_elements_tuple", ",", "fraction_of_second", ")", ":", "if", "fraction_of_second", "<", "0.0", "or", "fraction_of_second", ">=", "1.0", ":", "raise", "ValueError", "(", "'Fraction of second value: {0:f} out of bounds.'"...
Copies the time elements and fraction of second to a string. Args: time_elements_tuple (tuple[int, int, int, int, int, int]): time elements, contains year, month, day of month, hours, minutes and seconds. fraction_of_second (decimal.Decimal): fraction of second, which must be a value between 0.0 and 1.0. Returns: str: date and time value formatted as: YYYY-MM-DD hh:mm:ss.### Raises: ValueError: if the fraction of second is out of bounds.
[ "Copies", "the", "time", "elements", "and", "fraction", "of", "second", "to", "a", "string", "." ]
python
train
klen/aioauth-client
aioauth_client.py
https://github.com/klen/aioauth-client/blob/54f58249496c26965adb4f752f2b24cfe18d0084/aioauth_client.py#L852-L861
def user_parse(data): """Parse information from provider.""" yield 'id', data.get('id') yield 'email', data.get('email') yield 'first_name', data.get('given_name') yield 'last_name', data.get('family_name') yield 'link', data.get('link') yield 'locale', data.get('locale') yield 'picture', data.get('picture') yield 'gender', data.get('gender')
[ "def", "user_parse", "(", "data", ")", ":", "yield", "'id'", ",", "data", ".", "get", "(", "'id'", ")", "yield", "'email'", ",", "data", ".", "get", "(", "'email'", ")", "yield", "'first_name'", ",", "data", ".", "get", "(", "'given_name'", ")", "yie...
Parse information from provider.
[ "Parse", "information", "from", "provider", "." ]
python
train
saltstack/salt
salt/states/redismod.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/redismod.py#L120-L173
def slaveof(name, sentinel_host=None, sentinel_port=None, sentinel_password=None, **connection_args): ''' Set this redis instance as a slave. .. versionadded: 2016.3.0 name Master to make this a slave of sentinel_host Ip of the sentinel to check for the master sentinel_port Port of the sentinel to check for the master ''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'Failed to setup slave'} kwargs = copy.copy(connection_args) sentinel_master = __salt__['redis.sentinel_get_master_ip'](name, sentinel_host, sentinel_port, sentinel_password) if sentinel_master['master_host'] in __salt__['network.ip_addrs'](): ret['result'] = True ret['comment'] = 'Minion is the master: {0}'.format(name) return ret first_master = __salt__['redis.get_master_ip'](**connection_args) if first_master == sentinel_master: ret['result'] = True ret['comment'] = 'Minion already slave of master: {0}'.format(name) return ret if __opts__['test'] is True: ret['comment'] = 'Minion will be made a slave of {0}: {1}'.format(name, sentinel_master['host']) ret['result'] = None return ret kwargs.update(**sentinel_master) __salt__['redis.slaveof'](**kwargs) current_master = __salt__['redis.get_master_ip'](**connection_args) if current_master != sentinel_master: return ret ret['result'] = True ret['changes'] = { 'old': first_master, 'new': current_master, } ret['comment'] = 'Minion successfully connected to master: {0}'.format(name) return ret
[ "def", "slaveof", "(", "name", ",", "sentinel_host", "=", "None", ",", "sentinel_port", "=", "None", ",", "sentinel_password", "=", "None", ",", "*", "*", "connection_args", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'changes'", ":", "{", ...
Set this redis instance as a slave. .. versionadded: 2016.3.0 name Master to make this a slave of sentinel_host Ip of the sentinel to check for the master sentinel_port Port of the sentinel to check for the master
[ "Set", "this", "redis", "instance", "as", "a", "slave", "." ]
python
train
pr-omethe-us/PyKED
pyked/converters.py
https://github.com/pr-omethe-us/PyKED/blob/d9341a068c1099049a3f1de41c512591f342bf64/pyked/converters.py#L318-L471
def get_datapoints(root): """Parse datapoints with ignition delay from file. Args: root (`~xml.etree.ElementTree.Element`): Root of ReSpecTh XML file Returns: properties (`dict`): Dictionary with ignition delay data """ # Shock tube experiment will have one data group, while RCM may have one # or two (one for ignition delay, one for volume-history) dataGroups = root.findall('dataGroup') if not dataGroups: raise MissingElementError('dataGroup') # all situations will have main experimental data in first dataGroup dataGroup = dataGroups[0] property_id = {} unit_id = {} species_id = {} # get properties of dataGroup for prop in dataGroup.findall('property'): unit_id[prop.attrib['id']] = prop.attrib['units'] temp_prop = prop.attrib['name'] if temp_prop not in datagroup_properties + ['composition']: raise KeyError(temp_prop + ' not valid dataPoint property') property_id[prop.attrib['id']] = temp_prop if temp_prop == 'composition': spec = {'species-name': prop.find('speciesLink').attrib['preferredKey']} # use InChI for unique species identifier (if present) try: spec['InChI'] = prop.find('speciesLink').attrib['InChI'] except KeyError: # TODO: add InChI validator/search warn('Missing InChI for species ' + spec['species-name']) pass species_id[prop.attrib['id']] = spec if not property_id: raise MissingElementError('property') # now get data points datapoints = [] for dp in dataGroup.findall('dataPoint'): datapoint = {} if 'composition' in property_id.values(): datapoint['composition'] = {'species': [], 'kind': None} for val in dp: # handle "regular" properties differently than composition if property_id.get(val.tag) in datagroup_properties: units = unit_id[val.tag] if units == 'Torr': units = 'torr' datapoint[property_id[val.tag].replace(' ', '-')] = [val.text + ' ' + units] elif property_id.get(val.tag) == 'composition': spec = {} spec['species-name'] = species_id[val.tag]['species-name'] spec['InChI'] = species_id[val.tag].get('InChI') units = unit_id[val.tag] # If mole or 
mass fraction, just set value if units in ['mole fraction', 'mass fraction', 'mole percent']: spec['amount'] = [float(val.text)] elif units == 'percent': # assume this means mole percent warn('Assuming percent in composition means mole percent') spec['amount'] = [float(val.text)] units = 'mole percent' elif units == 'ppm': # assume molar ppm, convert to mole fraction warn('Assuming molar ppm in composition and converting to mole fraction') spec['amount'] = [float(val.text) * 1.e-6] units = 'mole fraction' elif units == 'ppb': # assume molar ppb, convert to mole fraction warn('Assuming molar ppb in composition and converting to mole fraction') spec['amount'] = [float(val.text) * 1.e-9] units = 'mole fraction' else: raise KeywordError('composition units need to be one of: mole fraction, ' 'mass fraction, mole percent, percent, ppm, or ppb.' ) # check consistency of composition type if datapoint['composition']['kind'] is None: datapoint['composition']['kind'] = units elif datapoint['composition']['kind'] != units: raise KeywordError( 'composition units ' + units + ' not consistent with ' + datapoint['composition']['kind'] ) datapoint['composition']['species'].append(spec) else: raise KeywordError('value missing from properties: ' + val.tag) datapoints.append(datapoint) if len(datapoints) == 0: raise MissingElementError('dataPoint') # ReSpecTh files can have other dataGroups with pressure, volume, or temperature histories if len(dataGroups) > 1: datapoints[0]['time-histories'] = [] for dataGroup in dataGroups[1:]: time_tag = None quant_tags = [] quant_dicts = [] quant_types = [] for prop in dataGroup.findall('property'): if prop.attrib['name'] == 'time': time_dict = {'units': prop.attrib['units'], 'column': 0} time_tag = prop.attrib['id'] elif prop.attrib['name'] in ['volume', 'temperature', 'pressure']: quant_types.append(prop.attrib['name']) quant_dicts.append({'units': prop.attrib['units'], 'column': 1}) quant_tags.append(prop.attrib['id']) else: raise 
KeywordError('Only volume, temperature, pressure, and time are allowed ' 'in a time-history dataGroup.') if time_tag is None or len(quant_tags) == 0: raise KeywordError('Both time and quantity properties required for time-history.') time_histories = [ {'time': time_dict, 'quantity': q, 'type': t, 'values': []} for (q, t) in zip(quant_dicts, quant_types) ] # collect volume-time history for dp in dataGroup.findall('dataPoint'): time = None quants = {} for val in dp: if val.tag == time_tag: time = float(val.text) elif val.tag in quant_tags: quant = float(val.text) tag_idx = quant_tags.index(val.tag) quant_type = quant_types[tag_idx] quants[quant_type] = quant else: raise KeywordError('Value tag {} not found in dataGroup tags: ' '{}'.format(val.tag, quant_tags)) if time is None or len(quants) == 0: raise KeywordError('Both time and quantity values required in each ' 'time-history dataPoint.') for t in time_histories: t['values'].append([time, quants[t['type']]]) datapoints[0]['time-histories'].extend(time_histories) return datapoints
[ "def", "get_datapoints", "(", "root", ")", ":", "# Shock tube experiment will have one data group, while RCM may have one", "# or two (one for ignition delay, one for volume-history)", "dataGroups", "=", "root", ".", "findall", "(", "'dataGroup'", ")", "if", "not", "dataGroups", ...
Parse datapoints with ignition delay from file. Args: root (`~xml.etree.ElementTree.Element`): Root of ReSpecTh XML file Returns: properties (`dict`): Dictionary with ignition delay data
[ "Parse", "datapoints", "with", "ignition", "delay", "from", "file", "." ]
python
train
PaulHancock/Aegean
AegeanTools/wcs_helpers.py
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/wcs_helpers.py#L62-L97
def from_header(cls, header, beam=None, lat=None): """ Create a new WCSHelper class from the given header. Parameters ---------- header : `astropy.fits.HDUHeader` or string The header to be used to create the WCS helper beam : :class:`AegeanTools.fits_image.Beam` or None The synthesized beam. If the supplied beam is None then one is constructed form the header. lat : float The latitude of the telescope. Returns ------- obj : :class:`AegeanTools.wcs_helpers.WCSHelper` A helper object. """ try: wcs = pywcs.WCS(header, naxis=2) except: # TODO: figure out what error is being thrown wcs = pywcs.WCS(str(header), naxis=2) if beam is None: beam = get_beam(header) else: beam = beam if beam is None: logging.critical("Cannot determine beam information") _, pixscale = get_pixinfo(header) refpix = (header['CRPIX1'], header['CRPIX2']) return cls(wcs, beam, pixscale, refpix, lat)
[ "def", "from_header", "(", "cls", ",", "header", ",", "beam", "=", "None", ",", "lat", "=", "None", ")", ":", "try", ":", "wcs", "=", "pywcs", ".", "WCS", "(", "header", ",", "naxis", "=", "2", ")", "except", ":", "# TODO: figure out what error is bein...
Create a new WCSHelper class from the given header. Parameters ---------- header : `astropy.fits.HDUHeader` or string The header to be used to create the WCS helper beam : :class:`AegeanTools.fits_image.Beam` or None The synthesized beam. If the supplied beam is None then one is constructed form the header. lat : float The latitude of the telescope. Returns ------- obj : :class:`AegeanTools.wcs_helpers.WCSHelper` A helper object.
[ "Create", "a", "new", "WCSHelper", "class", "from", "the", "given", "header", "." ]
python
train
DataDog/integrations-core
snmp/datadog_checks/snmp/snmp.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/snmp/datadog_checks/snmp/snmp.py#L444-L471
def report_raw_metrics(self, metrics, results, tags): ''' For all the metrics that are specified as oid, the conf oid is going to exactly match or be a prefix of the oid sent back by the device Use the instance configuration to find the name to give to the metric Submit the results to the aggregator. ''' for metric in metrics: forced_type = metric.get('forced_type') if 'OID' in metric: queried_oid = metric['OID'] if queried_oid in results: value = results[queried_oid] else: for oid in results: if oid.startswith(queried_oid): value = results[oid] break else: self.log.warning("No matching results found for oid %s", queried_oid) continue name = metric.get('name', 'unnamed_metric') metric_tags = tags if metric.get('metric_tags'): metric_tags = metric_tags + metric.get('metric_tags') self.submit_metric(name, value, forced_type, metric_tags)
[ "def", "report_raw_metrics", "(", "self", ",", "metrics", ",", "results", ",", "tags", ")", ":", "for", "metric", "in", "metrics", ":", "forced_type", "=", "metric", ".", "get", "(", "'forced_type'", ")", "if", "'OID'", "in", "metric", ":", "queried_oid", ...
For all the metrics that are specified as oid, the conf oid is going to exactly match or be a prefix of the oid sent back by the device Use the instance configuration to find the name to give to the metric Submit the results to the aggregator.
[ "For", "all", "the", "metrics", "that", "are", "specified", "as", "oid", "the", "conf", "oid", "is", "going", "to", "exactly", "match", "or", "be", "a", "prefix", "of", "the", "oid", "sent", "back", "by", "the", "device", "Use", "the", "instance", "con...
python
train
ARMmbed/yotta
yotta/lib/github_access.py
https://github.com/ARMmbed/yotta/blob/56bc1e56c602fa20307b23fe27518e9cd6c11af1/yotta/lib/github_access.py#L278-L285
def availableBranches(self): ''' return a list of GithubComponentVersion objects for the tip of each branch ''' return [ GithubComponentVersion( '', b[0], b[1], self.name, cache_key=None ) for b in _getBranchHeads(self.repo).items() ]
[ "def", "availableBranches", "(", "self", ")", ":", "return", "[", "GithubComponentVersion", "(", "''", ",", "b", "[", "0", "]", ",", "b", "[", "1", "]", ",", "self", ".", "name", ",", "cache_key", "=", "None", ")", "for", "b", "in", "_getBranchHeads"...
return a list of GithubComponentVersion objects for the tip of each branch
[ "return", "a", "list", "of", "GithubComponentVersion", "objects", "for", "the", "tip", "of", "each", "branch" ]
python
valid
ejeschke/ginga
ginga/util/plots.py
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/util/plots.py#L160-L170
def cuts(self, data, xtitle=None, ytitle=None, title=None, rtitle=None, color=None): """data: pixel values along a line. """ y = data x = np.arange(len(data)) self.plot(x, y, color=color, drawstyle='steps-mid', xtitle=xtitle, ytitle=ytitle, title=title, rtitle=rtitle, alpha=1.0, linewidth=1.0, linestyle='-')
[ "def", "cuts", "(", "self", ",", "data", ",", "xtitle", "=", "None", ",", "ytitle", "=", "None", ",", "title", "=", "None", ",", "rtitle", "=", "None", ",", "color", "=", "None", ")", ":", "y", "=", "data", "x", "=", "np", ".", "arange", "(", ...
data: pixel values along a line.
[ "data", ":", "pixel", "values", "along", "a", "line", "." ]
python
train
deepmind/sonnet
sonnet/python/modules/attention.py
https://github.com/deepmind/sonnet/blob/00612ca3178964d86b556e062694d808ff81fcca/sonnet/python/modules/attention.py#L60-L183
def _build(self, memory, query, memory_mask=None): """Perform a differentiable read. Args: memory: [batch_size, memory_size, memory_word_size]-shaped Tensor of dtype float32. This represents, for each example and memory slot, a single embedding to attend over. query: [batch_size, query_word_size]-shaped Tensor of dtype float32. Represents, for each example, a single embedding representing a query. memory_mask: None or [batch_size, memory_size]-shaped Tensor of dtype bool. An entry of False indicates that a memory slot should not enter the resulting weighted sum. If None, all memory is used. Returns: An AttentionOutput instance containing: read: [batch_size, memory_word_size]-shaped Tensor of dtype float32. This represents, for each example, a weighted sum of the contents of the memory. weights: [batch_size, memory_size]-shaped Tensor of dtype float32. This represents, for each example and memory slot, the attention weights used to compute the read. weight_logits: [batch_size, memory_size]-shaped Tensor of dtype float32. This represents, for each example and memory slot, the logits of the attention weights, that is, `weights` is calculated by taking the softmax of the weight logits. Raises: UnderspecifiedError: if memory_word_size or query_word_size can not be inferred. IncompatibleShapeError: if memory, query, memory_mask, or output of attention_logit_mod do not match expected shapes. """ if len(memory.get_shape()) != 3: raise base.IncompatibleShapeError( "memory must have shape [batch_size, memory_size, memory_word_size].") if len(query.get_shape()) != 2: raise base.IncompatibleShapeError( "query must have shape [batch_size, query_word_size].") if memory_mask is not None and len(memory_mask.get_shape()) != 2: raise base.IncompatibleShapeError( "memory_mask must have shape [batch_size, memory_size].") # Ensure final dimensions are defined, else the attention logit module will # be unable to infer input size when constructing variables. 
inferred_memory_word_size = memory.get_shape()[2].value inferred_query_word_size = query.get_shape()[1].value if inferred_memory_word_size is None or inferred_query_word_size is None: raise base.UnderspecifiedError( "memory_word_size and query_word_size must be known at graph " "construction time.") memory_shape = tf.shape(memory) batch_size = memory_shape[0] memory_size = memory_shape[1] query_shape = tf.shape(query) query_batch_size = query_shape[0] # Transform query to have same number of words as memory. # # expanded_query: [batch_size, memory_size, query_word_size]. expanded_query = tf.tile(tf.expand_dims(query, dim=1), [1, memory_size, 1]) # Compute attention weights for each memory slot. # # attention_weight_logits: [batch_size, memory_size] with tf.control_dependencies( [tf.assert_equal(batch_size, query_batch_size)]): concatenated_embeddings = tf.concat( values=[memory, expanded_query], axis=2) batch_apply_attention_logit = basic.BatchApply( self._attention_logit_mod, n_dims=2, name="batch_apply_attention_logit") attention_weight_logits = batch_apply_attention_logit( concatenated_embeddings) # Note: basic.BatchApply() will automatically reshape the [batch_size * # memory_size, 1]-shaped result of self._attention_logit_mod(...) into a # [batch_size, memory_size, 1]-shaped Tensor. If # self._attention_logit_mod(...) returns something with more dimensions, # then attention_weight_logits will have extra dimensions, too. if len(attention_weight_logits.get_shape()) != 3: raise base.IncompatibleShapeError( "attention_weight_logits must be a rank-3 Tensor. Are you sure that " "attention_logit_mod() returned [batch_size * memory_size, 1]-shaped" " Tensor?") # Remove final length-1 dimension. attention_weight_logits = tf.squeeze(attention_weight_logits, [2]) # Mask out ignored memory slots by assigning them very small logits. Ensures # that every example has at least one valid memory slot, else we'd end up # averaging all memory slots equally. 
if memory_mask is not None: num_remaining_memory_slots = tf.reduce_sum( tf.cast(memory_mask, dtype=tf.int32), axis=[1]) with tf.control_dependencies( [tf.assert_positive(num_remaining_memory_slots)]): finfo = np.finfo(np.float32) kept_indices = tf.cast(memory_mask, dtype=tf.float32) ignored_indices = tf.cast(tf.logical_not(memory_mask), dtype=tf.float32) lower_bound = finfo.max * kept_indices + finfo.min * ignored_indices attention_weight_logits = tf.minimum(attention_weight_logits, lower_bound) # attended_memory: [batch_size, memory_word_size]. attention_weight = tf.reshape( tf.nn.softmax(attention_weight_logits), shape=[batch_size, memory_size, 1]) # The multiplication is elementwise and relies on broadcasting the weights # across memory_word_size. Then we sum across the memory slots. attended_memory = tf.reduce_sum(memory * attention_weight, axis=[1]) # Infer shape of result as much as possible. inferred_batch_size, _, inferred_memory_word_size = ( memory.get_shape().as_list()) attended_memory.set_shape([inferred_batch_size, inferred_memory_word_size]) return AttentionOutput( read=attended_memory, weights=tf.squeeze(attention_weight, [2]), weight_logits=attention_weight_logits)
[ "def", "_build", "(", "self", ",", "memory", ",", "query", ",", "memory_mask", "=", "None", ")", ":", "if", "len", "(", "memory", ".", "get_shape", "(", ")", ")", "!=", "3", ":", "raise", "base", ".", "IncompatibleShapeError", "(", "\"memory must have sh...
Perform a differentiable read. Args: memory: [batch_size, memory_size, memory_word_size]-shaped Tensor of dtype float32. This represents, for each example and memory slot, a single embedding to attend over. query: [batch_size, query_word_size]-shaped Tensor of dtype float32. Represents, for each example, a single embedding representing a query. memory_mask: None or [batch_size, memory_size]-shaped Tensor of dtype bool. An entry of False indicates that a memory slot should not enter the resulting weighted sum. If None, all memory is used. Returns: An AttentionOutput instance containing: read: [batch_size, memory_word_size]-shaped Tensor of dtype float32. This represents, for each example, a weighted sum of the contents of the memory. weights: [batch_size, memory_size]-shaped Tensor of dtype float32. This represents, for each example and memory slot, the attention weights used to compute the read. weight_logits: [batch_size, memory_size]-shaped Tensor of dtype float32. This represents, for each example and memory slot, the logits of the attention weights, that is, `weights` is calculated by taking the softmax of the weight logits. Raises: UnderspecifiedError: if memory_word_size or query_word_size can not be inferred. IncompatibleShapeError: if memory, query, memory_mask, or output of attention_logit_mod do not match expected shapes.
[ "Perform", "a", "differentiable", "read", "." ]
python
train
google/pyringe
pyringe/payload/gdb_service.py
https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/payload/gdb_service.py#L492-L511
def Call(self, position, function_call): """Perform a function call in the inferior. WARNING: Since Gdb's concept of threads can't be directly identified with python threads, the function call will be made from what has to be assumed is an arbitrary thread. This *will* interrupt the inferior. Continuing it after the call is the responsibility of the caller. Args: position: the context of the inferior to call the function from. function_call: A string corresponding to a function call. Format: 'foo(0,0)' Returns: Thre return value of the called function. """ self.EnsureGdbPosition(position[0], None, None) if not gdb.selected_thread().is_stopped(): self.Interrupt(position) result_value = gdb.parse_and_eval(function_call) return self._UnpackGdbVal(result_value)
[ "def", "Call", "(", "self", ",", "position", ",", "function_call", ")", ":", "self", ".", "EnsureGdbPosition", "(", "position", "[", "0", "]", ",", "None", ",", "None", ")", "if", "not", "gdb", ".", "selected_thread", "(", ")", ".", "is_stopped", "(", ...
Perform a function call in the inferior. WARNING: Since Gdb's concept of threads can't be directly identified with python threads, the function call will be made from what has to be assumed is an arbitrary thread. This *will* interrupt the inferior. Continuing it after the call is the responsibility of the caller. Args: position: the context of the inferior to call the function from. function_call: A string corresponding to a function call. Format: 'foo(0,0)' Returns: Thre return value of the called function.
[ "Perform", "a", "function", "call", "in", "the", "inferior", "." ]
python
train
saltstack/salt
salt/modules/boto_lambda.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_lambda.py#L788-L819
def update_alias(FunctionName, Name, FunctionVersion=None, Description=None, region=None, key=None, keyid=None, profile=None): ''' Update the named alias to the configuration. Returns {updated: true} if the alias was updated and returns {updated: False} if the alias was not updated. CLI Example: .. code-block:: bash salt myminion boto_lamba.update_alias my_lambda my_alias $LATEST ''' try: conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) args = {} if FunctionVersion: args['FunctionVersion'] = FunctionVersion if Description: args['Description'] = Description r = conn.update_alias(FunctionName=FunctionName, Name=Name, **args) if r: keys = ('Name', 'FunctionVersion', 'Description') return {'updated': True, 'alias': dict([(k, r.get(k)) for k in keys])} else: log.warning('Alias was not updated') return {'updated': False} except ClientError as e: return {'created': False, 'error': __utils__['boto3.get_error'](e)}
[ "def", "update_alias", "(", "FunctionName", ",", "Name", ",", "FunctionVersion", "=", "None", ",", "Description", "=", "None", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "try",...
Update the named alias to the configuration. Returns {updated: true} if the alias was updated and returns {updated: False} if the alias was not updated. CLI Example: .. code-block:: bash salt myminion boto_lamba.update_alias my_lambda my_alias $LATEST
[ "Update", "the", "named", "alias", "to", "the", "configuration", "." ]
python
train
IAMconsortium/pyam
pyam/core.py
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/core.py#L1036-L1047
def to_csv(self, path, iamc_index=False, **kwargs): """Write timeseries data to a csv file Parameters ---------- path: string file path iamc_index: bool, default False if True, use `['model', 'scenario', 'region', 'variable', 'unit']`; else, use all `data` columns """ self._to_file_format(iamc_index).to_csv(path, index=False, **kwargs)
[ "def", "to_csv", "(", "self", ",", "path", ",", "iamc_index", "=", "False", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_to_file_format", "(", "iamc_index", ")", ".", "to_csv", "(", "path", ",", "index", "=", "False", ",", "*", "*", "kwargs", ...
Write timeseries data to a csv file Parameters ---------- path: string file path iamc_index: bool, default False if True, use `['model', 'scenario', 'region', 'variable', 'unit']`; else, use all `data` columns
[ "Write", "timeseries", "data", "to", "a", "csv", "file" ]
python
train
secdev/scapy
scapy/packet.py
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/packet.py#L1857-L1874
def fuzz(p, _inplace=0): """Transform a layer into a fuzzy layer by replacing some default values by random objects""" # noqa: E501 if not _inplace: p = p.copy() q = p while not isinstance(q, NoPayload): for f in q.fields_desc: if isinstance(f, PacketListField): for r in getattr(q, f.name): print("fuzzing", repr(r)) fuzz(r, _inplace=1) elif f.default is not None: if not isinstance(f, ConditionalField) or f._evalcond(q): rnd = f.randval() if rnd is not None: q.default_fields[f.name] = rnd q = q.payload return p
[ "def", "fuzz", "(", "p", ",", "_inplace", "=", "0", ")", ":", "# noqa: E501", "if", "not", "_inplace", ":", "p", "=", "p", ".", "copy", "(", ")", "q", "=", "p", "while", "not", "isinstance", "(", "q", ",", "NoPayload", ")", ":", "for", "f", "in...
Transform a layer into a fuzzy layer by replacing some default values by random objects
[ "Transform", "a", "layer", "into", "a", "fuzzy", "layer", "by", "replacing", "some", "default", "values", "by", "random", "objects" ]
python
train
neptune-ml/steppy
steppy/base.py
https://github.com/neptune-ml/steppy/blob/856b95f1f5189e1d2ca122b891bc670adac9692b/steppy/base.py#L434-L455
def reset(self): """Reset all upstream Steps to the default training parameters and cleans cache for all upstream Steps including this Step. Defaults are: 'mode': 'train', 'is_fittable': True, 'force_fitting': True, 'persist_output': False, 'cache_output': False, 'load_persisted_output': False """ self.clean_cache_upstream() self.set_mode_train() for step_obj in self.all_upstream_steps.values(): step_obj.is_fittable = DEFAULT_TRAINING_SETUP['is_fittable'] step_obj.force_fitting = DEFAULT_TRAINING_SETUP['force_fitting'] step_obj.persist_output = DEFAULT_TRAINING_SETUP['persist_output'] step_obj.cache_output = DEFAULT_TRAINING_SETUP['cache_output'] step_obj.load_persisted_output = DEFAULT_TRAINING_SETUP['load_persisted_output'] logger.info('Step {}, reset all upstream Steps to default training parameters, ' 'including this Step'.format(self.name)) return self
[ "def", "reset", "(", "self", ")", ":", "self", ".", "clean_cache_upstream", "(", ")", "self", ".", "set_mode_train", "(", ")", "for", "step_obj", "in", "self", ".", "all_upstream_steps", ".", "values", "(", ")", ":", "step_obj", ".", "is_fittable", "=", ...
Reset all upstream Steps to the default training parameters and cleans cache for all upstream Steps including this Step. Defaults are: 'mode': 'train', 'is_fittable': True, 'force_fitting': True, 'persist_output': False, 'cache_output': False, 'load_persisted_output': False
[ "Reset", "all", "upstream", "Steps", "to", "the", "default", "training", "parameters", "and", "cleans", "cache", "for", "all", "upstream", "Steps", "including", "this", "Step", ".", "Defaults", "are", ":", "mode", ":", "train", "is_fittable", ":", "True", "f...
python
train
pneff/wsgiservice
wsgiservice/application.py
https://github.com/pneff/wsgiservice/blob/03c064ac2e8c53a1aac9c7b99970f23cf79e20f4/wsgiservice/application.py#L82-L103
def _handle_request(self, request): """Finds the resource to which a request maps and then calls it. Instantiates, fills and returns a :class:`webob.Response` object. If no resource matches the request, a 404 status is set on the response object. :param request: Object representing the current request. :type request: :class:`webob.Request` """ response = webob.Response(request=request) path = request.path_info parsed = self._urlmap(path) if parsed: path_params, resource = parsed else: path_params, resource = {}, self.NOT_FOUND_RESOURCE instance = resource(request=request, response=response, path_params=path_params, application=self) response = instance() if request.method == 'HEAD': response.body = '' return response
[ "def", "_handle_request", "(", "self", ",", "request", ")", ":", "response", "=", "webob", ".", "Response", "(", "request", "=", "request", ")", "path", "=", "request", ".", "path_info", "parsed", "=", "self", ".", "_urlmap", "(", "path", ")", "if", "p...
Finds the resource to which a request maps and then calls it. Instantiates, fills and returns a :class:`webob.Response` object. If no resource matches the request, a 404 status is set on the response object. :param request: Object representing the current request. :type request: :class:`webob.Request`
[ "Finds", "the", "resource", "to", "which", "a", "request", "maps", "and", "then", "calls", "it", ".", "Instantiates", "fills", "and", "returns", "a", ":", "class", ":", "webob", ".", "Response", "object", ".", "If", "no", "resource", "matches", "the", "r...
python
train
juju/charm-helpers
charmhelpers/contrib/storage/linux/ceph.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/storage/linux/ceph.py#L1168-L1207
def add_op_create_replicated_pool(self, name, replica_count=3, pg_num=None, weight=None, group=None, namespace=None, app_name=None, max_bytes=None, max_objects=None): """Adds an operation to create a replicated pool. :param name: Name of pool to create :type name: str :param replica_count: Number of copies Ceph should keep of your data. :type replica_count: int :param pg_num: Request specific number of Placement Groups to create for pool. :type pg_num: int :param weight: The percentage of data that is expected to be contained in the pool from the total available space on the OSDs. Used to calculate number of Placement Groups to create for pool. :type weight: float :param group: Group to add pool to :type group: str :param namespace: Group namespace :type namespace: str :param app_name: (Optional) Tag pool with application name. Note that there is certain protocols emerging upstream with regard to meaningful application names to use. Examples are ``rbd`` and ``rgw``. :type app_name: str :param max_bytes: Maximum bytes quota to apply :type max_bytes: int :param max_objects: Maximum objects quota to apply :type max_objects: int """ if pg_num and weight: raise ValueError('pg_num and weight are mutually exclusive') self.ops.append({'op': 'create-pool', 'name': name, 'replicas': replica_count, 'pg_num': pg_num, 'weight': weight, 'group': group, 'group-namespace': namespace, 'app-name': app_name, 'max-bytes': max_bytes, 'max-objects': max_objects})
[ "def", "add_op_create_replicated_pool", "(", "self", ",", "name", ",", "replica_count", "=", "3", ",", "pg_num", "=", "None", ",", "weight", "=", "None", ",", "group", "=", "None", ",", "namespace", "=", "None", ",", "app_name", "=", "None", ",", "max_by...
Adds an operation to create a replicated pool. :param name: Name of pool to create :type name: str :param replica_count: Number of copies Ceph should keep of your data. :type replica_count: int :param pg_num: Request specific number of Placement Groups to create for pool. :type pg_num: int :param weight: The percentage of data that is expected to be contained in the pool from the total available space on the OSDs. Used to calculate number of Placement Groups to create for pool. :type weight: float :param group: Group to add pool to :type group: str :param namespace: Group namespace :type namespace: str :param app_name: (Optional) Tag pool with application name. Note that there is certain protocols emerging upstream with regard to meaningful application names to use. Examples are ``rbd`` and ``rgw``. :type app_name: str :param max_bytes: Maximum bytes quota to apply :type max_bytes: int :param max_objects: Maximum objects quota to apply :type max_objects: int
[ "Adds", "an", "operation", "to", "create", "a", "replicated", "pool", "." ]
python
train
globocom/GloboNetworkAPI-client-python
networkapiclient/ApiNetworkIPv4.py
https://github.com/globocom/GloboNetworkAPI-client-python/blob/cf34f913da48d9abbf750114f5d2ac4b2dde137d/networkapiclient/ApiNetworkIPv4.py#L61-L71
def undeploy(self, id_networkv4): """Remove deployment of network in equipments and set column 'active = 0' in tables redeipv4 ] :param id_networkv4: ID for NetworkIPv4 :return: Equipments configuration output """ uri = 'api/networkv4/%s/equipments/' % id_networkv4 return super(ApiNetworkIPv4, self).delete(uri)
[ "def", "undeploy", "(", "self", ",", "id_networkv4", ")", ":", "uri", "=", "'api/networkv4/%s/equipments/'", "%", "id_networkv4", "return", "super", "(", "ApiNetworkIPv4", ",", "self", ")", ".", "delete", "(", "uri", ")" ]
Remove deployment of network in equipments and set column 'active = 0' in tables redeipv4 ] :param id_networkv4: ID for NetworkIPv4 :return: Equipments configuration output
[ "Remove", "deployment", "of", "network", "in", "equipments", "and", "set", "column", "active", "=", "0", "in", "tables", "redeipv4", "]" ]
python
train
ewels/MultiQC
multiqc/plots/heatmap.py
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/plots/heatmap.py#L40-L84
def highcharts_heatmap (data, xcats, ycats, pconfig=None): """ Build the HTML needed for a HighCharts line graph. Should be called by plot_xy_data, which properly formats input data. """ if pconfig is None: pconfig = {} # Reformat the data for highcharts pdata = [] for i, arr in enumerate(data): for j, val in enumerate(arr): pdata.append([j,i,val]) # Get the plot ID if pconfig.get('id') is None: pconfig['id'] = 'mqc_hcplot_'+''.join(random.sample(letters, 10)) # Sanitise plot ID and check for duplicates pconfig['id'] = report.save_htmlid(pconfig['id']) # Build the HTML for the page html = '<div class="mqc_hcplot_plotgroup">' # The 'sort by highlights button' html += """<div class="btn-group hc_switch_group"> <button type="button" class="mqc_heatmap_sortHighlight btn btn-default btn-sm" data-target="#{id}" disabled="disabled"> <span class="glyphicon glyphicon-sort-by-attributes-alt"></span> Sort by highlight </button> </div>""".format(id=pconfig['id']) # The plot div html += '<div class="hc-plot-wrapper"><div id="{id}" class="hc-plot not_rendered hc-heatmap"><small>loading..</small></div></div></div> \n'.format(id=pconfig['id']) report.num_hc_plots += 1 report.plot_data[pconfig['id']] = { 'plot_type': 'heatmap', 'data': pdata, 'xcats': xcats, 'ycats': ycats, 'config': pconfig } return html
[ "def", "highcharts_heatmap", "(", "data", ",", "xcats", ",", "ycats", ",", "pconfig", "=", "None", ")", ":", "if", "pconfig", "is", "None", ":", "pconfig", "=", "{", "}", "# Reformat the data for highcharts", "pdata", "=", "[", "]", "for", "i", ",", "arr...
Build the HTML needed for a HighCharts line graph. Should be called by plot_xy_data, which properly formats input data.
[ "Build", "the", "HTML", "needed", "for", "a", "HighCharts", "line", "graph", ".", "Should", "be", "called", "by", "plot_xy_data", "which", "properly", "formats", "input", "data", "." ]
python
train
Erotemic/utool
utool/util_alg.py
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_alg.py#L547-L591
def colwise_diag_idxs(size, num=2): r""" dont trust this implementation or this function name Args: size (int): Returns: ?: upper_diag_idxs CommandLine: python -m utool.util_alg --exec-colwise_diag_idxs --size=5 --num=2 python -m utool.util_alg --exec-colwise_diag_idxs --size=3 --num=3 Example: >>> # DISABLE_DOCTEST >>> from utool.util_alg import * # NOQA >>> size = ut.get_argval('--size', default=5) >>> num = ut.get_argval('--num', default=2) >>> mat = np.zeros([size] * num, dtype=np.int) >>> upper_diag_idxs = colwise_diag_idxs(size, num) >>> poses = np.array(upper_diag_idxs) >>> idxs = np.ravel_multi_index(poses.T, mat.shape) >>> print('poses.T =\n%s' % (ut.repr2(poses.T),)) >>> mat[tuple(poses.T)] = np.arange(1, len(poses) + 1) >>> print(mat) poses.T = np.array([[0, 0, 1, 0, 1, 2, 0, 1, 2, 3], [1, 2, 2, 3, 3, 3, 4, 4, 4, 4]]) """ # diag_idxs = list(diagonalized_iter(size)) # upper_diag_idxs = [(r, c) for r, c in diag_idxs if r < c] # # diag_idxs = list(diagonalized_iter(size)) import utool as ut diag_idxs = ut.iprod(*[range(size) for _ in range(num)]) #diag_idxs = list(ut.iprod(range(size), range(size))) # this is pretty much a simple c ordering upper_diag_idxs = [ tup[::-1] for tup in diag_idxs if all([a > b for a, b in ut.itertwo(tup)]) #if all([a > b for a, b in ut.itertwo(tup[:2])]) ] #upper_diag_idxs = [(c, r) for r, c in diag_idxs if r > c] # # upper_diag_idxs = [(r, c) for r, c in diag_idxs if r > c] return upper_diag_idxs
[ "def", "colwise_diag_idxs", "(", "size", ",", "num", "=", "2", ")", ":", "# diag_idxs = list(diagonalized_iter(size))", "# upper_diag_idxs = [(r, c) for r, c in diag_idxs if r < c]", "# # diag_idxs = list(diagonalized_iter(size))", "import", "utool", "as", "ut", "diag_idxs", "=",...
r""" dont trust this implementation or this function name Args: size (int): Returns: ?: upper_diag_idxs CommandLine: python -m utool.util_alg --exec-colwise_diag_idxs --size=5 --num=2 python -m utool.util_alg --exec-colwise_diag_idxs --size=3 --num=3 Example: >>> # DISABLE_DOCTEST >>> from utool.util_alg import * # NOQA >>> size = ut.get_argval('--size', default=5) >>> num = ut.get_argval('--num', default=2) >>> mat = np.zeros([size] * num, dtype=np.int) >>> upper_diag_idxs = colwise_diag_idxs(size, num) >>> poses = np.array(upper_diag_idxs) >>> idxs = np.ravel_multi_index(poses.T, mat.shape) >>> print('poses.T =\n%s' % (ut.repr2(poses.T),)) >>> mat[tuple(poses.T)] = np.arange(1, len(poses) + 1) >>> print(mat) poses.T = np.array([[0, 0, 1, 0, 1, 2, 0, 1, 2, 3], [1, 2, 2, 3, 3, 3, 4, 4, 4, 4]])
[ "r", "dont", "trust", "this", "implementation", "or", "this", "function", "name" ]
python
train
robotframework/Rammbock
src/Rammbock/core.py
https://github.com/robotframework/Rammbock/blob/c906058d055a6f7c68fe1a6096d78c2e3f642b1c/src/Rammbock/core.py#L935-L950
def value(self, name, value): """Defines a default `value` for a template field identified by `name`. Default values for header fields can be set with header:field syntax. Examples: | Value | foo | 42 | | Value | struct.sub_field | 0xcafe | | Value | header:version | 0x02 | """ if isinstance(value, _StructuredElement): self._struct_fields_as_values(name, value) elif name.startswith('header:'): self._header_values[name.partition(':')[-1]] = value else: self._field_values[name] = value
[ "def", "value", "(", "self", ",", "name", ",", "value", ")", ":", "if", "isinstance", "(", "value", ",", "_StructuredElement", ")", ":", "self", ".", "_struct_fields_as_values", "(", "name", ",", "value", ")", "elif", "name", ".", "startswith", "(", "'he...
Defines a default `value` for a template field identified by `name`. Default values for header fields can be set with header:field syntax. Examples: | Value | foo | 42 | | Value | struct.sub_field | 0xcafe | | Value | header:version | 0x02 |
[ "Defines", "a", "default", "value", "for", "a", "template", "field", "identified", "by", "name", "." ]
python
train
tamasgal/km3pipe
km3pipe/math.py
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/math.py#L257-L278
def rotation_matrix(axis, theta): """The Euler–Rodrigues formula. Return the rotation matrix associated with counterclockwise rotation about the given axis by theta radians. Parameters ---------- axis: vector to rotate around theta: rotation angle, in rad """ axis = np.asarray(axis) axis = axis / np.linalg.norm(axis) a = np.cos(theta / 2) b, c, d = -axis * np.sin(theta / 2) aa, bb, cc, dd = a * a, b * b, c * c, d * d bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d return np.array([ [aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)], [2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)], [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc], ])
[ "def", "rotation_matrix", "(", "axis", ",", "theta", ")", ":", "axis", "=", "np", ".", "asarray", "(", "axis", ")", "axis", "=", "axis", "/", "np", ".", "linalg", ".", "norm", "(", "axis", ")", "a", "=", "np", ".", "cos", "(", "theta", "/", "2"...
The Euler–Rodrigues formula. Return the rotation matrix associated with counterclockwise rotation about the given axis by theta radians. Parameters ---------- axis: vector to rotate around theta: rotation angle, in rad
[ "The", "Euler–Rodrigues", "formula", "." ]
python
train
django-ses/django-ses
django_ses/views.py
https://github.com/django-ses/django-ses/blob/2f0fd8e3fdc76d3512982c0bb8e2f6e93e09fa3c/django_ses/views.py#L40-L63
def stats_to_list(stats_dict, localize=pytz): """ Parse the output of ``SESConnection.get_send_statistics()`` in to an ordered list of 15-minute summaries. """ result = stats_dict['GetSendStatisticsResponse']['GetSendStatisticsResult'] # Make a copy, so we don't change the original stats_dict. result = copy.deepcopy(result) datapoints = [] if localize: current_tz = localize.timezone(settings.TIME_ZONE) else: current_tz = None for dp in result['SendDataPoints']: if current_tz: utc_dt = datetime.strptime(dp['Timestamp'], '%Y-%m-%dT%H:%M:%SZ') utc_dt = localize.utc.localize(utc_dt) dp['Timestamp'] = current_tz.normalize( utc_dt.astimezone(current_tz)) datapoints.append(dp) datapoints.sort(key=lambda x: x['Timestamp']) return datapoints
[ "def", "stats_to_list", "(", "stats_dict", ",", "localize", "=", "pytz", ")", ":", "result", "=", "stats_dict", "[", "'GetSendStatisticsResponse'", "]", "[", "'GetSendStatisticsResult'", "]", "# Make a copy, so we don't change the original stats_dict.", "result", "=", "co...
Parse the output of ``SESConnection.get_send_statistics()`` in to an ordered list of 15-minute summaries.
[ "Parse", "the", "output", "of", "SESConnection", ".", "get_send_statistics", "()", "in", "to", "an", "ordered", "list", "of", "15", "-", "minute", "summaries", "." ]
python
train
TC01/python-xkcd
xkcd.py
https://github.com/TC01/python-xkcd/blob/6998d4073507eea228185e02ad1d9071c77fa955/xkcd.py#L350-L371
def getComic(number, silent=True): """ Produces a :class:`Comic` object with index equal to the provided argument. Prints an error in the event of a failure (i.e. the number is less than zero or greater than the latest comic number) and returns an empty Comic object. Arguments: an integer or string that represents a number, "number", that is the index of the comic in question. silent: boolean, defaults to True. If set to False, an error will be printed to standard output should the provided integer argument not be valid. Returns the resulting Comic object for the provided index if successful, or a Comic object with -1 as the index if not.""" numComics = getLatestComicNum() if type(number) is str and number.isdigit(): number = int(number) if number > numComics or number <= 0: if not silent: print("Error: You have requested an invalid comic.") return Comic(-1) return Comic(number)
[ "def", "getComic", "(", "number", ",", "silent", "=", "True", ")", ":", "numComics", "=", "getLatestComicNum", "(", ")", "if", "type", "(", "number", ")", "is", "str", "and", "number", ".", "isdigit", "(", ")", ":", "number", "=", "int", "(", "number...
Produces a :class:`Comic` object with index equal to the provided argument. Prints an error in the event of a failure (i.e. the number is less than zero or greater than the latest comic number) and returns an empty Comic object. Arguments: an integer or string that represents a number, "number", that is the index of the comic in question. silent: boolean, defaults to True. If set to False, an error will be printed to standard output should the provided integer argument not be valid. Returns the resulting Comic object for the provided index if successful, or a Comic object with -1 as the index if not.
[ "Produces", "a", ":", "class", ":", "Comic", "object", "with", "index", "equal", "to", "the", "provided", "argument", ".", "Prints", "an", "error", "in", "the", "event", "of", "a", "failure", "(", "i", ".", "e", ".", "the", "number", "is", "less", "t...
python
train
coleifer/irc
irc.py
https://github.com/coleifer/irc/blob/f9d2bd6369aafe6cb0916c9406270ca8ecea2080/irc.py#L284-L291
def register_callbacks(self): """\ Hook for registering callbacks with connection -- handled by __init__() """ self.conn.register_callbacks(( (re.compile(pattern), callback) \ for pattern, callback in self.command_patterns() ))
[ "def", "register_callbacks", "(", "self", ")", ":", "self", ".", "conn", ".", "register_callbacks", "(", "(", "(", "re", ".", "compile", "(", "pattern", ")", ",", "callback", ")", "for", "pattern", ",", "callback", "in", "self", ".", "command_patterns", ...
\ Hook for registering callbacks with connection -- handled by __init__()
[ "\\", "Hook", "for", "registering", "callbacks", "with", "connection", "--", "handled", "by", "__init__", "()" ]
python
test
tchellomello/python-arlo
pyarlo/camera.py
https://github.com/tchellomello/python-arlo/blob/db70aeb81705309c56ad32bbab1094f6cd146524/pyarlo/camera.py#L128-L132
def unseen_videos_reset(self): """Reset the unseen videos counter.""" url = RESET_CAM_ENDPOINT.format(self.unique_id) ret = self._session.query(url).get('success') return ret
[ "def", "unseen_videos_reset", "(", "self", ")", ":", "url", "=", "RESET_CAM_ENDPOINT", ".", "format", "(", "self", ".", "unique_id", ")", "ret", "=", "self", ".", "_session", ".", "query", "(", "url", ")", ".", "get", "(", "'success'", ")", "return", "...
Reset the unseen videos counter.
[ "Reset", "the", "unseen", "videos", "counter", "." ]
python
train
jlmadurga/permabots
permabots/views/api/state.py
https://github.com/jlmadurga/permabots/blob/781a91702529a23fe7bc2aa84c5d88e961412466/permabots/views/api/state.py#L25-L34
def get(self, request, bot_id, format=None): """ Get list of states --- serializer: StateSerializer responseMessages: - code: 401 message: Not authenticated """ return super(StateList, self).get(request, bot_id, format)
[ "def", "get", "(", "self", ",", "request", ",", "bot_id", ",", "format", "=", "None", ")", ":", "return", "super", "(", "StateList", ",", "self", ")", ".", "get", "(", "request", ",", "bot_id", ",", "format", ")" ]
Get list of states --- serializer: StateSerializer responseMessages: - code: 401 message: Not authenticated
[ "Get", "list", "of", "states", "---", "serializer", ":", "StateSerializer", "responseMessages", ":", "-", "code", ":", "401", "message", ":", "Not", "authenticated" ]
python
train
edx/edx-enterprise
integrated_channels/degreed/client.py
https://github.com/edx/edx-enterprise/blob/aea91379ab0a87cd3bc798961fce28b60ee49a80/integrated_channels/degreed/client.py#L175-L188
def _delete(self, url, data, scope): """ Make a DELETE request using the session object to a Degreed endpoint. Args: url (str): The url to send a DELETE request to. data (str): The json encoded payload to DELETE. scope (str): Must be one of the scopes Degreed expects: - `CONTENT_PROVIDER_SCOPE` - `COMPLETION_PROVIDER_SCOPE` """ self._create_session(scope) response = self.session.delete(url, data=data) return response.status_code, response.text
[ "def", "_delete", "(", "self", ",", "url", ",", "data", ",", "scope", ")", ":", "self", ".", "_create_session", "(", "scope", ")", "response", "=", "self", ".", "session", ".", "delete", "(", "url", ",", "data", "=", "data", ")", "return", "response"...
Make a DELETE request using the session object to a Degreed endpoint. Args: url (str): The url to send a DELETE request to. data (str): The json encoded payload to DELETE. scope (str): Must be one of the scopes Degreed expects: - `CONTENT_PROVIDER_SCOPE` - `COMPLETION_PROVIDER_SCOPE`
[ "Make", "a", "DELETE", "request", "using", "the", "session", "object", "to", "a", "Degreed", "endpoint", "." ]
python
valid
krinj/k-util
k_util/core.py
https://github.com/krinj/k-util/blob/b118826b1d6f49ca4e1ca7327d5b171db332ac23/k_util/core.py#L21-L23
def interpolate(f1: float, f2: float, factor: float) -> float: """ Linearly interpolate between two float values. """ return f1 + (f2 - f1) * factor
[ "def", "interpolate", "(", "f1", ":", "float", ",", "f2", ":", "float", ",", "factor", ":", "float", ")", "->", "float", ":", "return", "f1", "+", "(", "f2", "-", "f1", ")", "*", "factor" ]
Linearly interpolate between two float values.
[ "Linearly", "interpolate", "between", "two", "float", "values", "." ]
python
train
QUANTAXIS/QUANTAXIS
QUANTAXIS/QAARP/QARisk.py
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAARP/QARisk.py#L643-L733
def plot_assets_curve(self, length=14, height=12): """ 资金曲线叠加图 @Roy T.Burns 2018/05/29 修改百分比显示错误 """ plt.style.use('ggplot') plt.figure(figsize=(length, height)) plt.subplot(211) plt.title('BASIC INFO', fontsize=12) plt.axis([0, length, 0, 0.6]) plt.axis('off') i = 0 for item in ['account_cookie', 'portfolio_cookie', 'user_cookie']: plt.text( i, 0.5, '{} : {}'.format(item, self.message[item]), fontsize=10, rotation=0, wrap=True ) i += (length / 2.8) i = 0 for item in ['benchmark_code', 'time_gap', 'max_dropback']: plt.text( i, 0.4, '{} : {}'.format(item, self.message[item]), fontsize=10, ha='left', rotation=0, wrap=True ) i += (length / 2.8) i = 0 for item in ['annualize_return', 'bm_annualizereturn', 'profit']: plt.text( i, 0.3, '{} : {} %'.format(item, self.message.get(item, 0) * 100), fontsize=10, ha='left', rotation=0, wrap=True ) i += length / 2.8 i = 0 for item in ['init_cash', 'last_assets', 'volatility']: plt.text( i, 0.2, '{} : {} '.format(item, self.message[item]), fontsize=10, ha='left', rotation=0, wrap=True ) i += length / 2.8 i = 0 for item in ['alpha', 'beta', 'sharpe']: plt.text( i, 0.1, '{} : {}'.format(item, self.message[item]), ha='left', fontsize=10, rotation=0, wrap=True ) i += length / 2.8 plt.subplot(212) self.assets.plot() self.benchmark_assets.xs(self.benchmark_code, level=1).plot() asset_p = mpatches.Patch( color='red', label='{}'.format(self.account.account_cookie) ) asset_b = mpatches.Patch( label='benchmark {}'.format(self.benchmark_code) ) plt.legend(handles=[asset_p, asset_b], loc=0) plt.title('ASSET AND BENCKMARK') return plt
[ "def", "plot_assets_curve", "(", "self", ",", "length", "=", "14", ",", "height", "=", "12", ")", ":", "plt", ".", "style", ".", "use", "(", "'ggplot'", ")", "plt", ".", "figure", "(", "figsize", "=", "(", "length", ",", "height", ")", ")", "plt", ...
资金曲线叠加图 @Roy T.Burns 2018/05/29 修改百分比显示错误
[ "资金曲线叠加图" ]
python
train
klahnakoski/pyLibrary
tuid/clogger.py
https://github.com/klahnakoski/pyLibrary/blob/fa2dcbc48fda8d26999baef400e9a98149e0b982/tuid/clogger.py#L399-L463
def update_tip(self): ''' Returns False if the tip is already at the newest, or True if an update has taken place. :return: ''' clog_obj = self._get_clog(self.tuid_service.hg_url / self.config.hg.branch / 'json-log' / 'tip') # Get current tip in DB with self.conn.transaction() as t: _, newest_known_rev = self.get_tip(t) # If we are still at the newest, wait for CSET_TIP_WAIT_TIME seconds # before checking again. first_clog_entry = clog_obj['changesets'][0]['node'][:12] if newest_known_rev == first_clog_entry: return False csets_to_gather = None if not newest_known_rev: Log.note( "No revisions found in table, adding {{minim}} entries...", minim=MINIMUM_PERMANENT_CSETS ) csets_to_gather = MINIMUM_PERMANENT_CSETS found_newest_known = False csets_to_add = [] csets_found = 0 clogs_seen = 0 Log.note("Found new revisions. Updating csetLog tip to {{rev}}...", rev=first_clog_entry) while not found_newest_known and clogs_seen < MAX_TIPFILL_CLOGS: clog_csets_list = list(clog_obj['changesets']) for clog_cset in clog_csets_list[:-1]: nodes_cset = clog_cset['node'][:12] if not csets_to_gather: if nodes_cset == newest_known_rev: found_newest_known = True break else: if csets_found >= csets_to_gather: found_newest_known = True break csets_found += 1 csets_to_add.append(nodes_cset) if not found_newest_known: # Get the next page clogs_seen += 1 final_rev = clog_csets_list[-1]['node'][:12] clog_url = self.tuid_service.hg_url / self.config.hg.branch / 'json-log' / final_rev clog_obj = self._get_clog(clog_url) if clogs_seen >= MAX_TIPFILL_CLOGS: Log.error( "Too many changesets, can't find last tip or the number is too high: {{rev}}. " "Maximum possible to request is {{maxnum}}", rev=coalesce(newest_known_rev, csets_to_gather), maxnum=MAX_TIPFILL_CLOGS * CHANGESETS_PER_CLOG ) return False with self.working_locker: Log.note("Adding {{csets}}", csets=csets_to_add) self.add_cset_entries(csets_to_add, timestamp=False) return True
[ "def", "update_tip", "(", "self", ")", ":", "clog_obj", "=", "self", ".", "_get_clog", "(", "self", ".", "tuid_service", ".", "hg_url", "/", "self", ".", "config", ".", "hg", ".", "branch", "/", "'json-log'", "/", "'tip'", ")", "# Get current tip in DB", ...
Returns False if the tip is already at the newest, or True if an update has taken place. :return:
[ "Returns", "False", "if", "the", "tip", "is", "already", "at", "the", "newest", "or", "True", "if", "an", "update", "has", "taken", "place", ".", ":", "return", ":" ]
python
train
globus/globus-cli
globus_cli/parsing/shared_options.py
https://github.com/globus/globus-cli/blob/336675ff24da64c5ee487243f39ae39fc49a7e14/globus_cli/parsing/shared_options.py#L431-L531
def task_submission_options(f): """ Options shared by both transfer and delete task submission """ def notify_opt_callback(ctx, param, value): """ Parse --notify - "" is the same as "off" - parse by lowercase, comma-split, strip spaces - "off,x" is invalid for any x - "on,x" is valid for any valid x (other than "off") - "failed", "succeeded", "inactive" are normal vals In code, produces True, False, or a set """ # if no value was set, don't set any explicit options # the API default is "everything on" if value is None: return {} value = value.lower() value = [x.strip() for x in value.split(",")] # [""] is what you'll get if value is "" to start with # special-case it into "off", which helps avoid surprising scripts # which take a notification settings as inputs and build --notify if value == [""]: value = ["off"] off = "off" in value on = "on" in value # set-ize it -- duplicates are fine vals = set([x for x in value if x not in ("off", "on")]) if (vals or on) and off: raise click.UsageError('--notify cannot accept "off" and another value') allowed_vals = set(("on", "succeeded", "failed", "inactive")) if not vals <= allowed_vals: raise click.UsageError( "--notify received at least one invalid value among {}".format( list(vals) ) ) # return the notification options to send! # on means don't set anything (default) if on: return {} # off means turn off everything if off: return { "notify_on_succeeded": False, "notify_on_failed": False, "notify_on_inactive": False, } # otherwise, return the exact set of values seen else: return { "notify_on_succeeded": "succeeded" in vals, "notify_on_failed": "failed" in vals, "notify_on_inactive": "inactive" in vals, } f = click.option( "--dry-run", is_flag=True, help=("Don't actually submit the task, print submission " "data instead"), )(f) f = click.option( "--notify", callback=notify_opt_callback, help=( "Comma separated list of task events which notify by email. 
" "'on' and 'off' may be used to enable or disable notifications " "for all event types. Otherwise, use 'succeeded', 'failed', or " "'inactive'" ), )(f) f = click.option( "--submission-id", help=( "Task submission ID, as generated by `globus task " "generate-submission-id`. Used for safe resubmission in the " "presence of network failures." ), )(f) f = click.option("--label", default=None, help="Set a label for this task.")(f) f = click.option( "--deadline", default=None, type=ISOTimeType(), help="Set a deadline for this to be canceled if not completed by.", )(f) f = click.option( "--skip-activation-check", is_flag=True, help=("Submit the task even if the endpoint(s) " "aren't currently activated."), )(f) return f
[ "def", "task_submission_options", "(", "f", ")", ":", "def", "notify_opt_callback", "(", "ctx", ",", "param", ",", "value", ")", ":", "\"\"\"\n Parse --notify\n - \"\" is the same as \"off\"\n - parse by lowercase, comma-split, strip spaces\n - \"off,x\" i...
Options shared by both transfer and delete task submission
[ "Options", "shared", "by", "both", "transfer", "and", "delete", "task", "submission" ]
python
train
santoshphilip/eppy
eppy/modeleditor.py
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/modeleditor.py#L598-L612
def setidd(cls, iddinfo, iddindex, block, idd_version): """Set the IDD to be used by eppy. Parameters ---------- iddinfo : list Comments and metadata about fields in the IDD. block : list Field names in the IDD. """ cls.idd_info = iddinfo cls.block = block cls.idd_index = iddindex cls.idd_version = idd_version
[ "def", "setidd", "(", "cls", ",", "iddinfo", ",", "iddindex", ",", "block", ",", "idd_version", ")", ":", "cls", ".", "idd_info", "=", "iddinfo", "cls", ".", "block", "=", "block", "cls", ".", "idd_index", "=", "iddindex", "cls", ".", "idd_version", "=...
Set the IDD to be used by eppy. Parameters ---------- iddinfo : list Comments and metadata about fields in the IDD. block : list Field names in the IDD.
[ "Set", "the", "IDD", "to", "be", "used", "by", "eppy", "." ]
python
train
CalebBell/ht
ht/conv_internal.py
https://github.com/CalebBell/ht/blob/3097ef9524c4cf0068ad453c17b10ec9ce551eee/ht/conv_internal.py#L1554-L1624
def helical_turbulent_Nu_Mori_Nakayama(Re, Pr, Di, Dc): r'''Calculates Nusselt number for a fluid flowing inside a curved pipe such as a helical coil under turbulent conditions, using the method of Mori and Nakayama [1]_, also shown in [2]_ and [3]_. For :math:`Pr < 1`: .. math:: Nu = \frac{Pr}{26.2(Pr^{2/3}-0.074)}Re^{0.8}\left(\frac{D_i}{D_c} \right)^{0.1}\left[1 + \frac{0.098}{\left[Re\left(\frac{D_i}{D_c} \right)^2\right]^{0.2}}\right] For :math:`Pr \ge 1`: .. math:: Nu = \frac{Pr^{0.4}}{41}Re^{5/6}\left(\frac{D_i}{D_c}\right)^{1/12} \left[1 + \frac{0.061}{\left[Re\left(\frac{D_i}{D_c}\right)^{2.5} \right]^{1/6}}\right] Parameters ---------- Re : float Reynolds number with `D=Di`, [-] Pr : float Prandtl number with bulk properties [-] Di : float Inner diameter of the coil, [m] Dc : float Diameter of the helix/coil measured from the center of the tube on one side to the center of the tube on the other side, [m] Returns ------- Nu : float Nusselt number with respect to `Di`, [-] Notes ----- At very low curvatures, the predicted heat transfer coefficient grows unbounded. Applicable for :math:`Re\left(\frac{D_i}{D_c}\right)^2 > 0.1` Examples -------- >>> helical_turbulent_Nu_Mori_Nakayama(2E5, 0.7, 0.01, .2) 496.2522480663327 References ---------- .. [1] Mori, Yasuo, and Wataru Nakayama. "Study on Forced Convective Heat Transfer in Curved Pipes." International Journal of Heat and Mass Transfer 10, no. 5 (May 1, 1967): 681-95. doi:10.1016/0017-9310(67)90113-5. .. [2] El-Genk, Mohamed S., and Timothy M. Schriener. "A Review and Correlations for Convection Heat Transfer and Pressure Losses in Toroidal and Helically Coiled Tubes." Heat Transfer Engineering 0, no. 0 (June 7, 2016): 1-28. doi:10.1080/01457632.2016.1194693. .. [3] Hardik, B. K., P. K. Baburajan, and S. V. Prabhu. "Local Heat Transfer Coefficient in Helical Coils with Single Phase Flow." International Journal of Heat and Mass Transfer 89 (October 2015): 522-38. doi:10.1016/j.ijheatmasstransfer.2015.05.069. 
''' D_ratio = Di/Dc if Pr < 1: term1 = Pr/(26.2*(Pr**(2/3.) - 0.074))*Re**0.8*D_ratio**0.1 term2 = 1. + 0.098*(Re*D_ratio*D_ratio)**-0.2 else: term1 = Pr**0.4/41.*Re**(5/6.)*(Di/Dc)**(1/12.) term2 = 1. + 0.061/(Re*(Di/Dc)**2.5)**(1/6.) return term1*term2
[ "def", "helical_turbulent_Nu_Mori_Nakayama", "(", "Re", ",", "Pr", ",", "Di", ",", "Dc", ")", ":", "D_ratio", "=", "Di", "/", "Dc", "if", "Pr", "<", "1", ":", "term1", "=", "Pr", "/", "(", "26.2", "*", "(", "Pr", "**", "(", "2", "/", "3.", ")",...
r'''Calculates Nusselt number for a fluid flowing inside a curved pipe such as a helical coil under turbulent conditions, using the method of Mori and Nakayama [1]_, also shown in [2]_ and [3]_. For :math:`Pr < 1`: .. math:: Nu = \frac{Pr}{26.2(Pr^{2/3}-0.074)}Re^{0.8}\left(\frac{D_i}{D_c} \right)^{0.1}\left[1 + \frac{0.098}{\left[Re\left(\frac{D_i}{D_c} \right)^2\right]^{0.2}}\right] For :math:`Pr \ge 1`: .. math:: Nu = \frac{Pr^{0.4}}{41}Re^{5/6}\left(\frac{D_i}{D_c}\right)^{1/12} \left[1 + \frac{0.061}{\left[Re\left(\frac{D_i}{D_c}\right)^{2.5} \right]^{1/6}}\right] Parameters ---------- Re : float Reynolds number with `D=Di`, [-] Pr : float Prandtl number with bulk properties [-] Di : float Inner diameter of the coil, [m] Dc : float Diameter of the helix/coil measured from the center of the tube on one side to the center of the tube on the other side, [m] Returns ------- Nu : float Nusselt number with respect to `Di`, [-] Notes ----- At very low curvatures, the predicted heat transfer coefficient grows unbounded. Applicable for :math:`Re\left(\frac{D_i}{D_c}\right)^2 > 0.1` Examples -------- >>> helical_turbulent_Nu_Mori_Nakayama(2E5, 0.7, 0.01, .2) 496.2522480663327 References ---------- .. [1] Mori, Yasuo, and Wataru Nakayama. "Study on Forced Convective Heat Transfer in Curved Pipes." International Journal of Heat and Mass Transfer 10, no. 5 (May 1, 1967): 681-95. doi:10.1016/0017-9310(67)90113-5. .. [2] El-Genk, Mohamed S., and Timothy M. Schriener. "A Review and Correlations for Convection Heat Transfer and Pressure Losses in Toroidal and Helically Coiled Tubes." Heat Transfer Engineering 0, no. 0 (June 7, 2016): 1-28. doi:10.1080/01457632.2016.1194693. .. [3] Hardik, B. K., P. K. Baburajan, and S. V. Prabhu. "Local Heat Transfer Coefficient in Helical Coils with Single Phase Flow." International Journal of Heat and Mass Transfer 89 (October 2015): 522-38. doi:10.1016/j.ijheatmasstransfer.2015.05.069.
[ "r", "Calculates", "Nusselt", "number", "for", "a", "fluid", "flowing", "inside", "a", "curved", "pipe", "such", "as", "a", "helical", "coil", "under", "turbulent", "conditions", "using", "the", "method", "of", "Mori", "and", "Nakayama", "[", "1", "]", "_"...
python
train
maxfischer2781/include
include/__init__.py
https://github.com/maxfischer2781/include/blob/d8b0404f4996b6abcd39fdebf282b31fad8bb6f5/include/__init__.py#L81-L91
def enable(identifier, exclude_children=False): """ Enable a previously disabled include type :param identifier: module or name of the include type :param exclude_children: disable the include type only for child processes, not the current process The ``identifier`` can be specified in multiple ways to disable an include type. See :py:meth:`~.DisabledIncludeTypes.disable` for details. """ DISABLED_TYPES.enable(identifier=identifier, exclude_children=exclude_children)
[ "def", "enable", "(", "identifier", ",", "exclude_children", "=", "False", ")", ":", "DISABLED_TYPES", ".", "enable", "(", "identifier", "=", "identifier", ",", "exclude_children", "=", "exclude_children", ")" ]
Enable a previously disabled include type :param identifier: module or name of the include type :param exclude_children: disable the include type only for child processes, not the current process The ``identifier`` can be specified in multiple ways to disable an include type. See :py:meth:`~.DisabledIncludeTypes.disable` for details.
[ "Enable", "a", "previously", "disabled", "include", "type" ]
python
train
aguinane/nem-reader
nemreader/nem_reader.py
https://github.com/aguinane/nem-reader/blob/5405a5cba4bb8ebdad05c28455d12bb34a6d3ce5/nemreader/nem_reader.py#L175-L183
def parse_250_row(row: list) -> BasicMeterData: """ Parse basic meter data record (250) """ return BasicMeterData(row[1], row[2], row[3], row[4], row[5], row[6], row[7], float(row[8]), parse_datetime(row[9]), row[10], row[11], row[12], float(row[13]), parse_datetime( row[14]), row[15], row[16], row[17], float(row[18]), row[19], row[20], parse_datetime(row[21]), parse_datetime(row[22]))
[ "def", "parse_250_row", "(", "row", ":", "list", ")", "->", "BasicMeterData", ":", "return", "BasicMeterData", "(", "row", "[", "1", "]", ",", "row", "[", "2", "]", ",", "row", "[", "3", "]", ",", "row", "[", "4", "]", ",", "row", "[", "5", "]"...
Parse basic meter data record (250)
[ "Parse", "basic", "meter", "data", "record", "(", "250", ")" ]
python
train
CityOfZion/neo-python
neo/Core/State/AccountState.py
https://github.com/CityOfZion/neo-python/blob/fe90f62e123d720d4281c79af0598d9df9e776fb/neo/Core/State/AccountState.py#L232-L242
def AllBalancesZeroOrLess(self): """ Flag indicating if all balances are 0 or less. Returns: bool: True if all balances are <= 0. False, otherwise. """ for key, fixed8 in self.Balances.items(): if fixed8.value > 0: return False return True
[ "def", "AllBalancesZeroOrLess", "(", "self", ")", ":", "for", "key", ",", "fixed8", "in", "self", ".", "Balances", ".", "items", "(", ")", ":", "if", "fixed8", ".", "value", ">", "0", ":", "return", "False", "return", "True" ]
Flag indicating if all balances are 0 or less. Returns: bool: True if all balances are <= 0. False, otherwise.
[ "Flag", "indicating", "if", "all", "balances", "are", "0", "or", "less", "." ]
python
train
StackStorm/pybind
pybind/slxos/v17r_1_01a/show/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17r_1_01a/show/__init__.py#L503-L526
def _set_vnetwork(self, v, load=False): """ Setter method for vnetwork, mapped from YANG variable /show/vnetwork (container) If this variable is read-only (config: false) in the source YANG file, then _set_vnetwork is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_vnetwork() directly. YANG Description: Shows virtual infrastructure information """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=vnetwork.vnetwork, is_container='container', presence=False, yang_name="vnetwork", rest_name="vnetwork", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Shows virtual infrastructure information', u'action': u'pgs'}}, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """vnetwork must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=vnetwork.vnetwork, is_container='container', presence=False, yang_name="vnetwork", rest_name="vnetwork", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Shows virtual infrastructure information', u'action': u'pgs'}}, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='container', is_config=True)""", }) self.__vnetwork = t if hasattr(self, '_set'): self._set()
[ "def", "_set_vnetwork", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base"...
Setter method for vnetwork, mapped from YANG variable /show/vnetwork (container) If this variable is read-only (config: false) in the source YANG file, then _set_vnetwork is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_vnetwork() directly. YANG Description: Shows virtual infrastructure information
[ "Setter", "method", "for", "vnetwork", "mapped", "from", "YANG", "variable", "/", "show", "/", "vnetwork", "(", "container", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file"...
python
train
mommermi/callhorizons
callhorizons/callhorizons.py
https://github.com/mommermi/callhorizons/blob/fdd7ad9e87cac107c1b7f88e594d118210da3b1a/callhorizons/callhorizons.py#L46-L53
def _char2int(char): """ translate characters to integer values (upper and lower case)""" if char.isdigit(): return int(float(char)) if char.isupper(): return int(char, 36) else: return 26 + int(char, 36)
[ "def", "_char2int", "(", "char", ")", ":", "if", "char", ".", "isdigit", "(", ")", ":", "return", "int", "(", "float", "(", "char", ")", ")", "if", "char", ".", "isupper", "(", ")", ":", "return", "int", "(", "char", ",", "36", ")", "else", ":"...
translate characters to integer values (upper and lower case)
[ "translate", "characters", "to", "integer", "values", "(", "upper", "and", "lower", "case", ")" ]
python
train
Erotemic/utool
utool/util_cplat.py
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_cplat.py#L1264-L1300
def change_term_title(title): """ only works on unix systems only tested on Ubuntu GNOME changes text on terminal title for identifying debugging tasks. The title will remain until python exists Args: title (str): References: http://stackoverflow.com/questions/5343265/setting-title-for-tabs-in-terminator-console-application-in-ubuntu/8850484#8850484 CommandLine: python -m utool change_term_title echo -en "\033]0;newtitle\a" printf "\e]2;newtitle\a"; echo -en "\033]0;DocTest /home/joncrall/code/ibeis/ibeis.algo.graph.core.py --test-AnnotInference._make_state_delta\a" Example: >>> # DISABLE_DOCTEST >>> from utool.util_cplat import * # NOQA >>> title = 'change title test' >>> result = change_term_title(title) >>> print(result) """ if True: # Disabled return if not WIN32: #print("CHANGE TERM TITLE to %r" % (title,)) if title: #os.environ['PS1'] = os.environ['PS1'] + '''"\e]2;\"''' + title + '''\"\a"''' cmd_str = r'''echo -en "\033]0;''' + title + '''\a"''' os.system(cmd_str)
[ "def", "change_term_title", "(", "title", ")", ":", "if", "True", ":", "# Disabled", "return", "if", "not", "WIN32", ":", "#print(\"CHANGE TERM TITLE to %r\" % (title,))", "if", "title", ":", "#os.environ['PS1'] = os.environ['PS1'] + '''\"\\e]2;\\\"''' + title + '''\\\"\\a\"'''...
only works on unix systems only tested on Ubuntu GNOME changes text on terminal title for identifying debugging tasks. The title will remain until python exists Args: title (str): References: http://stackoverflow.com/questions/5343265/setting-title-for-tabs-in-terminator-console-application-in-ubuntu/8850484#8850484 CommandLine: python -m utool change_term_title echo -en "\033]0;newtitle\a" printf "\e]2;newtitle\a"; echo -en "\033]0;DocTest /home/joncrall/code/ibeis/ibeis.algo.graph.core.py --test-AnnotInference._make_state_delta\a" Example: >>> # DISABLE_DOCTEST >>> from utool.util_cplat import * # NOQA >>> title = 'change title test' >>> result = change_term_title(title) >>> print(result)
[ "only", "works", "on", "unix", "systems", "only", "tested", "on", "Ubuntu", "GNOME", "changes", "text", "on", "terminal", "title", "for", "identifying", "debugging", "tasks", "." ]
python
train
MartinThoma/hwrt
hwrt/utils.py
https://github.com/MartinThoma/hwrt/blob/725c21a3d0f5a30b8492cbc184b3688ceb364e1c/hwrt/utils.py#L393-L402
def create_adjusted_model_for_percentages(model_src, model_use): """Replace logreg layer by sigmoid to get probabilities.""" # Copy model file shutil.copyfile(model_src, model_use) # Adjust model file with open(model_src) as f: content = f.read() content = content.replace("logreg", "sigmoid") with open(model_use, "w") as f: f.write(content)
[ "def", "create_adjusted_model_for_percentages", "(", "model_src", ",", "model_use", ")", ":", "# Copy model file", "shutil", ".", "copyfile", "(", "model_src", ",", "model_use", ")", "# Adjust model file", "with", "open", "(", "model_src", ")", "as", "f", ":", "co...
Replace logreg layer by sigmoid to get probabilities.
[ "Replace", "logreg", "layer", "by", "sigmoid", "to", "get", "probabilities", "." ]
python
train
edx/edx-enterprise
enterprise/admin/views.py
https://github.com/edx/edx-enterprise/blob/aea91379ab0a87cd3bc798961fce28b60ee49a80/enterprise/admin/views.py#L796-L814
def get(self, request, customer_uuid): """ Handle GET request - render linked learners list and "Link learner" form. Arguments: request (django.http.request.HttpRequest): Request instance customer_uuid (str): Enterprise Customer UUID Returns: django.http.response.HttpResponse: HttpResponse """ context = self._build_context(request, customer_uuid) manage_learners_form = ManageLearnersForm( user=request.user, enterprise_customer=context[self.ContextParameters.ENTERPRISE_CUSTOMER] ) context.update({self.ContextParameters.MANAGE_LEARNERS_FORM: manage_learners_form}) return render(request, self.template, context)
[ "def", "get", "(", "self", ",", "request", ",", "customer_uuid", ")", ":", "context", "=", "self", ".", "_build_context", "(", "request", ",", "customer_uuid", ")", "manage_learners_form", "=", "ManageLearnersForm", "(", "user", "=", "request", ".", "user", ...
Handle GET request - render linked learners list and "Link learner" form. Arguments: request (django.http.request.HttpRequest): Request instance customer_uuid (str): Enterprise Customer UUID Returns: django.http.response.HttpResponse: HttpResponse
[ "Handle", "GET", "request", "-", "render", "linked", "learners", "list", "and", "Link", "learner", "form", "." ]
python
valid
openearth/mmi-python
mmi/runner.py
https://github.com/openearth/mmi-python/blob/a2f4ac96b1e7f2fa903f668b3e05c4e86ad42e8d/mmi/runner.py#L92-L110
def create_ports(port, mpi, rank): """create a list of ports for the current rank""" if port == "random" or port is None: # ports will be filled in using random binding ports = {} else: port = int(port) ports = { "REQ": port + 0, "PUSH": port + 1, "SUB": port + 2 } # if we want to communicate with separate domains # we have to setup a socket for each of them if mpi == 'all': # use a socket for each rank rank for port in ports: ports[port] += (rank * 3) return ports
[ "def", "create_ports", "(", "port", ",", "mpi", ",", "rank", ")", ":", "if", "port", "==", "\"random\"", "or", "port", "is", "None", ":", "# ports will be filled in using random binding", "ports", "=", "{", "}", "else", ":", "port", "=", "int", "(", "port"...
create a list of ports for the current rank
[ "create", "a", "list", "of", "ports", "for", "the", "current", "rank" ]
python
train
KnorrFG/pyparadigm
pyparadigm/extras.py
https://github.com/KnorrFG/pyparadigm/blob/69944cdf3ce2f6414ae1aa1d27a0d8c6e5fb3fd3/pyparadigm/extras.py#L20-L23
def to_24bit_gray(mat: np.ndarray): """returns a matrix that contains RGB channels, and colors scaled from 0 to 255""" return np.repeat(np.expand_dims(_normalize(mat), axis=2), 3, axis=2)
[ "def", "to_24bit_gray", "(", "mat", ":", "np", ".", "ndarray", ")", ":", "return", "np", ".", "repeat", "(", "np", ".", "expand_dims", "(", "_normalize", "(", "mat", ")", ",", "axis", "=", "2", ")", ",", "3", ",", "axis", "=", "2", ")" ]
returns a matrix that contains RGB channels, and colors scaled from 0 to 255
[ "returns", "a", "matrix", "that", "contains", "RGB", "channels", "and", "colors", "scaled", "from", "0", "to", "255" ]
python
train
joke2k/faker
faker/utils/datasets.py
https://github.com/joke2k/faker/blob/965824b61132e52d92d1a6ce470396dbbe01c96c/faker/utils/datasets.py#L8-L24
def add_dicts(*args): """ Adds two or more dicts together. Common keys will have their values added. For example:: >>> t1 = {'a':1, 'b':2} >>> t2 = {'b':1, 'c':3} >>> t3 = {'d':4} >>> add_dicts(t1, t2, t3) {'a': 1, 'c': 3, 'b': 3, 'd': 4} """ counters = [Counter(arg) for arg in args] return dict(reduce(operator.add, counters))
[ "def", "add_dicts", "(", "*", "args", ")", ":", "counters", "=", "[", "Counter", "(", "arg", ")", "for", "arg", "in", "args", "]", "return", "dict", "(", "reduce", "(", "operator", ".", "add", ",", "counters", ")", ")" ]
Adds two or more dicts together. Common keys will have their values added. For example:: >>> t1 = {'a':1, 'b':2} >>> t2 = {'b':1, 'c':3} >>> t3 = {'d':4} >>> add_dicts(t1, t2, t3) {'a': 1, 'c': 3, 'b': 3, 'd': 4}
[ "Adds", "two", "or", "more", "dicts", "together", ".", "Common", "keys", "will", "have", "their", "values", "added", "." ]
python
train
Opentrons/opentrons
api/src/opentrons/helpers/helpers.py
https://github.com/Opentrons/opentrons/blob/a7c15cc2636ecb64ab56c7edc1d8a57163aaeadf/api/src/opentrons/helpers/helpers.py#L145-L156
def _compress_for_repeater(max_vol, plan, **kwargs): """ Reduce size of transfer plan, if mode is distribute or consolidate """ max_vol = float(max_vol) mode = kwargs.get('mode', 'transfer') if mode == 'distribute': # combine target volumes into single aspirate return _compress_for_distribute(max_vol, plan, **kwargs) if mode == 'consolidate': # combine target volumes into multiple aspirates return _compress_for_consolidate(max_vol, plan, **kwargs) else: return plan
[ "def", "_compress_for_repeater", "(", "max_vol", ",", "plan", ",", "*", "*", "kwargs", ")", ":", "max_vol", "=", "float", "(", "max_vol", ")", "mode", "=", "kwargs", ".", "get", "(", "'mode'", ",", "'transfer'", ")", "if", "mode", "==", "'distribute'", ...
Reduce size of transfer plan, if mode is distribute or consolidate
[ "Reduce", "size", "of", "transfer", "plan", "if", "mode", "is", "distribute", "or", "consolidate" ]
python
train
google/grumpy
third_party/stdlib/optparse.py
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/optparse.py#L1706-L1728
def _match_abbrev(s, wordmap): """_match_abbrev(s : string, wordmap : {string : Option}) -> string Return the string key in 'wordmap' for which 's' is an unambiguous abbreviation. If 's' is found to be ambiguous or doesn't match any of 'words', raise BadOptionError. """ # Is there an exact match? if s in wordmap: return s else: # Isolate all words with s as a prefix. possibilities = [word for word in wordmap.keys() if word.startswith(s)] # No exact match, so there had better be just one possibility. if len(possibilities) == 1: return possibilities[0] elif not possibilities: raise BadOptionError(s) else: # More than one possible completion: ambiguous prefix. possibilities.sort() raise AmbiguousOptionError(s, possibilities)
[ "def", "_match_abbrev", "(", "s", ",", "wordmap", ")", ":", "# Is there an exact match?", "if", "s", "in", "wordmap", ":", "return", "s", "else", ":", "# Isolate all words with s as a prefix.", "possibilities", "=", "[", "word", "for", "word", "in", "wordmap", "...
_match_abbrev(s : string, wordmap : {string : Option}) -> string Return the string key in 'wordmap' for which 's' is an unambiguous abbreviation. If 's' is found to be ambiguous or doesn't match any of 'words', raise BadOptionError.
[ "_match_abbrev", "(", "s", ":", "string", "wordmap", ":", "{", "string", ":", "Option", "}", ")", "-", ">", "string" ]
python
valid
fastai/fastai
fastai/tabular/transform.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/tabular/transform.py#L106-L113
def cont_cat_split(df, max_card=20, dep_var=None)->Tuple[List,List]: "Helper function that returns column names of cont and cat variables from given df." cont_names, cat_names = [], [] for label in df: if label == dep_var: continue if df[label].dtype == int and df[label].unique().shape[0] > max_card or df[label].dtype == float: cont_names.append(label) else: cat_names.append(label) return cont_names, cat_names
[ "def", "cont_cat_split", "(", "df", ",", "max_card", "=", "20", ",", "dep_var", "=", "None", ")", "->", "Tuple", "[", "List", ",", "List", "]", ":", "cont_names", ",", "cat_names", "=", "[", "]", ",", "[", "]", "for", "label", "in", "df", ":", "i...
Helper function that returns column names of cont and cat variables from given df.
[ "Helper", "function", "that", "returns", "column", "names", "of", "cont", "and", "cat", "variables", "from", "given", "df", "." ]
python
train
numberoverzero/bloop
bloop/models.py
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/models.py#L943-L1047
def bind_index(model, name, index, force=False, recursive=True, copy=False) -> Index: """Bind an index to the model with the given name. This method is primarily used during BaseModel.__init_subclass__, although it can be used to easily attach a new index to an existing model: .. code-block:: python import bloop.models class User(BaseModel): id = Column(String, hash_key=True) email = Column(String, dynamo_name="e") by_email = GlobalSecondaryIndex(projection="keys", hash_key="email") bound = bloop.models.bind_index(User, "by_email", by_email) assert bound is by_email # rebind with force, and use a copy bound = bloop.models.bind_index(User, "by_email", by_email, force=True, copy=True) assert bound is not by_email If ``name`` or the index's ``dynamo_name`` conflicts with an existing column or index on the model, raises :exc:`~bloop.exceptions.InvalidModel` unless ``force`` is True. If ``recursive`` is ``True`` and there are existing subclasses of ``model``, a copy of the index will attempt to bind to each subclass. The recursive calls will not force the bind, and will always use a new copy. If ``copy`` is ``True`` then a copy of the provided index is used. This uses a shallow copy via :meth:`~bloop.models.Index.__copy__`. :param model: The model to bind the index to. :param name: The name to bind the index as. In effect, used for ``setattr(model, name, index)`` :param index: The index to bind to the model. :param force: Unbind existing columns or indexes with the same name or dynamo_name. Default is False. :param recursive: Bind to each subclass of this model. Default is False. :param copy: Use a copy of the index instead of the index directly. Default is False. :return: The bound index. This is a new column when ``copy`` is True, otherwise the input index. 
""" if not subclassof(model, BaseModel): raise InvalidModel(f"{model} is not a subclass of BaseModel") meta = model.Meta if copy: index = copyfn(index) # TODO elif index.model is not None: logger.warning(f"Trying to rebind index bound to {index.model}") index._name = name safe_repr = unbound_repr(index) # Guard against name, dynamo_name collisions; if force=True, unbind any matches same_dynamo_name = ( util.index(meta.columns, "dynamo_name").get(index.dynamo_name) or util.index(meta.indexes, "dynamo_name").get(index.dynamo_name) ) same_name = ( meta.columns_by_name.get(index.name) or util.index(meta.indexes, "name").get(index.name) ) if isinstance(index, LocalSecondaryIndex) and not meta.range_key: raise InvalidModel("An LSI requires the Model to have a range key.") if force: if same_name: unbind(meta, name=index.name) if same_dynamo_name: unbind(meta, dynamo_name=index.dynamo_name) else: if same_name: raise InvalidModel( f"The index {safe_repr} has the same name as an existing index " f"or column {same_name}. Did you mean to bind with force=True?") if same_dynamo_name: raise InvalidModel( f"The index {safe_repr} has the same dynamo_name as an existing " f"index or column {same_name}. Did you mean to bind with force=True?") # success! # -------------------------------- index.model = meta.model meta.indexes.add(index) setattr(meta.model, name, index) if isinstance(index, LocalSecondaryIndex): meta.lsis.add(index) if isinstance(index, GlobalSecondaryIndex): meta.gsis.add(index) try: refresh_index(meta, index) except KeyError as e: raise InvalidModel("Index expected a hash or range key that does not exist") from e if recursive: for subclass in util.walk_subclasses(meta.model): try: bind_index(subclass, name, index, force=False, recursive=False, copy=True) except InvalidModel: pass return index
[ "def", "bind_index", "(", "model", ",", "name", ",", "index", ",", "force", "=", "False", ",", "recursive", "=", "True", ",", "copy", "=", "False", ")", "->", "Index", ":", "if", "not", "subclassof", "(", "model", ",", "BaseModel", ")", ":", "raise",...
Bind an index to the model with the given name. This method is primarily used during BaseModel.__init_subclass__, although it can be used to easily attach a new index to an existing model: .. code-block:: python import bloop.models class User(BaseModel): id = Column(String, hash_key=True) email = Column(String, dynamo_name="e") by_email = GlobalSecondaryIndex(projection="keys", hash_key="email") bound = bloop.models.bind_index(User, "by_email", by_email) assert bound is by_email # rebind with force, and use a copy bound = bloop.models.bind_index(User, "by_email", by_email, force=True, copy=True) assert bound is not by_email If ``name`` or the index's ``dynamo_name`` conflicts with an existing column or index on the model, raises :exc:`~bloop.exceptions.InvalidModel` unless ``force`` is True. If ``recursive`` is ``True`` and there are existing subclasses of ``model``, a copy of the index will attempt to bind to each subclass. The recursive calls will not force the bind, and will always use a new copy. If ``copy`` is ``True`` then a copy of the provided index is used. This uses a shallow copy via :meth:`~bloop.models.Index.__copy__`. :param model: The model to bind the index to. :param name: The name to bind the index as. In effect, used for ``setattr(model, name, index)`` :param index: The index to bind to the model. :param force: Unbind existing columns or indexes with the same name or dynamo_name. Default is False. :param recursive: Bind to each subclass of this model. Default is False. :param copy: Use a copy of the index instead of the index directly. Default is False. :return: The bound index. This is a new column when ``copy`` is True, otherwise the input index.
[ "Bind", "an", "index", "to", "the", "model", "with", "the", "given", "name", "." ]
python
train
DistrictDataLabs/yellowbrick
yellowbrick/utils/decorators.py
https://github.com/DistrictDataLabs/yellowbrick/blob/59b67236a3862c73363e8edad7cd86da5b69e3b2/yellowbrick/utils/decorators.py#L27-L51
def memoized(fget): """ Return a property attribute for new-style classes that only calls its getter on the first access. The result is stored and on subsequent accesses is returned, preventing the need to call the getter any more. Parameters ---------- fget: function The getter method to memoize for subsequent access. See also -------- python-memoized-property `python-memoized-property <https://github.com/estebistec/python-memoized-property>`_ """ attr_name = '_{0}'.format(fget.__name__) @wraps(fget) def fget_memoized(self): if not hasattr(self, attr_name): setattr(self, attr_name, fget(self)) return getattr(self, attr_name) return property(fget_memoized)
[ "def", "memoized", "(", "fget", ")", ":", "attr_name", "=", "'_{0}'", ".", "format", "(", "fget", ".", "__name__", ")", "@", "wraps", "(", "fget", ")", "def", "fget_memoized", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "attr_nam...
Return a property attribute for new-style classes that only calls its getter on the first access. The result is stored and on subsequent accesses is returned, preventing the need to call the getter any more. Parameters ---------- fget: function The getter method to memoize for subsequent access. See also -------- python-memoized-property `python-memoized-property <https://github.com/estebistec/python-memoized-property>`_
[ "Return", "a", "property", "attribute", "for", "new", "-", "style", "classes", "that", "only", "calls", "its", "getter", "on", "the", "first", "access", ".", "The", "result", "is", "stored", "and", "on", "subsequent", "accesses", "is", "returned", "preventin...
python
train
DigitalGlobe/gbdxtools
gbdxtools/simpleworkflows.py
https://github.com/DigitalGlobe/gbdxtools/blob/def62f8f2d77b168aa2bd115290aaa0f9a08a4bb/gbdxtools/simpleworkflows.py#L769-L806
def stderr(self): '''Get stderr from all the tasks of a workflow. Returns: (list): tasks with their stderr Example: >>> workflow.stderr [ { "id": "4488895771403082552", "taskType": "AOP_Strip_Processor", "name": "Task1", "stderr": "............" } ] ''' if not self.id: raise WorkflowError('Workflow is not running. Cannot get stderr.') if self.batch_values: raise NotImplementedError("Query Each Workflow Id within the Batch Workflow for stderr.") wf = self.workflow.get(self.id) stderr_list = [] for task in wf['tasks']: stderr_list.append( { 'id': task['id'], 'taskType': task['taskType'], 'name': task['name'], 'stderr': self.workflow.get_stderr(self.id, task['id']) } ) return stderr_list
[ "def", "stderr", "(", "self", ")", ":", "if", "not", "self", ".", "id", ":", "raise", "WorkflowError", "(", "'Workflow is not running. Cannot get stderr.'", ")", "if", "self", ".", "batch_values", ":", "raise", "NotImplementedError", "(", "\"Query Each Workflow Id ...
Get stderr from all the tasks of a workflow. Returns: (list): tasks with their stderr Example: >>> workflow.stderr [ { "id": "4488895771403082552", "taskType": "AOP_Strip_Processor", "name": "Task1", "stderr": "............" } ]
[ "Get", "stderr", "from", "all", "the", "tasks", "of", "a", "workflow", "." ]
python
valid
Hackerfleet/hfos
hfos/tool/templates.py
https://github.com/Hackerfleet/hfos/blob/b6df14eacaffb6be5c844108873ff8763ec7f0c9/hfos/tool/templates.py#L53-L63
def write_template_file(source, target, content): """Write a new file from a given pystache template file and content""" # print(formatTemplateFile(source, content)) print(target) data = format_template_file(source, content) with open(target, 'w') as f: for line in data: if type(line) != str: line = line.encode('utf-8') f.write(line)
[ "def", "write_template_file", "(", "source", ",", "target", ",", "content", ")", ":", "# print(formatTemplateFile(source, content))", "print", "(", "target", ")", "data", "=", "format_template_file", "(", "source", ",", "content", ")", "with", "open", "(", "target...
Write a new file from a given pystache template file and content
[ "Write", "a", "new", "file", "from", "a", "given", "pystache", "template", "file", "and", "content" ]
python
train
EventTeam/beliefs
src/beliefs/cells/posets.py
https://github.com/EventTeam/beliefs/blob/c07d22b61bebeede74a72800030dde770bf64208/src/beliefs/cells/posets.py#L67-L78
def set_domain(clz, dag): """ Sets the domain. Should only be called once per class instantiation. """ logging.info("Setting domain for poset %s" % clz.__name__) if nx.number_of_nodes(dag) == 0: raise CellConstructionFailure("Empty DAG structure.") if not nx.is_directed_acyclic_graph(dag): raise CellConstructionFailure("Must be directed and acyclic") if not nx.is_weakly_connected(dag): raise CellConstructionFailure("Must be connected") clz.domain_map[clz] = dag
[ "def", "set_domain", "(", "clz", ",", "dag", ")", ":", "logging", ".", "info", "(", "\"Setting domain for poset %s\"", "%", "clz", ".", "__name__", ")", "if", "nx", ".", "number_of_nodes", "(", "dag", ")", "==", "0", ":", "raise", "CellConstructionFailure", ...
Sets the domain. Should only be called once per class instantiation.
[ "Sets", "the", "domain", ".", "Should", "only", "be", "called", "once", "per", "class", "instantiation", "." ]
python
train
riga/scinum
scinum.py
https://github.com/riga/scinum/blob/55eb6d8aa77beacee5a07443392954b8a0aad8cb/scinum.py#L1433-L1460
def match_precision(val, ref, *args, **kwargs): """ Returns a string version of a value *val* matching the significant digits as given in *ref*. *val* might also be a numpy array. All remaining *args* and *kwargs* are forwarded to ``Decimal.quantize``. Example: .. code-block:: python match_precision(1.234, ".1") # -> "1.2" match_precision(1.234, "1.") # -> "1" match_precision(1.234, ".1", decimal.ROUND_UP) # -> "1.3" a = np.array([1.234, 5.678, -9.101]) match_precision(a, ".1") # -> ["1.2", "5.7", "-9.1"] """ val = ensure_nominal(val) if not is_numpy(val): ret = _match_precision(val, ref, *args, **kwargs) else: # strategy: map into a flat list, create chararray with max itemsize, reshape strings = [_match_precision(v, r, *args, **kwargs) for v, r in np.nditer([val, ref])] ret = np.chararray(len(strings), itemsize=max(len(s) for s in strings)) ret[:] = strings ret = ret.reshape(val.shape) return ret
[ "def", "match_precision", "(", "val", ",", "ref", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "val", "=", "ensure_nominal", "(", "val", ")", "if", "not", "is_numpy", "(", "val", ")", ":", "ret", "=", "_match_precision", "(", "val", ",", "r...
Returns a string version of a value *val* matching the significant digits as given in *ref*. *val* might also be a numpy array. All remaining *args* and *kwargs* are forwarded to ``Decimal.quantize``. Example: .. code-block:: python match_precision(1.234, ".1") # -> "1.2" match_precision(1.234, "1.") # -> "1" match_precision(1.234, ".1", decimal.ROUND_UP) # -> "1.3" a = np.array([1.234, 5.678, -9.101]) match_precision(a, ".1") # -> ["1.2", "5.7", "-9.1"]
[ "Returns", "a", "string", "version", "of", "a", "value", "*", "val", "*", "matching", "the", "significant", "digits", "as", "given", "in", "*", "ref", "*", ".", "*", "val", "*", "might", "also", "be", "a", "numpy", "array", ".", "All", "remaining", "...
python
train
mattja/distob
distob/arrays.py
https://github.com/mattja/distob/blob/b0fc49e157189932c70231077ed35e1aa5717da9/distob/arrays.py#L1030-L1062
def mean(self, axis=None, dtype=None, out=None, keepdims=False): """Compute the arithmetic mean along the specified axis. See np.mean() for details.""" if axis == -1: axis = self.ndim if axis is None: results = vectorize(mean)(self, axis, dtype, keepdims=False) weights = self._sublengths res = np.average(results, axis=None, weights=weights) if keepdims: for i in range(self.ndim): res = expand_dims(res, res.ndim) elif axis == self._distaxis: results = vectorize(mean)(self, axis, dtype, keepdims=True) results = gather(results) # Average manually (np.average doesn't preserve ndarray subclasses) weights = (np.array(self._sublengths, dtype=np.float64) / sum(self._sublengths)) ix = [slice(None)] * self.ndim ix[axis] = 0 res = results[ix] * weights[0] for i in range(1, self._n): ix[axis] = i res = res + results[ix] * weights[i] if keepdims: res = expand_dims(res, axis) else: res = vectorize(mean)(self, axis, dtype, keepdims=False) if keepdims: res = expand_dims(res, axis) if out is not None: out[:] = res return res
[ "def", "mean", "(", "self", ",", "axis", "=", "None", ",", "dtype", "=", "None", ",", "out", "=", "None", ",", "keepdims", "=", "False", ")", ":", "if", "axis", "==", "-", "1", ":", "axis", "=", "self", ".", "ndim", "if", "axis", "is", "None", ...
Compute the arithmetic mean along the specified axis. See np.mean() for details.
[ "Compute", "the", "arithmetic", "mean", "along", "the", "specified", "axis", ".", "See", "np", ".", "mean", "()", "for", "details", "." ]
python
valid
PmagPy/PmagPy
dialogs/pmag_gui_dialogs2.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/pmag_gui_dialogs2.py#L2697-L2782
def on_m_calc_orient(self,event): ''' This fucntion does exactly what the 'import orientation' fuction does in MagIC.py after some dialog boxes the function calls orientation_magic.py ''' # first see if demag_orient.txt self.on_m_save_file(None) orient_convention_dia = orient_convention(None) orient_convention_dia.Center() #orient_convention_dia.ShowModal() if orient_convention_dia.ShowModal() == wx.ID_OK: ocn_flag = orient_convention_dia.ocn_flag dcn_flag = orient_convention_dia.dcn_flag gmt_flags = orient_convention_dia.gmt_flags orient_convention_dia.Destroy() else: return or_con = orient_convention_dia.ocn dec_correction_con = int(orient_convention_dia.dcn) try: hours_from_gmt = float(orient_convention_dia.gmt) except: hours_from_gmt = 0 try: dec_correction = float(orient_convention_dia.correct_dec) except: dec_correction = 0 method_code_dia=method_code_dialog(None) method_code_dia.Center() if method_code_dia.ShowModal() == wx.ID_OK: bedding_codes_flags=method_code_dia.bedding_codes_flags methodcodes_flags=method_code_dia.methodcodes_flags method_code_dia.Destroy() else: print("-I- Canceling calculation") return method_codes = method_code_dia.methodcodes average_bedding = method_code_dia.average_bedding bed_correction = method_code_dia.bed_correction command_args=['orientation_magic.py'] command_args.append("-WD %s"%self.WD) command_args.append("-Fsa er_samples_orient.txt") command_args.append("-Fsi er_sites_orient.txt ") command_args.append("-f %s"%"demag_orient.txt") command_args.append(ocn_flag) command_args.append(dcn_flag) command_args.append(gmt_flags) command_args.append(bedding_codes_flags) command_args.append(methodcodes_flags) commandline = " ".join(command_args) print("-I- executing command: %s" %commandline) os.chdir(self.WD) if os.path.exists(os.path.join(self.WD, 'er_samples.txt')) or os.path.exists(os.path.join(self.WD, 'er_sites.txt')): append = True else: append = False samp_file = "er_samples.txt" site_file = "er_sites.txt" success, 
error_message = ipmag.orientation_magic(or_con, dec_correction_con, dec_correction, bed_correction, hours_from_gmt=hours_from_gmt, method_codes=method_codes, average_bedding=average_bedding, orient_file='demag_orient.txt', samp_file=samp_file, site_file=site_file, input_dir_path=self.WD, output_dir_path=self.WD, append=append, data_model=3) if not success: dlg1 = wx.MessageDialog(None,caption="Message:", message="-E- ERROR: Error in running orientation_magic.py\n{}".format(error_message) ,style=wx.OK|wx.ICON_INFORMATION) dlg1.ShowModal() dlg1.Destroy() print("-E- ERROR: Error in running orientation_magic.py") return else: dlg2 = wx.MessageDialog(None,caption="Message:", message="-I- Successfully ran orientation_magic", style=wx.OK|wx.ICON_INFORMATION) dlg2.ShowModal() dlg2.Destroy() self.Parent.Show() self.Parent.Raise() self.Destroy() self.contribution.add_magic_table('samples') return
[ "def", "on_m_calc_orient", "(", "self", ",", "event", ")", ":", "# first see if demag_orient.txt", "self", ".", "on_m_save_file", "(", "None", ")", "orient_convention_dia", "=", "orient_convention", "(", "None", ")", "orient_convention_dia", ".", "Center", "(", ")",...
This fucntion does exactly what the 'import orientation' fuction does in MagIC.py after some dialog boxes the function calls orientation_magic.py
[ "This", "fucntion", "does", "exactly", "what", "the", "import", "orientation", "fuction", "does", "in", "MagIC", ".", "py", "after", "some", "dialog", "boxes", "the", "function", "calls", "orientation_magic", ".", "py" ]
python
train
nevimov/django-easycart
easycart/cart.py
https://github.com/nevimov/django-easycart/blob/81b7d7d4b197e34d21dcd8cb9eb9104b565041a9/easycart/cart.py#L437-L469
def create_items(self, session_items): """Instantiate cart items from session data. The value returned by this method is used to populate the cart's `items` attribute. Parameters ---------- session_items : dict A dictionary of pk-quantity mappings (each pk is a string). For example: ``{'1': 5, '3': 2}``. Returns ------- dict A map between the `session_items` keys and instances of :attr:`item_class`. For example:: {'1': <CartItem: obj=foo, quantity=5>, '3': <CartItem: obj=bar, quantity=2>} """ pks = list(session_items.keys()) items = {} item_class = self.item_class process_object = self.process_object for obj in self.get_queryset(pks): pk = str(obj.pk) obj = process_object(obj) items[pk] = item_class(obj, **session_items[pk]) if len(items) < len(session_items): self._stale_pks = set(session_items).difference(items) return items
[ "def", "create_items", "(", "self", ",", "session_items", ")", ":", "pks", "=", "list", "(", "session_items", ".", "keys", "(", ")", ")", "items", "=", "{", "}", "item_class", "=", "self", ".", "item_class", "process_object", "=", "self", ".", "process_o...
Instantiate cart items from session data. The value returned by this method is used to populate the cart's `items` attribute. Parameters ---------- session_items : dict A dictionary of pk-quantity mappings (each pk is a string). For example: ``{'1': 5, '3': 2}``. Returns ------- dict A map between the `session_items` keys and instances of :attr:`item_class`. For example:: {'1': <CartItem: obj=foo, quantity=5>, '3': <CartItem: obj=bar, quantity=2>}
[ "Instantiate", "cart", "items", "from", "session", "data", "." ]
python
train
pydata/xarray
xarray/core/variable.py
https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/core/variable.py#L2027-L2040
def broadcast_variables(*variables): """Given any number of variables, return variables with matching dimensions and broadcast data. The data on the returned variables will be a view of the data on the corresponding original arrays, but dimensions will be reordered and inserted so that both broadcast arrays have the same dimensions. The new dimensions are sorted in order of appearance in the first variable's dimensions followed by the second variable's dimensions. """ dims_map = _unified_dims(variables) dims_tuple = tuple(dims_map) return tuple(var.set_dims(dims_map) if var.dims != dims_tuple else var for var in variables)
[ "def", "broadcast_variables", "(", "*", "variables", ")", ":", "dims_map", "=", "_unified_dims", "(", "variables", ")", "dims_tuple", "=", "tuple", "(", "dims_map", ")", "return", "tuple", "(", "var", ".", "set_dims", "(", "dims_map", ")", "if", "var", "."...
Given any number of variables, return variables with matching dimensions and broadcast data. The data on the returned variables will be a view of the data on the corresponding original arrays, but dimensions will be reordered and inserted so that both broadcast arrays have the same dimensions. The new dimensions are sorted in order of appearance in the first variable's dimensions followed by the second variable's dimensions.
[ "Given", "any", "number", "of", "variables", "return", "variables", "with", "matching", "dimensions", "and", "broadcast", "data", "." ]
python
train
numenta/nupic
src/nupic/frameworks/opf/two_gram_model.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/frameworks/opf/two_gram_model.py#L152-L176
def read(cls, proto): """ :param proto: capnp TwoGramModelProto message reader """ instance = object.__new__(cls) super(TwoGramModel, instance).__init__(proto=proto.modelBase) instance._logger = opf_utils.initLogger(instance) instance._reset = proto.reset instance._hashToValueDict = {x.hash: x.value for x in proto.hashToValueDict} instance._learningEnabled = proto.learningEnabled instance._encoder = encoders.MultiEncoder.read(proto.encoder) instance._fieldNames = instance._encoder.getScalarNames() instance._prevValues = list(proto.prevValues) instance._twoGramDicts = [dict() for _ in xrange(len(proto.twoGramDicts))] for idx, field in enumerate(proto.twoGramDicts): for entry in field: prev = None if entry.value == -1 else entry.value instance._twoGramDicts[idx][prev] = collections.defaultdict(int) for bucket in entry.buckets: instance._twoGramDicts[idx][prev][bucket.index] = bucket.count return instance
[ "def", "read", "(", "cls", ",", "proto", ")", ":", "instance", "=", "object", ".", "__new__", "(", "cls", ")", "super", "(", "TwoGramModel", ",", "instance", ")", ".", "__init__", "(", "proto", "=", "proto", ".", "modelBase", ")", "instance", ".", "_...
:param proto: capnp TwoGramModelProto message reader
[ ":", "param", "proto", ":", "capnp", "TwoGramModelProto", "message", "reader" ]
python
valid
Nexmo/nexmo-python
nexmo/__init__.py
https://github.com/Nexmo/nexmo-python/blob/c5300541233f3dbd7319c7d2ca6d9f7f70404d11/nexmo/__init__.py#L172-L188
def submit_sms_conversion(self, message_id, delivered=True, timestamp=None): """ Notify Nexmo that an SMS was successfully received. :param message_id: The `message-id` str returned by the send_message call. :param delivered: A `bool` indicating that the message was or was not successfully delivered. :param timestamp: A `datetime` object containing the time the SMS arrived. :return: The parsed response from the server. On success, the bytestring b'OK' """ params = { "message-id": message_id, "delivered": delivered, "timestamp": timestamp or datetime.now(pytz.utc), } # Ensure timestamp is a string: _format_date_param(params, "timestamp") return self.post(self.api_host, "/conversions/sms", params)
[ "def", "submit_sms_conversion", "(", "self", ",", "message_id", ",", "delivered", "=", "True", ",", "timestamp", "=", "None", ")", ":", "params", "=", "{", "\"message-id\"", ":", "message_id", ",", "\"delivered\"", ":", "delivered", ",", "\"timestamp\"", ":", ...
Notify Nexmo that an SMS was successfully received. :param message_id: The `message-id` str returned by the send_message call. :param delivered: A `bool` indicating that the message was or was not successfully delivered. :param timestamp: A `datetime` object containing the time the SMS arrived. :return: The parsed response from the server. On success, the bytestring b'OK'
[ "Notify", "Nexmo", "that", "an", "SMS", "was", "successfully", "received", "." ]
python
train
gwpy/gwpy
gwpy/segments/io/hdf5.py
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/segments/io/hdf5.py#L239-L287
def write_hdf5_dict(flags, output, path=None, append=False, overwrite=False, **kwargs): """Write this `DataQualityFlag` to a `h5py.Group`. This allows writing to an HDF5-format file. Parameters ---------- output : `str`, :class:`h5py.Group` path to new output file, or open h5py `Group` to write to. path : `str` the HDF5 group path in which to write a new group for this flag **kwargs other keyword arguments passed to :meth:`h5py.Group.create_dataset` Returns ------- dqfgroup : :class:`h5py.Group` HDF group containing these data. This group contains 'active' and 'known' datasets, and metadata attrs. See also -------- astropy.io for details on acceptable keyword arguments when writing a :class:`~astropy.table.Table` to HDF5 """ if path: try: parent = output[path] except KeyError: parent = output.create_group(path) else: parent = output for name in flags: # handle existing group if name in parent: if not (overwrite and append): raise IOError("Group '%s' already exists, give ``append=True, " "overwrite=True`` to overwrite it" % os.path.join(parent.name, name)) del parent[name] # create group group = parent.create_group(name) # write flag write_hdf5_flag_group(flags[name], group, **kwargs)
[ "def", "write_hdf5_dict", "(", "flags", ",", "output", ",", "path", "=", "None", ",", "append", "=", "False", ",", "overwrite", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "path", ":", "try", ":", "parent", "=", "output", "[", "path", "]...
Write this `DataQualityFlag` to a `h5py.Group`. This allows writing to an HDF5-format file. Parameters ---------- output : `str`, :class:`h5py.Group` path to new output file, or open h5py `Group` to write to. path : `str` the HDF5 group path in which to write a new group for this flag **kwargs other keyword arguments passed to :meth:`h5py.Group.create_dataset` Returns ------- dqfgroup : :class:`h5py.Group` HDF group containing these data. This group contains 'active' and 'known' datasets, and metadata attrs. See also -------- astropy.io for details on acceptable keyword arguments when writing a :class:`~astropy.table.Table` to HDF5
[ "Write", "this", "DataQualityFlag", "to", "a", "h5py", ".", "Group", "." ]
python
train
OldhamMade/PySO8601
PySO8601/durations.py
https://github.com/OldhamMade/PySO8601/blob/b7d3b3cb3ed3e12eb2a21caa26a3abeab3c96fe4/PySO8601/durations.py#L12-L28
def parse_duration(duration, start=None, end=None): """ Attepmt to parse an ISO8601 formatted duration. Accepts a ``duration`` and optionally a start or end ``datetime``. ``duration`` must be an ISO8601 formatted string. Returns a ``datetime.timedelta`` object. """ if not start and not end: return parse_simple_duration(duration) if start: return parse_duration_with_start(start, duration) if end: return parse_duration_with_end(duration, end)
[ "def", "parse_duration", "(", "duration", ",", "start", "=", "None", ",", "end", "=", "None", ")", ":", "if", "not", "start", "and", "not", "end", ":", "return", "parse_simple_duration", "(", "duration", ")", "if", "start", ":", "return", "parse_duration_w...
Attepmt to parse an ISO8601 formatted duration. Accepts a ``duration`` and optionally a start or end ``datetime``. ``duration`` must be an ISO8601 formatted string. Returns a ``datetime.timedelta`` object.
[ "Attepmt", "to", "parse", "an", "ISO8601", "formatted", "duration", "." ]
python
train
Opentrons/opentrons
api/src/opentrons/protocol_api/labware.py
https://github.com/Opentrons/opentrons/blob/a7c15cc2636ecb64ab56c7edc1d8a57163aaeadf/api/src/opentrons/protocol_api/labware.py#L452-L482
def next_tip(self, num_tips: int = 1) -> Optional[Well]: """ Find the next valid well for pick-up. Determines the next valid start tip from which to retrieve the specified number of tips. There must be at least `num_tips` sequential wells for which all wells have tips, in the same column. :param num_tips: target number of sequential tips in the same column :type num_tips: int :return: the :py:class:`.Well` meeting the target criteria, or None """ assert num_tips > 0 columns: List[List[Well]] = self.columns() drop_leading_empties = [ list(dropwhile(lambda x: not x.has_tip, column)) for column in columns] drop_at_first_gap = [ list(takewhile(lambda x: x.has_tip, column)) for column in drop_leading_empties] long_enough = [ column for column in drop_at_first_gap if len(column) >= num_tips] try: first_long_enough = long_enough[0] result: Optional[Well] = first_long_enough[0] except IndexError: result = None return result
[ "def", "next_tip", "(", "self", ",", "num_tips", ":", "int", "=", "1", ")", "->", "Optional", "[", "Well", "]", ":", "assert", "num_tips", ">", "0", "columns", ":", "List", "[", "List", "[", "Well", "]", "]", "=", "self", ".", "columns", "(", ")"...
Find the next valid well for pick-up. Determines the next valid start tip from which to retrieve the specified number of tips. There must be at least `num_tips` sequential wells for which all wells have tips, in the same column. :param num_tips: target number of sequential tips in the same column :type num_tips: int :return: the :py:class:`.Well` meeting the target criteria, or None
[ "Find", "the", "next", "valid", "well", "for", "pick", "-", "up", "." ]
python
train
fastai/fastai
fastai/callbacks/tensorboard.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/callbacks/tensorboard.py#L221-L228
def _queue_processor(self)->None: "Processes queued up write requests asynchronously to Tensorboard." while not self.stop_request.isSet(): while not self.queue.empty(): if self.stop_request.isSet(): return request = self.queue.get() request.write() sleep(0.2)
[ "def", "_queue_processor", "(", "self", ")", "->", "None", ":", "while", "not", "self", ".", "stop_request", ".", "isSet", "(", ")", ":", "while", "not", "self", ".", "queue", ".", "empty", "(", ")", ":", "if", "self", ".", "stop_request", ".", "isSe...
Processes queued up write requests asynchronously to Tensorboard.
[ "Processes", "queued", "up", "write", "requests", "asynchronously", "to", "Tensorboard", "." ]
python
train
StyXman/ayrton
ayrton/parser/error.py
https://github.com/StyXman/ayrton/blob/e1eed5c7ef230e3c2340a1f0bf44c72bbdc0debb/ayrton/parser/error.py#L147-L164
def print_detailed_traceback(self, space=None, file=None): """NOT_RPYTHON: Dump a nice detailed interpreter- and application-level traceback, useful to debug the interpreter.""" if file is None: file = sys.stderr f = io.StringIO() for i in range(len(self.debug_excs)-1, -1, -1): print >> f, "Traceback (interpreter-level):" traceback.print_tb(self.debug_excs[i][2], file=f) f.seek(0) debug_print(''.join(['|| ' + line for line in f.readlines()]), file) if self.debug_excs: from pypy.tool import tb_server tb_server.publish_exc(self.debug_excs[-1]) self.print_app_tb_only(file) print >> file, '(application-level)', self.errorstr(space) if AUTO_DEBUG: debug.fire(self)
[ "def", "print_detailed_traceback", "(", "self", ",", "space", "=", "None", ",", "file", "=", "None", ")", ":", "if", "file", "is", "None", ":", "file", "=", "sys", ".", "stderr", "f", "=", "io", ".", "StringIO", "(", ")", "for", "i", "in", "range",...
NOT_RPYTHON: Dump a nice detailed interpreter- and application-level traceback, useful to debug the interpreter.
[ "NOT_RPYTHON", ":", "Dump", "a", "nice", "detailed", "interpreter", "-", "and", "application", "-", "level", "traceback", "useful", "to", "debug", "the", "interpreter", "." ]
python
train
tensorflow/tensor2tensor
tensor2tensor/models/xception.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/xception.py#L33-L45
def residual_block(x, hparams): """A stack of convolution blocks with residual connection.""" k = (hparams.kernel_height, hparams.kernel_width) dilations_and_kernels = [((1, 1), k) for _ in range(3)] y = common_layers.subseparable_conv_block( x, hparams.hidden_size, dilations_and_kernels, padding="SAME", separability=0, name="residual_block") x = common_layers.layer_norm(x + y, hparams.hidden_size, name="lnorm") return tf.nn.dropout(x, 1.0 - hparams.dropout)
[ "def", "residual_block", "(", "x", ",", "hparams", ")", ":", "k", "=", "(", "hparams", ".", "kernel_height", ",", "hparams", ".", "kernel_width", ")", "dilations_and_kernels", "=", "[", "(", "(", "1", ",", "1", ")", ",", "k", ")", "for", "_", "in", ...
A stack of convolution blocks with residual connection.
[ "A", "stack", "of", "convolution", "blocks", "with", "residual", "connection", "." ]
python
train
senaite/senaite.core
bika/lims/upgrade/v01_03_000.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/upgrade/v01_03_000.py#L1758-L1764
def remove_stale_css(portal): """Removes stale CSS """ logger.info("Removing stale css ...") for css in CSS_TO_REMOVE: logger.info("Unregistering CSS %s" % css) portal.portal_css.unregisterResource(css)
[ "def", "remove_stale_css", "(", "portal", ")", ":", "logger", ".", "info", "(", "\"Removing stale css ...\"", ")", "for", "css", "in", "CSS_TO_REMOVE", ":", "logger", ".", "info", "(", "\"Unregistering CSS %s\"", "%", "css", ")", "portal", ".", "portal_css", "...
Removes stale CSS
[ "Removes", "stale", "CSS" ]
python
train
ninjaaron/libaaron
libaaron/libaaron.py
https://github.com/ninjaaron/libaaron/blob/a2ee417b784ca72c89c05bddb2e3e815a6b95154/libaaron/libaaron.py#L249-L255
def pipe(value, *functions, funcs=None): """pipe(value, f, g, h) == h(g(f(value)))""" if funcs: functions = funcs for function in functions: value = function(value) return value
[ "def", "pipe", "(", "value", ",", "*", "functions", ",", "funcs", "=", "None", ")", ":", "if", "funcs", ":", "functions", "=", "funcs", "for", "function", "in", "functions", ":", "value", "=", "function", "(", "value", ")", "return", "value" ]
pipe(value, f, g, h) == h(g(f(value)))
[ "pipe", "(", "value", "f", "g", "h", ")", "==", "h", "(", "g", "(", "f", "(", "value", ")))" ]
python
test
wummel/dosage
dosagelib/plugins/s.py
https://github.com/wummel/dosage/blob/a0109c3a46219f280e6e5e77183674e40da0f304/dosagelib/plugins/s.py#L481-L486
def namer(cls, imageUrl, pageUrl): """Use page URL to construct meaningful image name.""" parts, year, month, stripname = pageUrl.rsplit('/', 3) stripname = stripname.rsplit('.', 1)[0] parts, imagename = imageUrl.rsplit('/', 1) return '%s-%s-%s-%s' % (year, month, stripname, imagename)
[ "def", "namer", "(", "cls", ",", "imageUrl", ",", "pageUrl", ")", ":", "parts", ",", "year", ",", "month", ",", "stripname", "=", "pageUrl", ".", "rsplit", "(", "'/'", ",", "3", ")", "stripname", "=", "stripname", ".", "rsplit", "(", "'.'", ",", "1...
Use page URL to construct meaningful image name.
[ "Use", "page", "URL", "to", "construct", "meaningful", "image", "name", "." ]
python
train
infothrill/python-dyndnsc
dyndnsc/detector/webcheck.py
https://github.com/infothrill/python-dyndnsc/blob/2196d48aa6098da9835a7611fbdb0b5f0fbf51e4/dyndnsc/detector/webcheck.py#L63-L70
def _parser_jsonip(text): """Parse response text like the one returned by http://jsonip.com/.""" import json try: return str(json.loads(text).get("ip")) except ValueError as exc: LOG.debug("Text '%s' could not be parsed", exc_info=exc) return None
[ "def", "_parser_jsonip", "(", "text", ")", ":", "import", "json", "try", ":", "return", "str", "(", "json", ".", "loads", "(", "text", ")", ".", "get", "(", "\"ip\"", ")", ")", "except", "ValueError", "as", "exc", ":", "LOG", ".", "debug", "(", "\"...
Parse response text like the one returned by http://jsonip.com/.
[ "Parse", "response", "text", "like", "the", "one", "returned", "by", "http", ":", "//", "jsonip", ".", "com", "/", "." ]
python
train
google/grr
grr/server/grr_response_server/aff4_objects/aff4_grr.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/aff4_objects/aff4_grr.py#L636-L642
def Initialize(self): """Open the delegate object.""" if "r" in self.mode: delegate = self.Get(self.Schema.DELEGATE) if delegate: self.delegate = aff4.FACTORY.Open( delegate, mode=self.mode, token=self.token, age=self.age_policy)
[ "def", "Initialize", "(", "self", ")", ":", "if", "\"r\"", "in", "self", ".", "mode", ":", "delegate", "=", "self", ".", "Get", "(", "self", ".", "Schema", ".", "DELEGATE", ")", "if", "delegate", ":", "self", ".", "delegate", "=", "aff4", ".", "FAC...
Open the delegate object.
[ "Open", "the", "delegate", "object", "." ]
python
train