repo
stringlengths
7
55
path
stringlengths
4
223
url
stringlengths
87
315
code
stringlengths
75
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
avg_line_len
float64
7.91
980
mikedh/trimesh
trimesh/base.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/base.py#L2014-L2030
def apply_obb(self):
    """
    Apply the oriented bounding box transform to the current mesh.

    This will result in a mesh with an AABB centered at the
    origin and the same dimensions as the OBB.

    Returns
    ----------
    matrix : (4, 4) float
      Transformation matrix that was applied
      to mesh to move it into OBB frame
    """
    # invert the OBB frame so the mesh is moved INTO that frame
    obb_inverse = np.linalg.inv(
        self.bounding_box_oriented.primitive.transform)
    self.apply_transform(obb_inverse)
    return obb_inverse
[ "def", "apply_obb", "(", "self", ")", ":", "matrix", "=", "self", ".", "bounding_box_oriented", ".", "primitive", ".", "transform", "matrix", "=", "np", ".", "linalg", ".", "inv", "(", "matrix", ")", "self", ".", "apply_transform", "(", "matrix", ")", "r...
Apply the oriented bounding box transform to the current mesh. This will result in a mesh with an AABB centered at the origin and the same dimensions as the OBB. Returns ---------- matrix : (4, 4) float Transformation matrix that was applied to mesh to move it into OBB frame
[ "Apply", "the", "oriented", "bounding", "box", "transform", "to", "the", "current", "mesh", "." ]
python
train
31.529412
senaite/senaite.core
bika/lims/content/contact.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/content/contact.py#L110-L117
def getUser(self):
    """Returns the linked Plone User or None
    """
    # no linked username means no user can be resolved
    name = self.getUsername()
    if name:
        return api.user.get(userid=name)
    return None
[ "def", "getUser", "(", "self", ")", ":", "username", "=", "self", ".", "getUsername", "(", ")", "if", "not", "username", ":", "return", "None", "user", "=", "api", ".", "user", ".", "get", "(", "userid", "=", "username", ")", "return", "user" ]
Returns the linked Plone User or None
[ "Returns", "the", "linked", "Plone", "User", "or", "None" ]
python
train
28
theno/utlz
fabfile.py
https://github.com/theno/utlz/blob/bf7d2b53f3e0d35c6f8ded81f3f774a74fcd3389/fabfile.py#L135-L167
def pythons():
    '''Install latest pythons with pyenv.

    The python version will be activated in the projects base dir.

    Will skip already installed latest python versions.
    '''
    # bail out (exit code 1) when pyenv is missing; suggest fabsetup as
    # the installation route
    if not _pyenv_exists():
        print('\npyenv is not installed. You can install it with fabsetup '
              '(https://github.com/theno/fabsetup):\n\n ' +
              cyan('mkdir ~/repos && cd ~/repos\n '
                   'git clone https://github.com/theno/fabsetup.git\n '
                   'cd fabsetup && fab setup.pyenv -H localhost'))
        return 1

    latest_pythons = _determine_latest_pythons()

    print(cyan('\n## install latest python versions'))
    # --skip-existing makes this idempotent for already-installed versions
    for version in latest_pythons:
        local(flo('pyenv install --skip-existing {version}'))

    print(cyan('\n## activate pythons'))
    # activate system python plus every latest python in the project dir
    basedir = dirname(__file__)
    latest_pythons_str = ' '.join(latest_pythons)
    local(flo('cd {basedir} && pyenv local system {latest_pythons_str}'))

    # prepare only the newest python for testing/packaging tooling
    highest_python = latest_pythons[-1]
    print(cyan(flo(
        '\n## prepare Python-{highest_python} for testing and packaging')))
    packages_for_testing = 'pytest tox'
    packages_for_packaging = 'pypandoc twine'
    local(flo('~/.pyenv/versions/{highest_python}/bin/pip install --upgrade '
              'pip {packages_for_testing} {packages_for_packaging}'))
[ "def", "pythons", "(", ")", ":", "if", "not", "_pyenv_exists", "(", ")", ":", "print", "(", "'\\npyenv is not installed. You can install it with fabsetup '", "'(https://github.com/theno/fabsetup):\\n\\n '", "+", "cyan", "(", "'mkdir ~/repos && cd ~/repos\\n '", "'git clon...
Install latest pythons with pyenv. The python version will be activated in the projects base dir. Will skip already installed latest python versions.
[ "Install", "latest", "pythons", "with", "pyenv", "." ]
python
train
40.121212
Yubico/python-pyhsm
pyhsm/base.py
https://github.com/Yubico/python-pyhsm/blob/b6e2744d1ea15c352a0fc1d6ebc5950026b71311/pyhsm/base.py#L294-L312
def load_secret(self, secret):
    """
    Ask YubiHSM to load a pre-existing YubiKey secret.

    The data is stored internally in the YubiHSM in temporary memory -
    this operation would typically be followed by one or more
    L{generate_aead} commands to actually retreive the generated
    secret (in encrypted form).

    @param secret: YubiKey secret to load
    @type secret: L{pyhsm.aead_cmd.YHSM_YubiKeySecret} or string

    @returns: Number of bytes in YubiHSM internal buffer after load
    @rtype: integer

    @see: L{pyhsm.buffer_cmd.YHSM_Cmd_Buffer_Load}
    """
    # accept either the structured secret object or an already-packed string
    data = secret.pack() if isinstance(
        secret, pyhsm.aead_cmd.YHSM_YubiKeySecret) else secret
    return pyhsm.buffer_cmd.YHSM_Cmd_Buffer_Load(self.stick, data).execute()
[ "def", "load_secret", "(", "self", ",", "secret", ")", ":", "if", "isinstance", "(", "secret", ",", "pyhsm", ".", "aead_cmd", ".", "YHSM_YubiKeySecret", ")", ":", "secret", "=", "secret", ".", "pack", "(", ")", "return", "pyhsm", ".", "buffer_cmd", ".", ...
Ask YubiHSM to load a pre-existing YubiKey secret. The data is stored internally in the YubiHSM in temporary memory - this operation would typically be followed by one or more L{generate_aead} commands to actually retreive the generated secret (in encrypted form). @param secret: YubiKey secret to load @type secret: L{pyhsm.aead_cmd.YHSM_YubiKeySecret} or string @returns: Number of bytes in YubiHSM internal buffer after load @rtype: integer @see: L{pyhsm.buffer_cmd.YHSM_Cmd_Buffer_Load}
[ "Ask", "YubiHSM", "to", "load", "a", "pre", "-", "existing", "YubiKey", "secret", "." ]
python
train
41.421053
cytoscape/py2cytoscape
py2cytoscape/cyrest/networks.py
https://github.com/cytoscape/py2cytoscape/blob/dd34de8d028f512314d0057168df7fef7c5d5195/py2cytoscape/cyrest/networks.py#L966-L985
def updateColumnValues(self, networkId, tableType, columnName, default, body, verbose=None):
    """
    Sets the values for cells in the table specified by the `tableType` and
    `networkId` parameters. If the 'default` parameter is not specified, the
    message body should consist of key-value pairs with which to set values.
    If the `default` parameter is specified, its value will be used for every
    cell in the column. This is useful to set columns like "selected."

    :param networkId: SUID of the network containing the table
    :param tableType: The type of table
    :param columnName: Name of the column in which to set values
    :param default: Default Value. If this value is provided, all cells will
        be set to this. -- Not required, can be None
    :param body: Array of SUID Keyed values
    :param verbose: print more
    :returns: default: successful operation
    """
    # NOTE(review): `default` is documented above but never used when the
    # request is built below -- confirm whether it should be forwarded to
    # the REST endpoint (e.g. as a query parameter).
    response=api(url=self.___url+'networks/'+str(networkId)+'/tables/'+str(tableType)+'/columns/'+str(columnName)+'', method="PUT", body=body, verbose=verbose)
    return response
[ "def", "updateColumnValues", "(", "self", ",", "networkId", ",", "tableType", ",", "columnName", ",", "default", ",", "body", ",", "verbose", "=", "None", ")", ":", "response", "=", "api", "(", "url", "=", "self", ".", "___url", "+", "'networks/'", "+", ...
Sets the values for cells in the table specified by the `tableType` and `networkId` parameters. If the 'default` parameter is not specified, the message body should consist of key-value pairs with which to set values. If the `default` parameter is specified, its value will be used for every cell in the column. This is useful to set columns like "selected." :param networkId: SUID of the network containing the table :param tableType: The type of table :param columnName: Name of the column in which to set values :param default: Default Value. If this value is provided, all cells will be set to this. -- Not required, can be None :param body: Array of SUID Keyed values :param verbose: print more :returns: default: successful operation
[ "Sets", "the", "values", "for", "cells", "in", "the", "table", "specified", "by", "the", "tableType", "and", "networkId", "parameters", ".", "If", "the", "default", "parameter", "is", "not", "specified", "the", "message", "body", "should", "consist", "of", "...
python
train
56.3
theodoregoetz/wernher
wernher/colorline.py
https://github.com/theodoregoetz/wernher/blob/ef5d3aabe24e532b5eab33cd0212b2dbc2c9022e/wernher/colorline.py#L6-L28
def colorline(ax, x, y, z, **kwargs):
    """
    http://nbviewer.ipython.org/github/dpsanders/matplotlib-examples/blob/master/colorline.ipynb
    http://matplotlib.org/examples/pylab_examples/multicolored_line.html

    Plot a colored line with coordinates x and y

    Optionally specify colors in the array z

    Optionally specify a colormap, a norm function and a line width
    """
    # Special case if a single number:
    # to check for numerical input -- this is a hack
    if not hasattr(z, "__iter__"):
        z = np.array([z])
    z = np.asarray(z)

    lc = mcoll.LineCollection(make_segments(x, y), array=z, **kwargs)
    ax.add_collection(lc)
    if ax.get_autoscale_on():
        ax.autoscale_view()
    return lc
[ "def", "colorline", "(", "ax", ",", "x", ",", "y", ",", "z", ",", "*", "*", "kwargs", ")", ":", "# Special case if a single number:", "if", "not", "hasattr", "(", "z", ",", "\"__iter__\"", ")", ":", "# to check for numerical input -- this is a hack", "z", "=",...
http://nbviewer.ipython.org/github/dpsanders/matplotlib-examples/blob/master/colorline.ipynb http://matplotlib.org/examples/pylab_examples/multicolored_line.html Plot a colored line with coordinates x and y Optionally specify colors in the array z Optionally specify a colormap, a norm function and a line width
[ "http", ":", "//", "nbviewer", ".", "ipython", ".", "org", "/", "github", "/", "dpsanders", "/", "matplotlib", "-", "examples", "/", "blob", "/", "master", "/", "colorline", ".", "ipynb", "http", ":", "//", "matplotlib", ".", "org", "/", "examples", "/...
python
train
31.826087
giancosta86/Iris
info/gianlucacosta/iris/ioc.py
https://github.com/giancosta86/Iris/blob/b3d92cca5cce3653519bd032346b211c46a57d05/info/gianlucacosta/iris/ioc.py#L151-L158
def dispose(self):
    """
    Disposes every performed registration; the container can then be used again
    """
    # tear each registration down, then reset to an empty registry
    for registration in self._registrations.values():
        registration.dispose()

    self._registrations = {}
[ "def", "dispose", "(", "self", ")", ":", "for", "registration", "in", "self", ".", "_registrations", ".", "values", "(", ")", ":", "registration", ".", "dispose", "(", ")", "self", ".", "_registrations", "=", "{", "}" ]
Disposes every performed registration; the container can then be used again
[ "Disposes", "every", "performed", "registration", ";", "the", "container", "can", "then", "be", "used", "again" ]
python
train
30.75
tradenity/python-sdk
tradenity/resources/store_profile.py
https://github.com/tradenity/python-sdk/blob/d13fbe23f4d6ff22554c6d8d2deaf209371adaf1/tradenity/resources/store_profile.py#L404-L424
def get_store_profile_by_id(cls, store_profile_id, **kwargs):
    """Find StoreProfile

    Return single instance of StoreProfile by its ID.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.get_store_profile_by_id(store_profile_id, async=True)
    >>> result = thread.get()

    :param async bool
    :param str store_profile_id: ID of storeProfile to return (required)
    :return: StoreProfile
             If the method is called asynchronously,
             returns the request thread.
    """
    # always ask the low-level call for data only, never the raw response
    kwargs['_return_http_data_only'] = True
    # sync and async both return the low-level result unchanged:
    # the request thread when async, the deserialized data otherwise
    return cls._get_store_profile_by_id_with_http_info(
        store_profile_id, **kwargs)
[ "def", "get_store_profile_by_id", "(", "cls", ",", "store_profile_id", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async'", ")", ":", "return", "cls", ".", "_get_store_pro...
Find StoreProfile Return single instance of StoreProfile by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_store_profile_by_id(store_profile_id, async=True) >>> result = thread.get() :param async bool :param str store_profile_id: ID of storeProfile to return (required) :return: StoreProfile If the method is called asynchronously, returns the request thread.
[ "Find", "StoreProfile" ]
python
train
44.095238
geopy/geopy
geopy/geocoders/mapbox.py
https://github.com/geopy/geopy/blob/02c838d965e76497f3c3d61f53808c86b5c58224/geopy/geocoders/mapbox.py#L161-L197
def reverse(
        self,
        query,
        exactly_one=True,
        timeout=DEFAULT_SENTINEL,
):
    """
    Return an address by location point.

    :param query: The coordinates for which you wish to obtain the
        closest human-readable addresses.
    :type query: :class:`geopy.point.Point`, list or tuple of ``(latitude,
        longitude)``, or string as ``"%(latitude)s, %(longitude)s"``.

    :param bool exactly_one: Return one result or a list of results, if
        available.

    :param int timeout: Time, in seconds, to wait for the geocoding service
        to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
        exception. Set this only if you wish to override, on this call
        only, the value set during the geocoder's initialization.

    :rtype: ``None``, :class:`geopy.location.Location` or a list of them, if
        ``exactly_one=False``.
    """
    # mapbox expects "lon,lat" ordering in the path segment
    point = self._coerce_point_to_string(query, "%(lon)s,%(lat)s")
    quoted_query = quote(point.encode('utf-8'))

    params = {'access_token': self.api_key}
    url = "?".join((self.api % dict(query=quoted_query),
                    urlencode(params)))

    logger.debug("%s.reverse: %s", self.__class__.__name__, url)
    response = self._call_geocoder(url, timeout=timeout)
    return self._parse_json(response, exactly_one)
[ "def", "reverse", "(", "self", ",", "query", ",", "exactly_one", "=", "True", ",", "timeout", "=", "DEFAULT_SENTINEL", ",", ")", ":", "params", "=", "{", "}", "params", "[", "'access_token'", "]", "=", "self", ".", "api_key", "point", "=", "self", ".",...
Return an address by location point. :param query: The coordinates for which you wish to obtain the closest human-readable addresses. :type query: :class:`geopy.point.Point`, list or tuple of ``(latitude, longitude)``, or string as ``"%(latitude)s, %(longitude)s"``. :param bool exactly_one: Return one result or a list of results, if available. :param int timeout: Time, in seconds, to wait for the geocoding service to respond before raising a :class:`geopy.exc.GeocoderTimedOut` exception. Set this only if you wish to override, on this call only, the value set during the geocoder's initialization. :rtype: ``None``, :class:`geopy.location.Location` or a list of them, if ``exactly_one=False``.
[ "Return", "an", "address", "by", "location", "point", "." ]
python
train
38.432432
spyder-ide/spyder
spyder/plugins/variableexplorer/widgets/namespacebrowser.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/variableexplorer/widgets/namespacebrowser.py#L173-L215
def setup_option_actions(self, exclude_private, exclude_uppercase,
                         exclude_capitalized, exclude_unsupported):
    """Setup the actions to show in the cog menu."""
    # flag consulted elsewhere so option-change handlers can ignore the
    # toggled signals fired while the actions are being built
    self.setup_in_progress = True

    # each action is a checkable entry whose toggled handler re-emits the
    # new state through sig_option_changed(option_name, state); setChecked
    # is called right after creation to reflect the persisted setting
    self.exclude_private_action = create_action(self,
            _("Exclude private references"),
            tip=_("Exclude references which name starts"
                  " with an underscore"),
            toggled=lambda state:
            self.sig_option_changed.emit('exclude_private', state))
    self.exclude_private_action.setChecked(exclude_private)

    self.exclude_uppercase_action = create_action(self,
            _("Exclude all-uppercase references"),
            tip=_("Exclude references which name is uppercase"),
            toggled=lambda state:
            self.sig_option_changed.emit('exclude_uppercase', state))
    self.exclude_uppercase_action.setChecked(exclude_uppercase)

    self.exclude_capitalized_action = create_action(self,
            _("Exclude capitalized references"),
            tip=_("Exclude references which name starts with an "
                  "uppercase character"),
            toggled=lambda state:
            self.sig_option_changed.emit('exclude_capitalized', state))
    self.exclude_capitalized_action.setChecked(exclude_capitalized)

    self.exclude_unsupported_action = create_action(self,
            _("Exclude unsupported data types"),
            tip=_("Exclude references to unsupported data types"
                  " (i.e. which won't be handled/saved correctly)"),
            toggled=lambda state:
            self.sig_option_changed.emit('exclude_unsupported', state))
    self.exclude_unsupported_action.setChecked(exclude_unsupported)

    # the cog menu shows the four filters, plus the min/max toggle when
    # numpy is available
    self.actions = [
        self.exclude_private_action, self.exclude_uppercase_action,
        self.exclude_capitalized_action, self.exclude_unsupported_action]
    if is_module_installed('numpy'):
        self.actions.extend([MENU_SEPARATOR, self.editor.minmax_action])
    self.setup_in_progress = False
[ "def", "setup_option_actions", "(", "self", ",", "exclude_private", ",", "exclude_uppercase", ",", "exclude_capitalized", ",", "exclude_unsupported", ")", ":", "self", ".", "setup_in_progress", "=", "True", "self", ".", "exclude_private_action", "=", "create_action", ...
Setup the actions to show in the cog menu.
[ "Setup", "the", "actions", "to", "show", "in", "the", "cog", "menu", "." ]
python
train
51.604651
couchbase/couchbase-python-client
couchbase/n1ql.py
https://github.com/couchbase/couchbase-python-client/blob/a7bada167785bf79a29c39f820d932a433a6a535/couchbase/n1ql.py#L247-L262
def timeout(self):
    """
    Optional per-query timeout. If set, this will limit the amount
    of time in which the query can be executed and waited for.

    .. note:: The effective timeout for the query will be either this
        property or the value of
        :attr:`couchbase.bucket.Bucket.n1ql_timeout` property, whichever
        is *lower*.

    .. seealso:: couchbase.bucket.Bucket.n1ql_timeout
    """
    # stored as e.g. '75s'; drop the trailing unit character and convert
    raw = self._body.get('timeout', '0s')
    return float(raw[:-1])
[ "def", "timeout", "(", "self", ")", ":", "value", "=", "self", ".", "_body", ".", "get", "(", "'timeout'", ",", "'0s'", ")", "value", "=", "value", "[", ":", "-", "1", "]", "return", "float", "(", "value", ")" ]
Optional per-query timeout. If set, this will limit the amount of time in which the query can be executed and waited for. .. note:: The effective timeout for the query will be either this property or the value of :attr:`couchbase.bucket.Bucket.n1ql_timeout` property, whichever is *lower*. .. seealso:: couchbase.bucket.Bucket.n1ql_timeout
[ "Optional", "per", "-", "query", "timeout", ".", "If", "set", "this", "will", "limit", "the", "amount", "of", "time", "in", "which", "the", "query", "can", "be", "executed", "and", "waited", "for", "." ]
python
train
33.8125
MacHu-GWU/angora-project
angora/math/img2waveform.py
https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/math/img2waveform.py#L44-L69
def expand_window(center, window_size, array_size):
    """Generate a bounded window of indices around ``center``.

    maxlength = 2 * window_size + 1, lower bound is 0 and upper bound is
    ``array_size - 1``.

    :param center: index at the middle of the window
    :param window_size: number of indices taken on each side of ``center``
    :param array_size: length of the array being windowed (exclusive upper
        bound for indices)
    :return: ``numpy`` integer array of indices

    Example::

        >>> expand_window(center=50, window_size=3, array_size=100)
        [47, 48, 49, 50, 51, 52, 53]

        >>> expand_window(center=2, window_size=3, array_size=100)
        [0, 1, 2, 3, 4, 5]

        >>> expand_window(center=98, window_size=3, array_size=100)
        [95, 96, 97, 98, 99]
    """
    # Fix: the original docstring examples passed ``max=100``, a keyword
    # that does not exist on this function (array_size is the real name).
    # Clamp both ends of the window into [0, array_size).
    lower = max(0, center - window_size)
    upper = min(array_size, center + window_size + 1)
    return np.arange(lower, upper)
[ "def", "expand_window", "(", "center", ",", "window_size", ",", "array_size", ")", ":", "if", "center", "-", "window_size", "<", "0", ":", "lower", "=", "0", "else", ":", "lower", "=", "center", "-", "window_size", "if", "center", "+", "window_size", "+"...
Generate a bounded windows. maxlength = 2 * window_size + 1, lower bound is 0 and upper bound is ``array_size - 1``. Example:: >>> expand_window(center=50, window_size=3, max=100) [47, 48, 49, 50, 51, 52, 53] >>> expand_window(center=2, window_size=3, max=100) [0, 1, 2, 3, 4, 5] >>> expand_window(center=98, window_size=3, max=100) [95, 96, 97, 98, 99]
[ "Generate", "a", "bounded", "windows", "." ]
python
train
27.884615
google/grumpy
third_party/stdlib/base64.py
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/base64.py#L185-L252
def b32decode(s, casefold=False, map01=None):
    """Decode a Base32 encoded string.

    s is the string to decode.  Optional casefold is a flag specifying whether
    a lowercase alphabet is acceptable as input.  For security purposes, the
    default is False.

    RFC 3548 allows for optional mapping of the digit 0 (zero) to the letter O
    (oh), and for optional mapping of the digit 1 (one) to either the letter I
    (eye) or letter L (el).  The optional argument map01 when not None,
    specifies which letter the digit 1 should be mapped to (when map01 is not
    None, the digit 0 is always mapped to the letter O).  For security
    purposes the default is None, so that 0 and 1 are not allowed in the
    input.

    The decoded string is returned.  A TypeError is raised if s were
    incorrectly padded or if there are non-alphabet characters present in the
    string.
    """
    # base32 works in 8-character quanta (40 bits each); anything else is
    # a padding error
    quanta, leftover = divmod(len(s), 8)
    if leftover:
        raise TypeError('Incorrect padding')
    # Handle section 2.4 zero and one mapping. The flag map01 will be either
    # False, or the character to map the digit 1 (one) to. It should be
    # either L (el) or I (eye).
    if map01:
        s = s.translate(string.maketrans(b'01', b'O' + map01))
    if casefold:
        s = s.upper()
    # Strip off pad characters from the right. We need to count the pad
    # characters because this will tell us how many null bytes to remove from
    # the end of the decoded string.
    padchars = 0
    mo = re.search('(?P<pad>[=]*)$', s)
    if mo:
        padchars = len(mo.group('pad'))
        if padchars > 0:
            s = s[:-padchars]
    # Now decode the full quanta
    # accumulate 5 bits per character; every 40 bits (8 chars) flush the
    # accumulator as 5 bytes
    parts = []
    acc = 0
    shift = 35
    for c in s:
        val = _b32rev.get(c)
        if val is None:
            raise TypeError('Non-base32 digit found')
        acc += _b32rev[c] << shift
        shift -= 5
        if shift < 0:
            parts.append(binascii.unhexlify('%010x' % acc))
            acc = 0
            shift = 35
    # Process the last, partial quanta
    # the pad count determines how many trailing null bytes to drop
    last = binascii.unhexlify('%010x' % acc)
    if padchars == 0:
        last = ''  # No characters
    elif padchars == 1:
        last = last[:-1]
    elif padchars == 3:
        last = last[:-2]
    elif padchars == 4:
        last = last[:-3]
    elif padchars == 6:
        last = last[:-4]
    else:
        raise TypeError('Incorrect padding')
    parts.append(last)
    return EMPTYSTRING.join(parts)
[ "def", "b32decode", "(", "s", ",", "casefold", "=", "False", ",", "map01", "=", "None", ")", ":", "quanta", ",", "leftover", "=", "divmod", "(", "len", "(", "s", ")", ",", "8", ")", "if", "leftover", ":", "raise", "TypeError", "(", "'Incorrect paddin...
Decode a Base32 encoded string. s is the string to decode. Optional casefold is a flag specifying whether a lowercase alphabet is acceptable as input. For security purposes, the default is False. RFC 3548 allows for optional mapping of the digit 0 (zero) to the letter O (oh), and for optional mapping of the digit 1 (one) to either the letter I (eye) or letter L (el). The optional argument map01 when not None, specifies which letter the digit 1 should be mapped to (when map01 is not None, the digit 0 is always mapped to the letter O). For security purposes the default is None, so that 0 and 1 are not allowed in the input. The decoded string is returned. A TypeError is raised if s were incorrectly padded or if there are non-alphabet characters present in the string.
[ "Decode", "a", "Base32", "encoded", "string", "." ]
python
valid
35.720588
chaoss/grimoirelab-elk
grimoire_elk/enriched/mediawiki.py
https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/mediawiki.py#L67-L80
def get_identities(self, item):
    """Return the identities from an item.

    This is a generator yielding one identity per revision found in
    ``item['data']['revisions']``; items without data or revisions
    yield nothing.
    """
    # Fix: the original returned an (always empty) list from inside a
    # generator; for every caller that iterates the result that value is
    # invisible, so a bare ``return`` is equivalent and not misleading.
    if 'data' not in item:
        return
    if 'revisions' not in item['data']:
        return

    for revision in item['data']['revisions']:
        yield self.get_sh_identity(revision)
[ "def", "get_identities", "(", "self", ",", "item", ")", ":", "identities", "=", "[", "]", "if", "'data'", "not", "in", "item", ":", "return", "identities", "if", "'revisions'", "not", "in", "item", "[", "'data'", "]", ":", "return", "identities", "revisi...
Return the identities from an item
[ "Return", "the", "identities", "from", "an", "item" ]
python
train
27.5
galaxyproject/gravity
gravity/config_manager.py
https://github.com/galaxyproject/gravity/blob/2f792497fc60874f881c9ef74a5905a286a9ce3e/gravity/config_manager.py#L190-L198
def _register_config_file(self, key, val): """ Persist a newly added config file, or update (overwrite) the value of a previously persisted config. """ state = self.__load_state() if 'config_files' not in state: state['config_files'] = {} state['config_files'][key] = val self.__dump_state(state)
[ "def", "_register_config_file", "(", "self", ",", "key", ",", "val", ")", ":", "state", "=", "self", ".", "__load_state", "(", ")", "if", "'config_files'", "not", "in", "state", ":", "state", "[", "'config_files'", "]", "=", "{", "}", "state", "[", "'c...
Persist a newly added config file, or update (overwrite) the value of a previously persisted config.
[ "Persist", "a", "newly", "added", "config", "file", "or", "update", "(", "overwrite", ")", "the", "value", "of", "a", "previously", "persisted", "config", "." ]
python
train
39.555556
facelessuser/pyspelling
pyspelling/__init__.py
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/__init__.py#L471-L482
def setup_dictionary(self, task):
    """Setup the dictionary for a task.

    Reads the task's ``dictionary`` options, resolves the output path
    (falling back to ``self.dict_bin``), and compiles the configured
    wordlists for the configured language into it.

    Returns the absolute output path, or ``None`` when there is nothing
    to compile (no language or no wordlists).
    """
    dictionary_options = task.get('dictionary', {})
    output = os.path.abspath(dictionary_options.get('output', self.dict_bin))
    lang = dictionary_options.get('lang', 'en_US')
    wordlists = dictionary_options.get('wordlists', [])
    if lang and wordlists:
        # Fix: reuse the already-read `wordlists` instead of re-reading
        # the option a second time (the original duplicated the .get call).
        self.compile_dictionary(lang, wordlists, None, output)
    else:
        output = None
    return output
[ "def", "setup_dictionary", "(", "self", ",", "task", ")", ":", "dictionary_options", "=", "task", ".", "get", "(", "'dictionary'", ",", "{", "}", ")", "output", "=", "os", ".", "path", ".", "abspath", "(", "dictionary_options", ".", "get", "(", "'output'...
Setup dictionary.
[ "Setup", "dictionary", "." ]
python
train
41.5
django-fluent/django-fluent-blogs
fluent_blogs/models/query.py
https://github.com/django-fluent/django-fluent-blogs/blob/86b148549a010eaca9a2ea987fe43be250e06c50/fluent_blogs/models/query.py#L198-L220
def get_date_range(year=None, month=None, day=None):
    """
    Return a start..end range to query for a specific month, day or year.
    """
    if year is None:
        return None

    if month is None:
        # whole year
        start = datetime(year, 1, 1, 0, 0, 0, tzinfo=utc)
        end = datetime(year, 12, 31, 23, 59, 59, 999, tzinfo=utc)
        return (start, end)

    if day is None:
        # whole month: last microsecond before the next month starts
        start = datetime(year, month, 1, 0, 0, 0, tzinfo=utc)
        span = timedelta(days=monthrange(year, month)[1], microseconds=-1)
        return (start, start + span)

    # exact day: last microsecond before midnight
    start = datetime(year, month, day, 0, 0, 0, tzinfo=utc)
    return (start, start + timedelta(days=1, microseconds=-1))
[ "def", "get_date_range", "(", "year", "=", "None", ",", "month", "=", "None", ",", "day", "=", "None", ")", ":", "if", "year", "is", "None", ":", "return", "None", "if", "month", "is", "None", ":", "# year only", "start", "=", "datetime", "(", "year"...
Return a start..end range to query for a specific month, day or year.
[ "Return", "a", "start", "..", "end", "range", "to", "query", "for", "a", "specific", "month", "day", "or", "year", "." ]
python
train
32.869565
tdryer/hangups
hangups/auth.py
https://github.com/tdryer/hangups/blob/85c0bf0a57698d077461283895707260f9dbf931/hangups/auth.py#L145-L158
def get(self):
    """Get cached refresh token.

    Returns:
        Cached refresh token, or ``None`` on failure.
    """
    logger.info(
        'Loading refresh_token from %s', repr(self._filename)
    )
    # a missing/unreadable cache file is not fatal; just log and return None
    try:
        with open(self._filename) as f:
            token = f.read()
    except IOError as e:
        logger.info('Failed to load refresh_token: %s', e)
        return None
    return token
[ "def", "get", "(", "self", ")", ":", "logger", ".", "info", "(", "'Loading refresh_token from %s'", ",", "repr", "(", "self", ".", "_filename", ")", ")", "try", ":", "with", "open", "(", "self", ".", "_filename", ")", "as", "f", ":", "return", "f", "...
Get cached refresh token. Returns: Cached refresh token, or ``None`` on failure.
[ "Get", "cached", "refresh", "token", "." ]
python
valid
28.857143
openthread/openthread
tools/harness-thci/OpenThread.py
https://github.com/openthread/openthread/blob/0208d10563aa21c518092985c78ecf9cd223ab74/tools/harness-thci/OpenThread.py#L671-L689
def setNetworkName(self, networkName='GRL'):
    """set Thread Network name

    Args:
        networkName: the networkname string to be set

    Returns:
        True: successful to set the Thread Networkname
        False: fail to set the Thread Networkname
    """
    print '%s call setNetworkName' % self.port
    print networkName
    try:
        cmd = 'networkname %s' % networkName
        datasetCmd = 'dataset networkname %s' % networkName
        # the name must also be committed to the active operational dataset
        self.hasActiveDatasetToCommit = True
        # both the live setting and the dataset command must report 'Done'
        return self.__sendCommand(cmd)[0] == 'Done' and self.__sendCommand(datasetCmd)[0] == 'Done'
    except Exception, e:
        # NOTE(review): on exception this logs and implicitly returns
        # None, not False as the docstring promises -- confirm callers
        # only test the result for truthiness.
        ModuleHelper.WriteIntoDebugLogger("setNetworkName() Error: " + str(e))
[ "def", "setNetworkName", "(", "self", ",", "networkName", "=", "'GRL'", ")", ":", "print", "'%s call setNetworkName'", "%", "self", ".", "port", "print", "networkName", "try", ":", "cmd", "=", "'networkname %s'", "%", "networkName", "datasetCmd", "=", "'dataset ...
set Thread Network name Args: networkName: the networkname string to be set Returns: True: successful to set the Thread Networkname False: fail to set the Thread Networkname
[ "set", "Thread", "Network", "name" ]
python
train
39.210526
APSL/transmanager
transmanager/manager.py
https://github.com/APSL/transmanager/blob/79157085840008e146b264521681913090197ed1/transmanager/manager.py#L680-L711
def delete_translations_for_item_and_its_children(self, item, languages=None):
    """
    deletes the translations task of an item and its children
    used when a model is not enabled anymore

    :param item:
    :param languages:
    :return:
    """
    self.log('--- Deleting translations ---')
    if not self.master:
        self.set_master(item)

    # build the lookup identifying the pending tasks for this object
    lookup = {
        'object_class': item.__class__.__name__,
        'object_name': '{} - {}'.format(item._meta.app_label.lower(),
                                        item._meta.verbose_name),
        'object_pk': item.pk,
        'done': False,
    }
    if languages:
        lookup['language__code__in'] = languages
    TransTask.objects.filter(**lookup).delete()

    # then process child objects from main
    for child in self.get_translatable_children(item):
        self.delete_translations_for_item_and_its_children(child, languages)
[ "def", "delete_translations_for_item_and_its_children", "(", "self", ",", "item", ",", "languages", "=", "None", ")", ":", "self", ".", "log", "(", "'--- Deleting translations ---'", ")", "if", "not", "self", ".", "master", ":", "self", ".", "set_master", "(", ...
deletes the translations task of an item and its children used when a model is not enabled anymore :param item: :param languages: :return:
[ "deletes", "the", "translations", "task", "of", "an", "item", "and", "its", "children", "used", "when", "a", "model", "is", "not", "enabled", "anymore", ":", "param", "item", ":", ":", "param", "languages", ":", ":", "return", ":" ]
python
train
33.375
saltstack/salt
salt/cloud/clouds/aliyun.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/aliyun.py#L223-L238
def list_availability_zones(call=None): ''' List all availability zones in the current region ''' ret = {} params = {'Action': 'DescribeZones', 'RegionId': get_location()} items = query(params) for zone in items['Zones']['Zone']: ret[zone['ZoneId']] = {} for item in zone: ret[zone['ZoneId']][item] = six.text_type(zone[item]) return ret
[ "def", "list_availability_zones", "(", "call", "=", "None", ")", ":", "ret", "=", "{", "}", "params", "=", "{", "'Action'", ":", "'DescribeZones'", ",", "'RegionId'", ":", "get_location", "(", ")", "}", "items", "=", "query", "(", "params", ")", "for", ...
List all availability zones in the current region
[ "List", "all", "availability", "zones", "in", "the", "current", "region" ]
python
train
24.9375
agoragames/chai
chai/stub.py
https://github.com/agoragames/chai/blob/8148d7b7754226b0d1cabfc2af10cd912612abdc/chai/stub.py#L568-L574
def call_orig(self, *args, **kwargs): ''' Calls the original function. Simulates __new__ and __init__ together. ''' rval = super(StubNew, self).call_orig(self._type) rval.__init__(*args, **kwargs) return rval
[ "def", "call_orig", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "rval", "=", "super", "(", "StubNew", ",", "self", ")", ".", "call_orig", "(", "self", ".", "_type", ")", "rval", ".", "__init__", "(", "*", "args", ",", "*", ...
Calls the original function. Simulates __new__ and __init__ together.
[ "Calls", "the", "original", "function", ".", "Simulates", "__new__", "and", "__init__", "together", "." ]
python
train
35.714286
gwpy/gwpy
gwpy/io/nds2.py
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/nds2.py#L355-L367
def parse_nds2_enums(func): """Decorate a function to translate a type string into an integer """ @wraps(func) def wrapped_func(*args, **kwargs): # pylint: disable=missing-docstring for kwd, enum_ in (('type', Nds2ChannelType), ('dtype', Nds2DataType)): if kwargs.get(kwd, None) is None: kwargs[kwd] = enum_.any() elif not isinstance(kwargs[kwd], int): kwargs[kwd] = enum_.find(kwargs[kwd]).value return func(*args, **kwargs) return wrapped_func
[ "def", "parse_nds2_enums", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "wrapped_func", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# pylint: disable=missing-docstring", "for", "kwd", ",", "enum_", "in", "(", "(", "'type'", ","...
Decorate a function to translate a type string into an integer
[ "Decorate", "a", "function", "to", "translate", "a", "type", "string", "into", "an", "integer" ]
python
train
42.538462
blockstack/virtualchain
virtualchain/lib/blockchain/address.py
https://github.com/blockstack/virtualchain/blob/fcfc970064ca7dfcab26ebd3ab955870a763ea39/virtualchain/lib/blockchain/address.py#L25-L32
def address_reencode(address, blockchain='bitcoin', **blockchain_opts): """ Reencode an address """ if blockchain == 'bitcoin': return btc_address_reencode(address, **blockchain_opts) else: raise ValueError("Unknown blockchain '{}'".format(blockchain))
[ "def", "address_reencode", "(", "address", ",", "blockchain", "=", "'bitcoin'", ",", "*", "*", "blockchain_opts", ")", ":", "if", "blockchain", "==", "'bitcoin'", ":", "return", "btc_address_reencode", "(", "address", ",", "*", "*", "blockchain_opts", ")", "el...
Reencode an address
[ "Reencode", "an", "address" ]
python
train
35.125
newfies-dialer/python-msspeak
msspeak/msspeak.py
https://github.com/newfies-dialer/python-msspeak/blob/106475122be73df152865c4fe6e9388caf974085/msspeak/msspeak.py#L193-L210
def speak(self, textstr, lang='en-US', gender='female', format='riff-16khz-16bit-mono-pcm'): """ Run will call Microsoft Translate API and and produce audio """ # print("speak(textstr=%s, lang=%s, gender=%s, format=%s)" % (textstr, lang, gender, format)) concatkey = '%s-%s-%s-%s' % (textstr, lang.lower(), gender.lower(), format) key = self.tts_engine + '' + str(hash(concatkey)) self.filename = '%s-%s.mp3' % (key, lang) # check if file exists fileloc = self.directory + self.filename if self.cache and os.path.isfile(self.directory + self.filename): return self.filename else: with open(fileloc, 'wb') as f: self.speech.speak_to_file(f, textstr, lang, gender, format) return self.filename return False
[ "def", "speak", "(", "self", ",", "textstr", ",", "lang", "=", "'en-US'", ",", "gender", "=", "'female'", ",", "format", "=", "'riff-16khz-16bit-mono-pcm'", ")", ":", "# print(\"speak(textstr=%s, lang=%s, gender=%s, format=%s)\" % (textstr, lang, gender, format))", "concatk...
Run will call Microsoft Translate API and and produce audio
[ "Run", "will", "call", "Microsoft", "Translate", "API", "and", "and", "produce", "audio" ]
python
train
46.888889
saltstack/salt
salt/modules/inspectlib/dbhandle.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/inspectlib/dbhandle.py#L46-L51
def _run_init_queries(self): ''' Initialization queries ''' for obj in (Package, PackageCfgFile, PayloadFile, IgnoredDir, AllowedDir): self._db.create_table_from_object(obj())
[ "def", "_run_init_queries", "(", "self", ")", ":", "for", "obj", "in", "(", "Package", ",", "PackageCfgFile", ",", "PayloadFile", ",", "IgnoredDir", ",", "AllowedDir", ")", ":", "self", ".", "_db", ".", "create_table_from_object", "(", "obj", "(", ")", ")"...
Initialization queries
[ "Initialization", "queries" ]
python
train
35.666667
awslabs/aws-serverlessrepo-python
serverlessrepo/parser.py
https://github.com/awslabs/aws-serverlessrepo-python/blob/e2126cee0191266cfb8a3a2bc3270bf50330907c/serverlessrepo/parser.py#L98-L113
def get_app_metadata(template_dict): """ Get the application metadata from a SAM template. :param template_dict: SAM template as a dictionary :type template_dict: dict :return: Application metadata as defined in the template :rtype: ApplicationMetadata :raises ApplicationMetadataNotFoundError """ if SERVERLESS_REPO_APPLICATION in template_dict.get(METADATA, {}): app_metadata_dict = template_dict.get(METADATA).get(SERVERLESS_REPO_APPLICATION) return ApplicationMetadata(app_metadata_dict) raise ApplicationMetadataNotFoundError( error_message='missing {} section in template Metadata'.format(SERVERLESS_REPO_APPLICATION))
[ "def", "get_app_metadata", "(", "template_dict", ")", ":", "if", "SERVERLESS_REPO_APPLICATION", "in", "template_dict", ".", "get", "(", "METADATA", ",", "{", "}", ")", ":", "app_metadata_dict", "=", "template_dict", ".", "get", "(", "METADATA", ")", ".", "get"...
Get the application metadata from a SAM template. :param template_dict: SAM template as a dictionary :type template_dict: dict :return: Application metadata as defined in the template :rtype: ApplicationMetadata :raises ApplicationMetadataNotFoundError
[ "Get", "the", "application", "metadata", "from", "a", "SAM", "template", "." ]
python
train
42.1875
SpikeInterface/spiketoolkit
spiketoolkit/comparison/sortingcomparison.py
https://github.com/SpikeInterface/spiketoolkit/blob/f7c054383d1ebca640966b057c087fa187955d13/spiketoolkit/comparison/sortingcomparison.py#L279-L325
def compute_performance(SC, verbose=True, output='dict'): """ Return some performance value for comparison. Parameters ------- SC: SortingComparison instance The SortingComparison verbose: bool Display on console or not output: dict or pandas Returns ---------- performance: dict or pandas.Serie depending output param """ counts = SC._counts tp_rate = float(counts['TP']) / counts['TOT_ST1'] * 100 cl_rate = float(counts['CL']) / counts['TOT_ST1'] * 100 fn_rate = float(counts['FN']) / counts['TOT_ST1'] * 100 fp_st1 = float(counts['FP']) / counts['TOT_ST1'] * 100 fp_st2 = float(counts['FP']) / counts['TOT_ST2'] * 100 accuracy = tp_rate / (tp_rate + fn_rate + fp_st1) * 100 sensitivity = tp_rate / (tp_rate + fn_rate) * 100 miss_rate = fn_rate / (tp_rate + fn_rate) * 100 precision = tp_rate / (tp_rate + fp_st1) * 100 false_discovery_rate = fp_st1 / (tp_rate + fp_st1) * 100 performance = {'tp': tp_rate, 'cl': cl_rate, 'fn': fn_rate, 'fp_st1': fp_st1, 'fp_st2': fp_st2, 'accuracy': accuracy, 'sensitivity': sensitivity, 'precision': precision, 'miss_rate': miss_rate, 'false_disc_rate': false_discovery_rate} if verbose: txt = _txt_performance.format(**performance) print(txt) if output == 'dict': return performance elif output == 'pandas': return pd.Series(performance)
[ "def", "compute_performance", "(", "SC", ",", "verbose", "=", "True", ",", "output", "=", "'dict'", ")", ":", "counts", "=", "SC", ".", "_counts", "tp_rate", "=", "float", "(", "counts", "[", "'TP'", "]", ")", "/", "counts", "[", "'TOT_ST1'", "]", "*...
Return some performance value for comparison. Parameters ------- SC: SortingComparison instance The SortingComparison verbose: bool Display on console or not output: dict or pandas Returns ---------- performance: dict or pandas.Serie depending output param
[ "Return", "some", "performance", "value", "for", "comparison", "." ]
python
train
30.468085
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_fcoe_ext.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_fcoe_ext.py#L672-L686
def fcoe_get_login_output_fcoe_login_list_interface_type(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") fcoe_get_login = ET.Element("fcoe_get_login") config = fcoe_get_login output = ET.SubElement(fcoe_get_login, "output") fcoe_login_list = ET.SubElement(output, "fcoe-login-list") fcoe_login_session_mac_key = ET.SubElement(fcoe_login_list, "fcoe-login-session-mac") fcoe_login_session_mac_key.text = kwargs.pop('fcoe_login_session_mac') interface_type = ET.SubElement(fcoe_login_list, "interface-type") interface_type.text = kwargs.pop('interface_type') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "fcoe_get_login_output_fcoe_login_list_interface_type", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "fcoe_get_login", "=", "ET", ".", "Element", "(", "\"fcoe_get_login\"", ")", "config", "="...
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
49.8
SKA-ScienceDataProcessor/integration-prototype
sip/tango_control/tango_master/app/sdp_master_device.py
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/tango_control/tango_master/app/sdp_master_device.py#L247-L255
def _set_master_state(self, state): """Set the state of the SDPMaster.""" if state == 'init': self._service_state.update_current_state('init', force=True) self.set_state(DevState.INIT) elif state == 'on': self.set_state(DevState.ON) self._service_state.update_current_state('on')
[ "def", "_set_master_state", "(", "self", ",", "state", ")", ":", "if", "state", "==", "'init'", ":", "self", ".", "_service_state", ".", "update_current_state", "(", "'init'", ",", "force", "=", "True", ")", "self", ".", "set_state", "(", "DevState", ".", ...
Set the state of the SDPMaster.
[ "Set", "the", "state", "of", "the", "SDPMaster", "." ]
python
train
38.222222
datastax/python-driver
cassandra/cluster.py
https://github.com/datastax/python-driver/blob/30a80d0b798b1f45f8cb77163b1fa791f3e3ca29/cassandra/cluster.py#L4492-L4516
def was_applied(self): """ For LWT results, returns whether the transaction was applied. Result is indeterminate if called on a result that was not an LWT request or on a :class:`.query.BatchStatement` containing LWT. In the latter case either all the batch succeeds or fails. Only valid when one of the of the internal row factories is in use. """ if self.response_future.row_factory not in (named_tuple_factory, dict_factory, tuple_factory): raise RuntimeError("Cannot determine LWT result with row factory %s" % (self.response_future.row_factory,)) is_batch_statement = isinstance(self.response_future.query, BatchStatement) if is_batch_statement and (not self.column_names or self.column_names[0] != "[applied]"): raise RuntimeError("No LWT were present in the BatchStatement") if not is_batch_statement and len(self.current_rows) != 1: raise RuntimeError("LWT result should have exactly one row. This has %d." % (len(self.current_rows))) row = self.current_rows[0] if isinstance(row, tuple): return row[0] else: return row['[applied]']
[ "def", "was_applied", "(", "self", ")", ":", "if", "self", ".", "response_future", ".", "row_factory", "not", "in", "(", "named_tuple_factory", ",", "dict_factory", ",", "tuple_factory", ")", ":", "raise", "RuntimeError", "(", "\"Cannot determine LWT result with row...
For LWT results, returns whether the transaction was applied. Result is indeterminate if called on a result that was not an LWT request or on a :class:`.query.BatchStatement` containing LWT. In the latter case either all the batch succeeds or fails. Only valid when one of the of the internal row factories is in use.
[ "For", "LWT", "results", "returns", "whether", "the", "transaction", "was", "applied", "." ]
python
train
47.72
rootpy/rootpy
rootpy/utils/extras.py
https://github.com/rootpy/rootpy/blob/3926935e1f2100d8ba68070c2ab44055d4800f73/rootpy/utils/extras.py#L90-L136
def izip_exact(*iterables): """ A lazy izip() that ensures that all iterables have the same length. A LengthMismatch exception is raised if the iterables' lengths differ. Examples -------- >>> list(zip_exc([])) [] >>> list(zip_exc((), (), ())) [] >>> list(zip_exc("abc", range(3))) [('a', 0), ('b', 1), ('c', 2)] >>> try: ... list(zip_exc("", range(3))) ... except LengthMismatch: ... print "mismatch" mismatch >>> try: ... list(zip_exc(range(3), ())) ... except LengthMismatch: ... print "mismatch" mismatch >>> try: ... list(zip_exc(range(3), range(2), range(4))) ... except LengthMismatch: ... print "mismatch" mismatch >>> items = zip_exc(range(3), range(2), range(4)) >>> next(items) (0, 0, 0) >>> next(items) (1, 1, 1) >>> try: next(items) ... except LengthMismatch: print "mismatch" mismatch References ---------- [1] http://code.activestate.com/recipes/497006-zip_exc-a-lazy-zip-that-ensures-that-all-iterables/ """ rest = [chain(i, _throw()) for i in iterables[1:]] first = chain(iterables[0], _check(rest)) return zip(*[first] + rest)
[ "def", "izip_exact", "(", "*", "iterables", ")", ":", "rest", "=", "[", "chain", "(", "i", ",", "_throw", "(", ")", ")", "for", "i", "in", "iterables", "[", "1", ":", "]", "]", "first", "=", "chain", "(", "iterables", "[", "0", "]", ",", "_chec...
A lazy izip() that ensures that all iterables have the same length. A LengthMismatch exception is raised if the iterables' lengths differ. Examples -------- >>> list(zip_exc([])) [] >>> list(zip_exc((), (), ())) [] >>> list(zip_exc("abc", range(3))) [('a', 0), ('b', 1), ('c', 2)] >>> try: ... list(zip_exc("", range(3))) ... except LengthMismatch: ... print "mismatch" mismatch >>> try: ... list(zip_exc(range(3), ())) ... except LengthMismatch: ... print "mismatch" mismatch >>> try: ... list(zip_exc(range(3), range(2), range(4))) ... except LengthMismatch: ... print "mismatch" mismatch >>> items = zip_exc(range(3), range(2), range(4)) >>> next(items) (0, 0, 0) >>> next(items) (1, 1, 1) >>> try: next(items) ... except LengthMismatch: print "mismatch" mismatch References ---------- [1] http://code.activestate.com/recipes/497006-zip_exc-a-lazy-zip-that-ensures-that-all-iterables/
[ "A", "lazy", "izip", "()", "that", "ensures", "that", "all", "iterables", "have", "the", "same", "length", ".", "A", "LengthMismatch", "exception", "is", "raised", "if", "the", "iterables", "lengths", "differ", "." ]
python
train
27.808511
mfcloud/python-zvm-sdk
smtLayer/vmUtils.py
https://github.com/mfcloud/python-zvm-sdk/blob/de9994ceca764f5460ce51bd74237986341d8e3c/smtLayer/vmUtils.py#L206-L269
def getPerfInfo(rh, useridlist): """ Get the performance information for a userid Input: Request Handle Userid to query <- may change this to a list later. Output: Dictionary containing the following: overallRC - overall return code, 0: success, non-zero: failure rc - RC returned from SMCLI if overallRC = 0. rs - RS returned from SMCLI if overallRC = 0. errno - Errno returned from SMCLI if overallRC = 0. response - Stripped and reformatted output of the SMCLI command. """ rh.printSysLog("Enter vmUtils.getPerfInfo, userid: " + useridlist) parms = ["-T", rh.userid, "-c", "1"] results = invokeSMCLI(rh, "Image_Performance_Query", parms) if results['overallRC'] != 0: # SMCLI failed. rh.printLn("ES", results['response']) rh.printSysLog("Exit vmUtils.getPerfInfo, rc: " + str(results['overallRC'])) return results lines = results['response'].split("\n") usedTime = 0 totalCpu = 0 totalMem = 0 usedMem = 0 try: for line in lines: if "Used CPU time:" in line: usedTime = line.split()[3].strip('"') # Value is in us, need make it seconds usedTime = int(usedTime) / 1000000 if "Guest CPUs:" in line: totalCpu = line.split()[2].strip('"') if "Max memory:" in line: totalMem = line.split()[2].strip('"') # Value is in Kb, need to make it Mb totalMem = int(totalMem) / 1024 if "Used memory:" in line: usedMem = line.split()[2].strip('"') usedMem = int(usedMem) / 1024 except Exception as e: msg = msgs.msg['0412'][1] % (modId, type(e).__name__, str(e), results['response']) rh.printLn("ES", msg) results['overallRC'] = 4 results['rc'] = 4 results['rs'] = 412 if results['overallRC'] == 0: memstr = "Total Memory: %iM\n" % totalMem usedmemstr = "Used Memory: %iM\n" % usedMem procstr = "Processors: %s\n" % totalCpu timestr = "CPU Used Time: %i sec\n" % usedTime results['response'] = memstr + usedmemstr + procstr + timestr rh.printSysLog("Exit vmUtils.getPerfInfo, rc: " + str(results['rc'])) return results
[ "def", "getPerfInfo", "(", "rh", ",", "useridlist", ")", ":", "rh", ".", "printSysLog", "(", "\"Enter vmUtils.getPerfInfo, userid: \"", "+", "useridlist", ")", "parms", "=", "[", "\"-T\"", ",", "rh", ".", "userid", ",", "\"-c\"", ",", "\"1\"", "]", "results"...
Get the performance information for a userid Input: Request Handle Userid to query <- may change this to a list later. Output: Dictionary containing the following: overallRC - overall return code, 0: success, non-zero: failure rc - RC returned from SMCLI if overallRC = 0. rs - RS returned from SMCLI if overallRC = 0. errno - Errno returned from SMCLI if overallRC = 0. response - Stripped and reformatted output of the SMCLI command.
[ "Get", "the", "performance", "information", "for", "a", "userid" ]
python
train
37.34375
mkouhei/tonicdnscli
src/tonicdnscli/command.py
https://github.com/mkouhei/tonicdnscli/blob/df2d6fb2104ae4d49fa89d1bba2f3ccd2fed388c/src/tonicdnscli/command.py#L604-L616
def parse_create(prs, conn): """Create record. Arguments: prs: parser object of argparse conn: dictionary of connection information """ prs_create = prs.add_parser( 'create', help='create record of specific zone') set_option(prs_create, 'domain') conn_options(prs_create, conn) prs_create.set_defaults(func=create)
[ "def", "parse_create", "(", "prs", ",", "conn", ")", ":", "prs_create", "=", "prs", ".", "add_parser", "(", "'create'", ",", "help", "=", "'create record of specific zone'", ")", "set_option", "(", "prs_create", ",", "'domain'", ")", "conn_options", "(", "prs_...
Create record. Arguments: prs: parser object of argparse conn: dictionary of connection information
[ "Create", "record", "." ]
python
train
27.461538
tomplus/kubernetes_asyncio
kubernetes_asyncio/client/api/batch_v1_api.py
https://github.com/tomplus/kubernetes_asyncio/blob/f9ab15317ec921409714c7afef11aeb0f579985d/kubernetes_asyncio/client/api/batch_v1_api.py#L623-L652
def list_namespaced_job(self, namespace, **kwargs): # noqa: E501 """list_namespaced_job # noqa: E501 list or watch objects of kind Job # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_namespaced_job(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. 
Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. 
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1JobList If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.list_namespaced_job_with_http_info(namespace, **kwargs) # noqa: E501 else: (data) = self.list_namespaced_job_with_http_info(namespace, **kwargs) # noqa: E501 return data
[ "def", "list_namespaced_job", "(", "self", ",", "namespace", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "l...
list_namespaced_job # noqa: E501 list or watch objects of kind Job # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_namespaced_job(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. 
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. 
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1JobList If the method is called asynchronously, returns the request thread.
[ "list_namespaced_job", "#", "noqa", ":", "E501" ]
python
train
161
gem/oq-engine
openquake/hmtk/seismicity/completeness/comp_stepp_1971.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hmtk/seismicity/completeness/comp_stepp_1971.py#L62-L89
def get_bilinear_residuals_stepp(input_params, xvals, yvals, slope1_fit): ''' Returns the residual sum-of-squares value of a bilinear fit to a data set - with a segment - 1 gradient fixed by an input value (slope_1_fit) :param list input_params: Input parameters for the bilinear model [slope2, crossover_point, intercept] :param numpy.ndarray xvals: x-values of the data to be fit :param numpy.ndarray yvals: y-values of the data to be fit :param float slope1_fit: Gradient of the first slope :returns: Residual sum-of-squares of fit ''' params = np.hstack([slope1_fit, input_params]) num_x = len(xvals) y_model = np.zeros(num_x, dtype=float) residuals = np.zeros(num_x, dtype=float) for iloc in range(0, num_x): y_model[iloc] = piecewise_linear_scalar(params, xvals[iloc]) residuals[iloc] = (yvals[iloc] - y_model[iloc]) ** 2.0 return np.sum(residuals)
[ "def", "get_bilinear_residuals_stepp", "(", "input_params", ",", "xvals", ",", "yvals", ",", "slope1_fit", ")", ":", "params", "=", "np", ".", "hstack", "(", "[", "slope1_fit", ",", "input_params", "]", ")", "num_x", "=", "len", "(", "xvals", ")", "y_model...
Returns the residual sum-of-squares value of a bilinear fit to a data set - with a segment - 1 gradient fixed by an input value (slope_1_fit) :param list input_params: Input parameters for the bilinear model [slope2, crossover_point, intercept] :param numpy.ndarray xvals: x-values of the data to be fit :param numpy.ndarray yvals: y-values of the data to be fit :param float slope1_fit: Gradient of the first slope :returns: Residual sum-of-squares of fit
[ "Returns", "the", "residual", "sum", "-", "of", "-", "squares", "value", "of", "a", "bilinear", "fit", "to", "a", "data", "set", "-", "with", "a", "segment", "-", "1", "gradient", "fixed", "by", "an", "input", "value", "(", "slope_1_fit", ")" ]
python
train
35.571429
aouyar/PyMunin
pymunin/plugins/diskiostats.py
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/plugins/diskiostats.py#L226-L246
def _fetchDevAll(self, namestr, devlist, statsfunc): """Initialize I/O stats for devices. @param namestr: Field name component indicating device type. @param devlist: List of devices. @param statsfunc: Function for retrieving stats for device. """ for dev in devlist: stats = statsfunc(dev) name = 'diskio_%s_requests' % namestr if self.hasGraph(name): self.setGraphVal(name, dev + '_read', stats['rios']) self.setGraphVal(name, dev + '_write', stats['wios']) name = 'diskio_%s_bytes' % namestr if self.hasGraph(name): self.setGraphVal(name, dev + '_read', stats['rbytes']) self.setGraphVal(name, dev + '_write', stats['wbytes']) name = 'diskio_%s_active' % namestr if self.hasGraph(name): self.setGraphVal(name, dev, stats['ios_active'])
[ "def", "_fetchDevAll", "(", "self", ",", "namestr", ",", "devlist", ",", "statsfunc", ")", ":", "for", "dev", "in", "devlist", ":", "stats", "=", "statsfunc", "(", "dev", ")", "name", "=", "'diskio_%s_requests'", "%", "namestr", "if", "self", ".", "hasGr...
Initialize I/O stats for devices. @param namestr: Field name component indicating device type. @param devlist: List of devices. @param statsfunc: Function for retrieving stats for device.
[ "Initialize", "I", "/", "O", "stats", "for", "devices", "." ]
python
train
45.333333
senaite/senaite.core
bika/lims/workflow/__init__.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/workflow/__init__.py#L144-L165
def call_workflow_event(instance, event, after=True): """Calls the instance's workflow event """ if not event.transition: return False portal_type = instance.portal_type wf_module = _load_wf_module('{}.events'.format(portal_type.lower())) if not wf_module: return False # Inspect if event_<transition_id> function exists in the module prefix = after and "after" or "before" func_name = "{}_{}".format(prefix, event.transition.id) func = getattr(wf_module, func_name, False) if not func: return False logger.info('WF event: {0}.events.{1}' .format(portal_type.lower(), func_name)) func(instance) return True
[ "def", "call_workflow_event", "(", "instance", ",", "event", ",", "after", "=", "True", ")", ":", "if", "not", "event", ".", "transition", ":", "return", "False", "portal_type", "=", "instance", ".", "portal_type", "wf_module", "=", "_load_wf_module", "(", "...
Calls the instance's workflow event
[ "Calls", "the", "instance", "s", "workflow", "event" ]
python
train
31.090909
puiterwijk/flask-oidc
flask_oidc/__init__.py
https://github.com/puiterwijk/flask-oidc/blob/7f16e27b926fc12953d6b2ae78a9b9cc9b8d1769/flask_oidc/__init__.py#L296-L336
def _retrieve_userinfo(self, access_token=None): """ Requests extra user information from the Provider's UserInfo and returns the result. :returns: The contents of the UserInfo endpoint. :rtype: dict """ if 'userinfo_uri' not in self.client_secrets: logger.debug('Userinfo uri not specified') raise AssertionError('UserInfo URI not specified') # Cache the info from this request if '_oidc_userinfo' in g: return g._oidc_userinfo http = httplib2.Http() if access_token is None: try: credentials = OAuth2Credentials.from_json( self.credentials_store[g.oidc_id_token['sub']]) except KeyError: logger.debug("Expired ID token, credentials missing", exc_info=True) return None credentials.authorize(http) resp, content = http.request(self.client_secrets['userinfo_uri']) else: # We have been manually overriden with an access token resp, content = http.request( self.client_secrets['userinfo_uri'], "POST", body=urlencode({"access_token": access_token}), headers={'Content-Type': 'application/x-www-form-urlencoded'}) logger.debug('Retrieved user info: %s' % content) info = _json_loads(content) g._oidc_userinfo = info return info
[ "def", "_retrieve_userinfo", "(", "self", ",", "access_token", "=", "None", ")", ":", "if", "'userinfo_uri'", "not", "in", "self", ".", "client_secrets", ":", "logger", ".", "debug", "(", "'Userinfo uri not specified'", ")", "raise", "AssertionError", "(", "'Use...
Requests extra user information from the Provider's UserInfo and returns the result. :returns: The contents of the UserInfo endpoint. :rtype: dict
[ "Requests", "extra", "user", "information", "from", "the", "Provider", "s", "UserInfo", "and", "returns", "the", "result", "." ]
python
train
36.292683
GoogleCloudPlatform/appengine-gcs-client
python/src/cloudstorage/cloudstorage_api.py
https://github.com/GoogleCloudPlatform/appengine-gcs-client/blob/d11078331ecd915d753c886e96a80133599f3f98/python/src/cloudstorage/cloudstorage_api.py#L47-L107
def open(filename, mode='r', content_type=None, options=None, read_buffer_size=storage_api.ReadBuffer.DEFAULT_BUFFER_SIZE, retry_params=None, _account_id=None, offset=0): """Opens a Google Cloud Storage file and returns it as a File-like object. Args: filename: A Google Cloud Storage filename of form '/bucket/filename'. mode: 'r' for reading mode. 'w' for writing mode. In reading mode, the file must exist. In writing mode, a file will be created or be overrode. content_type: The MIME type of the file. str. Only valid in writing mode. options: A str->basestring dict to specify additional headers to pass to GCS e.g. {'x-goog-acl': 'private', 'x-goog-meta-foo': 'foo'}. Supported options are x-goog-acl, x-goog-meta-, cache-control, content-disposition, and content-encoding. Only valid in writing mode. See https://developers.google.com/storage/docs/reference-headers for details. read_buffer_size: The buffer size for read. Read keeps a buffer and prefetches another one. To minimize blocking for large files, always read by buffer size. To minimize number of RPC requests for small files, set a large buffer size. Max is 30MB. retry_params: An instance of api_utils.RetryParams for subsequent calls to GCS from this file handle. If None, the default one is used. _account_id: Internal-use only. offset: Number of bytes to skip at the start of the file. If None, 0 is used. Returns: A reading or writing buffer that supports File-like interface. Buffer must be closed after operations are done. Raises: errors.AuthorizationError: if authorization failed. errors.NotFoundError: if an object that's expected to exist doesn't. ValueError: invalid open mode or if content_type or options are specified in reading mode. 
""" common.validate_file_path(filename) api = storage_api._get_storage_api(retry_params=retry_params, account_id=_account_id) filename = api_utils._quote_filename(filename) if mode == 'w': common.validate_options(options) return storage_api.StreamingBuffer(api, filename, content_type, options) elif mode == 'r': if content_type or options: raise ValueError('Options and content_type can only be specified ' 'for writing mode.') return storage_api.ReadBuffer(api, filename, buffer_size=read_buffer_size, offset=offset) else: raise ValueError('Invalid mode %s.' % mode)
[ "def", "open", "(", "filename", ",", "mode", "=", "'r'", ",", "content_type", "=", "None", ",", "options", "=", "None", ",", "read_buffer_size", "=", "storage_api", ".", "ReadBuffer", ".", "DEFAULT_BUFFER_SIZE", ",", "retry_params", "=", "None", ",", "_accou...
Opens a Google Cloud Storage file and returns it as a File-like object. Args: filename: A Google Cloud Storage filename of form '/bucket/filename'. mode: 'r' for reading mode. 'w' for writing mode. In reading mode, the file must exist. In writing mode, a file will be created or be overrode. content_type: The MIME type of the file. str. Only valid in writing mode. options: A str->basestring dict to specify additional headers to pass to GCS e.g. {'x-goog-acl': 'private', 'x-goog-meta-foo': 'foo'}. Supported options are x-goog-acl, x-goog-meta-, cache-control, content-disposition, and content-encoding. Only valid in writing mode. See https://developers.google.com/storage/docs/reference-headers for details. read_buffer_size: The buffer size for read. Read keeps a buffer and prefetches another one. To minimize blocking for large files, always read by buffer size. To minimize number of RPC requests for small files, set a large buffer size. Max is 30MB. retry_params: An instance of api_utils.RetryParams for subsequent calls to GCS from this file handle. If None, the default one is used. _account_id: Internal-use only. offset: Number of bytes to skip at the start of the file. If None, 0 is used. Returns: A reading or writing buffer that supports File-like interface. Buffer must be closed after operations are done. Raises: errors.AuthorizationError: if authorization failed. errors.NotFoundError: if an object that's expected to exist doesn't. ValueError: invalid open mode or if content_type or options are specified in reading mode.
[ "Opens", "a", "Google", "Cloud", "Storage", "file", "and", "returns", "it", "as", "a", "File", "-", "like", "object", "." ]
python
train
43.327869
shoebot/shoebot
lib/graph/__init__.py
https://github.com/shoebot/shoebot/blob/d554c1765c1899fa25727c9fc6805d221585562b/lib/graph/__init__.py#L551-L554
def nodes_by_category(self, category): """ Returns nodes with the given category attribute. """ return [n for n in self.nodes if n.category == category]
[ "def", "nodes_by_category", "(", "self", ",", "category", ")", ":", "return", "[", "n", "for", "n", "in", "self", ".", "nodes", "if", "n", ".", "category", "==", "category", "]" ]
Returns nodes with the given category attribute.
[ "Returns", "nodes", "with", "the", "given", "category", "attribute", "." ]
python
valid
43.25
shakefu/pyconfig
pyconfig/scripts.py
https://github.com/shakefu/pyconfig/blob/000cb127db51e03cb4070aae6943e956193cbad5/pyconfig/scripts.py#L156-L170
def get_key(self): """ Return the call key, even if it has to be parsed from the source. """ if not isinstance(self.key, Unparseable): return self.key line = self.source[self.col_offset:] regex = re.compile('''pyconfig\.[eginst]+\(([^,]+).*?\)''') match = regex.match(line) if not match: return Unparseable() return "<%s>" % match.group(1)
[ "def", "get_key", "(", "self", ")", ":", "if", "not", "isinstance", "(", "self", ".", "key", ",", "Unparseable", ")", ":", "return", "self", ".", "key", "line", "=", "self", ".", "source", "[", "self", ".", "col_offset", ":", "]", "regex", "=", "re...
Return the call key, even if it has to be parsed from the source.
[ "Return", "the", "call", "key", "even", "if", "it", "has", "to", "be", "parsed", "from", "the", "source", "." ]
python
valid
28.266667
JnyJny/Geometry
Geometry/rectangle.py
https://github.com/JnyJny/Geometry/blob/3500f815fa56c535b36d1b6fd0afe69ce5d055be/Geometry/rectangle.py#L41-L54
def randomSize(cls, widthLimits, heightLimits, origin=None): ''' :param: widthLimits - iterable of integers with length >= 2 :param: heightLimits - iterable of integers with length >= 2 :param: origin - optional Point subclass :return: Rectangle ''' r = cls(0, 0, origin) r.w = random.randint(widthLimits[0], widthLimits[1]) r.h = random.randint(heightLimits[0], heightLimits[1]) return r
[ "def", "randomSize", "(", "cls", ",", "widthLimits", ",", "heightLimits", ",", "origin", "=", "None", ")", ":", "r", "=", "cls", "(", "0", ",", "0", ",", "origin", ")", "r", ".", "w", "=", "random", ".", "randint", "(", "widthLimits", "[", "0", "...
:param: widthLimits - iterable of integers with length >= 2 :param: heightLimits - iterable of integers with length >= 2 :param: origin - optional Point subclass :return: Rectangle
[ ":", "param", ":", "widthLimits", "-", "iterable", "of", "integers", "with", "length", ">", "=", "2", ":", "param", ":", "heightLimits", "-", "iterable", "of", "integers", "with", "length", ">", "=", "2", ":", "param", ":", "origin", "-", "optional", "...
python
train
33.214286
dwavesystems/dwave-cloud-client
dwave/cloud/client.py
https://github.com/dwavesystems/dwave-cloud-client/blob/df3221a8385dc0c04d7b4d84f740bf3ad6706230/dwave/cloud/client.py#L1214-L1313
def _do_poll_problems(self): """Poll the server for the status of a set of problems. Note: This method is always run inside of a daemon thread. """ try: # grouped futures (all scheduled within _POLL_GROUP_TIMEFRAME) frame_futures = {} def task_done(): self._poll_queue.task_done() def add(future): # add future to query frame_futures # returns: worker lives on? # `None` task signifies thread termination if future is None: task_done() return False if future.id not in frame_futures and not future.done(): frame_futures[future.id] = future else: task_done() return True while True: frame_futures.clear() # blocking add first scheduled frame_earliest, future = self._poll_queue.get() if not add(future): return # try grouping if scheduled within grouping timeframe while len(frame_futures) < self._STATUS_QUERY_SIZE: try: task = self._poll_queue.get_nowait() except queue.Empty: break at, future = task if at - frame_earliest <= self._POLL_GROUP_TIMEFRAME: if not add(future): return else: task_done() self._poll_queue.put(task) break # build a query string with ids of all futures in this frame ids = [future.id for future in frame_futures.values()] _LOGGER.debug("Polling for status of futures: %s", ids) query_string = 'problems/?id=' + ','.join(ids) # if futures were cancelled while `add`ing, skip empty frame if not ids: continue # wait until `frame_earliest` before polling delay = frame_earliest - time.time() if delay > 0: _LOGGER.debug("Pausing polling %.2f sec for futures: %s", delay, ids) time.sleep(delay) else: _LOGGER.trace("Skipping non-positive delay of %.2f sec", delay) try: _LOGGER.trace("Executing poll API request") try: response = self.session.get(posixpath.join(self.endpoint, query_string)) except requests.exceptions.Timeout: raise RequestTimeout if response.status_code == 401: raise SolverAuthenticationError() response.raise_for_status() statuses = response.json() for status in statuses: self._handle_problem_status(status, frame_futures[status['id']]) except 
BaseException as exception: if not isinstance(exception, SolverAuthenticationError): exception = IOError(exception) for id_ in frame_futures.keys(): frame_futures[id_]._set_error(IOError(exception), sys.exc_info()) for id_ in frame_futures.keys(): task_done() time.sleep(0) except Exception as err: _LOGGER.exception(err)
[ "def", "_do_poll_problems", "(", "self", ")", ":", "try", ":", "# grouped futures (all scheduled within _POLL_GROUP_TIMEFRAME)", "frame_futures", "=", "{", "}", "def", "task_done", "(", ")", ":", "self", ".", "_poll_queue", ".", "task_done", "(", ")", "def", "add"...
Poll the server for the status of a set of problems. Note: This method is always run inside of a daemon thread.
[ "Poll", "the", "server", "for", "the", "status", "of", "a", "set", "of", "problems", "." ]
python
train
36.02
tensorflow/tensor2tensor
tensor2tensor/models/resnet.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/resnet.py#L427-L511
def resnet_v2(inputs, block_fn, layer_blocks, filters, data_format="channels_first", is_training=False, is_cifar=False, use_td=False, targeting_rate=None, keep_prob=None): """Resnet model. Args: inputs: `Tensor` images. block_fn: `function` for the block to use within the model. Either `residual_block` or `bottleneck_block`. layer_blocks: list of 3 or 4 `int`s denoting the number of blocks to include in each of the 3 or 4 block groups. Each group consists of blocks that take inputs of the same resolution. filters: list of 4 or 5 `int`s denoting the number of filter to include in block. data_format: `str`, "channels_first" `[batch, channels, height, width]` or "channels_last" `[batch, height, width, channels]`. is_training: bool, build in training mode or not. is_cifar: bool, whether the data is CIFAR or not. use_td: `str` one of "weight" or "unit". Set to False or "" to disable targeted dropout. targeting_rate: `float` proportion of weights to target with targeted dropout. keep_prob: `float` keep probability for targeted dropout. Returns: Pre-logit activations. 
""" inputs = block_layer( inputs=inputs, filters=filters[1], block_fn=block_fn, blocks=layer_blocks[0], strides=1, is_training=is_training, name="block_layer1", data_format=data_format, use_td=use_td, targeting_rate=targeting_rate, keep_prob=keep_prob) inputs = block_layer( inputs=inputs, filters=filters[2], block_fn=block_fn, blocks=layer_blocks[1], strides=2, is_training=is_training, name="block_layer2", data_format=data_format, use_td=use_td, targeting_rate=targeting_rate, keep_prob=keep_prob) inputs = block_layer( inputs=inputs, filters=filters[3], block_fn=block_fn, blocks=layer_blocks[2], strides=2, is_training=is_training, name="block_layer3", data_format=data_format, use_td=use_td, targeting_rate=targeting_rate, keep_prob=keep_prob) if not is_cifar: inputs = block_layer( inputs=inputs, filters=filters[4], block_fn=block_fn, blocks=layer_blocks[3], strides=2, is_training=is_training, name="block_layer4", data_format=data_format, use_td=use_td, targeting_rate=targeting_rate, keep_prob=keep_prob) return inputs
[ "def", "resnet_v2", "(", "inputs", ",", "block_fn", ",", "layer_blocks", ",", "filters", ",", "data_format", "=", "\"channels_first\"", ",", "is_training", "=", "False", ",", "is_cifar", "=", "False", ",", "use_td", "=", "False", ",", "targeting_rate", "=", ...
Resnet model. Args: inputs: `Tensor` images. block_fn: `function` for the block to use within the model. Either `residual_block` or `bottleneck_block`. layer_blocks: list of 3 or 4 `int`s denoting the number of blocks to include in each of the 3 or 4 block groups. Each group consists of blocks that take inputs of the same resolution. filters: list of 4 or 5 `int`s denoting the number of filter to include in block. data_format: `str`, "channels_first" `[batch, channels, height, width]` or "channels_last" `[batch, height, width, channels]`. is_training: bool, build in training mode or not. is_cifar: bool, whether the data is CIFAR or not. use_td: `str` one of "weight" or "unit". Set to False or "" to disable targeted dropout. targeting_rate: `float` proportion of weights to target with targeted dropout. keep_prob: `float` keep probability for targeted dropout. Returns: Pre-logit activations.
[ "Resnet", "model", "." ]
python
train
30.011765
hmpf/dataporten-auth
src/dataporten/psa.py
https://github.com/hmpf/dataporten-auth/blob/bc2ff5e11a1fce2c3d7bffe3f2b513bd7e2c0fcc/src/dataporten/psa.py#L95-L108
def get_user_details(self, response): """ Return user details from Dataporten Set username to eduPersonPrincipalName """ user = super(DataportenFeideOAuth2, self).get_user_details(response) sec_userids = user['userid_sec'] for userid in sec_userids: usertype, username = userid.split(':') if usertype == 'feide': user['username'] = username break return user
[ "def", "get_user_details", "(", "self", ",", "response", ")", ":", "user", "=", "super", "(", "DataportenFeideOAuth2", ",", "self", ")", ".", "get_user_details", "(", "response", ")", "sec_userids", "=", "user", "[", "'userid_sec'", "]", "for", "userid", "in...
Return user details from Dataporten Set username to eduPersonPrincipalName
[ "Return", "user", "details", "from", "Dataporten" ]
python
train
33.285714
PmagPy/PmagPy
programs/conversion_scripts/iodp_samples_magic.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/conversion_scripts/iodp_samples_magic.py#L7-L28
def main(): """ iodp_samples_magic.py OPTIONS: -f FILE, input csv file -Fsa FILE, output samples file for updating, default is to overwrite existing samples file """ if "-h" in sys.argv: print(main.__doc__) sys.exit() dataframe = extractor.command_line_dataframe([['WD', False, '.'], ['ID', False, '.'], ['f', True, ''], ['Fsa', False, 'samples.txt'], ['DM', False, 3]]) args = sys.argv checked_args = extractor.extract_and_check_args(args, dataframe) samp_file, output_samp_file, output_dir_path, input_dir_path, data_model_num = extractor.get_vars(['f', 'Fsa', 'WD', 'ID', 'DM'], checked_args) data_model_num = int(float(data_model_num)) if '-Fsa' not in args and data_model_num == 2: output_samp_file = "er_samples.txt" ran, error = convert.iodp_samples(samp_file, output_samp_file, output_dir_path, input_dir_path, data_model_num=data_model_num) if not ran: print("-W- " + error)
[ "def", "main", "(", ")", ":", "if", "\"-h\"", "in", "sys", ".", "argv", ":", "print", "(", "main", ".", "__doc__", ")", "sys", ".", "exit", "(", ")", "dataframe", "=", "extractor", ".", "command_line_dataframe", "(", "[", "[", "'WD'", ",", "False", ...
iodp_samples_magic.py OPTIONS: -f FILE, input csv file -Fsa FILE, output samples file for updating, default is to overwrite existing samples file
[ "iodp_samples_magic", ".", "py", "OPTIONS", ":", "-", "f", "FILE", "input", "csv", "file", "-", "Fsa", "FILE", "output", "samples", "file", "for", "updating", "default", "is", "to", "overwrite", "existing", "samples", "file" ]
python
train
45.5
waqasbhatti/astrobase
astrobase/varbase/autocorr.py
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/varbase/autocorr.py#L61-L106
def _autocorr_func2(mags, lag, maglen, magmed, magstd): ''' This is an alternative function to calculate the autocorrelation. This version is from (first definition): https://en.wikipedia.org/wiki/Correlogram#Estimation_of_autocorrelations Parameters ---------- mags : np.array This is the magnitudes array. MUST NOT have any nans. lag : float The specific lag value to calculate the auto-correlation for. This MUST be less than total number of observations in `mags`. maglen : int The number of elements in the `mags` array. magmed : float The median of the `mags` array. magstd : float The standard deviation of the `mags` array. Returns ------- float The auto-correlation at this specific `lag` value. ''' lagindex = nparange(0,maglen-lag) products = (mags[lagindex] - magmed) * (mags[lagindex+lag] - magmed) autocovarfunc = npsum(products)/lagindex.size varfunc = npsum( (mags[lagindex] - magmed)*(mags[lagindex] - magmed) )/mags.size acorr = autocovarfunc/varfunc return acorr
[ "def", "_autocorr_func2", "(", "mags", ",", "lag", ",", "maglen", ",", "magmed", ",", "magstd", ")", ":", "lagindex", "=", "nparange", "(", "0", ",", "maglen", "-", "lag", ")", "products", "=", "(", "mags", "[", "lagindex", "]", "-", "magmed", ")", ...
This is an alternative function to calculate the autocorrelation. This version is from (first definition): https://en.wikipedia.org/wiki/Correlogram#Estimation_of_autocorrelations Parameters ---------- mags : np.array This is the magnitudes array. MUST NOT have any nans. lag : float The specific lag value to calculate the auto-correlation for. This MUST be less than total number of observations in `mags`. maglen : int The number of elements in the `mags` array. magmed : float The median of the `mags` array. magstd : float The standard deviation of the `mags` array. Returns ------- float The auto-correlation at this specific `lag` value.
[ "This", "is", "an", "alternative", "function", "to", "calculate", "the", "autocorrelation", "." ]
python
valid
23.978261
LukeB42/Window
window.py
https://github.com/LukeB42/Window/blob/6d91c5ff94b8127e9c60f6eb78b7f9026d2faf62/window.py#L719-L750
def palette(fg, bg=-1): """ Since curses only supports a finite amount of initialised colour pairs we memoise any selections you've made as an attribute on this function """ if not hasattr(palette, "counter"): palette.counter = 1 if not hasattr(palette, "selections"): palette.selections = {} selection = "%s%s" % (str(fg), str(bg)) if not selection in palette.selections: palette.selections[selection] = palette.counter palette.counter += 1 # Get available colours colors = [c for c in dir(_curses) if c.startswith('COLOR')] if isinstance(fg, str): if not "COLOR_"+fg.upper() in colors: fg = -1 else: fg = getattr(_curses, "COLOR_"+fg.upper()) if isinstance(bg, str): if not "COLOR_"+bg.upper() in colors: bg = -1 else: bg = getattr(_curses, "COLOR_"+bg.upper()) _curses.init_pair(palette.selections[selection], fg, bg) return _curses.color_pair(palette.selections[selection])
[ "def", "palette", "(", "fg", ",", "bg", "=", "-", "1", ")", ":", "if", "not", "hasattr", "(", "palette", ",", "\"counter\"", ")", ":", "palette", ".", "counter", "=", "1", "if", "not", "hasattr", "(", "palette", ",", "\"selections\"", ")", ":", "pa...
Since curses only supports a finite amount of initialised colour pairs we memoise any selections you've made as an attribute on this function
[ "Since", "curses", "only", "supports", "a", "finite", "amount", "of", "initialised", "colour", "pairs", "we", "memoise", "any", "selections", "you", "ve", "made", "as", "an", "attribute", "on", "this", "function" ]
python
train
31.84375
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_0/tfvc/tfvc_client.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/tfvc/tfvc_client.py#L553-L574
def get_label_items(self, label_id, top=None, skip=None): """GetLabelItems. Get items under a label. :param str label_id: Unique identifier of label :param int top: Max number of items to return :param int skip: Number of items to skip :rtype: [TfvcItem] """ route_values = {} if label_id is not None: route_values['labelId'] = self._serialize.url('label_id', label_id, 'str') query_parameters = {} if top is not None: query_parameters['$top'] = self._serialize.query('top', top, 'int') if skip is not None: query_parameters['$skip'] = self._serialize.query('skip', skip, 'int') response = self._send(http_method='GET', location_id='06166e34-de17-4b60-8cd1-23182a346fda', version='5.0', route_values=route_values, query_parameters=query_parameters) return self._deserialize('[TfvcItem]', self._unwrap_collection(response))
[ "def", "get_label_items", "(", "self", ",", "label_id", ",", "top", "=", "None", ",", "skip", "=", "None", ")", ":", "route_values", "=", "{", "}", "if", "label_id", "is", "not", "None", ":", "route_values", "[", "'labelId'", "]", "=", "self", ".", "...
GetLabelItems. Get items under a label. :param str label_id: Unique identifier of label :param int top: Max number of items to return :param int skip: Number of items to skip :rtype: [TfvcItem]
[ "GetLabelItems", ".", "Get", "items", "under", "a", "label", ".", ":", "param", "str", "label_id", ":", "Unique", "identifier", "of", "label", ":", "param", "int", "top", ":", "Max", "number", "of", "items", "to", "return", ":", "param", "int", "skip", ...
python
train
48.590909
uchicago-cs/deepdish
deepdish/io/ls.py
https://github.com/uchicago-cs/deepdish/blob/01af93621fe082a3972fe53ba7375388c02b0085/deepdish/io/ls.py#L117-L140
def abbreviate(s, maxlength=25): """Color-aware abbreviator""" assert maxlength >= 4 skip = False abbrv = None i = 0 for j, c in enumerate(s): if c == '\033': skip = True elif skip: if c == 'm': skip = False else: i += 1 if i == maxlength - 1: abbrv = s[:j] + '\033[0m...' elif i > maxlength: break if i <= maxlength: return s else: return abbrv
[ "def", "abbreviate", "(", "s", ",", "maxlength", "=", "25", ")", ":", "assert", "maxlength", ">=", "4", "skip", "=", "False", "abbrv", "=", "None", "i", "=", "0", "for", "j", ",", "c", "in", "enumerate", "(", "s", ")", ":", "if", "c", "==", "'\...
Color-aware abbreviator
[ "Color", "-", "aware", "abbreviator" ]
python
train
20.333333
by46/simplekit
simplekit/config/__init__.py
https://github.com/by46/simplekit/blob/33f3ce6de33accc185e1057f096af41859db5976/simplekit/config/__init__.py#L161-L194
def get_namespace(self, namespace, lowercase=True, trim_namespace=True): """Returns a dictionary containing a subset of configuration options that match the specified namespace/prefix. Example usage: app.config['IMAGE_STORE_TYPE']='fs' app.config['IMAGE_STORE_PATH']='/var/app/images' app.config['IMAGE_STORE_BASE_URL']='http://img.website.com' The result dictionary `image_store` would look like: { 'type': 'fs', 'path': '/var/app/images', 'base_url':'http://image.website.com' } This is often useful when configuration options map directly to keyword arguments in functions or class constructors. :param namespace: a configuration namespace :param lowercase: a flag indicating if the keys of the resulting dictionary should be lowercase :param trim_namespace: a flag indicating if the keys of the resulting dictionary should not include the namespace :return: a dict instance """ rv = {} for key, value in six.iteritems(self): if not key.startswith(namespace): continue if trim_namespace: key = key[len(namespace):] else: key = key if lowercase: key = key.lower() rv[key] = value return rv
[ "def", "get_namespace", "(", "self", ",", "namespace", ",", "lowercase", "=", "True", ",", "trim_namespace", "=", "True", ")", ":", "rv", "=", "{", "}", "for", "key", ",", "value", "in", "six", ".", "iteritems", "(", "self", ")", ":", "if", "not", ...
Returns a dictionary containing a subset of configuration options that match the specified namespace/prefix. Example usage: app.config['IMAGE_STORE_TYPE']='fs' app.config['IMAGE_STORE_PATH']='/var/app/images' app.config['IMAGE_STORE_BASE_URL']='http://img.website.com' The result dictionary `image_store` would look like: { 'type': 'fs', 'path': '/var/app/images', 'base_url':'http://image.website.com' } This is often useful when configuration options map directly to keyword arguments in functions or class constructors. :param namespace: a configuration namespace :param lowercase: a flag indicating if the keys of the resulting dictionary should be lowercase :param trim_namespace: a flag indicating if the keys of the resulting dictionary should not include the namespace :return: a dict instance
[ "Returns", "a", "dictionary", "containing", "a", "subset", "of", "configuration", "options", "that", "match", "the", "specified", "namespace", "/", "prefix", ".", "Example", "usage", ":", "app", ".", "config", "[", "IMAGE_STORE_TYPE", "]", "=", "fs", "app", ...
python
train
42.147059
openego/ding0
ding0/core/structure/regions.py
https://github.com/openego/ding0/blob/e2d6528f96255e4bb22ba15514a4f1883564ed5d/ding0/core/structure/regions.py#L236-L248
def add_lv_grid_district(self, lv_grid_district): # TODO: check docstring """Adds a LV grid district to _lv_grid_districts if not already existing Args ---- lv_grid_district: :shapely:`Shapely Polygon object<polygons>` Descr """ if lv_grid_district not in self._lv_grid_districts and \ isinstance(lv_grid_district, LVGridDistrictDing0): self._lv_grid_districts.append(lv_grid_district)
[ "def", "add_lv_grid_district", "(", "self", ",", "lv_grid_district", ")", ":", "# TODO: check docstring", "if", "lv_grid_district", "not", "in", "self", ".", "_lv_grid_districts", "and", "isinstance", "(", "lv_grid_district", ",", "LVGridDistrictDing0", ")", ":", "sel...
Adds a LV grid district to _lv_grid_districts if not already existing Args ---- lv_grid_district: :shapely:`Shapely Polygon object<polygons>` Descr
[ "Adds", "a", "LV", "grid", "district", "to", "_lv_grid_districts", "if", "not", "already", "existing", "Args", "----", "lv_grid_district", ":", ":", "shapely", ":", "Shapely", "Polygon", "object<polygons", ">", "Descr" ]
python
train
36.846154
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/external/path/_path.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/external/path/_path.py#L478-L487
def glob(self, pattern): """ Return a list of path objects that match the pattern. pattern - a path relative to this directory, with wildcards. For example, path('/users').glob('*/bin/*') returns a list of all the files users have in their bin directories. """ cls = self.__class__ return [cls(s) for s in glob.glob(unicode(self / pattern))]
[ "def", "glob", "(", "self", ",", "pattern", ")", ":", "cls", "=", "self", ".", "__class__", "return", "[", "cls", "(", "s", ")", "for", "s", "in", "glob", ".", "glob", "(", "unicode", "(", "self", "/", "pattern", ")", ")", "]" ]
Return a list of path objects that match the pattern. pattern - a path relative to this directory, with wildcards. For example, path('/users').glob('*/bin/*') returns a list of all the files users have in their bin directories.
[ "Return", "a", "list", "of", "path", "objects", "that", "match", "the", "pattern", "." ]
python
test
39
matousc89/padasip
padasip/filters/__init__.py
https://github.com/matousc89/padasip/blob/c969eadd7fa181a84da0554d737fc13c6450d16f/padasip/filters/__init__.py#L230-L281
def AdaptiveFilter(model="lms", **kwargs): """ Function that filter data with selected adaptive filter. **Args:** * `d` : desired value (1 dimensional array) * `x` : input matrix (2-dimensional array). Rows are samples, columns are input arrays. **Kwargs:** * Any key argument that can be accepted with selected filter model. For more information see documentation of desired adaptive filter. * It should be at least filter size `n`. **Returns:** * `y` : output value (1 dimensional array). The size corresponds with the desired value. * `e` : filter error for every sample (1 dimensional array). The size corresponds with the desired value. * `w` : history of all weights (2 dimensional array). Every row is set of the weights for given sample. """ # check if the filter size was specified if not "n" in kwargs: raise ValueError('Filter size is not defined (n=?).') # create filter according model if model in ["LMS", "lms"]: f = FilterLMS(**kwargs) elif model in ["NLMS", "nlms"]: f = FilterNLMS(**kwargs) elif model in ["RLS", "rls"]: f = FilterRLS(**kwargs) elif model in ["GNGD", "gngd"]: f = FilterGNGD(**kwargs) elif model in ["AP", "ap"]: f = FilterAP(**kwargs) elif model in ["LMF", "lmf"]: f = FilterLMF(**kwargs) elif model in ["NLMF", "nlmf"]: f = FilterNLMF(**kwargs) else: raise ValueError('Unknown model of filter {}'.format(model)) # return filter return f
[ "def", "AdaptiveFilter", "(", "model", "=", "\"lms\"", ",", "*", "*", "kwargs", ")", ":", "# check if the filter size was specified", "if", "not", "\"n\"", "in", "kwargs", ":", "raise", "ValueError", "(", "'Filter size is not defined (n=?).'", ")", "# create filter ac...
Function that filter data with selected adaptive filter. **Args:** * `d` : desired value (1 dimensional array) * `x` : input matrix (2-dimensional array). Rows are samples, columns are input arrays. **Kwargs:** * Any key argument that can be accepted with selected filter model. For more information see documentation of desired adaptive filter. * It should be at least filter size `n`. **Returns:** * `y` : output value (1 dimensional array). The size corresponds with the desired value. * `e` : filter error for every sample (1 dimensional array). The size corresponds with the desired value. * `w` : history of all weights (2 dimensional array). Every row is set of the weights for given sample.
[ "Function", "that", "filter", "data", "with", "selected", "adaptive", "filter", ".", "**", "Args", ":", "**" ]
python
train
30.192308
log2timeline/plaso
plaso/cli/helpers/status_view.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/cli/helpers/status_view.py#L37-L56
def ParseOptions(cls, options, configuration_object): """Parses and validates options. Args: options (argparse.Namespace): parser options. configuration_object (CLITool): object to be configured by the argument helper. Raises: BadConfigObject: when the configuration object is of the wrong type. """ if not isinstance(configuration_object, tools.CLITool): raise errors.BadConfigObject( 'Configuration object is not an instance of CLITool') status_view_mode = cls._ParseStringOption( options, 'status_view_mode', default_value=status_view.StatusView.MODE_WINDOW) setattr(configuration_object, '_status_view_mode', status_view_mode)
[ "def", "ParseOptions", "(", "cls", ",", "options", ",", "configuration_object", ")", ":", "if", "not", "isinstance", "(", "configuration_object", ",", "tools", ".", "CLITool", ")", ":", "raise", "errors", ".", "BadConfigObject", "(", "'Configuration object is not ...
Parses and validates options. Args: options (argparse.Namespace): parser options. configuration_object (CLITool): object to be configured by the argument helper. Raises: BadConfigObject: when the configuration object is of the wrong type.
[ "Parses", "and", "validates", "options", "." ]
python
train
35.15
openvax/isovar
isovar/variant_sequences.py
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/variant_sequences.py#L339-L352
def filter_variant_sequences( variant_sequences, preferred_sequence_length, min_variant_sequence_coverage=MIN_VARIANT_SEQUENCE_COVERAGE,): """ Drop variant sequences which are shorter than request or don't have enough supporting reads. """ variant_sequences = trim_variant_sequences( variant_sequences, min_variant_sequence_coverage) return filter_variant_sequences_by_length( variant_sequences=variant_sequences, preferred_sequence_length=preferred_sequence_length)
[ "def", "filter_variant_sequences", "(", "variant_sequences", ",", "preferred_sequence_length", ",", "min_variant_sequence_coverage", "=", "MIN_VARIANT_SEQUENCE_COVERAGE", ",", ")", ":", "variant_sequences", "=", "trim_variant_sequences", "(", "variant_sequences", ",", "min_vari...
Drop variant sequences which are shorter than request or don't have enough supporting reads.
[ "Drop", "variant", "sequences", "which", "are", "shorter", "than", "request", "or", "don", "t", "have", "enough", "supporting", "reads", "." ]
python
train
37.571429
gwpy/gwpy
gwpy/types/array.py
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/types/array.py#L307-L318
def epoch(self): """GPS epoch associated with these data :type: `~astropy.time.Time` """ try: if self._epoch is None: return None return Time(*modf(self._epoch)[::-1], format='gps', scale='utc') except AttributeError: self._epoch = None return self._epoch
[ "def", "epoch", "(", "self", ")", ":", "try", ":", "if", "self", ".", "_epoch", "is", "None", ":", "return", "None", "return", "Time", "(", "*", "modf", "(", "self", ".", "_epoch", ")", "[", ":", ":", "-", "1", "]", ",", "format", "=", "'gps'",...
GPS epoch associated with these data :type: `~astropy.time.Time`
[ "GPS", "epoch", "associated", "with", "these", "data" ]
python
train
29.083333
dshean/pygeotools
pygeotools/lib/geolib.py
https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/geolib.py#L1288-L1314
def ds_IsEmpty(ds): """Check to see if dataset is empty after warp """ out = False b = ds.GetRasterBand(1) #Looks like this throws: #ERROR 1: Failed to compute min/max, no valid pixels found in sampling. #Should just catch this rater than bothering with logic below try: mm = b.ComputeRasterMinMax() if (mm[0] == mm[1]): ndv = b.GetNoDataValue() if ndv is None: out = True else: if (mm[0] == ndv): out = True except Exception: out = True #Check for std of nan #import math #stats = b.ComputeStatistics(1) #for x in stats: # if math.isnan(x): # out = True # break return out
[ "def", "ds_IsEmpty", "(", "ds", ")", ":", "out", "=", "False", "b", "=", "ds", ".", "GetRasterBand", "(", "1", ")", "#Looks like this throws:", "#ERROR 1: Failed to compute min/max, no valid pixels found in sampling.", "#Should just catch this rater than bothering with logic be...
Check to see if dataset is empty after warp
[ "Check", "to", "see", "if", "dataset", "is", "empty", "after", "warp" ]
python
train
27.592593
glue-viz/glue-vispy-viewers
glue_vispy_viewers/extern/vispy/gloo/wrappers.py
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/gloo/wrappers.py#L533-L545
def finish(self): """Wait for GL commands to to finish This creates a GLIR command for glFinish and then processes the GLIR commands. If the GLIR interpreter is remote (e.g. WebGL), this function will return before GL has finished processing the commands. """ if hasattr(self, 'flush_commands'): context = self else: context = get_current_canvas().context context.glir.command('FUNC', 'glFinish') context.flush_commands()
[ "def", "finish", "(", "self", ")", ":", "if", "hasattr", "(", "self", ",", "'flush_commands'", ")", ":", "context", "=", "self", "else", ":", "context", "=", "get_current_canvas", "(", ")", ".", "context", "context", ".", "glir", ".", "command", "(", "...
Wait for GL commands to to finish This creates a GLIR command for glFinish and then processes the GLIR commands. If the GLIR interpreter is remote (e.g. WebGL), this function will return before GL has finished processing the commands.
[ "Wait", "for", "GL", "commands", "to", "to", "finish", "This", "creates", "a", "GLIR", "command", "for", "glFinish", "and", "then", "processes", "the", "GLIR", "commands", ".", "If", "the", "GLIR", "interpreter", "is", "remote", "(", "e", ".", "g", ".", ...
python
train
39.538462
howie6879/ruia
ruia/spider.py
https://github.com/howie6879/ruia/blob/2dc5262fc9c3e902a8faa7d5fa2f046f9d9ee1fa/ruia/spider.py#L315-L325
async def handle_callback(self, aws_callback: typing.Coroutine, response): """Process coroutine callback function""" callback_result = None try: callback_result = await aws_callback except NothingMatchedError as e: self.logger.error(f'<Item: {str(e).lower()}>') except Exception as e: self.logger.error(f'<Callback[{aws_callback.__name__}]: {e}') return callback_result, response
[ "async", "def", "handle_callback", "(", "self", ",", "aws_callback", ":", "typing", ".", "Coroutine", ",", "response", ")", ":", "callback_result", "=", "None", "try", ":", "callback_result", "=", "await", "aws_callback", "except", "NothingMatchedError", "as", "...
Process coroutine callback function
[ "Process", "coroutine", "callback", "function" ]
python
test
41.272727
ShadowBlip/Neteria
neteria/core.py
https://github.com/ShadowBlip/Neteria/blob/1a8c976eb2beeca0a5a272a34ac58b2c114495a4/neteria/core.py#L244-L267
def scheduler(self, sleep_time=0.2): """Starts the scheduler to check for scheduled calls and execute them at the correct time. Args: sleep_time (float): The amount of time to wait in seconds between each loop iteration. This prevents the scheduler from consuming 100% of the host's CPU. Defaults to 0.2 seconds. Returns: None """ while self.listening: # If we have any scheduled calls, execute them and remove them from # our list of scheduled calls. if self.scheduled_calls: timestamp = time.time() self.scheduled_calls[:] = [item for item in self.scheduled_calls if not self.time_reached(timestamp, item)] time.sleep(sleep_time) logger.info("Shutting down the call scheduler...")
[ "def", "scheduler", "(", "self", ",", "sleep_time", "=", "0.2", ")", ":", "while", "self", ".", "listening", ":", "# If we have any scheduled calls, execute them and remove them from", "# our list of scheduled calls.", "if", "self", ".", "scheduled_calls", ":", "timestamp...
Starts the scheduler to check for scheduled calls and execute them at the correct time. Args: sleep_time (float): The amount of time to wait in seconds between each loop iteration. This prevents the scheduler from consuming 100% of the host's CPU. Defaults to 0.2 seconds. Returns: None
[ "Starts", "the", "scheduler", "to", "check", "for", "scheduled", "calls", "and", "execute", "them", "at", "the", "correct", "time", "." ]
python
train
36.958333
django-parler/django-parler
parler/models.py
https://github.com/django-parler/django-parler/blob/11ae4af5e8faddb74c69c848870122df4006a54e/parler/models.py#L349-L357
def set_current_language(self, language_code, initialize=False): """ Switch the currently activate language of the object. """ self._current_language = normalize_language_code(language_code or get_language()) # Ensure the translation is present for __get__ queries. if initialize: self._get_translated_model(use_fallback=False, auto_create=True)
[ "def", "set_current_language", "(", "self", ",", "language_code", ",", "initialize", "=", "False", ")", ":", "self", ".", "_current_language", "=", "normalize_language_code", "(", "language_code", "or", "get_language", "(", ")", ")", "# Ensure the translation is prese...
Switch the currently activate language of the object.
[ "Switch", "the", "currently", "activate", "language", "of", "the", "object", "." ]
python
train
44.222222
ikegami-yukino/madoka-python
madoka/madoka.py
https://github.com/ikegami-yukino/madoka-python/blob/a9a1efecbc85ac4a24a78cbb19f9aed77b7162d3/madoka/madoka.py#L346-L359
def set(self, key, value, key_length=0): """Set value to key-value Params: <str> key <int> value <int> key_length Return: <int> key_value """ if key_length < 1: key_length = len(key) if self.k: self._update(key, value) return self.set_method(self, key, key_length, value)
[ "def", "set", "(", "self", ",", "key", ",", "value", ",", "key_length", "=", "0", ")", ":", "if", "key_length", "<", "1", ":", "key_length", "=", "len", "(", "key", ")", "if", "self", ".", "k", ":", "self", ".", "_update", "(", "key", ",", "val...
Set value to key-value Params: <str> key <int> value <int> key_length Return: <int> key_value
[ "Set", "value", "to", "key", "-", "value", "Params", ":", "<str", ">", "key", "<int", ">", "value", "<int", ">", "key_length", "Return", ":", "<int", ">", "key_value" ]
python
train
27.571429
abarker/pdfCropMargins
src/pdfCropMargins/main_pdfCropMargins.py
https://github.com/abarker/pdfCropMargins/blob/55aca874613750ebf4ae69fd8851bdbb7696d6ac/src/pdfCropMargins/main_pdfCropMargins.py#L207-L236
def get_full_page_box_list_assigning_media_and_crop(input_doc, quiet=False): """Get a list of all the full-page box values for each page. The argument input_doc should be a PdfFileReader object. The boxes on the list are in the simple 4-float list format used by this program, not RectangleObject format.""" full_page_box_list = [] rotation_list = [] if args.verbose and not quiet: print("\nOriginal full page sizes, in PDF format (lbrt):") for page_num in range(input_doc.getNumPages()): # Get the current page and find the full-page box. curr_page = input_doc.getPage(page_num) full_page_box = get_full_page_box_assigning_media_and_crop(curr_page) if args.verbose and not quiet: # want to display page num numbering from 1, so add one print("\t"+str(page_num+1), " rot =", curr_page.rotationAngle, "\t", full_page_box) # Convert the RectangleObject to floats in an ordinary list and append. ordinary_box = [float(b) for b in full_page_box] full_page_box_list.append(ordinary_box) # Append the rotation value to the rotation_list. rotation_list.append(curr_page.rotationAngle) return full_page_box_list, rotation_list
[ "def", "get_full_page_box_list_assigning_media_and_crop", "(", "input_doc", ",", "quiet", "=", "False", ")", ":", "full_page_box_list", "=", "[", "]", "rotation_list", "=", "[", "]", "if", "args", ".", "verbose", "and", "not", "quiet", ":", "print", "(", "\"\\...
Get a list of all the full-page box values for each page. The argument input_doc should be a PdfFileReader object. The boxes on the list are in the simple 4-float list format used by this program, not RectangleObject format.
[ "Get", "a", "list", "of", "all", "the", "full", "-", "page", "box", "values", "for", "each", "page", ".", "The", "argument", "input_doc", "should", "be", "a", "PdfFileReader", "object", ".", "The", "boxes", "on", "the", "list", "are", "in", "the", "sim...
python
train
41.8
ladybug-tools/ladybug
ladybug/psychrometrics.py
https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/psychrometrics.py#L8-L40
def saturated_vapor_pressure(t_kelvin): """Saturated Vapor Pressure (Pa) at t_kelvin (K). This function accounts for the different behaviour above vs. below the freezing point of water. Note: [1] W. Wagner and A. Pru:" The IAPWS Formulation 1995 for the Thermodynamic Properties of Ordinary Water Substance for General and Scientific Use ", Journal of Physical and Chemical Reference Data, June 2002 ,Volume 31, Issue 2, pp. 387535 [2] Vaisala. (2013) Humidity Conversion Formulas: Calculation Formulas for Humidity. www.vaisala.com/Vaisala%20Documents/Application%20notes/Humidity_Conversion_Formulas_B210973EN-F.pdf """ if t_kelvin >= 273.15: # Calculate saturation vapor pressure above freezing sig = 1 - (t_kelvin / 647.096) sig_polynomial = (-7.85951783 * sig) + (1.84408259 * sig ** 1.5) + \ (-11.7866487 * sig ** 3) + (22.6807411 * sig ** 3.5) + \ (-15.9618719 * sig ** 4) + (1.80122502 * sig ** 7.5) crit_temp = 647.096 / t_kelvin exponent = crit_temp * sig_polynomial p_ws = math.exp(exponent) * 22064000 else: # Calculate saturation vapor pressure below freezing theta = t_kelvin / 273.15 exponent = -13.928169 * (1 - theta ** -1.5) + \ 34.707823 * (1 - theta ** -1.25) p_ws = math.exp(exponent) * 611.657 return p_ws
[ "def", "saturated_vapor_pressure", "(", "t_kelvin", ")", ":", "if", "t_kelvin", ">=", "273.15", ":", "# Calculate saturation vapor pressure above freezing", "sig", "=", "1", "-", "(", "t_kelvin", "/", "647.096", ")", "sig_polynomial", "=", "(", "-", "7.85951783", ...
Saturated Vapor Pressure (Pa) at t_kelvin (K). This function accounts for the different behaviour above vs. below the freezing point of water. Note: [1] W. Wagner and A. Pru:" The IAPWS Formulation 1995 for the Thermodynamic Properties of Ordinary Water Substance for General and Scientific Use ", Journal of Physical and Chemical Reference Data, June 2002 ,Volume 31, Issue 2, pp. 387535 [2] Vaisala. (2013) Humidity Conversion Formulas: Calculation Formulas for Humidity. www.vaisala.com/Vaisala%20Documents/Application%20notes/Humidity_Conversion_Formulas_B210973EN-F.pdf
[ "Saturated", "Vapor", "Pressure", "(", "Pa", ")", "at", "t_kelvin", "(", "K", ")", "." ]
python
train
42.515152
major/supernova
supernova/credentials.py
https://github.com/major/supernova/blob/4a217ae53c1c05567014b047c0b6b9dea2d383b3/supernova/credentials.py#L53-L70
def pull_env_credential(env, param, value): """ Dissects a keyring credential lookup string from the supernova config file and returns the username/password combo """ rex = "USE_KEYRING\[([\x27\x22])(.*)\\1\]" # This is the old-style, per-environment keyring credential if value == "USE_KEYRING": username = utils.assemble_username(env, param) # This is the new-style, global keyring credential that can be applied # to multiple environments else: global_identifier = re.match(rex, value).group(2) username = utils.assemble_username('global', global_identifier) return (username, password_get(username))
[ "def", "pull_env_credential", "(", "env", ",", "param", ",", "value", ")", ":", "rex", "=", "\"USE_KEYRING\\[([\\x27\\x22])(.*)\\\\1\\]\"", "# This is the old-style, per-environment keyring credential", "if", "value", "==", "\"USE_KEYRING\"", ":", "username", "=", "utils", ...
Dissects a keyring credential lookup string from the supernova config file and returns the username/password combo
[ "Dissects", "a", "keyring", "credential", "lookup", "string", "from", "the", "supernova", "config", "file", "and", "returns", "the", "username", "/", "password", "combo" ]
python
train
36.5
dereneaton/ipyrad
ipyrad/core/assembly.py
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/core/assembly.py#L560-L620
def _link_barcodes(self): """ Private function. Links Sample barcodes in a dictionary as [Assembly].barcodes, with barcodes parsed from the 'barcodes_path' parameter. This function is called during set_params() when setting the barcodes_path. """ ## parse barcodefile try: ## allows fuzzy match to barcodefile name barcodefile = glob.glob(self.paramsdict["barcodes_path"])[0] ## read in the file bdf = pd.read_csv(barcodefile, header=None, delim_whitespace=1, dtype=str) bdf = bdf.dropna() ## make sure bars are upper case bdf[1] = bdf[1].str.upper() ## if replicates are present then print a warning reps = bdf[0].unique().shape[0] != bdf[0].shape[0] if reps: print("{spacer}Warning: technical replicates (same name) will be combined."\ .format(**{'spacer': self._spacer})) ## add -technical-replicate-N to replicate names reps = [i for i in bdf[0] if list(bdf[0]).count(i) > 1] ureps = list(set(reps)) for name in ureps: idxs = bdf[bdf[0] == ureps[0]].index.tolist() for num, idx in enumerate(idxs): bdf.ix[idx][0] = bdf.ix[idx][0] + "-technical-replicate-" + str(num+1) ## make sure chars are all proper if not all(bdf[1].apply(set("RKSYWMCATG").issuperset)): LOGGER.warn(BAD_BARCODE) raise IPyradError(BAD_BARCODE) ## 3rad/seqcap use multiplexed barcodes ## We'll concatenate them with a plus and split them later if "3rad" in self.paramsdict["datatype"]: try: bdf[2] = bdf[2].str.upper() self.barcodes = dict(zip(bdf[0], bdf[1] + "+" + bdf[2])) except KeyError as inst: msg = " 3rad assumes multiplexed barcodes. Doublecheck your barcodes file." LOGGER.error(msg) raise IPyradError(msg) else: ## set attribute on Assembly object self.barcodes = dict(zip(bdf[0], bdf[1])) except (IOError, IndexError): raise IPyradWarningExit(\ " Barcodes file not found. You entered: {}"\ .format(self.paramsdict["barcodes_path"])) except ValueError as inst: msg = " Barcodes file format error." LOGGER.warn(msg) raise IPyradError(inst)
[ "def", "_link_barcodes", "(", "self", ")", ":", "## parse barcodefile", "try", ":", "## allows fuzzy match to barcodefile name", "barcodefile", "=", "glob", ".", "glob", "(", "self", ".", "paramsdict", "[", "\"barcodes_path\"", "]", ")", "[", "0", "]", "## read in...
Private function. Links Sample barcodes in a dictionary as [Assembly].barcodes, with barcodes parsed from the 'barcodes_path' parameter. This function is called during set_params() when setting the barcodes_path.
[ "Private", "function", ".", "Links", "Sample", "barcodes", "in", "a", "dictionary", "as", "[", "Assembly", "]", ".", "barcodes", "with", "barcodes", "parsed", "from", "the", "barcodes_path", "parameter", ".", "This", "function", "is", "called", "during", "set_...
python
valid
42.540984
danielperna84/pyhomematic
pyhomematic/devicetypes/helper.py
https://github.com/danielperna84/pyhomematic/blob/8b91f3e84c83f05d289c740d507293a0d6759d8e/pyhomematic/devicetypes/helper.py#L158-L166
def set_level(self, position, channel=None): """Seek a specific value by specifying a float() from 0.0 to 1.0.""" try: position = float(position) except Exception as err: LOG.debug("HelperLevel.set_level: Exception %s" % (err,)) return False self.writeNodeData("LEVEL", position, channel)
[ "def", "set_level", "(", "self", ",", "position", ",", "channel", "=", "None", ")", ":", "try", ":", "position", "=", "float", "(", "position", ")", "except", "Exception", "as", "err", ":", "LOG", ".", "debug", "(", "\"HelperLevel.set_level: Exception %s\"",...
Seek a specific value by specifying a float() from 0.0 to 1.0.
[ "Seek", "a", "specific", "value", "by", "specifying", "a", "float", "()", "from", "0", ".", "0", "to", "1", ".", "0", "." ]
python
train
38.777778
Cognexa/cxflow
cxflow/cli/ls.py
https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/cli/ls.py#L35-L40
def humanize_filesize(filesize: int) -> Tuple[str, str]: """Return human readable pair of size and unit from the given filesize in bytes.""" for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']: if filesize < 1024.0: return '{:3.1f}'.format(filesize), unit+'B' filesize /= 1024.0
[ "def", "humanize_filesize", "(", "filesize", ":", "int", ")", "->", "Tuple", "[", "str", ",", "str", "]", ":", "for", "unit", "in", "[", "''", ",", "'K'", ",", "'M'", ",", "'G'", ",", "'T'", ",", "'P'", ",", "'E'", ",", "'Z'", "]", ":", "if", ...
Return human readable pair of size and unit from the given filesize in bytes.
[ "Return", "human", "readable", "pair", "of", "size", "and", "unit", "from", "the", "given", "filesize", "in", "bytes", "." ]
python
train
51.5
zetaops/zengine
zengine/auth/permissions.py
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/auth/permissions.py#L95-L106
def _get_object_menu_models(): """ we need to create basic permissions for only CRUD enabled models """ from pyoko.conf import settings enabled_models = [] for entry in settings.OBJECT_MENU.values(): for mdl in entry: if 'wf' not in mdl: enabled_models.append(mdl['name']) return enabled_models
[ "def", "_get_object_menu_models", "(", ")", ":", "from", "pyoko", ".", "conf", "import", "settings", "enabled_models", "=", "[", "]", "for", "entry", "in", "settings", ".", "OBJECT_MENU", ".", "values", "(", ")", ":", "for", "mdl", "in", "entry", ":", "i...
we need to create basic permissions for only CRUD enabled models
[ "we", "need", "to", "create", "basic", "permissions", "for", "only", "CRUD", "enabled", "models" ]
python
train
29.25
abilian/abilian-core
abilian/web/errors.py
https://github.com/abilian/abilian-core/blob/0a71275bf108c3d51e13ca9e093c0249235351e3/abilian/web/errors.py#L141-L151
def install_default_handler(self, http_error_code): """Install a default error handler for `http_error_code`. The default error handler renders a template named error404.html for http_error_code 404. """ logger.debug( "Set Default HTTP error handler for status code %d", http_error_code ) handler = partial(self.handle_http_error, http_error_code) self.errorhandler(http_error_code)(handler)
[ "def", "install_default_handler", "(", "self", ",", "http_error_code", ")", ":", "logger", ".", "debug", "(", "\"Set Default HTTP error handler for status code %d\"", ",", "http_error_code", ")", "handler", "=", "partial", "(", "self", ".", "handle_http_error", ",", "...
Install a default error handler for `http_error_code`. The default error handler renders a template named error404.html for http_error_code 404.
[ "Install", "a", "default", "error", "handler", "for", "http_error_code", "." ]
python
train
41.636364
berndca/xmodels
xmodels/models.py
https://github.com/berndca/xmodels/blob/8265522229a1ce482a2866cdbd1938293a74bb67/xmodels/models.py#L252-L259
def from_dict(cls, raw_data, **kwargs): """ This factory for :class:`Model` creates a Model from a dict object. """ instance = cls() instance.populate(raw_data, **kwargs) instance.validate(**kwargs) return instance
[ "def", "from_dict", "(", "cls", ",", "raw_data", ",", "*", "*", "kwargs", ")", ":", "instance", "=", "cls", "(", ")", "instance", ".", "populate", "(", "raw_data", ",", "*", "*", "kwargs", ")", "instance", ".", "validate", "(", "*", "*", "kwargs", ...
This factory for :class:`Model` creates a Model from a dict object.
[ "This", "factory", "for", ":", "class", ":", "Model", "creates", "a", "Model", "from", "a", "dict", "object", "." ]
python
train
32.875
olitheolix/qtmacs
qtmacs/auxiliary.py
https://github.com/olitheolix/qtmacs/blob/36253b082b82590f183fe154b053eb3a1e741be2/qtmacs/auxiliary.py#L694-L721
def appendQKeyEvent(self, keyEvent: QtGui.QKeyEvent): """ Append another key to the key sequence represented by this object. |Args| * ``keyEvent`` (**QKeyEvent**): the key to add. |Returns| **None** |Raises| * **QtmacsArgumentError** if at least one argument has an invalid type. """ # Store the QKeyEvent. self.keylistKeyEvent.append(keyEvent) # Convenience shortcuts. mod = keyEvent.modifiers() key = keyEvent.key() # Add the modifier and key to the list. The modifier is a # QFlag structure and must by typecast to an integer to avoid # difficulties with the hashing in the ``match`` routine of # the ``QtmacsKeymap`` object. self.keylistQtConstants.append((int(mod), key))
[ "def", "appendQKeyEvent", "(", "self", ",", "keyEvent", ":", "QtGui", ".", "QKeyEvent", ")", ":", "# Store the QKeyEvent.", "self", ".", "keylistKeyEvent", ".", "append", "(", "keyEvent", ")", "# Convenience shortcuts.", "mod", "=", "keyEvent", ".", "modifiers", ...
Append another key to the key sequence represented by this object. |Args| * ``keyEvent`` (**QKeyEvent**): the key to add. |Returns| **None** |Raises| * **QtmacsArgumentError** if at least one argument has an invalid type.
[ "Append", "another", "key", "to", "the", "key", "sequence", "represented", "by", "this", "object", "." ]
python
train
28.892857
tdryer/hangups
hangups/conversation.py
https://github.com/tdryer/hangups/blob/85c0bf0a57698d077461283895707260f9dbf931/hangups/conversation.py#L337-L352
def _wrap_event(event_): """Wrap hangouts_pb2.Event in ConversationEvent subclass.""" cls = conversation_event.ConversationEvent if event_.HasField('chat_message'): cls = conversation_event.ChatMessageEvent elif event_.HasField('otr_modification'): cls = conversation_event.OTREvent elif event_.HasField('conversation_rename'): cls = conversation_event.RenameEvent elif event_.HasField('membership_change'): cls = conversation_event.MembershipChangeEvent elif event_.HasField('hangout_event'): cls = conversation_event.HangoutEvent elif event_.HasField('group_link_sharing_modification'): cls = conversation_event.GroupLinkSharingModificationEvent return cls(event_)
[ "def", "_wrap_event", "(", "event_", ")", ":", "cls", "=", "conversation_event", ".", "ConversationEvent", "if", "event_", ".", "HasField", "(", "'chat_message'", ")", ":", "cls", "=", "conversation_event", ".", "ChatMessageEvent", "elif", "event_", ".", "HasFie...
Wrap hangouts_pb2.Event in ConversationEvent subclass.
[ "Wrap", "hangouts_pb2", ".", "Event", "in", "ConversationEvent", "subclass", "." ]
python
valid
49.6875
hammerlab/cohorts
cohorts/varcode_utils.py
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/varcode_utils.py#L79-L105
def filter_variants(variant_collection, patient, filter_fn, **kwargs): """Filter variants from the Variant Collection Parameters ---------- variant_collection : varcode.VariantCollection patient : cohorts.Patient filter_fn: function Takes a FilterableVariant and returns a boolean. Only variants returning True are preserved. Returns ------- varcode.VariantCollection Filtered variant collection, with only the variants passing the filter """ if filter_fn: return variant_collection.clone_with_new_elements([ variant for variant in variant_collection if filter_fn(FilterableVariant( variant=variant, variant_collection=variant_collection, patient=patient, ), **kwargs) ]) else: return variant_collection
[ "def", "filter_variants", "(", "variant_collection", ",", "patient", ",", "filter_fn", ",", "*", "*", "kwargs", ")", ":", "if", "filter_fn", ":", "return", "variant_collection", ".", "clone_with_new_elements", "(", "[", "variant", "for", "variant", "in", "varian...
Filter variants from the Variant Collection Parameters ---------- variant_collection : varcode.VariantCollection patient : cohorts.Patient filter_fn: function Takes a FilterableVariant and returns a boolean. Only variants returning True are preserved. Returns ------- varcode.VariantCollection Filtered variant collection, with only the variants passing the filter
[ "Filter", "variants", "from", "the", "Variant", "Collection" ]
python
train
33.296296
Kozea/pygal
pygal/graph/graph.py
https://github.com/Kozea/pygal/blob/5e25c98a59a0642eecd9fcc5dbfeeb2190fbb5e7/pygal/graph/graph.py#L861-L868
def _secondary_min(self): """Getter for the minimum series value""" return ( self.secondary_range[0] if (self.secondary_range and self.secondary_range[0] is not None) else (min(self._secondary_values) if self._secondary_values else None) )
[ "def", "_secondary_min", "(", "self", ")", ":", "return", "(", "self", ".", "secondary_range", "[", "0", "]", "if", "(", "self", ".", "secondary_range", "and", "self", ".", "secondary_range", "[", "0", "]", "is", "not", "None", ")", "else", "(", "min",...
Getter for the minimum series value
[ "Getter", "for", "the", "minimum", "series", "value" ]
python
train
38.5
shoyer/cyordereddict
python3/cyordereddict/benchmark/magic_timeit.py
https://github.com/shoyer/cyordereddict/blob/248e3e8616441554c87175820204e269a3afb32a/python3/cyordereddict/benchmark/magic_timeit.py#L7-L77
def magic_timeit(setup, stmt, ncalls=None, repeat=3, force_ms=False): """Time execution of a Python statement or expression Usage:\\ %timeit [-n<N> -r<R> [-t|-c]] statement Time execution of a Python statement or expression using the timeit module. Options: -n<N>: execute the given statement <N> times in a loop. If this value is not given, a fitting value is chosen. -r<R>: repeat the loop iteration <R> times and take the best result. Default: 3 -t: use time.time to measure the time, which is the default on Unix. This function measures wall time. -c: use time.clock to measure the time, which is the default on Windows and measures wall time. On Unix, resource.getrusage is used instead and returns the CPU user time. -p<P>: use a precision of <P> digits to display the timing result. Default: 3 Examples: In [1]: %timeit pass 10000000 loops, best of 3: 53.3 ns per loop In [2]: u = None In [3]: %timeit u is None 10000000 loops, best of 3: 184 ns per loop In [4]: %timeit -r 4 u == None 1000000 loops, best of 4: 242 ns per loop In [5]: import time In [6]: %timeit -n1 time.sleep(2) 1 loops, best of 3: 2 s per loop The times reported by %timeit will be slightly higher than those reported by the timeit.py script when variables are accessed. This is due to the fact that %timeit executes the statement in the namespace of the shell, compared with timeit.py, which uses a single setup statement to import function or create variables. 
Generally, the bias does not matter as long as results from timeit.py are not mixed with those from %timeit.""" import timeit import math units = ["s", "ms", 'us', "ns"] scaling = [1, 1e3, 1e6, 1e9] timer = timeit.Timer(stmt, setup) if ncalls is None: # determine number so that 0.2 <= total time < 2.0 number = 1 for _ in range(1, 10): if timer.timeit(number) >= 0.1: break number *= 10 else: number = ncalls best = min(timer.repeat(repeat, number)) / number if force_ms: order = 1 else: if best > 0.0 and best < 1000.0: order = min(-int(math.floor(math.log10(best)) // 3), 3) elif best >= 1000.0: order = 0 else: order = 3 return {'loops': number, 'repeat': repeat, 'timing': best * scaling[order], 'units': units[order]}
[ "def", "magic_timeit", "(", "setup", ",", "stmt", ",", "ncalls", "=", "None", ",", "repeat", "=", "3", ",", "force_ms", "=", "False", ")", ":", "import", "timeit", "import", "math", "units", "=", "[", "\"s\"", ",", "\"ms\"", ",", "'us'", ",", "\"ns\"...
Time execution of a Python statement or expression Usage:\\ %timeit [-n<N> -r<R> [-t|-c]] statement Time execution of a Python statement or expression using the timeit module. Options: -n<N>: execute the given statement <N> times in a loop. If this value is not given, a fitting value is chosen. -r<R>: repeat the loop iteration <R> times and take the best result. Default: 3 -t: use time.time to measure the time, which is the default on Unix. This function measures wall time. -c: use time.clock to measure the time, which is the default on Windows and measures wall time. On Unix, resource.getrusage is used instead and returns the CPU user time. -p<P>: use a precision of <P> digits to display the timing result. Default: 3 Examples: In [1]: %timeit pass 10000000 loops, best of 3: 53.3 ns per loop In [2]: u = None In [3]: %timeit u is None 10000000 loops, best of 3: 184 ns per loop In [4]: %timeit -r 4 u == None 1000000 loops, best of 4: 242 ns per loop In [5]: import time In [6]: %timeit -n1 time.sleep(2) 1 loops, best of 3: 2 s per loop The times reported by %timeit will be slightly higher than those reported by the timeit.py script when variables are accessed. This is due to the fact that %timeit executes the statement in the namespace of the shell, compared with timeit.py, which uses a single setup statement to import function or create variables. Generally, the bias does not matter as long as results from timeit.py are not mixed with those from %timeit.
[ "Time", "execution", "of", "a", "Python", "statement", "or", "expression", "Usage", ":", "\\\\", "%timeit", "[", "-", "n<N", ">", "-", "r<R", ">", "[", "-", "t|", "-", "c", "]]", "statement", "Time", "execution", "of", "a", "Python", "statement", "or",...
python
train
34.971831
ajenhl/tacl
tacl/data_store.py
https://github.com/ajenhl/tacl/blob/b8a343248e77f1c07a5a4ac133a9ad6e0b4781c2/tacl/data_store.py#L266-L291
def _csv(self, cursor, fieldnames, output_fh): """Writes the rows of `cursor` in CSV format to `output_fh` and returns it. :param cursor: database cursor containing data to be output :type cursor: `sqlite3.Cursor` :param fieldnames: row headings :type fieldnames: `list` :param output_fh: file to write data to :type output_fh: file object :rtype: file object """ self._logger.info('Finished query; outputting results in CSV format') # Specify a lineterminator to avoid an extra \r being added on # Windows; see # https://stackoverflow.com/questions/3191528/csv-in-python-adding-extra-carriage-return if sys.platform in ('win32', 'cygwin') and output_fh is sys.stdout: writer = csv.writer(output_fh, lineterminator='\n') else: writer = csv.writer(output_fh) writer.writerow(fieldnames) for row in cursor: writer.writerow(row) self._logger.info('Finished outputting results') return output_fh
[ "def", "_csv", "(", "self", ",", "cursor", ",", "fieldnames", ",", "output_fh", ")", ":", "self", ".", "_logger", ".", "info", "(", "'Finished query; outputting results in CSV format'", ")", "# Specify a lineterminator to avoid an extra \\r being added on", "# Windows; see"...
Writes the rows of `cursor` in CSV format to `output_fh` and returns it. :param cursor: database cursor containing data to be output :type cursor: `sqlite3.Cursor` :param fieldnames: row headings :type fieldnames: `list` :param output_fh: file to write data to :type output_fh: file object :rtype: file object
[ "Writes", "the", "rows", "of", "cursor", "in", "CSV", "format", "to", "output_fh", "and", "returns", "it", "." ]
python
train
40.923077
KeplerGO/K2fov
K2fov/fov.py
https://github.com/KeplerGO/K2fov/blob/fb122b35687340e0357cba9e0dd47b3be0760693/K2fov/fov.py#L756-L770
def draw(self, **kwargs): """Draw the polygon Optional Inputs: ------------ All optional inputs are passed to ``matplotlib.patches.Polygon`` Notes: --------- Does not accept maptype as an argument. """ ax = mp.gca() shape = matplotlib.patches.Polygon(self.polygon, **kwargs) ax.add_artist(shape)
[ "def", "draw", "(", "self", ",", "*", "*", "kwargs", ")", ":", "ax", "=", "mp", ".", "gca", "(", ")", "shape", "=", "matplotlib", ".", "patches", ".", "Polygon", "(", "self", ".", "polygon", ",", "*", "*", "kwargs", ")", "ax", ".", "add_artist", ...
Draw the polygon Optional Inputs: ------------ All optional inputs are passed to ``matplotlib.patches.Polygon`` Notes: --------- Does not accept maptype as an argument.
[ "Draw", "the", "polygon" ]
python
train
24.8
olsoneric/pedemath
pedemath/vec2.py
https://github.com/olsoneric/pedemath/blob/4bffcfe7089e421d603eb0a9708b84789c2d16be/pedemath/vec2.py#L80-L86
def cross_v2(vec1, vec2): """Return the crossproduct of the two vectors as a Vec2. Cross product doesn't really make sense in 2D, but return the Z component of the 3d result. """ return vec1.y * vec2.x - vec1.x * vec2.y
[ "def", "cross_v2", "(", "vec1", ",", "vec2", ")", ":", "return", "vec1", ".", "y", "*", "vec2", ".", "x", "-", "vec1", ".", "x", "*", "vec2", ".", "y" ]
Return the crossproduct of the two vectors as a Vec2. Cross product doesn't really make sense in 2D, but return the Z component of the 3d result.
[ "Return", "the", "crossproduct", "of", "the", "two", "vectors", "as", "a", "Vec2", ".", "Cross", "product", "doesn", "t", "really", "make", "sense", "in", "2D", "but", "return", "the", "Z", "component", "of", "the", "3d", "result", "." ]
python
train
33.428571
apache/incubator-mxnet
tools/coreml/converter/_layers.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/tools/coreml/converter/_layers.py#L188-L220
def convert_activation(net, node, module, builder): """Convert an activation layer from mxnet to coreml. Parameters ---------- network: net A mxnet network object. layer: node Node to convert. module: module An module for MXNet builder: NeuralNetworkBuilder A neural network builder object. """ input_name, output_name = _get_input_output_name(net, node) name = node['name'] mx_non_linearity = _get_attrs(node)['act_type'] #TODO add SCALED_TANH, SOFTPLUS, SOFTSIGN, SIGMOID_HARD, LEAKYRELU, PRELU, ELU, PARAMETRICSOFTPLUS, THRESHOLDEDRELU, LINEAR if mx_non_linearity == 'relu': non_linearity = 'RELU' elif mx_non_linearity == 'tanh': non_linearity = 'TANH' elif mx_non_linearity == 'sigmoid': non_linearity = 'SIGMOID' else: raise TypeError('Unknown activation type %s' % mx_non_linearity) builder.add_activation(name = name, non_linearity = non_linearity, input_name = input_name, output_name = output_name)
[ "def", "convert_activation", "(", "net", ",", "node", ",", "module", ",", "builder", ")", ":", "input_name", ",", "output_name", "=", "_get_input_output_name", "(", "net", ",", "node", ")", "name", "=", "node", "[", "'name'", "]", "mx_non_linearity", "=", ...
Convert an activation layer from mxnet to coreml. Parameters ---------- network: net A mxnet network object. layer: node Node to convert. module: module An module for MXNet builder: NeuralNetworkBuilder A neural network builder object.
[ "Convert", "an", "activation", "layer", "from", "mxnet", "to", "coreml", "." ]
python
train
33.121212
log2timeline/dfvfs
dfvfs/vfs/cpio_file_entry.py
https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/vfs/cpio_file_entry.py#L108-L116
def _GetDirectory(self): """Retrieves a directory. Returns: CPIODirectory: a directory or None if not available. """ if self.entry_type != definitions.FILE_ENTRY_TYPE_DIRECTORY: return None return CPIODirectory(self._file_system, self.path_spec)
[ "def", "_GetDirectory", "(", "self", ")", ":", "if", "self", ".", "entry_type", "!=", "definitions", ".", "FILE_ENTRY_TYPE_DIRECTORY", ":", "return", "None", "return", "CPIODirectory", "(", "self", ".", "_file_system", ",", "self", ".", "path_spec", ")" ]
Retrieves a directory. Returns: CPIODirectory: a directory or None if not available.
[ "Retrieves", "a", "directory", "." ]
python
train
30
PyHDI/Pyverilog
pyverilog/vparser/parser.py
https://github.com/PyHDI/Pyverilog/blob/b852cc5ed6a7a2712e33639f9d9782d0d1587a53/pyverilog/vparser/parser.py#L107-L111
def p_pragma_assign(self, p): 'pragma : LPAREN TIMES ID EQUALS expression TIMES RPAREN' p[0] = Pragma(PragmaEntry(p[3], p[5], lineno=p.lineno(1)), lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1))
[ "def", "p_pragma_assign", "(", "self", ",", "p", ")", ":", "p", "[", "0", "]", "=", "Pragma", "(", "PragmaEntry", "(", "p", "[", "3", "]", ",", "p", "[", "5", "]", ",", "lineno", "=", "p", ".", "lineno", "(", "1", ")", ")", ",", "lineno", "...
pragma : LPAREN TIMES ID EQUALS expression TIMES RPAREN
[ "pragma", ":", "LPAREN", "TIMES", "ID", "EQUALS", "expression", "TIMES", "RPAREN" ]
python
train
47.4
googledatalab/pydatalab
google/datalab/contrib/mlworkbench/_prediction_explainer.py
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/contrib/mlworkbench/_prediction_explainer.py#L56-L71
def _make_text_predict_fn(self, labels, instance, column_to_explain): """Create a predict_fn that can be used by LIME text explainer. """ def _predict_fn(perturbed_text): predict_input = [] for x in perturbed_text: instance_copy = dict(instance) instance_copy[column_to_explain] = x predict_input.append(instance_copy) df = _local_predict.get_prediction_results(self._model_dir, predict_input, self._headers, with_source=False) probs = _local_predict.get_probs_for_labels(labels, df) return np.asarray(probs) return _predict_fn
[ "def", "_make_text_predict_fn", "(", "self", ",", "labels", ",", "instance", ",", "column_to_explain", ")", ":", "def", "_predict_fn", "(", "perturbed_text", ")", ":", "predict_input", "=", "[", "]", "for", "x", "in", "perturbed_text", ":", "instance_copy", "=...
Create a predict_fn that can be used by LIME text explainer.
[ "Create", "a", "predict_fn", "that", "can", "be", "used", "by", "LIME", "text", "explainer", "." ]
python
train
43.875
Fantomas42/mots-vides
mots_vides/factory.py
https://github.com/Fantomas42/mots-vides/blob/eaeccf73bdb415d0c5559ccd74de360b37a2bbac/mots_vides/factory.py#L111-L118
def write_collection(self, filename, collection): """ Writes a collection of stop words into a file. """ collection = sorted(list(collection)) with open(filename, 'wb+') as fd: fd.truncate() fd.write('\n'.join(collection).encode('utf-8'))
[ "def", "write_collection", "(", "self", ",", "filename", ",", "collection", ")", ":", "collection", "=", "sorted", "(", "list", "(", "collection", ")", ")", "with", "open", "(", "filename", ",", "'wb+'", ")", "as", "fd", ":", "fd", ".", "truncate", "("...
Writes a collection of stop words into a file.
[ "Writes", "a", "collection", "of", "stop", "words", "into", "a", "file", "." ]
python
train
36.875
dacut/python-aws-sig
awssig/sigv4.py
https://github.com/dacut/python-aws-sig/blob/7f6054dca4b32e67ca3d39db31c1b4be5efe54bd/awssig/sigv4.py#L302-L325
def canonical_request(self): """ The AWS SigV4 canonical request given parameters from an HTTP request. This process is outlined here: http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html The canonical request is: request_method + '\n' + canonical_uri_path + '\n' + canonical_query_string + '\n' + signed_headers + '\n' + sha256(body).hexdigest() """ signed_headers = self.signed_headers header_lines = "".join( ["%s:%s\n" % item for item in iteritems(signed_headers)]) header_keys = ";".join([key for key in iterkeys(self.signed_headers)]) return (self.request_method + "\n" + self.canonical_uri_path + "\n" + self.canonical_query_string + "\n" + header_lines + "\n" + header_keys + "\n" + sha256(self.body).hexdigest())
[ "def", "canonical_request", "(", "self", ")", ":", "signed_headers", "=", "self", ".", "signed_headers", "header_lines", "=", "\"\"", ".", "join", "(", "[", "\"%s:%s\\n\"", "%", "item", "for", "item", "in", "iteritems", "(", "signed_headers", ")", "]", ")", ...
The AWS SigV4 canonical request given parameters from an HTTP request. This process is outlined here: http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html The canonical request is: request_method + '\n' + canonical_uri_path + '\n' + canonical_query_string + '\n' + signed_headers + '\n' + sha256(body).hexdigest()
[ "The", "AWS", "SigV4", "canonical", "request", "given", "parameters", "from", "an", "HTTP", "request", ".", "This", "process", "is", "outlined", "here", ":", "http", ":", "//", "docs", ".", "aws", ".", "amazon", ".", "com", "/", "general", "/", "latest",...
python
train
40.333333
bukun/TorCMS
torcms/handlers/log_handler.py
https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/handlers/log_handler.py#L151-L183
def pageview(self, cur_p=''): ''' View the list of the Log. ''' if cur_p == '': current_page_number = 1 else: current_page_number = int(cur_p) current_page_number = 1 if current_page_number < 1 else current_page_number pager_num = int(MLog.total_number() / CMS_CFG['list_num']) kwd = { 'pager': '', 'title': '', 'current_page': current_page_number, } arr_num = [] postinfo = MLog.query_all_current_url() for i in postinfo: postnum = MLog.count_of_current_url(i.current_url) arr_num.append(postnum) self.render('misc/log/pageview.html', kwd=kwd, infos=MLog.query_all_pageview(current_page_num=current_page_number), postinfo=postinfo, arr_num=arr_num, format_date=tools.format_date, userinfo=self.userinfo)
[ "def", "pageview", "(", "self", ",", "cur_p", "=", "''", ")", ":", "if", "cur_p", "==", "''", ":", "current_page_number", "=", "1", "else", ":", "current_page_number", "=", "int", "(", "cur_p", ")", "current_page_number", "=", "1", "if", "current_page_numb...
View the list of the Log.
[ "View", "the", "list", "of", "the", "Log", "." ]
python
train
29.878788
google/prettytensor
prettytensor/tutorial/data_utils.py
https://github.com/google/prettytensor/blob/75daa0b11252590f548da5647addc0ea610c4c45/prettytensor/tutorial/data_utils.py#L37-L46
def maybe_download(url, filename): """Download the data from Yann's website, unless it's already here.""" if not os.path.exists(WORK_DIRECTORY): os.mkdir(WORK_DIRECTORY) filepath = os.path.join(WORK_DIRECTORY, filename) if not os.path.exists(filepath): filepath, _ = request.urlretrieve(url + filename, filepath) statinfo = os.stat(filepath) print('Successfully downloaded', filename, statinfo.st_size, 'bytes.') return filepath
[ "def", "maybe_download", "(", "url", ",", "filename", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "WORK_DIRECTORY", ")", ":", "os", ".", "mkdir", "(", "WORK_DIRECTORY", ")", "filepath", "=", "os", ".", "path", ".", "join", "(", "WOR...
Download the data from Yann's website, unless it's already here.
[ "Download", "the", "data", "from", "Yann", "s", "website", "unless", "it", "s", "already", "here", "." ]
python
train
44.5
dshean/pygeotools
pygeotools/lib/geolib.py
https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/geolib.py#L422-L434
def pixelToMap(pX, pY, geoTransform): """Convert pixel coordinates to map coordinates based on geotransform Accepts float or NumPy arrays GDAL model used here - upper left corner of upper left pixel for mX, mY (and in GeoTransform) """ pX = np.asarray(pX, dtype=float) pY = np.asarray(pY, dtype=float) pX += 0.5 pY += 0.5 mX, mY = applyGeoTransform(pX, pY, geoTransform) return mX, mY
[ "def", "pixelToMap", "(", "pX", ",", "pY", ",", "geoTransform", ")", ":", "pX", "=", "np", ".", "asarray", "(", "pX", ",", "dtype", "=", "float", ")", "pY", "=", "np", ".", "asarray", "(", "pY", ",", "dtype", "=", "float", ")", "pX", "+=", "0.5...
Convert pixel coordinates to map coordinates based on geotransform Accepts float or NumPy arrays GDAL model used here - upper left corner of upper left pixel for mX, mY (and in GeoTransform)
[ "Convert", "pixel", "coordinates", "to", "map", "coordinates", "based", "on", "geotransform", "Accepts", "float", "or", "NumPy", "arrays" ]
python
train
32.153846
mitsei/dlkit
dlkit/services/cataloging.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/services/cataloging.py#L100-L111
def _set_catalog_view(self, session): """Sets the underlying catalog view to match current view""" if self._catalog_view == COMPARATIVE: try: session.use_comparative_catalog_view() except AttributeError: pass else: try: session.use_plenary_catalog_view() except AttributeError: pass
[ "def", "_set_catalog_view", "(", "self", ",", "session", ")", ":", "if", "self", ".", "_catalog_view", "==", "COMPARATIVE", ":", "try", ":", "session", ".", "use_comparative_catalog_view", "(", ")", "except", "AttributeError", ":", "pass", "else", ":", "try", ...
Sets the underlying catalog view to match current view
[ "Sets", "the", "underlying", "catalog", "view", "to", "match", "current", "view" ]
python
train
33.916667
thespacedoctor/sherlock
sherlock/imports/ned_d.py
https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/imports/ned_d.py#L276-L323
def _clean_up_columns( self): """clean up columns of the NED table .. todo :: - update key arguments values and definitions with defaults - update return values and definitions - update usage examples and text - update docstring text - check sublime snippet exists - clip any useful text to docs mindmap - regenerate the docs and check redendering of this docstring """ self.log.debug('starting the ``_clean_up_columns`` method') tableName = self.dbTableName print "cleaning up %(tableName)s columns" % locals() sqlQuery = u""" set sql_mode="STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION"; """ % locals() writequery( log=self.log, sqlQuery=sqlQuery, dbConn=self.cataloguesDbConn, ) sqlQuery = u""" update %(tableName)s set dist_mod_err = null where dist_mod_err = 0; update %(tableName)s set dist_in_ned_flag = null where dist_in_ned_flag = ""; update %(tableName)s set notes = null where notes = ""; update %(tableName)s set redshift = null where redshift = 0; update %(tableName)s set dist_derived_from_sn = null where dist_derived_from_sn = ""; update %(tableName)s set hubble_const = null where hubble_const = 0; update %(tableName)s set lmc_mod = null where lmc_mod = 0; update %(tableName)s set master_row = 0; update %(tableName)s set master_row = 1 where primaryId in (select * from (select distinct primaryId from %(tableName)s group by galaxy_index_id) as alias); """ % locals() writequery( log=self.log, sqlQuery=sqlQuery, dbConn=self.cataloguesDbConn, ) self.log.debug('completed the ``_clean_up_columns`` method') return None
[ "def", "_clean_up_columns", "(", "self", ")", ":", "self", ".", "log", ".", "debug", "(", "'starting the ``_clean_up_columns`` method'", ")", "tableName", "=", "self", ".", "dbTableName", "print", "\"cleaning up %(tableName)s columns\"", "%", "locals", "(", ")", "sq...
clean up columns of the NED table .. todo :: - update key arguments values and definitions with defaults - update return values and definitions - update usage examples and text - update docstring text - check sublime snippet exists - clip any useful text to docs mindmap - regenerate the docs and check redendering of this docstring
[ "clean", "up", "columns", "of", "the", "NED", "table" ]
python
train
42.395833
jeffh/rpi_courses
rpi_courses/parser/features.py
https://github.com/jeffh/rpi_courses/blob/c97176f73f866f112c785910ebf3ff8a790e8e9a/rpi_courses/parser/features.py#L14-L19
def timestamp_feature(catalog, soup): """The datetime the xml file was last modified. """ catalog.timestamp = int(soup.coursedb['timestamp']) catalog.datetime = datetime.datetime.fromtimestamp(catalog.timestamp) logger.info('Catalog last updated on %s' % catalog.datetime)
[ "def", "timestamp_feature", "(", "catalog", ",", "soup", ")", ":", "catalog", ".", "timestamp", "=", "int", "(", "soup", ".", "coursedb", "[", "'timestamp'", "]", ")", "catalog", ".", "datetime", "=", "datetime", ".", "datetime", ".", "fromtimestamp", "(",...
The datetime the xml file was last modified.
[ "The", "datetime", "the", "xml", "file", "was", "last", "modified", "." ]
python
train
47.833333
twilio/twilio-python
twilio/rest/taskrouter/v1/workspace/worker/__init__.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/taskrouter/v1/workspace/worker/__init__.py#L87-L123
def list(self, activity_name=values.unset, activity_sid=values.unset, available=values.unset, friendly_name=values.unset, target_workers_expression=values.unset, task_queue_name=values.unset, task_queue_sid=values.unset, limit=None, page_size=None): """ Lists WorkerInstance records from the API as a list. Unlike stream(), this operation is eager and will load `limit` records into memory before returning. :param unicode activity_name: Filter by workers that are in a particular Activity by Friendly Name :param unicode activity_sid: Filter by workers that are in a particular Activity by SID :param unicode available: Filter by workers that are available or unavailable. :param unicode friendly_name: Filter by a worker's friendly name :param unicode target_workers_expression: Filter by workers that would match an expression on a TaskQueue. :param unicode task_queue_name: Filter by workers that are eligible for a TaskQueue by Friendly Name :param unicode task_queue_sid: Filter by workers that are eligible for a TaskQueue by SID :param int limit: Upper limit for the number of records to return. list() guarantees never to return more than limit. Default is no limit :param int page_size: Number of records to fetch per request, when not set will use the default value of 50 records. If no page_size is defined but a limit is defined, list() will attempt to read the limit with the most efficient page size, i.e. min(limit, 1000) :returns: Generator that will yield up to limit results :rtype: list[twilio.rest.taskrouter.v1.workspace.worker.WorkerInstance] """ return list(self.stream( activity_name=activity_name, activity_sid=activity_sid, available=available, friendly_name=friendly_name, target_workers_expression=target_workers_expression, task_queue_name=task_queue_name, task_queue_sid=task_queue_sid, limit=limit, page_size=page_size, ))
[ "def", "list", "(", "self", ",", "activity_name", "=", "values", ".", "unset", ",", "activity_sid", "=", "values", ".", "unset", ",", "available", "=", "values", ".", "unset", ",", "friendly_name", "=", "values", ".", "unset", ",", "target_workers_expression...
Lists WorkerInstance records from the API as a list. Unlike stream(), this operation is eager and will load `limit` records into memory before returning. :param unicode activity_name: Filter by workers that are in a particular Activity by Friendly Name :param unicode activity_sid: Filter by workers that are in a particular Activity by SID :param unicode available: Filter by workers that are available or unavailable. :param unicode friendly_name: Filter by a worker's friendly name :param unicode target_workers_expression: Filter by workers that would match an expression on a TaskQueue. :param unicode task_queue_name: Filter by workers that are eligible for a TaskQueue by Friendly Name :param unicode task_queue_sid: Filter by workers that are eligible for a TaskQueue by SID :param int limit: Upper limit for the number of records to return. list() guarantees never to return more than limit. Default is no limit :param int page_size: Number of records to fetch per request, when not set will use the default value of 50 records. If no page_size is defined but a limit is defined, list() will attempt to read the limit with the most efficient page size, i.e. min(limit, 1000) :returns: Generator that will yield up to limit results :rtype: list[twilio.rest.taskrouter.v1.workspace.worker.WorkerInstance]
[ "Lists", "WorkerInstance", "records", "from", "the", "API", "as", "a", "list", ".", "Unlike", "stream", "()", "this", "operation", "is", "eager", "and", "will", "load", "limit", "records", "into", "memory", "before", "returning", "." ]
python
train
60.243243
alecthomas/voluptuous
voluptuous/validators.py
https://github.com/alecthomas/voluptuous/blob/36c8c11e2b7eb402c24866fa558473661ede9403/voluptuous/validators.py#L451-L466
def IsDir(v): """Verify the directory exists. >>> IsDir()('/') '/' >>> with raises(DirInvalid, 'Not a directory'): ... IsDir()(None) """ try: if v: v = str(v) return os.path.isdir(v) else: raise DirInvalid("Not a directory") except TypeError: raise DirInvalid("Not a directory")
[ "def", "IsDir", "(", "v", ")", ":", "try", ":", "if", "v", ":", "v", "=", "str", "(", "v", ")", "return", "os", ".", "path", ".", "isdir", "(", "v", ")", "else", ":", "raise", "DirInvalid", "(", "\"Not a directory\"", ")", "except", "TypeError", ...
Verify the directory exists. >>> IsDir()('/') '/' >>> with raises(DirInvalid, 'Not a directory'): ... IsDir()(None)
[ "Verify", "the", "directory", "exists", "." ]
python
train
22.375
mitsei/dlkit
dlkit/authz_adapter/assessment/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/authz_adapter/assessment/sessions.py#L725-L731
def get_items_by_search(self, item_query, item_search): """Pass through to provider ItemSearchSession.get_items_by_search""" # Implemented from azosid template for - # osid.resource.ResourceSearchSession.get_resources_by_search_template if not self._can('search'): raise PermissionDenied() return self._provider_session.get_items_by_search(item_query, item_search)
[ "def", "get_items_by_search", "(", "self", ",", "item_query", ",", "item_search", ")", ":", "# Implemented from azosid template for -", "# osid.resource.ResourceSearchSession.get_resources_by_search_template", "if", "not", "self", ".", "_can", "(", "'search'", ")", ":", "ra...
Pass through to provider ItemSearchSession.get_items_by_search
[ "Pass", "through", "to", "provider", "ItemSearchSession", ".", "get_items_by_search" ]
python
train
58.571429
saltstack/salt
salt/modules/win_lgpo.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_lgpo.py#L5465-L5472
def _getRightsAssignments(user_right): ''' helper function to return all the user rights assignments/users ''' sids = [] polHandle = win32security.LsaOpenPolicy(None, win32security.POLICY_ALL_ACCESS) sids = win32security.LsaEnumerateAccountsWithUserRight(polHandle, user_right) return sids
[ "def", "_getRightsAssignments", "(", "user_right", ")", ":", "sids", "=", "[", "]", "polHandle", "=", "win32security", ".", "LsaOpenPolicy", "(", "None", ",", "win32security", ".", "POLICY_ALL_ACCESS", ")", "sids", "=", "win32security", ".", "LsaEnumerateAccountsW...
helper function to return all the user rights assignments/users
[ "helper", "function", "to", "return", "all", "the", "user", "rights", "assignments", "/", "users" ]
python
train
38.75
openvax/varcode
varcode/reference.py
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/reference.py#L45-L56
def _most_recent_assembly(assembly_names): """ Given list of (in this case, matched) assemblies, identify the most recent ("recency" here is determined by sorting based on the numeric element of the assembly name) """ match_recency = [ int(re.search('\d+', assembly_name).group()) for assembly_name in assembly_names ] most_recent = [ x for (y, x) in sorted(zip(match_recency, assembly_names), reverse=True)][0] return most_recent
[ "def", "_most_recent_assembly", "(", "assembly_names", ")", ":", "match_recency", "=", "[", "int", "(", "re", ".", "search", "(", "'\\d+'", ",", "assembly_name", ")", ".", "group", "(", ")", ")", "for", "assembly_name", "in", "assembly_names", "]", "most_rec...
Given list of (in this case, matched) assemblies, identify the most recent ("recency" here is determined by sorting based on the numeric element of the assembly name)
[ "Given", "list", "of", "(", "in", "this", "case", "matched", ")", "assemblies", "identify", "the", "most", "recent", "(", "recency", "here", "is", "determined", "by", "sorting", "based", "on", "the", "numeric", "element", "of", "the", "assembly", "name", "...
python
train
39.833333
Yubico/yubikey-manager
ykman/cli/piv.py
https://github.com/Yubico/yubikey-manager/blob/3ac27bc59ae76a59db9d09a530494add2edbbabf/ykman/cli/piv.py#L619-L627
def delete_certificate(ctx, slot, management_key, pin): """ Delete a certificate. Delete a certificate from a slot on the YubiKey. """ controller = ctx.obj['controller'] _ensure_authenticated(ctx, controller, pin, management_key) controller.delete_certificate(slot)
[ "def", "delete_certificate", "(", "ctx", ",", "slot", ",", "management_key", ",", "pin", ")", ":", "controller", "=", "ctx", ".", "obj", "[", "'controller'", "]", "_ensure_authenticated", "(", "ctx", ",", "controller", ",", "pin", ",", "management_key", ")",...
Delete a certificate. Delete a certificate from a slot on the YubiKey.
[ "Delete", "a", "certificate", "." ]
python
train
31.777778
mlperf/training
translation/tensorflow/transformer/data_download.py
https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/translation/tensorflow/transformer/data_download.py#L210-L214
def txt_line_iterator(path): """Iterate through lines of file.""" with tf.gfile.Open(path) as f: for line in f: yield line.strip()
[ "def", "txt_line_iterator", "(", "path", ")", ":", "with", "tf", ".", "gfile", ".", "Open", "(", "path", ")", "as", "f", ":", "for", "line", "in", "f", ":", "yield", "line", ".", "strip", "(", ")" ]
Iterate through lines of file.
[ "Iterate", "through", "lines", "of", "file", "." ]
python
train
28