repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
geertj/pyskiplist
pyskiplist/skiplist.py
https://github.com/geertj/pyskiplist/blob/c5f94cf135d42bb277255150d3f570ed807468b2/pyskiplist/skiplist.py#L403-L415
def index(self, key, default=UNSET):
    """Return the position of the first key-value pair whose key equals *key*.

    If *key* is not present, return *default*; if no default was supplied,
    raise a ``KeyError``.
    """
    self._find_lt(key)
    candidate = self._path[0][2]
    # A hit means we landed on a real node whose key is not greater than ours.
    found = candidate is not self._tail and not (key < candidate[0])
    if found:
        return self._distance[0]
    if default is self.UNSET:
        raise KeyError('key {!r} not in list'.format(key))
    return default
[ "def", "index", "(", "self", ",", "key", ",", "default", "=", "UNSET", ")", ":", "self", ".", "_find_lt", "(", "key", ")", "node", "=", "self", ".", "_path", "[", "0", "]", "[", "2", "]", "if", "node", "is", "self", ".", "_tail", "or", "key", ...
Find the first key-value pair with key *key* and return its position. If the key is not found, return *default*. If default was not provided, raise a ``KeyError``
[ "Find", "the", "first", "key", "-", "value", "pair", "with", "key", "*", "key", "*", "and", "return", "its", "position", "." ]
python
train
timkpaine/pyEX
pyEX/stocks.py
https://github.com/timkpaine/pyEX/blob/91cf751dafdb208a0c8b5377945e5808b99f94ba/pyEX/stocks.py#L1473-L1488
def price(symbol, token='', version=''):
    '''Price of ticker

    https://iexcloud.io/docs/api/#price
    4:30am-8pm ET Mon-Fri

    Args:
        symbol (string); Ticker to request
        token (string); Access token
        version (string); API version

    Returns:
        dict: result
    '''
    _raiseIfNotStr(symbol)
    endpoint = 'stock/' + symbol + '/price'
    return _getJson(endpoint, token, version)
[ "def", "price", "(", "symbol", ",", "token", "=", "''", ",", "version", "=", "''", ")", ":", "_raiseIfNotStr", "(", "symbol", ")", "return", "_getJson", "(", "'stock/'", "+", "symbol", "+", "'/price'", ",", "token", ",", "version", ")" ]
Price of ticker https://iexcloud.io/docs/api/#price 4:30am-8pm ET Mon-Fri Args: symbol (string); Ticker to request token (string); Access token version (string); API version Returns: dict: result
[ "Price", "of", "ticker" ]
python
valid
jkitzes/macroeco
macroeco/models/_distributions.py
https://github.com/jkitzes/macroeco/blob/ee5fac5560a2d64de3a64738b5bc6833e2d7ff2e/macroeco/models/_distributions.py#L833-L849
def fit_mle(self, data, k_agg0=0.5):
    """%(super)s
    In addition to data, gives an optional keyword argument k_agg0 that
    specifies the initial value of k_agg used in the optimization.
    """
    mu = np.mean(data)

    def neg_log_likelihood(k):
        # Negative log-likelihood of the sample for aggregation parameter k.
        return -np.sum(np.log(self.pmf(data, mu, k)))

    # Nelder-Mead simplex search starting from k_agg0; disp=0 keeps it quiet.
    k_agg = optim.fmin(neg_log_likelihood, x0=k_agg0, disp=0)
    return mu, k_agg[0]
[ "def", "fit_mle", "(", "self", ",", "data", ",", "k_agg0", "=", "0.5", ")", ":", "mu", "=", "np", ".", "mean", "(", "data", ")", "def", "mle", "(", "k", ")", ":", "return", "-", "np", ".", "sum", "(", "np", ".", "log", "(", "self", ".", "pm...
%(super)s In addition to data, gives an optional keyword argument k_agg0 that specifies the initial value of k_agg used in the optimization.
[ "%", "(", "super", ")", "s" ]
python
train
SpriteLink/NIPAP
nipap/nipap/backend.py
https://github.com/SpriteLink/NIPAP/blob/f96069f11ab952d80b13cab06e0528f2d24b3de9/nipap/nipap/backend.py#L3104-L3175
def remove_prefix(self, auth, spec, recursive = False):
    """ Remove prefix matching `spec`.

        * `auth` [BaseAuth]
            AAA options.
        * `spec` [prefix_spec]
            Specifies prefix to remove.
        * `recursive` [bool]
            When set to True, also remove child prefixes.

        This is the documentation of the internal backend function. It's
        exposed over XML-RPC, please also see the XML-RPC documentation for
        :py:func:`nipap.xmlrpc.NipapXMLRPC.remove_prefix` for full
        understanding.
    """
    # NOTE: `unicode` means this module targets Python 2.
    self._logger.debug("remove_prefix called; spec: %s" % unicode(spec))

    # sanity check - do we have all attributes?
    if 'id' in spec:
        # recursive requires a prefix, so translate id to prefix
        p = self.list_prefix(auth, spec)[0]
        del spec['id']
        spec['prefix'] = p['prefix']
        spec['vrf_id'] = p['vrf_id']
    elif 'prefix' in spec:
        pass
    else:
        raise NipapMissingInputError('missing prefix or id of prefix')

    # capture the matching prefixes BEFORE deletion so they can be audited
    prefixes = self.list_prefix(auth, spec)
    if recursive:
        # presumably 'host' entries must be removed before the covering
        # prefixes can go -- TODO confirm against DB constraints
        spec['type'] = 'host'
        self._db_remove_prefix(spec, recursive)
        del spec['type']
        self._db_remove_prefix(spec, recursive)
    else:
        self._db_remove_prefix(spec)

    # write to audit table; one log row per removed prefix
    audit_params = {
        'username': auth.username,
        'authenticated_as': auth.authenticated_as,
        'full_name': auth.full_name,
        'authoritative_source': auth.authoritative_source
    }
    for p in prefixes:
        audit_params['prefix_id'] = p['id']
        audit_params['prefix_prefix'] = p['prefix']
        audit_params['description'] = 'Removed prefix %s' % p['prefix']
        audit_params['vrf_id'] = p['vrf_id']
        audit_params['vrf_rt'] = p['vrf_rt']
        audit_params['vrf_name'] = p['vrf_name']
        sql, params = self._sql_expand_insert(audit_params)
        self._execute('INSERT INTO ip_net_log %s' % sql, params)

        if p['pool_id'] is not None:
            # the prefix belonged to a pool; log the implicit pool removal too
            pool = self._get_pool(auth, { 'id': p['pool_id'] })
            audit_params2 = {
                'pool_id': pool['id'],
                'pool_name': pool['name'],
                'prefix_id': p['id'],
                'prefix_prefix': p['prefix'],
                'description': 'Prefix %s removed from pool %s' % (p['prefix'], pool['name']),
                'username': auth.username,
                'authenticated_as': auth.authenticated_as,
                'full_name': auth.full_name,
                'authoritative_source': auth.authoritative_source
            }
            sql, params = self._sql_expand_insert(audit_params2)
            self._execute('INSERT INTO ip_net_log %s' % sql, params)
[ "def", "remove_prefix", "(", "self", ",", "auth", ",", "spec", ",", "recursive", "=", "False", ")", ":", "self", ".", "_logger", ".", "debug", "(", "\"remove_prefix called; spec: %s\"", "%", "unicode", "(", "spec", ")", ")", "# sanity check - do we have all attr...
Remove prefix matching `spec`. * `auth` [BaseAuth] AAA options. * `spec` [prefix_spec] Specifies prefix to remove. * `recursive` [bool] When set to True, also remove child prefixes. This is the documentation of the internal backend function. It's exposed over XML-RPC, please also see the XML-RPC documentation for :py:func:`nipap.xmlrpc.NipapXMLRPC.remove_prefix` for full understanding.
[ "Remove", "prefix", "matching", "spec", "." ]
python
train
oemof/oemof.db
oemof/db/tools.py
https://github.com/oemof/oemof.db/blob/d51ac50187f03a875bd7ce5991ed4772e8b77b93/oemof/db/tools.py#L332-L351
def grant_db_access(conn, schema, table, role):
    r"""Gives access to database users/ groups

    Parameters
    ----------
    conn : sqlalchemy connection object
        A valid connection to a database
    schema : str
        The database schema
    table : str
        The database table
    role : str
        database role that access is granted to
    """
    statement = """GRANT ALL ON TABLE {schema}.{table}
            TO {role} WITH GRANT OPTION;""".format(schema=schema,
                                                   table=table,
                                                   role=role)
    conn.execute(statement)
[ "def", "grant_db_access", "(", "conn", ",", "schema", ",", "table", ",", "role", ")", ":", "grant_str", "=", "\"\"\"GRANT ALL ON TABLE {schema}.{table}\n TO {role} WITH GRANT OPTION;\"\"\"", ".", "format", "(", "schema", "=", "schema", ",", "table", "=", "table", ...
r"""Gives access to database users/ groups Parameters ---------- conn : sqlalchemy connection object A valid connection to a database schema : str The database schema table : str The database table role : str database role that access is granted to
[ "r", "Gives", "access", "to", "database", "users", "/", "groups" ]
python
train
silver-castle/mach9
mach9/websocket.py
https://github.com/silver-castle/mach9/blob/7a623aab3c70d89d36ade6901b6307e115400c5e/mach9/websocket.py#L107-L118
def get_disconnect_message(self, code: int):
    '''
    http://channels.readthedocs.io/en/stable/asgi/www.html#disconnection
    '''
    # Each outgoing message increments the channel's ordering counter.
    self.order += 1
    message = {
        'channel': 'websocket.disconnect',
        'reply_channel': None,
        'path': self.path,
        'order': self.order,
        'code': code,
    }
    return message
[ "def", "get_disconnect_message", "(", "self", ",", "code", ":", "int", ")", ":", "self", ".", "order", "+=", "1", "return", "{", "'channel'", ":", "'websocket.disconnect'", ",", "'reply_channel'", ":", "None", ",", "'path'", ":", "self", ".", "path", ",", ...
http://channels.readthedocs.io/en/stable/asgi/www.html#disconnection
[ "http", ":", "//", "channels", ".", "readthedocs", ".", "io", "/", "en", "/", "stable", "/", "asgi", "/", "www", ".", "html#disconnection" ]
python
train
wmayner/pyphi
pyphi/actual.py
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/actual.py#L699-L723
def events(network, previous_state, current_state, next_state, nodes,
           mechanisms=False):
    """Find all events (mechanisms with actual causes and actual effects)."""
    causes = _actual_causes(network, previous_state, current_state, nodes,
                            mechanisms)
    effects = _actual_effects(network, current_state, next_state, nodes,
                              mechanisms)
    # Only mechanisms appearing on both sides form complete events.
    bidirectional = ({c.mechanism for c in causes} &
                     {e.mechanism for e in effects})
    if not bidirectional:
        return ()

    def by_mechanism(occurrences):
        """Map mechanism -> occurrence, dropping unidirectional occurrences."""
        return {o.mechanism: o for o in occurrences
                if o.mechanism in bidirectional}

    cause_map = by_mechanism(causes)
    effect_map = by_mechanism(effects)
    return tuple(Event(cause_map[m], effect_map[m])
                 for m in sorted(bidirectional))
[ "def", "events", "(", "network", ",", "previous_state", ",", "current_state", ",", "next_state", ",", "nodes", ",", "mechanisms", "=", "False", ")", ":", "actual_causes", "=", "_actual_causes", "(", "network", ",", "previous_state", ",", "current_state", ",", ...
Find all events (mechanisms with actual causes and actual effects).
[ "Find", "all", "events", "(", "mechanisms", "with", "actual", "causes", "and", "actual", "effects", ")", "." ]
python
train
numenta/htmresearch
htmresearch/support/neural_correlations_utils.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/support/neural_correlations_utils.py#L68-L86
def generateHOSequence(sequence, symbolsPerSequence, numColumns, sparsity):
    """
    Generates a high-order sequence by taking an initial sequence and then
    changing its first and last SDRs by random SDRs

    @param sequence (array) sequence to be randomized
    @param symbolsPerSequence (int) number of symbols per sequence
    @param numColumns (int) number of columns in the TM
    @param sparsity (float) percentage of sparsity

    @return randomizedSequence (array) sequence that contains p percentage of new SDRs
    """
    sparseCols = int(numColumns * sparsity)
    lastIndex = symbolsPerSequence - 1
    sequenceHO = []
    for position in range(symbolsPerSequence):
        if position in (0, lastIndex):
            # endpoints are replaced with freshly generated random SDRs
            sequenceHO.append(generateRandomSymbol(numColumns, sparseCols))
        else:
            sequenceHO.append(sequence[position])
    return sequenceHO
[ "def", "generateHOSequence", "(", "sequence", ",", "symbolsPerSequence", ",", "numColumns", ",", "sparsity", ")", ":", "sequenceHO", "=", "[", "]", "sparseCols", "=", "int", "(", "numColumns", "*", "sparsity", ")", "for", "symbol", "in", "range", "(", "symbo...
Generates a high-order sequence by taking an initial sequence and then changing its first and last SDRs by random SDRs @param sequence (array) sequence to be randomized @param symbolsPerSequence (int) number of symbols per sequence @param numColumns (int) number of columns in the TM @param sparsity (float) percentage of sparsity @return randomizedSequence (array) sequence that contains p percentage of new SDRs
[ "Generates", "a", "high", "-", "order", "sequence", "by", "taking", "an", "initial", "sequence", "and", "the", "changing", "its", "first", "and", "last", "SDRs", "by", "random", "SDRs" ]
python
train
StellarCN/py-stellar-base
stellar_base/keypair.py
https://github.com/StellarCN/py-stellar-base/blob/cce2e782064fb3955c85e1696e630d67b1010848/stellar_base/keypair.py#L122-L137
def from_base58_seed(cls, base58_seed):
    """Generate a :class:`Keypair` object via Base58 encoded seed.

    .. deprecated:: 0.1.7
       Base58 address encoding is DEPRECATED! Use this method only for
       transition to strkey encoding.

    :param str base58_seed: A base58 encoded secret seed.
    :return: A new :class:`Keypair` derived from the secret seed.
    """
    warnings.warn(
        "Base58 address encoding is DEPRECATED! Use this method only for "
        "transition to strkey encoding.", DeprecationWarning)
    # Drop the single version byte that precedes the checked payload.
    decoded = b58decode_check(base58_seed)[1:]
    return cls.from_raw_seed(decoded)
[ "def", "from_base58_seed", "(", "cls", ",", "base58_seed", ")", ":", "warnings", ".", "warn", "(", "\"Base58 address encoding is DEPRECATED! Use this method only for \"", "\"transition to strkey encoding.\"", ",", "DeprecationWarning", ")", "raw_seed", "=", "b58decode_check", ...
Generate a :class:`Keypair` object via Base58 encoded seed. .. deprecated:: 0.1.7 Base58 address encoding is DEPRECATED! Use this method only for transition to strkey encoding. :param str base58_seed: A base58 encoded secret seed. :return: A new :class:`Keypair` derived from the secret seed.
[ "Generate", "a", ":", "class", ":", "Keypair", "object", "via", "Base58", "encoded", "seed", "." ]
python
train
pydata/xarray
xarray/core/utils.py
https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/core/utils.py#L551-L558
def ensure_us_time_resolution(val):
    """Convert val out of numpy time, for use in to_dict.

    Needed because of numpy bug GH#7619.
    """
    dtype = val.dtype
    if np.issubdtype(dtype, np.datetime64):
        return val.astype('datetime64[us]')
    if np.issubdtype(dtype, np.timedelta64):
        return val.astype('timedelta64[us]')
    # Non-time values pass through untouched.
    return val
[ "def", "ensure_us_time_resolution", "(", "val", ")", ":", "if", "np", ".", "issubdtype", "(", "val", ".", "dtype", ",", "np", ".", "datetime64", ")", ":", "val", "=", "val", ".", "astype", "(", "'datetime64[us]'", ")", "elif", "np", ".", "issubdtype", ...
Convert val out of numpy time, for use in to_dict. Needed because of numpy bug GH#7619
[ "Convert", "val", "out", "of", "numpy", "time", "for", "use", "in", "to_dict", ".", "Needed", "because", "of", "numpy", "bug", "GH#7619" ]
python
train
smarie/python-parsyfiles
parsyfiles/converting_core.py
https://github.com/smarie/python-parsyfiles/blob/344b37e1151e8d4e7c2ee49ae09d6568715ae64e/parsyfiles/converting_core.py#L277-L297
def can_be_appended_to(self, left_converter, strict: bool) -> bool:
    """
    Utility method to check if this (self) converter can be appended after
    the output of the provided converter. This method does not check if it
    makes sense, it just checks if the output type of the left converter is
    compliant with the input type of this converter.

    Compliant means:

    * strict mode : type equality
    * non-strict mode : output type of left_converter should be a subclass
      of input type of this converter

    In addition, the custom function provided in constructor may be used to
    reject conversion (see is_able_to_convert for details)

    :param left_converter:
    :param strict: boolean to
    :return:
    """
    accepts_output = self.is_able_to_convert(strict,
                                             from_type=left_converter.to_type,
                                             to_type=JOKER)
    if not left_converter.is_generic():
        return accepts_output
    # Generic left converter: it must also be able to produce our input type.
    return accepts_output and left_converter.is_able_to_convert(
        strict, from_type=JOKER, to_type=self.from_type)
[ "def", "can_be_appended_to", "(", "self", ",", "left_converter", ",", "strict", ":", "bool", ")", "->", "bool", ":", "is_able_to_take_input", "=", "self", ".", "is_able_to_convert", "(", "strict", ",", "from_type", "=", "left_converter", ".", "to_type", ",", "...
Utility method to check if this (self) converter can be appended after the output of the provided converter. This method does not check if it makes sense, it just checks if the output type of the left converter is compliant with the input type of this converter. Compliant means: * strict mode : type equality * non-strict mode : output type of left_converter should be a subclass of input type of this converter In addition, the custom function provided in constructor may be used to reject conversion (see is_able_to_convert for details) :param left_converter: :param strict: boolean to :return:
[ "Utility", "method", "to", "check", "if", "this", "(", "self", ")", "converter", "can", "be", "appended", "after", "the", "output", "of", "the", "provided", "converter", ".", "This", "method", "does", "not", "check", "if", "it", "makes", "sense", "it", "...
python
train
patchboard/patchboard-py
patchboard/base.py
https://github.com/patchboard/patchboard-py/blob/3d9f66f3f26d71e769cd3a578b760441a237ce4d/patchboard/base.py#L27-L44
def discover(url, options=None):
    """
    Retrieve the API definition from the given URL and construct a
    Patchboard to interface with it.

    :param url: URL of the API description document.
    :param options: optional dict of Patchboard options; a fresh empty dict
        is used when omitted.
    :raises PatchboardError: if the URL cannot be fetched or the response
        is not valid JSON.
    :return: a new :class:`Patchboard` built from the fetched spec.
    """
    # Fix: the default was a shared mutable dict ({}), which every
    # default-argument call would alias; use the None sentinel instead.
    if options is None:
        options = {}

    try:
        resp = requests.get(url, headers=Patchboard.default_headers)
    except Exception as e:
        raise PatchboardError("Problem discovering API: {0}".format(e))

    # Parse as JSON (Requests uses json.loads())
    try:
        api_spec = resp.json()
    except ValueError as e:
        raise PatchboardError("Unparseable API description: {0}".format(e))

    # Return core handle object
    return Patchboard(api_spec, options)
[ "def", "discover", "(", "url", ",", "options", "=", "{", "}", ")", ":", "try", ":", "resp", "=", "requests", ".", "get", "(", "url", ",", "headers", "=", "Patchboard", ".", "default_headers", ")", "except", "Exception", "as", "e", ":", "raise", "Patc...
Retrieve the API definition from the given URL and construct a Patchboard to interface with it.
[ "Retrieve", "the", "API", "definition", "from", "the", "given", "URL", "and", "construct", "a", "Patchboard", "to", "interface", "with", "it", "." ]
python
train
chriskiehl/Gooey
gooey/util/functional.py
https://github.com/chriskiehl/Gooey/blob/e598573c6519b953e0ccfc1f3663f827f8cd7e22/gooey/util/functional.py#L27-L34
def associn(m, path, value):
    """ Copy-on-write associates a value in a nested dict """
    if not path:
        return value
    key = path[0]
    # Rebuild each level with assoc so no existing dict is mutated.
    return assoc(m, key, associn(m.get(key, {}), path[1:], value))
[ "def", "associn", "(", "m", ",", "path", ",", "value", ")", ":", "def", "assoc_recursively", "(", "m", ",", "path", ",", "value", ")", ":", "if", "not", "path", ":", "return", "value", "p", "=", "path", "[", "0", "]", "return", "assoc", "(", "m",...
Copy-on-write associates a value in a nested dict
[ "Copy", "-", "on", "-", "write", "associates", "a", "value", "in", "a", "nested", "dict" ]
python
train
sckott/habanero
habanero/crossref/crossref.py
https://github.com/sckott/habanero/blob/a17d87070378786bbb138e1c9712ecad9aacf38e/habanero/crossref/crossref.py#L573-L625
def types(self, ids=None, query=None, filter=None, offset=None,
          limit=None, sample=None, sort=None, order=None, facet=None,
          works=False, select=None, cursor=None, cursor_max=5000,
          **kwargs):
    '''
    Search Crossref types

    :param ids: [Array] Type identifier, e.g., journal
    :param query: [String] A query string
    :param filter: [Hash] Filter options. Accepts a dict of filter names and
        values; pass a list of values for a repeating filter name. See
        https://github.com/CrossRef/rest-api-doc#filter-names and
        :func:`~habanero.Crossref.filter_names` /
        :func:`~habanero.Crossref.filter_details`
    :param offset: [Fixnum] Number of record to start at, from 1 to 10000
    :param limit: [Fixnum] Number of results to return. Default: 20. Max: 1000
    :param sample: [Fixnum] Number of random results to return; limit and
        offset are ignored when used. Only applies when works is requested.
        Max: 100
    :param sort: [String] Field to sort on; with a query the order is by
        relevance score, otherwise by DOI update date. See sorting_ for values.
    :param order: [String] Sort order, one of 'asc' or 'desc'
    :param facet: [Boolean/String] Set to `true` to include facet results
        (default: false), or pass a query string, e.g., `facet=type-name:*`.
        See Facets_ for options.
    :param select: [String/list(Strings)] "Select" a subset of elements to
        return, making API calls more efficient.
    :param works: [Boolean] If true, works returned as well. Default: false
    :param kwargs: additional named arguments passed on to `requests.get`,
        e.g., field queries (see examples and FieldQueries_)

    :return: A dict

    Usage::

        from habanero import Crossref
        cr = Crossref()
        cr.types()
        cr.types(ids = "journal")
        cr.types(ids = "journal-article")
        cr.types(ids = "journal", works = True)

        # field queries
        res = cr.types(ids = "journal-article", works = True,
                       query_title = 'gender', rows = 100)
        [ x.get('title') for x in res['message']['items'] ]
    '''
    endpoint = "/types/"
    return request(self.mailto, self.base_url, endpoint, ids,
                   query, filter, offset, limit, sample, sort,
                   order, facet, select, works, cursor, cursor_max,
                   **kwargs)
[ "def", "types", "(", "self", ",", "ids", "=", "None", ",", "query", "=", "None", ",", "filter", "=", "None", ",", "offset", "=", "None", ",", "limit", "=", "None", ",", "sample", "=", "None", ",", "sort", "=", "None", ",", "order", "=", "None", ...
Search Crossref types :param ids: [Array] Type identifier, e.g., journal :param query: [String] A query string :param filter: [Hash] Filter options. See examples for usage. Accepts a dict, with filter names and their values. For repeating filter names pass in a list of the values to that filter name, e.g., `{'award_funder': ['10.13039/100004440', '10.13039/100000861']}`. See https://github.com/CrossRef/rest-api-doc#filter-names for filter names and their descriptions and :func:`~habanero.Crossref.filter_names` and :func:`~habanero.Crossref.filter_details` :param offset: [Fixnum] Number of record to start at, from 1 to 10000 :param limit: [Fixnum] Number of results to return. Not relevant when searching with specific dois. Default: 20. Max: 1000 :param sample: [Fixnum] Number of random results to return. when you use the sample parameter, the limit and offset parameters are ignored. This parameter only used when works requested. Max: 100 :param sort: [String] Field to sort on. Note: If the API call includes a query, then the sort order will be by the relevance score. If no query is included, then the sort order will be by DOI update date. See sorting_ for possible values. :param order: [String] Sort order, one of 'asc' or 'desc' :param facet: [Boolean/String] Set to `true` to include facet results (default: false). Optionally, pass a query string, e.g., `facet=type-name:*` or `facet=license=*` See Facets_ for options. :param select: [String/list(Strings)] Crossref metadata records can be quite large. Sometimes you just want a few elements from the schema. You can "select" a subset of elements to return. This can make your API calls much more efficient. Not clear yet which fields are allowed here. :param works: [Boolean] If true, works returned as well. 
Default: false :param kwargs: additional named arguments passed on to `requests.get`, e.g., field queries (see examples and FieldQueries_) :return: A dict Usage:: from habanero import Crossref cr = Crossref() cr.types() cr.types(ids = "journal") cr.types(ids = "journal-article") cr.types(ids = "journal", works = True) # field queries res = cr.types(ids = "journal-article", works = True, query_title = 'gender', rows = 100) [ x.get('title') for x in res['message']['items'] ]
[ "Search", "Crossref", "types" ]
python
train
exekias/droplet
droplet/web/tables.py
https://github.com/exekias/droplet/blob/aeac573a2c1c4b774e99d5414a1c79b1bb734941/droplet/web/tables.py#L190-L202
def actions(self):
    """
    List of :class:`TableAction` elements defined for this table
    """
    # Not cached: the number of attributes is expected to stay small.
    found = [attr for attr in (getattr(self, name) for name in dir(self))
             if isinstance(attr, TableAction)]
    found.sort(key=lambda action: action.creation_counter)
    return found
[ "def", "actions", "(", "self", ")", ":", "actions", "=", "[", "]", "for", "a", "in", "dir", "(", "self", ")", ":", "a", "=", "getattr", "(", "self", ",", "a", ")", "if", "isinstance", "(", "a", ",", "TableAction", ")", ":", "actions", ".", "app...
List of :class:`TableAction` elements defined for this table
[ "List", "of", ":", "class", ":", "TableAction", "elements", "defined", "for", "this", "table" ]
python
train
iotile/coretools
iotilecore/iotile/core/hw/transport/virtualadapter.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilecore/iotile/core/hw/transport/virtualadapter.py#L23-L42
def stream(self, report, callback=None):
    """Queue data for streaming

    Args:
        report (IOTileReport): A report object to stream to a client
        callback (callable): An optional callback that will be called with
            a bool value of True when this report actually gets streamed.
            If the client disconnects and the report is dropped instead,
            callback will be called with False
    """
    conn_id = self._find_connection(self.conn_string)
    is_broadcast = isinstance(report, BroadcastReport)

    if is_broadcast:
        self.adapter.notify_event_nowait(self.conn_string, 'broadcast',
                                         report)
    elif conn_id is not None:
        self.adapter.notify_event_nowait(self.conn_string, 'report', report)

    if callback is not None:
        # Broadcasts always go out; direct reports only with a connection.
        callback(is_broadcast or conn_id is not None)
[ "def", "stream", "(", "self", ",", "report", ",", "callback", "=", "None", ")", ":", "conn_id", "=", "self", ".", "_find_connection", "(", "self", ".", "conn_string", ")", "if", "isinstance", "(", "report", ",", "BroadcastReport", ")", ":", "self", ".", ...
Queue data for streaming Args: report (IOTileReport): A report object to stream to a client callback (callable): An optional callback that will be called with a bool value of True when this report actually gets streamed. If the client disconnects and the report is dropped instead, callback will be called with False
[ "Queue", "data", "for", "streaming" ]
python
train
Azure/azure-sdk-for-python
azure-servicemanagement-legacy/azure/servicemanagement/servicemanagementservice.py
https://github.com/Azure/azure-sdk-for-python/blob/d7306fde32f60a293a7567678692bdad31e4b667/azure-servicemanagement-legacy/azure/servicemanagement/servicemanagementservice.py#L2127-L2142
def list_vm_images(self, location=None, publisher=None, category=None):
    '''
    Retrieves a list of the VM Images from the image repository that is
    associated with the specified subscription.
    '''
    path = self._get_vm_image_path()
    filters = []
    if location:
        filters.append('location=' + location)
    if publisher:
        filters.append('publisher=' + publisher)
    if category:
        filters.append('category=' + category)
    if filters:
        path = path + '?' + '&'.join(filters)
    return self._perform_get(path, VMImages)
[ "def", "list_vm_images", "(", "self", ",", "location", "=", "None", ",", "publisher", "=", "None", ",", "category", "=", "None", ")", ":", "path", "=", "self", ".", "_get_vm_image_path", "(", ")", "query", "=", "''", "if", "location", ":", "query", "+=...
Retrieves a list of the VM Images from the image repository that is associated with the specified subscription.
[ "Retrieves", "a", "list", "of", "the", "VM", "Images", "from", "the", "image", "repository", "that", "is", "associated", "with", "the", "specified", "subscription", "." ]
python
test
bitcraft/PyTMX
pytmx/pytmx.py
https://github.com/bitcraft/PyTMX/blob/3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9/pytmx/pytmx.py#L632-L653
def get_tile_properties_by_layer(self, layer):
    """ Get the tile properties of each GID in layer

    :param layer: layer number
    :rtype: iterator of (gid, properties) tuples
    """
    try:
        assert (int(layer) >= 0)
        layer = int(layer)
    except (TypeError, AssertionError):
        msg = "Layer must be a positive integer. Got {0} instead."
        logger.debug(msg.format(type(layer)))
        raise ValueError

    data = self.layers[layer].data
    # Collect the distinct GIDs used anywhere in this layer.
    layergids = {data[y][x]
                 for x in range(self.width)
                 for y in range(self.height)}
    for gid in layergids:
        try:
            yield gid, self.tile_properties[gid]
        except KeyError:
            # GIDs without registered properties are simply skipped.
            continue
[ "def", "get_tile_properties_by_layer", "(", "self", ",", "layer", ")", ":", "try", ":", "assert", "(", "int", "(", "layer", ")", ">=", "0", ")", "layer", "=", "int", "(", "layer", ")", "except", "(", "TypeError", ",", "AssertionError", ")", ":", "msg",...
Get the tile properties of each GID in layer :param layer: layer number :rtype: iterator of (gid, properties) tuples
[ "Get", "the", "tile", "properties", "of", "each", "GID", "in", "layer" ]
python
train
quodlibet/mutagen
mutagen/flac.py
https://github.com/quodlibet/mutagen/blob/e393df5971ba41ba5a50de9c2c9e7e5484d82c4e/mutagen/flac.py#L751-L757
def add_tags(self):
    """Add a Vorbis comment block to the file."""
    if self.tags is not None:
        raise FLACVorbisError("a Vorbis comment already exists")
    self.tags = VCFLACDict()
    self.metadata_blocks.append(self.tags)
[ "def", "add_tags", "(", "self", ")", ":", "if", "self", ".", "tags", "is", "None", ":", "self", ".", "tags", "=", "VCFLACDict", "(", ")", "self", ".", "metadata_blocks", ".", "append", "(", "self", ".", "tags", ")", "else", ":", "raise", "FLACVorbisE...
Add a Vorbis comment block to the file.
[ "Add", "a", "Vorbis", "comment", "block", "to", "the", "file", "." ]
python
train
tradenity/python-sdk
tradenity/resources/country.py
https://github.com/tradenity/python-sdk/blob/d13fbe23f4d6ff22554c6d8d2deaf209371adaf1/tradenity/resources/country.py#L500-L520
def delete_country_by_id(cls, country_id, **kwargs):
    """Delete Country

    Delete an instance of Country by its ID.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.delete_country_by_id(country_id, async=True)
    >>> result = thread.get()

    :param async bool
    :param str country_id: ID of country to delete. (required)
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both paths delegate to the same helper; when async is set the helper
    # returns the request thread directly, otherwise the unwrapped data.
    return cls._delete_country_by_id_with_http_info(country_id, **kwargs)
[ "def", "delete_country_by_id", "(", "cls", ",", "country_id", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async'", ")", ":", "return", "cls", ".", "_delete_country_by_id_w...
Delete Country Delete an instance of Country by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_country_by_id(country_id, async=True) >>> result = thread.get() :param async bool :param str country_id: ID of country to delete. (required) :return: None If the method is called asynchronously, returns the request thread.
[ "Delete", "Country" ]
python
train
blockstack/virtualchain
virtualchain/lib/blockchain/bitcoin_blockchain/bits.py
https://github.com/blockstack/virtualchain/blob/fcfc970064ca7dfcab26ebd3ab955870a763ea39/virtualchain/lib/blockchain/bitcoin_blockchain/bits.py#L1162-L1178
def btc_privkey_scriptsig_classify(private_key_info): """ What kind of scriptsig can this private key make? """ if btc_is_singlesig(private_key_info): return 'p2pkh' if btc_is_multisig(private_key_info): return 'p2sh' if btc_is_singlesig_segwit(private_key_info): return 'p2sh-p2wpkh' if btc_is_multisig_segwit(private_key_info): return 'p2sh-p2wsh' return None
[ "def", "btc_privkey_scriptsig_classify", "(", "private_key_info", ")", ":", "if", "btc_is_singlesig", "(", "private_key_info", ")", ":", "return", "'p2pkh'", "if", "btc_is_multisig", "(", "private_key_info", ")", ":", "return", "'p2sh'", "if", "btc_is_singlesig_segwit",...
What kind of scriptsig can this private key make?
[ "What", "kind", "of", "scriptsig", "can", "this", "private", "key", "make?" ]
python
train
LionelAuroux/pyrser
pyrser/type_system/scope.py
https://github.com/LionelAuroux/pyrser/blob/f153a97ef2b6bf915a1ed468c0252a9a59b754d5/pyrser/type_system/scope.py#L321-L326
def values(self) -> [Signature]: """ Retrieve all values """ if self.state == StateScope.EMBEDDED and self.parent is not None: return list(self._hsig.values()) + list(self.parent().values()) else: return self._hsig.values()
[ "def", "values", "(", "self", ")", "->", "[", "Signature", "]", ":", "if", "self", ".", "state", "==", "StateScope", ".", "EMBEDDED", "and", "self", ".", "parent", "is", "not", "None", ":", "return", "list", "(", "self", ".", "_hsig", ".", "values", ...
Retrieve all values
[ "Retrieve", "all", "values" ]
python
test
systemd/python-systemd
systemd/journal.py
https://github.com/systemd/python-systemd/blob/c06c5d401d60ae9175367be0797a6c2b562ac5ba/systemd/journal.py#L295-L306
def wait(self, timeout=None): """Wait for a change in the journal. `timeout` is the maximum time in seconds to wait, or None which means to wait forever. Returns one of NOP (no change), APPEND (new entries have been added to the end of the journal), or INVALIDATE (journal files have been added or removed). """ us = -1 if timeout is None else int(timeout * 1000000) return super(Reader, self).wait(us)
[ "def", "wait", "(", "self", ",", "timeout", "=", "None", ")", ":", "us", "=", "-", "1", "if", "timeout", "is", "None", "else", "int", "(", "timeout", "*", "1000000", ")", "return", "super", "(", "Reader", ",", "self", ")", ".", "wait", "(", "us",...
Wait for a change in the journal. `timeout` is the maximum time in seconds to wait, or None which means to wait forever. Returns one of NOP (no change), APPEND (new entries have been added to the end of the journal), or INVALIDATE (journal files have been added or removed).
[ "Wait", "for", "a", "change", "in", "the", "journal", "." ]
python
train
linkedin/Zopkio
zopkio/deployer.py
https://github.com/linkedin/Zopkio/blob/a06e35a884cd26eedca0aac8ba6b9b40c417a01c/zopkio/deployer.py#L91-L99
def undeploy(self, unique_id, configs=None): """Undeploys the service. This should at least perform the same actions as stop and uninstall but may perform additional tasks as needed. :Parameter unique_id: the name of the process :Parameter configs: a map of configs the deployer may use """ self.stop(unique_id, configs) self.uninstall(unique_id, configs)
[ "def", "undeploy", "(", "self", ",", "unique_id", ",", "configs", "=", "None", ")", ":", "self", ".", "stop", "(", "unique_id", ",", "configs", ")", "self", ".", "uninstall", "(", "unique_id", ",", "configs", ")" ]
Undeploys the service. This should at least perform the same actions as stop and uninstall but may perform additional tasks as needed. :Parameter unique_id: the name of the process :Parameter configs: a map of configs the deployer may use
[ "Undeploys", "the", "service", ".", "This", "should", "at", "least", "perform", "the", "same", "actions", "as", "stop", "and", "uninstall", "but", "may", "perform", "additional", "tasks", "as", "needed", "." ]
python
train
OTL/jps
jps/forwarder.py
https://github.com/OTL/jps/blob/2c5a438d59611fffca6853072c822ef22665ed87/jps/forwarder.py#L13-L37
def main(pub_port=None, sub_port=None): '''main of forwarder :param sub_port: port for subscribers :param pub_port: port for publishers ''' try: if sub_port is None: sub_port = get_sub_port() if pub_port is None: pub_port = get_pub_port() context = zmq.Context(1) frontend = context.socket(zmq.SUB) backend = context.socket(zmq.PUB) frontend.bind('tcp://*:{pub_port}'.format(pub_port=pub_port)) frontend.setsockopt(zmq.SUBSCRIBE, b'') backend.bind('tcp://*:{sub_port}'.format(sub_port=sub_port)) zmq.device(zmq.FORWARDER, frontend, backend) except KeyboardInterrupt: pass finally: frontend.close() backend.close() context.term()
[ "def", "main", "(", "pub_port", "=", "None", ",", "sub_port", "=", "None", ")", ":", "try", ":", "if", "sub_port", "is", "None", ":", "sub_port", "=", "get_sub_port", "(", ")", "if", "pub_port", "is", "None", ":", "pub_port", "=", "get_pub_port", "(", ...
main of forwarder :param sub_port: port for subscribers :param pub_port: port for publishers
[ "main", "of", "forwarder" ]
python
train
macacajs/wd.py
macaca/webdriver.py
https://github.com/macacajs/wd.py/blob/6d3c52060013e01a67cd52b68b5230b387427bad/macaca/webdriver.py#L427-L450
def flick(self, element, x, y, speed): """Deprecated use touch('drag', { fromX, fromY, toX, toY, duration(s) }) instead. Flick on the touch screen using finger motion events. This flickcommand starts at a particulat screen location. Support: iOS Args: element(WebElement): WebElement Object where the flick starts. x(float}: The x offset in pixels to flick by. y(float): The y offset in pixels to flick by. speed(float) The speed in pixels per seconds. Returns: WebDriver object. """ self._execute(Command.FLICK, { 'element': element.element_id, 'x': x, 'y': y, 'speed': speed })
[ "def", "flick", "(", "self", ",", "element", ",", "x", ",", "y", ",", "speed", ")", ":", "self", ".", "_execute", "(", "Command", ".", "FLICK", ",", "{", "'element'", ":", "element", ".", "element_id", ",", "'x'", ":", "x", ",", "'y'", ":", "y", ...
Deprecated use touch('drag', { fromX, fromY, toX, toY, duration(s) }) instead. Flick on the touch screen using finger motion events. This flickcommand starts at a particulat screen location. Support: iOS Args: element(WebElement): WebElement Object where the flick starts. x(float}: The x offset in pixels to flick by. y(float): The y offset in pixels to flick by. speed(float) The speed in pixels per seconds. Returns: WebDriver object.
[ "Deprecated", "use", "touch", "(", "drag", "{", "fromX", "fromY", "toX", "toY", "duration", "(", "s", ")", "}", ")", "instead", ".", "Flick", "on", "the", "touch", "screen", "using", "finger", "motion", "events", ".", "This", "flickcommand", "starts", "a...
python
valid
DLR-RM/RAFCON
source/rafcon/core/states/state.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/core/states/state.py#L471-L483
def remove_output_data_port(self, data_port_id, force=False, destroy=True): """Remove an output data port from the state :param int data_port_id: the id of the output data port to remove :raises exceptions.AttributeError: if the specified input data port does not exist """ if data_port_id in self._output_data_ports: if destroy: self.remove_data_flows_with_data_port_id(data_port_id) self._output_data_ports[data_port_id].parent = None return self._output_data_ports.pop(data_port_id) else: raise AttributeError("output data port with name %s does not exit", data_port_id)
[ "def", "remove_output_data_port", "(", "self", ",", "data_port_id", ",", "force", "=", "False", ",", "destroy", "=", "True", ")", ":", "if", "data_port_id", "in", "self", ".", "_output_data_ports", ":", "if", "destroy", ":", "self", ".", "remove_data_flows_wit...
Remove an output data port from the state :param int data_port_id: the id of the output data port to remove :raises exceptions.AttributeError: if the specified input data port does not exist
[ "Remove", "an", "output", "data", "port", "from", "the", "state" ]
python
train
transceptor-technology/trender
trender/block_for.py
https://github.com/transceptor-technology/trender/blob/ef2b7374ea2ecc83dceb139b358ec4ad8ce7033b/trender/block_for.py#L43-L52
def _compile(cls, lines): '''Return both variable names used in the #for loop in the current line.''' m = cls.RE_FOR.match(lines.current) if m is None: raise DefineBlockError( 'Incorrect block definition at line {}, {}\nShould be ' 'something like: #for @item in @items:' .format(lines.pos, lines.current)) return m.group(1), m.group(2).replace('.', '-')
[ "def", "_compile", "(", "cls", ",", "lines", ")", ":", "m", "=", "cls", ".", "RE_FOR", ".", "match", "(", "lines", ".", "current", ")", "if", "m", "is", "None", ":", "raise", "DefineBlockError", "(", "'Incorrect block definition at line {}, {}\\nShould be '", ...
Return both variable names used in the #for loop in the current line.
[ "Return", "both", "variable", "names", "used", "in", "the", "#for", "loop", "in", "the", "current", "line", "." ]
python
train
fatiando/pooch
pooch/core.py
https://github.com/fatiando/pooch/blob/fc38601d2d32809b4df75d0715922025740c869a/pooch/core.py#L390-L420
def load_registry(self, fname): """ Load entries from a file and add them to the registry. Use this if you are managing many files. Each line of the file should have file name and its SHA256 hash separate by a space. Only one file per line is allowed. Custom download URLs for individual files can be specified as a third element on the line. Parameters ---------- fname : str File name and path to the registry file. """ with open(fname) as fin: for linenum, line in enumerate(fin): elements = line.strip().split() if len(elements) > 3 or len(elements) < 2: raise IOError( "Expected 2 or 3 elements in line {} but got {}.".format( linenum, len(elements) ) ) file_name = elements[0] file_sha256 = elements[1] if len(elements) == 3: file_url = elements[2] self.urls[file_name] = file_url self.registry[file_name] = file_sha256
[ "def", "load_registry", "(", "self", ",", "fname", ")", ":", "with", "open", "(", "fname", ")", "as", "fin", ":", "for", "linenum", ",", "line", "in", "enumerate", "(", "fin", ")", ":", "elements", "=", "line", ".", "strip", "(", ")", ".", "split",...
Load entries from a file and add them to the registry. Use this if you are managing many files. Each line of the file should have file name and its SHA256 hash separate by a space. Only one file per line is allowed. Custom download URLs for individual files can be specified as a third element on the line. Parameters ---------- fname : str File name and path to the registry file.
[ "Load", "entries", "from", "a", "file", "and", "add", "them", "to", "the", "registry", "." ]
python
train
nbedi/typecaster
typecaster/ssml.py
https://github.com/nbedi/typecaster/blob/09eee6d4fbad9f70c90364ea89ab39917f903afc/typecaster/ssml.py#L48-L53
def html_to_ssml(text): """ Replaces specific html tags with probable SSML counterparts. """ ssml_text = reduce(lambda x, y: x.replace(y, html_to_ssml_maps[y]), html_to_ssml_maps, text) return ssml_text
[ "def", "html_to_ssml", "(", "text", ")", ":", "ssml_text", "=", "reduce", "(", "lambda", "x", ",", "y", ":", "x", ".", "replace", "(", "y", ",", "html_to_ssml_maps", "[", "y", "]", ")", ",", "html_to_ssml_maps", ",", "text", ")", "return", "ssml_text" ...
Replaces specific html tags with probable SSML counterparts.
[ "Replaces", "specific", "html", "tags", "with", "probable", "SSML", "counterparts", "." ]
python
train
urschrei/pyzotero
pyzotero/zotero.py
https://github.com/urschrei/pyzotero/blob/b378966b30146a952f7953c23202fb5a1ddf81d9/pyzotero/zotero.py#L429-L443
def _build_query(self, query_string, no_params=False): """ Set request parameters. Will always add the user ID if it hasn't been specifically set by an API method """ try: query = quote(query_string.format(u=self.library_id, t=self.library_type)) except KeyError as err: raise ze.ParamNotPassed("There's a request parameter missing: %s" % err) # Add the URL parameters and the user key, if necessary if no_params is False: if not self.url_params: self.add_parameters() query = "%s?%s" % (query, self.url_params) return query
[ "def", "_build_query", "(", "self", ",", "query_string", ",", "no_params", "=", "False", ")", ":", "try", ":", "query", "=", "quote", "(", "query_string", ".", "format", "(", "u", "=", "self", ".", "library_id", ",", "t", "=", "self", ".", "library_typ...
Set request parameters. Will always add the user ID if it hasn't been specifically set by an API method
[ "Set", "request", "parameters", ".", "Will", "always", "add", "the", "user", "ID", "if", "it", "hasn", "t", "been", "specifically", "set", "by", "an", "API", "method" ]
python
valid
cloudmesh/cloudmesh-common
cloudmesh/common/Shell.py
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/Shell.py#L227-L276
def check_python(cls): """ checks if the python version is supported :return: True if it is supported """ python_version = sys.version_info[:3] v_string = [str(i) for i in python_version] if python_version[0] == 2: python_version_s = '.'.join(v_string) if (python_version[0] == 2) and (python_version[1] >= 7) and ( python_version[2] >= 9): print( "You are running a supported version of python: {:}".format( python_version_s)) else: print( "WARNING: You are running an unsupported version of python: {:}".format( python_version_s)) print(" We recommend you update your python") elif python_version[0] == 3: python_version_s = '.'.join(v_string) if (python_version[0] == 3) and (python_version[1] >= 7) and ( python_version[2] >= 0): print( "You are running a supported version of python: {:}".format( python_version_s)) else: print( "WARNING: You are running an unsupported version of python: {:}".format( python_version_s)) print(" We recommend you update your python") # pip_version = pip.__version__ python_version, pip_version = cls.get_python() if int(pip_version.split(".")[0]) >= 18: print("You are running a supported version of pip: " + str( pip_version)) else: print("WARNING: You are running an old version of pip: " + str( pip_version)) print(" We recommend you update your pip with \n") print(" pip install -U pip\n")
[ "def", "check_python", "(", "cls", ")", ":", "python_version", "=", "sys", ".", "version_info", "[", ":", "3", "]", "v_string", "=", "[", "str", "(", "i", ")", "for", "i", "in", "python_version", "]", "if", "python_version", "[", "0", "]", "==", "2",...
checks if the python version is supported :return: True if it is supported
[ "checks", "if", "the", "python", "version", "is", "supported", ":", "return", ":", "True", "if", "it", "is", "supported" ]
python
train
mbedmicro/pyOCD
pyocd/probe/stlink_probe.py
https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/probe/stlink_probe.py#L142-L145
def assert_reset(self, asserted): """! @brief Assert or de-assert target reset line""" self._link.drive_nreset(asserted) self._nreset_state = asserted
[ "def", "assert_reset", "(", "self", ",", "asserted", ")", ":", "self", ".", "_link", ".", "drive_nreset", "(", "asserted", ")", "self", ".", "_nreset_state", "=", "asserted" ]
! @brief Assert or de-assert target reset line
[ "!" ]
python
train
gtaylor/EVE-Market-Data-Structures
emds/formats/unified/orders.py
https://github.com/gtaylor/EVE-Market-Data-Structures/blob/77d69b24f2aada3aeff8fba3d75891bfba8fdcf3/emds/formats/unified/orders.py#L37-L73
def parse_from_dict(json_dict): """ Given a Unified Uploader message, parse the contents and return a MarketOrderList. :param dict json_dict: A Unified Uploader message as a JSON dict. :rtype: MarketOrderList :returns: An instance of MarketOrderList, containing the orders within. """ order_columns = json_dict['columns'] order_list = MarketOrderList( upload_keys=json_dict['uploadKeys'], order_generator=json_dict['generator'], ) for rowset in json_dict['rowsets']: generated_at = parse_datetime(rowset['generatedAt']) region_id = rowset['regionID'] type_id = rowset['typeID'] order_list.set_empty_region(region_id, type_id, generated_at) for row in rowset['rows']: order_kwargs = _columns_to_kwargs( SPEC_TO_KWARG_CONVERSION, order_columns, row) order_kwargs.update({ 'region_id': region_id, 'type_id': type_id, 'generated_at': generated_at, }) order_kwargs['order_issue_date'] = parse_datetime(order_kwargs['order_issue_date']) order_list.add_order(MarketOrder(**order_kwargs)) return order_list
[ "def", "parse_from_dict", "(", "json_dict", ")", ":", "order_columns", "=", "json_dict", "[", "'columns'", "]", "order_list", "=", "MarketOrderList", "(", "upload_keys", "=", "json_dict", "[", "'uploadKeys'", "]", ",", "order_generator", "=", "json_dict", "[", "...
Given a Unified Uploader message, parse the contents and return a MarketOrderList. :param dict json_dict: A Unified Uploader message as a JSON dict. :rtype: MarketOrderList :returns: An instance of MarketOrderList, containing the orders within.
[ "Given", "a", "Unified", "Uploader", "message", "parse", "the", "contents", "and", "return", "a", "MarketOrderList", "." ]
python
train
Tinche/django-bower-cache
registry/tasks.py
https://github.com/Tinche/django-bower-cache/blob/5245b2ee80c33c09d85ce0bf8f047825d9df2118/registry/tasks.py#L25-L30
def pull_all_repos(): """Pull origin updates for all repos with origins.""" repos = ClonedRepo.objects.all() for repo in repos: if repo.origin is not None: pull_repo.delay(repo_name=repo.name)
[ "def", "pull_all_repos", "(", ")", ":", "repos", "=", "ClonedRepo", ".", "objects", ".", "all", "(", ")", "for", "repo", "in", "repos", ":", "if", "repo", ".", "origin", "is", "not", "None", ":", "pull_repo", ".", "delay", "(", "repo_name", "=", "rep...
Pull origin updates for all repos with origins.
[ "Pull", "origin", "updates", "for", "all", "repos", "with", "origins", "." ]
python
train
ambitioninc/newrelic-api
newrelic_api/application_instances.py
https://github.com/ambitioninc/newrelic-api/blob/07b4430aa6ae61e4704e2928a6e7a24c76f0f424/newrelic_api/application_instances.py#L8-L88
def list( self, application_id, filter_hostname=None, filter_ids=None, page=None): """ This API endpoint returns a paginated list of instances associated with the given application. Application instances can be filtered by hostname, or the list of application instance IDs. :type application_id: int :param application_id: Application ID :type filter_hostname: str :param filter_hostname: Filter by server hostname :type filter_ids: list of ints :param filter_ids: Filter by application instance ids :type page: int :param page: Pagination index :rtype: dict :return: The JSON response of the API, with an additional 'pages' key if there are paginated results :: { "application_instances": [ { "id": "integer", "application_name": "string", "host": "string", "port": "integer", "language": "integer", "health_status": "string", "application_summary": { "response_time": "float", "throughput": "float", "error_rate": "float", "apdex_score": "float" }, "end_user_summary": { "response_time": "float", "throughput": "float", "apdex_score": "float" }, "links": { "application": "integer", "application_host": "integer", "server": "integer" } } ], "pages": { "last": { "url": "https://api.newrelic.com/v2/applications/{application_id}/instances.json?page=2", "rel": "last" }, "next": { "url": "https://api.newrelic.com/v2/applications/{application_id}/instances.json?page=2", "rel": "next" } } } """ filters = [ 'filter[hostname]={0}'.format(filter_hostname) if filter_hostname else None, 'filter[ids]={0}'.format(','.join([str(app_id) for app_id in filter_ids])) if filter_ids else None, 'page={0}'.format(page) if page else None ] return self._get( url='{root}applications/{application_id}/instances.json'.format( root=self.URL, application_id=application_id ), headers=self.headers, params=self.build_param_string(filters) )
[ "def", "list", "(", "self", ",", "application_id", ",", "filter_hostname", "=", "None", ",", "filter_ids", "=", "None", ",", "page", "=", "None", ")", ":", "filters", "=", "[", "'filter[hostname]={0}'", ".", "format", "(", "filter_hostname", ")", "if", "fi...
This API endpoint returns a paginated list of instances associated with the given application. Application instances can be filtered by hostname, or the list of application instance IDs. :type application_id: int :param application_id: Application ID :type filter_hostname: str :param filter_hostname: Filter by server hostname :type filter_ids: list of ints :param filter_ids: Filter by application instance ids :type page: int :param page: Pagination index :rtype: dict :return: The JSON response of the API, with an additional 'pages' key if there are paginated results :: { "application_instances": [ { "id": "integer", "application_name": "string", "host": "string", "port": "integer", "language": "integer", "health_status": "string", "application_summary": { "response_time": "float", "throughput": "float", "error_rate": "float", "apdex_score": "float" }, "end_user_summary": { "response_time": "float", "throughput": "float", "apdex_score": "float" }, "links": { "application": "integer", "application_host": "integer", "server": "integer" } } ], "pages": { "last": { "url": "https://api.newrelic.com/v2/applications/{application_id}/instances.json?page=2", "rel": "last" }, "next": { "url": "https://api.newrelic.com/v2/applications/{application_id}/instances.json?page=2", "rel": "next" } } }
[ "This", "API", "endpoint", "returns", "a", "paginated", "list", "of", "instances", "associated", "with", "the", "given", "application", "." ]
python
train
saltstack/salt
salt/modules/useradd.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/useradd.py#L793-L814
def _format_info(data): ''' Return user information in a pretty way ''' # Put GECOS info into a list gecos_field = salt.utils.stringutils.to_unicode(data.pw_gecos).split(',', 4) # Make sure our list has at least five elements while len(gecos_field) < 5: gecos_field.append('') return {'gid': data.pw_gid, 'groups': list_groups(data.pw_name), 'home': data.pw_dir, 'name': data.pw_name, 'passwd': data.pw_passwd, 'shell': data.pw_shell, 'uid': data.pw_uid, 'fullname': gecos_field[0], 'roomnumber': gecos_field[1], 'workphone': gecos_field[2], 'homephone': gecos_field[3], 'other': gecos_field[4]}
[ "def", "_format_info", "(", "data", ")", ":", "# Put GECOS info into a list", "gecos_field", "=", "salt", ".", "utils", ".", "stringutils", ".", "to_unicode", "(", "data", ".", "pw_gecos", ")", ".", "split", "(", "','", ",", "4", ")", "# Make sure our list has...
Return user information in a pretty way
[ "Return", "user", "information", "in", "a", "pretty", "way" ]
python
train
mfussenegger/cr8
cr8/run_crate.py
https://github.com/mfussenegger/cr8/blob/a37d6049f1f9fee2d0556efae2b7b7f8761bffe8/cr8/run_crate.py#L660-L703
def run_crate( version, env=None, setting=None, crate_root=None, keep_data=False, disable_java_magic=False, ): """Launch a crate instance. Supported version specifications: - Concrete version like "0.55.0" or with wildcard: "1.1.x" - An alias (one of [latest-nightly, latest-stable, latest-testing]) - A URI pointing to a CrateDB tarball (in .tar.gz format) - A URI pointing to a checked out CrateDB repo directory run-crate supports command chaining. To launch a CrateDB node and another sub-command use: cr8 run-crate <ver> -- timeit -s "select 1" --hosts '{node.http_url}' To launch any (blocking) subprocess, prefix the name with '@': cr8 run-crate <version> -- @http '{node.http_url}' If run-crate is invoked using command chaining it will exit once all chained commands finished. The postgres host and port are available as {node.addresses.psql.host} and {node.addresses.psql.port} """ with create_node( version, env, setting, crate_root, keep_data, java_magic=not disable_java_magic, ) as n: try: n.start() n.process.wait() except KeyboardInterrupt: print('Stopping Crate...')
[ "def", "run_crate", "(", "version", ",", "env", "=", "None", ",", "setting", "=", "None", ",", "crate_root", "=", "None", ",", "keep_data", "=", "False", ",", "disable_java_magic", "=", "False", ",", ")", ":", "with", "create_node", "(", "version", ",", ...
Launch a crate instance. Supported version specifications: - Concrete version like "0.55.0" or with wildcard: "1.1.x" - An alias (one of [latest-nightly, latest-stable, latest-testing]) - A URI pointing to a CrateDB tarball (in .tar.gz format) - A URI pointing to a checked out CrateDB repo directory run-crate supports command chaining. To launch a CrateDB node and another sub-command use: cr8 run-crate <ver> -- timeit -s "select 1" --hosts '{node.http_url}' To launch any (blocking) subprocess, prefix the name with '@': cr8 run-crate <version> -- @http '{node.http_url}' If run-crate is invoked using command chaining it will exit once all chained commands finished. The postgres host and port are available as {node.addresses.psql.host} and {node.addresses.psql.port}
[ "Launch", "a", "crate", "instance", "." ]
python
train
spyder-ide/spyder
spyder/plugins/ipythonconsole/utils/kernelspec.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/ipythonconsole/utils/kernelspec.py#L41-L73
def argv(self): """Command to start kernels""" # Python interpreter used to start kernels if CONF.get('main_interpreter', 'default'): pyexec = get_python_executable() else: # Avoid IPython adding the virtualenv on which Spyder is running # to the kernel sys.path os.environ.pop('VIRTUAL_ENV', None) pyexec = CONF.get('main_interpreter', 'executable') if not is_python_interpreter(pyexec): pyexec = get_python_executable() CONF.set('main_interpreter', 'executable', '') CONF.set('main_interpreter', 'default', True) CONF.set('main_interpreter', 'custom', False) # Fixes Issue #3427 if os.name == 'nt': dir_pyexec = osp.dirname(pyexec) pyexec_w = osp.join(dir_pyexec, 'pythonw.exe') if osp.isfile(pyexec_w): pyexec = pyexec_w # Command used to start kernels kernel_cmd = [ pyexec, '-m', 'spyder_kernels.console', '-f', '{connection_file}' ] return kernel_cmd
[ "def", "argv", "(", "self", ")", ":", "# Python interpreter used to start kernels", "if", "CONF", ".", "get", "(", "'main_interpreter'", ",", "'default'", ")", ":", "pyexec", "=", "get_python_executable", "(", ")", "else", ":", "# Avoid IPython adding the virtualenv o...
Command to start kernels
[ "Command", "to", "start", "kernels" ]
python
train
gem/oq-engine
openquake/commonlib/util.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/commonlib/util.py#L128-L156
def compose_arrays(a1, a2, firstfield='etag'): """ Compose composite arrays by generating an extended datatype containing all the fields. The two arrays must have the same length. """ assert len(a1) == len(a2), (len(a1), len(a2)) if a1.dtype.names is None and len(a1.shape) == 1: # the first array is not composite, but it is one-dimensional a1 = numpy.array(a1, numpy.dtype([(firstfield, a1.dtype)])) fields1 = [(f, a1.dtype.fields[f][0]) for f in a1.dtype.names] if a2.dtype.names is None: # the second array is not composite assert len(a2.shape) == 2, a2.shape width = a2.shape[1] fields2 = [('value%d' % i, a2.dtype) for i in range(width)] composite = numpy.zeros(a1.shape, numpy.dtype(fields1 + fields2)) for f1 in dict(fields1): composite[f1] = a1[f1] for i in range(width): composite['value%d' % i] = a2[:, i] return composite fields2 = [(f, a2.dtype.fields[f][0]) for f in a2.dtype.names] composite = numpy.zeros(a1.shape, numpy.dtype(fields1 + fields2)) for f1 in dict(fields1): composite[f1] = a1[f1] for f2 in dict(fields2): composite[f2] = a2[f2] return composite
[ "def", "compose_arrays", "(", "a1", ",", "a2", ",", "firstfield", "=", "'etag'", ")", ":", "assert", "len", "(", "a1", ")", "==", "len", "(", "a2", ")", ",", "(", "len", "(", "a1", ")", ",", "len", "(", "a2", ")", ")", "if", "a1", ".", "dtype...
Compose composite arrays by generating an extended datatype containing all the fields. The two arrays must have the same length.
[ "Compose", "composite", "arrays", "by", "generating", "an", "extended", "datatype", "containing", "all", "the", "fields", ".", "The", "two", "arrays", "must", "have", "the", "same", "length", "." ]
python
train
biocommons/hgvs
hgvs/parser.py
https://github.com/biocommons/hgvs/blob/4d16efb475e1802b2531a2f1c373e8819d8e533b/hgvs/parser.py#L114-L153
def _expose_rule_functions(self, expose_all_rules=False): """add parse functions for public grammar rules Defines a function for each public grammar rule, based on introspecting the grammar. For example, the `c_interval` rule is exposed as a method `parse_c_interval` and used like this:: Parser.parse_c_interval('26+2_57-3') -> Interval(...) """ def make_parse_rule_function(rule_name): "builds a wrapper function that parses a string with the specified rule" def rule_fxn(s): try: return self._grammar(s).__getattr__(rule_name)() except ometa.runtime.ParseError as exc: raise HGVSParseError("{s}: char {exc.position}: {reason}".format( s=s, exc=exc, reason=exc.formatReason())) rule_fxn.__doc__ = "parse string s using `%s' rule" % rule_name return rule_fxn exposed_rule_re = re.compile(r"hgvs_(variant|position)|(c|g|m|n|p|r)" r"_(edit|hgvs_position|interval|pos|posedit|variant)") exposed_rules = [ m.replace("rule_", "") for m in dir(self._grammar._grammarClass) if m.startswith("rule_") ] if not expose_all_rules: exposed_rules = [ rule_name for rule_name in exposed_rules if exposed_rule_re.match(rule_name) ] for rule_name in exposed_rules: att_name = "parse_" + rule_name rule_fxn = make_parse_rule_function(rule_name) self.__setattr__(att_name, rule_fxn) self._logger.debug("Exposed {n} rules ({rules})".format( n=len(exposed_rules), rules=", ".join(exposed_rules)))
[ "def", "_expose_rule_functions", "(", "self", ",", "expose_all_rules", "=", "False", ")", ":", "def", "make_parse_rule_function", "(", "rule_name", ")", ":", "\"builds a wrapper function that parses a string with the specified rule\"", "def", "rule_fxn", "(", "s", ")", ":...
add parse functions for public grammar rules Defines a function for each public grammar rule, based on introspecting the grammar. For example, the `c_interval` rule is exposed as a method `parse_c_interval` and used like this:: Parser.parse_c_interval('26+2_57-3') -> Interval(...)
[ "add", "parse", "functions", "for", "public", "grammar", "rules" ]
python
train
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py#L9659-L9667
def mission_clear_all_send(self, target_system, target_component, force_mavlink1=False): ''' Delete all mission items at once. target_system : System ID (uint8_t) target_component : Component ID (uint8_t) ''' return self.send(self.mission_clear_all_encode(target_system, target_component), force_mavlink1=force_mavlink1)
[ "def", "mission_clear_all_send", "(", "self", ",", "target_system", ",", "target_component", ",", "force_mavlink1", "=", "False", ")", ":", "return", "self", ".", "send", "(", "self", ".", "mission_clear_all_encode", "(", "target_system", ",", "target_component", ...
Delete all mission items at once. target_system : System ID (uint8_t) target_component : Component ID (uint8_t)
[ "Delete", "all", "mission", "items", "at", "once", "." ]
python
train
Autodesk/aomi
aomi/vault.py
https://github.com/Autodesk/aomi/blob/84da2dfb0424837adf9c4ddc1aa352e942bb7a4a/aomi/vault.py#L297-L317
def op_token(self, display_name, opt): """Return a properly annotated token for our use. This token will be revoked at the end of the session. The token will have some decent amounts of metadata tho.""" args = { 'lease': opt.lease, 'display_name': display_name, 'meta': token_meta(opt) } try: token = self.create_token(**args) except (hvac.exceptions.InvalidRequest, hvac.exceptions.Forbidden) as vault_exception: if vault_exception.errors[0] == 'permission denied': emsg = "Permission denied creating operational token" raise aomi.exceptions.AomiCredentials(emsg) else: raise LOG.debug("Created operational token with lease of %s", opt.lease) return token['auth']['client_token']
[ "def", "op_token", "(", "self", ",", "display_name", ",", "opt", ")", ":", "args", "=", "{", "'lease'", ":", "opt", ".", "lease", ",", "'display_name'", ":", "display_name", ",", "'meta'", ":", "token_meta", "(", "opt", ")", "}", "try", ":", "token", ...
Return a properly annotated token for our use. This token will be revoked at the end of the session. The token will have some decent amounts of metadata tho.
[ "Return", "a", "properly", "annotated", "token", "for", "our", "use", ".", "This", "token", "will", "be", "revoked", "at", "the", "end", "of", "the", "session", ".", "The", "token", "will", "have", "some", "decent", "amounts", "of", "metadata", "tho", "....
python
train
gitenberg-dev/gitberg
gitenberg/util/tenprintcover.py
https://github.com/gitenberg-dev/gitberg/blob/3f6db8b5a22ccdd2110d3199223c30db4e558b5c/gitenberg/util/tenprintcover.py#L120-L131
def ellipse(self, x, y, width, height, color): """ See the Processing function ellipse(): https://processing.org/reference/ellipse_.html """ self.context.set_source_rgb(*color) self.context.save() self.context.translate(self.tx(x + (width / 2.0)), self.ty(y + (height / 2.0))) self.context.scale(self.tx(width / 2.0), self.ty(height / 2.0)) self.context.arc(0.0, 0.0, 1.0, 0.0, 2 * math.pi) self.context.fill() self.context.restore()
[ "def", "ellipse", "(", "self", ",", "x", ",", "y", ",", "width", ",", "height", ",", "color", ")", ":", "self", ".", "context", ".", "set_source_rgb", "(", "*", "color", ")", "self", ".", "context", ".", "save", "(", ")", "self", ".", "context", ...
See the Processing function ellipse(): https://processing.org/reference/ellipse_.html
[ "See", "the", "Processing", "function", "ellipse", "()", ":", "https", ":", "//", "processing", ".", "org", "/", "reference", "/", "ellipse_", ".", "html" ]
python
train
Yubico/yubikey-manager
ykman/cli/opgp.py
https://github.com/Yubico/yubikey-manager/blob/3ac27bc59ae76a59db9d09a530494add2edbbabf/ykman/cli/opgp.py#L83-L104
def openpgp(ctx): """ Manage OpenPGP Application. Examples: \b Set the retries for PIN, Reset Code and Admin PIN to 10: $ ykman openpgp set-retries 10 10 10 \b Require touch to use the authentication key: $ ykman openpgp touch aut on """ try: ctx.obj['controller'] = OpgpController(ctx.obj['dev'].driver) except APDUError as e: if e.sw == SW.NOT_FOUND: ctx.fail("The OpenPGP application can't be found on this " 'YubiKey.') logger.debug('Failed to load OpenPGP Application', exc_info=e) ctx.fail('Failed to load OpenPGP Application')
[ "def", "openpgp", "(", "ctx", ")", ":", "try", ":", "ctx", ".", "obj", "[", "'controller'", "]", "=", "OpgpController", "(", "ctx", ".", "obj", "[", "'dev'", "]", ".", "driver", ")", "except", "APDUError", "as", "e", ":", "if", "e", ".", "sw", "=...
Manage OpenPGP Application. Examples: \b Set the retries for PIN, Reset Code and Admin PIN to 10: $ ykman openpgp set-retries 10 10 10 \b Require touch to use the authentication key: $ ykman openpgp touch aut on
[ "Manage", "OpenPGP", "Application", "." ]
python
train
ThreatConnect-Inc/tcex
tcex/tcex_resources.py
https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_resources.py#L1632-L1655
def indicator_body(indicators): """Generate the appropriate dictionary content for POST of an File indicator Args: indicators (list): A list of one or more hash value(s). """ hash_patterns = { 'md5': re.compile(r'^([a-fA-F\d]{32})$'), 'sha1': re.compile(r'^([a-fA-F\d]{40})$'), 'sha256': re.compile(r'^([a-fA-F\d]{64})$'), } body = {} for indicator in indicators: if indicator is None: continue if hash_patterns['md5'].match(indicator): body['md5'] = indicator elif hash_patterns['sha1'].match(indicator): body['sha1'] = indicator elif hash_patterns['sha256'].match(indicator): body['sha256'] = indicator return body
[ "def", "indicator_body", "(", "indicators", ")", ":", "hash_patterns", "=", "{", "'md5'", ":", "re", ".", "compile", "(", "r'^([a-fA-F\\d]{32})$'", ")", ",", "'sha1'", ":", "re", ".", "compile", "(", "r'^([a-fA-F\\d]{40})$'", ")", ",", "'sha256'", ":", "re",...
Generate the appropriate dictionary content for POST of an File indicator Args: indicators (list): A list of one or more hash value(s).
[ "Generate", "the", "appropriate", "dictionary", "content", "for", "POST", "of", "an", "File", "indicator" ]
python
train
ministryofjustice/money-to-prisoners-common
mtp_common/build_tasks/executor.py
https://github.com/ministryofjustice/money-to-prisoners-common/blob/33c43a2912cb990d9148da7c8718f480f07d90a1/mtp_common/build_tasks/executor.py#L189-L193
def to_dict(self): """ Converts the set of parameters into a dict """ return dict((parameter.name, parameter.value) for parameter in self.values())
[ "def", "to_dict", "(", "self", ")", ":", "return", "dict", "(", "(", "parameter", ".", "name", ",", "parameter", ".", "value", ")", "for", "parameter", "in", "self", ".", "values", "(", ")", ")" ]
Converts the set of parameters into a dict
[ "Converts", "the", "set", "of", "parameters", "into", "a", "dict" ]
python
train
ArchiveTeam/wpull
wpull/protocol/http/util.py
https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/protocol/http/util.py#L6-L19
def parse_charset(header_string): '''Parse a "Content-Type" string for the document encoding. Returns: str, None ''' match = re.search( r'''charset[ ]?=[ ]?["']?([a-z0-9_-]+)''', header_string, re.IGNORECASE ) if match: return match.group(1)
[ "def", "parse_charset", "(", "header_string", ")", ":", "match", "=", "re", ".", "search", "(", "r'''charset[ ]?=[ ]?[\"']?([a-z0-9_-]+)'''", ",", "header_string", ",", "re", ".", "IGNORECASE", ")", "if", "match", ":", "return", "match", ".", "group", "(", "1"...
Parse a "Content-Type" string for the document encoding. Returns: str, None
[ "Parse", "a", "Content", "-", "Type", "string", "for", "the", "document", "encoding", "." ]
python
train
rongcloud/server-sdk-python
rongcloud/message.py
https://github.com/rongcloud/server-sdk-python/blob/3daadd8b67c84cc5d2a9419e8d45fd69c9baf976/rongcloud/message.py#L319-L362
def broadcast(self, fromUserId, objectName, content, pushContent=None, pushData=None, os=None): """ 发送广播消息方法(发送消息给一个应用下的所有注册用户,如用户未在线会对满足条件(绑定手机终端)的用户发送 Push 信息,单条消息最大 128k,会话类型为 SYSTEM。每小时只能发送 1 次,每天最多发送 3 次。) 方法 @param fromUserId:发送人用户 Id。(必传) @param txtMessage:文本消息。 @param pushContent:定义显示的 Push 内容,如果 objectName 为融云内置消息类型时,则发送后用户一定会收到 Push 信息. 如果为自定义消息,则 pushContent 为自定义消息显示的 Push 内容,如果不传则用户不会收到 Push 通知.(可选) @param pushData:针对 iOS 平台为 Push 通知时附加到 payload 中,Android 客户端收到推送消息时对应字段名为 pushData。(可选) @param os:针对操作系统发送 Push,值为 iOS 表示对 iOS 手机用户发送 Push ,为 Android 时表示对 Android 手机用户发送 Push ,如对所有用户发送 Push 信息,则不需要传 os 参数。(可选) @return code:返回码,200 为正常。 @return errorMessage:错误信息。 """ desc = { "name": "CodeSuccessReslut", "desc": " http 成功返回结果", "fields": [{ "name": "code", "type": "Integer", "desc": "返回码,200 为正常。" }, { "name": "errorMessage", "type": "String", "desc": "错误信息。" }] } r = self.call_api( method=('API', 'POST', 'application/x-www-form-urlencoded'), action='/message/broadcast.json', params={ "fromUserId": fromUserId, "objectName": objectName, "content": content, "pushContent": pushContent, "pushData": pushData, "os": os }) return Response(r, desc)
[ "def", "broadcast", "(", "self", ",", "fromUserId", ",", "objectName", ",", "content", ",", "pushContent", "=", "None", ",", "pushData", "=", "None", ",", "os", "=", "None", ")", ":", "desc", "=", "{", "\"name\"", ":", "\"CodeSuccessReslut\"", ",", "\"de...
发送广播消息方法(发送消息给一个应用下的所有注册用户,如用户未在线会对满足条件(绑定手机终端)的用户发送 Push 信息,单条消息最大 128k,会话类型为 SYSTEM。每小时只能发送 1 次,每天最多发送 3 次。) 方法 @param fromUserId:发送人用户 Id。(必传) @param txtMessage:文本消息。 @param pushContent:定义显示的 Push 内容,如果 objectName 为融云内置消息类型时,则发送后用户一定会收到 Push 信息. 如果为自定义消息,则 pushContent 为自定义消息显示的 Push 内容,如果不传则用户不会收到 Push 通知.(可选) @param pushData:针对 iOS 平台为 Push 通知时附加到 payload 中,Android 客户端收到推送消息时对应字段名为 pushData。(可选) @param os:针对操作系统发送 Push,值为 iOS 表示对 iOS 手机用户发送 Push ,为 Android 时表示对 Android 手机用户发送 Push ,如对所有用户发送 Push 信息,则不需要传 os 参数。(可选) @return code:返回码,200 为正常。 @return errorMessage:错误信息。
[ "发送广播消息方法(发送消息给一个应用下的所有注册用户,如用户未在线会对满足条件(绑定手机终端)的用户发送", "Push", "信息,单条消息最大", "128k,会话类型为", "SYSTEM。每小时只能发送", "1", "次,每天最多发送", "3", "次。)", "方法" ]
python
train
Azure/azure-sdk-for-python
azure-mgmt-containerservice/azure/mgmt/containerservice/container_service_client.py
https://github.com/Azure/azure-sdk-for-python/blob/d7306fde32f60a293a7567678692bdad31e4b667/azure-mgmt-containerservice/azure/mgmt/containerservice/container_service_client.py#L102-L126
def models(cls, api_version=DEFAULT_API_VERSION): """Module depends on the API version: * 2017-07-01: :mod:`v2017_07_01.models<azure.mgmt.containerservice.v2017_07_01.models>` * 2018-03-31: :mod:`v2018_03_31.models<azure.mgmt.containerservice.v2018_03_31.models>` * 2018-08-01-preview: :mod:`v2018_08_01_preview.models<azure.mgmt.containerservice.v2018_08_01_preview.models>` * 2018-09-30-preview: :mod:`v2018_09_30_preview.models<azure.mgmt.containerservice.v2018_09_30_preview.models>` * 2019-02-01: :mod:`v2019_02_01.models<azure.mgmt.containerservice.v2019_02_01.models>` """ if api_version == '2017-07-01': from .v2017_07_01 import models return models elif api_version == '2018-03-31': from .v2018_03_31 import models return models elif api_version == '2018-08-01-preview': from .v2018_08_01_preview import models return models elif api_version == '2018-09-30-preview': from .v2018_09_30_preview import models return models elif api_version == '2019-02-01': from .v2019_02_01 import models return models raise NotImplementedError("APIVersion {} is not available".format(api_version))
[ "def", "models", "(", "cls", ",", "api_version", "=", "DEFAULT_API_VERSION", ")", ":", "if", "api_version", "==", "'2017-07-01'", ":", "from", ".", "v2017_07_01", "import", "models", "return", "models", "elif", "api_version", "==", "'2018-03-31'", ":", "from", ...
Module depends on the API version: * 2017-07-01: :mod:`v2017_07_01.models<azure.mgmt.containerservice.v2017_07_01.models>` * 2018-03-31: :mod:`v2018_03_31.models<azure.mgmt.containerservice.v2018_03_31.models>` * 2018-08-01-preview: :mod:`v2018_08_01_preview.models<azure.mgmt.containerservice.v2018_08_01_preview.models>` * 2018-09-30-preview: :mod:`v2018_09_30_preview.models<azure.mgmt.containerservice.v2018_09_30_preview.models>` * 2019-02-01: :mod:`v2019_02_01.models<azure.mgmt.containerservice.v2019_02_01.models>`
[ "Module", "depends", "on", "the", "API", "version", ":" ]
python
test
cloudtools/stacker
stacker/blueprints/raw.py
https://github.com/cloudtools/stacker/blob/ad6013a03a560c46ba3c63c4d153336273e6da5d/stacker/blueprints/raw.py#L18-L38
def get_template_path(filename): """Find raw template in working directory or in sys.path. template_path from config may refer to templates colocated with the Stacker config, or files in remote package_sources. Here, we emulate python module loading to find the path to the template. Args: filename (str): Template filename. Returns: Optional[str]: Path to file, or None if no file found """ if os.path.isfile(filename): return os.path.abspath(filename) for i in sys.path: if os.path.isfile(os.path.join(i, filename)): return os.path.abspath(os.path.join(i, filename)) return None
[ "def", "get_template_path", "(", "filename", ")", ":", "if", "os", ".", "path", ".", "isfile", "(", "filename", ")", ":", "return", "os", ".", "path", ".", "abspath", "(", "filename", ")", "for", "i", "in", "sys", ".", "path", ":", "if", "os", ".",...
Find raw template in working directory or in sys.path. template_path from config may refer to templates colocated with the Stacker config, or files in remote package_sources. Here, we emulate python module loading to find the path to the template. Args: filename (str): Template filename. Returns: Optional[str]: Path to file, or None if no file found
[ "Find", "raw", "template", "in", "working", "directory", "or", "in", "sys", ".", "path", "." ]
python
train
kodexlab/reliure
reliure/engine.py
https://github.com/kodexlab/reliure/blob/0450c7a9254c5c003162738458bbe0c49e777ba5/reliure/engine.py#L362-L373
def selected(self): """ returns the list of selected component names. if no component selected return the one marked as default. If the block is required and no component where indicated as default, then the first component is selected. """ selected = self._selected if len(self._selected) == 0 and self.required: # nothing has been selected yet BUT the component is required selected = self.defaults return selected
[ "def", "selected", "(", "self", ")", ":", "selected", "=", "self", ".", "_selected", "if", "len", "(", "self", ".", "_selected", ")", "==", "0", "and", "self", ".", "required", ":", "# nothing has been selected yet BUT the component is required", "selected", "="...
returns the list of selected component names. if no component selected return the one marked as default. If the block is required and no component where indicated as default, then the first component is selected.
[ "returns", "the", "list", "of", "selected", "component", "names", "." ]
python
train
maxpumperla/elephas
elephas/mllib/adapter.py
https://github.com/maxpumperla/elephas/blob/84605acdc9564673c487637dcb27f5def128bcc7/elephas/mllib/adapter.py#L11-L20
def to_matrix(np_array): """Convert numpy array to MLlib Matrix """ if len(np_array.shape) == 2: return Matrices.dense(np_array.shape[0], np_array.shape[1], np_array.ravel()) else: raise Exception("An MLLib Matrix can only be created from a two-dimensional " + "numpy array, got {}".format(len(np_array.shape)))
[ "def", "to_matrix", "(", "np_array", ")", ":", "if", "len", "(", "np_array", ".", "shape", ")", "==", "2", ":", "return", "Matrices", ".", "dense", "(", "np_array", ".", "shape", "[", "0", "]", ",", "np_array", ".", "shape", "[", "1", "]", ",", "...
Convert numpy array to MLlib Matrix
[ "Convert", "numpy", "array", "to", "MLlib", "Matrix" ]
python
train
NYUCCL/psiTurk
psiturk/user_utils.py
https://github.com/NYUCCL/psiTurk/blob/7170b992a0b5f56c165929cf87b3d3a1f3336c36/psiturk/user_utils.py#L41-L44
def check_auth(self, username, password): ''' This function is called to check if a username password combination is valid. ''' return username == self.queryname and password == self.querypw
[ "def", "check_auth", "(", "self", ",", "username", ",", "password", ")", ":", "return", "username", "==", "self", ".", "queryname", "and", "password", "==", "self", ".", "querypw" ]
This function is called to check if a username password combination is valid.
[ "This", "function", "is", "called", "to", "check", "if", "a", "username", "password", "combination", "is", "valid", "." ]
python
train
alejandroautalan/pygubu
pygubu/builder/__init__.py
https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubu/builder/__init__.py#L228-L236
def import_variables(self, container, varnames=None): """Helper method to avoid call get_variable for every variable.""" if varnames is None: for keyword in self.tkvariables: setattr(container, keyword, self.tkvariables[keyword]) else: for keyword in varnames: if keyword in self.tkvariables: setattr(container, keyword, self.tkvariables[keyword])
[ "def", "import_variables", "(", "self", ",", "container", ",", "varnames", "=", "None", ")", ":", "if", "varnames", "is", "None", ":", "for", "keyword", "in", "self", ".", "tkvariables", ":", "setattr", "(", "container", ",", "keyword", ",", "self", ".",...
Helper method to avoid call get_variable for every variable.
[ "Helper", "method", "to", "avoid", "call", "get_variable", "for", "every", "variable", "." ]
python
train
tapilab/twutil
twutil/collect.py
https://github.com/tapilab/twutil/blob/c967120c41a6e216f3d476214b85a736cade0e29/twutil/collect.py#L107-L117
def tweets_for_user(screen_name, limit=1e10): """ Collect the most recent 3200 tweets for this user, sleeping to deal with rate limits.""" qu = Queue() p = Thread(target=_tweets_for_user, args=(qu, screen_name, limit)) p.start() p.join(910) if p.is_alive(): sys.stderr.write('no results after 15 minutes for %s. Aborting.' % screen_name) return [] else: return qu.get()
[ "def", "tweets_for_user", "(", "screen_name", ",", "limit", "=", "1e10", ")", ":", "qu", "=", "Queue", "(", ")", "p", "=", "Thread", "(", "target", "=", "_tweets_for_user", ",", "args", "=", "(", "qu", ",", "screen_name", ",", "limit", ")", ")", "p",...
Collect the most recent 3200 tweets for this user, sleeping to deal with rate limits.
[ "Collect", "the", "most", "recent", "3200", "tweets", "for", "this", "user", "sleeping", "to", "deal", "with", "rate", "limits", "." ]
python
train
genepattern/genepattern-python
gp/data.py
https://github.com/genepattern/genepattern-python/blob/9478ea65362b91c72a94f7300c3de8d710bebb71/gp/data.py#L155-L163
def _apply_backwards_compatibility(df): """ Attach properties to the Dataframe to make it backwards compatible with older versions of this library :param df: The dataframe to be modified """ df.row_count = types.MethodType(lambda self: len(self.index), df) df.col_count = types.MethodType(lambda self: len(self.columns), df) df.dataframe = df
[ "def", "_apply_backwards_compatibility", "(", "df", ")", ":", "df", ".", "row_count", "=", "types", ".", "MethodType", "(", "lambda", "self", ":", "len", "(", "self", ".", "index", ")", ",", "df", ")", "df", ".", "col_count", "=", "types", ".", "Method...
Attach properties to the Dataframe to make it backwards compatible with older versions of this library :param df: The dataframe to be modified
[ "Attach", "properties", "to", "the", "Dataframe", "to", "make", "it", "backwards", "compatible", "with", "older", "versions", "of", "this", "library" ]
python
train
rwl/pylon
pylon/dyn.py
https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/dyn.py#L99-L129
def getAugYbus(self, U0, gbus): """ Based on AugYbus.m from MatDyn by Stijn Cole, developed at Katholieke Universiteit Leuven. See U{http://www.esat.kuleuven.be/ electa/teaching/matdyn/} for more information. @rtype: csr_matrix @return: The augmented bus admittance matrix. """ j = 0 + 1j buses = self.case.connected_buses nb = len(buses) Ybus, _, _ = self.case.getYbus() # Steady-state bus voltages. # Calculate equivalent load admittance Sd = array([self.case.s_demand(bus) for bus in buses]) Yd = conj(Sd) / abs(U0)**2 xd_tr = array([g.xd_tr for g in self.dyn_generators]) # Calculate equivalent generator admittance. Yg = zeros(nb) Yg[gbus] = 1 / (j * xd_tr) # Add equivalent load and generator admittance to Ybus matrix for i in range(nb): Ybus[i, i] = Ybus[i, i] + Yg[i] + Yd[i] return Ybus
[ "def", "getAugYbus", "(", "self", ",", "U0", ",", "gbus", ")", ":", "j", "=", "0", "+", "1j", "buses", "=", "self", ".", "case", ".", "connected_buses", "nb", "=", "len", "(", "buses", ")", "Ybus", ",", "_", ",", "_", "=", "self", ".", "case", ...
Based on AugYbus.m from MatDyn by Stijn Cole, developed at Katholieke Universiteit Leuven. See U{http://www.esat.kuleuven.be/ electa/teaching/matdyn/} for more information. @rtype: csr_matrix @return: The augmented bus admittance matrix.
[ "Based", "on", "AugYbus", ".", "m", "from", "MatDyn", "by", "Stijn", "Cole", "developed", "at", "Katholieke", "Universiteit", "Leuven", ".", "See", "U", "{", "http", ":", "//", "www", ".", "esat", ".", "kuleuven", ".", "be", "/", "electa", "/", "teachi...
python
train
singularityhub/sregistry-cli
sregistry/main/s3/pull.py
https://github.com/singularityhub/sregistry-cli/blob/abc96140a1d15b5e96d83432e1e0e1f4f8f36331/sregistry/main/s3/pull.py#L18-L106
def pull(self, images, file_name=None, save=True, **kwargs): '''pull an image from a s3 storage Parameters ========== images: refers to the uri given by the user to pull in the format <collection>/<namespace>. You should have an API that is able to retrieve a container based on parsing this uri. file_name: the user's requested name for the file. It can optionally be None if the user wants a default. save: if True, you should save the container to the database using self.add() Returns ======= finished: a single container path, or list of paths ''' if not isinstance(images,list): images = [images] bot.debug('Execution of PULL for %s images' %len(images)) finished = [] for image in images: image = remove_uri(image) names = parse_image_name(image) if file_name is None: file_name = names['storage'].replace('/','-') # Assume the user provided the correct uri to start uri = names['storage_uri'] # First try to get the storage uri directly. try: self.bucket.download_file(uri, file_name) # If we can't find the file, help the user except botocore.exceptions.ClientError as e: if e.response['Error']['Code'] == "404": # Case 1, image not found, but not error with API bot.error('Cannot find %s!' % name) # Try to help the user with suggestions results = self._search_all(query=image) if len(results) > 0: bot.info('Did you mean:\n' % '\n'.join(results)) sys.exit(1) else: # Case 2: error with request, exit. 
bot.exit('Error downloading image %s' % image) # if we get down here, we have a uri found = None for obj in self.bucket.objects.filter(Prefix=image): if image in obj.key: found = obj # If we find the object, get metadata metadata = {} if found != None: metadata = found.get()['Metadata'] # Metadata bug will capitalize all fields, workaround is to lowercase # https://github.com/boto/boto3/issues/1709 metadata = dict((k.lower(), v) for k, v in metadata.items()) metadata.update(names) # If the user is saving to local storage if save is True and os.path.exists(file_name): container = self.add(image_path = file_name, image_uri = names['tag_uri'], metadata = metadata) file_name = container.image # If the image was pulled to either if os.path.exists(file_name): bot.custom(prefix="Success!", message = file_name) finished.append(file_name) if len(finished) == 1: finished = finished[0] return finished
[ "def", "pull", "(", "self", ",", "images", ",", "file_name", "=", "None", ",", "save", "=", "True", ",", "*", "*", "kwargs", ")", ":", "if", "not", "isinstance", "(", "images", ",", "list", ")", ":", "images", "=", "[", "images", "]", "bot", ".",...
pull an image from a s3 storage Parameters ========== images: refers to the uri given by the user to pull in the format <collection>/<namespace>. You should have an API that is able to retrieve a container based on parsing this uri. file_name: the user's requested name for the file. It can optionally be None if the user wants a default. save: if True, you should save the container to the database using self.add() Returns ======= finished: a single container path, or list of paths
[ "pull", "an", "image", "from", "a", "s3", "storage", "Parameters", "==========", "images", ":", "refers", "to", "the", "uri", "given", "by", "the", "user", "to", "pull", "in", "the", "format", "<collection", ">", "/", "<namespace", ">", ".", "You", "shou...
python
test
alphagov/performanceplatform-collector
performanceplatform/collector/main.py
https://github.com/alphagov/performanceplatform-collector/blob/de68ab4aa500c31e436e050fa1268fa928c522a5/performanceplatform/collector/main.py#L38-L51
def make_extra_json_fields(args): """ From the parsed command-line arguments, generate a dictionary of additional fields to be inserted into JSON logs (logstash_formatter module) """ extra_json_fields = { 'data_group': _get_data_group(args.query), 'data_type': _get_data_type(args.query), 'data_group_data_type': _get_data_group_data_type(args.query), 'query': _get_query_params(args.query), } if "path_to_json_file" in args.query: extra_json_fields['path_to_query'] = _get_path_to_json_file(args.query) return extra_json_fields
[ "def", "make_extra_json_fields", "(", "args", ")", ":", "extra_json_fields", "=", "{", "'data_group'", ":", "_get_data_group", "(", "args", ".", "query", ")", ",", "'data_type'", ":", "_get_data_type", "(", "args", ".", "query", ")", ",", "'data_group_data_type'...
From the parsed command-line arguments, generate a dictionary of additional fields to be inserted into JSON logs (logstash_formatter module)
[ "From", "the", "parsed", "command", "-", "line", "arguments", "generate", "a", "dictionary", "of", "additional", "fields", "to", "be", "inserted", "into", "JSON", "logs", "(", "logstash_formatter", "module", ")" ]
python
train
aio-libs/aioftp
aioftp/common.py
https://github.com/aio-libs/aioftp/blob/b45395b1aba41301b898040acade7010e6878a08/aioftp/common.py#L179-L230
def async_enterable(f): """ Decorator. Bring coroutine result up, so it can be used as async context :: >>> async def foo(): ... ... ... ... return AsyncContextInstance(...) ... ... ctx = await foo() ... async with ctx: ... ... # do :: >>> @async_enterable ... async def foo(): ... ... ... ... return AsyncContextInstance(...) ... ... async with foo() as ctx: ... ... # do ... ... ctx = await foo() ... async with ctx: ... ... # do """ @functools.wraps(f) def wrapper(*args, **kwargs): class AsyncEnterableInstance: async def __aenter__(self): self.context = await f(*args, **kwargs) return await self.context.__aenter__() async def __aexit__(self, *args, **kwargs): await self.context.__aexit__(*args, **kwargs) def __await__(self): return f(*args, **kwargs).__await__() return AsyncEnterableInstance() return wrapper
[ "def", "async_enterable", "(", "f", ")", ":", "@", "functools", ".", "wraps", "(", "f", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "class", "AsyncEnterableInstance", ":", "async", "def", "__aenter__", "(", "self", ")", ...
Decorator. Bring coroutine result up, so it can be used as async context :: >>> async def foo(): ... ... ... ... return AsyncContextInstance(...) ... ... ctx = await foo() ... async with ctx: ... ... # do :: >>> @async_enterable ... async def foo(): ... ... ... ... return AsyncContextInstance(...) ... ... async with foo() as ctx: ... ... # do ... ... ctx = await foo() ... async with ctx: ... ... # do
[ "Decorator", ".", "Bring", "coroutine", "result", "up", "so", "it", "can", "be", "used", "as", "async", "context" ]
python
valid
lowandrew/OLCTools
coreGenome/annotate.py
https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/coreGenome/annotate.py#L233-L256
def cdsparse(self, record): """ Finds core genes, and records gene names and sequences in dictionaries :param record: SeqIO record """ try: # Find genes that are present in all strains of interest - the number of times the gene is found is # equal to the number of strains. Earlier parsing ensures that the same gene is not present in a strain # more than once if self.genes[self.genenames[record.id]] == len(self.runmetadata.samples): # Add the gene names and sequences to the appropriate dictionaries try: self.genesequence[self.genenames[record.id]].add(str(record.seq)) # Initialise the dictionary as required, then populate as above except KeyError: self.genesequence[self.genenames[record.id]] = set() self.genesequence[self.genenames[record.id]].add(str(record.seq)) try: self.coresequence[str(record.seq)].add(record.id) except KeyError: self.coresequence[str(record.seq)] = set() self.coresequence[str(record.seq)].add(record.id) except KeyError: pass
[ "def", "cdsparse", "(", "self", ",", "record", ")", ":", "try", ":", "# Find genes that are present in all strains of interest - the number of times the gene is found is", "# equal to the number of strains. Earlier parsing ensures that the same gene is not present in a strain", "# more than ...
Finds core genes, and records gene names and sequences in dictionaries :param record: SeqIO record
[ "Finds", "core", "genes", "and", "records", "gene", "names", "and", "sequences", "in", "dictionaries", ":", "param", "record", ":", "SeqIO", "record" ]
python
train
PMBio/limix-backup
limix/varDecomp/varianceDecomposition.py
https://github.com/PMBio/limix-backup/blob/1e201fdb5c694d0d5506f207f3de65d8ef66146c/limix/varDecomp/varianceDecomposition.py#L176-L205
def addFixedEffect(self, F=None, A=None, Ftest=None): """ add fixed effect term to the model Args: F: sample design matrix for the fixed effect [N,K] A: trait design matrix for the fixed effect (e.g. sp.ones((1,P)) common effect; sp.eye(P) any effect) [L,P] Ftest: sample design matrix for test samples [Ntest,K] """ if A is None: A = sp.eye(self.P) if F is None: F = sp.ones((self.N,1)) if self.Ntest is not None: Ftest = sp.ones((self.Ntest,1)) assert A.shape[1]==self.P, 'VarianceDecomposition:: A has incompatible shape' assert F.shape[0]==self.N, 'VarianceDecimposition:: F has incompatible shape' if Ftest is not None: assert self.Ntest is not None, 'VarianceDecomposition:: specify Ntest for predictions (method VarianceDecomposition::setTestSampleSize)' assert Ftest.shape[0]==self.Ntest, 'VarianceDecimposition:: Ftest has incompatible shape' assert Ftest.shape[1]==F.shape[1], 'VarianceDecimposition:: Ftest has incompatible shape' # add fixed effect self.sample_designs.append(F) self.sample_test_designs.append(Ftest) self.trait_designs.append(A) self._desync()
[ "def", "addFixedEffect", "(", "self", ",", "F", "=", "None", ",", "A", "=", "None", ",", "Ftest", "=", "None", ")", ":", "if", "A", "is", "None", ":", "A", "=", "sp", ".", "eye", "(", "self", ".", "P", ")", "if", "F", "is", "None", ":", "F"...
add fixed effect term to the model Args: F: sample design matrix for the fixed effect [N,K] A: trait design matrix for the fixed effect (e.g. sp.ones((1,P)) common effect; sp.eye(P) any effect) [L,P] Ftest: sample design matrix for test samples [Ntest,K]
[ "add", "fixed", "effect", "term", "to", "the", "model" ]
python
train
westerncapelabs/django-messaging-subscription
subscription/tasks.py
https://github.com/westerncapelabs/django-messaging-subscription/blob/7af7021cdd6c02b0dfd4b617b9274401972dbaf8/subscription/tasks.py#L43-L57
def ensure_one_subscription(): """ Fixes issues caused by upstream failures that lead to users having multiple active subscriptions Runs daily """ cursor = connection.cursor() cursor.execute("UPDATE subscription_subscription SET active = False \ WHERE id NOT IN \ (SELECT MAX(id) as id FROM \ subscription_subscription GROUP BY to_addr)") affected = cursor.rowcount vumi_fire_metric.delay( metric="subscription.duplicates", value=affected, agg="last") return affected
[ "def", "ensure_one_subscription", "(", ")", ":", "cursor", "=", "connection", ".", "cursor", "(", ")", "cursor", ".", "execute", "(", "\"UPDATE subscription_subscription SET active = False \\\n WHERE id NOT IN \\\n (SELECT MAX(id) as id FROM \\\n ...
Fixes issues caused by upstream failures that lead to users having multiple active subscriptions Runs daily
[ "Fixes", "issues", "caused", "by", "upstream", "failures", "that", "lead", "to", "users", "having", "multiple", "active", "subscriptions", "Runs", "daily" ]
python
train
rwl/pylon
pylon/generator.py
https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/generator.py#L311-L317
def get_offers(self, n_points=6): """ Returns quantity and price offers created from the cost function. """ from pyreto.smart_market import Offer qtyprc = self._get_qtyprc(n_points) return [Offer(self, qty, prc) for qty, prc in qtyprc]
[ "def", "get_offers", "(", "self", ",", "n_points", "=", "6", ")", ":", "from", "pyreto", ".", "smart_market", "import", "Offer", "qtyprc", "=", "self", ".", "_get_qtyprc", "(", "n_points", ")", "return", "[", "Offer", "(", "self", ",", "qty", ",", "prc...
Returns quantity and price offers created from the cost function.
[ "Returns", "quantity", "and", "price", "offers", "created", "from", "the", "cost", "function", "." ]
python
train
dirmeier/dataframe
dataframe/_check.py
https://github.com/dirmeier/dataframe/blob/39992e23293393cc1320d1b9c1c8d2c325fc5626/dataframe/_check.py#L64-L77
def is_disjoint(set1, set2, warn): """ Checks if elements of set2 are in set1. :param set1: a set of values :param set2: a set of values :param warn: the error message that should be thrown when the sets are NOT disjoint :return: returns true no elements of set2 are in set1 """ for elem in set2: if elem in set1: raise ValueError(warn) return True
[ "def", "is_disjoint", "(", "set1", ",", "set2", ",", "warn", ")", ":", "for", "elem", "in", "set2", ":", "if", "elem", "in", "set1", ":", "raise", "ValueError", "(", "warn", ")", "return", "True" ]
Checks if elements of set2 are in set1. :param set1: a set of values :param set2: a set of values :param warn: the error message that should be thrown when the sets are NOT disjoint :return: returns true no elements of set2 are in set1
[ "Checks", "if", "elements", "of", "set2", "are", "in", "set1", "." ]
python
valid
fastai/fastai
fastai/callback.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/callback.py#L196-L200
def get_state(self, minimal:bool=True): "Return the inner state of the `Callback`, `minimal` or not." to_remove = ['exclude', 'not_min'] + getattr(self, 'exclude', []).copy() if minimal: to_remove += getattr(self, 'not_min', []).copy() return {k:v for k,v in self.__dict__.items() if k not in to_remove}
[ "def", "get_state", "(", "self", ",", "minimal", ":", "bool", "=", "True", ")", ":", "to_remove", "=", "[", "'exclude'", ",", "'not_min'", "]", "+", "getattr", "(", "self", ",", "'exclude'", ",", "[", "]", ")", ".", "copy", "(", ")", "if", "minimal...
Return the inner state of the `Callback`, `minimal` or not.
[ "Return", "the", "inner", "state", "of", "the", "Callback", "minimal", "or", "not", "." ]
python
train
gem/oq-engine
openquake/risklib/scientific.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/risklib/scientific.py#L295-L309
def mean_imls(self): """ Compute the mean IMLs (Intensity Measure Level) for the given vulnerability function. :param vulnerability_function: the vulnerability function where the IMLs (Intensity Measure Level) are taken from. :type vuln_function: :py:class:`openquake.risklib.vulnerability_function.\ VulnerabilityFunction` """ return numpy.array( [max(0, self.imls[0] - (self.imls[1] - self.imls[0]) / 2.)] + [numpy.mean(pair) for pair in pairwise(self.imls)] + [self.imls[-1] + (self.imls[-1] - self.imls[-2]) / 2.])
[ "def", "mean_imls", "(", "self", ")", ":", "return", "numpy", ".", "array", "(", "[", "max", "(", "0", ",", "self", ".", "imls", "[", "0", "]", "-", "(", "self", ".", "imls", "[", "1", "]", "-", "self", ".", "imls", "[", "0", "]", ")", "/",...
Compute the mean IMLs (Intensity Measure Level) for the given vulnerability function. :param vulnerability_function: the vulnerability function where the IMLs (Intensity Measure Level) are taken from. :type vuln_function: :py:class:`openquake.risklib.vulnerability_function.\ VulnerabilityFunction`
[ "Compute", "the", "mean", "IMLs", "(", "Intensity", "Measure", "Level", ")", "for", "the", "given", "vulnerability", "function", "." ]
python
train
bspaans/python-mingus
mingus/containers/bar.py
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/containers/bar.py#L183-L188
def determine_chords(self, shorthand=False): """Return a list of lists [place_in_beat, possible_chords].""" chords = [] for x in self.bar: chords.append([x[0], x[2].determine(shorthand)]) return chords
[ "def", "determine_chords", "(", "self", ",", "shorthand", "=", "False", ")", ":", "chords", "=", "[", "]", "for", "x", "in", "self", ".", "bar", ":", "chords", ".", "append", "(", "[", "x", "[", "0", "]", ",", "x", "[", "2", "]", ".", "determin...
Return a list of lists [place_in_beat, possible_chords].
[ "Return", "a", "list", "of", "lists", "[", "place_in_beat", "possible_chords", "]", "." ]
python
train
DLR-RM/RAFCON
source/rafcon/utils/resources.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/utils/resources.py#L39-L57
def resource_filename(package_or_requirement, resource_name): """ Similar to pkg_resources.resource_filename but if the resource it not found via pkg_resources it also looks in a predefined list of paths in order to find the resource :param package_or_requirement: the module in which the resource resides :param resource_name: the name of the resource :return: the path to the resource :rtype: str """ if pkg_resources.resource_exists(package_or_requirement, resource_name): return pkg_resources.resource_filename(package_or_requirement, resource_name) path = _search_in_share_folders(package_or_requirement, resource_name) if path: return path raise RuntimeError("Resource {} not found in {}".format(package_or_requirement, resource_name))
[ "def", "resource_filename", "(", "package_or_requirement", ",", "resource_name", ")", ":", "if", "pkg_resources", ".", "resource_exists", "(", "package_or_requirement", ",", "resource_name", ")", ":", "return", "pkg_resources", ".", "resource_filename", "(", "package_or...
Similar to pkg_resources.resource_filename but if the resource it not found via pkg_resources it also looks in a predefined list of paths in order to find the resource :param package_or_requirement: the module in which the resource resides :param resource_name: the name of the resource :return: the path to the resource :rtype: str
[ "Similar", "to", "pkg_resources", ".", "resource_filename", "but", "if", "the", "resource", "it", "not", "found", "via", "pkg_resources", "it", "also", "looks", "in", "a", "predefined", "list", "of", "paths", "in", "order", "to", "find", "the", "resource" ]
python
train
epandurski/flask_signalbus
flask_signalbus/signalbus_cli.py
https://github.com/epandurski/flask_signalbus/blob/253800118443821a40404f04416422b076d62b6e/flask_signalbus/signalbus_cli.py#L82-L87
def signals(): """Show all signal types.""" signalbus = current_app.extensions['signalbus'] for signal_model in signalbus.get_signal_models(): click.echo(signal_model.__name__)
[ "def", "signals", "(", ")", ":", "signalbus", "=", "current_app", ".", "extensions", "[", "'signalbus'", "]", "for", "signal_model", "in", "signalbus", ".", "get_signal_models", "(", ")", ":", "click", ".", "echo", "(", "signal_model", ".", "__name__", ")" ]
Show all signal types.
[ "Show", "all", "signal", "types", "." ]
python
train
galaxy-genome-annotation/python-apollo
apollo/users/__init__.py
https://github.com/galaxy-genome-annotation/python-apollo/blob/2bc9991302abe4402ec2885dcaac35915475b387/apollo/users/__init__.py#L52-L67
def get_users(self, omit_empty_organisms=False): """ Get all users known to this Apollo instance :type omit_empty_organisms: bool :param omit_empty_organisms: Will omit users having no access to any organism :rtype: list of dicts :return: list of user info dictionaries """ payload = {} if omit_empty_organisms: payload['omitEmptyOrganisms'] = omit_empty_organisms res = self.post('loadUsers', payload) data = [_fix_user(user) for user in res] return data
[ "def", "get_users", "(", "self", ",", "omit_empty_organisms", "=", "False", ")", ":", "payload", "=", "{", "}", "if", "omit_empty_organisms", ":", "payload", "[", "'omitEmptyOrganisms'", "]", "=", "omit_empty_organisms", "res", "=", "self", ".", "post", "(", ...
Get all users known to this Apollo instance :type omit_empty_organisms: bool :param omit_empty_organisms: Will omit users having no access to any organism :rtype: list of dicts :return: list of user info dictionaries
[ "Get", "all", "users", "known", "to", "this", "Apollo", "instance" ]
python
train
capitalone/giraffez
giraffez/load.py
https://github.com/capitalone/giraffez/blob/6b4d27eb1a1eaf188c6885c7364ef27e92b1b957/giraffez/load.py#L202-L267
def from_file(self, filename, table=None, delimiter='|', null='NULL', panic=True, quotechar='"', parse_dates=False): """ Load from a file into the target table, handling each step of the load process. Can load from text files, and properly formatted giraffez archive files. In both cases, if Gzip compression is detected the file will be decompressed while reading and handled appropriately. The encoding is determined automatically by the contents of the file. It is not necessary to set the columns in use prior to loading from a file. In the case of a text file, the header is used to determine column names and their order. Valid delimiters include '|', ',', and '\\t' (tab). When loading an archive file, the column information is decoded alongside the data. :param str filename: The location of the file to be loaded :param str table: The name of the target table, if it was not specified to the constructor for the isntance :param str null: The string that indicates a null value in the rows being inserted from a file. Defaults to 'NULL' :param str delimiter: When loading a file, indicates that fields are separated by this delimiter. Defaults to :code:`None`, which causes the delimiter to be determined from the header of the file. In most cases, this behavior is sufficient :param str quotechar: The character used to quote fields containing special characters, like the delimiter. :param bool panic: If :code:`True`, when an error is encountered it will be raised. Otherwise, the error will be logged and :code:`self.error_count` is incremented. :return: The output of the call to :meth:`~giraffez.load.TeradataBulkLoad.finish` :raises `giraffez.errors.GiraffeError`: if table was not set and :code:`table` is :code:`None`, or if a Teradata error ocurred while retrieving table info. :raises `giraffez.errors.GiraffeEncodeError`: if :code:`panic` is :code:`True` and there are format errors in the row values. 
""" if not self.table: if not table: raise GiraffeError("Table must be set or specified to load a file.") self.table = table if not isinstance(null, basestring): raise GiraffeError("Expected 'null' to be str, received {}".format(type(null))) with Reader(filename, delimiter=delimiter, quotechar=quotechar) as f: if not isinstance(f.delimiter, basestring): raise GiraffeError("Expected 'delimiter' to be str, received {}".format(type(delimiter))) self.columns = f.header if isinstance(f, ArchiveFileReader): self.mload.set_encoding(ROW_ENCODING_RAW) self.preprocessor = lambda s: s if parse_dates: self.preprocessor = DateHandler(self.columns) self._initiate() self.mload.set_null(null) self.mload.set_delimiter(delimiter) i = 0 for i, line in enumerate(f, 1): self.put(line, panic=panic) if i % self.checkpoint_interval == 1: log.info("\rBulkLoad", "Processed {} rows".format(i), console=True) checkpoint_status = self.checkpoint() self.exit_code = self._exit_code() if self.exit_code != 0: return self.exit_code log.info("\rBulkLoad", "Processed {} rows".format(i)) return self.finish()
[ "def", "from_file", "(", "self", ",", "filename", ",", "table", "=", "None", ",", "delimiter", "=", "'|'", ",", "null", "=", "'NULL'", ",", "panic", "=", "True", ",", "quotechar", "=", "'\"'", ",", "parse_dates", "=", "False", ")", ":", "if", "not", ...
Load from a file into the target table, handling each step of the load process. Can load from text files, and properly formatted giraffez archive files. In both cases, if Gzip compression is detected the file will be decompressed while reading and handled appropriately. The encoding is determined automatically by the contents of the file. It is not necessary to set the columns in use prior to loading from a file. In the case of a text file, the header is used to determine column names and their order. Valid delimiters include '|', ',', and '\\t' (tab). When loading an archive file, the column information is decoded alongside the data. :param str filename: The location of the file to be loaded :param str table: The name of the target table, if it was not specified to the constructor for the isntance :param str null: The string that indicates a null value in the rows being inserted from a file. Defaults to 'NULL' :param str delimiter: When loading a file, indicates that fields are separated by this delimiter. Defaults to :code:`None`, which causes the delimiter to be determined from the header of the file. In most cases, this behavior is sufficient :param str quotechar: The character used to quote fields containing special characters, like the delimiter. :param bool panic: If :code:`True`, when an error is encountered it will be raised. Otherwise, the error will be logged and :code:`self.error_count` is incremented. :return: The output of the call to :meth:`~giraffez.load.TeradataBulkLoad.finish` :raises `giraffez.errors.GiraffeError`: if table was not set and :code:`table` is :code:`None`, or if a Teradata error ocurred while retrieving table info. :raises `giraffez.errors.GiraffeEncodeError`: if :code:`panic` is :code:`True` and there are format errors in the row values.
[ "Load", "from", "a", "file", "into", "the", "target", "table", "handling", "each", "step", "of", "the", "load", "process", "." ]
python
test
jf-parent/brome
brome/runner/browser_config.py
https://github.com/jf-parent/brome/blob/784f45d96b83b703dd2181cb59ca8ea777c2510e/brome/runner/browser_config.py#L42-L59
def validate_config(self): """Validate that the browser config contains all the needed config """ # LOCALHOST if self.location == 'localhost': if 'browserName' not in self.config.keys(): msg = "Add the 'browserName' in your local_config: e.g.: 'Firefox', 'Chrome', 'Safari'" # noqa self.runner.critical_log(msg) raise BromeBrowserConfigException(msg) # EC2 elif self.location == 'ec2': self.validate_ec2_browser_config() # VIRTUALBOX elif self.location == 'virtualbox': self.validate_virtualbox_config()
[ "def", "validate_config", "(", "self", ")", ":", "# LOCALHOST", "if", "self", ".", "location", "==", "'localhost'", ":", "if", "'browserName'", "not", "in", "self", ".", "config", ".", "keys", "(", ")", ":", "msg", "=", "\"Add the 'browserName' in your local_c...
Validate that the browser config contains all the needed config
[ "Validate", "that", "the", "browser", "config", "contains", "all", "the", "needed", "config" ]
python
train
zaturox/glin
glin/zmq/messages.py
https://github.com/zaturox/glin/blob/55214a579c4e4b4d74765f3f6aa2eb815bac1c3b/glin/zmq/messages.py#L15-L17
def mainswitch_state(sequence_number, state): """Create a mainswitch.state message""" return MessageWriter().string("mainswitch.state").uint64(sequence_number).bool(state).get()
[ "def", "mainswitch_state", "(", "sequence_number", ",", "state", ")", ":", "return", "MessageWriter", "(", ")", ".", "string", "(", "\"mainswitch.state\"", ")", ".", "uint64", "(", "sequence_number", ")", ".", "bool", "(", "state", ")", ".", "get", "(", ")...
Create a mainswitch.state message
[ "Create", "a", "mainswitch", ".", "state", "message" ]
python
train
juju/charm-helpers
charmhelpers/contrib/openstack/amulet/utils.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/openstack/amulet/utils.py#L75-L100
def validate_endpoint_data(self, endpoints, admin_port, internal_port, public_port, expected, openstack_release=None): """Validate endpoint data. Pick the correct validator based on OpenStack release. Expected data should be in the v2 format: { 'id': id, 'region': region, 'adminurl': adminurl, 'internalurl': internalurl, 'publicurl': publicurl, 'service_id': service_id} """ validation_function = self.validate_v2_endpoint_data xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens') if openstack_release and openstack_release >= xenial_queens: validation_function = self.validate_v3_endpoint_data expected = { 'id': expected['id'], 'region': expected['region'], 'region_id': 'RegionOne', 'url': self.valid_url, 'interface': self.not_null, 'service_id': expected['service_id']} return validation_function(endpoints, admin_port, internal_port, public_port, expected)
[ "def", "validate_endpoint_data", "(", "self", ",", "endpoints", ",", "admin_port", ",", "internal_port", ",", "public_port", ",", "expected", ",", "openstack_release", "=", "None", ")", ":", "validation_function", "=", "self", ".", "validate_v2_endpoint_data", "xeni...
Validate endpoint data. Pick the correct validator based on OpenStack release. Expected data should be in the v2 format: { 'id': id, 'region': region, 'adminurl': adminurl, 'internalurl': internalurl, 'publicurl': publicurl, 'service_id': service_id}
[ "Validate", "endpoint", "data", ".", "Pick", "the", "correct", "validator", "based", "on", "OpenStack", "release", ".", "Expected", "data", "should", "be", "in", "the", "v2", "format", ":", "{", "id", ":", "id", "region", ":", "region", "adminurl", ":", ...
python
train
StackStorm/pybind
pybind/nos/v6_0_2f/rbridge_id/route_map/content/set_/ipv6/next_vrf/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v6_0_2f/rbridge_id/route_map/content/set_/ipv6/next_vrf/__init__.py#L92-L113
def _set_next_vrf_list(self, v, load=False): """ Setter method for next_vrf_list, mapped from YANG variable /rbridge_id/route_map/content/set/ipv6/next_vrf/next_vrf_list (list) If this variable is read-only (config: false) in the source YANG file, then _set_next_vrf_list is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_next_vrf_list() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("vrf next_hop",next_vrf_list.next_vrf_list, yang_name="next-vrf-list", rest_name="next-vrf-list", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='vrf next-hop', extensions={u'tailf-common': {u'callpoint': u'pbripv6vrf-cp', u'cli-drop-node-name': None, u'cli-suppress-mode': None}}), is_container='list', yang_name="next-vrf-list", rest_name="next-vrf-list", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'pbripv6vrf-cp', u'cli-drop-node-name': None, u'cli-suppress-mode': None}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='list', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """next_vrf_list must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("vrf next_hop",next_vrf_list.next_vrf_list, yang_name="next-vrf-list", rest_name="next-vrf-list", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='vrf next-hop', extensions={u'tailf-common': {u'callpoint': u'pbripv6vrf-cp', u'cli-drop-node-name': None, u'cli-suppress-mode': None}}), is_container='list', yang_name="next-vrf-list", rest_name="next-vrf-list", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': 
u'pbripv6vrf-cp', u'cli-drop-node-name': None, u'cli-suppress-mode': None}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='list', is_config=True)""", }) self.__next_vrf_list = t if hasattr(self, '_set'): self._set()
[ "def", "_set_next_vrf_list", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "...
Setter method for next_vrf_list, mapped from YANG variable /rbridge_id/route_map/content/set/ipv6/next_vrf/next_vrf_list (list) If this variable is read-only (config: false) in the source YANG file, then _set_next_vrf_list is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_next_vrf_list() directly.
[ "Setter", "method", "for", "next_vrf_list", "mapped", "from", "YANG", "variable", "/", "rbridge_id", "/", "route_map", "/", "content", "/", "set", "/", "ipv6", "/", "next_vrf", "/", "next_vrf_list", "(", "list", ")", "If", "this", "variable", "is", "read", ...
python
train
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_system.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_system.py#L55-L69
def get_system_uptime_output_show_system_uptime_hours(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_system_uptime = ET.Element("get_system_uptime") config = get_system_uptime output = ET.SubElement(get_system_uptime, "output") show_system_uptime = ET.SubElement(output, "show-system-uptime") rbridge_id_key = ET.SubElement(show_system_uptime, "rbridge-id") rbridge_id_key.text = kwargs.pop('rbridge_id') hours = ET.SubElement(show_system_uptime, "hours") hours.text = kwargs.pop('hours') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "get_system_uptime_output_show_system_uptime_hours", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_system_uptime", "=", "ET", ".", "Element", "(", "\"get_system_uptime\"", ")", "config", ...
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
iotile/coretools
iotilesensorgraph/iotile/sg/slot.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilesensorgraph/iotile/sg/slot.py#L112-L140
def FromEncoded(cls, bindata): """Create a slot identifier from an encoded binary descriptor. These binary descriptors are used to communicate slot targeting to an embedded device. They are exactly 8 bytes in length. Args: bindata (bytes): The 8-byte binary descriptor. Returns: SlotIdentifier """ if len(bindata) != 8: raise ArgumentError("Invalid binary slot descriptor with invalid length", length=len(bindata), expected=8, data=bindata) slot, match_op = struct.unpack("<B6xB", bindata) match_name = cls.KNOWN_MATCH_CODES.get(match_op) if match_name is None: raise ArgumentError("Unknown match operation specified in binary slot descriptor", operation=match_op, known_match_ops=cls.KNOWN_MATCH_CODES) if match_name == 'match_controller': return SlotIdentifier(controller=True) if match_name == 'match_slot': return SlotIdentifier(slot=slot) raise ArgumentError("Unsupported match operation in binary slot descriptor", match_op=match_name)
[ "def", "FromEncoded", "(", "cls", ",", "bindata", ")", ":", "if", "len", "(", "bindata", ")", "!=", "8", ":", "raise", "ArgumentError", "(", "\"Invalid binary slot descriptor with invalid length\"", ",", "length", "=", "len", "(", "bindata", ")", ",", "expecte...
Create a slot identifier from an encoded binary descriptor. These binary descriptors are used to communicate slot targeting to an embedded device. They are exactly 8 bytes in length. Args: bindata (bytes): The 8-byte binary descriptor. Returns: SlotIdentifier
[ "Create", "a", "slot", "identifier", "from", "an", "encoded", "binary", "descriptor", "." ]
python
train
tomplus/kubernetes_asyncio
kubernetes_asyncio/client/api/core_v1_api.py
https://github.com/tomplus/kubernetes_asyncio/blob/f9ab15317ec921409714c7afef11aeb0f579985d/kubernetes_asyncio/client/api/core_v1_api.py#L21563-L21587
def replace_namespaced_pod(self, name, namespace, body, **kwargs): # noqa: E501 """replace_namespaced_pod # noqa: E501 replace the specified Pod # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_pod(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Pod (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Pod body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Pod If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.replace_namespaced_pod_with_http_info(name, namespace, body, **kwargs) # noqa: E501 else: (data) = self.replace_namespaced_pod_with_http_info(name, namespace, body, **kwargs) # noqa: E501 return data
[ "def", "replace_namespaced_pod", "(", "self", ",", "name", ",", "namespace", ",", "body", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", "...
replace_namespaced_pod # noqa: E501 replace the specified Pod # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_pod(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Pod (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Pod body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Pod If the method is called asynchronously, returns the request thread.
[ "replace_namespaced_pod", "#", "noqa", ":", "E501" ]
python
train
thunder-project/thunder
thunder/images/images.py
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/images/images.py#L322-L341
def gaussian_filter(self, sigma=2, order=0): """ Spatially smooth images with a gaussian filter. Filtering will be applied to every image in the collection. Parameters ---------- sigma : scalar or sequence of scalars, default = 2 Size of the filter size as standard deviation in pixels. A sequence is interpreted as the standard deviation for each axis. A single scalar is applied equally to all axes. order : choice of 0 / 1 / 2 / 3 or sequence from same set, optional, default = 0 Order of the gaussian kernel, 0 is a gaussian, higher numbers correspond to derivatives of a gaussian. """ from scipy.ndimage.filters import gaussian_filter return self.map(lambda v: gaussian_filter(v, sigma, order), value_shape=self.value_shape)
[ "def", "gaussian_filter", "(", "self", ",", "sigma", "=", "2", ",", "order", "=", "0", ")", ":", "from", "scipy", ".", "ndimage", ".", "filters", "import", "gaussian_filter", "return", "self", ".", "map", "(", "lambda", "v", ":", "gaussian_filter", "(", ...
Spatially smooth images with a gaussian filter. Filtering will be applied to every image in the collection. Parameters ---------- sigma : scalar or sequence of scalars, default = 2 Size of the filter size as standard deviation in pixels. A sequence is interpreted as the standard deviation for each axis. A single scalar is applied equally to all axes. order : choice of 0 / 1 / 2 / 3 or sequence from same set, optional, default = 0 Order of the gaussian kernel, 0 is a gaussian, higher numbers correspond to derivatives of a gaussian.
[ "Spatially", "smooth", "images", "with", "a", "gaussian", "filter", "." ]
python
train
IS-ENES-Data/esgf-pid
esgfpid/rabbit/asynchronous/thread_builder.py
https://github.com/IS-ENES-Data/esgf-pid/blob/2f4909bb3ff79c0b6ed2932e0dd8b3bb6aec5e41/esgfpid/rabbit/asynchronous/thread_builder.py#L111-L184
def __start_waiting_for_events(self): ''' This waits until the whole chain of callback methods triggered by "trigger_connection_to_rabbit_etc()" has finished, and then starts waiting for publications. This is done by starting the ioloop. Note: In the pika usage example, these things are both called inside the run() method, so I wonder if this check-and-wait here is necessary. Maybe not. But the usage example does not implement a Thread, so it probably blocks during the opening of the connection. Here, as it is a different thread, the run() might get called before the __init__ has finished? I'd rather stay on the safe side, as my experience of threading in Python is limited. ''' # Start ioloop if connection object ready: if self.thread._connection is not None: try: logdebug(LOGGER, 'Starting ioloop...') logtrace(LOGGER, 'ioloop is owned by connection %s...', self.thread._connection) # Tell the main thread that we're now open for events. # As soon as the thread._connection object is not None anymore, it # can receive events. self.thread.tell_publisher_to_stop_waiting_for_thread_to_accept_events() self.thread.continue_gently_closing_if_applicable() self.thread._connection.ioloop.start() except PIDServerException as e: raise e # It seems that some connection problems do not cause # RabbitMQ to call any callback (on_connection_closed # or on_connection_error) - it just silently swallows the # problem. # So we need to manually trigger reconnection to the next # host here, which we do by manually calling the callback. # We start the ioloop, so it can handle the reconnection events, # or also receive events from the publisher in the meantime. except Exception as e: # This catches any error during connection startup and during the entire # time the ioloop runs, blocks and waits for events. 
time_passed = datetime.datetime.now() - self.__start_connect_time time_passed_seconds = time_passed.total_seconds() # Some pika errors: if isinstance(e, pika.exceptions.ProbableAuthenticationError): errorname = self.__make_error_name(e, 'e.g. wrong user or password') elif isinstance(e, pika.exceptions.ProbableAccessDeniedError): errorname = self.__make_error_name(e, 'e.g. wrong virtual host name') elif isinstance(e, pika.exceptions.IncompatibleProtocolError): errorname = self.__make_error_name(e, 'e.g. trying TLS/SSL on wrong port') # Other errors: else: errorname = self.__make_error_name(e) logdebug(LOGGER, 'Unexpected error during event listener\'s lifetime (after %s seconds): %s', time_passed_seconds, errorname) # Now trigger reconnection: self.statemachine.set_to_waiting_to_be_available() self.on_connection_error(self.thread._connection, errorname) self.thread._connection.ioloop.start() else: # I'm quite sure that this cannot happen, as the connection object # is created in "trigger_connection_...()" and thus exists, no matter # if the actual connection to RabbitMQ succeeded (yet) or not. logdebug(LOGGER, 'This cannot happen: Connection object is not ready.') logerror(LOGGER, 'Cannot happen. Cannot properly start the thread. Connection object is not ready.')
[ "def", "__start_waiting_for_events", "(", "self", ")", ":", "# Start ioloop if connection object ready:", "if", "self", ".", "thread", ".", "_connection", "is", "not", "None", ":", "try", ":", "logdebug", "(", "LOGGER", ",", "'Starting ioloop...'", ")", "logtrace", ...
This waits until the whole chain of callback methods triggered by "trigger_connection_to_rabbit_etc()" has finished, and then starts waiting for publications. This is done by starting the ioloop. Note: In the pika usage example, these things are both called inside the run() method, so I wonder if this check-and-wait here is necessary. Maybe not. But the usage example does not implement a Thread, so it probably blocks during the opening of the connection. Here, as it is a different thread, the run() might get called before the __init__ has finished? I'd rather stay on the safe side, as my experience of threading in Python is limited.
[ "This", "waits", "until", "the", "whole", "chain", "of", "callback", "methods", "triggered", "by", "trigger_connection_to_rabbit_etc", "()", "has", "finished", "and", "then", "starts", "waiting", "for", "publications", ".", "This", "is", "done", "by", "starting", ...
python
train
buckket/twtxt
twtxt/cache.py
https://github.com/buckket/twtxt/blob/6c8ad8ef3cbcf0dd335a12285d8b6bbdf93ce851/twtxt/cache.py#L80-L85
def is_cached(self, url): """Checks if specified URL is cached.""" try: return True if url in self.cache else False except TypeError: return False
[ "def", "is_cached", "(", "self", ",", "url", ")", ":", "try", ":", "return", "True", "if", "url", "in", "self", ".", "cache", "else", "False", "except", "TypeError", ":", "return", "False" ]
Checks if specified URL is cached.
[ "Checks", "if", "specified", "URL", "is", "cached", "." ]
python
valid
saltstack/salt
salt/modules/azurearm_network.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/azurearm_network.py#L87-L115
def check_dns_name_availability(name, region, **kwargs): ''' .. versionadded:: 2019.2.0 Check whether a domain name in the current zone is available for use. :param name: The DNS name to query. :param region: The region to query for the DNS name in question. CLI Example: .. code-block:: bash salt-call azurearm_network.check_dns_name_availability testdnsname westus ''' netconn = __utils__['azurearm.get_client']('network', **kwargs) try: check_dns_name = netconn.check_dns_name_availability( location=region, domain_name_label=name ) result = check_dns_name.as_dict() except CloudError as exc: __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs) result = {'error': str(exc)} return result
[ "def", "check_dns_name_availability", "(", "name", ",", "region", ",", "*", "*", "kwargs", ")", ":", "netconn", "=", "__utils__", "[", "'azurearm.get_client'", "]", "(", "'network'", ",", "*", "*", "kwargs", ")", "try", ":", "check_dns_name", "=", "netconn",...
.. versionadded:: 2019.2.0 Check whether a domain name in the current zone is available for use. :param name: The DNS name to query. :param region: The region to query for the DNS name in question. CLI Example: .. code-block:: bash salt-call azurearm_network.check_dns_name_availability testdnsname westus
[ "..", "versionadded", "::", "2019", ".", "2", ".", "0" ]
python
train
nvbn/thefuck
thefuck/shells/generic.py
https://github.com/nvbn/thefuck/blob/40ab4eb62db57627bff10cf029d29c94704086a2/thefuck/shells/generic.py#L54-L69
def _get_history_lines(self): """Returns list of history entries.""" history_file_name = self._get_history_file_name() if os.path.isfile(history_file_name): with io.open(history_file_name, 'r', encoding='utf-8', errors='ignore') as history_file: lines = history_file.readlines() if settings.history_limit: lines = lines[-settings.history_limit:] for line in lines: prepared = self._script_from_history(line) \ .strip() if prepared: yield prepared
[ "def", "_get_history_lines", "(", "self", ")", ":", "history_file_name", "=", "self", ".", "_get_history_file_name", "(", ")", "if", "os", ".", "path", ".", "isfile", "(", "history_file_name", ")", ":", "with", "io", ".", "open", "(", "history_file_name", ",...
Returns list of history entries.
[ "Returns", "list", "of", "history", "entries", "." ]
python
train
rigetti/grove
grove/amplification/grover.py
https://github.com/rigetti/grove/blob/dc6bf6ec63e8c435fe52b1e00f707d5ce4cdb9b3/grove/amplification/grover.py#L72-L85
def _init_attr(self, bitstring_map: Dict[str, int]) -> None: """ Initializes an instance of Grover's Algorithm given a bitstring_map. :param bitstring_map: dict with string keys corresponding to bitstrings, and integer values corresponding to the desired phase on the output state. :type bitstring_map: Dict[String, Int] :return: None """ self.bit_map = bitstring_map self.unitary_function_mapping = self._compute_grover_oracle_matrix(bitstring_map) self.n_qubits = self.unitary_function_mapping.shape[0] self.qubits = list(range(int(np.log2(self.n_qubits)))) self._construct_grover_circuit()
[ "def", "_init_attr", "(", "self", ",", "bitstring_map", ":", "Dict", "[", "str", ",", "int", "]", ")", "->", "None", ":", "self", ".", "bit_map", "=", "bitstring_map", "self", ".", "unitary_function_mapping", "=", "self", ".", "_compute_grover_oracle_matrix", ...
Initializes an instance of Grover's Algorithm given a bitstring_map. :param bitstring_map: dict with string keys corresponding to bitstrings, and integer values corresponding to the desired phase on the output state. :type bitstring_map: Dict[String, Int] :return: None
[ "Initializes", "an", "instance", "of", "Grover", "s", "Algorithm", "given", "a", "bitstring_map", "." ]
python
train
cakebread/yolk
yolk/cli.py
https://github.com/cakebread/yolk/blob/ee8c9f529a542d9c5eff4fe69b9c7906c802e4d8/yolk/cli.py#L836-L854
def show_entry_map(self): """ Show entry map for a package @param dist: package @param type: srting @returns: 0 for success or 1 if error """ pprinter = pprint.PrettyPrinter() try: entry_map = pkg_resources.get_entry_map(self.options.show_entry_map) if entry_map: pprinter.pprint(entry_map) except pkg_resources.DistributionNotFound: self.logger.error("Distribution not found: %s" \ % self.options.show_entry_map) return 1 return 0
[ "def", "show_entry_map", "(", "self", ")", ":", "pprinter", "=", "pprint", ".", "PrettyPrinter", "(", ")", "try", ":", "entry_map", "=", "pkg_resources", ".", "get_entry_map", "(", "self", ".", "options", ".", "show_entry_map", ")", "if", "entry_map", ":", ...
Show entry map for a package @param dist: package @param type: srting @returns: 0 for success or 1 if error
[ "Show", "entry", "map", "for", "a", "package" ]
python
train
Blizzard/s2client-proto
setup.py
https://github.com/Blizzard/s2client-proto/blob/e38efed74c03bec90f74b330ea1adda9215e655f/setup.py#L36-L49
def compile_proto(source, python_out, proto_path): """Invoke Protocol Compiler to generate python from given source .proto.""" if not protoc: sys.exit('protoc not found. Is the protobuf-compiler installed?\n') protoc_command = [ protoc, '--proto_path', proto_path, '--python_out', python_out, source, ] if subprocess.call(protoc_command) != 0: sys.exit('Make sure your protoc version >= 2.6. You can use a custom ' 'protoc by setting the PROTOC environment variable.')
[ "def", "compile_proto", "(", "source", ",", "python_out", ",", "proto_path", ")", ":", "if", "not", "protoc", ":", "sys", ".", "exit", "(", "'protoc not found. Is the protobuf-compiler installed?\\n'", ")", "protoc_command", "=", "[", "protoc", ",", "'--proto_path'"...
Invoke Protocol Compiler to generate python from given source .proto.
[ "Invoke", "Protocol", "Compiler", "to", "generate", "python", "from", "given", "source", ".", "proto", "." ]
python
train
explosion/spaCy
spacy/cli/train.py
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/cli/train.py#L407-L418
def _load_pretrained_tok2vec(nlp, loc): """Load pre-trained weights for the 'token-to-vector' part of the component models, which is typically a CNN. See 'spacy pretrain'. Experimental. """ with loc.open("rb") as file_: weights_data = file_.read() loaded = [] for name, component in nlp.pipeline: if hasattr(component, "model") and hasattr(component.model, "tok2vec"): component.tok2vec.from_bytes(weights_data) loaded.append(name) return loaded
[ "def", "_load_pretrained_tok2vec", "(", "nlp", ",", "loc", ")", ":", "with", "loc", ".", "open", "(", "\"rb\"", ")", "as", "file_", ":", "weights_data", "=", "file_", ".", "read", "(", ")", "loaded", "=", "[", "]", "for", "name", ",", "component", "i...
Load pre-trained weights for the 'token-to-vector' part of the component models, which is typically a CNN. See 'spacy pretrain'. Experimental.
[ "Load", "pre", "-", "trained", "weights", "for", "the", "token", "-", "to", "-", "vector", "part", "of", "the", "component", "models", "which", "is", "typically", "a", "CNN", ".", "See", "spacy", "pretrain", ".", "Experimental", "." ]
python
train
astropy/astropy-healpix
astropy_healpix/healpy.py
https://github.com/astropy/astropy-healpix/blob/c7fbe36305aadda9946dd37969d5dcb9ff6b1440/astropy_healpix/healpy.py#L77-L83
def nside2pixarea(nside, degrees=False): """Drop-in replacement for healpy `~healpy.pixelfunc.nside2pixarea`.""" area = nside_to_pixel_area(nside) if degrees: return area.to(u.deg ** 2).value else: return area.to(u.sr).value
[ "def", "nside2pixarea", "(", "nside", ",", "degrees", "=", "False", ")", ":", "area", "=", "nside_to_pixel_area", "(", "nside", ")", "if", "degrees", ":", "return", "area", ".", "to", "(", "u", ".", "deg", "**", "2", ")", ".", "value", "else", ":", ...
Drop-in replacement for healpy `~healpy.pixelfunc.nside2pixarea`.
[ "Drop", "-", "in", "replacement", "for", "healpy", "~healpy", ".", "pixelfunc", ".", "nside2pixarea", "." ]
python
train
goshuirc/irc
girc/client.py
https://github.com/goshuirc/irc/blob/d6a5e3e04d337566c009b087f108cd76f9e122cc/girc/client.py#L424-L434
def start(self): """Start our welcome!""" if ('sasl' in self.capabilities.enabled and self._sasl_info and (not self.capabilities.available['sasl']['value'] or (self.capabilities.available['sasl']['value'] and self._sasl_info['method'] in self.capabilities.available['sasl']['value']))): self.start_sasl() else: self.send('CAP', params=['END']) self.send_welcome()
[ "def", "start", "(", "self", ")", ":", "if", "(", "'sasl'", "in", "self", ".", "capabilities", ".", "enabled", "and", "self", ".", "_sasl_info", "and", "(", "not", "self", ".", "capabilities", ".", "available", "[", "'sasl'", "]", "[", "'value'", "]", ...
Start our welcome!
[ "Start", "our", "welcome!" ]
python
train
gwastro/pycbc
pycbc/_version_helper.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/_version_helper.py#L106-L113
def get_git_branch(git_path='git'): """Returns the name of the current git branch """ branch_match = call((git_path, 'rev-parse', '--symbolic-full-name', 'HEAD')) if branch_match == "HEAD": return None else: return os.path.basename(branch_match)
[ "def", "get_git_branch", "(", "git_path", "=", "'git'", ")", ":", "branch_match", "=", "call", "(", "(", "git_path", ",", "'rev-parse'", ",", "'--symbolic-full-name'", ",", "'HEAD'", ")", ")", "if", "branch_match", "==", "\"HEAD\"", ":", "return", "None", "e...
Returns the name of the current git branch
[ "Returns", "the", "name", "of", "the", "current", "git", "branch" ]
python
train
dpkp/kafka-python
kafka/consumer/group.py
https://github.com/dpkp/kafka-python/blob/f6a8a38937688ea2cc5dc13d3d1039493be5c9b5/kafka/consumer/group.py#L437-L460
def close(self, autocommit=True): """Close the consumer, waiting indefinitely for any needed cleanup. Keyword Arguments: autocommit (bool): If auto-commit is configured for this consumer, this optional flag causes the consumer to attempt to commit any pending consumed offsets prior to close. Default: True """ if self._closed: return log.debug("Closing the KafkaConsumer.") self._closed = True self._coordinator.close(autocommit=autocommit) self._metrics.close() self._client.close() try: self.config['key_deserializer'].close() except AttributeError: pass try: self.config['value_deserializer'].close() except AttributeError: pass log.debug("The KafkaConsumer has closed.")
[ "def", "close", "(", "self", ",", "autocommit", "=", "True", ")", ":", "if", "self", ".", "_closed", ":", "return", "log", ".", "debug", "(", "\"Closing the KafkaConsumer.\"", ")", "self", ".", "_closed", "=", "True", "self", ".", "_coordinator", ".", "c...
Close the consumer, waiting indefinitely for any needed cleanup. Keyword Arguments: autocommit (bool): If auto-commit is configured for this consumer, this optional flag causes the consumer to attempt to commit any pending consumed offsets prior to close. Default: True
[ "Close", "the", "consumer", "waiting", "indefinitely", "for", "any", "needed", "cleanup", "." ]
python
train
blockstack/pybitcoin
pybitcoin/services/chain_com.py
https://github.com/blockstack/pybitcoin/blob/92c8da63c40f7418594b1ce395990c3f5a4787cc/pybitcoin/services/chain_com.py#L59-L84
def broadcast_transaction(hex_tx, blockchain_client): """ Dispatch a raw hex transaction to the network. """ if not isinstance(blockchain_client, ChainComClient): raise Exception('A ChainComClient object is required') auth = blockchain_client.auth if not auth or len(auth) != 2: raise Exception('ChainComClient object must have auth credentials.') url = CHAIN_API_BASE_URL + '/bitcoin/transactions/send' payload = json.dumps({ 'signed_hex': hex_tx }) r = requests.post(url, data=payload, auth=auth) try: data = r.json() except ValueError, e: raise Exception('Received non-JSON from chain.com.') if 'transaction_hash' in data: reply = {} reply['tx_hash'] = data['transaction_hash'] reply['success'] = True return reply else: raise Exception('Tx hash missing from chain.com response: ' + str(data) + '\noriginal: ' + str(payload))
[ "def", "broadcast_transaction", "(", "hex_tx", ",", "blockchain_client", ")", ":", "if", "not", "isinstance", "(", "blockchain_client", ",", "ChainComClient", ")", ":", "raise", "Exception", "(", "'A ChainComClient object is required'", ")", "auth", "=", "blockchain_c...
Dispatch a raw hex transaction to the network.
[ "Dispatch", "a", "raw", "hex", "transaction", "to", "the", "network", "." ]
python
train
ioos/compliance-checker
compliance_checker/base.py
https://github.com/ioos/compliance-checker/blob/ee89c27b0daade58812489a2da3aa3b6859eafd9/compliance_checker/base.py#L160-L170
def serialize(self): ''' Returns a serializable dictionary that represents the result object ''' return { 'name' : self.name, 'weight' : self.weight, 'value' : self.value, 'msgs' : self.msgs, 'children' : [i.serialize() for i in self.children] }
[ "def", "serialize", "(", "self", ")", ":", "return", "{", "'name'", ":", "self", ".", "name", ",", "'weight'", ":", "self", ".", "weight", ",", "'value'", ":", "self", ".", "value", ",", "'msgs'", ":", "self", ".", "msgs", ",", "'children'", ":", "...
Returns a serializable dictionary that represents the result object
[ "Returns", "a", "serializable", "dictionary", "that", "represents", "the", "result", "object" ]
python
train
gawel/irc3
irc3/__init__.py
https://github.com/gawel/irc3/blob/cd27840a5809a1f803dc620860fe75d83d2a2ec8/irc3/__init__.py#L258-L267
def ctcp(self, target, message, nowait=False): """send a ctcp to target""" if target and message: messages = utils.split_message(message, self.config.max_length) f = None for message in messages: f = self.send_line('PRIVMSG %s :\x01%s\x01' % (target, message), nowait=nowait) return f
[ "def", "ctcp", "(", "self", ",", "target", ",", "message", ",", "nowait", "=", "False", ")", ":", "if", "target", "and", "message", ":", "messages", "=", "utils", ".", "split_message", "(", "message", ",", "self", ".", "config", ".", "max_length", ")",...
send a ctcp to target
[ "send", "a", "ctcp", "to", "target" ]
python
train
Oneiroe/PySimpleAutomata
PySimpleAutomata/AFW.py
https://github.com/Oneiroe/PySimpleAutomata/blob/0f9f2705fd8ddd5d8118bc31552a640f5d00c359/PySimpleAutomata/AFW.py#L335-L365
def rename_afw_states(afw: dict, suffix: str): """ Side effect on input! Renames all the states of the AFW adding a **suffix**. It is an utility function used during testing to avoid automata to have states with names in common. Avoid suffix that can lead to special name like "as", "and",... :param dict afw: input AFW. :param str suffix: string to be added at beginning of each state name. """ conversion_dict = {} new_states = set() new_accepting = set() for state in afw['states']: conversion_dict[state] = '' + suffix + state new_states.add('' + suffix + state) if state in afw['accepting_states']: new_accepting.add('' + suffix + state) afw['states'] = new_states afw['initial_state'] = '' + suffix + afw['initial_state'] afw['accepting_states'] = new_accepting new_transitions = {} for transition in afw['transitions']: new_transition = __replace_all(conversion_dict, transition[0]) new_transitions[new_transition, transition[1]] = \ __replace_all(conversion_dict, afw['transitions'][transition]) afw['transitions'] = new_transitions
[ "def", "rename_afw_states", "(", "afw", ":", "dict", ",", "suffix", ":", "str", ")", ":", "conversion_dict", "=", "{", "}", "new_states", "=", "set", "(", ")", "new_accepting", "=", "set", "(", ")", "for", "state", "in", "afw", "[", "'states'", "]", ...
Side effect on input! Renames all the states of the AFW adding a **suffix**. It is an utility function used during testing to avoid automata to have states with names in common. Avoid suffix that can lead to special name like "as", "and",... :param dict afw: input AFW. :param str suffix: string to be added at beginning of each state name.
[ "Side", "effect", "on", "input!", "Renames", "all", "the", "states", "of", "the", "AFW", "adding", "a", "**", "suffix", "**", "." ]
python
train
twilio/twilio-python
twilio/rest/video/v1/__init__.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/video/v1/__init__.py#L37-L43
def composition_hooks(self): """ :rtype: twilio.rest.video.v1.composition_hook.CompositionHookList """ if self._composition_hooks is None: self._composition_hooks = CompositionHookList(self) return self._composition_hooks
[ "def", "composition_hooks", "(", "self", ")", ":", "if", "self", ".", "_composition_hooks", "is", "None", ":", "self", ".", "_composition_hooks", "=", "CompositionHookList", "(", "self", ")", "return", "self", ".", "_composition_hooks" ]
:rtype: twilio.rest.video.v1.composition_hook.CompositionHookList
[ ":", "rtype", ":", "twilio", ".", "rest", ".", "video", ".", "v1", ".", "composition_hook", ".", "CompositionHookList" ]
python
train
heikomuller/sco-datastore
scodata/__init__.py
https://github.com/heikomuller/sco-datastore/blob/7180a6b51150667e47629da566aedaa742e39342/scodata/__init__.py#L1086-L1107
def subjects_create(self, filename): """Create subject from given data files. Expects the file to be a Freesurfer archive. Raises ValueError if given file is not a valid subject file. Parameters ---------- filename : File-type object Freesurfer archive file Returns ------- SubjectHandle Handle for created subject in database """ # Ensure that the file name has a valid archive suffix if get_filename_suffix(filename, ARCHIVE_SUFFIXES) is None: raise ValueError('invalid file suffix: ' + os.path.basename(os.path.normpath(filename))) # Create subject from archive. Raises exception if file is not a valid # subject archive return self.subjects.upload_file(filename)
[ "def", "subjects_create", "(", "self", ",", "filename", ")", ":", "# Ensure that the file name has a valid archive suffix", "if", "get_filename_suffix", "(", "filename", ",", "ARCHIVE_SUFFIXES", ")", "is", "None", ":", "raise", "ValueError", "(", "'invalid file suffix: '"...
Create subject from given data files. Expects the file to be a Freesurfer archive. Raises ValueError if given file is not a valid subject file. Parameters ---------- filename : File-type object Freesurfer archive file Returns ------- SubjectHandle Handle for created subject in database
[ "Create", "subject", "from", "given", "data", "files", ".", "Expects", "the", "file", "to", "be", "a", "Freesurfer", "archive", "." ]
python
train
thombashi/pytablereader
pytablereader/json/formatter.py
https://github.com/thombashi/pytablereader/blob/bc3c057a2cc775bcce690e0e9019c2907b638101/pytablereader/json/formatter.py#L38-L46
def _validate_source_data(self): """ :raises ValidationError: """ try: jsonschema.validate(self._buffer, self._schema) except jsonschema.ValidationError as e: raise ValidationError(e)
[ "def", "_validate_source_data", "(", "self", ")", ":", "try", ":", "jsonschema", ".", "validate", "(", "self", ".", "_buffer", ",", "self", ".", "_schema", ")", "except", "jsonschema", ".", "ValidationError", "as", "e", ":", "raise", "ValidationError", "(", ...
:raises ValidationError:
[ ":", "raises", "ValidationError", ":" ]
python
train