repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
Yubico/python-pyhsm
pyhsm/tools/generate_keys.py
https://github.com/Yubico/python-pyhsm/blob/b6e2744d1ea15c352a0fc1d6ebc5950026b71311/pyhsm/tools/generate_keys.py#L42-L98
def parse_args():
    """
    Parse the command line arguments
    """
    parser = argparse.ArgumentParser(
        description="Generate secrets for YubiKeys using YubiHSM",
        add_help=True,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    # (flags, keyword arguments) for every supported option, registered below.
    option_specs = [
        (('-D', '--device'),
         dict(dest='device', default=default_device, required=False,
              help='YubiHSM device')),
        (('-O', '--output-dir', '--aead-dir'),
         dict(dest='output_dir', default=default_dir, required=False,
              help='Output directory (AEAD base dir)')),
        (('-c', '--count'),
         dict(dest='count', type=int, default=1, required=False,
              help='Number of secrets to generate')),
        (('-v', '--verbose'),
         dict(dest='verbose', action='store_true', default=False,
              help='Enable verbose operation')),
        (('--public-id-chars',),
         dict(dest='public_id_chars', type=int, default=12, required=False,
              help='Number of chars in generated public ids')),
        (('--key-handles',),
         dict(dest='key_handles', nargs='+', required=True,
              help='Key handles to encrypt the generated secrets with')),
        (('--start-public-id',),
         dict(dest='start_id', required=True,
              help='The first public id to generate AEAD for')),
        (('--random-nonce',),
         dict(dest='random_nonce', required=False, action='store_true',
              default=False,
              help='Let the HSM generate nonce')),
    ]
    for flags, kwargs in option_specs:
        parser.add_argument(*flags, **kwargs)
    return parser.parse_args()
[ "def", "parse_args", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "\"Generate secrets for YubiKeys using YubiHSM\"", ",", "add_help", "=", "True", ",", "formatter_class", "=", "argparse", ".", "ArgumentDefaultsHelpFormatter",...
Parse the command line arguments
[ "Parse", "the", "command", "line", "arguments" ]
python
train
saltstack/salt
salt/modules/boto_rds.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_rds.py#L780-L819
def describe_parameter_group(name, Filters=None, MaxRecords=None, Marker=None,
                             region=None, key=None, keyid=None, profile=None):
    '''
    Returns a list of `DBParameterGroup` descriptions.

    CLI example to description of parameter group::

        salt myminion boto_rds.describe_parameter_group parametergroupname\
                region=us-east-1
    '''
    res = __salt__['boto_rds.parameter_group_exists'](name, tags=None,
                                                      region=region, key=key,
                                                      keyid=keyid,
                                                      profile=profile)
    if not res.get('exists'):
        return {'exists': bool(res)}

    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        if not conn:
            return {'results': bool(conn)}

        kwargs = {}
        # NOTE: the previous implementation looped `for key in (...)`, which
        # shadowed (and clobbered) the `key` credential parameter; use
        # explicit checks instead.
        if Marker is not None:
            kwargs['Marker'] = str(Marker)  # future lint: disable=blacklisted-function
        if Filters is not None:
            # Filters must be passed through unchanged: boto3 expects a list
            # of {'Name': ..., 'Values': [...]} dicts, not its str() form
            # (which the old code produced, breaking the API call).
            kwargs['Filters'] = Filters
        if MaxRecords is not None:
            kwargs['MaxRecords'] = int(MaxRecords)

        info = conn.describe_db_parameter_groups(DBParameterGroupName=name,
                                                 **kwargs)
        if not info:
            return {'results': bool(info),
                    'message': 'Failed to get RDS description for group {0}.'.format(name)}
        return {'results': bool(info),
                'message': 'Got RDS description for group {0}.'.format(name)}
    except ClientError as e:
        return {'error': __utils__['boto3.get_error'](e)}
[ "def", "describe_parameter_group", "(", "name", ",", "Filters", "=", "None", ",", "MaxRecords", "=", "None", ",", "Marker", "=", "None", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ...
Returns a list of `DBParameterGroup` descriptions. CLI example to description of parameter group:: salt myminion boto_rds.describe_parameter_group parametergroupname\ region=us-east-1
[ "Returns", "a", "list", "of", "DBParameterGroup", "descriptions", ".", "CLI", "example", "to", "description", "of", "parameter", "group", "::" ]
python
train
resync/resync
resync/resource_list.py
https://github.com/resync/resync/blob/98292c17b2c00f2d6f5191c6ab51fef8c292a018/resync/resource_list.py#L162-L206
def compare(self, src):
    """Compare this ResourceList object with that specified as src.

    The parameter src must also be a ResourceList object, it is assumed
    to be the source, and the current object is the destination. This is
    written to work for any objects in self and src, provided that the
    == operator can be used to compare them.

    The functioning of this method depends on the iterators for self
    and src providing access to the resource objects in URI order.

    Returns a 4-tuple of ResourceList objects:
    (same, updated, deleted, created).
    """
    dst_iter = iter(self.resources)
    src_iter = iter(src.resources)
    same = ResourceList()
    updated = ResourceList()
    deleted = ResourceList()
    created = ResourceList()
    dst_cur = next(dst_iter, None)
    src_cur = next(src_iter, None)
    # Merge-walk the two URI-ordered streams in lockstep.
    while ((dst_cur is not None) and (src_cur is not None)):
        # print 'dst='+dst_cur+' src='+src_cur
        if (dst_cur.uri == src_cur.uri):
            # Same URI on both sides: identical object -> same,
            # otherwise the source version wins -> updated.
            if (dst_cur == src_cur):
                same.add(dst_cur)
            else:
                updated.add(src_cur)
            dst_cur = next(dst_iter, None)
            src_cur = next(src_iter, None)
        elif (not src_cur or dst_cur.uri < src_cur.uri):
            # URI only in destination -> deleted from source.
            # NOTE(review): `not src_cur` looks unreachable here (the loop
            # condition guarantees src_cur is not None) unless resource
            # objects can be falsy -- confirm before simplifying.
            deleted.add(dst_cur)
            dst_cur = next(dst_iter, None)
        elif (not dst_cur or dst_cur.uri > src_cur.uri):
            # URI only in source -> newly created.
            created.add(src_cur)
            src_cur = next(src_iter, None)
        else:
            raise Exception("this should not be possible")
    # what do we have leftover in src or dst lists?
    while (dst_cur is not None):
        deleted.add(dst_cur)
        dst_cur = next(dst_iter, None)
    while (src_cur is not None):
        created.add(src_cur)
        src_cur = next(src_iter, None)
    # have now gone through both lists
    return(same, updated, deleted, created)
[ "def", "compare", "(", "self", ",", "src", ")", ":", "dst_iter", "=", "iter", "(", "self", ".", "resources", ")", "src_iter", "=", "iter", "(", "src", ".", "resources", ")", "same", "=", "ResourceList", "(", ")", "updated", "=", "ResourceList", "(", ...
Compare this ResourceList object with that specified as src. The parameter src must also be a ResourceList object, it is assumed to be the source, and the current object is the destination. This written to work for any objects in self and sc, provided that the == operator can be used to compare them. The functioning of this method depends on the iterators for self and src providing access to the resource objects in URI order.
[ "Compare", "this", "ResourceList", "object", "with", "that", "specified", "as", "src", "." ]
python
train
boakley/robotframework-lint
rflint/rflint.py
https://github.com/boakley/robotframework-lint/blob/3e3578f4e39af9af9961aa0a715f146b74474091/rflint/rflint.py#L178-L184
def list_rules(self):
    """Print a list of all rules"""
    rules_by_name = sorted(self.all_rules, key=lambda r: r.name)
    for a_rule in rules_by_name:
        print(a_rule)
        if not self.args.verbose:
            continue
        # Verbose mode: also print the rule's docstring, indented.
        for doc_line in a_rule.doc.split("\n"):
            print("    ", doc_line)
[ "def", "list_rules", "(", "self", ")", ":", "for", "rule", "in", "sorted", "(", "self", ".", "all_rules", ",", "key", "=", "lambda", "rule", ":", "rule", ".", "name", ")", ":", "print", "(", "rule", ")", "if", "self", ".", "args", ".", "verbose", ...
Print a list of all rules
[ "Print", "a", "list", "of", "all", "rules" ]
python
valid
indico/indico-plugins
livesync/indico_livesync/util.py
https://github.com/indico/indico-plugins/blob/fe50085cc63be9b8161b09539e662e7b04e4b38e/livesync/indico_livesync/util.py#L82-L85
def get_excluded_categories():
    """Get excluded category IDs."""
    from indico_livesync.plugin import LiveSyncPlugin
    # Normalize every configured entry's id to an int and deduplicate.
    excluded = LiveSyncPlugin.settings.get('excluded_categories')
    return set(int(entry['id']) for entry in excluded)
[ "def", "get_excluded_categories", "(", ")", ":", "from", "indico_livesync", ".", "plugin", "import", "LiveSyncPlugin", "return", "{", "int", "(", "x", "[", "'id'", "]", ")", "for", "x", "in", "LiveSyncPlugin", ".", "settings", ".", "get", "(", "'excluded_cat...
Get excluded category IDs.
[ "Get", "excluded", "category", "IDs", "." ]
python
train
Fantomas42/django-blog-zinnia
zinnia/url_shortener/__init__.py
https://github.com/Fantomas42/django-blog-zinnia/blob/b4949304b104a8e1a7a7a0773cbfd024313c3a15/zinnia/url_shortener/__init__.py#L11-L26
def get_url_shortener():
    """
    Return the selected URL shortener backend.
    """
    try:
        module = import_module(URL_SHORTENER_BACKEND)
        return getattr(module, 'backend')
    except (ImportError, AttributeError):
        # Module missing or has no `backend` attribute: warn and fall back.
        warnings.warn('%s backend cannot be imported' % URL_SHORTENER_BACKEND,
                      RuntimeWarning)
    except ImproperlyConfigured as e:
        warnings.warn(str(e), RuntimeWarning)
    return default_backend
[ "def", "get_url_shortener", "(", ")", ":", "try", ":", "backend_module", "=", "import_module", "(", "URL_SHORTENER_BACKEND", ")", "backend", "=", "getattr", "(", "backend_module", ",", "'backend'", ")", "except", "(", "ImportError", ",", "AttributeError", ")", "...
Return the selected URL shortener backend.
[ "Return", "the", "selected", "URL", "shortener", "backend", "." ]
python
train
inveniosoftware/invenio-accounts
invenio_accounts/admin.py
https://github.com/inveniosoftware/invenio-accounts/blob/b0d2f0739b00dbefea22ca15d7d374a1b4a63aec/invenio_accounts/admin.py#L93-L111
def action_inactivate(self, ids):
    """Inactivate users."""
    try:
        n_deactivated = 0
        for uid in ids:
            account = _datastore.get_user(uid)
            if account is None:
                raise ValueError(_("Cannot find user."))
            if _datastore.deactivate_user(account):
                n_deactivated += 1
        if n_deactivated > 0:
            flash(_('User(s) were successfully inactivated.'), 'success')
    except Exception as exc:
        # Let the admin view decide whether to surface the exception;
        # otherwise log it and report a generic failure to the user.
        if not self.handle_view_exception(exc):
            raise
        current_app.logger.exception(str(exc))  # pragma: no cover
        flash(_('Failed to inactivate users.'), 'error')
[ "def", "action_inactivate", "(", "self", ",", "ids", ")", ":", "try", ":", "count", "=", "0", "for", "user_id", "in", "ids", ":", "user", "=", "_datastore", ".", "get_user", "(", "user_id", ")", "if", "user", "is", "None", ":", "raise", "ValueError", ...
Inactivate users.
[ "Inactivate", "users", "." ]
python
train
childsish/sofia
sofia/execution_engines/workers/step_worker.py
https://github.com/childsish/sofia/blob/39b992f143e2610a62ad751568caa5ca9aaf0224/sofia/execution_engines/workers/step_worker.py#L4-L33
def step_worker(step, pipe, max_entities):
    """ All messages follow the form: <message>, <data>

    Valid messages
    --------------
    run, <input_data>
    finalise, None
    next, None
    stop, None
    """
    # NOTE(review): `state.next()` below uses the Python 2 iterator protocol
    # (or an object that exposes a .next() method) -- confirm before running
    # under Python 3, where built-in iterators only support next(state).
    state = None
    while True:
        message, input = pipe.recv()
        if message == 'run':
            # Start processing a new batch; `state` is whatever step.run
            # returns (iterated via .next() below).
            state = step.run(input, max_entities)
        elif message == 'finalise':
            state = step.finalise(max_entities)
        elif message == 'next':
            try:
                data = state.next()
                # Debug trace: step name, then the produced keys and values.
                sys.stderr.write(' {}\n'.format(step.name))
                sys.stderr.write(' * {}\n'.format(', '.join(key.name for key in data)))
                sys.stderr.write(' * {}\n'.format(', '.join(str(value) for value in data.values())))
                pipe.send(('data', {'step': step, 'data': data}))
            except StopIteration:
                # Current batch exhausted; notify the engine and reset.
                pipe.send(('stop', {'step': step}))
                state = None
        elif message == 'stop':
            break
[ "def", "step_worker", "(", "step", ",", "pipe", ",", "max_entities", ")", ":", "state", "=", "None", "while", "True", ":", "message", ",", "input", "=", "pipe", ".", "recv", "(", ")", "if", "message", "==", "'run'", ":", "state", "=", "step", ".", ...
All messages follow the form: <message>, <data> Valid messages -------------- run, <input_data> finalise, None next, None stop, None
[ "All", "messages", "follow", "the", "form", ":", "<message", ">", "<data", ">" ]
python
train
jrigden/pyPodcastParser
pyPodcastParser/Podcast.py
https://github.com/jrigden/pyPodcastParser/blob/b21e027bb56ec77986d76fc1990f4e420c6de869/pyPodcastParser/Podcast.py#L403-L413
def set_owner(self):
    """Parses owner name and email then sets value"""
    owner = self.soup.find('itunes:owner')

    def _child_text(tag_name):
        # A missing <itunes:owner> (None) or a missing child tag both raise
        # AttributeError on attribute access; treat either as "not present".
        try:
            return owner.find(tag_name).string
        except AttributeError:
            return None

    self.owner_name = _child_text('itunes:name')
    self.owner_email = _child_text('itunes:email')
[ "def", "set_owner", "(", "self", ")", ":", "owner", "=", "self", ".", "soup", ".", "find", "(", "'itunes:owner'", ")", "try", ":", "self", ".", "owner_name", "=", "owner", ".", "find", "(", "'itunes:name'", ")", ".", "string", "except", "AttributeError",...
Parses owner name and email then sets value
[ "Parses", "owner", "name", "and", "email", "then", "sets", "value" ]
python
train
Diaoul/subliminal
subliminal/subtitle.py
https://github.com/Diaoul/subliminal/blob/a952dfb2032eb0fd6eb1eb89f04080923c11c4cf/subliminal/subtitle.py#L166-L182
def get_subtitle_path(video_path, language=None, extension='.srt'):
    """Get the subtitle path using the `video_path` and `language`.

    :param str video_path: path to the video.
    :param language: language of the subtitle to put in the path.
    :type language: :class:`~babelfish.language.Language`
    :param str extension: extension of the subtitle.
    :return: path of the subtitle.
    :rtype: str
    """
    root = os.path.splitext(video_path)[0]
    if not language:
        return root + extension
    # Insert the language code between the video name and the extension.
    return root + '.' + str(language) + extension
[ "def", "get_subtitle_path", "(", "video_path", ",", "language", "=", "None", ",", "extension", "=", "'.srt'", ")", ":", "subtitle_root", "=", "os", ".", "path", ".", "splitext", "(", "video_path", ")", "[", "0", "]", "if", "language", ":", "subtitle_root",...
Get the subtitle path using the `video_path` and `language`. :param str video_path: path to the video. :param language: language of the subtitle to put in the path. :type language: :class:`~babelfish.language.Language` :param str extension: extension of the subtitle. :return: path of the subtitle. :rtype: str
[ "Get", "the", "subtitle", "path", "using", "the", "video_path", "and", "language", "." ]
python
train
biolink/ontobio
ontobio/sparql/sparql_ontol_utils.py
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/sparql/sparql_ontol_utils.py#L231-L245
def fetchall_labels(ont):
    """
    fetch all rdfs:label assertions for an ontology
    """
    logging.info("fetching rdfs:labels for: "+ont)
    graph = get_named_graph(ont)
    # Wrap the shared label query body in a GRAPH clause for this ontology.
    query = """ SELECT * WHERE {{ GRAPH <{g}> {q} }} """.format(
        q=querybody_label(), g=graph)
    bindings = run_sparql(query)
    return [(row['c']['value'], row['l']['value']) for row in bindings]
[ "def", "fetchall_labels", "(", "ont", ")", ":", "logging", ".", "info", "(", "\"fetching rdfs:labels for: \"", "+", "ont", ")", "namedGraph", "=", "get_named_graph", "(", "ont", ")", "queryBody", "=", "querybody_label", "(", ")", "query", "=", "\"\"\"\n SELEC...
fetch all rdfs:label assertions for an ontology
[ "fetch", "all", "rdfs", ":", "label", "assertions", "for", "an", "ontology" ]
python
train
pymupdf/PyMuPDF
fitz/utils.py
https://github.com/pymupdf/PyMuPDF/blob/917f2d83482510e26ba0ff01fd2392c26f3a8e90/fitz/utils.py#L573-L582
def getRectArea(rect, unit="px"):
    """Calculate area of rectangle.

    Backward-compatible replacement for the old ``*args`` signature: the
    rectangle is still the first positional argument and the unit the
    (optional) second.

    :param rect: object with ``width`` and ``height`` attributes.
    :param str unit: one of 'px' (default), 'in', 'cm', or 'mm'.
    :return: area of ``rect`` expressed in the requested unit.
    :raises KeyError: if ``unit`` is not one of the known unit names.
    """
    # (numerator, denominator) pairs converting px (1/72 inch) to the unit.
    u = {"px": (1, 1), "in": (1., 72.), "cm": (2.54, 72.), "mm": (25.4, 72.)}
    f = (u[unit][0] / u[unit][1]) ** 2
    return f * rect.width * rect.height
[ "def", "getRectArea", "(", "*", "args", ")", ":", "rect", "=", "args", "[", "0", "]", "if", "len", "(", "args", ")", ">", "1", ":", "unit", "=", "args", "[", "1", "]", "else", ":", "unit", "=", "\"px\"", "u", "=", "{", "\"px\"", ":", "(", "...
Calculate area of rectangle.\nparameter is one of 'px' (default), 'in', 'cm', or 'mm'.
[ "Calculate", "area", "of", "rectangle", ".", "\\", "nparameter", "is", "one", "of", "px", "(", "default", ")", "in", "cm", "or", "mm", "." ]
python
train
JukeboxPipeline/jukeboxmaya
src/jukeboxmaya/launcher.py
https://github.com/JukeboxPipeline/jukeboxmaya/blob/c8d6318d53cdb5493453c4a6b65ef75bdb2d5f2c/src/jukeboxmaya/launcher.py#L122-L140
def parse_args(self, args=None):
    """Parse the given arguments

    All commands should support executing a function,
    so you can use the arg Namespace like this::

      launcher = Launcher()
      args, unknown = launcher.parse_args()
      args.func(args, unknown) # execute the command

    :param args: arguments to pass
    :type args:
    :returns: the parsed arguments and all unknown arguments
    :rtype: (Namespace, list)
    :raises: None
    """
    # Fall back to the process command line when no args are supplied.
    cli_args = sys.argv[1:] if args is None else args
    return self.parser.parse_known_args(cli_args)
[ "def", "parse_args", "(", "self", ",", "args", "=", "None", ")", ":", "if", "args", "is", "None", ":", "args", "=", "sys", ".", "argv", "[", "1", ":", "]", "return", "self", ".", "parser", ".", "parse_known_args", "(", "args", ")" ]
Parse the given arguments All commands should support executing a function, so you can use the arg Namespace like this:: launcher = Launcher() args, unknown = launcher.parse_args() args.func(args, unknown) # execute the command :param args: arguments to pass :type args: :returns: the parsed arguments and all unknown arguments :rtype: (Namespace, list) :raises: None
[ "Parse", "the", "given", "arguments" ]
python
train
JasonKessler/scattertext
scattertext/TermDocMatrix.py
https://github.com/JasonKessler/scattertext/blob/cacf1f687d218ee8cae3fc05cc901db824bb1b81/scattertext/TermDocMatrix.py#L512-L523
def _get_scaled_f_score_from_counts(self, cat_word_counts, not_cat_word_counts,
                                    scaler_algo, beta=DEFAULT_BETA):
    """Return scaled F-scores for terms given per-category counts.

    The previous docstring was a block of commented-out legacy code, not
    documentation; the computation is delegated to ScaledFScore.get_scores.

    :param cat_word_counts: term counts within the category.
    :param not_cat_word_counts: term counts outside the category.
    :param scaler_algo: name of the scaler algorithm to apply.
    :param beta: harmonic-mean weighting factor, defaults to DEFAULT_BETA.
    :return: scores as produced by ScaledFScore.get_scores.
    """
    return ScaledFScore.get_scores(cat_word_counts, not_cat_word_counts,
                                   scaler_algo, beta=beta)
[ "def", "_get_scaled_f_score_from_counts", "(", "self", ",", "cat_word_counts", ",", "not_cat_word_counts", ",", "scaler_algo", ",", "beta", "=", "DEFAULT_BETA", ")", ":", "return", "ScaledFScore", ".", "get_scores", "(", "cat_word_counts", ",", "not_cat_word_counts", ...
scaler = self._get_scaler_function(scaler_algo) p_word_given_category = cat_word_counts.astype(np.float64) / cat_word_counts.sum() p_category_given_word = cat_word_counts.astype(np.float64) / (cat_word_counts + not_cat_word_counts) scores \ = self._computer_harmoic_mean_of_probabilities_over_non_zero_in_category_count_terms( cat_word_counts, p_category_given_word, p_word_given_category, scaler )
[ "scaler", "=", "self", ".", "_get_scaler_function", "(", "scaler_algo", ")", "p_word_given_category", "=", "cat_word_counts", ".", "astype", "(", "np", ".", "float64", ")", "/", "cat_word_counts", ".", "sum", "()", "p_category_given_word", "=", "cat_word_counts", ...
python
train
mbr/simplekv
simplekv/__init__.py
https://github.com/mbr/simplekv/blob/fc46ee0b8ca9b071d6699f3f0f18a8e599a5a2d6/simplekv/__init__.py#L240-L251
def _get_filename(self, key, filename): """Write key to file. Either this method or :meth:`~simplekv.KeyValueStore._get_file` will be called by :meth:`~simplekv.KeyValueStore.get_file`. This method only accepts filenames and will open the file with a mode of ``wb``, then call :meth:`~simplekv.KeyValueStore._get_file`. :param key: Key to be retrieved :param filename: Filename to write to """ with open(filename, 'wb') as dest: return self._get_file(key, dest)
[ "def", "_get_filename", "(", "self", ",", "key", ",", "filename", ")", ":", "with", "open", "(", "filename", ",", "'wb'", ")", "as", "dest", ":", "return", "self", ".", "_get_file", "(", "key", ",", "dest", ")" ]
Write key to file. Either this method or :meth:`~simplekv.KeyValueStore._get_file` will be called by :meth:`~simplekv.KeyValueStore.get_file`. This method only accepts filenames and will open the file with a mode of ``wb``, then call :meth:`~simplekv.KeyValueStore._get_file`. :param key: Key to be retrieved :param filename: Filename to write to
[ "Write", "key", "to", "file", ".", "Either", "this", "method", "or", ":", "meth", ":", "~simplekv", ".", "KeyValueStore", ".", "_get_file", "will", "be", "called", "by", ":", "meth", ":", "~simplekv", ".", "KeyValueStore", ".", "get_file", ".", "This", "...
python
train
arne-cl/discoursegraphs
src/discoursegraphs/readwrite/rst/heilman_sagae_2015.py
https://github.com/arne-cl/discoursegraphs/blob/842f0068a3190be2c75905754521b176b25a54fb/src/discoursegraphs/readwrite/rst/heilman_sagae_2015.py#L254-L275
def get_nuclearity_type(child_types):
    """Returns the nuclearity type of an RST relation (i.e. 'multinuc',
    'nucsat' or 'multisat') or 'edu' if the node is below the relation level.
    """
    # A node whose only child type is 'text' is a leaf (EDU).
    if 'text' in child_types and len(child_types) == 1:
        return NucType.edu

    assert 'nucleus' in child_types, \
        "This is not a relational node. child_types: {}".format(child_types)

    n_nuclei = len(child_types['nucleus'])
    if 'satellite' not in child_types:
        # Only nuclei present: must be a multi-nuclear relation.
        assert n_nuclei > 1
        return NucType.multinuc

    n_satellites = len(child_types['satellite'])
    assert n_nuclei == 1
    if n_satellites == 1:
        return NucType.nucsat
    assert n_satellites > 1
    return NucType.multisat
[ "def", "get_nuclearity_type", "(", "child_types", ")", ":", "if", "'text'", "in", "child_types", "and", "len", "(", "child_types", ")", "==", "1", ":", "return", "NucType", ".", "edu", "assert", "'nucleus'", "in", "child_types", ",", "\"This is not a relational ...
Returns the nuclearity type of an RST relation (i.e. 'multinuc', 'nucsat' or 'multisat') or 'edu' if the node is below the relation level.
[ "Returns", "the", "nuclearity", "type", "of", "an", "RST", "relation", "(", "i", ".", "e", ".", "multinuc", "nucsat", "or", "multisat", ")", "or", "edu", "if", "the", "node", "is", "below", "the", "relation", "level", "." ]
python
train
facelessuser/bracex
bracex/__init__.py
https://github.com/facelessuser/bracex/blob/1fdf83e2bdfb939e78ba9966bcef80cd7a5c8534/bracex/__init__.py#L229-L234
def combine(self, a, b):
    """A generator that combines two iterables."""
    yield from a
    yield from b
[ "def", "combine", "(", "self", ",", "a", ",", "b", ")", ":", "for", "l", "in", "(", "a", ",", "b", ")", ":", "for", "x", "in", "l", ":", "yield", "x" ]
A generator that combines two iterables.
[ "A", "generator", "that", "combines", "two", "iterables", "." ]
python
train
senaite/senaite.core
bika/lims/browser/widgets/referenceresultswidget.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/widgets/referenceresultswidget.py#L126-L131
def folderitems(self):
    """TODO: Refactor to non-classic mode
    """
    rendered_items = super(ReferenceResultsView, self).folderitems()
    # Sort the accumulated categories in place before the listing renders.
    self.categories.sort()
    return rendered_items
[ "def", "folderitems", "(", "self", ")", ":", "items", "=", "super", "(", "ReferenceResultsView", ",", "self", ")", ".", "folderitems", "(", ")", "self", ".", "categories", ".", "sort", "(", ")", "return", "items" ]
TODO: Refactor to non-classic mode
[ "TODO", ":", "Refactor", "to", "non", "-", "classic", "mode" ]
python
train
brainiak/brainiak
brainiak/factoranalysis/tfa.py
https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/factoranalysis/tfa.py#L525-L567
def get_factors(self, unique_R, inds, centers, widths):
    """Calculate factors based on centers and widths

    Parameters
    ----------

    unique_R : a list of array,
        Each element contains unique value in one dimension of
        scanner coordinate matrix R.

    inds : a list of array,
        Each element contains the indices to reconstruct one dimension of
        original cooridnate matrix from the unique array.

    centers : 2D array, with shape [K, n_dim]
        The centers of factors.

    widths : 1D array, with shape [K, 1]
        The widths of factors.

    Returns
    -------

    F : 2D array, with shape [n_voxel,self.K]
        The latent factors from fMRI data.
    """
    n_voxel = len(inds[0])
    F = np.zeros((n_voxel, self.K))
    # Unpack per-dimension unique coordinates and reconstruction indices
    # before handing everything to the C extension, which fills F in place.
    ux, uy, uz = unique_R[0], unique_R[1], unique_R[2]
    ix, iy, iz = inds[0], inds[1], inds[2]
    tfa_extension.factor(F, centers, widths, ux, uy, uz, ix, iy, iz)
    return F
[ "def", "get_factors", "(", "self", ",", "unique_R", ",", "inds", ",", "centers", ",", "widths", ")", ":", "F", "=", "np", ".", "zeros", "(", "(", "len", "(", "inds", "[", "0", "]", ")", ",", "self", ".", "K", ")", ")", "tfa_extension", ".", "fa...
Calculate factors based on centers and widths Parameters ---------- unique_R : a list of array, Each element contains unique value in one dimension of scanner coordinate matrix R. inds : a list of array, Each element contains the indices to reconstruct one dimension of original cooridnate matrix from the unique array. centers : 2D array, with shape [K, n_dim] The centers of factors. widths : 1D array, with shape [K, 1] The widths of factors. Returns ------- F : 2D array, with shape [n_voxel,self.K] The latent factors from fMRI data.
[ "Calculate", "factors", "based", "on", "centers", "and", "widths" ]
python
train
cmbruns/pyopenvr
src/openvr/__init__.py
https://github.com/cmbruns/pyopenvr/blob/68395d26bb3df6ab1f0f059c38d441f962938be6/src/openvr/__init__.py#L5250-L5255
def showMessageOverlay(self, pchText, pchCaption, pchButton0Text, pchButton1Text, pchButton2Text, pchButton3Text):
    """Show the message overlay. This will block and return you a result."""
    # Thin wrapper: forward straight to the native function table entry.
    return self.function_table.showMessageOverlay(
        pchText, pchCaption, pchButton0Text, pchButton1Text,
        pchButton2Text, pchButton3Text)
[ "def", "showMessageOverlay", "(", "self", ",", "pchText", ",", "pchCaption", ",", "pchButton0Text", ",", "pchButton1Text", ",", "pchButton2Text", ",", "pchButton3Text", ")", ":", "fn", "=", "self", ".", "function_table", ".", "showMessageOverlay", "result", "=", ...
Show the message overlay. This will block and return you a result.
[ "Show", "the", "message", "overlay", ".", "This", "will", "block", "and", "return", "you", "a", "result", "." ]
python
train
eaton-lab/toytree
toytree/Coords.py
https://github.com/eaton-lab/toytree/blob/0347ed2098acc5f707fadf52a0ecd411a6d1859c/toytree/Coords.py#L75-L88
def update_fixed_order(self): "after pruning fixed order needs update to match new nnodes/ntips." # set tips order if fixing for multi-tree plotting (default None) fixed_order = self.ttree._fixed_order self.ttree_fixed_order = None self.ttree_fixed_idx = list(range(self.ttree.ntips)) # check if fixed_order changed: if fixed_order: fixed_order = [ i for i in fixed_order if i in self.ttree.get_tip_labels()] self.ttree._set_fixed_order(fixed_order) else: self.ttree._fixed_idx = list(range(self.ttree.ntips))
[ "def", "update_fixed_order", "(", "self", ")", ":", "# set tips order if fixing for multi-tree plotting (default None)", "fixed_order", "=", "self", ".", "ttree", ".", "_fixed_order", "self", ".", "ttree_fixed_order", "=", "None", "self", ".", "ttree_fixed_idx", "=", "l...
after pruning fixed order needs update to match new nnodes/ntips.
[ "after", "pruning", "fixed", "order", "needs", "update", "to", "match", "new", "nnodes", "/", "ntips", "." ]
python
train
ThreatConnect-Inc/tcex
tcex/tcex_bin_validate.py
https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_bin_validate.py#L412-L426
def interactive(self):
    """Run in interactive mode."""
    while True:
        command = sys.stdin.readline().strip()
        if command == 'quit':
            sys.exit()
        if command == 'validate':
            # Run the full validation pipeline and report the results.
            self.check_syntax()
            self.check_imports()
            self.check_install_json()
            self.check_layout_json()
            self.print_json()

            # reset validation_data
            self.validation_data = self._validation_data
[ "def", "interactive", "(", "self", ")", ":", "while", "True", ":", "line", "=", "sys", ".", "stdin", ".", "readline", "(", ")", ".", "strip", "(", ")", "if", "line", "==", "'quit'", ":", "sys", ".", "exit", "(", ")", "elif", "line", "==", "'valid...
Run in interactive mode.
[ "Run", "in", "interactive", "mode", "." ]
python
train
googleapis/google-cloud-python
pubsub/google/cloud/pubsub_v1/subscriber/_protocol/streaming_pull_manager.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/pubsub/google/cloud/pubsub_v1/subscriber/_protocol/streaming_pull_manager.py#L370-L399
def _get_initial_request(self):
    """Return the initial request for the RPC.

    This defines the initial request that must always be sent to Pub/Sub
    immediately upon opening the subscription.

    Returns:
        google.cloud.pubsub_v1.types.StreamingPullRequest: A request
        suitable for being the first request on the stream (and not
        suitable for any other purpose).
    """
    # Any ack IDs under lease management need their deadline extended
    # immediately. list() makes an explicit copy because another thread
    # may mutate the leaser's collection concurrently.
    if self._leaser is None:
        lease_ids = []
    else:
        lease_ids = list(self._leaser.ack_ids)

    return types.StreamingPullRequest(
        modify_deadline_ack_ids=list(lease_ids),
        modify_deadline_seconds=[self.ack_deadline] * len(lease_ids),
        stream_ack_deadline_seconds=self.ack_histogram.percentile(99),
        subscription=self._subscription,
    )
[ "def", "_get_initial_request", "(", "self", ")", ":", "# Any ack IDs that are under lease management need to have their", "# deadline extended immediately.", "if", "self", ".", "_leaser", "is", "not", "None", ":", "# Explicitly copy the list, as it could be modified by another", "#...
Return the initial request for the RPC. This defines the initial request that must always be sent to Pub/Sub immediately upon opening the subscription. Returns: google.cloud.pubsub_v1.types.StreamingPullRequest: A request suitable for being the first request on the stream (and not suitable for any other purpose).
[ "Return", "the", "initial", "request", "for", "the", "RPC", "." ]
python
train
mdickinson/bigfloat
bigfloat/core.py
https://github.com/mdickinson/bigfloat/blob/e5fdd1048615191ed32a2b7460e14b3b3ff24662/bigfloat/core.py#L1520-L1527
def unordered(x, y):
    """
    Return True if x or y is a NaN and False otherwise.
    """
    # Coerce both operands to BigFloat before the MPFR comparison.
    lhs = BigFloat._implicit_convert(x)
    rhs = BigFloat._implicit_convert(y)
    return mpfr.mpfr_unordered_p(lhs, rhs)
[ "def", "unordered", "(", "x", ",", "y", ")", ":", "x", "=", "BigFloat", ".", "_implicit_convert", "(", "x", ")", "y", "=", "BigFloat", ".", "_implicit_convert", "(", "y", ")", "return", "mpfr", ".", "mpfr_unordered_p", "(", "x", ",", "y", ")" ]
Return True if x or y is a NaN and False otherwise.
[ "Return", "True", "if", "x", "or", "y", "is", "a", "NaN", "and", "False", "otherwise", "." ]
python
train
eqcorrscan/EQcorrscan
eqcorrscan/utils/mag_calc.py
https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/mag_calc.py#L328-L376
def _GSE2_PAZ_read(gsefile): """ Read the instrument response information from a GSE Poles and Zeros file. Formatted for files generated by the SEISAN program RESP. Format must be CAL2, not coded for any other format at the moment, contact the authors to add others in. :type gsefile: string :param gsefile: Path to GSE file :returns: Dictionary of poles, zeros, gain and sensitivity :rtype: dict """ with open(gsefile, 'r') as f: # First line should start with CAL2 header = f.readline() if not header[0:4] == 'CAL2': raise IOError('Unknown format for GSE file, only coded for CAL2') station = header.split()[1] channel = header.split()[2] sensor = header.split()[3] date = dt.datetime.strptime(header.split()[7], '%Y/%m/%d') header = f.readline() if not header[0:4] == 'PAZ2': raise IOError('Unknown format for GSE file, only coded for PAZ2') gain = float(header.split()[3]) # Measured in nm/counts kpoles = int(header.split()[4]) kzeros = int(header.split()[5]) poles = [] for i in range(kpoles): pole = f.readline() poles.append(complex(float(pole.split()[0]), float(pole.split()[1]))) zeros = [] for i in range(kzeros): zero = f.readline() zeros.append(complex(float(zero.split()[0]), float(zero.split()[1]))) # Have Poles and Zeros, but need Gain and Sensitivity # Gain should be in the DIG2 line: for line in f: if line[0:4] == 'DIG2': sensitivity = float(line.split()[2]) # measured in counts/muVolt PAZ = {'poles': poles, 'zeros': zeros, 'gain': gain, 'sensitivity': sensitivity} return PAZ, date, station, channel, sensor
[ "def", "_GSE2_PAZ_read", "(", "gsefile", ")", ":", "with", "open", "(", "gsefile", ",", "'r'", ")", "as", "f", ":", "# First line should start with CAL2", "header", "=", "f", ".", "readline", "(", ")", "if", "not", "header", "[", "0", ":", "4", "]", "=...
Read the instrument response information from a GSE Poles and Zeros file. Formatted for files generated by the SEISAN program RESP. Format must be CAL2, not coded for any other format at the moment, contact the authors to add others in. :type gsefile: string :param gsefile: Path to GSE file :returns: Dictionary of poles, zeros, gain and sensitivity :rtype: dict
[ "Read", "the", "instrument", "response", "information", "from", "a", "GSE", "Poles", "and", "Zeros", "file", "." ]
python
train
wkentaro/fcn
fcn/initializers/weight.py
https://github.com/wkentaro/fcn/blob/a29e167b67b11418a06566ad1ddbbc6949575e05/fcn/initializers/weight.py#L6-L16
def _get_upsampling_filter(size): """Make a 2D bilinear kernel suitable for upsampling""" factor = (size + 1) // 2 if size % 2 == 1: center = factor - 1 else: center = factor - 0.5 og = np.ogrid[:size, :size] filter = (1 - abs(og[0] - center) / factor) * \ (1 - abs(og[1] - center) / factor) return filter
[ "def", "_get_upsampling_filter", "(", "size", ")", ":", "factor", "=", "(", "size", "+", "1", ")", "//", "2", "if", "size", "%", "2", "==", "1", ":", "center", "=", "factor", "-", "1", "else", ":", "center", "=", "factor", "-", "0.5", "og", "=", ...
Make a 2D bilinear kernel suitable for upsampling
[ "Make", "a", "2D", "bilinear", "kernel", "suitable", "for", "upsampling" ]
python
train
joesecurity/jbxapi
jbxapi.py
https://github.com/joesecurity/jbxapi/blob/cea2f5edef9661d53fe3d58435d4e88701331f79/jbxapi.py#L365-L373
def analysis_search(self, query): """ Lists the webids of the analyses that match the given query. Searches in MD5, SHA1, SHA256, filename, cookbook name, comment, url and report id. """ response = self._post(self.apiurl + "/v2/analysis/search", data={'apikey': self.apikey, 'q': query}) return self._raise_or_extract(response)
[ "def", "analysis_search", "(", "self", ",", "query", ")", ":", "response", "=", "self", ".", "_post", "(", "self", ".", "apiurl", "+", "\"/v2/analysis/search\"", ",", "data", "=", "{", "'apikey'", ":", "self", ".", "apikey", ",", "'q'", ":", "query", "...
Lists the webids of the analyses that match the given query. Searches in MD5, SHA1, SHA256, filename, cookbook name, comment, url and report id.
[ "Lists", "the", "webids", "of", "the", "analyses", "that", "match", "the", "given", "query", "." ]
python
train
Metatab/metapack
metapack/cli/metaaws.py
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/cli/metaaws.py#L540-L548
def get_iam_account(l, args, user_name): """Return the local Account for a user name, by fetching User and looking up the arn. """ iam = get_resource(args, 'iam') user = iam.User(user_name) user.load() return l.find_or_new_account(user.arn)
[ "def", "get_iam_account", "(", "l", ",", "args", ",", "user_name", ")", ":", "iam", "=", "get_resource", "(", "args", ",", "'iam'", ")", "user", "=", "iam", ".", "User", "(", "user_name", ")", "user", ".", "load", "(", ")", "return", "l", ".", "fin...
Return the local Account for a user name, by fetching User and looking up the arn.
[ "Return", "the", "local", "Account", "for", "a", "user", "name", "by", "fetching", "User", "and", "looking", "up", "the", "arn", "." ]
python
train
jrief/djangocms-cascade
cmsplugin_cascade/segmentation/mixins.py
https://github.com/jrief/djangocms-cascade/blob/58996f990c4068e5d50f0db6030a5c0e06b682e5/cmsplugin_cascade/segmentation/mixins.py#L88-L159
def emulate_users(self, request): """ The list view """ def display_as_link(self, obj): try: identifier = getattr(user_model_admin, list_display_link)(obj) except AttributeError: identifier = admin.utils.lookup_field(list_display_link, obj, model_admin=self)[2] emulate_user_id = request.session.get('emulate_user_id') if emulate_user_id == obj.id: return format_html('<strong>{}</strong>', identifier) fmtargs = { 'href': reverse('admin:emulate-user', kwargs={'user_id': obj.id}), 'identifier': identifier, } return format_html('<a href="{href}" class="emulate-user">{identifier}</a>', **fmtargs) opts = self.UserModel._meta app_label = opts.app_label user_model_admin = self.admin_site._registry[self.UserModel] request._lookup_model = self.UserModel list_display_links = user_model_admin.get_list_display_links(request, user_model_admin.list_display) # replace first entry in list_display_links by customized method display_as_link list_display_link = list_display_links[0] try: list_display = list(user_model_admin.segmentation_list_display) except AttributeError: list_display = list(user_model_admin.list_display) list_display.remove(list_display_link) list_display.insert(0, 'display_as_link') display_as_link.allow_tags = True # TODO: presumably not required anymore since Django-1.9 try: display_as_link.short_description = user_model_admin.identifier.short_description except AttributeError: display_as_link.short_description = admin.utils.label_for_field(list_display_link, self.UserModel) self.display_as_link = six.create_bound_method(display_as_link, self) ChangeList = self.get_changelist(request) cl = ChangeList(request, self.UserModel, list_display, (None,), # disable list_display_links in ChangeList, instead override that field user_model_admin.list_filter, user_model_admin.date_hierarchy, user_model_admin.search_fields, user_model_admin.list_select_related, user_model_admin.list_per_page, user_model_admin.list_max_show_all, (), # disable list_editable self) cl.formset = 
None selection_note_all = ungettext('%(total_count)s selected', 'All %(total_count)s selected', cl.result_count) context = { 'module_name': force_text(opts.verbose_name_plural), 'selection_note': _('0 of %(cnt)s selected') % {'cnt': len(cl.result_list)}, 'selection_note_all': selection_note_all % {'total_count': cl.result_count}, 'title': _("Select %(user_model)s to emulate") % {'user_model': opts.verbose_name}, 'is_popup': cl.is_popup, 'cl': cl, 'media': self.media, 'has_add_permission': False, 'opts': cl.opts, 'app_label': app_label, 'actions_on_top': self.actions_on_top, 'actions_on_bottom': self.actions_on_bottom, 'actions_selection_counter': self.actions_selection_counter, 'preserved_filters': self.get_preserved_filters(request), } return TemplateResponse(request, self.change_list_template or [ 'admin/%s/%s/change_list.html' % (app_label, opts.model_name), 'admin/%s/change_list.html' % app_label, 'admin/change_list.html' ], context)
[ "def", "emulate_users", "(", "self", ",", "request", ")", ":", "def", "display_as_link", "(", "self", ",", "obj", ")", ":", "try", ":", "identifier", "=", "getattr", "(", "user_model_admin", ",", "list_display_link", ")", "(", "obj", ")", "except", "Attrib...
The list view
[ "The", "list", "view" ]
python
train
ltalirz/aiida-gudhi
aiida_gudhi/parsers/rips.py
https://github.com/ltalirz/aiida-gudhi/blob/81ebec782ddff3ab97a3e3242b809fec989fa4b9/aiida_gudhi/parsers/rips.py#L26-L69
def parse_with_retrieved(self, retrieved): """ Parse output data folder, store results in database. :param retrieved: a dictionary of retrieved nodes, where the key is the link name :returns: a tuple with two values ``(bool, node_list)``, where: * ``bool``: variable to tell if the parsing succeeded * ``node_list``: list of new nodes to be stored in the db (as a list of tuples ``(link_name, node)``) """ from aiida.orm.data.singlefile import SinglefileData success = False node_list = [] # Check that the retrieved folder is there try: out_folder = retrieved['retrieved'] except KeyError: self.logger.error("No retrieved folder found") return success, node_list # Check the folder content is as expected list_of_files = out_folder.get_folder_list() output_files = self._calc.inp.parameters.output_files # Note: set(A) <= set(B) checks whether A is a subset if set(output_files) <= set(list_of_files): pass else: self.logger.error( "Not all expected output files {} were found".format( output_files)) return success, node_list output_links = self._calc.inp.parameters.output_links for fname, link in list(zip(output_files, output_links)): parsed = SinglefileData(file=out_folder.get_abs_path(fname)) node_list.append((link, parsed)) success = True return success, node_list
[ "def", "parse_with_retrieved", "(", "self", ",", "retrieved", ")", ":", "from", "aiida", ".", "orm", ".", "data", ".", "singlefile", "import", "SinglefileData", "success", "=", "False", "node_list", "=", "[", "]", "# Check that the retrieved folder is there", "try...
Parse output data folder, store results in database. :param retrieved: a dictionary of retrieved nodes, where the key is the link name :returns: a tuple with two values ``(bool, node_list)``, where: * ``bool``: variable to tell if the parsing succeeded * ``node_list``: list of new nodes to be stored in the db (as a list of tuples ``(link_name, node)``)
[ "Parse", "output", "data", "folder", "store", "results", "in", "database", "." ]
python
train
google/apitools
apitools/base/py/encoding_helper.py
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/encoding_helper.py#L314-L345
def decode_field(self, field, value): """Decode the given JSON value. Args: field: a messages.Field for the field we're decoding. value: a python value we'd like to decode. Returns: A value suitable for assignment to field. """ for decoder in _GetFieldCodecs(field, 'decoder'): result = decoder(field, value) value = result.value if result.complete: return value if isinstance(field, messages.MessageField): field_value = self.decode_message( field.message_type, json.dumps(value)) elif isinstance(field, messages.EnumField): value = GetCustomJsonEnumMapping( field.type, json_name=value) or value try: field_value = super( _ProtoJsonApiTools, self).decode_field(field, value) except messages.DecodeError: if not isinstance(value, six.string_types): raise field_value = None else: field_value = super( _ProtoJsonApiTools, self).decode_field(field, value) return field_value
[ "def", "decode_field", "(", "self", ",", "field", ",", "value", ")", ":", "for", "decoder", "in", "_GetFieldCodecs", "(", "field", ",", "'decoder'", ")", ":", "result", "=", "decoder", "(", "field", ",", "value", ")", "value", "=", "result", ".", "valu...
Decode the given JSON value. Args: field: a messages.Field for the field we're decoding. value: a python value we'd like to decode. Returns: A value suitable for assignment to field.
[ "Decode", "the", "given", "JSON", "value", "." ]
python
train
f3at/feat
src/feat/common/error.py
https://github.com/f3at/feat/blob/15da93fc9d6ec8154f52a9172824e25821195ef8/src/feat/common/error.py#L33-L45
def log_errors(function): """Logs the exceptions raised by the decorated function without interfering. For debugging purpose.""" def wrapper(*args, **kwargs): try: return function(*args, **kwargs) except BaseException as e: handle_exception(None, e, "Exception in function %s", reflect.canonical_name(function)) raise return wrapper
[ "def", "log_errors", "(", "function", ")", ":", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "return", "function", "(", "*", "args", ",", "*", "*", "kwargs", ")", "except", "BaseException", "as", "e", ":", "han...
Logs the exceptions raised by the decorated function without interfering. For debugging purpose.
[ "Logs", "the", "exceptions", "raised", "by", "the", "decorated", "function", "without", "interfering", ".", "For", "debugging", "purpose", "." ]
python
train
tensorflow/tensor2tensor
tensor2tensor/data_generators/multi_problem_v2.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/multi_problem_v2.py#L86-L89
def generate_data(self, *args, **kwargs): """Generates data for each problem.""" for p in self.problems: p.generate_data(*args, **kwargs)
[ "def", "generate_data", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "for", "p", "in", "self", ".", "problems", ":", "p", ".", "generate_data", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Generates data for each problem.
[ "Generates", "data", "for", "each", "problem", "." ]
python
train
saltstack/salt
salt/utils/nacl.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/nacl.py#L329-L350
def sealedbox_decrypt(data, **kwargs): ''' Decrypt data using a secret key that was encrypted using a public key with `nacl.sealedbox_encrypt`. CLI Examples: .. code-block:: bash salt-call nacl.sealedbox_decrypt pEXHQM6cuaF7A= salt-call --local nacl.sealedbox_decrypt data='pEXHQM6cuaF7A=' sk_file=/etc/salt/pki/master/nacl salt-call --local nacl.sealedbox_decrypt data='pEXHQM6cuaF7A=' sk='YmFkcGFzcwo=' ''' if data is None: return None # ensure data is in bytes data = salt.utils.stringutils.to_bytes(data) sk = _get_sk(**kwargs) keypair = libnacl.public.SecretKey(sk) b = libnacl.sealed.SealedBox(keypair) return b.decrypt(base64.b64decode(data))
[ "def", "sealedbox_decrypt", "(", "data", ",", "*", "*", "kwargs", ")", ":", "if", "data", "is", "None", ":", "return", "None", "# ensure data is in bytes", "data", "=", "salt", ".", "utils", ".", "stringutils", ".", "to_bytes", "(", "data", ")", "sk", "=...
Decrypt data using a secret key that was encrypted using a public key with `nacl.sealedbox_encrypt`. CLI Examples: .. code-block:: bash salt-call nacl.sealedbox_decrypt pEXHQM6cuaF7A= salt-call --local nacl.sealedbox_decrypt data='pEXHQM6cuaF7A=' sk_file=/etc/salt/pki/master/nacl salt-call --local nacl.sealedbox_decrypt data='pEXHQM6cuaF7A=' sk='YmFkcGFzcwo='
[ "Decrypt", "data", "using", "a", "secret", "key", "that", "was", "encrypted", "using", "a", "public", "key", "with", "nacl", ".", "sealedbox_encrypt", "." ]
python
train
ncc-tools/python-pa-api
paapi/paapi.py
https://github.com/ncc-tools/python-pa-api/blob/a27481dd323d282d0f4457586198d9faec896f11/paapi/paapi.py#L121-L143
def _query_api(self, method, url, fields=None, extra_headers=None, req_body=None): """ Abstracts http queries to the API """ with self.auth.authenticate() as token: logging.debug('PA Authentication returned token %s', token) headers = { 'Authorization': 'Bearer %s' % (token,), 'Realm': self.auth_realm } if extra_headers is not None: headers.update(extra_headers) logging.info('[%s] %s', method, url) if req_body is not None: response = self.http.request(method, url, fields, headers, body=req_body) else: response = self.http.request(method, url, fields, headers) if response.status != 200: print(response.data) logging.warning('Got non-200 HTTP status from API: %d', response.status) raise ApiQueryError("Failed to get API data", response.status) return json.loads(response.data.decode())
[ "def", "_query_api", "(", "self", ",", "method", ",", "url", ",", "fields", "=", "None", ",", "extra_headers", "=", "None", ",", "req_body", "=", "None", ")", ":", "with", "self", ".", "auth", ".", "authenticate", "(", ")", "as", "token", ":", "loggi...
Abstracts http queries to the API
[ "Abstracts", "http", "queries", "to", "the", "API" ]
python
train
ergoithz/browsepy
browsepy/__init__.py
https://github.com/ergoithz/browsepy/blob/1612a930ef220fae507e1b152c531707e555bd92/browsepy/__init__.py#L61-L73
def iter_cookie_browse_sorting(cookies): ''' Get sorting-cookie from cookies dictionary. :yields: tuple of path and sorting property :ytype: 2-tuple of strings ''' try: data = cookies.get('browse-sorting', 'e30=').encode('ascii') for path, prop in json.loads(base64.b64decode(data).decode('utf-8')): yield path, prop except (ValueError, TypeError, KeyError) as e: logger.exception(e)
[ "def", "iter_cookie_browse_sorting", "(", "cookies", ")", ":", "try", ":", "data", "=", "cookies", ".", "get", "(", "'browse-sorting'", ",", "'e30='", ")", ".", "encode", "(", "'ascii'", ")", "for", "path", ",", "prop", "in", "json", ".", "loads", "(", ...
Get sorting-cookie from cookies dictionary. :yields: tuple of path and sorting property :ytype: 2-tuple of strings
[ "Get", "sorting", "-", "cookie", "from", "cookies", "dictionary", "." ]
python
train
ClericPy/torequests
torequests/parsers.py
https://github.com/ClericPy/torequests/blob/1793261688d7a47e1c3a0830d83f8552f5e3e5d9/torequests/parsers.py#L167-L182
def ensure_list(obj): """ null obj -> return []; str, unicode, bytes, bytearray -> [obj]; else -> list(obj) """ if not obj: return [] elif isinstance(obj, (str, unicode, bytes, bytearray)): return [obj] elif hasattr(obj, '__iter__') or hasattr(obj, '__getitem__'): return list(obj) else: return [obj]
[ "def", "ensure_list", "(", "obj", ")", ":", "if", "not", "obj", ":", "return", "[", "]", "elif", "isinstance", "(", "obj", ",", "(", "str", ",", "unicode", ",", "bytes", ",", "bytearray", ")", ")", ":", "return", "[", "obj", "]", "elif", "hasattr",...
null obj -> return []; str, unicode, bytes, bytearray -> [obj]; else -> list(obj)
[ "null", "obj", "-", ">", "return", "[]", ";" ]
python
train
guaix-ucm/numina
numina/core/pipelineload.py
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/core/pipelineload.py#L65-L79
def load_mode(node): """Load one observing mdode""" obs_mode = ObservingMode() obs_mode.__dict__.update(node) # handle validator load_mode_validator(obs_mode, node) # handle builder load_mode_builder(obs_mode, node) # handle tagger: load_mode_tagger(obs_mode, node) return obs_mode
[ "def", "load_mode", "(", "node", ")", ":", "obs_mode", "=", "ObservingMode", "(", ")", "obs_mode", ".", "__dict__", ".", "update", "(", "node", ")", "# handle validator", "load_mode_validator", "(", "obs_mode", ",", "node", ")", "# handle builder", "load_mode_bu...
Load one observing mdode
[ "Load", "one", "observing", "mdode" ]
python
train
jrief/djangocms-cascade
cmsplugin_cascade/models.py
https://github.com/jrief/djangocms-cascade/blob/58996f990c4068e5d50f0db6030a5c0e06b682e5/cmsplugin_cascade/models.py#L371-L378
def assure_relation(cls, cms_page): """ Assure that we have a foreign key relation, pointing from CascadePage onto CMSPage. """ try: cms_page.cascadepage except cls.DoesNotExist: cls.objects.create(extended_object=cms_page)
[ "def", "assure_relation", "(", "cls", ",", "cms_page", ")", ":", "try", ":", "cms_page", ".", "cascadepage", "except", "cls", ".", "DoesNotExist", ":", "cls", ".", "objects", ".", "create", "(", "extended_object", "=", "cms_page", ")" ]
Assure that we have a foreign key relation, pointing from CascadePage onto CMSPage.
[ "Assure", "that", "we", "have", "a", "foreign", "key", "relation", "pointing", "from", "CascadePage", "onto", "CMSPage", "." ]
python
train
pschmitt/pyteleloisirs
pyteleloisirs/pyteleloisirs.py
https://github.com/pschmitt/pyteleloisirs/blob/d63610fd3729862455ac42afca440469f8063fba/pyteleloisirs/pyteleloisirs.py#L176-L232
async def async_get_program_guide(channel, no_cache=False, refresh_interval=4): ''' Get the program data for a channel ''' chan = await async_determine_channel(channel) now = datetime.datetime.now() max_cache_age = datetime.timedelta(hours=refresh_interval) if not no_cache and 'guide' in _CACHE and _CACHE.get('guide').get(chan): cache = _CACHE.get('guide').get(chan) cache_age = cache.get('last_updated') if now - cache_age < max_cache_age: _LOGGER.debug('Found program guide in cache.') return cache.get('data') else: _LOGGER.debug('Found outdated program guide in cache. Update it.') _CACHE['guide'].pop(chan) chans = await async_get_channels() url = chans.get('data', {}).get(chan) if not url: _LOGGER.error('Could not determine URL for %s', chan) return soup = await _async_request_soup(url) programs = [] for prg_item in soup.find_all('div', {'class': 'program-infos'}): try: prog_info = prg_item.find('a', {'class': 'prog_name'}) prog_name = prog_info.text.strip() prog_url = prog_info.get('href') if not prog_url: _LOGGER.warning('Failed to retrive the detail URL for program %s. 
' 'The summary will be empty', prog_name) prog_type = prg_item.find('span', {'class': 'prog_type'}).text.strip() prog_times = prg_item.find('div', {'class': 'prog_progress'}) prog_start = datetime.datetime.fromtimestamp( int(prog_times.get('data-start'))) prog_end = datetime.datetime.fromtimestamp( int(prog_times.get('data-end'))) img = prg_item.find_previous_sibling().find( 'img', {'class': 'prime_broadcast_image'}) prog_img = img.get('data-src') if img else None programs.append( {'name': prog_name, 'type': prog_type, 'img': prog_img, 'url': prog_url, 'summary': None, 'start_time': prog_start, 'end_time': prog_end}) except Exception as exc: _LOGGER.error('Exception occured while fetching the program ' 'guide for channel %s: %s', chan, exc) import traceback traceback.print_exc() # Set the program summaries asynchronously tasks = [async_set_summary(prog) for prog in programs] programs = await asyncio.gather(*tasks) if programs: if 'guide' not in _CACHE: _CACHE['guide'] = {} _CACHE['guide'][chan] = {'last_updated': now, 'data': programs} return programs
[ "async", "def", "async_get_program_guide", "(", "channel", ",", "no_cache", "=", "False", ",", "refresh_interval", "=", "4", ")", ":", "chan", "=", "await", "async_determine_channel", "(", "channel", ")", "now", "=", "datetime", ".", "datetime", ".", "now", ...
Get the program data for a channel
[ "Get", "the", "program", "data", "for", "a", "channel" ]
python
train
vpelletier/pprofile
pprofile.py
https://github.com/vpelletier/pprofile/blob/51a36896727565faf23e5abccc9204e5f935fe1e/pprofile.py#L723-L732
def dump_stats(self, filename): """ Similar to profile.Profile.dump_stats - but different output format ! """ if _isCallgrindName(filename): with open(filename, 'w') as out: self.callgrind(out) else: with io.open(filename, 'w', errors='replace') as out: self.annotate(out)
[ "def", "dump_stats", "(", "self", ",", "filename", ")", ":", "if", "_isCallgrindName", "(", "filename", ")", ":", "with", "open", "(", "filename", ",", "'w'", ")", "as", "out", ":", "self", ".", "callgrind", "(", "out", ")", "else", ":", "with", "io"...
Similar to profile.Profile.dump_stats - but different output format !
[ "Similar", "to", "profile", ".", "Profile", ".", "dump_stats", "-", "but", "different", "output", "format", "!" ]
python
train
oemof/oemof.db
oemof/db/coastdat.py
https://github.com/oemof/oemof.db/blob/d51ac50187f03a875bd7ce5991ed4772e8b77b93/oemof/db/coastdat.py#L92-L128
def fetch_raw_data(sql, connection, geometry): """ Fetch the coastdat2 from the database, adapt it to the local time zone and create a time index. """ tmp_dc = {} weather_df = pd.DataFrame( connection.execute(sql).fetchall(), columns=[ 'gid', 'geom_point', 'geom_polygon', 'data_id', 'time_series', 'dat_id', 'type_id', 'type', 'height', 'year', 'leap_year']).drop( 'dat_id', 1) # Get the timezone of the geometry tz = tools.tz_from_geom(connection, geometry) for ix in weather_df.index: # Convert the point of the weather location to a shapely object weather_df.loc[ix, 'geom_point'] = wkt_loads( weather_df['geom_point'][ix]) # Roll the dataset forward according to the timezone, because the # dataset is based on utc (Berlin +1, Kiev +2, London +0) utc = timezone('utc') offset = int(utc.localize(datetime(2002, 1, 1)).astimezone( timezone(tz)).strftime("%z")[:-2]) # Get the year and the length of the data array db_year = weather_df.loc[ix, 'year'] db_len = len(weather_df['time_series'][ix]) # Set absolute time index for the data sets to avoid errors. tmp_dc[ix] = pd.Series( np.roll(np.array(weather_df['time_series'][ix]), offset), index=pd.date_range(pd.datetime(db_year, 1, 1, 0), periods=db_len, freq='H', tz=tz)) weather_df['time_series'] = pd.Series(tmp_dc) return weather_df
[ "def", "fetch_raw_data", "(", "sql", ",", "connection", ",", "geometry", ")", ":", "tmp_dc", "=", "{", "}", "weather_df", "=", "pd", ".", "DataFrame", "(", "connection", ".", "execute", "(", "sql", ")", ".", "fetchall", "(", ")", ",", "columns", "=", ...
Fetch the coastdat2 from the database, adapt it to the local time zone and create a time index.
[ "Fetch", "the", "coastdat2", "from", "the", "database", "adapt", "it", "to", "the", "local", "time", "zone", "and", "create", "a", "time", "index", "." ]
python
train
hellosign/hellosign-python-sdk
hellosign_sdk/hsclient.py
https://github.com/hellosign/hellosign-python-sdk/blob/4325a29ad5766380a214eac3914511f62f7ecba4/hellosign_sdk/hsclient.py#L1076-L1154
def create_embedded_unclaimed_draft(self, test_mode=False, client_id=None, is_for_embedded_signing=False, requester_email_address=None, files=None, file_urls=None, draft_type=None, subject=None, message=None, signers=None, cc_email_addresses=None, signing_redirect_url=None, requesting_redirect_url=None, form_fields_per_document=None, metadata=None, use_preexisting_fields=False, allow_decline=False): ''' Creates a new Draft to be used for embedded requesting Args: test_mode (bool, optional): Whether this is a test, the signature request created from this draft will not be legally binding if set to True. Defaults to False. client_id (str): Client id of the app used to create the embedded draft. is_for_embedded_signing (bool, optional): Whether this is also for embedded signing. Defaults to False. requester_email_address (str): Email address of the requester. files (list of str): The uploaded file(s) to send for signature. file_urls (list of str): URLs of the file for HelloSign to download to send for signature. Use either `files` or `file_urls` draft_type (str): The type of unclaimed draft to create. Use "send_document" to create a claimable file, and "request_signature" for a claimable signature request. If the type is "request_signature" then signers name and email_address are not optional. subject (str, optional): The subject in the email that will be sent to the signers message (str, optional): The custom message in the email that will be sent to the signers signers (list of dict): A list of signers, which each has the following attributes: name (str): The name of the signer email_address (str): Email address of the signer order (str, optional): The order the signer is required to sign in cc_email_addresses (list of str, optional): A list of email addresses that should be CC'd signing_redirect_url (str, optional): The URL you want the signer redirected to after they successfully sign. 
requesting_redirect_url (str, optional): The URL you want the signer to be redirected to after the request has been sent. form_fields_per_document (str, optional): The fields that should appear on the document, expressed as a serialized JSON data structure which is a list of lists of the form fields. Please refer to the API reference of HelloSign for more details (https://www.hellosign.com/api/reference#SignatureRequest) metadata (dict, optional): Metadata to associate with the draft use_preexisting_fields (bool): Whether to use preexisting PDF fields allow_decline (bool, optional): Allows signers to decline to sign a document if set to 1. Defaults to 0. Returns: An UnclaimedDraft object ''' self._check_required_fields({ 'client_id': client_id, 'requester_email_address': requester_email_address, 'draft_type': draft_type }, [{ "files": files, "file_urls": file_urls }] ) params = { 'test_mode': test_mode, 'client_id': client_id, 'requester_email_address': requester_email_address, 'is_for_embedded_signing': is_for_embedded_signing, 'files': files, 'file_urls': file_urls, 'draft_type': draft_type, 'subject': subject, 'message': message, 'signing_redirect_url': signing_redirect_url, 'requesting_redirect_url': requesting_redirect_url, 'signers': signers, 'cc_email_addresses': cc_email_addresses, 'form_fields_per_document': form_fields_per_document, 'metadata': metadata, 'use_preexisting_fields': use_preexisting_fields, 'allow_decline': allow_decline } return self._create_unclaimed_draft(**params)
[ "def", "create_embedded_unclaimed_draft", "(", "self", ",", "test_mode", "=", "False", ",", "client_id", "=", "None", ",", "is_for_embedded_signing", "=", "False", ",", "requester_email_address", "=", "None", ",", "files", "=", "None", ",", "file_urls", "=", "No...
Creates a new Draft to be used for embedded requesting Args: test_mode (bool, optional): Whether this is a test, the signature request created from this draft will not be legally binding if set to True. Defaults to False. client_id (str): Client id of the app used to create the embedded draft. is_for_embedded_signing (bool, optional): Whether this is also for embedded signing. Defaults to False. requester_email_address (str): Email address of the requester. files (list of str): The uploaded file(s) to send for signature. file_urls (list of str): URLs of the file for HelloSign to download to send for signature. Use either `files` or `file_urls` draft_type (str): The type of unclaimed draft to create. Use "send_document" to create a claimable file, and "request_signature" for a claimable signature request. If the type is "request_signature" then signers name and email_address are not optional. subject (str, optional): The subject in the email that will be sent to the signers message (str, optional): The custom message in the email that will be sent to the signers signers (list of dict): A list of signers, which each has the following attributes: name (str): The name of the signer email_address (str): Email address of the signer order (str, optional): The order the signer is required to sign in cc_email_addresses (list of str, optional): A list of email addresses that should be CC'd signing_redirect_url (str, optional): The URL you want the signer redirected to after they successfully sign. requesting_redirect_url (str, optional): The URL you want the signer to be redirected to after the request has been sent. form_fields_per_document (str, optional): The fields that should appear on the document, expressed as a serialized JSON data structure which is a list of lists of the form fields. 
Please refer to the API reference of HelloSign for more details (https://www.hellosign.com/api/reference#SignatureRequest) metadata (dict, optional): Metadata to associate with the draft use_preexisting_fields (bool): Whether to use preexisting PDF fields allow_decline (bool, optional): Allows signers to decline to sign a document if set to 1. Defaults to 0. Returns: An UnclaimedDraft object
[ "Creates", "a", "new", "Draft", "to", "be", "used", "for", "embedded", "requesting" ]
python
train
saltstack/salt
salt/modules/libcloud_compute.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/libcloud_compute.py#L531-L553
def delete_image(image_id, profile, **libcloud_kwargs): ''' Delete an image of a node :param image_id: Image to delete :type image_id: ``str`` :param profile: The profile key :type profile: ``str`` :param libcloud_kwargs: Extra arguments for the driver's delete_image method :type libcloud_kwargs: ``dict`` CLI Example: .. code-block:: bash salt myminion libcloud_compute.delete_image image1 profile1 ''' conn = _get_driver(profile=profile) libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs) image = _get_by_id(conn.list_images(), image_id) return conn.delete_image(image, **libcloud_kwargs)
[ "def", "delete_image", "(", "image_id", ",", "profile", ",", "*", "*", "libcloud_kwargs", ")", ":", "conn", "=", "_get_driver", "(", "profile", "=", "profile", ")", "libcloud_kwargs", "=", "salt", ".", "utils", ".", "args", ".", "clean_kwargs", "(", "*", ...
Delete an image of a node :param image_id: Image to delete :type image_id: ``str`` :param profile: The profile key :type profile: ``str`` :param libcloud_kwargs: Extra arguments for the driver's delete_image method :type libcloud_kwargs: ``dict`` CLI Example: .. code-block:: bash salt myminion libcloud_compute.delete_image image1 profile1
[ "Delete", "an", "image", "of", "a", "node" ]
python
train
google/grr
grr/server/grr_response_server/gui/api_plugins/report_plugins/server_report_plugins.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/gui/api_plugins/report_plugins/server_report_plugins.py#L82-L86
def _ExtractHuntIdFromPath(entry, event): """Extracts a Hunt ID from an APIAuditEntry's HTTP request path.""" match = re.match(r".*hunt/([^/]+).*", entry.http_request_path) if match: event.urn = "aff4:/hunts/{}".format(match.group(1))
[ "def", "_ExtractHuntIdFromPath", "(", "entry", ",", "event", ")", ":", "match", "=", "re", ".", "match", "(", "r\".*hunt/([^/]+).*\"", ",", "entry", ".", "http_request_path", ")", "if", "match", ":", "event", ".", "urn", "=", "\"aff4:/hunts/{}\"", ".", "form...
Extracts a Hunt ID from an APIAuditEntry's HTTP request path.
[ "Extracts", "a", "Hunt", "ID", "from", "an", "APIAuditEntry", "s", "HTTP", "request", "path", "." ]
python
train
push-things/django-th
th_rss/lib/feedsservice/feedsservice.py
https://github.com/push-things/django-th/blob/86c999d16bcf30b6224206e5b40824309834ac8c/th_rss/lib/feedsservice/feedsservice.py#L21-L39
def datas(self): """ read the data from a given URL or path to a local file """ data = feedparser.parse(self.URL_TO_PARSE, agent=self.USER_AGENT) # when chardet says # >>> chardet.detect(data) # {'confidence': 0.99, 'encoding': 'utf-8'} # bozo says sometimes # >>> data.bozo_exception # CharacterEncodingOverride('document declared as us-ascii, but parsed as utf-8', ) # invalid Feed # so I remove this detection :( # the issue come from the server that return a charset different from the feeds # it is not related to Feedparser but from the HTTP server itself if data.bozo == 1: data.entries = '' return data
[ "def", "datas", "(", "self", ")", ":", "data", "=", "feedparser", ".", "parse", "(", "self", ".", "URL_TO_PARSE", ",", "agent", "=", "self", ".", "USER_AGENT", ")", "# when chardet says", "# >>> chardet.detect(data)", "# {'confidence': 0.99, 'encoding': 'utf-8'}", "...
read the data from a given URL or path to a local file
[ "read", "the", "data", "from", "a", "given", "URL", "or", "path", "to", "a", "local", "file" ]
python
train
astropy/astropy-helpers
astropy_helpers/commands/build_ext.py
https://github.com/astropy/astropy-helpers/blob/f5a27d3f84a98ea0eebb85e0cf3e7214c6bc0d09/astropy_helpers/commands/build_ext.py#L162-L206
def _check_cython_sources(self, extension): """ Where relevant, make sure that the .c files associated with .pyx modules are present (if building without Cython installed). """ # Determine the compiler we'll be using if self.compiler is None: compiler = get_default_compiler() else: compiler = self.compiler # Replace .pyx with C-equivalents, unless c files are missing for jdx, src in enumerate(extension.sources): base, ext = os.path.splitext(src) pyxfn = base + '.pyx' cfn = base + '.c' cppfn = base + '.cpp' if not os.path.isfile(pyxfn): continue if self._uses_cython: extension.sources[jdx] = pyxfn else: if os.path.isfile(cfn): extension.sources[jdx] = cfn elif os.path.isfile(cppfn): extension.sources[jdx] = cppfn else: msg = ( 'Could not find C/C++ file {0}.(c/cpp) for Cython ' 'file {1} when building extension {2}. Cython ' 'must be installed to build from a git ' 'checkout.'.format(base, pyxfn, extension.name)) raise IOError(errno.ENOENT, msg, cfn) # Cython (at least as of 0.29.2) uses deprecated Numpy API features # the use of which produces a few warnings when compiling. # These additional flags should squelch those warnings. # TODO: Feel free to remove this if/when a Cython update # removes use of the deprecated Numpy API if compiler == 'unix': extension.extra_compile_args.extend([ '-Wp,-w', '-Wno-unused-function'])
[ "def", "_check_cython_sources", "(", "self", ",", "extension", ")", ":", "# Determine the compiler we'll be using", "if", "self", ".", "compiler", "is", "None", ":", "compiler", "=", "get_default_compiler", "(", ")", "else", ":", "compiler", "=", "self", ".", "c...
Where relevant, make sure that the .c files associated with .pyx modules are present (if building without Cython installed).
[ "Where", "relevant", "make", "sure", "that", "the", ".", "c", "files", "associated", "with", ".", "pyx", "modules", "are", "present", "(", "if", "building", "without", "Cython", "installed", ")", "." ]
python
train
pypa/setuptools
setuptools/monkey.py
https://github.com/pypa/setuptools/blob/83c667e0b2a98193851c07115d1af65011ed0fb6/setuptools/monkey.py#L104-L108
def _patch_distribution_metadata(): """Patch write_pkg_file and read_pkg_file for higher metadata standards""" for attr in ('write_pkg_file', 'read_pkg_file', 'get_metadata_version'): new_val = getattr(setuptools.dist, attr) setattr(distutils.dist.DistributionMetadata, attr, new_val)
[ "def", "_patch_distribution_metadata", "(", ")", ":", "for", "attr", "in", "(", "'write_pkg_file'", ",", "'read_pkg_file'", ",", "'get_metadata_version'", ")", ":", "new_val", "=", "getattr", "(", "setuptools", ".", "dist", ",", "attr", ")", "setattr", "(", "d...
Patch write_pkg_file and read_pkg_file for higher metadata standards
[ "Patch", "write_pkg_file", "and", "read_pkg_file", "for", "higher", "metadata", "standards" ]
python
train
robotools/fontParts
Lib/fontParts/world.py
https://github.com/robotools/fontParts/blob/d2ff106fe95f9d566161d936a645157626568712/Lib/fontParts/world.py#L420-L435
def getFontsByFontInfoAttribute(self, *attributeValuePairs): """ Get a list of fonts that match the (attribute, value) combinations in ``attributeValuePairs``. :: >>> subFonts = fonts.getFontsByFontInfoAttribute(("xHeight", 20)) >>> subFonts = fonts.getFontsByFontInfoAttribute(("xHeight", 20), ("descender", -150)) This will return an instance of :class:`BaseFontList`. """ found = self for attr, value in attributeValuePairs: found = self._matchFontInfoAttributes(found, (attr, value)) return found
[ "def", "getFontsByFontInfoAttribute", "(", "self", ",", "*", "attributeValuePairs", ")", ":", "found", "=", "self", "for", "attr", ",", "value", "in", "attributeValuePairs", ":", "found", "=", "self", ".", "_matchFontInfoAttributes", "(", "found", ",", "(", "a...
Get a list of fonts that match the (attribute, value) combinations in ``attributeValuePairs``. :: >>> subFonts = fonts.getFontsByFontInfoAttribute(("xHeight", 20)) >>> subFonts = fonts.getFontsByFontInfoAttribute(("xHeight", 20), ("descender", -150)) This will return an instance of :class:`BaseFontList`.
[ "Get", "a", "list", "of", "fonts", "that", "match", "the", "(", "attribute", "value", ")", "combinations", "in", "attributeValuePairs", "." ]
python
train
Jajcus/pyxmpp2
pyxmpp2/roster.py
https://github.com/Jajcus/pyxmpp2/blob/14a40a3950910a9cd008b55f0d8905aa0186ce18/pyxmpp2/roster.py#L886-L904
def remove_item(self, jid, callback = None, error_callback = None): """Remove a contact from the roster. :Parameters: - `jid`: contact's jid - `callback`: function to call when the request succeeds. It should accept a single argument - a `RosterItem` describing the requested change - `error_callback`: function to call when the request fails. It should accept a single argument - an error stanza received (`None` in case of timeout) :Types: - `jid`: `JID` """ item = self.roster[jid] if jid not in self.roster: raise KeyError(jid) item = RosterItem(jid, subscription = "remove") self._roster_set(item, callback, error_callback)
[ "def", "remove_item", "(", "self", ",", "jid", ",", "callback", "=", "None", ",", "error_callback", "=", "None", ")", ":", "item", "=", "self", ".", "roster", "[", "jid", "]", "if", "jid", "not", "in", "self", ".", "roster", ":", "raise", "KeyError",...
Remove a contact from the roster. :Parameters: - `jid`: contact's jid - `callback`: function to call when the request succeeds. It should accept a single argument - a `RosterItem` describing the requested change - `error_callback`: function to call when the request fails. It should accept a single argument - an error stanza received (`None` in case of timeout) :Types: - `jid`: `JID`
[ "Remove", "a", "contact", "from", "the", "roster", "." ]
python
valid
deepmind/sonnet
sonnet/python/custom_getters/restore_initializer.py
https://github.com/deepmind/sonnet/blob/00612ca3178964d86b556e062694d808ff81fcca/sonnet/python/custom_getters/restore_initializer.py#L26-L75
def restore_initializer(filename, name_fn=None, collection=tf.GraphKeys.GLOBAL_VARIABLES): """Custom getter to restore all variables with `snt.restore_initializer`. Args: filename: The filename of the checkpoint. name_fn: A function which can map the name of the variable requested. This allows restoring variables with values having different names in the checkpoint. collection: Only set the restore initializer for variables in this collection. If `None`, it will attempt to restore all variables. By default `tf.GraphKeys.GLOBAL_VARIABLES`. Returns: A restore_initializer custom getter, which is a function taking arguments (getter, name, *args, **kwargs). """ def _restore_initializer(getter, name, *args, **kwargs): """Gets variable with restore initializer.""" # Work out what collections this variable will go in. collections = kwargs["collections"] if collections is None: collections = [tf.GraphKeys.GLOBAL_VARIABLES] if (kwargs["trainable"] and tf.GraphKeys.TRAINABLE_VARIABLES not in collections): collections += [tf.GraphKeys.TRAINABLE_VARIABLES] if collection is None or collection in collections: # We don't make use of the 'scope' argument for restore_initializer as we # might want to change the name in more complex ways, such as removing the # scope prefix as well. if name_fn is not None: var_name_in_checkpoint = name_fn(name) else: var_name_in_checkpoint = name tf.logging.info("Restoring '%s' from '%s' into variable '%s'", var_name_in_checkpoint, filename, name) kwargs["initializer"] = snt.restore_initializer( filename, var_name_in_checkpoint, scope="") return getter(name, *args, **kwargs) return _restore_initializer
[ "def", "restore_initializer", "(", "filename", ",", "name_fn", "=", "None", ",", "collection", "=", "tf", ".", "GraphKeys", ".", "GLOBAL_VARIABLES", ")", ":", "def", "_restore_initializer", "(", "getter", ",", "name", ",", "*", "args", ",", "*", "*", "kwar...
Custom getter to restore all variables with `snt.restore_initializer`. Args: filename: The filename of the checkpoint. name_fn: A function which can map the name of the variable requested. This allows restoring variables with values having different names in the checkpoint. collection: Only set the restore initializer for variables in this collection. If `None`, it will attempt to restore all variables. By default `tf.GraphKeys.GLOBAL_VARIABLES`. Returns: A restore_initializer custom getter, which is a function taking arguments (getter, name, *args, **kwargs).
[ "Custom", "getter", "to", "restore", "all", "variables", "with", "snt", ".", "restore_initializer", "." ]
python
train
saltstack/salt
salt/modules/yumpkg.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/yumpkg.py#L156-L170
def _call_yum(args, **kwargs): ''' Call yum/dnf. ''' params = {'output_loglevel': 'trace', 'python_shell': False, 'env': salt.utils.environment.get_module_environment(globals())} params.update(kwargs) cmd = [] if salt.utils.systemd.has_scope(__context__) and __salt__['config.get']('systemd.scope', True): cmd.extend(['systemd-run', '--scope']) cmd.append(_yum()) cmd.extend(args) return __salt__['cmd.run_all'](cmd, **params)
[ "def", "_call_yum", "(", "args", ",", "*", "*", "kwargs", ")", ":", "params", "=", "{", "'output_loglevel'", ":", "'trace'", ",", "'python_shell'", ":", "False", ",", "'env'", ":", "salt", ".", "utils", ".", "environment", ".", "get_module_environment", "(...
Call yum/dnf.
[ "Call", "yum", "/", "dnf", "." ]
python
train
gabstopper/smc-python
smc/elements/other.py
https://github.com/gabstopper/smc-python/blob/e027b8a5dcfaf884eada32d113d41c1e56b32457/smc/elements/other.py#L64-L83
def add_element(self, element): """ Element can be href or type :py:class:`smc.base.model.Element` :: >>> from smc.elements.other import Category >>> category = Category('foo') >>> category.add_element(Host('kali')) :param str,Element element: element to add to tag :raises: ModificationFailed: failed adding element :return: None """ element = element_resolver(element) self.make_request( ModificationFailed, method='create', resource='category_add_element', json={'value': element})
[ "def", "add_element", "(", "self", ",", "element", ")", ":", "element", "=", "element_resolver", "(", "element", ")", "self", ".", "make_request", "(", "ModificationFailed", ",", "method", "=", "'create'", ",", "resource", "=", "'category_add_element'", ",", "...
Element can be href or type :py:class:`smc.base.model.Element` :: >>> from smc.elements.other import Category >>> category = Category('foo') >>> category.add_element(Host('kali')) :param str,Element element: element to add to tag :raises: ModificationFailed: failed adding element :return: None
[ "Element", "can", "be", "href", "or", "type", ":", "py", ":", "class", ":", "smc", ".", "base", ".", "model", ".", "Element", "::" ]
python
train
Kortemme-Lab/pull_into_place
pull_into_place/structures.py
https://github.com/Kortemme-Lab/pull_into_place/blob/247f303100a612cc90cf31c86e4fe5052eb28c8d/pull_into_place/structures.py#L564-L572
def angle(array_of_xyzs): """ Calculates angle between three coordinate points (I could not find a package that does this but if one exists that would probably be better). Used for Angle constraints. """ ab = array_of_xyzs[0] - array_of_xyzs[1] cb = array_of_xyzs[2] - array_of_xyzs[1] return np.arccos((np.dot(ab,cb)) / (np.sqrt(ab[0]**2 + ab[1]**2 \ + ab[2]**2) * np.sqrt(cb[0]**2 + cb[1]**2 + cb[2]**2)))
[ "def", "angle", "(", "array_of_xyzs", ")", ":", "ab", "=", "array_of_xyzs", "[", "0", "]", "-", "array_of_xyzs", "[", "1", "]", "cb", "=", "array_of_xyzs", "[", "2", "]", "-", "array_of_xyzs", "[", "1", "]", "return", "np", ".", "arccos", "(", "(", ...
Calculates angle between three coordinate points (I could not find a package that does this but if one exists that would probably be better). Used for Angle constraints.
[ "Calculates", "angle", "between", "three", "coordinate", "points", "(", "I", "could", "not", "find", "a", "package", "that", "does", "this", "but", "if", "one", "exists", "that", "would", "probably", "be", "better", ")", ".", "Used", "for", "Angle", "const...
python
train
bwohlberg/sporco
sporco/admm/cbpdn.py
https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/admm/cbpdn.py#L1733-L1748
def cnst_A0T(self, Y0): r"""Compute :math:`A_0^T \mathbf{y}_0` component of :math:`A^T \mathbf{y}` (see :meth:`.ADMMTwoBlockCnstrnt.cnst_AT`). """ # This calculation involves non-negligible computational cost. It # should be possible to disable relevant diagnostic information # (dual residual) to avoid this cost. Y0f = sl.rfftn(Y0, None, self.cri.axisN) if self.cri.Cd == 1: return sl.irfftn(np.conj(self.Df) * Y0f, self.cri.Nv, self.cri.axisN) else: return sl.irfftn(sl.inner( np.conj(self.Df), Y0f, axis=self.cri.axisC), self.cri.Nv, self.cri.axisN)
[ "def", "cnst_A0T", "(", "self", ",", "Y0", ")", ":", "# This calculation involves non-negligible computational cost. It", "# should be possible to disable relevant diagnostic information", "# (dual residual) to avoid this cost.", "Y0f", "=", "sl", ".", "rfftn", "(", "Y0", ",", ...
r"""Compute :math:`A_0^T \mathbf{y}_0` component of :math:`A^T \mathbf{y}` (see :meth:`.ADMMTwoBlockCnstrnt.cnst_AT`).
[ "r", "Compute", ":", "math", ":", "A_0^T", "\\", "mathbf", "{", "y", "}", "_0", "component", "of", ":", "math", ":", "A^T", "\\", "mathbf", "{", "y", "}", "(", "see", ":", "meth", ":", ".", "ADMMTwoBlockCnstrnt", ".", "cnst_AT", ")", "." ]
python
train
ecell/ecell4
ecell4/util/ports.py
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/util/ports.py#L377-L411
def load_sbml(filename): """ Load a model from a SBML file. Parameters ---------- filename : str The input SBML filename. Returns ------- model : NetworkModel y0 : dict Initial condition. volume : Real or Real3, optional A size of the simulation volume. """ import libsbml document = libsbml.readSBML(filename) document.validateSBML() num_errors = (document.getNumErrors(libsbml.LIBSBML_SEV_ERROR) + document.getNumErrors(libsbml.LIBSBML_SEV_FATAL)) if num_errors > 0: messages = "The generated document is not valid." messages += " {} errors were found:\n".format(num_errors) for i in range(document.getNumErrors(libsbml.LIBSBML_SEV_ERROR)): err = document.getErrorWithSeverity(i, libsbml.LIBSBML_SEV_ERROR) messages += "{}: {}\n".format(err.getSeverityAsString(), err.getShortMessage()) for i in range(document.getNumErrors(libsbml.LIBSBML_SEV_FATAL)): err = document.getErrorWithSeverity(i, libsbml.LIBSBML_SEV_FATAL) messages += "{}: {}\n".format(err.getSeverityAsString(), err.getShortMessage()) raise RuntimeError(messages) return import_sbml(document)
[ "def", "load_sbml", "(", "filename", ")", ":", "import", "libsbml", "document", "=", "libsbml", ".", "readSBML", "(", "filename", ")", "document", ".", "validateSBML", "(", ")", "num_errors", "=", "(", "document", ".", "getNumErrors", "(", "libsbml", ".", ...
Load a model from a SBML file. Parameters ---------- filename : str The input SBML filename. Returns ------- model : NetworkModel y0 : dict Initial condition. volume : Real or Real3, optional A size of the simulation volume.
[ "Load", "a", "model", "from", "a", "SBML", "file", "." ]
python
train
ajk8/hatchery
hatchery/project.py
https://github.com/ajk8/hatchery/blob/e068c9f5366d2c98225babb03d4cde36c710194f/hatchery/project.py#L45-L48
def package_has_version_file(package_name): """ Check to make sure _version.py is contained in the package """ version_file_path = helpers.package_file_path('_version.py', package_name) return os.path.isfile(version_file_path)
[ "def", "package_has_version_file", "(", "package_name", ")", ":", "version_file_path", "=", "helpers", ".", "package_file_path", "(", "'_version.py'", ",", "package_name", ")", "return", "os", ".", "path", ".", "isfile", "(", "version_file_path", ")" ]
Check to make sure _version.py is contained in the package
[ "Check", "to", "make", "sure", "_version", ".", "py", "is", "contained", "in", "the", "package" ]
python
train
bram85/topydo
topydo/lib/Config.py
https://github.com/bram85/topydo/blob/b59fcfca5361869a6b78d4c9808c7c6cd0a18b58/topydo/lib/Config.py#L337-L359
def priority_color(self, p_priority): """ Returns a dict with priorities as keys and color numbers as value. """ def _str_to_dict(p_string): pri_colors_dict = dict() for pri_color in p_string.split(','): pri, color = pri_color.split(':') pri_colors_dict[pri] = Color(color) return pri_colors_dict try: pri_colors_str = self.cp.get('colorscheme', 'priority_colors') if pri_colors_str == '': pri_colors_dict = _str_to_dict('A:-1,B:-1,C:-1') else: pri_colors_dict = _str_to_dict(pri_colors_str) except ValueError: pri_colors_dict = _str_to_dict(self.defaults['colorscheme']['priority_colors']) return pri_colors_dict[p_priority] if p_priority in pri_colors_dict else Color('NEUTRAL')
[ "def", "priority_color", "(", "self", ",", "p_priority", ")", ":", "def", "_str_to_dict", "(", "p_string", ")", ":", "pri_colors_dict", "=", "dict", "(", ")", "for", "pri_color", "in", "p_string", ".", "split", "(", "','", ")", ":", "pri", ",", "color", ...
Returns a dict with priorities as keys and color numbers as value.
[ "Returns", "a", "dict", "with", "priorities", "as", "keys", "and", "color", "numbers", "as", "value", "." ]
python
train
mediawiki-utilities/python-mwapi
mwapi/cli.py
https://github.com/mediawiki-utilities/python-mwapi/blob/7a653c29207ecd318ae4b369d398aed13f26951d/mwapi/cli.py#L16-L42
def do_login(session, for_what): """ Performs a login handshake with a user on the command-line. This method will handle all of the follow-up requests (e.g. capcha or two-factor). A login that requires two-factor looks like this:: >>> import mwapi.cli >>> import mwapi >>> mwapi.cli.do_login(mwapi.Session("https://en.wikipedia.org"), "English Wikipedia") Log into English Wikipedia Username: Halfak (WMF) Passord: Please enter verification code from your mobile app Token(OATHToken): 234567 :Parameters: session : :class:`mwapi.Session` A session object to use for login for_what : `str` A name to display to the use (for what they are logging into) """ # noqa username, password = request_username_password(for_what) try: session.login(username, password) except ClientInteractionRequest as cir: params = request_interaction(cir) session.continue_login(cir.login_token, **params)
[ "def", "do_login", "(", "session", ",", "for_what", ")", ":", "# noqa", "username", ",", "password", "=", "request_username_password", "(", "for_what", ")", "try", ":", "session", ".", "login", "(", "username", ",", "password", ")", "except", "ClientInteractio...
Performs a login handshake with a user on the command-line. This method will handle all of the follow-up requests (e.g. capcha or two-factor). A login that requires two-factor looks like this:: >>> import mwapi.cli >>> import mwapi >>> mwapi.cli.do_login(mwapi.Session("https://en.wikipedia.org"), "English Wikipedia") Log into English Wikipedia Username: Halfak (WMF) Passord: Please enter verification code from your mobile app Token(OATHToken): 234567 :Parameters: session : :class:`mwapi.Session` A session object to use for login for_what : `str` A name to display to the use (for what they are logging into)
[ "Performs", "a", "login", "handshake", "with", "a", "user", "on", "the", "command", "-", "line", ".", "This", "method", "will", "handle", "all", "of", "the", "follow", "-", "up", "requests", "(", "e", ".", "g", ".", "capcha", "or", "two", "-", "facto...
python
train
pypa/pipenv
pipenv/patched/notpip/_internal/utils/misc.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/notpip/_internal/utils/misc.py#L1014-L1040
def protect_pip_from_modification_on_windows(modifying_pip): """Protection of pip.exe from modification on Windows On Windows, any operation modifying pip should be run as: python -m pip ... """ pip_names = [ "pip.exe", "pip{}.exe".format(sys.version_info[0]), "pip{}.{}.exe".format(*sys.version_info[:2]) ] # See https://github.com/pypa/pip/issues/1299 for more discussion should_show_use_python_msg = ( modifying_pip and WINDOWS and os.path.basename(sys.argv[0]) in pip_names ) if should_show_use_python_msg: new_command = [ sys.executable, "-m", "pip" ] + sys.argv[1:] raise CommandError( 'To modify pip, please run the following command:\n{}' .format(" ".join(new_command)) )
[ "def", "protect_pip_from_modification_on_windows", "(", "modifying_pip", ")", ":", "pip_names", "=", "[", "\"pip.exe\"", ",", "\"pip{}.exe\"", ".", "format", "(", "sys", ".", "version_info", "[", "0", "]", ")", ",", "\"pip{}.{}.exe\"", ".", "format", "(", "*", ...
Protection of pip.exe from modification on Windows On Windows, any operation modifying pip should be run as: python -m pip ...
[ "Protection", "of", "pip", ".", "exe", "from", "modification", "on", "Windows" ]
python
train
sendgrid/sendgrid-python
examples/helpers/mail_example.py
https://github.com/sendgrid/sendgrid-python/blob/266c2abde7a35dfcce263e06bedc6a0bbdebeac9/examples/helpers/mail_example.py#L82-L92
def build_attachment1(): """Build attachment mock. Make sure your content is base64 encoded before passing into attachment.content. Another example: https://github.com/sendgrid/sendgrid-python/blob/master/use_cases/attachment.md""" attachment = Attachment() attachment.content = ("TG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQsIGNvbnNl" "Y3RldHVyIGFkaXBpc2NpbmcgZWxpdC4gQ3JhcyBwdW12") attachment.type = "application/pdf" attachment.filename = "balance_001.pdf" attachment.disposition = "attachment" attachment.content_id = "Balance Sheet" return attachment
[ "def", "build_attachment1", "(", ")", ":", "attachment", "=", "Attachment", "(", ")", "attachment", ".", "content", "=", "(", "\"TG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQsIGNvbnNl\"", "\"Y3RldHVyIGFkaXBpc2NpbmcgZWxpdC4gQ3JhcyBwdW12\"", ")", "attachment", ".", "type", "=", "\"ap...
Build attachment mock. Make sure your content is base64 encoded before passing into attachment.content. Another example: https://github.com/sendgrid/sendgrid-python/blob/master/use_cases/attachment.md
[ "Build", "attachment", "mock", ".", "Make", "sure", "your", "content", "is", "base64", "encoded", "before", "passing", "into", "attachment", ".", "content", ".", "Another", "example", ":", "https", ":", "//", "github", ".", "com", "/", "sendgrid", "/", "se...
python
train
AmesCornish/buttersink
buttersink/progress.py
https://github.com/AmesCornish/buttersink/blob/5cc37e30d9f8071fcf3497dca8b8a91b910321ea/buttersink/progress.py#L53-L80
def _display(self, sent, now, chunk, mbps): """ Display intermediate progress. """ if self.parent is not None: self.parent._display(self.parent.offset + sent, now, chunk, mbps) return elapsed = now - self.startTime if sent > 0 and self.total is not None and sent <= self.total: eta = (self.total - sent) * elapsed.total_seconds() / sent eta = datetime.timedelta(seconds=eta) else: eta = None self.output.write( "\r %s: Sent %s%s%s ETA: %s (%s) %s%20s\r" % ( elapsed, util.humanize(sent), "" if self.total is None else " of %s" % (util.humanize(self.total),), "" if self.total is None else " (%d%%)" % (int(100 * sent / self.total),), eta, "" if not mbps else "%.3g Mbps " % (mbps,), chunk or "", " ", ) ) self.output.flush()
[ "def", "_display", "(", "self", ",", "sent", ",", "now", ",", "chunk", ",", "mbps", ")", ":", "if", "self", ".", "parent", "is", "not", "None", ":", "self", ".", "parent", ".", "_display", "(", "self", ".", "parent", ".", "offset", "+", "sent", "...
Display intermediate progress.
[ "Display", "intermediate", "progress", "." ]
python
train
inveniosoftware/invenio-github
invenio_github/api.py
https://github.com/inveniosoftware/invenio-github/blob/ec42fd6a06079310dcbe2c46d9fd79d5197bbe26/invenio_github/api.py#L224-L232
def check_sync(self): """Check if sync is required based on last sync date.""" # If refresh interval is not specified, we should refresh every time. expiration = utcnow() refresh_td = current_app.config.get('GITHUB_REFRESH_TIMEDELTA') if refresh_td: expiration -= refresh_td last_sync = parse_timestamp(self.account.extra_data['last_sync']) return last_sync < expiration
[ "def", "check_sync", "(", "self", ")", ":", "# If refresh interval is not specified, we should refresh every time.", "expiration", "=", "utcnow", "(", ")", "refresh_td", "=", "current_app", ".", "config", ".", "get", "(", "'GITHUB_REFRESH_TIMEDELTA'", ")", "if", "refres...
Check if sync is required based on last sync date.
[ "Check", "if", "sync", "is", "required", "based", "on", "last", "sync", "date", "." ]
python
train
TkTech/Jawa
jawa/attributes/code.py
https://github.com/TkTech/Jawa/blob/94c8424e699029ac33fbc0e866fff0ecb2742289/jawa/attributes/code.py#L70-L91
def unpack(self, info): """ Read the CodeAttribute from the byte string `info`. .. note:: Advanced usage only. You will typically never need to call this method as it will be called for you when loading a ClassFile. :param info: A byte string containing an unparsed CodeAttribute. """ self.max_stack, self.max_locals, c_len = info.unpack('>HHI') self._code = info.read(c_len) # The exception table ex_table_len = info.u2() for _ in repeat(None, ex_table_len): self.exception_table.append(CodeException( *info.unpack('>HHHH') )) self.attributes = AttributeTable(self.cf, parent=self) self.attributes.unpack(info)
[ "def", "unpack", "(", "self", ",", "info", ")", ":", "self", ".", "max_stack", ",", "self", ".", "max_locals", ",", "c_len", "=", "info", ".", "unpack", "(", "'>HHI'", ")", "self", ".", "_code", "=", "info", ".", "read", "(", "c_len", ")", "# The e...
Read the CodeAttribute from the byte string `info`. .. note:: Advanced usage only. You will typically never need to call this method as it will be called for you when loading a ClassFile. :param info: A byte string containing an unparsed CodeAttribute.
[ "Read", "the", "CodeAttribute", "from", "the", "byte", "string", "info", "." ]
python
train
ibis-project/ibis
ibis/sql/mysql/client.py
https://github.com/ibis-project/ibis/blob/1e39a5fd9ef088b45c155e8a5f541767ee8ef2e7/ibis/sql/mysql/client.py#L108-L141
def database(self, name=None): """Connect to a database called `name`. Parameters ---------- name : str, optional The name of the database to connect to. If ``None``, return the database named ``self.current_database``. Returns ------- db : MySQLDatabase An :class:`ibis.sql.mysql.client.MySQLDatabase` instance. Notes ----- This creates a new connection if `name` is both not ``None`` and not equal to the current database. """ if name == self.current_database or ( name is None and name != self.current_database ): return self.database_class(self.current_database, self) else: url = self.con.url client_class = type(self) new_client = client_class( host=url.host, user=url.username, port=url.port, password=url.password, database=name, ) return self.database_class(name, new_client)
[ "def", "database", "(", "self", ",", "name", "=", "None", ")", ":", "if", "name", "==", "self", ".", "current_database", "or", "(", "name", "is", "None", "and", "name", "!=", "self", ".", "current_database", ")", ":", "return", "self", ".", "database_c...
Connect to a database called `name`. Parameters ---------- name : str, optional The name of the database to connect to. If ``None``, return the database named ``self.current_database``. Returns ------- db : MySQLDatabase An :class:`ibis.sql.mysql.client.MySQLDatabase` instance. Notes ----- This creates a new connection if `name` is both not ``None`` and not equal to the current database.
[ "Connect", "to", "a", "database", "called", "name", "." ]
python
train
rwl/pylon
pylon/opf.py
https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/opf.py#L243-L255
def _get_voltage_magnitude_var(self, buses, generators): """ Returns the voltage magnitude variable set. """ Vm = array([b.v_magnitude for b in buses]) # For buses with generators initialise Vm from gen data. for g in generators: Vm[g.bus._i] = g.v_magnitude Vmin = array([b.v_min for b in buses]) Vmax = array([b.v_max for b in buses]) return Variable("Vm", len(buses), Vm, Vmin, Vmax)
[ "def", "_get_voltage_magnitude_var", "(", "self", ",", "buses", ",", "generators", ")", ":", "Vm", "=", "array", "(", "[", "b", ".", "v_magnitude", "for", "b", "in", "buses", "]", ")", "# For buses with generators initialise Vm from gen data.", "for", "g", "in",...
Returns the voltage magnitude variable set.
[ "Returns", "the", "voltage", "magnitude", "variable", "set", "." ]
python
train
dnanexus/dx-toolkit
src/python/dxpy/bindings/dxproject.py
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxproject.py#L163-L181
def move_folder(self, folder, destination, **kwargs): """ :param folder: Full path to the folder to move :type folder: string :param destination: Full path to the destination folder that will contain *folder* :type destination: string Moves *folder* to reside in *destination* in the same project or container. All objects and subfolders inside *folder* are also moved. """ api_method = dxpy.api.container_move if isinstance(self, DXProject): api_method = dxpy.api.project_move api_method(self._dxid, {"folders": [folder], "destination": destination}, **kwargs)
[ "def", "move_folder", "(", "self", ",", "folder", ",", "destination", ",", "*", "*", "kwargs", ")", ":", "api_method", "=", "dxpy", ".", "api", ".", "container_move", "if", "isinstance", "(", "self", ",", "DXProject", ")", ":", "api_method", "=", "dxpy",...
:param folder: Full path to the folder to move :type folder: string :param destination: Full path to the destination folder that will contain *folder* :type destination: string Moves *folder* to reside in *destination* in the same project or container. All objects and subfolders inside *folder* are also moved.
[ ":", "param", "folder", ":", "Full", "path", "to", "the", "folder", "to", "move", ":", "type", "folder", ":", "string", ":", "param", "destination", ":", "Full", "path", "to", "the", "destination", "folder", "that", "will", "contain", "*", "folder", "*",...
python
train
astroduff/commah
commah/commah.py
https://github.com/astroduff/commah/blob/3ec70338c5123a053c79ddcf2cb3beac26bc9137/commah/commah.py#L349-L397
def calc_ab(zi, Mi, **cosmo): """ Calculate growth rate indices a_tilde and b_tilde Parameters ---------- zi : float Redshift Mi : float Halo mass at redshift 'zi' cosmo : dict Dictionary of cosmological parameters, similar in format to: {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275, 'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0, 'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6} Returns ------- (a_tilde, b_tilde) : float """ # When zi = 0, the a_tilde becomes alpha and b_tilde becomes beta # Eqn 23 of Correa et al 2015a (analytically solve from Eqn 16 and 17) # Arbitray formation redshift, z_-2 in COM is more physically motivated zf = -0.0064 * (np.log10(Mi))**2 + 0.0237 * (np.log10(Mi)) + 1.8837 # Eqn 22 of Correa et al 2015a q = 4.137 * zf**(-0.9476) # Radius of a mass Mi R_Mass = cp.perturbation.mass_to_radius(Mi, **cosmo) # [Mpc] # Radius of a mass Mi/q Rq_Mass = cp.perturbation.mass_to_radius(Mi/q, **cosmo) # [Mpc] # Mass variance 'sigma' evaluate at z=0 to a good approximation sig, err_sig = cp.perturbation.sigma_r(R_Mass, 0, **cosmo) # [Mpc] sigq, err_sigq = cp.perturbation.sigma_r(Rq_Mass, 0, **cosmo) # [Mpc] f = (sigq**2 - sig**2)**(-0.5) # Eqn 9 and 10 from Correa et al 2015c # (generalised to zi from Correa et al 2015a's z=0 special case) # a_tilde is power law growth rate a_tilde = (np.sqrt(2/np.pi) * 1.686 * _deriv_growth(zi, **cosmo) / growthfactor(zi, norm=True, **cosmo)**2 + 1)*f # b_tilde is exponential growth rate b_tilde = -f return(a_tilde, b_tilde)
[ "def", "calc_ab", "(", "zi", ",", "Mi", ",", "*", "*", "cosmo", ")", ":", "# When zi = 0, the a_tilde becomes alpha and b_tilde becomes beta", "# Eqn 23 of Correa et al 2015a (analytically solve from Eqn 16 and 17)", "# Arbitray formation redshift, z_-2 in COM is more physically motivate...
Calculate growth rate indices a_tilde and b_tilde Parameters ---------- zi : float Redshift Mi : float Halo mass at redshift 'zi' cosmo : dict Dictionary of cosmological parameters, similar in format to: {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275, 'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0, 'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6} Returns ------- (a_tilde, b_tilde) : float
[ "Calculate", "growth", "rate", "indices", "a_tilde", "and", "b_tilde" ]
python
train
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_1/git/git_client_base.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_1/git/git_client_base.py#L2154-L2176
def update_pull_request(self, git_pull_request_to_update, repository_id, pull_request_id, project=None): """UpdatePullRequest. [Preview API] Update a pull request :param :class:`<GitPullRequest> <azure.devops.v5_1.git.models.GitPullRequest>` git_pull_request_to_update: The pull request content that should be updated. :param str repository_id: The repository ID of the pull request's target branch. :param int pull_request_id: ID of the pull request to update. :param str project: Project ID or project name :rtype: :class:`<GitPullRequest> <azure.devops.v5_1.git.models.GitPullRequest>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if repository_id is not None: route_values['repositoryId'] = self._serialize.url('repository_id', repository_id, 'str') if pull_request_id is not None: route_values['pullRequestId'] = self._serialize.url('pull_request_id', pull_request_id, 'int') content = self._serialize.body(git_pull_request_to_update, 'GitPullRequest') response = self._send(http_method='PATCH', location_id='9946fd70-0d40-406e-b686-b4744cbbcc37', version='5.1-preview.1', route_values=route_values, content=content) return self._deserialize('GitPullRequest', response)
[ "def", "update_pull_request", "(", "self", ",", "git_pull_request_to_update", ",", "repository_id", ",", "pull_request_id", ",", "project", "=", "None", ")", ":", "route_values", "=", "{", "}", "if", "project", "is", "not", "None", ":", "route_values", "[", "'...
UpdatePullRequest. [Preview API] Update a pull request :param :class:`<GitPullRequest> <azure.devops.v5_1.git.models.GitPullRequest>` git_pull_request_to_update: The pull request content that should be updated. :param str repository_id: The repository ID of the pull request's target branch. :param int pull_request_id: ID of the pull request to update. :param str project: Project ID or project name :rtype: :class:`<GitPullRequest> <azure.devops.v5_1.git.models.GitPullRequest>`
[ "UpdatePullRequest", ".", "[", "Preview", "API", "]", "Update", "a", "pull", "request", ":", "param", ":", "class", ":", "<GitPullRequest", ">", "<azure", ".", "devops", ".", "v5_1", ".", "git", ".", "models", ".", "GitPullRequest", ">", "git_pull_request_to...
python
train
google/grr
grr/core/grr_response_core/lib/communicator.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/core/grr_response_core/lib/communicator.py#L322-L399
def EncodeMessages(self, message_list, result, destination=None, timestamp=None, api_version=3): """Accepts a list of messages and encodes for transmission. This function signs and then encrypts the payload. Args: message_list: A MessageList rdfvalue containing a list of GrrMessages. result: A ClientCommunication rdfvalue which will be filled in. destination: The CN of the remote system this should go to. timestamp: A timestamp to use for the signed messages. If None - use the current time. api_version: The api version which this should be encoded in. Returns: A nonce (based on time) which is inserted to the encrypted payload. The client can verify that the server is able to decrypt the message and return the nonce. Raises: RuntimeError: If we do not support this api version. """ if api_version not in [3]: raise RuntimeError( "Unsupported api version: %s, expected 3." % api_version) # TODO(amoser): This is actually not great, we have two # communicator classes already, one for the client, one for the # server. This should be different methods, not a single one that # gets passed a destination (server side) or not (client side). if destination is None: destination = self.server_name # For the client it makes sense to cache the server cipher since # it's the only cipher it ever uses. cipher = self._GetServerCipher() else: remote_public_key = self._GetRemotePublicKey(destination) cipher = Cipher(self.common_name, self.private_key, remote_public_key) # Make a nonce for this transaction if timestamp is None: self.timestamp = timestamp = int(time.time() * 1000000) packed_message_list = rdf_flows.PackedMessageList(timestamp=timestamp) self.EncodeMessageList(message_list, packed_message_list) result.encrypted_cipher_metadata = cipher.encrypted_cipher_metadata # Include the encrypted cipher. result.encrypted_cipher = cipher.encrypted_cipher serialized_message_list = packed_message_list.SerializeToString() # Encrypt the message symmetrically. 
# New scheme cipher is signed plus hmac over message list. result.packet_iv, result.encrypted = cipher.Encrypt(serialized_message_list) # This is to support older endpoints. result.hmac = cipher.HMAC(result.encrypted) # Newer endpoints only look at this HMAC. It is recalculated for each packet # in the session. Note that encrypted_cipher and encrypted_cipher_metadata # do not change between all packets in this session. result.full_hmac = cipher.HMAC(result.encrypted, result.encrypted_cipher, result.encrypted_cipher_metadata, result.packet_iv.SerializeToString(), struct.pack("<I", api_version)) result.api_version = api_version if isinstance(result, rdfvalue.RDFValue): # Store the number of messages contained. result.num_messages = len(message_list) return timestamp
[ "def", "EncodeMessages", "(", "self", ",", "message_list", ",", "result", ",", "destination", "=", "None", ",", "timestamp", "=", "None", ",", "api_version", "=", "3", ")", ":", "if", "api_version", "not", "in", "[", "3", "]", ":", "raise", "RuntimeError...
Accepts a list of messages and encodes for transmission. This function signs and then encrypts the payload. Args: message_list: A MessageList rdfvalue containing a list of GrrMessages. result: A ClientCommunication rdfvalue which will be filled in. destination: The CN of the remote system this should go to. timestamp: A timestamp to use for the signed messages. If None - use the current time. api_version: The api version which this should be encoded in. Returns: A nonce (based on time) which is inserted to the encrypted payload. The client can verify that the server is able to decrypt the message and return the nonce. Raises: RuntimeError: If we do not support this api version.
[ "Accepts", "a", "list", "of", "messages", "and", "encodes", "for", "transmission", "." ]
python
train
chimera0/accel-brain-code
Automatic-Summarization/pysummarization/nlp_base.py
https://github.com/chimera0/accel-brain-code/blob/03661f6f544bed656269fcd4b3c23c9061629daa/Automatic-Summarization/pysummarization/nlp_base.py#L21-L26
def set_tokenizable_doc(self, value): ''' setter ''' if isinstance(value, TokenizableDoc): self.__tokenizable_doc = value else: raise TypeError()
[ "def", "set_tokenizable_doc", "(", "self", ",", "value", ")", ":", "if", "isinstance", "(", "value", ",", "TokenizableDoc", ")", ":", "self", ".", "__tokenizable_doc", "=", "value", "else", ":", "raise", "TypeError", "(", ")" ]
setter
[ "setter" ]
python
train
apache/airflow
airflow/utils/log/gcs_task_handler.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/log/gcs_task_handler.py#L166-L177
def parse_gcs_url(gsurl): """ Given a Google Cloud Storage URL (gs://<bucket>/<blob>), returns a tuple containing the corresponding bucket and blob. """ parsed_url = urlparse(gsurl) if not parsed_url.netloc: raise AirflowException('Please provide a bucket name') else: bucket = parsed_url.netloc blob = parsed_url.path.strip('/') return bucket, blob
[ "def", "parse_gcs_url", "(", "gsurl", ")", ":", "parsed_url", "=", "urlparse", "(", "gsurl", ")", "if", "not", "parsed_url", ".", "netloc", ":", "raise", "AirflowException", "(", "'Please provide a bucket name'", ")", "else", ":", "bucket", "=", "parsed_url", ...
Given a Google Cloud Storage URL (gs://<bucket>/<blob>), returns a tuple containing the corresponding bucket and blob.
[ "Given", "a", "Google", "Cloud", "Storage", "URL", "(", "gs", ":", "//", "<bucket", ">", "/", "<blob", ">", ")", "returns", "a", "tuple", "containing", "the", "corresponding", "bucket", "and", "blob", "." ]
python
test
openstack/networking-cisco
networking_cisco/apps/saf/server/cisco_dfa_rest.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/cisco_dfa_rest.py#L106-L119
def set_segmentid_range(self, orchestrator_id, segid_min, segid_max): """set segment id range in DCNM. """ url = self._segmentid_ranges_url payload = {'orchestratorId': orchestrator_id, 'segmentIdRanges': "%s-%s" % (segid_min, segid_max)} res = self._send_request('POST', url, payload, 'segment-id range') if not (res and res.status_code in self._resp_ok): LOG.error("Failed to set segment id range for orchestrator " "%(orch)s on DCNM: %(text)s", {'orch': orchestrator_id, 'text': res.text}) raise dexc.DfaClientRequestFailed(reason=self._failure_msg(res))
[ "def", "set_segmentid_range", "(", "self", ",", "orchestrator_id", ",", "segid_min", ",", "segid_max", ")", ":", "url", "=", "self", ".", "_segmentid_ranges_url", "payload", "=", "{", "'orchestratorId'", ":", "orchestrator_id", ",", "'segmentIdRanges'", ":", "\"%s...
set segment id range in DCNM.
[ "set", "segment", "id", "range", "in", "DCNM", "." ]
python
train
swharden/PyOriginTools
PyOriginTools/highlevel.py
https://github.com/swharden/PyOriginTools/blob/536fb8e11234ffdc27e26b1800e0358179ca7d26/PyOriginTools/highlevel.py#L318-L405
def sheetToHTML(sheet): """ Put 2d numpy data into a temporary HTML file. This is a hack, copy/pasted from an earlier version of this software. It is very messy, but works great! Good enough for me. """ assert "SHEET" in str(type(sheet)) #data,names=None,units=None,bookName=None,sheetName=None,xCol=None #sheet=OR.SHEET() data=sheet.data names=sheet.colDesc units=sheet.colUnits bookName=sheet.bookName sheetName=sheet.sheetName def htmlListToTR(l,trClass=None,tdClass=None,td1Class=None): """ turns a list into a <tr><td>something</td></tr> call this when generating HTML tables dynamically. """ html="<tr>" for item in l: html+="<td>%s</td>"%item html+="</tr>" if trClass: html=html.replace("<tr>",'<tr class="%s">'%trClass) if td1Class: html=html.replace("<td>",'<td class="%s">'%td1Class,1) if tdClass: html=html.replace("<td>",'<td class="%s">'%tdClass) return html htmlFname = os.path.expanduser("~")+"/WKS-%s.%s.html"%(bookName,sheetName) html="""<body> <style> body { background-color: #ababab; padding:20px; } table { font-size:12px; border-spacing: 0; border-collapse: collapse; } .name {background-color:#fafac8;text-align:center;} .units {background-color:#fafac8;text-align:center;} .data0 {background-color:#FFFFFF;font-family: monospace;text-align:center;} .data1 {background-color:#FAFAFA;font-family: monospace;text-align:center;} .labelRow {background-color:#e0dfe4; text-align:right;border:1px solid #000000;} .labelCol {background-color:#e0dfe4; text-align:center;border:1px solid #000000; padding-left: 20px; padding-right: 20px;} td { border:1px solid #c0c0c0; padding:5px; font-family: Arial, Helvetica, sans-serif; } </style> <html>""" html+="<h1>FauxRigin</h1>" if bookName or sheetName: html+='<code><b>%s / %s</b></code><br><br>'%(bookName,sheetName) html+="<table>" colNames=[''] for i in range(len(units)): shortName=chr(i%26+ord('A')) if i>=26: shortName=chr(int(i/26-1)+ord('A'))+shortName label="%s(%s)"%(shortName,"X" if sheet.colTypes[i]==3 else "Y") 
colNames.append(label) html+=htmlListToTR(colNames,'labelCol','labelCol') html+=htmlListToTR(['Long Name']+list(names),'name',td1Class='labelRow') html+=htmlListToTR(['Units']+list(units),'units',td1Class='labelRow') cutOff=False for y in range(len(data)): html+=htmlListToTR([y+1]+list(data[y]),trClass='data%d'%(y%2),td1Class='labelRow') if y>=200: cutOff=True break html+="</table>" html=html.replace(">nan<",">--<") html=html.replace(">None<","><") if cutOff: html+="<h3>... showing only %d of %d rows ...</h3>"%(y,len(data)) html+="</body></html>" with open(htmlFname,'w') as f: f.write(html) import webbrowser webbrowser.open(htmlFname) return
[ "def", "sheetToHTML", "(", "sheet", ")", ":", "assert", "\"SHEET\"", "in", "str", "(", "type", "(", "sheet", ")", ")", "#data,names=None,units=None,bookName=None,sheetName=None,xCol=None", "#sheet=OR.SHEET()", "data", "=", "sheet", ".", "data", "names", "=", "sheet"...
Put 2d numpy data into a temporary HTML file. This is a hack, copy/pasted from an earlier version of this software. It is very messy, but works great! Good enough for me.
[ "Put", "2d", "numpy", "data", "into", "a", "temporary", "HTML", "file", ".", "This", "is", "a", "hack", "copy", "/", "pasted", "from", "an", "earlier", "version", "of", "this", "software", ".", "It", "is", "very", "messy", "but", "works", "great!", "Go...
python
train
nickmckay/LiPD-utilities
Python/lipd/timeseries.py
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/timeseries.py#L69-L78
def _extract_fund(l, _root): """ Creates flat funding dictionary. :param list l: Funding entries """ logger_ts.info("enter _extract_funding") for idx, i in enumerate(l): for k, v in i.items(): _root['funding' + str(idx + 1) + '_' + k] = v return _root
[ "def", "_extract_fund", "(", "l", ",", "_root", ")", ":", "logger_ts", ".", "info", "(", "\"enter _extract_funding\"", ")", "for", "idx", ",", "i", "in", "enumerate", "(", "l", ")", ":", "for", "k", ",", "v", "in", "i", ".", "items", "(", ")", ":",...
Creates flat funding dictionary. :param list l: Funding entries
[ "Creates", "flat", "funding", "dictionary", ".", ":", "param", "list", "l", ":", "Funding", "entries" ]
python
train
google/grr
grr/server/grr_response_server/maintenance_utils.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/maintenance_utils.py#L40-L83
def UploadSignedConfigBlob(content, aff4_path, client_context=None, limit=None, token=None): """Upload a signed blob into the datastore. Args: content: File content to upload. aff4_path: aff4 path to upload to. client_context: The configuration contexts to use. limit: The maximum size of the chunk to use. token: A security token. Raises: IOError: On failure to write. """ if limit is None: limit = config.CONFIG["Datastore.maximum_blob_size"] # Get the values of these parameters which apply to the client running on the # target platform. if client_context is None: # Default to the windows client. client_context = ["Platform:Windows", "Client Context"] config.CONFIG.Validate( parameters="PrivateKeys.executable_signing_private_key") signing_key = config.CONFIG.Get( "PrivateKeys.executable_signing_private_key", context=client_context) verification_key = config.CONFIG.Get( "Client.executable_signing_public_key", context=client_context) signed_binary_utils.WriteSignedBinary( rdfvalue.RDFURN(aff4_path), content, signing_key, public_key=verification_key, chunk_size=limit, token=token) logging.info("Uploaded to %s", aff4_path)
[ "def", "UploadSignedConfigBlob", "(", "content", ",", "aff4_path", ",", "client_context", "=", "None", ",", "limit", "=", "None", ",", "token", "=", "None", ")", ":", "if", "limit", "is", "None", ":", "limit", "=", "config", ".", "CONFIG", "[", "\"Datast...
Upload a signed blob into the datastore. Args: content: File content to upload. aff4_path: aff4 path to upload to. client_context: The configuration contexts to use. limit: The maximum size of the chunk to use. token: A security token. Raises: IOError: On failure to write.
[ "Upload", "a", "signed", "blob", "into", "the", "datastore", "." ]
python
train
pseudo-lang/pseudo
pseudo/api_translator.py
https://github.com/pseudo-lang/pseudo/blob/d0856d13e01a646156d3363f8c1bf352e6ea6315/pseudo/api_translator.py#L248-L287
def _expand_api(self, api, receiver, args, pseudo_type, equivalent): ''' the heart of api translation dsl function or <z>(<arg>, ..) can be expanded, <z> can be just a name for a global function, or #name for method, <arg> can be %{self} for self or %{n} for nth arg ''' if callable(api): if receiver: return api(receiver, *(args + [pseudo_type])) else: return api(*(args + [pseudo_type])) elif isinstance(api, str): if '(' in api: call_api, arg_code = api[:-1].split('(') new_args = [self._parse_part( a.strip(), receiver, args, equivalent) for a in arg_code.split(',')] else: call_api, arg_code = api, '' new_args = args if '#' in call_api: a, b = call_api.split('#') method_receiver = self._parse_part( a, receiver, args, equivalent) if a else receiver return method_call(method_receiver, b, new_args, pseudo_type=pseudo_type) elif '.' in call_api: a, b = call_api.split('.') static_receiver = self._parse_part( a, receiver, args, equivalent) if a else receiver if b[-1] != '!': return Node('static_call', receiver=static_receiver, message=b, args=new_args, pseudo_type=pseudo_type) else: return Node('attr', object=static_receiver, attr=b[:-1], pseudo_type=pseudo_type) else: if receiver: return call(call_api, [receiver] + new_args, pseudo_type=pseudo_type) else: return call(call_api, new_args, pseudo_type=pseudo_type) else: raise PseudoDSLError('%s not supported by api dsl' % str(api))
[ "def", "_expand_api", "(", "self", ",", "api", ",", "receiver", ",", "args", ",", "pseudo_type", ",", "equivalent", ")", ":", "if", "callable", "(", "api", ")", ":", "if", "receiver", ":", "return", "api", "(", "receiver", ",", "*", "(", "args", "+",...
the heart of api translation dsl function or <z>(<arg>, ..) can be expanded, <z> can be just a name for a global function, or #name for method, <arg> can be %{self} for self or %{n} for nth arg
[ "the", "heart", "of", "api", "translation", "dsl" ]
python
train
sirfoga/pyhal
hal/meta/attributes.py
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/meta/attributes.py#L83-L93
def get_classes(self): """Finds classes in file :return: list of top-level classes """ instances = self._get_instances(ast.ClassDef) instances = [ PyClass(instance, self.package) for instance in instances ] return instances
[ "def", "get_classes", "(", "self", ")", ":", "instances", "=", "self", ".", "_get_instances", "(", "ast", ".", "ClassDef", ")", "instances", "=", "[", "PyClass", "(", "instance", ",", "self", ".", "package", ")", "for", "instance", "in", "instances", "]"...
Finds classes in file :return: list of top-level classes
[ "Finds", "classes", "in", "file" ]
python
train
rwl/pylon
pylon/dc_pf.py
https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/dc_pf.py#L126-L159
def _get_v_angle(self, case, B, v_angle_guess, p_businj, iref): """ Calculates the voltage phase angles. """ buses = case.connected_buses pv_idxs = [bus._i for bus in buses if bus.type == PV] pq_idxs = [bus._i for bus in buses if bus.type == PQ] pvpq_idxs = pv_idxs + pq_idxs pvpq_rows = [[i] for i in pvpq_idxs] # Get the susceptance matrix with the column and row corresponding to # the reference bus removed. Bpvpq = B[pvpq_rows, pvpq_idxs] Bref = B[pvpq_rows, [iref]] # Bus active power injections (generation - load) adjusted for phase # shifters and real shunts. p_surplus = array([case.s_surplus(v).real for v in buses]) g_shunt = array([bus.g_shunt for bus in buses]) Pbus = (p_surplus - p_businj - g_shunt) / case.base_mva Pbus.shape = len(Pbus), 1 A = Bpvpq b = Pbus[pvpq_idxs] - Bref * v_angle_guess[iref] # x, res, rank, s = linalg.lstsq(A.todense(), b) x = spsolve(A, b) # Insert the reference voltage angle of the slack bus. v_angle = r_[x[:iref], v_angle_guess[iref], x[iref:]] return v_angle, Pbus[iref]
[ "def", "_get_v_angle", "(", "self", ",", "case", ",", "B", ",", "v_angle_guess", ",", "p_businj", ",", "iref", ")", ":", "buses", "=", "case", ".", "connected_buses", "pv_idxs", "=", "[", "bus", ".", "_i", "for", "bus", "in", "buses", "if", "bus", "....
Calculates the voltage phase angles.
[ "Calculates", "the", "voltage", "phase", "angles", "." ]
python
train
pylast/pylast
src/pylast/__init__.py
https://github.com/pylast/pylast/blob/a52f66d316797fc819b5f1d186d77f18ba97b4ff/src/pylast/__init__.py#L2671-L2678
def _retrieve_page(self, page_index): """Returns the node of matches to be processed""" params = self._get_params() params["page"] = str(page_index) doc = self._request(self._ws_prefix + ".search", True, params) return doc.getElementsByTagName(self._ws_prefix + "matches")[0]
[ "def", "_retrieve_page", "(", "self", ",", "page_index", ")", ":", "params", "=", "self", ".", "_get_params", "(", ")", "params", "[", "\"page\"", "]", "=", "str", "(", "page_index", ")", "doc", "=", "self", ".", "_request", "(", "self", ".", "_ws_pref...
Returns the node of matches to be processed
[ "Returns", "the", "node", "of", "matches", "to", "be", "processed" ]
python
train
LIVVkit/LIVVkit
livvkit/util/elements.py
https://github.com/LIVVkit/LIVVkit/blob/680120cd437e408673e62e535fc0a246c7fc17db/livvkit/util/elements.py#L261-L294
def image(title, desc, image_name, group=None, height=None): """ Builds an image element. Image elements are primarily created and then wrapped into an image gallery element. This is not required behavior, however and it's independent usage should be allowed depending on the behavior required. The Javascript will search for the `image_name` in the component's `imgs` directory when rendering. For example, all verification images are output to `vv_xxxx-xx-xx/verification/imgs` and then the verification case's output page will search for `image_name` within that directory. Args: title: The title to display desc: A description of the image or plot image_name: The filename of the image group: (optional) Title of lightbox group to join height: (optional) Height of image thumbnail to draw Returns: A dictionary with the metadata specifying that it is to be rendered as an image element """ ie = { 'Type': 'Image', 'Title': title, 'Description': desc, 'Plot File': image_name, } if group: ie['Group'] = group if height: ie['Height'] = height return ie
[ "def", "image", "(", "title", ",", "desc", ",", "image_name", ",", "group", "=", "None", ",", "height", "=", "None", ")", ":", "ie", "=", "{", "'Type'", ":", "'Image'", ",", "'Title'", ":", "title", ",", "'Description'", ":", "desc", ",", "'Plot File...
Builds an image element. Image elements are primarily created and then wrapped into an image gallery element. This is not required behavior, however and it's independent usage should be allowed depending on the behavior required. The Javascript will search for the `image_name` in the component's `imgs` directory when rendering. For example, all verification images are output to `vv_xxxx-xx-xx/verification/imgs` and then the verification case's output page will search for `image_name` within that directory. Args: title: The title to display desc: A description of the image or plot image_name: The filename of the image group: (optional) Title of lightbox group to join height: (optional) Height of image thumbnail to draw Returns: A dictionary with the metadata specifying that it is to be rendered as an image element
[ "Builds", "an", "image", "element", ".", "Image", "elements", "are", "primarily", "created", "and", "then", "wrapped", "into", "an", "image", "gallery", "element", ".", "This", "is", "not", "required", "behavior", "however", "and", "it", "s", "independent", ...
python
train
abingham/spor
src/spor/cli.py
https://github.com/abingham/spor/blob/673c8c36c99a4b9ea882f002bfb529f1eca89126/src/spor/cli.py#L191-L207
def status_handler(args): """usage: {program} status [<path>] Validate the anchors in the current repository. """ repo = _open_repo(args) for anchor_id, anchor in repo.items(): diff_lines = get_anchor_diff(anchor) if diff_lines: print('{} {}:{} out-of-date'.format( anchor_id, anchor.file_path, anchor.context.offset)) return ExitCode.OK
[ "def", "status_handler", "(", "args", ")", ":", "repo", "=", "_open_repo", "(", "args", ")", "for", "anchor_id", ",", "anchor", "in", "repo", ".", "items", "(", ")", ":", "diff_lines", "=", "get_anchor_diff", "(", "anchor", ")", "if", "diff_lines", ":", ...
usage: {program} status [<path>] Validate the anchors in the current repository.
[ "usage", ":", "{", "program", "}", "status", "[", "<path", ">", "]" ]
python
train
Chilipp/psyplot
psyplot/plotter.py
https://github.com/Chilipp/psyplot/blob/75a0a15a9a1dd018e79d2df270d56c4bf5f311d5/psyplot/plotter.py#L2085-L2101
def _set_rc(self): """Method to set the rcparams and defaultParams for this plotter""" base_str = self._get_rc_strings() # to make sure that the '.' is not interpreted as a regex pattern, # we specify the pattern_base by ourselves pattern_base = map(lambda s: s.replace('.', '\.'), base_str) # pattern for valid keys being all formatoptions in this plotter pattern = '(%s)(?=$)' % '|'.join(self._get_formatoptions()) self._rc = rcParams.find_and_replace(base_str, pattern=pattern, pattern_base=pattern_base) user_rc = SubDict(rcParams['plotter.user'], base_str, pattern=pattern, pattern_base=pattern_base) self._rc.update(user_rc.data) self._defaultParams = SubDict(rcParams.defaultParams, base_str, pattern=pattern, pattern_base=pattern_base)
[ "def", "_set_rc", "(", "self", ")", ":", "base_str", "=", "self", ".", "_get_rc_strings", "(", ")", "# to make sure that the '.' is not interpreted as a regex pattern,", "# we specify the pattern_base by ourselves", "pattern_base", "=", "map", "(", "lambda", "s", ":", "s"...
Method to set the rcparams and defaultParams for this plotter
[ "Method", "to", "set", "the", "rcparams", "and", "defaultParams", "for", "this", "plotter" ]
python
train
fermiPy/fermipy
fermipy/ltcube.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/ltcube.py#L239-L245
def create_empty(cls, tstart, tstop, fill=0.0, nside=64): """Create an empty livetime cube.""" cth_edges = np.linspace(0, 1.0, 41) domega = utils.edge_to_width(cth_edges) * 2.0 * np.pi hpx = HPX(nside, True, 'CEL', ebins=cth_edges) data = np.ones((len(cth_edges) - 1, hpx.npix)) * fill return cls(data, hpx, cth_edges, tstart=tstart, tstop=tstop)
[ "def", "create_empty", "(", "cls", ",", "tstart", ",", "tstop", ",", "fill", "=", "0.0", ",", "nside", "=", "64", ")", ":", "cth_edges", "=", "np", ".", "linspace", "(", "0", ",", "1.0", ",", "41", ")", "domega", "=", "utils", ".", "edge_to_width",...
Create an empty livetime cube.
[ "Create", "an", "empty", "livetime", "cube", "." ]
python
train
singnet/snet-cli
snet_cli/commands.py
https://github.com/singnet/snet-cli/blob/1b5ac98cb9a64211c861ead9fcfe6208f2749032/snet_cli/commands.py#L541-L567
def list_my(self): """ Find organization that has the current identity as the owner or as the member """ org_list = self.call_contract_command("Registry", "listOrganizations", []) rez_owner = [] rez_member = [] for idx, org_id in enumerate(org_list): (found, org_id, org_name, owner, members, serviceNames, repositoryNames) = self.call_contract_command("Registry", "getOrganizationById", [org_id]) if (not found): raise Exception("Organization was removed during this call. Please retry."); if self.ident.address == owner: rez_owner.append((org_name, bytes32_to_str(org_id))) if self.ident.address in members: rez_member.append((org_name, bytes32_to_str(org_id))) if (rez_owner): self._printout("# Organizations you are the owner of") self._printout("# OrgName OrgId") for n,i in rez_owner: self._printout("%s %s"%(n,i)) if (rez_member): self._printout("# Organizations you are the member of") self._printout("# OrgName OrgId") for n,i in rez_member: self._printout("%s %s"%(n,i))
[ "def", "list_my", "(", "self", ")", ":", "org_list", "=", "self", ".", "call_contract_command", "(", "\"Registry\"", ",", "\"listOrganizations\"", ",", "[", "]", ")", "rez_owner", "=", "[", "]", "rez_member", "=", "[", "]", "for", "idx", ",", "org_id", "...
Find organization that has the current identity as the owner or as the member
[ "Find", "organization", "that", "has", "the", "current", "identity", "as", "the", "owner", "or", "as", "the", "member" ]
python
train
polysquare/polysquare-generic-file-linter
polysquarelinter/spelling.py
https://github.com/polysquare/polysquare-generic-file-linter/blob/cfc88771acd3d5551c28fa5d917bb0aeb584c4cc/polysquarelinter/spelling.py#L129-L159
def _split_line_with_offsets(line): """Split a line by delimiter, but yield tuples of word and offset. This function works by dropping all the english-like punctuation from a line (so parenthesis preceded or succeeded by spaces, periods, etc) and then splitting on spaces. """ for delimiter in re.finditer(r"[\.,:\;](?![^\s])", line): span = delimiter.span() line = line[:span[0]] + " " + line[span[1]:] for delimiter in re.finditer(r"[\"'\)\]\}>](?![^\.,\;:\"'\)\]\}>\s])", line): span = delimiter.span() line = line[:span[0]] + " " + line[span[1]:] for delimiter in re.finditer(r"(?<![^\.,\;:\"'\(\[\{<\s])[\"'\(\[\{<]", line): span = delimiter.span() line = line[:span[0]] + " " + line[span[1]:] # Treat hyphen separated words as separate words line = line.replace("-", " ") # Remove backticks line = line.replace("`", " ") for match in re.finditer(r"[^\s]+", line): content = match.group(0) if content.strip() != "": yield (match.span()[0], content)
[ "def", "_split_line_with_offsets", "(", "line", ")", ":", "for", "delimiter", "in", "re", ".", "finditer", "(", "r\"[\\.,:\\;](?![^\\s])\"", ",", "line", ")", ":", "span", "=", "delimiter", ".", "span", "(", ")", "line", "=", "line", "[", ":", "span", "[...
Split a line by delimiter, but yield tuples of word and offset. This function works by dropping all the english-like punctuation from a line (so parenthesis preceded or succeeded by spaces, periods, etc) and then splitting on spaces.
[ "Split", "a", "line", "by", "delimiter", "but", "yield", "tuples", "of", "word", "and", "offset", "." ]
python
train
ghukill/pyfc4
pyfc4/models.py
https://github.com/ghukill/pyfc4/blob/59011df592f08978c4a901a908862d112a5dcf02/pyfc4/models.py#L1455-L1474
def parents(self, as_resources=False): ''' method to return hierarchical parents of this resource Args: as_resources (bool): if True, opens each as appropriate resource type instead of return URI only Returns: (list): list of resources ''' parents = [o for s,p,o in self.rdf.graph.triples((None, self.rdf.prefixes.fedora.hasParent, None))] # if as_resources, issue GET requests for children and return if as_resources: logger.debug('retrieving parent as resource') parents = [ self.repo.get_resource(parent) for parent in parents ] return parents
[ "def", "parents", "(", "self", ",", "as_resources", "=", "False", ")", ":", "parents", "=", "[", "o", "for", "s", ",", "p", ",", "o", "in", "self", ".", "rdf", ".", "graph", ".", "triples", "(", "(", "None", ",", "self", ".", "rdf", ".", "prefi...
method to return hierarchical parents of this resource Args: as_resources (bool): if True, opens each as appropriate resource type instead of return URI only Returns: (list): list of resources
[ "method", "to", "return", "hierarchical", "parents", "of", "this", "resource" ]
python
train
coins13/twins
twins/twins.py
https://github.com/coins13/twins/blob/d66cc850007a25f01812a9d8c7e3efe64a631ca2/twins/twins.py#L308-L331
def get_achievements_summary (self): """ 履修成績要約の取得 (累計)""" r = self.req("SIW0001200-flow") # XXX ret = {} k = "" for d in pq(r.text)("td"): if d.text is None: continue if k != "": # 全角英字ダメゼッタイ if k == "GPA": k = "GPA" ret[k] = d.text.strip() k = "" continue k = d.text.strip() if k == "履修単位数" or k == "修得単位数" or k == "GPA": continue else: k = "" return ret
[ "def", "get_achievements_summary", "(", "self", ")", ":", "r", "=", "self", ".", "req", "(", "\"SIW0001200-flow\"", ")", "# XXX", "ret", "=", "{", "}", "k", "=", "\"\"", "for", "d", "in", "pq", "(", "r", ".", "text", ")", "(", "\"td\"", ")", ":", ...
履修成績要約の取得 (累計)
[ "履修成績要約の取得", "(", "累計", ")" ]
python
train
BernardFW/bernard
src/bernard/storage/context/base.py
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/storage/context/base.py#L125-L173
def inject(self, require: Optional[List[Text]] = None, fail: Text = 'missing_context', var_name: Text = 'context'): """ This is a decorator intended to be used on states (and actually only work on state handlers). The `require` argument is a list of keys to be checked in the context. If at least one of them is missing, then instead of calling the handler another method will be called. By default the method is `missing_context` but it can be configured using the `fail` argument. The context will be injected into the handler as a keyword arg. By default, the arg is expected to be named `context` but you can change it to anything you'd like using `var_name`. See `create_context_store()` for a full example. """ def decorator(func): async def health_check(cls) -> Iterator[HealthCheckFail]: if not callable(getattr(cls, fail, None)): yield HealthCheckFail( '00001', f'State "{cls.__name__}" has no method "{fail}" to ' f'fall back to if required attributes are missing ' f'from the context.' ) if require: func.health_check = health_check @wraps(func) async def wrapper(state: Union[BaseState, BaseTrigger], **kwargs): conv_id = state.request.conversation.id key = f'context::{self.name}::{conv_id}' x = self.open(key) async with x as context: for item in (require or []): if item not in context: return await getattr(state, fail)(state, **kwargs) kwargs[var_name] = context return await func(state, **kwargs) return wrapper return decorator
[ "def", "inject", "(", "self", ",", "require", ":", "Optional", "[", "List", "[", "Text", "]", "]", "=", "None", ",", "fail", ":", "Text", "=", "'missing_context'", ",", "var_name", ":", "Text", "=", "'context'", ")", ":", "def", "decorator", "(", "fu...
This is a decorator intended to be used on states (and actually only work on state handlers). The `require` argument is a list of keys to be checked in the context. If at least one of them is missing, then instead of calling the handler another method will be called. By default the method is `missing_context` but it can be configured using the `fail` argument. The context will be injected into the handler as a keyword arg. By default, the arg is expected to be named `context` but you can change it to anything you'd like using `var_name`. See `create_context_store()` for a full example.
[ "This", "is", "a", "decorator", "intended", "to", "be", "used", "on", "states", "(", "and", "actually", "only", "work", "on", "state", "handlers", ")", "." ]
python
train
learningequality/ricecooker
ricecooker/classes/nodes.py
https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/classes/nodes.py#L134-L151
def process_files(self): """ Processes all the files associated with this Node. Files are downloaded if not present in the local storage. Creates and processes a NodeFile containing this Node's metadata. :return: A list of names of all the processed files. """ file_names = [] for f in self.files: file_names.append(f.process_file()) if not self.has_thumbnail() and config.THUMBNAILS: file_names.append(self.derive_thumbnail()) # node_file = NodeFile(self.to_dict()) # self.hashed_file_name = node_file.process_file() # file_names.append(self.hashed_file_name) return file_names
[ "def", "process_files", "(", "self", ")", ":", "file_names", "=", "[", "]", "for", "f", "in", "self", ".", "files", ":", "file_names", ".", "append", "(", "f", ".", "process_file", "(", ")", ")", "if", "not", "self", ".", "has_thumbnail", "(", ")", ...
Processes all the files associated with this Node. Files are downloaded if not present in the local storage. Creates and processes a NodeFile containing this Node's metadata. :return: A list of names of all the processed files.
[ "Processes", "all", "the", "files", "associated", "with", "this", "Node", ".", "Files", "are", "downloaded", "if", "not", "present", "in", "the", "local", "storage", ".", "Creates", "and", "processes", "a", "NodeFile", "containing", "this", "Node", "s", "met...
python
train
tomprince/txgithub
txgithub/api.py
https://github.com/tomprince/txgithub/blob/3bd5eebb25db013e2193e6a102a91049f356710d/txgithub/api.py#L256-L263
def getStatuses(self, repo_user, repo_name, sha): """ :param sha: Full sha to list the statuses from. :return: A defered with the result from GitHub. """ return self.api.makeRequest( ['repos', repo_user, repo_name, 'statuses', sha], method='GET')
[ "def", "getStatuses", "(", "self", ",", "repo_user", ",", "repo_name", ",", "sha", ")", ":", "return", "self", ".", "api", ".", "makeRequest", "(", "[", "'repos'", ",", "repo_user", ",", "repo_name", ",", "'statuses'", ",", "sha", "]", ",", "method", "...
:param sha: Full sha to list the statuses from. :return: A defered with the result from GitHub.
[ ":", "param", "sha", ":", "Full", "sha", "to", "list", "the", "statuses", "from", ".", ":", "return", ":", "A", "defered", "with", "the", "result", "from", "GitHub", "." ]
python
train
saltstack/salt
salt/states/saltsupport.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/saltsupport.py#L115-L139
def collected(self, group, filename=None, host=None, location=None, move=True, all=True): ''' Sync archives to a central place. :param name: :param group: :param filename: :param host: :param location: :param move: :param all: :return: ''' ret = { 'name': 'support.collected', 'changes': {}, 'result': True, 'comment': '', } location = location or tempfile.gettempdir() self.check_destination(location, group) ret['changes'] = __salt__['support.sync'](group, name=filename, host=host, location=location, move=move, all=all) return ret
[ "def", "collected", "(", "self", ",", "group", ",", "filename", "=", "None", ",", "host", "=", "None", ",", "location", "=", "None", ",", "move", "=", "True", ",", "all", "=", "True", ")", ":", "ret", "=", "{", "'name'", ":", "'support.collected'", ...
Sync archives to a central place. :param name: :param group: :param filename: :param host: :param location: :param move: :param all: :return:
[ "Sync", "archives", "to", "a", "central", "place", "." ]
python
train
kensho-technologies/graphql-compiler
graphql_compiler/compiler/compiler_frontend.py
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/compiler_frontend.py#L217-L244
def _process_output_source_directive(schema, current_schema_type, ast, location, context, local_unique_directives): """Process the output_source directive, modifying the context as appropriate. Args: schema: GraphQL schema object, obtained from the graphql library current_schema_type: GraphQLType, the schema type at the current location ast: GraphQL AST node, obtained from the graphql library location: Location object representing the current location in the query context: dict, various per-compilation data (e.g. declared tags, whether the current block is optional, etc.). May be mutated in-place in this function! local_unique_directives: dict, directive name string -> directive object, containing unique directives present on the current AST node *only* Returns: an OutputSource block, if one should be emitted, or None otherwise """ # The 'ast' variable is only for function signature uniformity, and is currently not used. output_source_directive = local_unique_directives.get('output_source', None) if output_source_directive: if has_encountered_output_source(context): raise GraphQLCompilationError(u'Cannot have more than one output source!') if is_in_optional_scope(context): raise GraphQLCompilationError(u'Cannot have the output source in an optional block!') set_output_source_data(context, location) return blocks.OutputSource() else: return None
[ "def", "_process_output_source_directive", "(", "schema", ",", "current_schema_type", ",", "ast", ",", "location", ",", "context", ",", "local_unique_directives", ")", ":", "# The 'ast' variable is only for function signature uniformity, and is currently not used.", "output_source_...
Process the output_source directive, modifying the context as appropriate. Args: schema: GraphQL schema object, obtained from the graphql library current_schema_type: GraphQLType, the schema type at the current location ast: GraphQL AST node, obtained from the graphql library location: Location object representing the current location in the query context: dict, various per-compilation data (e.g. declared tags, whether the current block is optional, etc.). May be mutated in-place in this function! local_unique_directives: dict, directive name string -> directive object, containing unique directives present on the current AST node *only* Returns: an OutputSource block, if one should be emitted, or None otherwise
[ "Process", "the", "output_source", "directive", "modifying", "the", "context", "as", "appropriate", "." ]
python
train
sdispater/orator
orator/migrations/migrator.py
https://github.com/sdispater/orator/blob/bd90bf198ee897751848f9a92e49d18e60a74136/orator/migrations/migrator.py#L53-L69
def run_migration_list(self, path, migrations, pretend=False): """ Run a list of migrations. :type migrations: list :type pretend: bool """ if not migrations: self._note("<info>Nothing to migrate</info>") return batch = self._repository.get_next_batch_number() for f in migrations: self._run_up(path, f, batch, pretend)
[ "def", "run_migration_list", "(", "self", ",", "path", ",", "migrations", ",", "pretend", "=", "False", ")", ":", "if", "not", "migrations", ":", "self", ".", "_note", "(", "\"<info>Nothing to migrate</info>\"", ")", "return", "batch", "=", "self", ".", "_re...
Run a list of migrations. :type migrations: list :type pretend: bool
[ "Run", "a", "list", "of", "migrations", "." ]
python
train
arista-eosplus/pyeapi
pyeapi/api/vrrp.py
https://github.com/arista-eosplus/pyeapi/blob/96a74faef1fe3bd79c4e900aed29c9956a0587d6/pyeapi/api/vrrp.py#L626-L694
def set_secondary_ips(self, name, vrid, secondary_ips, run=True): """Configure the secondary_ip property of the vrrp Notes: set_secondary_ips takes a list of secondary ip addresses which are to be set on the virtal router. An empty list will remove any existing secondary ip addresses from the vrrp. A list containing addresses will configure the virtual router with only the addresses specified in the list - any existing addresses not included in the list will be removed. Args: name (string): The interface to configure. vrid (integer): The vrid number for the vrrp to be managed. secondary_ips (list): A list of secondary ip addresses to be assigned to the virtual router. run (boolean): Set to True to execute the command, False to return a string with the formatted command. Returns: If run is True, returns True if the command executed successfully, error if failure. If run is False, returns the formatted command string which can be passed to the node """ cmds = [] # Get the current set of tracks defined for the vrrp curr_sec_ips = [] vrrps = self.get(name) if vrrps and vrid in vrrps: curr_sec_ips = vrrps[vrid]['secondary_ip'] # Validate the list of ip addresses for sec_ip in secondary_ips: if type(sec_ip) is not str or \ not re.match(r'^\d+\.\d+\.\d+\.\d+$', sec_ip): raise ValueError("vrrp property 'secondary_ip' must be a list " "of properly formatted ip address strings") intersection = list(set(curr_sec_ips) & set(secondary_ips)) # Delete the intersection from both lists to determine which # addresses need to be added or removed from the vrrp remove = list(set(curr_sec_ips) - set(intersection)) add = list(set(secondary_ips) - set(intersection)) # Build the commands to add and remove the secondary ip addresses for sec_ip in remove: cmds.append("no vrrp %d ip %s secondary" % (vrid, sec_ip)) for sec_ip in add: cmds.append("vrrp %d ip %s secondary" % (vrid, sec_ip)) cmds = sorted(cmds) # Run the command if requested if run: result = self.configure_interface(name, cmds) # 
And verify the command succeeded if result is False: return self.error return result # Otherwise return the formatted command return cmds
[ "def", "set_secondary_ips", "(", "self", ",", "name", ",", "vrid", ",", "secondary_ips", ",", "run", "=", "True", ")", ":", "cmds", "=", "[", "]", "# Get the current set of tracks defined for the vrrp", "curr_sec_ips", "=", "[", "]", "vrrps", "=", "self", ".",...
Configure the secondary_ip property of the vrrp Notes: set_secondary_ips takes a list of secondary ip addresses which are to be set on the virtal router. An empty list will remove any existing secondary ip addresses from the vrrp. A list containing addresses will configure the virtual router with only the addresses specified in the list - any existing addresses not included in the list will be removed. Args: name (string): The interface to configure. vrid (integer): The vrid number for the vrrp to be managed. secondary_ips (list): A list of secondary ip addresses to be assigned to the virtual router. run (boolean): Set to True to execute the command, False to return a string with the formatted command. Returns: If run is True, returns True if the command executed successfully, error if failure. If run is False, returns the formatted command string which can be passed to the node
[ "Configure", "the", "secondary_ip", "property", "of", "the", "vrrp" ]
python
train
macacajs/wd.py
macaca/asserters.py
https://github.com/macacajs/wd.py/blob/6d3c52060013e01a67cd52b68b5230b387427bad/macaca/asserters.py#L9-L22
def is_displayed(target): """Assert whether the target is displayed Args: target(WebElement): WebElement Object. Returns: Return True if the element is displayed or return False otherwise. """ is_displayed = getattr(target, 'is_displayed', None) if not is_displayed or not callable(is_displayed): raise TypeError('Target has no attribute \'is_displayed\' or not callable') if not is_displayed(): raise WebDriverException('element not visible')
[ "def", "is_displayed", "(", "target", ")", ":", "is_displayed", "=", "getattr", "(", "target", ",", "'is_displayed'", ",", "None", ")", "if", "not", "is_displayed", "or", "not", "callable", "(", "is_displayed", ")", ":", "raise", "TypeError", "(", "'Target h...
Assert whether the target is displayed Args: target(WebElement): WebElement Object. Returns: Return True if the element is displayed or return False otherwise.
[ "Assert", "whether", "the", "target", "is", "displayed" ]
python
valid
cox-labs/perseuspy
perseuspy/dependent_peptides.py
https://github.com/cox-labs/perseuspy/blob/3809c1bd46512605f9e7ca7f97e026e4940ed604/perseuspy/dependent_peptides.py#L42-L51
def count(args): """ count occurences in a list of lists >>> count([['a','b'],['a']]) defaultdict(int, {'a' : 2, 'b' : 1}) """ counts = defaultdict(int) for arg in args: for item in arg: counts[item] = counts[item] + 1 return counts
[ "def", "count", "(", "args", ")", ":", "counts", "=", "defaultdict", "(", "int", ")", "for", "arg", "in", "args", ":", "for", "item", "in", "arg", ":", "counts", "[", "item", "]", "=", "counts", "[", "item", "]", "+", "1", "return", "counts" ]
count occurences in a list of lists >>> count([['a','b'],['a']]) defaultdict(int, {'a' : 2, 'b' : 1})
[ "count", "occurences", "in", "a", "list", "of", "lists", ">>>", "count", "(", "[[", "a", "b", "]", "[", "a", "]]", ")", "defaultdict", "(", "int", "{", "a", ":", "2", "b", ":", "1", "}", ")" ]
python
train
airspeed-velocity/asv
asv/extern/asizeof.py
https://github.com/airspeed-velocity/asv/blob/d23bb8b74e8adacbfa3cf5724bda55fb39d56ba6/asv/extern/asizeof.py#L740-L750
def _len_int(obj): '''Length of multi-precision int (aka long) in digits. ''' if obj: n, i = 1, abs(obj) if i > _digitmax: # no log(x[, base]) in Python 2.2 n += int(log(i) * _digitlog) else: # zero n = 0 return n
[ "def", "_len_int", "(", "obj", ")", ":", "if", "obj", ":", "n", ",", "i", "=", "1", ",", "abs", "(", "obj", ")", "if", "i", ">", "_digitmax", ":", "# no log(x[, base]) in Python 2.2", "n", "+=", "int", "(", "log", "(", "i", ")", "*", "_digitlog", ...
Length of multi-precision int (aka long) in digits.
[ "Length", "of", "multi", "-", "precision", "int", "(", "aka", "long", ")", "in", "digits", "." ]
python
train
juanifioren/django-oidc-provider
oidc_provider/lib/utils/token.py
https://github.com/juanifioren/django-oidc-provider/blob/f0daed07b2ac7608565b80d4c80ccf04d8c416a8/oidc_provider/lib/utils/token.py#L151-L167
def get_client_alg_keys(client): """ Takes a client and returns the set of keys associated with it. Returns a list of keys. """ if client.jwt_alg == 'RS256': keys = [] for rsakey in RSAKey.objects.all(): keys.append(jwk_RSAKey(key=importKey(rsakey.key), kid=rsakey.kid)) if not keys: raise Exception('You must add at least one RSA Key.') elif client.jwt_alg == 'HS256': keys = [SYMKey(key=client.client_secret, alg=client.jwt_alg)] else: raise Exception('Unsupported key algorithm.') return keys
[ "def", "get_client_alg_keys", "(", "client", ")", ":", "if", "client", ".", "jwt_alg", "==", "'RS256'", ":", "keys", "=", "[", "]", "for", "rsakey", "in", "RSAKey", ".", "objects", ".", "all", "(", ")", ":", "keys", ".", "append", "(", "jwk_RSAKey", ...
Takes a client and returns the set of keys associated with it. Returns a list of keys.
[ "Takes", "a", "client", "and", "returns", "the", "set", "of", "keys", "associated", "with", "it", ".", "Returns", "a", "list", "of", "keys", "." ]
python
train
IS-ENES-Data/esgf-pid
esgfpid/utils/logutils.py
https://github.com/IS-ENES-Data/esgf-pid/blob/2f4909bb3ff79c0b6ed2932e0dd8b3bb6aec5e41/esgfpid/utils/logutils.py#L29-L38
def loginfo(logger, msg, *args, **kwargs): ''' Logs messages as INFO, unless esgfpid.defaults.LOG_INFO_TO_DEBUG, (then it logs messages as DEBUG). ''' if esgfpid.defaults.LOG_INFO_TO_DEBUG: logger.debug(msg, *args, **kwargs) else: logger.info(msg, *args, **kwargs)
[ "def", "loginfo", "(", "logger", ",", "msg", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "esgfpid", ".", "defaults", ".", "LOG_INFO_TO_DEBUG", ":", "logger", ".", "debug", "(", "msg", ",", "*", "args", ",", "*", "*", "kwargs", ")", ...
Logs messages as INFO, unless esgfpid.defaults.LOG_INFO_TO_DEBUG, (then it logs messages as DEBUG).
[ "Logs", "messages", "as", "INFO", "unless", "esgfpid", ".", "defaults", ".", "LOG_INFO_TO_DEBUG", "(", "then", "it", "logs", "messages", "as", "DEBUG", ")", "." ]
python
train