repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
ecederstrand/exchangelib
exchangelib/protocol.py
https://github.com/ecederstrand/exchangelib/blob/736347b337c239fcd6d592db5b29e819f753c1ba/exchangelib/protocol.py#L100-L115
def decrease_poolsize(self):
    """Shrink the session pool by one session.

    Called in response to error messages from the server requesting us to
    rate-limit requests. The pool is never reduced below a single session.
    """
    # Cheap check outside the lock: we must keep at least one session.
    if self._session_pool_size <= 1:
        raise SessionPoolMinSizeReached('Session pool size cannot be decreased further')
    # Changing the pool size variable must be protected by a lock to avoid
    # race conditions between concurrent shrink requests.
    with self._session_pool_lock:
        current_size = self._session_pool_size
        if current_size <= 1:
            # Another thread won the race and already shrank the pool.
            log.debug('Session pool size was decreased in another thread')
            return
        log.warning('Lowering session pool size from %s to %s', current_size, current_size - 1)
        # Take a single session from the pool and discard it for good.
        self.get_session().close()
        self._session_pool_size = current_size - 1
[ "def", "decrease_poolsize", "(", "self", ")", ":", "# Take a single session from the pool and discard it. We need to protect this with a lock while we are changing", "# the pool size variable, to avoid race conditions. We must keep at least one session in the pool.", "if", "self", ".", "_sessi...
Decreases the session pool size in response to error messages from the server requesting to rate-limit requests. We decrease by one session per call.
[ "Decreases", "the", "session", "pool", "size", "in", "response", "to", "error", "messages", "from", "the", "server", "requesting", "to", "rate", "-", "limit", "requests", ".", "We", "decrease", "by", "one", "session", "per", "call", "." ]
python
train
CellProfiler/centrosome
centrosome/cpmorphology.py
https://github.com/CellProfiler/centrosome/blob/7bd9350a2d4ae1b215b81eabcecfe560bbb1f32a/centrosome/cpmorphology.py#L2084-L2091
def within_hull(point, hull):
    """Return True if the point is strictly inside the convex hull.

    `hull` is an array of vertices; each edge's cross product against the
    point must be negative for the point to count as inside, so points on
    the boundary are reported as outside.
    """
    vertex_count = hull.shape[0]
    for idx in range(vertex_count):
        # hull[idx - 1] wraps to the last vertex when idx == 0, closing the polygon.
        prev_vertex = hull[idx - 1, :]
        vertex = hull[idx, :]
        if np.cross(vertex - prev_vertex, point - vertex) >= 0:
            return False
    return True
[ "def", "within_hull", "(", "point", ",", "hull", ")", ":", "h_prev_pt", "=", "hull", "[", "-", "1", ",", ":", "]", "for", "h_pt", "in", "hull", ":", "if", "np", ".", "cross", "(", "h_pt", "-", "h_prev_pt", ",", "point", "-", "h_pt", ")", ">=", ...
Return true if the point is within the convex hull
[ "Return", "true", "if", "the", "point", "is", "within", "the", "convex", "hull" ]
python
train
UCL-INGI/INGInious
base-containers/base/inginious/input.py
https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/base-containers/base/inginious/input.py#L49-L92
def parse_template(input_filename, output_filename=''):
    """ Parses a template file.
        Replaces all occurrences of @@problem_id@@ by the value of the
        'problem_id' key in the data dictionary.

        input_filename: file to parse
        output_filename: if not specified, overwrite input file
    """
    data = load_input()

    with open(input_filename, 'rb') as fin:
        template = fin.read().decode("utf-8")

    # Check if 'input' in data
    if 'input' not in data:
        raise ValueError("Could not find 'input' in data")

    # Parse template
    for field in data['input']:
        # File-upload fields expose both a 'filename' and a 'value' sub-key;
        # plain fields are substituted directly (empty sub).
        subs = ["filename", "value"] if isinstance(data['input'][field], dict) and "filename" in data['input'][field] and "value" in data['input'][field] else [""]
        for sub in subs:
            displayed_field = field + (":" if sub else "") + sub
            # re.escape prevents regex metacharacters in field ids from being
            # interpreted as pattern syntax.
            regex = re.compile("@([^@]*)@" + re.escape(displayed_field) + '@([^@]*)@')
            for prefix, postfix in set(regex.findall(template)):
                if sub == "value":
                    # 'value' is a path to an on-disk file; use a context
                    # manager so the handle is closed (the original leaked it).
                    with open(data['input'][field][sub], 'rb') as fvalue:
                        text = fvalue.read().decode('utf-8')
                elif sub:
                    text = data['input'][field][sub]
                else:
                    text = data['input'][field]
                # Repeat the prefix/postfix on every line of a multi-line value.
                rep = "\n".join([prefix + v + postfix for v in text.splitlines()])
                template = template.replace("@{0}@{1}@{2}@".format(prefix, displayed_field, postfix), rep)

    if output_filename == '':
        output_filename = input_filename

    # Ensure directory of resulting file exists
    try:
        os.makedirs(os.path.dirname(output_filename))
    except OSError:
        # Directory already exists (or dirname is empty) -- best effort only.
        pass

    # Write file
    with open(output_filename, 'wb') as fout:
        fout.write(template.encode("utf-8"))
[ "def", "parse_template", "(", "input_filename", ",", "output_filename", "=", "''", ")", ":", "data", "=", "load_input", "(", ")", "with", "open", "(", "input_filename", ",", "'rb'", ")", "as", "file", ":", "template", "=", "file", ".", "read", "(", ")", ...
Parses a template file. Replaces all occurrences of @@problem_id@@ by the value of the 'problem_id' key in data dictionary input_filename: file to parse output_filename: if not specified, overwrite input file
[ "Parses", "a", "template", "file", "Replaces", "all", "occurences", "of" ]
python
train
CEA-COSMIC/ModOpt
modopt/math/matrix.py
https://github.com/CEA-COSMIC/ModOpt/blob/019b189cb897cbb4d210c44a100daaa08468830c/modopt/math/matrix.py#L77-L111
def nuclear_norm(data):
    r"""Nuclear norm

    This method computes the nuclear (or trace) norm of the input data.

    Parameters
    ----------
    data : np.ndarray
        Input data array

    Returns
    -------
    float nuclear norm value

    Examples
    --------
    >>> from modopt.math.matrix import nuclear_norm
    >>> a = np.arange(9).reshape(3, 3)
    >>> nuclear_norm(a)
    15.49193338482967

    Notes
    -----
    Implements the following equation:

    .. math::
        \|\mathbf{A}\|_* = \sum_{i=1}^{\min\{m,n\}} \sigma_i (\mathbf{A})

    """
    # Only the singular values are needed; compute_uv=False skips the
    # (unused and expensive) U and V matrices the original discarded.
    singular_values = np.linalg.svd(data, compute_uv=False)

    # The nuclear norm is the sum of the singular values.
    return np.sum(singular_values)
[ "def", "nuclear_norm", "(", "data", ")", ":", "# Get SVD of the data.", "u", ",", "s", ",", "v", "=", "np", ".", "linalg", ".", "svd", "(", "data", ")", "# Return nuclear norm.", "return", "np", ".", "sum", "(", "s", ")" ]
r"""Nuclear norm This method computes the nuclear (or trace) norm of the input data. Parameters ---------- data : np.ndarray Input data array Returns ------- float nuclear norm value Examples -------- >>> from modopt.math.matrix import nuclear_norm >>> a = np.arange(9).reshape(3, 3) >>> nuclear_norm(a) 15.49193338482967 Notes ----- Implements the following equation: .. math:: \|\mathbf{A}\|_* = \sum_{i=1}^{\min\{m,n\}} \sigma_i (\mathbf{A})
[ "r", "Nuclear", "norm" ]
python
train
tijme/not-your-average-web-crawler
nyawc/QueueItem.py
https://github.com/tijme/not-your-average-web-crawler/blob/d77c14e1616c541bb3980f649a7e6f8ed02761fb/nyawc/QueueItem.py#L118-L152
def get_hash(self):
    """Generate and return the dict index hash of the given queue item.

    Note: Cookies should not be included in the hash calculation because
    otherwise requests are crawled multiple times with e.g. different
    session keys, causing infinite crawling recursion.

    Note: At this moment the keys do not actually get hashed since it works
    perfectly without and since hashing the keys requires us to build hash
    collision management.

    Returns:
        str: The hash of the given queue item.

    """
    # Return the memoized value when it was already computed.
    if self.__index_hash:
        return self.__index_hash

    url = self.request.url
    # Assemble the key from the request method plus the URL broken into its
    # components (cookies deliberately excluded, see docstring).
    parts = [
        self.request.method,
        URLHelper.get_protocol(url),
        URLHelper.get_subdomain(url),
        URLHelper.get_hostname(url),
        URLHelper.get_tld(url),
        URLHelper.get_path(url),
        str(URLHelper.get_ordered_params(url)),
    ]

    if self.request.data is not None:
        parts.append(str(self.request.data.keys()))

    self.__index_hash = ''.join(parts)
    return self.__index_hash
[ "def", "get_hash", "(", "self", ")", ":", "if", "self", ".", "__index_hash", ":", "return", "self", ".", "__index_hash", "key", "=", "self", ".", "request", ".", "method", "key", "+=", "URLHelper", ".", "get_protocol", "(", "self", ".", "request", ".", ...
Generate and return the dict index hash of the given queue item. Note: Cookies should not be included in the hash calculation because otherwise requests are crawled multiple times with e.g. different session keys, causing infinite crawling recursion. Note: At this moment the keys do not actually get hashed since it works perfectly without and since hashing the keys requires us to build hash collision management. Returns: str: The hash of the given queue item.
[ "Generate", "and", "return", "the", "dict", "index", "hash", "of", "the", "given", "queue", "item", "." ]
python
train
spyder-ide/spyder
spyder/plugins/explorer/widgets.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/explorer/widgets.py#L682-L709
def rename_file(self, fname):
    """Prompt for a new name and rename *fname* accordingly.

    Returns the new path on success; returns None when the user cancels,
    keeps the same name, declines to overwrite, or the rename fails.
    """
    new_name, valid = QInputDialog.getText(self, _('Rename'), _('New name:'),
                                           QLineEdit.Normal, osp.basename(fname))
    if not valid:
        return
    path = osp.join(osp.dirname(fname), to_text_string(new_name))
    if path == fname:
        # Name unchanged -- nothing to do.
        return
    if osp.exists(path):
        # Ask for confirmation before clobbering an existing file.
        answer = QMessageBox.warning(
            self, _("Rename"),
            _("Do you really want to rename <b>%s</b> and "
              "overwrite the existing file <b>%s</b>?"
              ) % (osp.basename(fname), osp.basename(path)),
            QMessageBox.Yes | QMessageBox.No)
        if answer == QMessageBox.No:
            return
    try:
        misc.rename_file(fname, path)
        # NOTE(review): fname has just been renamed away, so osp.isfile(fname)
        # is normally False here -- confirm whether `path` was intended.
        if osp.isfile(fname):
            self.sig_renamed.emit(fname, path)
        else:
            self.sig_renamed_tree.emit(fname, path)
        return path
    except EnvironmentError as error:
        QMessageBox.critical(self, _("Rename"),
                             _("<b>Unable to rename file <i>%s</i></b>"
                               "<br><br>Error message:<br>%s"
                               ) % (osp.basename(fname), to_text_string(error)))
[ "def", "rename_file", "(", "self", ",", "fname", ")", ":", "path", ",", "valid", "=", "QInputDialog", ".", "getText", "(", "self", ",", "_", "(", "'Rename'", ")", ",", "_", "(", "'New name:'", ")", ",", "QLineEdit", ".", "Normal", ",", "osp", ".", ...
Rename file
[ "Rename", "file" ]
python
train
spyder-ide/spyder
spyder/utils/programs.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/utils/programs.py#L34-L54
def get_temp_dir(suffix=None):
    """
    Return temporary Spyder directory, checking previously that it exists.

    :param suffix: optional subdirectory name appended below the per-user
        Spyder temp directory.
    :return: absolute path of the (created) temporary directory.
    """
    to_join = [tempfile.gettempdir()]

    if os.name == 'nt':
        to_join.append('spyder')
    else:
        # Per-user directory on POSIX to avoid permission clashes in /tmp.
        username = encoding.to_unicode_from_fs(getuser())
        to_join.append('spyder-' + username)

    if suffix is not None:
        to_join.append(suffix)

    tempdir = osp.join(*to_join)

    if not osp.isdir(tempdir):
        # makedirs (not mkdir) so the intermediate 'spyder-<user>' directory
        # is also created when a suffix is requested; exist_ok guards against
        # a concurrent process creating the directory between the check and
        # the creation.
        os.makedirs(tempdir, exist_ok=True)

    return tempdir
[ "def", "get_temp_dir", "(", "suffix", "=", "None", ")", ":", "to_join", "=", "[", "tempfile", ".", "gettempdir", "(", ")", "]", "if", "os", ".", "name", "==", "'nt'", ":", "to_join", ".", "append", "(", "'spyder'", ")", "else", ":", "username", "=", ...
Return temporary Spyder directory, checking previously that it exists.
[ "Return", "temporary", "Spyder", "directory", "checking", "previously", "that", "it", "exists", "." ]
python
train
pallets/werkzeug
src/werkzeug/wrappers/base_request.py
https://github.com/pallets/werkzeug/blob/a220671d66755a94630a212378754bb432811158/src/werkzeug/wrappers/base_request.py#L532-L540
def path(self):
    """Requested path as unicode.

    This works a bit like the regular path info in the WSGI environment but
    will always include a leading slash, even if the URL root is accessed.
    """
    raw = self.environ.get("PATH_INFO") or ""
    decoded = wsgi_decoding_dance(raw, self.charset, self.encoding_errors)
    # Normalize so the result always starts with exactly one slash.
    return "/" + decoded.lstrip("/")
[ "def", "path", "(", "self", ")", ":", "raw_path", "=", "wsgi_decoding_dance", "(", "self", ".", "environ", ".", "get", "(", "\"PATH_INFO\"", ")", "or", "\"\"", ",", "self", ".", "charset", ",", "self", ".", "encoding_errors", ")", "return", "\"/\"", "+",...
Requested path as unicode. This works a bit like the regular path info in the WSGI environment but will always include a leading slash, even if the URL root is accessed.
[ "Requested", "path", "as", "unicode", ".", "This", "works", "a", "bit", "like", "the", "regular", "path", "info", "in", "the", "WSGI", "environment", "but", "will", "always", "include", "a", "leading", "slash", "even", "if", "the", "URL", "root", "is", "...
python
train
ArchiveTeam/wpull
wpull/scraper/html.py
https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/scraper/html.py#L551-L575
def iter_links_script_element(self, element):
    '''Iterate a ``script`` element.'''
    if self.javascript_scraper and element.text:
        scraped = self.javascript_scraper.scrape_links(element.text, context=True)
        for link, context in scraped:
            inline = is_likely_inline(link)
            # A context of exactly True carries no type information.
            link_type = None if context is True else context

            yield LinkInfo(
                element=element, tag=element.tag, attrib=None,
                link=link,
                inline=inline, linked=not inline,
                base_link=None,
                value_type='script',
                link_type=link_type
            )

    # Also report any plain links found on the element itself.
    yield from self.iter_links_plain_element(element)
[ "def", "iter_links_script_element", "(", "self", ",", "element", ")", ":", "if", "self", ".", "javascript_scraper", "and", "element", ".", "text", ":", "link_iter", "=", "self", ".", "javascript_scraper", ".", "scrape_links", "(", "element", ".", "text", ",", ...
Iterate a ``script`` element.
[ "Iterate", "a", "script", "element", "." ]
python
train
codelv/enaml-native
src/enamlnative/android/android_web_view.py
https://github.com/codelv/enaml-native/blob/c33986e9eda468c508806e0a3e73c771401e5718/src/enamlnative/android/android_web_view.py#L95-L103
def destroy(self):
    """ Destroy the client """
    client = self.client
    if client:
        #: Detach the widget so the client stops listening for events.
        client.setWebView(self.widget, None)
        del self.client
    super(AndroidWebView, self).destroy()
[ "def", "destroy", "(", "self", ")", ":", "if", "self", ".", "client", ":", "#: Stop listening", "self", ".", "client", ".", "setWebView", "(", "self", ".", "widget", ",", "None", ")", "del", "self", ".", "client", "super", "(", "AndroidWebView", ",", "...
Destroy the client
[ "Destroy", "the", "client" ]
python
train
pyroscope/pyrocore
src/pyrocore/torrent/rtorrent.py
https://github.com/pyroscope/pyrocore/blob/89ad01346a570943d20311a0b488440975876612/src/pyrocore/torrent/rtorrent.py#L399-L503
def cull(self, file_filter=None, attrs=None):
    """ Delete ALL data files and remove torrent from client.

        @param file_filter: Optional callable for selecting a subset of all files.
            The callable gets a file item as described for RtorrentItem._get_files
            and must return True for items eligible for deletion.
        @param attrs: Optional list of additional attributes to fetch for a filter.
    """
    dry_run = 0  # set to 1 for testing

    def remove_with_links(path):
        "Remove a path including any symlink chains leading to it."
        # Collect the full symlink chain first, ending at the real path.
        rm_paths = []
        while os.path.islink(path):
            target = os.readlink(path)
            rm_paths.append(path)
            path = target

        if os.path.exists(path):
            rm_paths.append(path)
        else:
            self._engine.LOG.debug("Real path '%s' doesn't exist,"
                " but %d symlink(s) leading to it will be deleted..." % (path, len(rm_paths)))

        # Remove the link chain, starting at the real path
        # (this prevents losing the chain when there's permission problems)
        for rm_path in reversed(rm_paths):
            # A symlink to a directory must be removed with os.remove, not os.rmdir.
            is_dir = os.path.isdir(rm_path) and not os.path.islink(rm_path)
            self._engine.LOG.debug("Deleting '%s%s'" % (rm_path, '/' if is_dir else ''))
            if not dry_run:
                try:
                    (os.rmdir if is_dir else os.remove)(rm_path)
                except OSError as exc:
                    if exc.errno == errno.ENOENT:
                        # Seems this disappeared somehow inbetween (race condition)
                        self._engine.LOG.info("Path '%s%s' disappeared before it could be deleted"
                            % (rm_path, '/' if is_dir else ''))
                    else:
                        raise

        return rm_paths

    # Assemble doomed files and directories
    files, dirs = set(), set()
    base_path = os.path.expanduser(self.directory)
    item_files = list(self._get_files(attrs=attrs))

    # Sanity checks: refuse to delete based on an empty or relative directory,
    # which would make the paths below ambiguous (or dangerous).
    if not self.directory:
        raise error.EngineError("Directory for item #%s is empty,"
            " you might want to add a filter 'directory=!'" % (self._fields["hash"],))
    if not os.path.isabs(base_path):
        raise error.EngineError("Directory '%s' for item #%s is not absolute, which is a bad idea;"
            " fix your .rtorrent.rc, and use 'directory.default.set = /...'"
            % (self.directory, self._fields["hash"],))
    # Multi-file items own their whole directory, so mark it for pruning too.
    if self.fetch("=is_multi_file") and os.path.isdir(self.directory):
        dirs.add(self.directory)

    for item_file in item_files:
        if file_filter and not file_filter(item_file):
            continue
        #print repr(item_file)
        path = os.path.join(base_path, item_file.path)
        files.add(path)
        # Remember intermediate directories of nested files for pruning below.
        if '/' in item_file.path:
            dirs.add(os.path.dirname(path))

    # Delete selected files
    if not dry_run:
        self.stop()
    for path in sorted(files):
        ##self._engine.LOG.debug("Deleting file '%s'" % (path,))
        remove_with_links(path)

    # Prune empty directories (longer paths first)
    doomed = files | dirs
    for path in sorted(dirs, reverse=True):
        # Whatever is left in the directory after the file deletions above.
        residue = set(os.listdir(path) if os.path.exists(path) else [])
        # Entries matching the configured "waif" patterns may be deleted
        # together with the directory (e.g. leftover junk files).
        ignorable = set(i for i in residue
            if any(fnmatch.fnmatch(i, pat) for pat in config.waif_pattern_list)
            #or os.path.join(path, i) in doomed
        )
        ##print "---", residue - ignorable
        if residue and residue != ignorable:
            # Directory still holds non-ignorable content; keep it.
            self._engine.LOG.info("Keeping non-empty directory '%s' with %d %s%s!" % (
                path, len(residue),
                "entry" if len(residue) == 1 else "entries",
                (" (%d ignorable)" % len(ignorable)) if ignorable else "",
            ))
        else:
            ##print "---", ignorable
            for waif in ignorable:# - doomed:
                waif = os.path.join(path, waif)
                self._engine.LOG.debug("Deleting waif '%s'" % (waif,))
                if not dry_run:
                    try:
                        os.remove(waif)
                    except EnvironmentError as exc:
                        # Best effort: log and continue with the remaining waifs.
                        self._engine.LOG.warn("Problem deleting waif '%s' (%s)" % (waif, exc))

            ##self._engine.LOG.debug("Deleting empty directory '%s'" % (path,))
            doomed.update(remove_with_links(path))

    # Delete item from engine
    if not dry_run:
        self.delete()
[ "def", "cull", "(", "self", ",", "file_filter", "=", "None", ",", "attrs", "=", "None", ")", ":", "dry_run", "=", "0", "# set to 1 for testing", "def", "remove_with_links", "(", "path", ")", ":", "\"Remove a path including any symlink chains leading to it.\"", "rm_p...
Delete ALL data files and remove torrent from client. @param file_filter: Optional callable for selecting a subset of all files. The callable gets a file item as described for RtorrentItem._get_files and must return True for items eligible for deletion. @param attrs: Optional list of additional attributes to fetch for a filter.
[ "Delete", "ALL", "data", "files", "and", "remove", "torrent", "from", "client", "." ]
python
train
rjdkmr/do_x3dna
dnaMD/dnaMD/dnaEY.py
https://github.com/rjdkmr/do_x3dna/blob/fe910335eefcada76737f9e7cd6f25036cd32ab6/dnaMD/dnaMD/dnaEY.py#L1183-L1267
def calculateLocalElasticitySegments(self, bp, span=2, frameGap=None, helical=False, unit='kT',
                                     err_type='block', tool='gmx analyze', outFile=None):
    """Calculate local elastic properties of consecutive overlapped DNA segments

    Calculate local elastic properties of consecutive overlapped DNA segments of length
    given by `span`.

    Parameters
    ----------
    bp : list
        List of two base-steps forming the global DNA segment.
        For example: with ``bp=[5, 50]``, 5-50 base-step segment will be considered.
    span : int
        Length of overlapping (local) DNA segments. It should be less than four.
    frameGap : int
        How many frames to skip for next time-frame. Lower the number, slower will be the calculation.
    helical : bool
        If ``helical=True``, elastic matrix for **helical base-step** parameters are calculated.
        Otherwise, by default, elastic matrix for **base-step** parameters are calculated.
    unit : str
        Unit of energy. Allowed units are: ``'kT', 'kJ/mol' and 'kcal/mol'``.
    err_type : str
        Error estimation by autocorrelation method ``err_type='acf'`` or
        block averaging method ``err_type='block'``
    tool : str
        GROMACS tool to calculate error. In older versions it is `g_analyze` while in
        newer versions (above 2016) it is `gmx analyze`.
    outFile : str
        Output file in csv format.

    Returns
    -------
    segments : list
        list of DNA segments for which local elastic properties was calculated.
    elasticities : OrderedDict
        An ordered dictionary of 1D arrays of shape (segments). The keys in dictionary
        are name of the elasticity in the same order as listed above.
    error : OrderedDict
        An ordered dictionary of 1D arrays of shape (segments). The keys in dictionary
        are name of the elasticity in the same order as listed above.

    """
    # Property names differ between helical and normal base-step parameters.
    if helical:
        props_name = helical_local_props_vector
    else:
        props_name = local_props_vector

    segments, errors, elasticities = [], OrderedDict(), OrderedDict()
    # One accumulator list per elastic property.
    for name in props_name:
        elasticities[name] = []
        errors[name] = []

    # Slide a window of `span` base-steps across the [bp[0], bp[1]] segment.
    for s in range(bp[0], bp[1]):
        if s+span-1 > bp[1]:
            # Window would extend past the end of the global segment.
            break
        time, elasticity_t = self.getLocalElasticityByTime([s, s+span-1], frameGap=frameGap, helical=helical, unit=unit)
        error_t = dnaMD.get_error(time, list(elasticity_t.values()), len(props_name), err_type=err_type, tool=tool)
        for i in range(len(props_name)):
            esy_t = elasticity_t[props_name[i]][-1]  # only take last entry
            elasticities[props_name[i]].append(esy_t)
            errors[props_name[i]].append(error_t[i])
        segments.append('{0}-{1}'.format(s, s+span-1))

    # Write output file (csv with one column pair -- value, error -- per property)
    if outFile is not None:
        with open(outFile, 'w') as fout:
            fout.write('#bps')
            for name in props_name:
                fout.write(', {0}, {0}-error'.format(name))
            fout.write('\n')

            for s in range(len(segments)):
                fout.write('{0}'.format(segments[s]))
                for name in props_name:
                    fout.write(', {0:.5f}, {1:.5f}'.format(elasticities[name][s], errors[name][s]))
                fout.write('\n')

    return segments, elasticities, errors
[ "def", "calculateLocalElasticitySegments", "(", "self", ",", "bp", ",", "span", "=", "2", ",", "frameGap", "=", "None", ",", "helical", "=", "False", ",", "unit", "=", "'kT'", ",", "err_type", "=", "'block'", ",", "tool", "=", "'gmx analyze'", ",", "outF...
Calculate local elastic properties of consecutive overlapped DNA segments Calculate local elastic properties of consecutive overlapped DNA segments of length given by `span`. Parameters ---------- bp : list List of two base-steps forming the global DNA segment. For example: with ``bp=[5, 50]``, 5-50 base-step segment will be considered. span : int Length of overlapping (local) DNA segments. It should be less than four. frameGap : int How many frames to skip for next time-frame. Lower the number, slower will be the calculation. helical : bool If ``helical=True``, elastic matrix for **helical base-step** parameters are calculated. Otherwise, by default, elastic matrix for **base-step** parameters are calculated. unit : str Unit of energy. Allowed units are: ``'kT', 'kJ/mol' and 'kcal/mol'``. err_type : str Error estimation by autocorrelation method ``err_type='acf'`` or block averaging method ``err_type='block'`` tool : str GROMACS tool to calculate error. In older versions it is `g_analyze` while in newer versions (above 2016) it is `gmx analyze`. outFile : str Output file in csv format. Returns ------- segments : list list of DNA segments for which local elastic properties was calculated. elasticities : OrderedDict An ordered dictionary of 1D arrays of shape (segments). The keys in dictionary are name of the elasticity in the same order as listed above. error : OrderedDict An ordered dictionary of 1D arrays of shape (segments). The keys in dictionary are name of the elasticity in the same order as listed above.
[ "Calculate", "local", "elastic", "properties", "of", "consecutive", "overlapped", "DNA", "segments" ]
python
train
aliyun/aliyun-odps-python-sdk
odps/df/expr/element.py
https://github.com/aliyun/aliyun-odps-python-sdk/blob/4b0de18f5864386df6068f26f026e62f932c41e4/odps/df/expr/element.py#L599-L653
def _switch(expr, *args, **kw):
    """
    Similar to the case-when in SQL. Refer to the example below

    :param expr:
    :param args:
    :param kw:
    :return: sequence or scalar

    :Example:

    >>> # if df.id == 3 then df.name
    >>> # elif df.id == df.fid.abs() then df.name + 'test'
    >>> # default: 'test'
    >>> df.id.switch(3, df.name, df.fid.abs(), df.name + 'test', default='test')
    """
    default = _scalar(kw.get('default'))
    if len(args) <= 0:
        raise errors.ExpressionError('Switch must accept more than one condition')

    # Accept either (condition, then) tuples or a flat alternating sequence
    # condition, then, condition, then, ...
    if all(isinstance(arg, tuple) and len(arg) == 2 for arg in args):
        conditions, thens = zip(*args)
    else:
        conditions = [arg for i, arg in enumerate(args) if i % 2 == 0]
        thens = [arg for i, arg in enumerate(args) if i % 2 == 1]

    if len(conditions) == len(thens):
        # Lift plain Python values into scalar expressions.
        conditions, thens = _scalar(conditions), _scalar(thens)
    else:
        # Flat form had an odd number of arguments -- pairs are incomplete.
        raise errors.ExpressionError('Switch should be called by case and then pairs')

    # When called on an expression (df.id.switch(...)), it becomes the CASE subject.
    if isinstance(expr, (Scalar, SequenceExpr)):
        case = expr
    else:
        case = None

    # Every condition must evaluate to a boolean expression.
    if not all(hasattr(it, 'dtype') and it.dtype == types.boolean for it in conditions):
        raise errors.ExpressionError('Switch must be called by all boolean conditions')

    # The result type is the highest-precedence type among all branches
    # (including the default when given).
    res = thens if default is None else thens + [default, ]
    output_type = utils.highest_precedence_data_type(*(it.dtype for it in res))

    # The result is a sequence when any participating expression is one.
    is_seq = isinstance(expr, SequenceExpr) or \
        any(isinstance(it, SequenceExpr) for it in conditions) or \
        any(isinstance(it, SequenceExpr) for it in res)
    if case is not None:
        is_seq = is_seq or isinstance(case, SequenceExpr)

    kwargs = dict()
    if is_seq:
        kwargs['_data_type'] = output_type
    else:
        kwargs['_value_type'] = output_type

    return Switch(_input=expr, _case=case, _conditions=conditions, _thens=thens,
                  _default=default, **kwargs)
[ "def", "_switch", "(", "expr", ",", "*", "args", ",", "*", "*", "kw", ")", ":", "default", "=", "_scalar", "(", "kw", ".", "get", "(", "'default'", ")", ")", "if", "len", "(", "args", ")", "<=", "0", ":", "raise", "errors", ".", "ExpressionError"...
Similar to the case-when in SQL. Refer to the example below :param expr: :param args: :param kw: :return: sequence or scalar :Example: >>> # if df.id == 3 then df.name >>> # elif df.id == df.fid.abs() then df.name + 'test' >>> # default: 'test' >>> df.id.switch(3, df.name, df.fid.abs(), df.name + 'test', default='test')
[ "Similar", "to", "the", "case", "-", "when", "in", "SQL", ".", "Refer", "to", "the", "example", "below" ]
python
train
AshleySetter/optoanalysis
optoanalysis/optoanalysis/optoanalysis.py
https://github.com/AshleySetter/optoanalysis/blob/9d390acc834d70024d47b574aea14189a5a5714e/optoanalysis/optoanalysis/optoanalysis.py#L3728-L3757
def dynamical_potential(xdata, dt, order=3):
    """
    Computes potential from spring function

    Parameters
    ----------
    xdata : ndarray
        Position data for a degree of freedom, at which to calculate potential
    dt : float
        time between measurements
    order : int
        order of polynomial to fit

    Returns
    -------
    Potential : ndarray
        valued of potential at positions in xdata
    """
    import numpy as _np

    adata = calc_acceleration(xdata, dt)
    # Differentiating twice shifts indexing: acceleration[n] corresponds to
    # position[n-2], so drop the first two positions to align the arrays.
    aligned_positions = xdata[2:]

    # Fit acceleration as a polynomial of position, then integrate once to
    # obtain the (negated) potential.
    coefficients = _np.polyfit(aligned_positions, adata, order)
    spring_fit = _np.poly1d(coefficients)
    integrated = _np.polyint(spring_fit)
    return -integrated
[ "def", "dynamical_potential", "(", "xdata", ",", "dt", ",", "order", "=", "3", ")", ":", "import", "numpy", "as", "_np", "adata", "=", "calc_acceleration", "(", "xdata", ",", "dt", ")", "xdata", "=", "xdata", "[", "2", ":", "]", "# removes first 2 values...
Computes potential from spring function Parameters ---------- xdata : ndarray Position data for a degree of freedom, at which to calculate potential dt : float time between measurements order : int order of polynomial to fit Returns ------- Potential : ndarray valued of potential at positions in xdata
[ "Computes", "potential", "from", "spring", "function" ]
python
train
PBR/MQ2
MQ2/add_marker_to_qtls.py
https://github.com/PBR/MQ2/blob/6d84dea47e6751333004743f588f03158e35c28d/MQ2/add_marker_to_qtls.py#L38-L55
def add_marker_to_qtl(qtl, map_list):
    """Add the closest marker to the given QTL.

    :arg qtl: a row of the QTL list.
    :arg map_list: the genetic map containing the list of markers.
    """
    closest = ''
    best_diff = None
    for marker in map_list:
        # Only markers on the same linkage group/chromosome qualify.
        if marker[1] != qtl[1]:
            continue
        candidate_diff = float(qtl[2]) - float(marker[2])
        # Keep the first marker with the strictly smallest absolute distance.
        if best_diff is None or abs(candidate_diff) < abs(best_diff):
            best_diff = candidate_diff
            closest = marker
    if closest != '':
        # Return only the marker's name, not the whole map row.
        closest = closest[0]
    return closest
[ "def", "add_marker_to_qtl", "(", "qtl", ",", "map_list", ")", ":", "closest", "=", "''", "diff", "=", "None", "for", "marker", "in", "map_list", ":", "if", "qtl", "[", "1", "]", "==", "marker", "[", "1", "]", ":", "tmp_diff", "=", "float", "(", "qt...
Add the closest marker to the given QTL. :arg qtl: a row of the QTL list. :arg map_list: the genetic map containing the list of markers.
[ "Add", "the", "closest", "marker", "to", "the", "given", "QTL", "." ]
python
train
draios/python-sdc-client
sdcclient/_monitor.py
https://github.com/draios/python-sdc-client/blob/47f83415842048778939b90944f64386a3bcb205/sdcclient/_monitor.py#L94-L170
def create_alert(self, name=None, description=None, severity=None, for_atleast_s=None, condition=None, segmentby=[], segment_condition='ANY', user_filter='', notify=None, enabled=True, annotations={}, alert_obj=None): '''**Description** Create a threshold-based alert. **Arguments** - **name**: the alert name. This will appear in the Sysdig Monitor UI and in notification emails. - **description**: the alert description. This will appear in the Sysdig Monitor UI and in notification emails. - **severity**: syslog-encoded alert severity. This is a number from 0 to 7 where 0 means 'emergency' and 7 is 'debug'. - **for_atleast_s**: the number of consecutive seconds the condition must be satisfied for the alert to fire. - **condition**: the alert condition, as described here https://app.sysdigcloud.com/apidocs/#!/Alerts/post_api_alerts - **segmentby**: a list of Sysdig Monitor segmentation criteria that can be used to apply the alert to multiple entities. For example, segmenting a CPU alert by ['host.mac', 'proc.name'] allows to apply it to any process in any machine. - **segment_condition**: When *segmentby* is specified (and therefore the alert will cover multiple entities) this field is used to determine when it will fire. In particular, you have two options for *segment_condition*: **ANY** (the alert will fire when at least one of the monitored entities satisfies the condition) and **ALL** (the alert will fire when all of the monitored entities satisfy the condition). - **user_filter**: a boolean expression combining Sysdig Monitor segmentation criteria that makes it possible to reduce the scope of the alert. For example: *kubernetes.namespace.name='production' and container.image='nginx'*. - **notify**: the type of notification you want this alert to generate. Options are *EMAIL*, *SNS*, *PAGER_DUTY*, *SYSDIG_DUMP*. - **enabled**: if True, the alert will be enabled when created. 
- **annotations**: an optional dictionary of custom properties that you can associate to this alert for automation or management reasons - **alert_obj**: an optional fully-formed Alert object of the format returned in an "alerts" list by :func:`~SdcClient.get_alerts` This is an alternative to creating the Alert using the individual parameters listed above. **Success Return Value** A dictionary describing the just created alert, with the format described at `this link <https://app.sysdigcloud.com/apidocs/#!/Alerts/post_api_alerts>`__ **Example** `examples/create_alert.py <https://github.com/draios/python-sdc-client/blob/master/examples/create_alert.py>`_ ''' # # Get the list of alerts from the server # res = requests.get(self.url + '/api/alerts', headers=self.hdrs, verify=self.ssl_verify) if not self._checkResponse(res): return [False, self.lasterr] res.json() if alert_obj is None: if None in (name, description, severity, for_atleast_s, condition): return [False, 'Must specify a full Alert object or all parameters: name, description, severity, for_atleast_s, condition'] else: # # Populate the alert information # alert_json = { 'alert': { 'type': 'MANUAL', 'name': name, 'description': description, 'enabled': enabled, 'severity': severity, 'timespan': for_atleast_s * 1000000, 'condition': condition, 'filter': user_filter } } if segmentby != None and segmentby != []: alert_json['alert']['segmentBy'] = segmentby alert_json['alert']['segmentCondition'] = {'type': segment_condition} if annotations != None and annotations != {}: alert_json['alert']['annotations'] = annotations if notify != None: alert_json['alert']['notificationChannelIds'] = notify else: # The REST API enforces "Alert ID and version must be null", so remove them if present, # since these would have been there in a dump from the list_alerts.py example. 
alert_obj.pop('id', None) alert_obj.pop('version', None) alert_json = { 'alert': alert_obj } # # Create the new alert # res = requests.post(self.url + '/api/alerts', headers=self.hdrs, data=json.dumps(alert_json), verify=self.ssl_verify) return self._request_result(res)
[ "def", "create_alert", "(", "self", ",", "name", "=", "None", ",", "description", "=", "None", ",", "severity", "=", "None", ",", "for_atleast_s", "=", "None", ",", "condition", "=", "None", ",", "segmentby", "=", "[", "]", ",", "segment_condition", "=",...
**Description** Create a threshold-based alert. **Arguments** - **name**: the alert name. This will appear in the Sysdig Monitor UI and in notification emails. - **description**: the alert description. This will appear in the Sysdig Monitor UI and in notification emails. - **severity**: syslog-encoded alert severity. This is a number from 0 to 7 where 0 means 'emergency' and 7 is 'debug'. - **for_atleast_s**: the number of consecutive seconds the condition must be satisfied for the alert to fire. - **condition**: the alert condition, as described here https://app.sysdigcloud.com/apidocs/#!/Alerts/post_api_alerts - **segmentby**: a list of Sysdig Monitor segmentation criteria that can be used to apply the alert to multiple entities. For example, segmenting a CPU alert by ['host.mac', 'proc.name'] allows to apply it to any process in any machine. - **segment_condition**: When *segmentby* is specified (and therefore the alert will cover multiple entities) this field is used to determine when it will fire. In particular, you have two options for *segment_condition*: **ANY** (the alert will fire when at least one of the monitored entities satisfies the condition) and **ALL** (the alert will fire when all of the monitored entities satisfy the condition). - **user_filter**: a boolean expression combining Sysdig Monitor segmentation criteria that makes it possible to reduce the scope of the alert. For example: *kubernetes.namespace.name='production' and container.image='nginx'*. - **notify**: the type of notification you want this alert to generate. Options are *EMAIL*, *SNS*, *PAGER_DUTY*, *SYSDIG_DUMP*. - **enabled**: if True, the alert will be enabled when created. 
- **annotations**: an optional dictionary of custom properties that you can associate to this alert for automation or management reasons - **alert_obj**: an optional fully-formed Alert object of the format returned in an "alerts" list by :func:`~SdcClient.get_alerts` This is an alternative to creating the Alert using the individual parameters listed above. **Success Return Value** A dictionary describing the just created alert, with the format described at `this link <https://app.sysdigcloud.com/apidocs/#!/Alerts/post_api_alerts>`__ **Example** `examples/create_alert.py <https://github.com/draios/python-sdc-client/blob/master/examples/create_alert.py>`_
[ "**", "Description", "**", "Create", "a", "threshold", "-", "based", "alert", "." ]
python
test
edx/edx-enterprise
enterprise/admin/views.py
https://github.com/edx/edx-enterprise/blob/aea91379ab0a87cd3bc798961fce28b60ee49a80/enterprise/admin/views.py#L676-L696
def get_failed_enrollment_message(cls, users, enrolled_in): """ Create message for the users who were not able to be enrolled in a course or program. Args: users: An iterable of users who were not successfully enrolled enrolled_in (str): A string identifier for the course or program with which enrollment was attempted Returns: tuple: A 2-tuple containing a message type and message text """ failed_emails = [user.email for user in users] return ( 'error', _( 'The following learners could not be enrolled in {enrolled_in}: {user_list}' ).format( enrolled_in=enrolled_in, user_list=', '.join(failed_emails), ) )
[ "def", "get_failed_enrollment_message", "(", "cls", ",", "users", ",", "enrolled_in", ")", ":", "failed_emails", "=", "[", "user", ".", "email", "for", "user", "in", "users", "]", "return", "(", "'error'", ",", "_", "(", "'The following learners could not be enr...
Create message for the users who were not able to be enrolled in a course or program. Args: users: An iterable of users who were not successfully enrolled enrolled_in (str): A string identifier for the course or program with which enrollment was attempted Returns: tuple: A 2-tuple containing a message type and message text
[ "Create", "message", "for", "the", "users", "who", "were", "not", "able", "to", "be", "enrolled", "in", "a", "course", "or", "program", "." ]
python
valid
ewiger/mlab
src/mlab/awmstools.py
https://github.com/ewiger/mlab/blob/72a98adf6499f548848ad44c604f74d68f07fe4f/src/mlab/awmstools.py#L1011-L1022
def without(seq1, seq2): r"""Return a list with all elements in `seq2` removed from `seq1`, order preserved. Examples: >>> without([1,2,3,1,2], [1]) [2, 3, 2] """ if isSet(seq2): d2 = seq2 else: d2 = set(seq2) return [elt for elt in seq1 if elt not in d2]
[ "def", "without", "(", "seq1", ",", "seq2", ")", ":", "if", "isSet", "(", "seq2", ")", ":", "d2", "=", "seq2", "else", ":", "d2", "=", "set", "(", "seq2", ")", "return", "[", "elt", "for", "elt", "in", "seq1", "if", "elt", "not", "in", "d2", ...
r"""Return a list with all elements in `seq2` removed from `seq1`, order preserved. Examples: >>> without([1,2,3,1,2], [1]) [2, 3, 2]
[ "r", "Return", "a", "list", "with", "all", "elements", "in", "seq2", "removed", "from", "seq1", "order", "preserved", "." ]
python
train
rtfd/sphinx-autoapi
autoapi/mappers/javascript.py
https://github.com/rtfd/sphinx-autoapi/blob/9735f43a8d9ff4620c7bcbd177fd1bb7608052e9/autoapi/mappers/javascript.py#L42-L48
def map(self, options=None): """Trigger find of serialized sources and build objects""" for path, data in self.paths.items(): for item in data: for obj in self.create_class(item, options): obj.jinja_env = self.jinja_env self.add_object(obj)
[ "def", "map", "(", "self", ",", "options", "=", "None", ")", ":", "for", "path", ",", "data", "in", "self", ".", "paths", ".", "items", "(", ")", ":", "for", "item", "in", "data", ":", "for", "obj", "in", "self", ".", "create_class", "(", "item",...
Trigger find of serialized sources and build objects
[ "Trigger", "find", "of", "serialized", "sources", "and", "build", "objects" ]
python
train
wummel/linkchecker
linkcheck/director/aggregator.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/director/aggregator.py#L211-L217
def end_log_output(self, **kwargs): """Print ending output to log.""" kwargs.update(dict( downloaded_bytes=self.downloaded_bytes, num_urls = len(self.result_cache), )) self.logger.end_log_output(**kwargs)
[ "def", "end_log_output", "(", "self", ",", "*", "*", "kwargs", ")", ":", "kwargs", ".", "update", "(", "dict", "(", "downloaded_bytes", "=", "self", ".", "downloaded_bytes", ",", "num_urls", "=", "len", "(", "self", ".", "result_cache", ")", ",", ")", ...
Print ending output to log.
[ "Print", "ending", "output", "to", "log", "." ]
python
train
bcbio/bcbio-nextgen
bcbio/bam/__init__.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/__init__.py#L253-L257
def check_header(in_bam, rgnames, ref_file, config): """Ensure passed in BAM header matches reference file and read groups names. """ _check_bam_contigs(in_bam, ref_file, config) _check_sample(in_bam, rgnames)
[ "def", "check_header", "(", "in_bam", ",", "rgnames", ",", "ref_file", ",", "config", ")", ":", "_check_bam_contigs", "(", "in_bam", ",", "ref_file", ",", "config", ")", "_check_sample", "(", "in_bam", ",", "rgnames", ")" ]
Ensure passed in BAM header matches reference file and read groups names.
[ "Ensure", "passed", "in", "BAM", "header", "matches", "reference", "file", "and", "read", "groups", "names", "." ]
python
train
mitsei/dlkit
dlkit/records/assessment/basic/feedback_answer_records.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/records/assessment/basic/feedback_answer_records.py#L77-L82
def _init_map(self): """stub""" self.my_osid_object_form._my_map['confusedLearningObjectiveIds'] = \ self._confused_learning_objectives_metadata['default_list_values'][0] self.my_osid_object_form._my_map['feedback'] = \ self._feedback_metadata['default_string_values'][0]
[ "def", "_init_map", "(", "self", ")", ":", "self", ".", "my_osid_object_form", ".", "_my_map", "[", "'confusedLearningObjectiveIds'", "]", "=", "self", ".", "_confused_learning_objectives_metadata", "[", "'default_list_values'", "]", "[", "0", "]", "self", ".", "m...
stub
[ "stub" ]
python
train
pgmpy/pgmpy
pgmpy/sampling/HMC.py
https://github.com/pgmpy/pgmpy/blob/9381a66aba3c3871d3ccd00672b148d17d63239e/pgmpy/sampling/HMC.py#L518-L594
def generate_sample(self, initial_pos, num_adapt, num_samples, trajectory_length, stepsize=None): """ Method returns a generator type object whose each iteration yields a sample using Hamiltonian Monte Carlo Parameters ---------- initial_pos: A 1d array like object Vector representing values of parameter position, the starting state in markov chain. num_adapt: int The number of interations to run the adaptation of stepsize num_samples: int Number of samples to be generated trajectory_length: int or float Target trajectory length, stepsize * number of steps(L), where L is the number of steps taken to propose new values of position and momentum per HMC iteration and stepsize is step size. stepsize: float , defaults to None The stepsize for proposing new values of position and momentum in simulate_dynamics If None, then will be choosen suitably Returns ------- genrator: yielding a numpy.array type object for a sample Examples -------- >>> from pgmpy.sampling import HamiltonianMCDA as HMCda, GradLogPDFGaussian as GLPG, LeapFrog >>> from pgmpy.factors.continuous import GaussianDistribution as JGD >>> import numpy as np >>> mean = np.array([1, 1]) >>> covariance = np.array([[1, 0.7], [0.7, 3]]) >>> model = JGD(['x', 'y'], mean, covariance) >>> sampler = HMCda(model=model, grad_log_pdf=GLPG, simulate_dynamics=LeapFrog) >>> gen_samples = sampler.generate_sample(np.array([1, 1]), num_adapt=10000, ... 
num_samples = 10000, trajectory_length=2, stepsize=None) >>> samples_array = np.array([sample for sample in gen_samples]) >>> np.cov(samples_array.T) array([[ 0.98432155, 0.69517394], [ 0.69517394, 2.95449533]]) """ self.accepted_proposals = 0 initial_pos = _check_1d_array_object(initial_pos, 'initial_pos') _check_length_equal(initial_pos, self.model.variables, 'initial_pos', 'model.variables') if stepsize is None: stepsize = self._find_reasonable_stepsize(initial_pos) if num_adapt <= 1: # return sample generated using Simple HMC algorithm for sample in HamiltonianMC.generate_sample(self, initial_pos, num_samples, trajectory_length, stepsize): yield sample return mu = np.log(10.0 * stepsize) stepsize_bar = 1.0 h_bar = 0.0 position_m = initial_pos.copy() num_adapt += 1 for i in range(1, num_samples + 1): position_m, alpha = self._sample(position_m, trajectory_length, stepsize) if i <= num_adapt: stepsize, stepsize_bar, h_bar = self._adapt_params(stepsize, stepsize_bar, h_bar, mu, i, alpha) else: stepsize = stepsize_bar yield position_m self.acceptance_rate = self.accepted_proposals / num_samples
[ "def", "generate_sample", "(", "self", ",", "initial_pos", ",", "num_adapt", ",", "num_samples", ",", "trajectory_length", ",", "stepsize", "=", "None", ")", ":", "self", ".", "accepted_proposals", "=", "0", "initial_pos", "=", "_check_1d_array_object", "(", "in...
Method returns a generator type object whose each iteration yields a sample using Hamiltonian Monte Carlo Parameters ---------- initial_pos: A 1d array like object Vector representing values of parameter position, the starting state in markov chain. num_adapt: int The number of interations to run the adaptation of stepsize num_samples: int Number of samples to be generated trajectory_length: int or float Target trajectory length, stepsize * number of steps(L), where L is the number of steps taken to propose new values of position and momentum per HMC iteration and stepsize is step size. stepsize: float , defaults to None The stepsize for proposing new values of position and momentum in simulate_dynamics If None, then will be choosen suitably Returns ------- genrator: yielding a numpy.array type object for a sample Examples -------- >>> from pgmpy.sampling import HamiltonianMCDA as HMCda, GradLogPDFGaussian as GLPG, LeapFrog >>> from pgmpy.factors.continuous import GaussianDistribution as JGD >>> import numpy as np >>> mean = np.array([1, 1]) >>> covariance = np.array([[1, 0.7], [0.7, 3]]) >>> model = JGD(['x', 'y'], mean, covariance) >>> sampler = HMCda(model=model, grad_log_pdf=GLPG, simulate_dynamics=LeapFrog) >>> gen_samples = sampler.generate_sample(np.array([1, 1]), num_adapt=10000, ... num_samples = 10000, trajectory_length=2, stepsize=None) >>> samples_array = np.array([sample for sample in gen_samples]) >>> np.cov(samples_array.T) array([[ 0.98432155, 0.69517394], [ 0.69517394, 2.95449533]])
[ "Method", "returns", "a", "generator", "type", "object", "whose", "each", "iteration", "yields", "a", "sample", "using", "Hamiltonian", "Monte", "Carlo" ]
python
train
sethmlarson/virtualbox-python
virtualbox/library.py
https://github.com/sethmlarson/virtualbox-python/blob/706c8e3f6e3aee17eb06458e73cbb4bc2d37878b/virtualbox/library.py#L8554-L8568
def set_settings_secret(self, password): """Unlocks the secret data by passing the unlock password to the server. The server will cache the password for that machine. in password of type str The cipher key. raises :class:`VBoxErrorInvalidVmState` Virtual machine is not mutable. """ if not isinstance(password, basestring): raise TypeError("password can only be an instance of type basestring") self._call("setSettingsSecret", in_p=[password])
[ "def", "set_settings_secret", "(", "self", ",", "password", ")", ":", "if", "not", "isinstance", "(", "password", ",", "basestring", ")", ":", "raise", "TypeError", "(", "\"password can only be an instance of type basestring\"", ")", "self", ".", "_call", "(", "\"...
Unlocks the secret data by passing the unlock password to the server. The server will cache the password for that machine. in password of type str The cipher key. raises :class:`VBoxErrorInvalidVmState` Virtual machine is not mutable.
[ "Unlocks", "the", "secret", "data", "by", "passing", "the", "unlock", "password", "to", "the", "server", ".", "The", "server", "will", "cache", "the", "password", "for", "that", "machine", "." ]
python
train
xapple/plumbing
plumbing/databases/sqlite_database.py
https://github.com/xapple/plumbing/blob/4a7706c7722f5996d0ca366f191aff9ac145880a/plumbing/databases/sqlite_database.py#L334-L345
def get_and_order(self, ids, column=None, table=None): """Get specific entries and order them in the same way.""" command = """ SELECT rowid, * from "data" WHERE rowid in (%s) ORDER BY CASE rowid %s END; """ ordered = ','.join(map(str,ids)) rowids = '\n'.join("WHEN '%s' THEN %s" % (row,i) for i,row in enumerate(ids)) command = command % (ordered, rowids)
[ "def", "get_and_order", "(", "self", ",", "ids", ",", "column", "=", "None", ",", "table", "=", "None", ")", ":", "command", "=", "\"\"\"\n SELECT rowid, * from \"data\"\n WHERE rowid in (%s)\n ORDER BY CASE rowid\n %s\n END;\n \"\"\"", ...
Get specific entries and order them in the same way.
[ "Get", "specific", "entries", "and", "order", "them", "in", "the", "same", "way", "." ]
python
train
OCHA-DAP/hdx-python-api
src/hdx/data/dataset.py
https://github.com/OCHA-DAP/hdx-python-api/blob/212440f54f73805826a16db77dbcb6033b18a313/src/hdx/data/dataset.py#L355-L374
def _dataset_merge_filestore_resource(self, resource, updated_resource, filestore_resources, ignore_fields): # type: (hdx.data.Resource, hdx.data.Resource, List[hdx.data.Resource], List[str]) -> None """Helper method to merge updated resource from dataset into HDX resource read from HDX including filestore. Args: resource (hdx.data.Resource): Resource read from HDX updated_resource (hdx.data.Resource): Updated resource from dataset filestore_resources (List[hdx.data.Resource]): List of resources that use filestore (to be appended to) ignore_fields (List[str]): List of fields to ignore when checking resource Returns: None """ if updated_resource.get_file_to_upload(): resource.set_file_to_upload(updated_resource.get_file_to_upload()) filestore_resources.append(resource) merge_two_dictionaries(resource, updated_resource) resource.check_required_fields(ignore_fields=ignore_fields) if resource.get_file_to_upload(): resource['url'] = Dataset.temporary_url
[ "def", "_dataset_merge_filestore_resource", "(", "self", ",", "resource", ",", "updated_resource", ",", "filestore_resources", ",", "ignore_fields", ")", ":", "# type: (hdx.data.Resource, hdx.data.Resource, List[hdx.data.Resource], List[str]) -> None", "if", "updated_resource", "."...
Helper method to merge updated resource from dataset into HDX resource read from HDX including filestore. Args: resource (hdx.data.Resource): Resource read from HDX updated_resource (hdx.data.Resource): Updated resource from dataset filestore_resources (List[hdx.data.Resource]): List of resources that use filestore (to be appended to) ignore_fields (List[str]): List of fields to ignore when checking resource Returns: None
[ "Helper", "method", "to", "merge", "updated", "resource", "from", "dataset", "into", "HDX", "resource", "read", "from", "HDX", "including", "filestore", "." ]
python
train
Parsl/parsl
parsl/executors/serialize/canning.py
https://github.com/Parsl/parsl/blob/d7afb3bc37f50dcf224ae78637944172edb35dac/parsl/executors/serialize/canning.py#L87-L96
def use_pickle(): """Revert to using stdlib pickle. Reverts custom serialization enabled by use_dill|cloudpickle. """ from . import serialize serialize.pickle = serialize._stdlib_pickle # restore special function handling can_map[FunctionType] = _original_can_map[FunctionType]
[ "def", "use_pickle", "(", ")", ":", "from", ".", "import", "serialize", "serialize", ".", "pickle", "=", "serialize", ".", "_stdlib_pickle", "# restore special function handling", "can_map", "[", "FunctionType", "]", "=", "_original_can_map", "[", "FunctionType", "]...
Revert to using stdlib pickle. Reverts custom serialization enabled by use_dill|cloudpickle.
[ "Revert", "to", "using", "stdlib", "pickle", "." ]
python
valid
apache/airflow
airflow/security/kerberos.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/security/kerberos.py#L100-L110
def detect_conf_var(): """Return true if the ticket cache contains "conf" information as is found in ticket caches of Kerberos 1.8.1 or later. This is incompatible with the Sun Java Krb5LoginModule in Java6, so we need to take an action to work around it. """ ticket_cache = configuration.conf.get('kerberos', 'ccache') with open(ticket_cache, 'rb') as f: # Note: this file is binary, so we check against a bytearray. return b'X-CACHECONF:' in f.read()
[ "def", "detect_conf_var", "(", ")", ":", "ticket_cache", "=", "configuration", ".", "conf", ".", "get", "(", "'kerberos'", ",", "'ccache'", ")", "with", "open", "(", "ticket_cache", ",", "'rb'", ")", "as", "f", ":", "# Note: this file is binary, so we check agai...
Return true if the ticket cache contains "conf" information as is found in ticket caches of Kerberos 1.8.1 or later. This is incompatible with the Sun Java Krb5LoginModule in Java6, so we need to take an action to work around it.
[ "Return", "true", "if", "the", "ticket", "cache", "contains", "conf", "information", "as", "is", "found", "in", "ticket", "caches", "of", "Kerberos", "1", ".", "8", ".", "1", "or", "later", ".", "This", "is", "incompatible", "with", "the", "Sun", "Java",...
python
test
Accelize/pycosio
pycosio/_core/functions_os_path.py
https://github.com/Accelize/pycosio/blob/1cc1f8fdf5394d92918b7bae2bfa682169ccc48c/pycosio/_core/functions_os_path.py#L176-L202
def relpath(path, start=None): """ Return a relative file path to path either from the current directory or from an optional start directory. For storage objects, "path" and "start" are relative to storage root. "/" are not stripped on storage objects path. The ending slash is required on some storage to signify that target is a directory. Equivalent to "os.path.relpath". Args: path (path-like object): Path or URL. start (path-like object): Relative from this optional directory. Default to "os.curdir" for local files. Returns: str: Relative path. """ relative = get_instance(path).relpath(path) if start: # Storage relative path # Replaces "\" by "/" for Windows. return os_path_relpath(relative, start=start).replace('\\', '/') return relative
[ "def", "relpath", "(", "path", ",", "start", "=", "None", ")", ":", "relative", "=", "get_instance", "(", "path", ")", ".", "relpath", "(", "path", ")", "if", "start", ":", "# Storage relative path", "# Replaces \"\\\" by \"/\" for Windows.", "return", "os_path_...
Return a relative file path to path either from the current directory or from an optional start directory. For storage objects, "path" and "start" are relative to storage root. "/" are not stripped on storage objects path. The ending slash is required on some storage to signify that target is a directory. Equivalent to "os.path.relpath". Args: path (path-like object): Path or URL. start (path-like object): Relative from this optional directory. Default to "os.curdir" for local files. Returns: str: Relative path.
[ "Return", "a", "relative", "file", "path", "to", "path", "either", "from", "the", "current", "directory", "or", "from", "an", "optional", "start", "directory", "." ]
python
train
awslabs/aws-shell
awsshell/utils.py
https://github.com/awslabs/aws-shell/blob/8950f03d9d720879890af6c11537b8f9789ce5a9/awsshell/utils.py#L73-L87
def file_contents(self, filename, binary=False): """Return the file for a given filename. If you want binary content use ``mode='rb'``. """ if binary: mode = 'rb' else: mode = 'r' try: with open(filename, mode) as f: return f.read() except (OSError, IOError) as e: raise FileReadError(str(e))
[ "def", "file_contents", "(", "self", ",", "filename", ",", "binary", "=", "False", ")", ":", "if", "binary", ":", "mode", "=", "'rb'", "else", ":", "mode", "=", "'r'", "try", ":", "with", "open", "(", "filename", ",", "mode", ")", "as", "f", ":", ...
Return the file for a given filename. If you want binary content use ``mode='rb'``.
[ "Return", "the", "file", "for", "a", "given", "filename", "." ]
python
train
ricequant/rqalpha
rqalpha/api/api_base.py
https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/api/api_base.py#L387-L407
def unsubscribe(id_or_symbols): """ 取消订阅合约行情。取消订阅会导致合约池内合约的减少,如果当前合约池中没有任何合约,则策略直接退出。 :param id_or_symbols: 标的物 :type id_or_symbols: :class:`~Instrument` object | `str` | List[:class:`~Instrument`] | List[`str`] """ current_universe = Environment.get_instance().get_universe() if isinstance(id_or_symbols, six.string_types): order_book_id = instruments(id_or_symbols).order_book_id current_universe.discard(order_book_id) elif isinstance(id_or_symbols, Instrument): current_universe.discard(id_or_symbols.order_book_id) elif isinstance(id_or_symbols, Iterable): for item in id_or_symbols: i = assure_order_book_id(item) current_universe.discard(i) else: raise RQInvalidArgument(_(u"unsupported order_book_id type")) Environment.get_instance().update_universe(current_universe)
[ "def", "unsubscribe", "(", "id_or_symbols", ")", ":", "current_universe", "=", "Environment", ".", "get_instance", "(", ")", ".", "get_universe", "(", ")", "if", "isinstance", "(", "id_or_symbols", ",", "six", ".", "string_types", ")", ":", "order_book_id", "=...
取消订阅合约行情。取消订阅会导致合约池内合约的减少,如果当前合约池中没有任何合约,则策略直接退出。 :param id_or_symbols: 标的物 :type id_or_symbols: :class:`~Instrument` object | `str` | List[:class:`~Instrument`] | List[`str`]
[ "取消订阅合约行情。取消订阅会导致合约池内合约的减少,如果当前合约池中没有任何合约,则策略直接退出。" ]
python
train
matrix-org/matrix-python-sdk
matrix_client/room.py
https://github.com/matrix-org/matrix-python-sdk/blob/e734cce3ccd35f2d355c6a19a7a701033472498a/matrix_client/room.py#L472-L478
def add_room_alias(self, room_alias): """Add an alias to the room and return True if successful.""" try: self.client.api.set_room_alias(self.room_id, room_alias) return True except MatrixRequestError: return False
[ "def", "add_room_alias", "(", "self", ",", "room_alias", ")", ":", "try", ":", "self", ".", "client", ".", "api", ".", "set_room_alias", "(", "self", ".", "room_id", ",", "room_alias", ")", "return", "True", "except", "MatrixRequestError", ":", "return", "...
Add an alias to the room and return True if successful.
[ "Add", "an", "alias", "to", "the", "room", "and", "return", "True", "if", "successful", "." ]
python
train
postlund/pyatv
pyatv/mrp/protocol.py
https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/pyatv/mrp/protocol.py#L53-L99
async def start(self): """Connect to device and listen to incoming messages.""" if self.connection.connected: return await self.connection.connect() # In case credentials have been given externally (i.e. not by pairing # with a device), then use that client id if self.service.device_credentials: self.srp.pairing_id = Credentials.parse( self.service.device_credentials).client_id # The first message must always be DEVICE_INFORMATION, otherwise the # device will not respond with anything msg = messages.device_information( 'pyatv', self.srp.pairing_id.decode()) await self.send_and_receive(msg) self._initial_message_sent = True # This should be the first message sent after encryption has # been enabled await self.send(messages.set_ready_state()) async def _wait_for_updates(_, semaphore): # Use a counter here whenever more than one message is expected semaphore.release() # Wait for some stuff to arrive before returning semaphore = asyncio.Semaphore(value=0, loop=self.loop) self.add_listener(_wait_for_updates, protobuf.SET_STATE_MESSAGE, data=semaphore, one_shot=True) # Subscribe to updates at this stage await self.send(messages.client_updates_config()) await self.send(messages.wake_device()) try: await asyncio.wait_for( semaphore.acquire(), 1, loop=self.loop) except asyncio.TimeoutError: # This is not an issue itself, but I should do something better. # Basically this gives the device about one second to respond with # some metadata before continuing. pass
[ "async", "def", "start", "(", "self", ")", ":", "if", "self", ".", "connection", ".", "connected", ":", "return", "await", "self", ".", "connection", ".", "connect", "(", ")", "# In case credentials have been given externally (i.e. not by pairing", "# with a device), ...
Connect to device and listen to incoming messages.
[ "Connect", "to", "device", "and", "listen", "to", "incoming", "messages", "." ]
python
train
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py#L11965-L11982
def scaled_imu3_encode(self, time_boot_ms, xacc, yacc, zacc, xgyro, ygyro, zgyro, xmag, ymag, zmag): ''' The RAW IMU readings for 3rd 9DOF sensor setup. This message should contain the scaled values to the described units time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t) xacc : X acceleration (mg) (int16_t) yacc : Y acceleration (mg) (int16_t) zacc : Z acceleration (mg) (int16_t) xgyro : Angular speed around X axis (millirad /sec) (int16_t) ygyro : Angular speed around Y axis (millirad /sec) (int16_t) zgyro : Angular speed around Z axis (millirad /sec) (int16_t) xmag : X Magnetic field (milli tesla) (int16_t) ymag : Y Magnetic field (milli tesla) (int16_t) zmag : Z Magnetic field (milli tesla) (int16_t) ''' return MAVLink_scaled_imu3_message(time_boot_ms, xacc, yacc, zacc, xgyro, ygyro, zgyro, xmag, ymag, zmag)
[ "def", "scaled_imu3_encode", "(", "self", ",", "time_boot_ms", ",", "xacc", ",", "yacc", ",", "zacc", ",", "xgyro", ",", "ygyro", ",", "zgyro", ",", "xmag", ",", "ymag", ",", "zmag", ")", ":", "return", "MAVLink_scaled_imu3_message", "(", "time_boot_ms", "...
The RAW IMU readings for 3rd 9DOF sensor setup. This message should contain the scaled values to the described units time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t) xacc : X acceleration (mg) (int16_t) yacc : Y acceleration (mg) (int16_t) zacc : Z acceleration (mg) (int16_t) xgyro : Angular speed around X axis (millirad /sec) (int16_t) ygyro : Angular speed around Y axis (millirad /sec) (int16_t) zgyro : Angular speed around Z axis (millirad /sec) (int16_t) xmag : X Magnetic field (milli tesla) (int16_t) ymag : Y Magnetic field (milli tesla) (int16_t) zmag : Z Magnetic field (milli tesla) (int16_t)
[ "The", "RAW", "IMU", "readings", "for", "3rd", "9DOF", "sensor", "setup", ".", "This", "message", "should", "contain", "the", "scaled", "values", "to", "the", "described", "units" ]
python
train
project-ncl/pnc-cli
pnc_cli/buildconfigurationsets.py
https://github.com/project-ncl/pnc-cli/blob/3dc149bf84928f60a8044ac50b58bbaddd451902/pnc_cli/buildconfigurationsets.py#L44-L51
def create_build_configuration_set_raw(**kwargs): """ Create a new BuildConfigurationSet. """ config_set = _create_build_config_set_object(**kwargs) response = utils.checked_api_call(pnc_api.build_group_configs, 'create_new', body=config_set) if response: return response.content
[ "def", "create_build_configuration_set_raw", "(", "*", "*", "kwargs", ")", ":", "config_set", "=", "_create_build_config_set_object", "(", "*", "*", "kwargs", ")", "response", "=", "utils", ".", "checked_api_call", "(", "pnc_api", ".", "build_group_configs", ",", ...
Create a new BuildConfigurationSet.
[ "Create", "a", "new", "BuildConfigurationSet", "." ]
python
train
fake-name/ChromeController
ChromeController/Generator/Generated.py
https://github.com/fake-name/ChromeController/blob/914dd136184e8f1165c7aa6ef30418aaf10c61f0/ChromeController/Generator/Generated.py#L5043-L5060
def ServiceWorker_updateRegistration(self, scopeURL): """ Function path: ServiceWorker.updateRegistration Domain: ServiceWorker Method name: updateRegistration Parameters: Required arguments: 'scopeURL' (type: string) -> No description No return value. """ assert isinstance(scopeURL, (str,) ), "Argument 'scopeURL' must be of type '['str']'. Received type: '%s'" % type( scopeURL) subdom_funcs = self.synchronous_command('ServiceWorker.updateRegistration', scopeURL=scopeURL) return subdom_funcs
[ "def", "ServiceWorker_updateRegistration", "(", "self", ",", "scopeURL", ")", ":", "assert", "isinstance", "(", "scopeURL", ",", "(", "str", ",", ")", ")", ",", "\"Argument 'scopeURL' must be of type '['str']'. Received type: '%s'\"", "%", "type", "(", "scopeURL", ")"...
Function path: ServiceWorker.updateRegistration Domain: ServiceWorker Method name: updateRegistration Parameters: Required arguments: 'scopeURL' (type: string) -> No description No return value.
[ "Function", "path", ":", "ServiceWorker", ".", "updateRegistration", "Domain", ":", "ServiceWorker", "Method", "name", ":", "updateRegistration", "Parameters", ":", "Required", "arguments", ":", "scopeURL", "(", "type", ":", "string", ")", "-", ">", "No", "descr...
python
train
liamw9534/bt-manager
bt_manager/audio.py
https://github.com/liamw9534/bt-manager/blob/51be2919394ce8134c698359649bfad09eedf4ec/bt_manager/audio.py#L252-L264
def write_transport(self, data): """ Write data to media transport. The data is encoded using the SBC codec and RTP encapsulated before being written to the transport file descriptor. :param array{byte} data: Payload data to encode, encapsulate and send. """ if ('w' not in self.access_type): raise BTIncompatibleTransportAccessType return self.codec.encode(self.fd, self.write_mtu, data)
[ "def", "write_transport", "(", "self", ",", "data", ")", ":", "if", "(", "'w'", "not", "in", "self", ".", "access_type", ")", ":", "raise", "BTIncompatibleTransportAccessType", "return", "self", ".", "codec", ".", "encode", "(", "self", ".", "fd", ",", "...
Write data to media transport. The data is encoded using the SBC codec and RTP encapsulated before being written to the transport file descriptor. :param array{byte} data: Payload data to encode, encapsulate and send.
[ "Write", "data", "to", "media", "transport", ".", "The", "data", "is", "encoded", "using", "the", "SBC", "codec", "and", "RTP", "encapsulated", "before", "being", "written", "to", "the", "transport", "file", "descriptor", "." ]
python
train
Erotemic/utool
utool/util_inspect.py
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_inspect.py#L1683-L1792
def find_funcs_called_with_kwargs(sourcecode, target_kwargs_name='kwargs'): r""" Finds functions that are called with the keyword `kwargs` variable CommandLine: python3 -m utool.util_inspect find_funcs_called_with_kwargs Example: >>> # ENABLE_DOCTEST >>> import utool as ut >>> sourcecode = ut.codeblock( ''' x, y = list(zip(*ut.ichunks(data, 2))) somecall(arg1, arg2, arg3=4, **kwargs) import sys sys.badcall(**kwargs) def foo(): bar(**kwargs) ut.holymoly(**kwargs) baz() def biz(**kwargs): foo2(**kwargs) ''') >>> child_funcnamess = ut.find_funcs_called_with_kwargs(sourcecode) >>> print('child_funcnamess = %r' % (child_funcnamess,)) >>> assert 'foo2' not in child_funcnamess, 'foo2 should not be found' >>> assert 'bar' in child_funcnamess, 'bar should be found' """ import ast sourcecode = 'from __future__ import print_function\n' + sourcecode pt = ast.parse(sourcecode) child_funcnamess = [] debug = False or VERYVERB_INSPECT if debug: print('\nInput:') print('target_kwargs_name = %r' % (target_kwargs_name,)) print('\nSource:') print(sourcecode) import astor print('\nParse:') print(astor.dump(pt)) class KwargParseVisitor(ast.NodeVisitor): """ TODO: understand ut.update_existing and dict update ie, know when kwargs is passed to these functions and then look assume the object that was updated is a dictionary and check wherever that is passed to kwargs as well. """ def visit_FunctionDef(self, node): if debug: print('\nVISIT FunctionDef node = %r' % (node,)) print('node.args.kwarg = %r' % (node.args.kwarg,)) if six.PY2: kwarg_name = node.args.kwarg else: if node.args.kwarg is None: kwarg_name = None else: kwarg_name = node.args.kwarg.arg #import utool as ut #ut.embed() if kwarg_name != target_kwargs_name: # target kwargs is still in scope ast.NodeVisitor.generic_visit(self, node) def visit_Call(self, node): if debug: print('\nVISIT Call node = %r' % (node,)) #print(ut.repr4(node.__dict__,)) if isinstance(node.func, ast.Attribute): try: funcname = node.func.value.id + '.' 
+ node.func.attr except AttributeError: funcname = None elif isinstance(node.func, ast.Name): funcname = node.func.id else: raise NotImplementedError( 'do not know how to parse: node.func = %r' % (node.func,)) if six.PY2: kwargs = node.kwargs kwargs_name = None if kwargs is None else kwargs.id if funcname is not None and kwargs_name == target_kwargs_name: child_funcnamess.append(funcname) if debug: print('funcname = %r' % (funcname,)) print('kwargs_name = %r' % (kwargs_name,)) else: if node.keywords: for kwargs in node.keywords: if kwargs.arg is None: if hasattr(kwargs.value, 'id'): kwargs_name = kwargs.value.id if funcname is not None and kwargs_name == target_kwargs_name: child_funcnamess.append(funcname) if debug: print('funcname = %r' % (funcname,)) print('kwargs_name = %r' % (kwargs_name,)) ast.NodeVisitor.generic_visit(self, node) try: KwargParseVisitor().visit(pt) except Exception: raise pass #import utool as ut #if ut.SUPER_STRICT: # raise return child_funcnamess
[ "def", "find_funcs_called_with_kwargs", "(", "sourcecode", ",", "target_kwargs_name", "=", "'kwargs'", ")", ":", "import", "ast", "sourcecode", "=", "'from __future__ import print_function\\n'", "+", "sourcecode", "pt", "=", "ast", ".", "parse", "(", "sourcecode", ")"...
r""" Finds functions that are called with the keyword `kwargs` variable CommandLine: python3 -m utool.util_inspect find_funcs_called_with_kwargs Example: >>> # ENABLE_DOCTEST >>> import utool as ut >>> sourcecode = ut.codeblock( ''' x, y = list(zip(*ut.ichunks(data, 2))) somecall(arg1, arg2, arg3=4, **kwargs) import sys sys.badcall(**kwargs) def foo(): bar(**kwargs) ut.holymoly(**kwargs) baz() def biz(**kwargs): foo2(**kwargs) ''') >>> child_funcnamess = ut.find_funcs_called_with_kwargs(sourcecode) >>> print('child_funcnamess = %r' % (child_funcnamess,)) >>> assert 'foo2' not in child_funcnamess, 'foo2 should not be found' >>> assert 'bar' in child_funcnamess, 'bar should be found'
[ "r", "Finds", "functions", "that", "are", "called", "with", "the", "keyword", "kwargs", "variable" ]
python
train
welbornprod/colr
colr/colr_docopt.py
https://github.com/welbornprod/colr/blob/417117fdbddbc53142096685ac2af006b2bd0220/colr/colr_docopt.py#L57-L104
def _coloredhelp(s): """ Colorize the usage string for docopt (ColorDocoptExit, docoptextras) """ newlines = [] bigindent = (' ' * 16) in_opts = False for line in s.split('\n'): linestripped = line.strip('\n').strip().strip(':') if linestripped == 'Usage': # label line = line.replace('Usage', str(C('Usage', **ARGS_LABEL))) elif linestripped == 'Options': line = line.replace('Options', str(C('Options', **ARGS_LABEL))) in_opts = True elif (':' in line) and (not line.startswith(bigindent)): # opt,desc line. colorize it. lineparts = line.split(':') opt = lineparts[0] vals = [lineparts[1]] if len(lineparts) == 2 else lineparts[1:] # colorize opt if ',' in opt: opts = opt.split(',') else: opts = [opt] optstr = ','.join(str(C(o, **ARGS_OPTIONS)) for o in opts) # colorize desc valstr = ':'.join(str(C(val, **ARGS_DESC)) for val in vals) line = ':'.join((optstr, valstr)) elif in_opts and line.startswith(bigindent): # continued desc string.. # Make any 'Default:Value' parts look the same as the opt,desc. line = ':'.join(str(C(s, **ARGS_DESC)) for s in line.split(':')) elif (not line.startswith(' ')): # header line. line = str(C(line, **ARGS_HEADER)) else: # Everything else, usage mainly. if SCRIPT: line = line.replace(SCRIPT, str(C(SCRIPT, **ARGS_SCRIPT))) newlines.append( '{}{}'.format(line, C('', style='reset_all')) ) return '\n'.join(newlines)
[ "def", "_coloredhelp", "(", "s", ")", ":", "newlines", "=", "[", "]", "bigindent", "=", "(", "' '", "*", "16", ")", "in_opts", "=", "False", "for", "line", "in", "s", ".", "split", "(", "'\\n'", ")", ":", "linestripped", "=", "line", ".", "strip", ...
Colorize the usage string for docopt (ColorDocoptExit, docoptextras)
[ "Colorize", "the", "usage", "string", "for", "docopt", "(", "ColorDocoptExit", "docoptextras", ")" ]
python
train
abe-winter/pg13-py
pg13/diff.py
https://github.com/abe-winter/pg13-py/blob/c78806f99f35541a8756987e86edca3438aa97f5/pg13/diff.py#L96-L108
def rediff(a,b,global_a_slice=None): "recursive diff (splits around longest substring and runs diff on head and tail remnants).\ global_a_slice is used for recursion and should be left undefined in outer call.\ returns a list of Delta tuples." if not (a or b): return [] global_a_slice=global_a_slice or (0,len(a)) csresult=contigsub(a,b) if not csresult: return (Delta(global_a_slice[0],global_a_slice[1],b),) # i.e. total replacement slicea,sliceb=csresult if slicea[0]==0 and sliceb[0]==0 and slicea[1]==len(a) and sliceb[1]==len(b): return [] # i.e. nochange head=rediff(a[:slicea[0]],b[:sliceb[0]],subslice(slicea,global_a_slice,'head')) tail=rediff(a[slicea[1]:],b[sliceb[1]:],subslice(slicea,global_a_slice,'tail')) return list(head)+list(tail)
[ "def", "rediff", "(", "a", ",", "b", ",", "global_a_slice", "=", "None", ")", ":", "if", "not", "(", "a", "or", "b", ")", ":", "return", "[", "]", "global_a_slice", "=", "global_a_slice", "or", "(", "0", ",", "len", "(", "a", ")", ")", "csresult"...
recursive diff (splits around longest substring and runs diff on head and tail remnants).\ global_a_slice is used for recursion and should be left undefined in outer call.\ returns a list of Delta tuples.
[ "recursive", "diff", "(", "splits", "around", "longest", "substring", "and", "runs", "diff", "on", "head", "and", "tail", "remnants", ")", ".", "\\", "global_a_slice", "is", "used", "for", "recursion", "and", "should", "be", "left", "undefined", "in", "outer...
python
train
googleapis/google-cloud-python
firestore/google/cloud/firestore_v1beta1/query.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/firestore/google/cloud/firestore_v1beta1/query.py#L283-L324
def order_by(self, field_path, direction=ASCENDING): """Modify the query to add an order clause on a specific field. See :meth:`~.firestore_v1beta1.client.Client.field_path` for more information on **field paths**. Successive :meth:`~.firestore_v1beta1.query.Query.order_by` calls will further refine the ordering of results returned by the query (i.e. the new "order by" fields will be added to existing ones). Args: field_path (str): A field path (``.``-delimited list of field names) on which to order the query results. direction (Optional[str]): The direction to order by. Must be one of :attr:`ASCENDING` or :attr:`DESCENDING`, defaults to :attr:`ASCENDING`. Returns: ~.firestore_v1beta1.query.Query: An ordered query. Acts as a copy of the current query, modified with the newly added "order by" constraint. Raises: ValueError: If ``field_path`` is invalid. ValueError: If ``direction`` is not one of :attr:`ASCENDING` or :attr:`DESCENDING`. """ field_path_module.split_field_path(field_path) # raises order_pb = self._make_order(field_path, direction) new_orders = self._orders + (order_pb,) return self.__class__( self._parent, projection=self._projection, field_filters=self._field_filters, orders=new_orders, limit=self._limit, offset=self._offset, start_at=self._start_at, end_at=self._end_at, )
[ "def", "order_by", "(", "self", ",", "field_path", ",", "direction", "=", "ASCENDING", ")", ":", "field_path_module", ".", "split_field_path", "(", "field_path", ")", "# raises", "order_pb", "=", "self", ".", "_make_order", "(", "field_path", ",", "direction", ...
Modify the query to add an order clause on a specific field. See :meth:`~.firestore_v1beta1.client.Client.field_path` for more information on **field paths**. Successive :meth:`~.firestore_v1beta1.query.Query.order_by` calls will further refine the ordering of results returned by the query (i.e. the new "order by" fields will be added to existing ones). Args: field_path (str): A field path (``.``-delimited list of field names) on which to order the query results. direction (Optional[str]): The direction to order by. Must be one of :attr:`ASCENDING` or :attr:`DESCENDING`, defaults to :attr:`ASCENDING`. Returns: ~.firestore_v1beta1.query.Query: An ordered query. Acts as a copy of the current query, modified with the newly added "order by" constraint. Raises: ValueError: If ``field_path`` is invalid. ValueError: If ``direction`` is not one of :attr:`ASCENDING` or :attr:`DESCENDING`.
[ "Modify", "the", "query", "to", "add", "an", "order", "clause", "on", "a", "specific", "field", "." ]
python
train
nschloe/matplotlib2tikz
matplotlib2tikz/text.py
https://github.com/nschloe/matplotlib2tikz/blob/ac5daca6f38b834d757f6c6ae6cc34121956f46b/matplotlib2tikz/text.py#L8-L126
def draw_text(data, obj): """Paints text on the graph. """ content = [] properties = [] style = [] if isinstance(obj, mpl.text.Annotation): _annotation(obj, data, content) # 1: coordinates # 2: properties (shapes, rotation, etc) # 3: text style # 4: the text # -------1--------2---3--4-- pos = obj.get_position() # from .util import transform_to_data_coordinates # pos = transform_to_data_coordinates(obj, *pos) text = obj.get_text() if text in ["", data["current axis title"]]: # Text nodes which are direct children of Axes are typically titles. They are # already captured by the `title` property of pgfplots axes, so skip them here. return data, content size = obj.get_size() bbox = obj.get_bbox_patch() converter = mpl.colors.ColorConverter() # without the factor 0.5, the fonts are too big most of the time. # TODO fix this scaling = 0.5 * size / data["font size"] ff = data["float format"] if scaling != 1.0: properties.append(("scale=" + ff).format(scaling)) if bbox is not None: _bbox(bbox, data, properties, scaling) ha = obj.get_ha() va = obj.get_va() anchor = _transform_positioning(ha, va) if anchor is not None: properties.append(anchor) data, col, _ = color.mpl_color2xcolor(data, converter.to_rgb(obj.get_color())) properties.append("text={}".format(col)) properties.append("rotate={:.1f}".format(obj.get_rotation())) if obj.get_style() == "italic": style.append("\\itshape") else: assert obj.get_style() == "normal" # From matplotlib/font_manager.py: # weight_dict = { # 'ultralight' : 100, # 'light' : 200, # 'normal' : 400, # 'regular' : 400, # 'book' : 400, # 'medium' : 500, # 'roman' : 500, # 'semibold' : 600, # 'demibold' : 600, # 'demi' : 600, # 'bold' : 700, # 'heavy' : 800, # 'extra bold' : 800, # 'black' : 900} # # get_weights returns a numeric value in the range 0-1000 or one of # ‘light’, ‘normal’, ‘regular’, ‘book’, ‘medium’, ‘roman’, ‘semibold’, # ‘demibold’, ‘demi’, ‘bold’, ‘heavy’, ‘extra bold’, ‘black’ weight = obj.get_weight() if weight in [ "semibold", 
"demibold", "demi", "bold", "heavy", "extra bold", "black", ] or (isinstance(weight, int) and weight > 550): style.append("\\bfseries") # \lfseries isn't that common yet # elif weight == 'light' or (isinstance(weight, int) and weight < 300): # style.append('\\lfseries') if obj.axes: # If the coordinates are relative to an axis, use `axis cs`. tikz_pos = ("(axis cs:" + ff + "," + ff + ")").format(*pos) else: # relative to the entire figure, it's a getting a littler harder. See # <http://tex.stackexchange.com/a/274902/13262> for a solution to the # problem: tikz_pos = ( "({{$(current bounding box.south west)!" + ff + "!" "(current bounding box.south east)$}}" "|-" "{{$(current bounding box.south west)!" + ff + "!" "(current bounding box.north west)$}})" ).format(*pos) if "\n" in text: # http://tex.stackexchange.com/a/124114/13262 properties.append("align={}".format(ha)) # Manipulating the text here is actually against mpl2tikz's policy not # to do that. On the other hand, newlines should translate into # newlines. # We might want to remove this here in the future. text = text.replace("\n ", "\\\\") content.append( "\\node at {}[\n {}\n]{{{}}};\n".format( tikz_pos, ",\n ".join(properties), " ".join(style + [text]) ) ) return data, content
[ "def", "draw_text", "(", "data", ",", "obj", ")", ":", "content", "=", "[", "]", "properties", "=", "[", "]", "style", "=", "[", "]", "if", "isinstance", "(", "obj", ",", "mpl", ".", "text", ".", "Annotation", ")", ":", "_annotation", "(", "obj", ...
Paints text on the graph.
[ "Paints", "text", "on", "the", "graph", "." ]
python
train
bykof/billomapy
billomapy/billomapy.py
https://github.com/bykof/billomapy/blob/a28ba69fd37654fa145d0411d52c200e7f8984ab/billomapy/billomapy.py#L559-L567
def update_supplier(self, supplier_id, supplier_dict): """ Updates a supplier :param supplier_id: the supplier id :param supplier_dict: dict :return: dict """ return self._create_put_request(resource=SUPPLIERS, billomat_id=supplier_id, send_data=supplier_dict)
[ "def", "update_supplier", "(", "self", ",", "supplier_id", ",", "supplier_dict", ")", ":", "return", "self", ".", "_create_put_request", "(", "resource", "=", "SUPPLIERS", ",", "billomat_id", "=", "supplier_id", ",", "send_data", "=", "supplier_dict", ")" ]
Updates a supplier :param supplier_id: the supplier id :param supplier_dict: dict :return: dict
[ "Updates", "a", "supplier" ]
python
train
bunq/sdk_python
bunq/sdk/model/generated/endpoint.py
https://github.com/bunq/sdk_python/blob/da6c9b83e6d83ee8062617f53c6eb7293c0d863d/bunq/sdk/model/generated/endpoint.py#L31600-L31639
def create(cls, client_payment_service_provider_certificate, client_payment_service_provider_certificate_chain, client_public_key_signature, custom_headers=None): """ :param client_payment_service_provider_certificate: Payment Services Directive 2 compatible QSEAL certificate :type client_payment_service_provider_certificate: str :param client_payment_service_provider_certificate_chain: Intermediate and root certificate belonging to the provided certificate. :type client_payment_service_provider_certificate_chain: str :param client_public_key_signature: The Base64 encoded signature of the public key provided during installation and with the installation token appended as a nonce. Signed with the private key belonging to the QSEAL certificate. :type client_public_key_signature: str :type custom_headers: dict[str, str]|None :rtype: BunqResponseInt """ if custom_headers is None: custom_headers = {} request_map = { cls.FIELD_CLIENT_PAYMENT_SERVICE_PROVIDER_CERTIFICATE: client_payment_service_provider_certificate, cls.FIELD_CLIENT_PAYMENT_SERVICE_PROVIDER_CERTIFICATE_CHAIN: client_payment_service_provider_certificate_chain, cls.FIELD_CLIENT_PUBLIC_KEY_SIGNATURE: client_public_key_signature } request_map_string = converter.class_to_json(request_map) request_map_string = cls._remove_field_for_request(request_map_string) api_client = client.ApiClient(cls._get_api_context()) request_bytes = request_map_string.encode() endpoint_url = cls._ENDPOINT_URL_CREATE response_raw = api_client.post(endpoint_url, request_bytes, custom_headers) return BunqResponseInt.cast_from_bunq_response( cls._process_for_id(response_raw) )
[ "def", "create", "(", "cls", ",", "client_payment_service_provider_certificate", ",", "client_payment_service_provider_certificate_chain", ",", "client_public_key_signature", ",", "custom_headers", "=", "None", ")", ":", "if", "custom_headers", "is", "None", ":", "custom_he...
:param client_payment_service_provider_certificate: Payment Services Directive 2 compatible QSEAL certificate :type client_payment_service_provider_certificate: str :param client_payment_service_provider_certificate_chain: Intermediate and root certificate belonging to the provided certificate. :type client_payment_service_provider_certificate_chain: str :param client_public_key_signature: The Base64 encoded signature of the public key provided during installation and with the installation token appended as a nonce. Signed with the private key belonging to the QSEAL certificate. :type client_public_key_signature: str :type custom_headers: dict[str, str]|None :rtype: BunqResponseInt
[ ":", "param", "client_payment_service_provider_certificate", ":", "Payment", "Services", "Directive", "2", "compatible", "QSEAL", "certificate", ":", "type", "client_payment_service_provider_certificate", ":", "str", ":", "param", "client_payment_service_provider_certificate_chai...
python
train
portfoliome/foil
foil/dates.py
https://github.com/portfoliome/foil/blob/b66d8cf4ab048a387d8c7a033b47e922ed6917d6/foil/dates.py#L47-L52
def parse_date(date_str: str, pattern=_RE_DATE) -> dt.date: """Parse datetime.date from YYYY-MM-DD format.""" groups = re.match(pattern, date_str) return dt.date(*_date_to_tuple(groups.groupdict()))
[ "def", "parse_date", "(", "date_str", ":", "str", ",", "pattern", "=", "_RE_DATE", ")", "->", "dt", ".", "date", ":", "groups", "=", "re", ".", "match", "(", "pattern", ",", "date_str", ")", "return", "dt", ".", "date", "(", "*", "_date_to_tuple", "(...
Parse datetime.date from YYYY-MM-DD format.
[ "Parse", "datetime", ".", "date", "from", "YYYY", "-", "MM", "-", "DD", "format", "." ]
python
train
pymc-devs/pymc
pymc/distributions.py
https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L2283-L2293
def truncated_pareto_expval(alpha, m, b): """ Expected value of truncated Pareto distribution. """ if alpha <= 1: return inf part1 = (m ** alpha) / (1. - (m / b) ** alpha) part2 = 1. * alpha / (alpha - 1) part3 = (1. / (m ** (alpha - 1)) - 1. / (b ** (alpha - 1.))) return part1 * part2 * part3
[ "def", "truncated_pareto_expval", "(", "alpha", ",", "m", ",", "b", ")", ":", "if", "alpha", "<=", "1", ":", "return", "inf", "part1", "=", "(", "m", "**", "alpha", ")", "/", "(", "1.", "-", "(", "m", "/", "b", ")", "**", "alpha", ")", "part2",...
Expected value of truncated Pareto distribution.
[ "Expected", "value", "of", "truncated", "Pareto", "distribution", "." ]
python
train
ynop/audiomate
audiomate/corpus/io/tatoeba.py
https://github.com/ynop/audiomate/blob/61727920b23a708293c3d526fa3000d4de9c6c21/audiomate/corpus/io/tatoeba.py#L123-L134
def _download_audio_files(self, records, target_path): """ Download all audio files based on the given records. """ for record in records: audio_folder = os.path.join(target_path, 'audio', record[2]) audio_file = os.path.join(audio_folder, '{}.mp3'.format(record[0])) os.makedirs(audio_folder, exist_ok=True) download_url = 'https://audio.tatoeba.org/sentences/{}/{}.mp3'.format(record[2], record[0]) download.download_file(download_url, audio_file)
[ "def", "_download_audio_files", "(", "self", ",", "records", ",", "target_path", ")", ":", "for", "record", "in", "records", ":", "audio_folder", "=", "os", ".", "path", ".", "join", "(", "target_path", ",", "'audio'", ",", "record", "[", "2", "]", ")", ...
Download all audio files based on the given records.
[ "Download", "all", "audio", "files", "based", "on", "the", "given", "records", "." ]
python
train
mapnik/Cascadenik
cascadenik/compile.py
https://github.com/mapnik/Cascadenik/blob/82f66859340a31dfcb24af127274f262d4f3ad85/cascadenik/compile.py#L1449-L1621
def compile(src, dirs, verbose=False, srs=None, datasources_cfg=None, user_styles=[], scale=1): """ Compile a Cascadenik MML file, returning a cascadenik.output.Map object. Parameters: src: Path to .mml file, or raw .mml file content. dirs: Object with directory names in 'cache', 'output', and 'source' attributes. dirs.source is expected to be fully-qualified, e.g. "http://example.com" or "file:///home/example". Keyword Parameters: verbose: If True, debugging information will be printed to stderr. srs: Target spatial reference system for the compiled stylesheet. If provided, overrides default map srs in the .mml file. datasources_cfg: If a file or URL, uses the config to override datasources or parameters (i.e. postgis_dbname) defined in the map's canonical <DataSourcesConfig> entities. This is most useful in development, whereby one redefines individual datasources, connection parameters, and/or local paths. user_styles: A optional list of files or URLs, that override styles defined in the map source. These are evaluated in order, with declarations from later styles overriding those from earlier styles. scale: Scale value for output map, 2 doubles the size for high-res displays. """ global VERBOSE if verbose: VERBOSE = True sys.stderr.write('\n') msg('Targeting mapnik version: %s | %s' % (MAPNIK_VERSION, MAPNIK_VERSION_STR)) if posixpath.exists(src): doc = ElementTree.parse(src) map_el = doc.getroot() else: try: # guessing src is a literal XML string? 
map_el = ElementTree.fromstring(src) except: if not (src[:7] in ('http://', 'https:/', 'file://')): src = "file://" + src try: doc = ElementTree.parse(urllib.urlopen(src)) except IOError, e: raise IOError('%s: %s' % (e,src)) map_el = doc.getroot() expand_source_declarations(map_el, dirs, datasources_cfg) declarations = extract_declarations(map_el, dirs, scale, user_styles) # a list of layers and a sequential ID generator layers, ids = [], (i for i in xrange(1, 999999)) # Handle base datasources # http://trac.mapnik.org/changeset/574 datasource_templates = {} for base_el in map_el: if base_el.tag != 'Datasource': continue datasource_templates[base_el.get('name')] = dict(((p.get('name'),p.text) for p in base_el.findall('Parameter'))) for layer_el in map_el.findall('Layer'): # nevermind with this one if layer_el.get('status', None) in ('off', '0', 0): continue # build up a map of Parameters for this Layer datasource_params = dict((p.get('name'),p.text) for p in layer_el.find('Datasource').findall('Parameter')) base = layer_el.find('Datasource').get('base') if base: datasource_params.update(datasource_templates[base]) if datasource_params.get('table'): # remove line breaks from possible SQL, using a possibly-unsafe regexp # that simply blows away anything that looks like it might be a SQL comment. 
# http://trac.mapnik.org/ticket/173 if not MAPNIK_VERSION >= 601: sql = datasource_params.get('table') sql = compile(r'--.*$', MULTILINE).sub('', sql) sql = sql.replace('\r', ' ').replace('\n', ' ') datasource_params['table'] = sql elif datasource_params.get('file') is not None: # make sure we localize any remote files file_param = datasource_params.get('file') if datasource_params.get('type') == 'shape': # handle a local shapefile or fetch a remote, zipped shapefile msg('Handling shapefile datasource...') file_param = localize_shapefile(file_param, dirs) # TODO - support datasource reprojection to make map srs # TODO - support automatically indexing shapefiles else: # ogr,raster, gdal, sqlite # attempt to generically handle other file based datasources msg('Handling generic datasource...') file_param = localize_file_datasource(file_param, dirs) msg("Localized path = %s" % un_posix(file_param)) datasource_params['file'] = un_posix(file_param) # TODO - consider custom support for other mapnik datasources: # sqlite, oracle, osm, kismet, gdal, raster, rasterlite layer_declarations = get_applicable_declarations(layer_el, declarations) # a list of styles styles = [] if datasource_params.get('type', None) == 'gdal': styles.append(output.Style('raster style %d' % ids.next(), get_raster_rules(layer_declarations))) else: styles.append(output.Style('polygon style %d' % ids.next(), get_polygon_rules(layer_declarations))) styles.append(output.Style('polygon pattern style %d' % ids.next(), get_polygon_pattern_rules(layer_declarations, dirs))) styles.append(output.Style('line style %d' % ids.next(), get_line_rules(layer_declarations))) styles.append(output.Style('line pattern style %d' % ids.next(), get_line_pattern_rules(layer_declarations, dirs))) for (shield_name, shield_rules) in get_shield_rule_groups(layer_declarations, dirs).items(): styles.append(output.Style('shield style %d (%s)' % (ids.next(), shield_name), shield_rules)) for (text_name, text_rules) in 
get_text_rule_groups(layer_declarations).items(): styles.append(output.Style('text style %d (%s)' % (ids.next(), text_name), text_rules)) styles.append(output.Style('point style %d' % ids.next(), get_point_rules(layer_declarations, dirs))) styles = [s for s in styles if s.rules] if styles: datasource = output.Datasource(**datasource_params) layer = output.Layer('layer %d' % ids.next(), datasource, styles, layer_el.get('srs', None), layer_el.get('min_zoom', None) and int(layer_el.get('min_zoom')) or None, layer_el.get('max_zoom', None) and int(layer_el.get('max_zoom')) or None) layers.append(layer) map_attrs = get_map_attributes(get_applicable_declarations(map_el, declarations)) # if a target srs is profiled, override whatever is in mml if srs is not None: map_el.set('srs', srs) return output.Map(map_el.attrib.get('srs', None), layers, **map_attrs)
[ "def", "compile", "(", "src", ",", "dirs", ",", "verbose", "=", "False", ",", "srs", "=", "None", ",", "datasources_cfg", "=", "None", ",", "user_styles", "=", "[", "]", ",", "scale", "=", "1", ")", ":", "global", "VERBOSE", "if", "verbose", ":", "...
Compile a Cascadenik MML file, returning a cascadenik.output.Map object. Parameters: src: Path to .mml file, or raw .mml file content. dirs: Object with directory names in 'cache', 'output', and 'source' attributes. dirs.source is expected to be fully-qualified, e.g. "http://example.com" or "file:///home/example". Keyword Parameters: verbose: If True, debugging information will be printed to stderr. srs: Target spatial reference system for the compiled stylesheet. If provided, overrides default map srs in the .mml file. datasources_cfg: If a file or URL, uses the config to override datasources or parameters (i.e. postgis_dbname) defined in the map's canonical <DataSourcesConfig> entities. This is most useful in development, whereby one redefines individual datasources, connection parameters, and/or local paths. user_styles: A optional list of files or URLs, that override styles defined in the map source. These are evaluated in order, with declarations from later styles overriding those from earlier styles. scale: Scale value for output map, 2 doubles the size for high-res displays.
[ "Compile", "a", "Cascadenik", "MML", "file", "returning", "a", "cascadenik", ".", "output", ".", "Map", "object", ".", "Parameters", ":", "src", ":", "Path", "to", ".", "mml", "file", "or", "raw", ".", "mml", "file", "content", ".", "dirs", ":", "Objec...
python
train
trec-kba/streamcorpus-pipeline
streamcorpus_pipeline/_kvlayer_table_names.py
https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_kvlayer_table_names.py#L174-L187
def kvlayer_key_to_stream_id(k): '''Convert a kvlayer key to a text stream ID. `k` should be of the same form produced by :func:`stream_id_to_kvlayer_key`. :param k: :mod:`kvlayer` key tuple :return: converted stream ID :returntype str: ''' abs_url_hash, epoch_ticks = k return '{0}-{1}'.format(epoch_ticks, base64.b16encode(abs_url_hash).lower())
[ "def", "kvlayer_key_to_stream_id", "(", "k", ")", ":", "abs_url_hash", ",", "epoch_ticks", "=", "k", "return", "'{0}-{1}'", ".", "format", "(", "epoch_ticks", ",", "base64", ".", "b16encode", "(", "abs_url_hash", ")", ".", "lower", "(", ")", ")" ]
Convert a kvlayer key to a text stream ID. `k` should be of the same form produced by :func:`stream_id_to_kvlayer_key`. :param k: :mod:`kvlayer` key tuple :return: converted stream ID :returntype str:
[ "Convert", "a", "kvlayer", "key", "to", "a", "text", "stream", "ID", "." ]
python
test
ryan-roemer/django-cloud-browser
cloud_browser/cloud/errors.py
https://github.com/ryan-roemer/django-cloud-browser/blob/b06cdd24885a6309e843ed924dbf1705b67e7f48/cloud_browser/cloud/errors.py#L111-L123
def translate(self, exc): """Return translation of exception to new class. Calling code should only raise exception if exception class is passed in, else ``None`` (which signifies no wrapping should be done). """ # Find actual class. for key in self.translations.keys(): if isinstance(exc, key): # pylint: disable=unsubscriptable-object return self.translations[key](str(exc)) return None
[ "def", "translate", "(", "self", ",", "exc", ")", ":", "# Find actual class.", "for", "key", "in", "self", ".", "translations", ".", "keys", "(", ")", ":", "if", "isinstance", "(", "exc", ",", "key", ")", ":", "# pylint: disable=unsubscriptable-object", "ret...
Return translation of exception to new class. Calling code should only raise exception if exception class is passed in, else ``None`` (which signifies no wrapping should be done).
[ "Return", "translation", "of", "exception", "to", "new", "class", "." ]
python
train
flowersteam/explauto
explauto/sensorimotor_model/inverse/cma.py
https://github.com/flowersteam/explauto/blob/cf0f81ecb9f6412f7276a95bd27359000e1e26b6/explauto/sensorimotor_model/inverse/cma.py#L813-L851
def update(self, arx, xarchive=None, arf=None, evals=None): """checks for better solutions in list `arx`. Based on the smallest corresponding value in `arf`, alternatively, `update` may be called with a `BestSolution` instance like ``update(another_best_solution)`` in which case the better solution becomes the current best. `xarchive` is used to retrieve the genotype of a solution. """ if isinstance(arx, BestSolution): if self.evalsall is None: self.evalsall = arx.evalsall elif arx.evalsall is not None: self.evalsall = max((self.evalsall, arx.evalsall)) if arx.f is not None and arx.f < np.inf: self.update([arx.x], xarchive, [arx.f], arx.evals) return self assert arf is not None # find failsave minimum minidx = np.nanargmin(arf) if minidx is np.nan: return minarf = arf[minidx] # minarf = reduce(lambda x, y: y if y and y is not np.nan # and y < x else x, arf, np.inf) if minarf < np.inf and (minarf < self.f or self.f is None): self.x, self.f = arx[minidx], arf[minidx] if xarchive is not None and xarchive.get(self.x) is not None: self.x_geno = xarchive[self.x].get('geno') else: self.x_geno = None self.evals = None if not evals else evals - len(arf) + minidx + 1 self.evalsall = evals elif evals: self.evalsall = evals self.last.x = arx[minidx] self.last.f = minarf
[ "def", "update", "(", "self", ",", "arx", ",", "xarchive", "=", "None", ",", "arf", "=", "None", ",", "evals", "=", "None", ")", ":", "if", "isinstance", "(", "arx", ",", "BestSolution", ")", ":", "if", "self", ".", "evalsall", "is", "None", ":", ...
checks for better solutions in list `arx`. Based on the smallest corresponding value in `arf`, alternatively, `update` may be called with a `BestSolution` instance like ``update(another_best_solution)`` in which case the better solution becomes the current best. `xarchive` is used to retrieve the genotype of a solution.
[ "checks", "for", "better", "solutions", "in", "list", "arx", "." ]
python
train
darkfeline/animanager
animanager/anidb.py
https://github.com/darkfeline/animanager/blob/55d92e4cbdc12aac8ebe302420d2cff3fa9fa148/animanager/anidb.py#L58-L65
def number(self) -> int: """Episode number. Unique for an anime and episode type, but not unique across episode types for the same anime. """ match = self._NUMBER_SUFFIX.search(self.epno) return int(match.group(1))
[ "def", "number", "(", "self", ")", "->", "int", ":", "match", "=", "self", ".", "_NUMBER_SUFFIX", ".", "search", "(", "self", ".", "epno", ")", "return", "int", "(", "match", ".", "group", "(", "1", ")", ")" ]
Episode number. Unique for an anime and episode type, but not unique across episode types for the same anime.
[ "Episode", "number", "." ]
python
train
chovanecm/sacredboard
sacredboard/app/process/process.py
https://github.com/chovanecm/sacredboard/blob/47e1c99e3be3c1b099d3772bc077f5666020eb0b/sacredboard/app/process/process.py#L146-L155
def read_line(self, time_limit=None): """ Read a line from the process. On Windows, this the time_limit has no effect, it always blocks. """ if self.proc is not None: return self.proc.stdout.readline().decode() else: return None
[ "def", "read_line", "(", "self", ",", "time_limit", "=", "None", ")", ":", "if", "self", ".", "proc", "is", "not", "None", ":", "return", "self", ".", "proc", ".", "stdout", ".", "readline", "(", ")", ".", "decode", "(", ")", "else", ":", "return",...
Read a line from the process. On Windows, this the time_limit has no effect, it always blocks.
[ "Read", "a", "line", "from", "the", "process", "." ]
python
train
noahbenson/neuropythy
neuropythy/optimize/core.py
https://github.com/noahbenson/neuropythy/blob/b588889f6db36ddb9602ae4a72c1c0d3f41586b2/neuropythy/optimize/core.py#L274-L281
def const_potential(f): ''' const_potential(f) yields f if f is a constant potential function; if f is a constant, yields a potential function that always yields f; otherwise raises an error. ''' if is_const_potential(f): return f elif pimms.is_array(f, 'number'): return PotentialConstant(f) else: raise ValueError('Could not convert given value to potential constant: %s' % f)
[ "def", "const_potential", "(", "f", ")", ":", "if", "is_const_potential", "(", "f", ")", ":", "return", "f", "elif", "pimms", ".", "is_array", "(", "f", ",", "'number'", ")", ":", "return", "PotentialConstant", "(", "f", ")", "else", ":", "raise", "Val...
const_potential(f) yields f if f is a constant potential function; if f is a constant, yields a potential function that always yields f; otherwise raises an error.
[ "const_potential", "(", "f", ")", "yields", "f", "if", "f", "is", "a", "constant", "potential", "function", ";", "if", "f", "is", "a", "constant", "yields", "a", "potential", "function", "that", "always", "yields", "f", ";", "otherwise", "raises", "an", ...
python
train
saltstack/salt
salt/runners/reactor.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/runners/reactor.py#L36-L59
def list_(saltenv='base', test=None): ''' List currently configured reactors CLI Example: .. code-block:: bash salt-run reactor.list ''' sevent = salt.utils.event.get_event( 'master', __opts__['sock_dir'], __opts__['transport'], opts=__opts__, listen=True) master_key = salt.utils.master.get_master_key('root', __opts__) __jid_event__.fire_event({'key': master_key}, 'salt/reactors/manage/list') results = sevent.get_event(wait=30, tag='salt/reactors/manage/list-results') reactors = results['reactors'] return reactors
[ "def", "list_", "(", "saltenv", "=", "'base'", ",", "test", "=", "None", ")", ":", "sevent", "=", "salt", ".", "utils", ".", "event", ".", "get_event", "(", "'master'", ",", "__opts__", "[", "'sock_dir'", "]", ",", "__opts__", "[", "'transport'", "]", ...
List currently configured reactors CLI Example: .. code-block:: bash salt-run reactor.list
[ "List", "currently", "configured", "reactors" ]
python
train
aparo/pyes
pyes/queryset.py
https://github.com/aparo/pyes/blob/712eb6095961755067b2b5baa262008ade6584b3/pyes/queryset.py#L456-L470
def update(self, **kwargs): """ Updates all elements in the current QuerySet, setting all the given fields to the appropriate values. """ query = self._build_query() connection = get_es_connection(self.es_url, self.es_kwargs) results = connection.search(query, indices=self.index, doc_types=self.type, model=self.model, scan=True) for item in results: item.update(kwargs) item.save(bulk=True) connection.flush_bulk(True) # Clear the result cache, in case this QuerySet gets reused. self._result_cache = None
[ "def", "update", "(", "self", ",", "*", "*", "kwargs", ")", ":", "query", "=", "self", ".", "_build_query", "(", ")", "connection", "=", "get_es_connection", "(", "self", ".", "es_url", ",", "self", ".", "es_kwargs", ")", "results", "=", "connection", ...
Updates all elements in the current QuerySet, setting all the given fields to the appropriate values.
[ "Updates", "all", "elements", "in", "the", "current", "QuerySet", "setting", "all", "the", "given", "fields", "to", "the", "appropriate", "values", "." ]
python
train
seleniumbase/SeleniumBase
seleniumbase/plugins/db_reporting_plugin.py
https://github.com/seleniumbase/SeleniumBase/blob/62e5b43ee1f90a9ed923841bdd53b1b38358f43a/seleniumbase/plugins/db_reporting_plugin.py#L92-L96
def addError(self, test, err, capt=None): """ After a test error, we want to record testcase run information. """ self.__insert_test_result(constants.State.ERROR, test, err)
[ "def", "addError", "(", "self", ",", "test", ",", "err", ",", "capt", "=", "None", ")", ":", "self", ".", "__insert_test_result", "(", "constants", ".", "State", ".", "ERROR", ",", "test", ",", "err", ")" ]
After a test error, we want to record testcase run information.
[ "After", "a", "test", "error", "we", "want", "to", "record", "testcase", "run", "information", "." ]
python
train
hyperledger/sawtooth-core
validator/sawtooth_validator/gossip/permission_verifier.py
https://github.com/hyperledger/sawtooth-core/blob/8cf473bc2207e51f02bd182d825158a57d72b098/validator/sawtooth_validator/gossip/permission_verifier.py#L106-L176
def is_transaction_signer_authorized(self, transactions, state_root, from_state): """ Check the transaction signing key against the allowed transactor permissions. The roles being checked are the following, from first to last: "transactor.transaction_signer.<TP_Name>" "transactor.transaction_signer" "transactor" "default" The first role that is set will be the one used to enforce if the transaction signer is allowed. Args: transactions (List of Transactions): The transactions that are being verified. state_root(string): The state root of the previous block. If this is None, the current state root hash will be retrieved. from_state (bool): Whether the identity value should be read directly from state, instead of using the cached values. This should be used when the state_root passed is not from the current chain head. """ role = None if role is None: role = self._cache.get_role("transactor.transaction_signer", state_root, from_state) if role is None: role = self._cache.get_role("transactor", state_root, from_state) if role is None: policy_name = "default" else: policy_name = role.policy_name policy = self._cache.get_policy(policy_name, state_root, from_state) family_roles = {} for transaction in transactions: header = TransactionHeader() header.ParseFromString(transaction.header) family_policy = None if header.family_name not in family_roles: role = self._cache.get_role( "transactor.transaction_signer." 
+ header.family_name, state_root, from_state) if role is not None: family_policy = self._cache.get_policy(role.policy_name, state_root, from_state) family_roles[header.family_name] = family_policy else: family_policy = family_roles[header.family_name] if family_policy is not None: if not self._allowed(header.signer_public_key, family_policy): LOGGER.debug("Transaction Signer: %s is not permitted.", header.signer_public_key) return False else: if policy is not None: if not self._allowed(header.signer_public_key, policy): LOGGER.debug( "Transaction Signer: %s is not permitted.", header.signer_public_key) return False return True
[ "def", "is_transaction_signer_authorized", "(", "self", ",", "transactions", ",", "state_root", ",", "from_state", ")", ":", "role", "=", "None", "if", "role", "is", "None", ":", "role", "=", "self", ".", "_cache", ".", "get_role", "(", "\"transactor.transacti...
Check the transaction signing key against the allowed transactor permissions. The roles being checked are the following, from first to last: "transactor.transaction_signer.<TP_Name>" "transactor.transaction_signer" "transactor" "default" The first role that is set will be the one used to enforce if the transaction signer is allowed. Args: transactions (List of Transactions): The transactions that are being verified. state_root(string): The state root of the previous block. If this is None, the current state root hash will be retrieved. from_state (bool): Whether the identity value should be read directly from state, instead of using the cached values. This should be used when the state_root passed is not from the current chain head.
[ "Check", "the", "transaction", "signing", "key", "against", "the", "allowed", "transactor", "permissions", ".", "The", "roles", "being", "checked", "are", "the", "following", "from", "first", "to", "last", ":", "transactor", ".", "transaction_signer", ".", "<TP_...
python
train
barryp/py-amqplib
amqplib/client_0_8/channel.py
https://github.com/barryp/py-amqplib/blob/2b3a47de34b4712c111d0a55d7ff109dffc2a7b2/amqplib/client_0_8/channel.py#L2513-L2554
def _basic_return(self, args, msg): """ return a failed message This method returns an undeliverable message that was published with the "immediate" flag set, or an unroutable message published with the "mandatory" flag set. The reply code and text provide information about the reason that the message was undeliverable. PARAMETERS: reply_code: short The reply code. The AMQ reply codes are defined in AMQ RFC 011. reply_text: shortstr The localised reply text. This text can be logged as an aid to resolving issues. exchange: shortstr Specifies the name of the exchange that the message was originally published to. routing_key: shortstr Message routing key Specifies the routing key name specified when the message was published. """ reply_code = args.read_short() reply_text = args.read_shortstr() exchange = args.read_shortstr() routing_key = args.read_shortstr() self.returned_messages.put( (reply_code, reply_text, exchange, routing_key, msg) )
[ "def", "_basic_return", "(", "self", ",", "args", ",", "msg", ")", ":", "reply_code", "=", "args", ".", "read_short", "(", ")", "reply_text", "=", "args", ".", "read_shortstr", "(", ")", "exchange", "=", "args", ".", "read_shortstr", "(", ")", "routing_k...
return a failed message This method returns an undeliverable message that was published with the "immediate" flag set, or an unroutable message published with the "mandatory" flag set. The reply code and text provide information about the reason that the message was undeliverable. PARAMETERS: reply_code: short The reply code. The AMQ reply codes are defined in AMQ RFC 011. reply_text: shortstr The localised reply text. This text can be logged as an aid to resolving issues. exchange: shortstr Specifies the name of the exchange that the message was originally published to. routing_key: shortstr Message routing key Specifies the routing key name specified when the message was published.
[ "return", "a", "failed", "message" ]
python
train
idlesign/django-sitemessage
sitemessage/messengers/telegram.py
https://github.com/idlesign/django-sitemessage/blob/25b179b798370354c5988042ec209e255d23793f/sitemessage/messengers/telegram.py#L56-L69
def get_chat_ids(self): """Returns unique chat IDs from `/start` command messages sent to our bot by users. Those chat IDs can be used to send messages to chats. :rtype: list """ updates = self.get_updates() chat_ids = [] if updates: for update in updates: message = update['message'] if message['text'] == '/start': chat_ids.append(message['chat']['id']) return list(set(chat_ids))
[ "def", "get_chat_ids", "(", "self", ")", ":", "updates", "=", "self", ".", "get_updates", "(", ")", "chat_ids", "=", "[", "]", "if", "updates", ":", "for", "update", "in", "updates", ":", "message", "=", "update", "[", "'message'", "]", "if", "message"...
Returns unique chat IDs from `/start` command messages sent to our bot by users. Those chat IDs can be used to send messages to chats. :rtype: list
[ "Returns", "unique", "chat", "IDs", "from", "/", "start", "command", "messages", "sent", "to", "our", "bot", "by", "users", ".", "Those", "chat", "IDs", "can", "be", "used", "to", "send", "messages", "to", "chats", "." ]
python
train
smarie/python-parsyfiles
parsyfiles/global_config.py
https://github.com/smarie/python-parsyfiles/blob/344b37e1151e8d4e7c2ee49ae09d6568715ae64e/parsyfiles/global_config.py#L17-L35
def parsyfiles_global_config(multiple_errors_tb_limit: int = None, full_paths_in_logs: bool = None, dict_to_object_subclass_limit: int = None): """ This is the method you should use to configure the parsyfiles library :param multiple_errors_tb_limit: the traceback size (default is 3) of individual parsers exceptions displayed when parsyfiles tries several parsing chains and all of them fail. :param full_paths_in_logs: if True, full file paths will be displayed in logs. Otherwise only the parent path will be displayed and children paths will be indented (default is False) :param dict_to_object_subclass_limit: the number of subclasses that the <dict_to_object> converter will try, when instantiating an object from a dictionary. Default is 50 :return: """ if multiple_errors_tb_limit is not None: GLOBAL_CONFIG.multiple_errors_tb_limit = multiple_errors_tb_limit if full_paths_in_logs is not None: GLOBAL_CONFIG.full_paths_in_logs = full_paths_in_logs if dict_to_object_subclass_limit is not None: GLOBAL_CONFIG.dict_to_object_subclass_limit = dict_to_object_subclass_limit
[ "def", "parsyfiles_global_config", "(", "multiple_errors_tb_limit", ":", "int", "=", "None", ",", "full_paths_in_logs", ":", "bool", "=", "None", ",", "dict_to_object_subclass_limit", ":", "int", "=", "None", ")", ":", "if", "multiple_errors_tb_limit", "is", "not", ...
This is the method you should use to configure the parsyfiles library :param multiple_errors_tb_limit: the traceback size (default is 3) of individual parsers exceptions displayed when parsyfiles tries several parsing chains and all of them fail. :param full_paths_in_logs: if True, full file paths will be displayed in logs. Otherwise only the parent path will be displayed and children paths will be indented (default is False) :param dict_to_object_subclass_limit: the number of subclasses that the <dict_to_object> converter will try, when instantiating an object from a dictionary. Default is 50 :return:
[ "This", "is", "the", "method", "you", "should", "use", "to", "configure", "the", "parsyfiles", "library" ]
python
train
aio-libs/aioftp
aioftp/common.py
https://github.com/aio-libs/aioftp/blob/b45395b1aba41301b898040acade7010e6878a08/aioftp/common.py#L408-L421
def from_limits(cls, read_speed_limit=None, write_speed_limit=None): """ Simple wrapper for creation :py:class:`aioftp.StreamThrottle` :param read_speed_limit: stream read speed limit in bytes or :py:class:`None` for unlimited :type read_speed_limit: :py:class:`int` or :py:class:`None` :param write_speed_limit: stream write speed limit in bytes or :py:class:`None` for unlimited :type write_speed_limit: :py:class:`int` or :py:class:`None` """ return cls(read=Throttle(limit=read_speed_limit), write=Throttle(limit=write_speed_limit))
[ "def", "from_limits", "(", "cls", ",", "read_speed_limit", "=", "None", ",", "write_speed_limit", "=", "None", ")", ":", "return", "cls", "(", "read", "=", "Throttle", "(", "limit", "=", "read_speed_limit", ")", ",", "write", "=", "Throttle", "(", "limit",...
Simple wrapper for creation :py:class:`aioftp.StreamThrottle` :param read_speed_limit: stream read speed limit in bytes or :py:class:`None` for unlimited :type read_speed_limit: :py:class:`int` or :py:class:`None` :param write_speed_limit: stream write speed limit in bytes or :py:class:`None` for unlimited :type write_speed_limit: :py:class:`int` or :py:class:`None`
[ "Simple", "wrapper", "for", "creation", ":", "py", ":", "class", ":", "aioftp", ".", "StreamThrottle" ]
python
valid
kushaldas/retask
retask/queue.py
https://github.com/kushaldas/retask/blob/5c955b8386653d3f0591ca2f4b1a213ff4b5a018/retask/queue.py#L79-L96
def length(self): """ Gives the length of the queue. Returns ``None`` if the queue is not connected. If the queue is not connected then it will raise :class:`retask.ConnectionError`. """ if not self.connected: raise ConnectionError('Queue is not connected') try: length = self.rdb.llen(self._name) except redis.exceptions.ConnectionError as err: raise ConnectionError(str(err)) return length
[ "def", "length", "(", "self", ")", ":", "if", "not", "self", ".", "connected", ":", "raise", "ConnectionError", "(", "'Queue is not connected'", ")", "try", ":", "length", "=", "self", ".", "rdb", ".", "llen", "(", "self", ".", "_name", ")", "except", ...
Gives the length of the queue. Returns ``None`` if the queue is not connected. If the queue is not connected then it will raise :class:`retask.ConnectionError`.
[ "Gives", "the", "length", "of", "the", "queue", ".", "Returns", "None", "if", "the", "queue", "is", "not", "connected", "." ]
python
train
tmr232/Sark
sark/enum.py
https://github.com/tmr232/Sark/blob/bee62879c2aea553a3924d887e2b30f2a6008581/sark/enum.py#L79-L84
def _add_enum_member(enum, name, value, bitmask=DEFMASK): """Add an enum member.""" error = idaapi.add_enum_member(enum, name, value, bitmask) if error: raise _enum_member_error(error, enum, name, value, bitmask)
[ "def", "_add_enum_member", "(", "enum", ",", "name", ",", "value", ",", "bitmask", "=", "DEFMASK", ")", ":", "error", "=", "idaapi", ".", "add_enum_member", "(", "enum", ",", "name", ",", "value", ",", "bitmask", ")", "if", "error", ":", "raise", "_enu...
Add an enum member.
[ "Add", "an", "enum", "member", "." ]
python
train
RiotGames/cloud-inquisitor
backend/cloud_inquisitor/plugins/views/templates.py
https://github.com/RiotGames/cloud-inquisitor/blob/181dc2566ca59fc855f695b7fcc2c3b934e6ee9f/backend/cloud_inquisitor/plugins/views/templates.py#L114-L127
def delete(self, template_name): """Delete a template""" template = db.Template.find_one(template_name=template_name) if not template: return self.make_response('No such template found', HTTP.NOT_FOUND) db.session.delete(template) db.session.commit() auditlog(event='template.delete', actor=session['user'].username, data={'template_name': template_name}) return self.make_response({ 'message': 'Template has been deleted', 'templateName': template_name })
[ "def", "delete", "(", "self", ",", "template_name", ")", ":", "template", "=", "db", ".", "Template", ".", "find_one", "(", "template_name", "=", "template_name", ")", "if", "not", "template", ":", "return", "self", ".", "make_response", "(", "'No such templ...
Delete a template
[ "Delete", "a", "template" ]
python
train
cuihantao/andes
andes/variables/dae.py
https://github.com/cuihantao/andes/blob/7067898d4f26ce7534e968b8486c4aa8fe3a511a/andes/variables/dae.py#L700-L705
def reset_small(self, eq): """Reset numbers smaller than 1e-12 in f and g equations""" assert eq in ('f', 'g') for idx, var in enumerate(self.__dict__[eq]): if abs(var) <= 1e-12: self.__dict__[eq][idx] = 0
[ "def", "reset_small", "(", "self", ",", "eq", ")", ":", "assert", "eq", "in", "(", "'f'", ",", "'g'", ")", "for", "idx", ",", "var", "in", "enumerate", "(", "self", ".", "__dict__", "[", "eq", "]", ")", ":", "if", "abs", "(", "var", ")", "<=", ...
Reset numbers smaller than 1e-12 in f and g equations
[ "Reset", "numbers", "smaller", "than", "1e", "-", "12", "in", "f", "and", "g", "equations" ]
python
train
QInfer/python-qinfer
src/qinfer/tomography/models.py
https://github.com/QInfer/python-qinfer/blob/8170c84a0be1723f8c6b09e0d3c7a40a886f1fe3/src/qinfer/tomography/models.py#L172-L192
def trunc_neg_eigs(self, particle): """ Given a state represented as a model parameter vector, returns a model parameter vector representing the same state with any negative eigenvalues set to zero. :param np.ndarray particle: Vector of length ``(dim ** 2, )`` representing a state. :return: The same state with any negative eigenvalues set to zero. """ arr = np.tensordot(particle, self._basis.data.conj(), 1) w, v = np.linalg.eig(arr) if np.all(w >= 0): return particle else: w[w < 0] = 0 new_arr = np.dot(v * w, v.conj().T) new_particle = np.real(np.dot(self._basis.flat(), new_arr.flatten())) assert new_particle[0] > 0 return new_particle
[ "def", "trunc_neg_eigs", "(", "self", ",", "particle", ")", ":", "arr", "=", "np", ".", "tensordot", "(", "particle", ",", "self", ".", "_basis", ".", "data", ".", "conj", "(", ")", ",", "1", ")", "w", ",", "v", "=", "np", ".", "linalg", ".", "...
Given a state represented as a model parameter vector, returns a model parameter vector representing the same state with any negative eigenvalues set to zero. :param np.ndarray particle: Vector of length ``(dim ** 2, )`` representing a state. :return: The same state with any negative eigenvalues set to zero.
[ "Given", "a", "state", "represented", "as", "a", "model", "parameter", "vector", "returns", "a", "model", "parameter", "vector", "representing", "the", "same", "state", "with", "any", "negative", "eigenvalues", "set", "to", "zero", "." ]
python
train
Opentrons/opentrons
update-server/otupdate/buildroot/update.py
https://github.com/Opentrons/opentrons/blob/a7c15cc2636ecb64ab56c7edc1d8a57163aaeadf/update-server/otupdate/buildroot/update.py#L171-L186
async def commit( request: web.Request, session: UpdateSession) -> web.Response: """ Serves /update/:session/commit """ if session.stage != Stages.DONE: return web.json_response( data={'error': 'not-ready', 'message': f'System is not ready to commit the update ' f'(currently {session.stage.value.short})'}, status=409) async with request.app[RESTART_LOCK_NAME]: file_actions.commit_update() session.set_stage(Stages.READY_FOR_RESTART) return web.json_response( data=session.state, status=200)
[ "async", "def", "commit", "(", "request", ":", "web", ".", "Request", ",", "session", ":", "UpdateSession", ")", "->", "web", ".", "Response", ":", "if", "session", ".", "stage", "!=", "Stages", ".", "DONE", ":", "return", "web", ".", "json_response", ...
Serves /update/:session/commit
[ "Serves", "/", "update", "/", ":", "session", "/", "commit" ]
python
train
gmcguire/django-db-pool
dbpool/db/backends/postgresql_psycopg2/base.py
https://github.com/gmcguire/django-db-pool/blob/d4e0aa6a150fd7bd2024e079cd3b7147ea341e63/dbpool/db/backends/postgresql_psycopg2/base.py#L56-L63
def close(self): ''' Override to return the connection to the pool rather than closing it. ''' if self._wrapped_connection and self._pool: logger.debug("Returning connection %s to pool %s" % (self._wrapped_connection, self._pool)) self._pool.putconn(self._wrapped_connection) self._wrapped_connection = None
[ "def", "close", "(", "self", ")", ":", "if", "self", ".", "_wrapped_connection", "and", "self", ".", "_pool", ":", "logger", ".", "debug", "(", "\"Returning connection %s to pool %s\"", "%", "(", "self", ".", "_wrapped_connection", ",", "self", ".", "_pool", ...
Override to return the connection to the pool rather than closing it.
[ "Override", "to", "return", "the", "connection", "to", "the", "pool", "rather", "than", "closing", "it", "." ]
python
train
shapiromatron/bmds
bmds/batch.py
https://github.com/shapiromatron/bmds/blob/395c6ce84ad82876fd9fa4a89a3497fb61616de0/bmds/batch.py#L59-L84
def to_json(self, filename, indent=2): """ Return a JSON string of all model inputs and outputs. Parameters ---------- filename : str or file Either the file name (string) or an open file (file-like object) where the data will be saved. indent : int, optional Indentation level for JSON output. Returns ------- out : str JSON formatted output string. """ d = self.to_dicts() if hasattr(filename, "write"): json.dump(d, filename, indent=indent) elif isinstance(filename, string_types): with open(os.path.expanduser(filename), "w") as f: json.dump(d, f, indent=indent) else: raise ValueError("Unknown filename or file-object")
[ "def", "to_json", "(", "self", ",", "filename", ",", "indent", "=", "2", ")", ":", "d", "=", "self", ".", "to_dicts", "(", ")", "if", "hasattr", "(", "filename", ",", "\"write\"", ")", ":", "json", ".", "dump", "(", "d", ",", "filename", ",", "in...
Return a JSON string of all model inputs and outputs. Parameters ---------- filename : str or file Either the file name (string) or an open file (file-like object) where the data will be saved. indent : int, optional Indentation level for JSON output. Returns ------- out : str JSON formatted output string.
[ "Return", "a", "JSON", "string", "of", "all", "model", "inputs", "and", "outputs", "." ]
python
train
StackStorm/pybind
pybind/slxos/v17s_1_02/qos/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/qos/__init__.py#L168-L189
def _set_tx_queue(self, v, load=False): """ Setter method for tx_queue, mapped from YANG variable /qos/tx_queue (container) If this variable is read-only (config: false) in the source YANG file, then _set_tx_queue is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_tx_queue() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=tx_queue.tx_queue, is_container='container', presence=False, yang_name="tx-queue", rest_name="tx-queue", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Eegress Queue', u'callpoint': u'qos_transmit_queue', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """tx_queue must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=tx_queue.tx_queue, is_container='container', presence=False, yang_name="tx-queue", rest_name="tx-queue", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Eegress Queue', u'callpoint': u'qos_transmit_queue', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='container', is_config=True)""", }) self.__tx_queue = t if hasattr(self, '_set'): self._set()
[ "def", "_set_tx_queue", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base"...
Setter method for tx_queue, mapped from YANG variable /qos/tx_queue (container) If this variable is read-only (config: false) in the source YANG file, then _set_tx_queue is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_tx_queue() directly.
[ "Setter", "method", "for", "tx_queue", "mapped", "from", "YANG", "variable", "/", "qos", "/", "tx_queue", "(", "container", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file",...
python
train
konstantint/matplotlib-venn
matplotlib_venn/_util.py
https://github.com/konstantint/matplotlib-venn/blob/c26796c9925bdac512edf48387452fbd1848c791/matplotlib_venn/_util.py#L40-L63
def venn3_unweighted(subsets, set_labels=('A', 'B', 'C'), set_colors=('r', 'g', 'b'), alpha=0.4, normalize_to=1.0, subset_areas=(1, 1, 1, 1, 1, 1, 1), ax=None, subset_label_formatter=None): ''' The version of venn3 without area-weighting. It is implemented as a wrapper around venn3. Namely, venn3 is invoked as usual, but with all subset areas set to 1. The subset labels are then replaced in the resulting diagram with the provided subset sizes. The parameters are all the same as that of venn2. In addition there is a subset_areas parameter, which specifies the actual subset areas. (it is (1, 1, 1, 1, 1, 1, 1) by default. You are free to change it, within reason). ''' v = venn3(subset_areas, set_labels, set_colors, alpha, normalize_to, ax) # Now rename the labels if subset_label_formatter is None: subset_label_formatter = str subset_ids = ['100', '010', '110', '001', '101', '011', '111'] if isinstance(subsets, dict): subsets = [subsets.get(t, 0) for t in subset_ids] elif len(subsets) == 3: subsets = compute_venn3_subsets(*subsets) for n, id in enumerate(subset_ids): lbl = v.get_label_by_id(id) if lbl is not None: lbl.set_text(subset_label_formatter(subsets[n])) return v
[ "def", "venn3_unweighted", "(", "subsets", ",", "set_labels", "=", "(", "'A'", ",", "'B'", ",", "'C'", ")", ",", "set_colors", "=", "(", "'r'", ",", "'g'", ",", "'b'", ")", ",", "alpha", "=", "0.4", ",", "normalize_to", "=", "1.0", ",", "subset_areas...
The version of venn3 without area-weighting. It is implemented as a wrapper around venn3. Namely, venn3 is invoked as usual, but with all subset areas set to 1. The subset labels are then replaced in the resulting diagram with the provided subset sizes. The parameters are all the same as that of venn2. In addition there is a subset_areas parameter, which specifies the actual subset areas. (it is (1, 1, 1, 1, 1, 1, 1) by default. You are free to change it, within reason).
[ "The", "version", "of", "venn3", "without", "area", "-", "weighting", ".", "It", "is", "implemented", "as", "a", "wrapper", "around", "venn3", ".", "Namely", "venn3", "is", "invoked", "as", "usual", "but", "with", "all", "subset", "areas", "set", "to", "...
python
train
hyperledger/sawtooth-core
validator/sawtooth_validator/networking/dispatch.py
https://github.com/hyperledger/sawtooth-core/blob/8cf473bc2207e51f02bd182d825158a57d72b098/validator/sawtooth_validator/networking/dispatch.py#L76-L89
def add_send_message(self, connection, send_message): """Adds a send_message function to the Dispatcher's dictionary of functions indexed by connection. Args: connection (str): A locally unique identifier provided by the receiver of messages. send_message (fn): The method that should be called by the dispatcher to respond to messages which arrive via connection. """ self._send_message[connection] = send_message LOGGER.debug("Added send_message function " "for connection %s", connection)
[ "def", "add_send_message", "(", "self", ",", "connection", ",", "send_message", ")", ":", "self", ".", "_send_message", "[", "connection", "]", "=", "send_message", "LOGGER", ".", "debug", "(", "\"Added send_message function \"", "\"for connection %s\"", ",", "conne...
Adds a send_message function to the Dispatcher's dictionary of functions indexed by connection. Args: connection (str): A locally unique identifier provided by the receiver of messages. send_message (fn): The method that should be called by the dispatcher to respond to messages which arrive via connection.
[ "Adds", "a", "send_message", "function", "to", "the", "Dispatcher", "s", "dictionary", "of", "functions", "indexed", "by", "connection", "." ]
python
train
satellogic/telluric
telluric/georaster.py
https://github.com/satellogic/telluric/blob/e752cd3ee71e339f79717e526fde362e80055d9e/telluric/georaster.py#L765-L769
def shape(self): """Raster shape.""" if self._shape is None: self._populate_from_rasterio_object(read_image=False) return self._shape
[ "def", "shape", "(", "self", ")", ":", "if", "self", ".", "_shape", "is", "None", ":", "self", ".", "_populate_from_rasterio_object", "(", "read_image", "=", "False", ")", "return", "self", ".", "_shape" ]
Raster shape.
[ "Raster", "shape", "." ]
python
train
lk-geimfari/mimesis
mimesis/providers/payment.py
https://github.com/lk-geimfari/mimesis/blob/4b16ee7a8dba6281a904654a88dbb4b052869fc5/mimesis/providers/payment.py#L95-L133
def credit_card_number(self, card_type: Optional[CardType] = None) -> str: """Generate a random credit card number. :param card_type: Issuing Network. Default is Visa. :return: Credit card number. :raises NotImplementedError: if cart_type is not supported. :Example: 4455 5299 1152 2450 """ length = 16 regex = re.compile(r'(\d{4})(\d{4})(\d{4})(\d{4})') if card_type is None: card_type = get_random_item(CardType, rnd=self.random) if card_type == CardType.VISA: number = self.random.randint(4000, 4999) elif card_type == CardType.MASTER_CARD: number = self.random.choice([ self.random.randint(2221, 2720), self.random.randint(5100, 5599), ]) elif card_type == CardType.AMERICAN_EXPRESS: number = self.random.choice([34, 37]) length = 15 regex = re.compile(r'(\d{4})(\d{6})(\d{5})') else: raise NonEnumerableError(CardType) str_num = str(number) while len(str_num) < length - 1: str_num += self.random.choice(string.digits) groups = regex.search( # type: ignore str_num + luhn_checksum(str_num), ).groups() card = ' '.join(groups) return card
[ "def", "credit_card_number", "(", "self", ",", "card_type", ":", "Optional", "[", "CardType", "]", "=", "None", ")", "->", "str", ":", "length", "=", "16", "regex", "=", "re", ".", "compile", "(", "r'(\\d{4})(\\d{4})(\\d{4})(\\d{4})'", ")", "if", "card_type"...
Generate a random credit card number. :param card_type: Issuing Network. Default is Visa. :return: Credit card number. :raises NotImplementedError: if cart_type is not supported. :Example: 4455 5299 1152 2450
[ "Generate", "a", "random", "credit", "card", "number", "." ]
python
train
farshidce/touchworks-python
touchworks/api/http.py
https://github.com/farshidce/touchworks-python/blob/ea8f93a0f4273de1317a318e945a571f5038ba62/touchworks/api/http.py#L627-L648
def get_task_list(self, since='', task_types='', task_status=''): """ invokes TouchWorksMagicConstants.ACTION_GET_TASK_LIST action :param since - If given a datetime, retrieves only tasks created (or last modified) after that date and time. Defaults to 1/1/1900. :param task_status - Optional list of pipe-delimited task status names. For example, "Active|In Progress|Complete". :param task_types - Optional list of pipe-delimited task type names. For example, "Sign Note|Verify Result|MedRenewal" :return: JSON response """ magic = self._magic_json( action=TouchWorksMagicConstants.ACTION_GET_TASK_LIST, parameter1=since, parameter2=task_types, parameter3=task_status) response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic) result = self._get_results_or_raise_if_magic_invalid( magic, response, TouchWorksMagicConstants.RESULT_GET_ENCOUNTER_LIST_FOR_PATIENT) return result
[ "def", "get_task_list", "(", "self", ",", "since", "=", "''", ",", "task_types", "=", "''", ",", "task_status", "=", "''", ")", ":", "magic", "=", "self", ".", "_magic_json", "(", "action", "=", "TouchWorksMagicConstants", ".", "ACTION_GET_TASK_LIST", ",", ...
invokes TouchWorksMagicConstants.ACTION_GET_TASK_LIST action :param since - If given a datetime, retrieves only tasks created (or last modified) after that date and time. Defaults to 1/1/1900. :param task_status - Optional list of pipe-delimited task status names. For example, "Active|In Progress|Complete". :param task_types - Optional list of pipe-delimited task type names. For example, "Sign Note|Verify Result|MedRenewal" :return: JSON response
[ "invokes", "TouchWorksMagicConstants", ".", "ACTION_GET_TASK_LIST", "action", ":", "param", "since", "-", "If", "given", "a", "datetime", "retrieves", "only", "tasks", "created", "(", "or", "last", "modified", ")", "after", "that", "date", "and", "time", ".", ...
python
train
tanghaibao/goatools
goatools/grouper/plotobj.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/grouper/plotobj.py#L207-L212
def _get_gos_upper(self, ntpltgo1, max_upper, go2parentids): """Plot a GO DAG for the upper portion of a single Group of user GOs.""" # Get GO IDs which are in the hdrgo path goids_possible = ntpltgo1.gosubdag.go2obj.keys() # Get upper GO IDs which have the most descendants return self._get_gosrcs_upper(goids_possible, max_upper, go2parentids)
[ "def", "_get_gos_upper", "(", "self", ",", "ntpltgo1", ",", "max_upper", ",", "go2parentids", ")", ":", "# Get GO IDs which are in the hdrgo path", "goids_possible", "=", "ntpltgo1", ".", "gosubdag", ".", "go2obj", ".", "keys", "(", ")", "# Get upper GO IDs which have...
Plot a GO DAG for the upper portion of a single Group of user GOs.
[ "Plot", "a", "GO", "DAG", "for", "the", "upper", "portion", "of", "a", "single", "Group", "of", "user", "GOs", "." ]
python
train
mabuchilab/QNET
src/qnet/algebra/core/abstract_quantum_algebra.py
https://github.com/mabuchilab/QNET/blob/cc20d26dad78691d34c67173e5cd67dcac94208a/src/qnet/algebra/core/abstract_quantum_algebra.py#L783-L800
def free_symbols(self): """Set of free SymPy symbols contained within the expression.""" if self._free_symbols is None: if len(self._vals) == 0: self._free_symbols = self.operand.free_symbols else: dummy_map = {} for sym in self._vals.keys(): dummy_map[sym] = sympy.Dummy() # bound symbols may not be atomic, so we have to replace them # with dummies self._free_symbols = { sym for sym in self.operand.substitute(dummy_map).free_symbols if not isinstance(sym, sympy.Dummy)} for val in self._vals.values(): self._free_symbols.update(val.free_symbols) return self._free_symbols
[ "def", "free_symbols", "(", "self", ")", ":", "if", "self", ".", "_free_symbols", "is", "None", ":", "if", "len", "(", "self", ".", "_vals", ")", "==", "0", ":", "self", ".", "_free_symbols", "=", "self", ".", "operand", ".", "free_symbols", "else", ...
Set of free SymPy symbols contained within the expression.
[ "Set", "of", "free", "SymPy", "symbols", "contained", "within", "the", "expression", "." ]
python
train
michael-lazar/rtv
rtv/packages/praw/__init__.py
https://github.com/michael-lazar/rtv/blob/ccef2af042566ad384977028cf0bde01bc524dda/rtv/packages/praw/__init__.py#L1215-L1223
def is_username_available(self, username): """Return True if username is valid and available, otherwise False.""" params = {'user': username} try: result = self.request_json(self.config['username_available'], params=params) except errors.BadUsername: return False return result
[ "def", "is_username_available", "(", "self", ",", "username", ")", ":", "params", "=", "{", "'user'", ":", "username", "}", "try", ":", "result", "=", "self", ".", "request_json", "(", "self", ".", "config", "[", "'username_available'", "]", ",", "params",...
Return True if username is valid and available, otherwise False.
[ "Return", "True", "if", "username", "is", "valid", "and", "available", "otherwise", "False", "." ]
python
train
ozgur/python-firebase
firebase/firebase.py
https://github.com/ozgur/python-firebase/blob/6b96b326f6d8f477503ca42fdfbd81bcbe1f9e0d/firebase/firebase.py#L46-L72
def make_put_request(url, data, params, headers, connection): """ Helper function that makes an HTTP PUT request to the given firebase endpoint. Timeout is 60 seconds. `url`: The full URL of the firebase endpoint (DSN appended.) `data`: JSON serializable dict that will be stored in the remote storage. `params`: Python dict that is appended to the URL like a querystring. `headers`: Python dict. HTTP request headers. `connection`: Predefined HTTP connection instance. If not given, it is supplied by the `decorators.http_connection` function. The returning value is a Python dict deserialized by the JSON decoder. However, if the status code is not 2x or 403, an requests.HTTPError is raised. connection = connection_pool.get_available_connection() response = make_put_request('http://firebase.localhost/users', '{"1": "Ozgur Vatansever"}', {'X_FIREBASE_SOMETHING': 'Hi'}, connection) response => {'1': 'Ozgur Vatansever'} or {'error': 'Permission denied.'} """ timeout = getattr(connection, 'timeout') response = connection.put(url, data=data, params=params, headers=headers, timeout=timeout) if response.ok or response.status_code == 403: return response.json() if response.content else None else: response.raise_for_status()
[ "def", "make_put_request", "(", "url", ",", "data", ",", "params", ",", "headers", ",", "connection", ")", ":", "timeout", "=", "getattr", "(", "connection", ",", "'timeout'", ")", "response", "=", "connection", ".", "put", "(", "url", ",", "data", "=", ...
Helper function that makes an HTTP PUT request to the given firebase endpoint. Timeout is 60 seconds. `url`: The full URL of the firebase endpoint (DSN appended.) `data`: JSON serializable dict that will be stored in the remote storage. `params`: Python dict that is appended to the URL like a querystring. `headers`: Python dict. HTTP request headers. `connection`: Predefined HTTP connection instance. If not given, it is supplied by the `decorators.http_connection` function. The returning value is a Python dict deserialized by the JSON decoder. However, if the status code is not 2x or 403, an requests.HTTPError is raised. connection = connection_pool.get_available_connection() response = make_put_request('http://firebase.localhost/users', '{"1": "Ozgur Vatansever"}', {'X_FIREBASE_SOMETHING': 'Hi'}, connection) response => {'1': 'Ozgur Vatansever'} or {'error': 'Permission denied.'}
[ "Helper", "function", "that", "makes", "an", "HTTP", "PUT", "request", "to", "the", "given", "firebase", "endpoint", ".", "Timeout", "is", "60", "seconds", ".", "url", ":", "The", "full", "URL", "of", "the", "firebase", "endpoint", "(", "DSN", "appended", ...
python
valid
google/prettytensor
prettytensor/tutorial/data_utils.py
https://github.com/google/prettytensor/blob/75daa0b11252590f548da5647addc0ea610c4c45/prettytensor/tutorial/data_utils.py#L92-L106
def mnist(training): """Downloads MNIST and loads it into numpy arrays.""" if training: data_filename = 'train-images-idx3-ubyte.gz' labels_filename = 'train-labels-idx1-ubyte.gz' count = 60000 else: data_filename = 't10k-images-idx3-ubyte.gz' labels_filename = 't10k-labels-idx1-ubyte.gz' count = 10000 data_filename = maybe_download(MNIST_URL, data_filename) labels_filename = maybe_download(MNIST_URL, labels_filename) return (mnist_extract_data(data_filename, count), mnist_extract_labels(labels_filename, count))
[ "def", "mnist", "(", "training", ")", ":", "if", "training", ":", "data_filename", "=", "'train-images-idx3-ubyte.gz'", "labels_filename", "=", "'train-labels-idx1-ubyte.gz'", "count", "=", "60000", "else", ":", "data_filename", "=", "'t10k-images-idx3-ubyte.gz'", "labe...
Downloads MNIST and loads it into numpy arrays.
[ "Downloads", "MNIST", "and", "loads", "it", "into", "numpy", "arrays", "." ]
python
train
ultrabug/py3status
py3status/modules/battery_level.py
https://github.com/ultrabug/py3status/blob/4c105f1b44f7384ca4f7da5f821a47e468c7dee2/py3status/modules/battery_level.py#L190-L213
def on_click(self, event): """ Display a notification following the specified format """ if not self.notification: return if self.charging: format = self.format_notify_charging else: format = self.format_notify_discharging message = self.py3.safe_format( format, dict( ascii_bar=self.ascii_bar, icon=self.icon, percent=self.percent_charged, time_remaining=self.time_remaining, ), ) if message: self.py3.notify_user(message, "info")
[ "def", "on_click", "(", "self", ",", "event", ")", ":", "if", "not", "self", ".", "notification", ":", "return", "if", "self", ".", "charging", ":", "format", "=", "self", ".", "format_notify_charging", "else", ":", "format", "=", "self", ".", "format_no...
Display a notification following the specified format
[ "Display", "a", "notification", "following", "the", "specified", "format" ]
python
train
kipe/enocean
enocean/utils.py
https://github.com/kipe/enocean/blob/99fa03f47004eef74c7987545c33ecd01af0de07/enocean/utils.py#L10-L15
def combine_hex(data): ''' Combine list of integer values to one big integer ''' output = 0x00 for i, value in enumerate(reversed(data)): output |= (value << i * 8) return output
[ "def", "combine_hex", "(", "data", ")", ":", "output", "=", "0x00", "for", "i", ",", "value", "in", "enumerate", "(", "reversed", "(", "data", ")", ")", ":", "output", "|=", "(", "value", "<<", "i", "*", "8", ")", "return", "output" ]
Combine list of integer values to one big integer
[ "Combine", "list", "of", "integer", "values", "to", "one", "big", "integer" ]
python
train
streamlink/streamlink
src/streamlink/plugins/ustvnow.py
https://github.com/streamlink/streamlink/blob/c8ed1daff14ac03195870238b9b900c1109dd5c1/src/streamlink/plugins/ustvnow.py#L66-L99
def _get_streams(self): """ Finds the streams from tvcatchup.com. """ token = self.login(self.get_option("username"), self.get_option("password")) m = self._url_re.match(self.url) scode = m and m.group("scode") or self.get_option("station_code") res = self.session.http.get(self._guide_url, params=dict(token=token)) channels = OrderedDict() for t in itertags(res.text, "a"): if t.attributes.get('cs'): channels[t.attributes.get('cs').lower()] = t.attributes.get('title').replace("Watch ", "").strip() if not scode: log.error("Station code not provided, use --ustvnow-station-code.") log.info("Available stations are: \n{0} ".format('\n'.join(' {0} ({1})'.format(c, n) for c, n in channels.items()))) return if scode in channels: log.debug("Finding streams for: {0}", channels.get(scode)) r = self.session.http.get(self._stream_url, params={"scode": scode, "token": token, "br_n": "Firefox", "br_v": "52", "br_d": "desktop"}, headers={"User-Agent": useragents.FIREFOX}) data = self.session.http.json(r) return HLSStream.parse_variant_playlist(self.session, data["stream"]) else: log.error("Invalid station-code: {0}", scode)
[ "def", "_get_streams", "(", "self", ")", ":", "token", "=", "self", ".", "login", "(", "self", ".", "get_option", "(", "\"username\"", ")", ",", "self", ".", "get_option", "(", "\"password\"", ")", ")", "m", "=", "self", ".", "_url_re", ".", "match", ...
Finds the streams from tvcatchup.com.
[ "Finds", "the", "streams", "from", "tvcatchup", ".", "com", "." ]
python
test
EntilZha/PyFunctional
functional/transformations.py
https://github.com/EntilZha/PyFunctional/blob/ac04e4a8552b0c464a7f492f7c9862424867b63e/functional/transformations.py#L176-L190
def drop_right_t(n): """ Transformation for Sequence.drop_right :param n: number to drop from right :return: transformation """ if n <= 0: end_index = None else: end_index = -n return Transformation( 'drop_right({0})'.format(n), lambda sequence: sequence[:end_index], None )
[ "def", "drop_right_t", "(", "n", ")", ":", "if", "n", "<=", "0", ":", "end_index", "=", "None", "else", ":", "end_index", "=", "-", "n", "return", "Transformation", "(", "'drop_right({0})'", ".", "format", "(", "n", ")", ",", "lambda", "sequence", ":",...
Transformation for Sequence.drop_right :param n: number to drop from right :return: transformation
[ "Transformation", "for", "Sequence", ".", "drop_right", ":", "param", "n", ":", "number", "to", "drop", "from", "right", ":", "return", ":", "transformation" ]
python
train
ic-labs/django-icekit
icekit/api/base_serializers.py
https://github.com/ic-labs/django-icekit/blob/c507ea5b1864303732c53ad7c5800571fca5fa94/icekit/api/base_serializers.py#L51-L69
def get_fields(self): """ Convert default field names for this sub-serializer into versions where the field name has the prefix removed, but each field object knows the real model field name by setting the field's `source` attribute. """ prefix = getattr(self.Meta, 'source_prefix', '') fields = super(ModelSubSerializer, self).get_fields() fields_without_prefix = OrderedDict() for field_name, field in fields.items(): if field_name.startswith(prefix): # Set real model field name as field's `source` unless the # source is already explicitly set, in which case it is # probably a method name not the direct field name if not field.source: field.source = field_name field_name = field_name[len(prefix):] fields_without_prefix[field_name] = field return fields_without_prefix
[ "def", "get_fields", "(", "self", ")", ":", "prefix", "=", "getattr", "(", "self", ".", "Meta", ",", "'source_prefix'", ",", "''", ")", "fields", "=", "super", "(", "ModelSubSerializer", ",", "self", ")", ".", "get_fields", "(", ")", "fields_without_prefix...
Convert default field names for this sub-serializer into versions where the field name has the prefix removed, but each field object knows the real model field name by setting the field's `source` attribute.
[ "Convert", "default", "field", "names", "for", "this", "sub", "-", "serializer", "into", "versions", "where", "the", "field", "name", "has", "the", "prefix", "removed", "but", "each", "field", "object", "knows", "the", "real", "model", "field", "name", "by",...
python
train
nevimov/django-easycart
easycart/cart.py
https://github.com/nevimov/django-easycart/blob/81b7d7d4b197e34d21dcd8cb9eb9104b565041a9/easycart/cart.py#L497-L514
def count_items(self, unique=True): """Count items in the cart. Parameters ---------- unique : bool-convertible, optional Returns ------- int If `unique` is truthy, then the result is the number of items in the cart. Otherwise, it's the sum of all item quantities. """ if unique: return len(self.items) return sum([item.quantity for item in self.items.values()])
[ "def", "count_items", "(", "self", ",", "unique", "=", "True", ")", ":", "if", "unique", ":", "return", "len", "(", "self", ".", "items", ")", "return", "sum", "(", "[", "item", ".", "quantity", "for", "item", "in", "self", ".", "items", ".", "valu...
Count items in the cart. Parameters ---------- unique : bool-convertible, optional Returns ------- int If `unique` is truthy, then the result is the number of items in the cart. Otherwise, it's the sum of all item quantities.
[ "Count", "items", "in", "the", "cart", "." ]
python
train
bcbio/bcbio-nextgen
scripts/bcbio_nextgen.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/bcbio_nextgen.py#L162-L216
def _add_inputs_to_kwargs(args, kwargs, parser): """Convert input system config, flow cell directory and sample yaml to kwargs. Handles back compatibility with previous commandlines while allowing flexible specification of input parameters. """ inputs = [x for x in [args.global_config, args.fc_dir] + args.run_config if x is not None] global_config = "bcbio_system.yaml" # default configuration if not specified if kwargs.get("workflow", "") == "template": if args.only_metadata: inputs.append("--only-metadata") if args.force_single: inputs.append("--force-single") if args.separators: inputs.extend(["--separators", args.separators]) kwargs["inputs"] = inputs return kwargs elif len(inputs) == 1: if os.path.isfile(inputs[0]): fc_dir = None run_info_yaml = inputs[0] else: fc_dir = inputs[0] run_info_yaml = None elif len(inputs) == 2: if os.path.isfile(inputs[0]): global_config = inputs[0] if os.path.isfile(inputs[1]): fc_dir = None run_info_yaml = inputs[1] else: fc_dir = inputs[1] run_info_yaml = None else: fc_dir, run_info_yaml = inputs elif len(inputs) == 3: global_config, fc_dir, run_info_yaml = inputs elif args.version: print(version.__version__) sys.exit() else: print("Incorrect input arguments", inputs) parser.print_help() sys.exit() if fc_dir: fc_dir = os.path.abspath(fc_dir) if run_info_yaml: run_info_yaml = os.path.abspath(run_info_yaml) if kwargs.get("workflow"): kwargs["inputs"] = inputs kwargs["config_file"] = global_config kwargs["fc_dir"] = fc_dir kwargs["run_info_yaml"] = run_info_yaml return kwargs
[ "def", "_add_inputs_to_kwargs", "(", "args", ",", "kwargs", ",", "parser", ")", ":", "inputs", "=", "[", "x", "for", "x", "in", "[", "args", ".", "global_config", ",", "args", ".", "fc_dir", "]", "+", "args", ".", "run_config", "if", "x", "is", "not"...
Convert input system config, flow cell directory and sample yaml to kwargs. Handles back compatibility with previous commandlines while allowing flexible specification of input parameters.
[ "Convert", "input", "system", "config", "flow", "cell", "directory", "and", "sample", "yaml", "to", "kwargs", "." ]
python
train
cloudera/cm_api
python/src/cm_api/endpoints/types.py
https://github.com/cloudera/cm_api/blob/5d2512375bd94684b4da36df9e0d9177865ffcbb/python/src/cm_api/endpoints/types.py#L325-L331
def _cmd(self, command, data=None, params=None, api_version=1): """ Invokes a command on the resource. Commands are expected to be under the "commands/" sub-resource. """ return self._post("commands/" + command, ApiCommand, data=data, params=params, api_version=api_version)
[ "def", "_cmd", "(", "self", ",", "command", ",", "data", "=", "None", ",", "params", "=", "None", ",", "api_version", "=", "1", ")", ":", "return", "self", ".", "_post", "(", "\"commands/\"", "+", "command", ",", "ApiCommand", ",", "data", "=", "data...
Invokes a command on the resource. Commands are expected to be under the "commands/" sub-resource.
[ "Invokes", "a", "command", "on", "the", "resource", ".", "Commands", "are", "expected", "to", "be", "under", "the", "commands", "/", "sub", "-", "resource", "." ]
python
train
cherrypy/cheroot
cheroot/server.py
https://github.com/cherrypy/cheroot/blob/2af3b1798d66da697957480d3a8b4831a405770b/cheroot/server.py#L1610-L1647
def clear_stats(self): """Reset server stat counters..""" self._start_time = None self._run_time = 0 self.stats = { 'Enabled': False, 'Bind Address': lambda s: repr(self.bind_addr), 'Run time': lambda s: (not s['Enabled']) and -1 or self.runtime(), 'Accepts': 0, 'Accepts/sec': lambda s: s['Accepts'] / self.runtime(), 'Queue': lambda s: getattr(self.requests, 'qsize', None), 'Threads': lambda s: len(getattr(self.requests, '_threads', [])), 'Threads Idle': lambda s: getattr(self.requests, 'idle', None), 'Socket Errors': 0, 'Requests': lambda s: (not s['Enabled']) and -1 or sum( [w['Requests'](w) for w in s['Worker Threads'].values()], 0, ), 'Bytes Read': lambda s: (not s['Enabled']) and -1 or sum( [w['Bytes Read'](w) for w in s['Worker Threads'].values()], 0, ), 'Bytes Written': lambda s: (not s['Enabled']) and -1 or sum( [w['Bytes Written'](w) for w in s['Worker Threads'].values()], 0, ), 'Work Time': lambda s: (not s['Enabled']) and -1 or sum( [w['Work Time'](w) for w in s['Worker Threads'].values()], 0, ), 'Read Throughput': lambda s: (not s['Enabled']) and -1 or sum( [w['Bytes Read'](w) / (w['Work Time'](w) or 1e-6) for w in s['Worker Threads'].values()], 0, ), 'Write Throughput': lambda s: (not s['Enabled']) and -1 or sum( [w['Bytes Written'](w) / (w['Work Time'](w) or 1e-6) for w in s['Worker Threads'].values()], 0, ), 'Worker Threads': {}, } logging.statistics['Cheroot HTTPServer %d' % id(self)] = self.stats
[ "def", "clear_stats", "(", "self", ")", ":", "self", ".", "_start_time", "=", "None", "self", ".", "_run_time", "=", "0", "self", ".", "stats", "=", "{", "'Enabled'", ":", "False", ",", "'Bind Address'", ":", "lambda", "s", ":", "repr", "(", "self", ...
Reset server stat counters..
[ "Reset", "server", "stat", "counters", ".." ]
python
train
edx/edx-oauth2-provider
edx_oauth2_provider/oidc/core.py
https://github.com/edx/edx-oauth2-provider/blob/73e7569a8369e74c345022ccba634365e24befab/edx_oauth2_provider/oidc/core.py#L59-L99
def id_token(access_token, nonce=None, claims_request=None): """ Returns data required for an OpenID Connect ID Token according to: - http://openid.net/specs/openid-connect-basic-1_0.html#IDToken Arguments: access_token (:class:`AccessToken`): Associated OAuth2 access token. nonce (str): Optional nonce to protect against replay attacks. claims_request (dict): Optional dictionary with the claims request parameters. Information on the `claims_request` parameter specification: - http://openid.net/specs/openid-connect-core-1_0.html#ClaimsParameter Returns an :class:`IDToken` instance with the scopes from the access_token and the corresponding claims. Claims in the `claims_request` paramater id_token section will be included *in addition* to the ones corresponding to the scopes specified in the `access_token`. """ handlers = HANDLERS['id_token'] # Select only the relevant section of the claims request. claims_request_section = claims_request.get('id_token', {}) if claims_request else {} scope_request = provider.scope.to_names(access_token.scope) if nonce: claims_request_section.update({'nonce': {'value': nonce}}) scopes, claims = collect( handlers, access_token, scope_request=scope_request, claims_request=claims_request_section, ) return IDToken(access_token, scopes, claims)
[ "def", "id_token", "(", "access_token", ",", "nonce", "=", "None", ",", "claims_request", "=", "None", ")", ":", "handlers", "=", "HANDLERS", "[", "'id_token'", "]", "# Select only the relevant section of the claims request.", "claims_request_section", "=", "claims_requ...
Returns data required for an OpenID Connect ID Token according to: - http://openid.net/specs/openid-connect-basic-1_0.html#IDToken Arguments: access_token (:class:`AccessToken`): Associated OAuth2 access token. nonce (str): Optional nonce to protect against replay attacks. claims_request (dict): Optional dictionary with the claims request parameters. Information on the `claims_request` parameter specification: - http://openid.net/specs/openid-connect-core-1_0.html#ClaimsParameter Returns an :class:`IDToken` instance with the scopes from the access_token and the corresponding claims. Claims in the `claims_request` paramater id_token section will be included *in addition* to the ones corresponding to the scopes specified in the `access_token`.
[ "Returns", "data", "required", "for", "an", "OpenID", "Connect", "ID", "Token", "according", "to", ":" ]
python
train
dancsalo/TensorBase
tensorbase/base.py
https://github.com/dancsalo/TensorBase/blob/3d42a326452bd03427034916ff2fb90730020204/tensorbase/base.py#L262-L273
def _set_tf_functions(self): """ Sets up summary writer, saver, and session, with configurable gpu visibility """ merged = tf.summary.merge_all() saver = tf.train.Saver() if type(self.flags['GPU']) is int: os.environ["CUDA_VISIBLE_DEVICES"] = str(self.flags['GPU']) print('Using GPU %d' % self.flags['GPU']) gpu_options = tf.GPUOptions(allow_growth=True) config = tf.ConfigProto(log_device_placement=False, gpu_options=gpu_options) sess = tf.Session(config=config) writer = tf.summary.FileWriter(self.flags['LOGGING_DIRECTORY'], sess.graph) return merged, saver, sess, writer
[ "def", "_set_tf_functions", "(", "self", ")", ":", "merged", "=", "tf", ".", "summary", ".", "merge_all", "(", ")", "saver", "=", "tf", ".", "train", ".", "Saver", "(", ")", "if", "type", "(", "self", ".", "flags", "[", "'GPU'", "]", ")", "is", "...
Sets up summary writer, saver, and session, with configurable gpu visibility
[ "Sets", "up", "summary", "writer", "saver", "and", "session", "with", "configurable", "gpu", "visibility" ]
python
train
googleapis/google-cloud-python
api_core/google/api_core/page_iterator.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/api_core/google/api_core/page_iterator.py#L399-L418
def _get_next_page_response(self): """Requests the next page from the path provided. Returns: dict: The parsed JSON response of the next page's contents. Raises: ValueError: If the HTTP method is not ``GET`` or ``POST``. """ params = self._get_query_params() if self._HTTP_METHOD == "GET": return self.api_request( method=self._HTTP_METHOD, path=self.path, query_params=params ) elif self._HTTP_METHOD == "POST": return self.api_request( method=self._HTTP_METHOD, path=self.path, data=params ) else: raise ValueError("Unexpected HTTP method", self._HTTP_METHOD)
[ "def", "_get_next_page_response", "(", "self", ")", ":", "params", "=", "self", ".", "_get_query_params", "(", ")", "if", "self", ".", "_HTTP_METHOD", "==", "\"GET\"", ":", "return", "self", ".", "api_request", "(", "method", "=", "self", ".", "_HTTP_METHOD"...
Requests the next page from the path provided. Returns: dict: The parsed JSON response of the next page's contents. Raises: ValueError: If the HTTP method is not ``GET`` or ``POST``.
[ "Requests", "the", "next", "page", "from", "the", "path", "provided", "." ]
python
train
titusjan/argos
argos/inspector/qtplugins/table.py
https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/inspector/qtplugins/table.py#L58-L98
def makeReplacementField(formatSpec, altFormatSpec='', testValue=None): """ Prepends a colon and wraps the formatSpec in curly braces to yield a replacement field. The format specification is part of a replacement field, which can be used in new-style string formatting. See: https://docs.python.org/3/library/string.html#format-string-syntax https://docs.python.org/3/library/string.html#format-specification-mini-language If the formatSpec does not contain a a color or exclamation mark, a colon is prepended. If the formatSpec starts and end in quotes (single or double) only the quotes are removed, no curly braces or colon charactes are added. This allows users to define a format spec. :param formatSpec: e.g. '5.2f' will return '{:5.2f}' :param altFormatSpec: alternative that will be used if the formatSpec evaluates to False :param testValue: if not None, result.format(testValue) will be evaluated as a test. :return: string """ check_is_a_string(formatSpec) check_is_a_string(altFormatSpec) fmt = altFormatSpec if not formatSpec else formatSpec if is_quoted(fmt): fmt = fmt[1:-1] # remove quotes else: if fmt and ':' not in fmt and '!' not in fmt: fmt = ':' + fmt fmt = '{' + fmt + '}' # Test resulting replacement field if testValue is not None: try: _dummy = fmt.format(testValue) except Exception: msg = ("Format specifier failed: replacement-field={!r}, test-value={!r}" .format(fmt, testValue)) logger.error(msg) raise ValueError(msg) logger.debug("Resulting replacement field: {!r}".format(fmt)) return fmt
[ "def", "makeReplacementField", "(", "formatSpec", ",", "altFormatSpec", "=", "''", ",", "testValue", "=", "None", ")", ":", "check_is_a_string", "(", "formatSpec", ")", "check_is_a_string", "(", "altFormatSpec", ")", "fmt", "=", "altFormatSpec", "if", "not", "fo...
Prepends a colon and wraps the formatSpec in curly braces to yield a replacement field. The format specification is part of a replacement field, which can be used in new-style string formatting. See: https://docs.python.org/3/library/string.html#format-string-syntax https://docs.python.org/3/library/string.html#format-specification-mini-language If the formatSpec does not contain a a color or exclamation mark, a colon is prepended. If the formatSpec starts and end in quotes (single or double) only the quotes are removed, no curly braces or colon charactes are added. This allows users to define a format spec. :param formatSpec: e.g. '5.2f' will return '{:5.2f}' :param altFormatSpec: alternative that will be used if the formatSpec evaluates to False :param testValue: if not None, result.format(testValue) will be evaluated as a test. :return: string
[ "Prepends", "a", "colon", "and", "wraps", "the", "formatSpec", "in", "curly", "braces", "to", "yield", "a", "replacement", "field", "." ]
python
train
cimatosa/progression
progression/decorators.py
https://github.com/cimatosa/progression/blob/82cf74a25a47f9bda96157cc2c88e5975c20b41d/progression/decorators.py#L130-L136
def _get_callargs(self, *args, **kwargs): """ Retrieve all arguments that `self.func` needs and return a dictionary with call arguments. """ callargs = getcallargs(self.func, *args, **kwargs) return callargs
[ "def", "_get_callargs", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "callargs", "=", "getcallargs", "(", "self", ".", "func", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "callargs" ]
Retrieve all arguments that `self.func` needs and return a dictionary with call arguments.
[ "Retrieve", "all", "arguments", "that", "self", ".", "func", "needs", "and", "return", "a", "dictionary", "with", "call", "arguments", "." ]
python
train
pmacosta/peng
docs/support/requirements_to_rst.py
https://github.com/pmacosta/peng/blob/976935377adaa3de26fc5677aceb2cdfbd6f93a7/docs/support/requirements_to_rst.py#L53-L74
def op_to_words(item): """Translate >=, ==, <= to words.""" sdicts = [ {"==": ""}, {">=": " or newer"}, {">": "newer than "}, {"<=": " or older"}, {"<": "older than "}, {"!=": "except "}, ] for sdict in sdicts: prefix = list(sdict.keys())[0] suffix = sdict[prefix] if item.startswith(prefix): if prefix == "==": return item[2:] if prefix == "!=": return suffix + item[2:] if prefix in [">", "<"]: return suffix + item[1:] return item[2:] + suffix raise RuntimeError("Inequality not supported")
[ "def", "op_to_words", "(", "item", ")", ":", "sdicts", "=", "[", "{", "\"==\"", ":", "\"\"", "}", ",", "{", "\">=\"", ":", "\" or newer\"", "}", ",", "{", "\">\"", ":", "\"newer than \"", "}", ",", "{", "\"<=\"", ":", "\" or older\"", "}", ",", "{", ...
Translate >=, ==, <= to words.
[ "Translate", ">", "=", "==", "<", "=", "to", "words", "." ]
python
test
senaite/senaite.core
bika/lims/browser/publish/reports_listing.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/publish/reports_listing.py#L103-L110
def get_filesize(self, pdf): """Compute the filesize of the PDF """ try: filesize = float(pdf.get_size()) return filesize / 1024 except (POSKeyError, TypeError): return 0
[ "def", "get_filesize", "(", "self", ",", "pdf", ")", ":", "try", ":", "filesize", "=", "float", "(", "pdf", ".", "get_size", "(", ")", ")", "return", "filesize", "/", "1024", "except", "(", "POSKeyError", ",", "TypeError", ")", ":", "return", "0" ]
Compute the filesize of the PDF
[ "Compute", "the", "filesize", "of", "the", "PDF" ]
python
train
rm-hull/luma.core
luma/core/virtual.py
https://github.com/rm-hull/luma.core/blob/034b628fb304a01e77732a299c0b42e94d6443db/luma/core/virtual.py#L88-L96
def is_overlapping_viewport(self, hotspot, xy): """ Checks to see if the hotspot at position ``(x, y)`` is (at least partially) visible according to the position of the viewport. """ l1, t1, r1, b1 = calc_bounds(xy, hotspot) l2, t2, r2, b2 = calc_bounds(self._position, self._device) return range_overlap(l1, r1, l2, r2) and range_overlap(t1, b1, t2, b2)
[ "def", "is_overlapping_viewport", "(", "self", ",", "hotspot", ",", "xy", ")", ":", "l1", ",", "t1", ",", "r1", ",", "b1", "=", "calc_bounds", "(", "xy", ",", "hotspot", ")", "l2", ",", "t2", ",", "r2", ",", "b2", "=", "calc_bounds", "(", "self", ...
Checks to see if the hotspot at position ``(x, y)`` is (at least partially) visible according to the position of the viewport.
[ "Checks", "to", "see", "if", "the", "hotspot", "at", "position", "(", "x", "y", ")", "is", "(", "at", "least", "partially", ")", "visible", "according", "to", "the", "position", "of", "the", "viewport", "." ]
python
train
mamrhein/specification
specification/_extd_ast_expr.py
https://github.com/mamrhein/specification/blob/a4c09a0d286cda7a04e8a189f12e23edd97f64ea/specification/_extd_ast_expr.py#L429-L450
def visit_arguments(self, node: AST, dfltChaining: bool = True) -> str: """Return `node`s representation as argument list.""" args = node.args dflts = node.defaults vararg = node.vararg kwargs = node.kwonlyargs kwdflts = node.kw_defaults kwarg = node.kwarg self.compact = True n_args_without_dflt = len(args) - len(dflts) args_src = (arg.arg for arg in args[:n_args_without_dflt]) dflts_src = (f"{arg.arg}={self.visit(dflt)}" for arg, dflt in zip(args[n_args_without_dflt:], dflts)) vararg_src = (f"*{vararg.arg}",) if vararg else () kwargs_src = ((f"{kw.arg}={self.visit(dflt)}" if dflt is not None else f"{kw.arg}") for kw, dflt in zip(kwargs, kwdflts)) kwarg_src = (f"**{kwarg.arg}",) if kwarg else () src = ', '.join(chain(args_src, dflts_src, vararg_src, kwargs_src, kwarg_src)) self.compact = False return src
[ "def", "visit_arguments", "(", "self", ",", "node", ":", "AST", ",", "dfltChaining", ":", "bool", "=", "True", ")", "->", "str", ":", "args", "=", "node", ".", "args", "dflts", "=", "node", ".", "defaults", "vararg", "=", "node", ".", "vararg", "kwar...
Return `node`s representation as argument list.
[ "Return", "node", "s", "representation", "as", "argument", "list", "." ]
python
train
CI-WATER/gsshapy
gsshapy/grid/grid_to_gssha.py
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/grid/grid_to_gssha.py#L589-L606
def xd(self): """get xarray dataset file handle to LSM files""" if self._xd is None: path_to_lsm_files = path.join(self.lsm_input_folder_path, self.lsm_search_card) self._xd = pa.open_mfdataset(path_to_lsm_files, lat_var=self.lsm_lat_var, lon_var=self.lsm_lon_var, time_var=self.lsm_time_var, lat_dim=self.lsm_lat_dim, lon_dim=self.lsm_lon_dim, time_dim=self.lsm_time_dim, loader=self.pangaea_loader) self.lsm_time_dim = 'time' self.lsm_time_var = 'time' return self._xd
[ "def", "xd", "(", "self", ")", ":", "if", "self", ".", "_xd", "is", "None", ":", "path_to_lsm_files", "=", "path", ".", "join", "(", "self", ".", "lsm_input_folder_path", ",", "self", ".", "lsm_search_card", ")", "self", ".", "_xd", "=", "pa", ".", "...
get xarray dataset file handle to LSM files
[ "get", "xarray", "dataset", "file", "handle", "to", "LSM", "files" ]
python
train