repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
klmitch/framer
framer/framers.py
https://github.com/klmitch/framer/blob/bd34cee9737793dab61d1d8973930b64bd08acb4/framer/framers.py#L838-L854
def to_bytes(self, frame, state):
    """
    Convert a single frame into bytes that can be transmitted on
    the stream.

    :param frame: The frame to convert.  Should be the same type
                  of object returned by ``to_frame()``.
    :param state: An instance of ``FramerState``.  This object may
                  be used to track information across calls to the
                  method.

    :returns: Bytes that may be transmitted on the stream.
    """
    # Coerce the frame to bytes, run it through the variant encoder,
    # then append the NUL delimiter.
    payload = six.binary_type(frame)
    encoded = self.variant.encode(payload)
    return six.binary_type(encoded) + b'\0'
[ "def", "to_bytes", "(", "self", ",", "frame", ",", "state", ")", ":", "# Encode the frame and append the delimiter", "return", "six", ".", "binary_type", "(", "self", ".", "variant", ".", "encode", "(", "six", ".", "binary_type", "(", "frame", ")", ")", ")",...
Convert a single frame into bytes that can be transmitted on the stream. :param frame: The frame to convert. Should be the same type of object returned by ``to_frame()``. :param state: An instance of ``FramerState``. This object may be used to track information across calls to the method. :returns: Bytes that may be transmitted on the stream.
[ "Convert", "a", "single", "frame", "into", "bytes", "that", "can", "be", "transmitted", "on", "the", "stream", "." ]
python
train
yvesalexandre/bandicoot
bandicoot/helper/stops.py
https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/helper/stops.py#L46-L86
def dbscan(points, eps, minpts):
    """
    Implementation of [DBSCAN]_ (*A density-based algorithm for discovering
    clusters in large spatial databases with noise*). It accepts a list of
    points (lat, lon) and returns the labels associated with the points.

    References
    ----------
    .. [DBSCAN] Ester, M., Kriegel, H. P., Sander, J., & Xu, X. (1996,
       August). A density-based algorithm for discovering clusters in large
       spatial databases with noise. In Kdd (Vol. 96, No. 34, pp. 226-231).
    """
    n = len(points)
    labels = [None] * n  # None == noise / not yet visited
    dist = compute_distance_matrix(points)
    neighborhoods = [get_neighbors(dist, idx, eps) for idx in range(n)]

    cluster_id = 0
    for idx in range(n):
        # Skip points already claimed by a cluster, and non-core points.
        if labels[idx] is not None or len(neighborhoods[idx]) < minpts:
            continue

        # Seed a new cluster and expand it to density-reachable points.
        labels[idx] = cluster_id
        stack = [idx]
        while stack:
            current = stack.pop()
            for neighbor in neighborhoods[current]:
                if labels[neighbor] is not None:
                    continue
                labels[neighbor] = cluster_id
                # Only core points propagate the expansion further.
                if len(neighborhoods[neighbor]) >= minpts:
                    stack.append(neighbor)
        cluster_id += 1

    return labels
[ "def", "dbscan", "(", "points", ",", "eps", ",", "minpts", ")", ":", "next_label", "=", "0", "n", "=", "len", "(", "points", ")", "labels", "=", "[", "None", "]", "*", "n", "distance_matrix", "=", "compute_distance_matrix", "(", "points", ")", "neighbo...
Implementation of [DBSCAN]_ (*A density-based algorithm for discovering clusters in large spatial databases with noise*). It accepts a list of points (lat, lon) and returns the labels associated with the points. References ---------- .. [DBSCAN] Ester, M., Kriegel, H. P., Sander, J., & Xu, X. (1996, August). A density-based algorithm for discovering clusters in large spatial databases with noise. In Kdd (Vol. 96, No. 34, pp. 226-231).
[ "Implementation", "of", "[", "DBSCAN", "]", "_", "(", "*", "A", "density", "-", "based", "algorithm", "for", "discovering", "clusters", "in", "large", "spatial", "databases", "with", "noise", "*", ")", ".", "It", "accepts", "a", "list", "of", "points", "...
python
train
merll/docker-fabric
dockerfabric/tasks.py
https://github.com/merll/docker-fabric/blob/785d84e40e17265b667d8b11a6e30d8e6b2bf8d4/dockerfabric/tasks.py#L116-L126
def list_images(list_all=False, full_ids=False):
    """
    Lists images on the Docker remote host, similar to ``docker images``.

    :param list_all: Lists all images (e.g. dependencies). Default is ``False``,
      only shows named images.
    :type list_all: bool
    :param full_ids: Shows the full ids. When ``False`` (default) only shows
      the first 12 characters.
    :type full_ids: bool
    """
    # Fetch the image list from the remote daemon, then render it as a table.
    found_images = docker_fabric().images(all=list_all)
    _format_output_table(found_images, IMAGE_COLUMNS, full_ids)
[ "def", "list_images", "(", "list_all", "=", "False", ",", "full_ids", "=", "False", ")", ":", "images", "=", "docker_fabric", "(", ")", ".", "images", "(", "all", "=", "list_all", ")", "_format_output_table", "(", "images", ",", "IMAGE_COLUMNS", ",", "full...
Lists images on the Docker remote host, similar to ``docker images``. :param list_all: Lists all images (e.g. dependencies). Default is ``False``, only shows named images. :type list_all: bool :param full_ids: Shows the full ids. When ``False`` (default) only shows the first 12 characters. :type full_ids: bool
[ "Lists", "images", "on", "the", "Docker", "remote", "host", "similar", "to", "docker", "images", "." ]
python
train
PmagPy/PmagPy
pmagpy/builder2.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/builder2.py#L833-L877
def read_magic_file(self, path, sort_by_this_name, sort_by_file_type=False):
    """
    Read a magic-formatted tab-delimited file.

    Returns a tuple ``(DATA, header, file_type)`` where ``DATA`` is a
    dictionary of dictionaries, with this format:
    {'Z35.5a': {'specimen_weight': '1.000e-03',
                'er_citation_names': 'This study',
                'specimen_volume': '', 'er_location_name': '',
                'er_site_name': 'Z35.', 'er_sample_name': 'Z35.5',
                'specimen_class': '', 'er_specimen_name': 'Z35.5a',
                'specimen_lithology': '', 'specimen_type': ''}, ....}

    On failure, returns ``(False, None, reason)`` with *reason* one of
    'empty_file' or 'bad_file'.
    """
    DATA = {}
    with open(path, 'r') as fin:
        lines = list(fin.readlines())
    # Bug fix: an empty file used to hit lines[0] and raise IndexError
    # before the empty-file check could run.
    if not lines or not lines[0]:
        return False, None, 'empty_file'
    first_line = lines[0]
    # The first line declares the delimiter ("tab ..." / "space-ish ...").
    # Check the first two characters via slicing instead of first_line[1],
    # which raised IndexError on a one-character first line.
    if 's' in first_line[:2]:
        delim = ' '
    elif 't' in first_line[:2]:
        delim = '\t'
    else:
        print('-W- error reading ', path)
        return False, None, 'bad_file'
    file_type = first_line.strip('\n').split(delim)[1]
    if sort_by_file_type:
        # e.g. "magic_measurements" -> "measurement" -> "er_measurement_name"
        item_type = file_type.split('_')[1][:-1]
        if item_type == 'age':
            sort_by_this_name = "by_line_number"
        else:
            sort_by_this_name = 'er_' + item_type + '_name'
    # A file with no header row is malformed; report rather than crash.
    if len(lines) < 2:
        return False, None, 'bad_file'
    header = lines[1].strip('\n').split(delim)
    counter = 0
    for line in lines[2:]:
        tmp_data = {}
        tmp_line = line.strip('\n').split(delim)
        # Pad short rows with empty strings so every header key exists.
        for i in range(len(header)):
            if i < len(tmp_line):
                tmp_data[header[i]] = tmp_line[i].strip()
            else:
                tmp_data[header[i]] = ""
        if sort_by_this_name == "by_line_number":
            DATA[counter] = tmp_data
            counter += 1
        else:
            # Rows with an empty sort key are silently dropped.
            if tmp_data[sort_by_this_name] != "":
                DATA[tmp_data[sort_by_this_name]] = tmp_data
    return DATA, header, file_type
[ "def", "read_magic_file", "(", "self", ",", "path", ",", "sort_by_this_name", ",", "sort_by_file_type", "=", "False", ")", ":", "DATA", "=", "{", "}", "with", "open", "(", "path", ",", "'r'", ")", "as", "fin", ":", "lines", "=", "list", "(", "fin", "...
read a magic-formatted tab-delimited file. return a dictionary of dictionaries, with this format: {'Z35.5a': {'specimen_weight': '1.000e-03', 'er_citation_names': 'This study', 'specimen_volume': '', 'er_location_name': '', 'er_site_name': 'Z35.', 'er_sample_name': 'Z35.5', 'specimen_class': '', 'er_specimen_name': 'Z35.5a', 'specimen_lithology': '', 'specimen_type': ''}, ....}
[ "read", "a", "magic", "-", "formatted", "tab", "-", "delimited", "file", ".", "return", "a", "dictionary", "of", "dictionaries", "with", "this", "format", ":", "{", "Z35", ".", "5a", ":", "{", "specimen_weight", ":", "1", ".", "000e", "-", "03", "er_ci...
python
train
trendels/rhino
rhino/http.py
https://github.com/trendels/rhino/blob/f1f0ef21b6080a2bd130b38b5bef163074c94aed/rhino/http.py#L60-L79
def match_etag(etag, header, weak=False):
    """Try to match an ETag against a header value.

    If `weak` is True, uses the weak comparison function.
    """
    if etag is None:
        return False
    match = etag_re.match(etag)
    if match is None:
        raise ValueError("Not a well-formed ETag: '%s'" % etag)
    is_weak, etag = match.groups()
    parsed_header = parse_etag_header(header)
    # A header of '*' matches any entity.
    if parsed_header == '*':
        return True
    # A weak ETag can only match under weak comparison.
    if is_weak and not weak:
        return False
    # Under weak comparison all tags are candidates; under strong
    # comparison only the non-weak ones are.
    candidates = [tag for weak_flag, tag in parsed_header
                  if weak or not weak_flag]
    return etag in candidates
[ "def", "match_etag", "(", "etag", ",", "header", ",", "weak", "=", "False", ")", ":", "if", "etag", "is", "None", ":", "return", "False", "m", "=", "etag_re", ".", "match", "(", "etag", ")", "if", "not", "m", ":", "raise", "ValueError", "(", "\"Not...
Try to match an ETag against a header value. If `weak` is True, uses the weak comparison function.
[ "Try", "to", "match", "an", "ETag", "against", "a", "header", "value", "." ]
python
train
BeyondTheClouds/enoslib
enoslib/api.py
https://github.com/BeyondTheClouds/enoslib/blob/fb00be58e56a7848cfe482187d659744919fe2f7/enoslib/api.py#L782-L807
def wait_ssh(roles, retries=100, interval=30):
    """Wait for all the machines to be ssh-reachable

    Lets ansible initiate a communication and retries if needed.

    Args:
        roles: roles describing the hosts to test
        retries (int): Number of times we'll be retrying an SSH connection
        interval (int): Interval to wait in seconds between two retries
    """
    utils_playbook = os.path.join(ANSIBLE_DIR, 'utils.yml')
    options = {'enos_action': 'ping'}

    for attempt in range(retries):
        try:
            run_ansible([utils_playbook], roles=roles,
                        extra_vars=options, on_error_continue=False)
            # All hosts answered the ping: we are done.
            return
        except EnosUnreachableHostsError as e:
            logger.info("Hosts unreachable: %s " % e.hosts)
            logger.info("Retrying... %s/%s" % (attempt + 1, retries))
            time.sleep(interval)
    # Every attempt failed.
    raise EnosSSHNotReady('Maximum retries reached')
[ "def", "wait_ssh", "(", "roles", ",", "retries", "=", "100", ",", "interval", "=", "30", ")", ":", "utils_playbook", "=", "os", ".", "path", ".", "join", "(", "ANSIBLE_DIR", ",", "'utils.yml'", ")", "options", "=", "{", "'enos_action'", ":", "'ping'", ...
Wait for all the machines to be ssh-reachable Lets ansible initiate a communication and retries if needed. Args: inventory (string): path to the inventory file to test retries (int): Number of times we'll be retrying an SSH connection interval (int): Interval to wait in seconds between two retries
[ "Wait", "for", "all", "the", "machines", "to", "be", "ssh", "-", "reachable" ]
python
train
QInfer/python-qinfer
src/qinfer/simple_est.py
https://github.com/QInfer/python-qinfer/blob/8170c84a0be1723f8c6b09e0d3c7a40a886f1fe3/src/qinfer/simple_est.py#L70-L106
def data_to_params(data, expparams_dtype,
        col_outcomes=(0, 'counts'),
        cols_expparams=None
    ):
    """
    Given data as a NumPy array, separates out each column either as
    the outcomes, or as a field of an expparams array. Columns may
    be specified either as indices into a two-axis scalar array,
    or as field names for a one-axis record array.

    Since scalar arrays are homogenous in type, this may result in loss of
    precision due to casting between data types.
    """
    BY_IDX, BY_NAME = range(2)

    # A dtype with no named fields is a plain scalar dtype.
    # (np.issctype was removed in NumPy 2.0, so test via dtype.fields.)
    is_exp_scalar = np.dtype(expparams_dtype).fields is None
    is_data_scalar = data.dtype.fields is None

    # Column selector: integer index for scalar (two-axis) arrays,
    # field name for record (one-axis) arrays.
    s_ = (
        (lambda idx: np.s_[..., idx[BY_IDX]])
        if is_data_scalar else
        (lambda idx: np.s_[idx[BY_NAME]])
    )

    outcomes = data[s_(col_outcomes)].astype(int)

    expparams = np.empty(outcomes.shape, dtype=expparams_dtype)
    if is_exp_scalar:
        expparams[:] = data[s_(cols_expparams)]
    else:
        for expparams_key, column in cols_expparams.items():
            expparams[expparams_key] = data[s_(column)]

    return outcomes, expparams
[ "def", "data_to_params", "(", "data", ",", "expparams_dtype", ",", "col_outcomes", "=", "(", "0", ",", "'counts'", ")", ",", "cols_expparams", "=", "None", ")", ":", "BY_IDX", ",", "BY_NAME", "=", "range", "(", "2", ")", "is_exp_scalar", "=", "np", ".", ...
Given data as a NumPy array, separates out each column either as the outcomes, or as a field of an expparams array. Columns may be specified either as indices into a two-axis scalar array, or as field names for a one-axis record array. Since scalar arrays are homogenous in type, this may result in loss of precision due to casting between data types.
[ "Given", "data", "as", "a", "NumPy", "array", "separates", "out", "each", "column", "either", "as", "the", "outcomes", "or", "as", "a", "field", "of", "an", "expparams", "array", ".", "Columns", "may", "be", "specified", "either", "as", "indices", "into", ...
python
train
Parisson/TimeSide
timeside/plugins/grapher/utils.py
https://github.com/Parisson/TimeSide/blob/0618d75cd2f16021afcfd3d5b77f692adad76ea5/timeside/plugins/grapher/utils.py#L72-L81
def downsample(vector, factor):
    """
    downsample(vector, factor):
        Downsample (by averaging) a vector by an integer factor.

    Returns 0 (with a warning) if len(vector) is not divisible by factor.
    """
    if (len(vector) % factor):
        # print() call: the original used a Python 2 print statement,
        # which is a SyntaxError on Python 3.
        print("Length of 'vector' is not divisible by 'factor'=%d!" % factor)
        return 0
    # Use floor division: on Python 3, '/' yields a float, which is not a
    # valid shape.  reshape() returns a view instead of mutating
    # vector.shape in place, leaving the caller's array untouched.
    return numpy.mean(vector.reshape(len(vector) // factor, factor), axis=1)
[ "def", "downsample", "(", "vector", ",", "factor", ")", ":", "if", "(", "len", "(", "vector", ")", "%", "factor", ")", ":", "print", "\"Length of 'vector' is not divisible by 'factor'=%d!\"", "%", "factor", "return", "0", "vector", ".", "shape", "=", "(", "l...
downsample(vector, factor): Downsample (by averaging) a vector by an integer factor.
[ "downsample", "(", "vector", "factor", ")", ":", "Downsample", "(", "by", "averaging", ")", "a", "vector", "by", "an", "integer", "factor", "." ]
python
train
riptano/ccm
ccmlib/cluster.py
https://github.com/riptano/ccm/blob/275699f79d102b5039b79cc17fa6305dccf18412/ccmlib/cluster.py#L677-L701
def timed_grep_nodes_for_patterns(self, versions_to_patterns, timeout_seconds, filename="system.log"):
    """
    Searches all nodes in the cluster for a specific regular expression
    based on the node's version.
    Params:
    @versions_to_patterns : an instance of LogPatternToVersionMap, specifying
    the different log patterns based on a node's version.
    @timeout_seconds : the amount of time to spend searching the logs for.
    @filename : the name of the file to search for the patterns. Defaults
    to "system.log".

    Returns the first node where the pattern was found, along with the
    matching lines.
    Raises a TimeoutError if the pattern is not found within the specified
    timeout period.
    """
    end_time = time.time() + timeout_seconds
    while True:
        if time.time() > end_time:
            # Bug fix: versions_to_patterns.patterns is not a string, so
            # concatenating it raised TypeError instead of the intended
            # TimeoutError; wrap it (and timeout_seconds) in str().
            raise TimeoutError(
                time.strftime("%d %b %Y %H:%M:%S", time.gmtime()) +
                " Unable to find: " + str(versions_to_patterns.patterns) +
                " in any node log within " + str(timeout_seconds) + "s")
        for node in self.nodelist():
            pattern = versions_to_patterns(node.get_cassandra_version())
            matchings = node.grep_log(pattern, filename)
            if matchings:
                ret = namedtuple('Node_Log_Matching', 'node matchings')
                return ret(node=node, matchings=matchings)
        time.sleep(1)
[ "def", "timed_grep_nodes_for_patterns", "(", "self", ",", "versions_to_patterns", ",", "timeout_seconds", ",", "filename", "=", "\"system.log\"", ")", ":", "end_time", "=", "time", ".", "time", "(", ")", "+", "timeout_seconds", "while", "True", ":", "if", "time"...
Searches all nodes in the cluster for a specific regular expression based on the node's version. Params: @versions_to_patterns : an instance of LogPatternToVersionMap, specifying the different log patterns based on a node's version. @version : the earliest version the new pattern was introduced. @timeout_seconds : the amount of time to spend searching the logs for. @filename : the name of the file to search for the patterns. Defaults to "system.log". Returns the first node where the pattern was found, along with the matching lines. Raises a TimeoutError if the pattern is not found within the specified timeout period.
[ "Searches", "all", "nodes", "in", "the", "cluster", "for", "a", "specific", "regular", "expression", "based", "on", "the", "node", "s", "version", ".", "Params", ":", "@versions_to_patterns", ":", "an", "instance", "of", "LogPatternToVersionMap", "specifying", "...
python
train
lemieuxl/pyGenClean
pyGenClean/LaTeX/merge_reports.py
https://github.com/lemieuxl/pyGenClean/blob/6173a48ccc0cf3a3bd711b1f2a1fa16248b8bf55/pyGenClean/LaTeX/merge_reports.py#L288-L321
def checkArgs(args):
    """Checks the arguments and options.

    :param args: an object containing the options of the program.
    :type args: argparse.Namespace

    :returns: ``True`` if everything was OK. If there is a problem with an
              option, an exception is raised using the
              :py:class:`ProgramError` class, a message is printed to the
              :class:`sys.stderr` and the program exists with code 1.
    """
    # Files every pyGenClean output directory must contain.
    required_files = (
        "excluded_markers.txt",
        "excluded_samples.txt",
        "results_summary.txt",
        "steps_summary.tex",
        "initial_files.txt",
        "final_files.txt",
    )

    for qc_directory in args.qc_dir:
        # Each input must be an existing directory...
        if not os.path.isdir(qc_directory):
            raise ProgramError("{}: no such directory".format(qc_directory))

        # ...created by pyGenClean...
        base = os.path.basename(qc_directory.rstrip("/"))
        if not base.startswith("data_clean_up."):
            raise ProgramError("{}: not a pyGenClean directory".format(qc_directory))

        # ...and contain the complete set of summary files.
        for fn in required_files:
            required_fn = os.path.join(qc_directory, fn)
            if not os.path.isfile(required_fn):
                raise ProgramError("{}: missing required "
                                   "file".format(required_fn))

    return True
[ "def", "checkArgs", "(", "args", ")", ":", "# For all input directories", "for", "dn", "in", "args", ".", "qc_dir", ":", "# Checking that all the directories exists", "if", "not", "os", ".", "path", ".", "isdir", "(", "dn", ")", ":", "raise", "ProgramError", "...
Checks the arguments and options. :param args: an object containing the options of the program. :type args: argparse.Namespace :returns: ``True`` if everything was OK. If there is a problem with an option, an exception is raised using the :py:class:`ProgramError` class, a message is printed to the :class:`sys.stderr` and the program exists with code 1.
[ "Checks", "the", "arguments", "and", "options", "." ]
python
train
SheffieldML/GPy
GPy/kern/src/stationary.py
https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/kern/src/stationary.py#L193-L213
def update_gradients_full(self, dL_dK, X, X2=None, reset=True):
    """
    Given the derivative of the objective wrt the covariance matrix
    (dL_dK), compute the gradient wrt the parameters of this kernel,
    and store in the parameters object as e.g. self.variance.gradient
    """
    # NOTE(review): `reset` is accepted but never read here — presumably
    # consumed by an overriding subclass; confirm before removing.
    # Variance gradient: K is linear in the variance, so
    # dL/dvariance = sum(K * dL_dK) / variance.
    self.variance.gradient = np.sum(self.K(X, X2)* dL_dK)/self.variance

    #now the lengthscale gradient(s)
    # Chain rule through the scaled distance r: dL/dr = dK/dr * dL/dK.
    dL_dr = self.dK_dr_via_X(X, X2) * dL_dK
    if self.ARD:
        # One gradient per input dimension; weight by the inverse distance.
        tmp = dL_dr*self._inv_dist(X, X2)
        if X2 is None: X2 = X
        if use_stationary_cython:
            self.lengthscale.gradient = self._lengthscale_grads_cython(tmp, X, X2)
        else:
            self.lengthscale.gradient = self._lengthscale_grads_pure(tmp, X, X2)
    else:
        # Single shared lengthscale: dr/dlengthscale = -r/lengthscale.
        r = self._scaled_dist(X, X2)
        self.lengthscale.gradient = -np.sum(dL_dr*r)/self.lengthscale
[ "def", "update_gradients_full", "(", "self", ",", "dL_dK", ",", "X", ",", "X2", "=", "None", ",", "reset", "=", "True", ")", ":", "self", ".", "variance", ".", "gradient", "=", "np", ".", "sum", "(", "self", ".", "K", "(", "X", ",", "X2", ")", ...
Given the derivative of the objective wrt the covariance matrix (dL_dK), compute the gradient wrt the parameters of this kernel, and store in the parameters object as e.g. self.variance.gradient
[ "Given", "the", "derivative", "of", "the", "objective", "wrt", "the", "covariance", "matrix", "(", "dL_dK", ")", "compute", "the", "gradient", "wrt", "the", "parameters", "of", "this", "kernel", "and", "store", "in", "the", "parameters", "object", "as", "e",...
python
train
CiscoDevNet/webexteamssdk
webexteamssdk/utils.py
https://github.com/CiscoDevNet/webexteamssdk/blob/6fc2cc3557e080ba4b2a380664cb2a0532ae45cd/webexteamssdk/utils.py#L234-L255
def json_dict(json_data):
    """Given a dictionary or JSON string; return a dictionary.

    Args:
        json_data(dict, str): Input JSON object.

    Returns:
        A Python dictionary with the contents of the JSON object.

    Raises:
        TypeError: If the input object is not a dictionary or string.
    """
    if isinstance(json_data, dict):
        return json_data
    elif isinstance(json_data, str):
        # `str` replaces the Python 2-only `basestring`, which is a
        # NameError on Python 3 unless shimmed.
        return json.loads(json_data, object_hook=OrderedDict)
    else:
        raise TypeError(
            "'json_data' must be a dictionary or valid JSON string; "
            "received: {!r}".format(json_data)
        )
[ "def", "json_dict", "(", "json_data", ")", ":", "if", "isinstance", "(", "json_data", ",", "dict", ")", ":", "return", "json_data", "elif", "isinstance", "(", "json_data", ",", "basestring", ")", ":", "return", "json", ".", "loads", "(", "json_data", ",", ...
Given a dictionary or JSON string; return a dictionary. Args: json_data(dict, str): Input JSON object. Returns: A Python dictionary with the contents of the JSON object. Raises: TypeError: If the input object is not a dictionary or string.
[ "Given", "a", "dictionary", "or", "JSON", "string", ";", "return", "a", "dictionary", "." ]
python
test
inveniosoftware/invenio-oaiserver
invenio_oaiserver/ext.py
https://github.com/inveniosoftware/invenio-oaiserver/blob/eae765e32bd816ddc5612d4b281caf205518b512/invenio_oaiserver/ext.py#L58-L65
def register_signals_oaiset(self):
    """Register OAISet signals to update records."""
    from .models import OAISet
    from .receivers import after_insert_oai_set, \
        after_update_oai_set, after_delete_oai_set
    # Wire one receiver per SQLAlchemy mapper event.
    handlers = (
        ('after_insert', after_insert_oai_set),
        ('after_update', after_update_oai_set),
        ('after_delete', after_delete_oai_set),
    )
    for event_name, receiver in handlers:
        listen(OAISet, event_name, receiver)
[ "def", "register_signals_oaiset", "(", "self", ")", ":", "from", ".", "models", "import", "OAISet", "from", ".", "receivers", "import", "after_insert_oai_set", ",", "after_update_oai_set", ",", "after_delete_oai_set", "listen", "(", "OAISet", ",", "'after_insert'", ...
Register OAISet signals to update records.
[ "Register", "OAISet", "signals", "to", "update", "records", "." ]
python
train
thebjorn/pydeps
pydeps/render_context.py
https://github.com/thebjorn/pydeps/blob/1e6715b7bea47a40e8042821b57937deaaa0fdc3/pydeps/render_context.py#L47-L52
def text(self):
    """Get value of output stream (StringIO).
    """
    # Flush/close the wrapping output stream, if any, before reading
    # the accumulated buffer.
    out_stream = self.out
    if out_stream:
        out_stream.close()  # pragma: nocover
    return self.fp.getvalue()
[ "def", "text", "(", "self", ")", ":", "if", "self", ".", "out", ":", "self", ".", "out", ".", "close", "(", ")", "# pragma: nocover", "return", "self", ".", "fp", ".", "getvalue", "(", ")" ]
Get value of output stream (StringIO).
[ "Get", "value", "of", "output", "stream", "(", "StringIO", ")", "." ]
python
train
LionelAuroux/pyrser
pyrser/grammar.py
https://github.com/LionelAuroux/pyrser/blob/f153a97ef2b6bf915a1ed468c0252a9a59b754d5/pyrser/grammar.py#L191-L203
def from_file(fn: str, entry=None, *optional_inherit) -> Grammar:
    """ Create a Grammar from a file
    """
    import os.path
    # Guard clause: bail out early when the file is missing.
    if not os.path.exists(fn):
        raise Exception("File not Found!")
    with open(fn, 'r') as source:
        bnf = source.read()
    bases = [Grammar] + list(optional_inherit)
    scope = {'grammar': bnf, 'entry': entry, 'source': fn}
    return build_grammar(tuple(bases), scope)
[ "def", "from_file", "(", "fn", ":", "str", ",", "entry", "=", "None", ",", "*", "optional_inherit", ")", "->", "Grammar", ":", "import", "os", ".", "path", "if", "os", ".", "path", ".", "exists", "(", "fn", ")", ":", "f", "=", "open", "(", "fn", ...
Create a Grammar from a file
[ "Create", "a", "Grammar", "from", "a", "file" ]
python
test
python-bonobo/bonobo
bonobo/commands/__init__.py
https://github.com/python-bonobo/bonobo/blob/70c8e62c4a88576976e5b52e58d380d6e3227ab4/bonobo/commands/__init__.py#L9-L64
def entrypoint(args=None):
    """
    Main callable for "bonobo" entrypoint.

    Will load commands from "bonobo.commands" entrypoints, using stevedore.
    """
    # Install the exception hook and configure root logging level.
    mondrian.setup(excepthook=True)
    logger = logging.getLogger()
    logger.setLevel(settings.LOGGING_LEVEL.get())

    # Top-level parser: a global --debug flag plus one subparser per command.
    parser = argparse.ArgumentParser()
    parser.add_argument("--debug", "-D", action="store_true")
    subparsers = parser.add_subparsers(dest="command")
    subparsers.required = True

    # Maps command name -> callable handler.
    commands = {}

    def register_extension(ext):
        # Register one stevedore extension as a subcommand; failures are
        # logged but do not prevent the other commands from loading.
        nonlocal commands
        try:
            parser = subparsers.add_parser(ext.name)
            if isinstance(ext.plugin, type) and issubclass(ext.plugin, BaseCommand):
                # current way, class based.
                cmd = ext.plugin()
                cmd.add_arguments(parser)
                cmd.__name__ = ext.name
                commands[ext.name] = cmd.handle
            else:
                # old school, function based.
                commands[ext.name] = ext.plugin(parser)
        except Exception:
            logger.exception("Error while loading command {}.".format(ext.name))

    from stevedore import ExtensionManager

    # Discover and register every "bonobo.commands" entrypoint.
    mgr = ExtensionManager(namespace="bonobo.commands")
    mgr.map(register_extension)

    parsed_args = parser.parse_args(args).__dict__

    # --debug raises the logging level for the rest of the run.
    if parsed_args.pop("debug", False):
        settings.DEBUG.set(True)
        settings.LOGGING_LEVEL.set(logging.DEBUG)
        logger.setLevel(settings.LOGGING_LEVEL.get())

    logger.debug("Command: " + parsed_args["command"] + " Arguments: " + repr(parsed_args))

    # Get command handler, execute, rinse.
    command = commands[parsed_args.pop("command")]
    command(**parsed_args)

    return 0
[ "def", "entrypoint", "(", "args", "=", "None", ")", ":", "mondrian", ".", "setup", "(", "excepthook", "=", "True", ")", "logger", "=", "logging", ".", "getLogger", "(", ")", "logger", ".", "setLevel", "(", "settings", ".", "LOGGING_LEVEL", ".", "get", ...
Main callable for "bonobo" entrypoint. Will load commands from "bonobo.commands" entrypoints, using stevedore.
[ "Main", "callable", "for", "bonobo", "entrypoint", "." ]
python
train
bkjones/pyrabbit
pyrabbit/api.py
https://github.com/bkjones/pyrabbit/blob/e8a9f74ed5c6bba958994fb9a72c396e6a99ea0f/pyrabbit/api.py#L218-L230
def get_vhost(self, vname):
    """
    Returns the attributes of a single named vhost in a dict.

    :param string vname: Name of the vhost to get.
    :returns dict vhost: Attribute dict for the named vhost
    """
    # URL-encode the name (including '/') before building the path.
    encoded_name = quote(vname, '')
    path = Client.urls['vhosts_by_name'] % encoded_name
    return self._call(path, 'GET', headers=Client.json_headers)
[ "def", "get_vhost", "(", "self", ",", "vname", ")", ":", "vname", "=", "quote", "(", "vname", ",", "''", ")", "path", "=", "Client", ".", "urls", "[", "'vhosts_by_name'", "]", "%", "vname", "vhost", "=", "self", ".", "_call", "(", "path", ",", "'GE...
Returns the attributes of a single named vhost in a dict. :param string vname: Name of the vhost to get. :returns dict vhost: Attribute dict for the named vhost
[ "Returns", "the", "attributes", "of", "a", "single", "named", "vhost", "in", "a", "dict", "." ]
python
train
ethereum/py-trie
trie/binary.py
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/binary.py#L297-L303
def delete(self, key):
    """
    Equals to setting the value to None
    """
    validate_is_bytes(key)
    # Storing the empty value removes the key from the trie.
    bin_key = encode_to_bin(key)
    self.root_hash = self._set(self.root_hash, bin_key, b'')
[ "def", "delete", "(", "self", ",", "key", ")", ":", "validate_is_bytes", "(", "key", ")", "self", ".", "root_hash", "=", "self", ".", "_set", "(", "self", ".", "root_hash", ",", "encode_to_bin", "(", "key", ")", ",", "b''", ")" ]
Equals to setting the value to None
[ "Equals", "to", "setting", "the", "value", "to", "None" ]
python
train
grycap/RADL
radl/radl_parse.py
https://github.com/grycap/RADL/blob/03ccabb0313a48a5aa0e20c1f7983fddcb95e9cb/radl/radl_parse.py#L236-L246
def p_contextualize_items(self, t):
    """contextualize_items : contextualize_items contextualize_item
                           | contextualize_item
                           | empty"""
    # Three symbols: extend the already-built list with the new item.
    if len(t) == 3:
        items = t[1]
        items.append(t[2])
        t[0] = items
    else:
        # Single item starts a new list; `empty` yields an empty one.
        t[0] = [t[1]] if t[1] else []
[ "def", "p_contextualize_items", "(", "self", ",", "t", ")", ":", "if", "len", "(", "t", ")", "==", "3", ":", "t", "[", "0", "]", "=", "t", "[", "1", "]", "t", "[", "0", "]", ".", "append", "(", "t", "[", "2", "]", ")", "elif", "t", "[", ...
contextualize_items : contextualize_items contextualize_item | contextualize_item | empty
[ "contextualize_items", ":", "contextualize_items", "contextualize_item", "|", "contextualize_item", "|", "empty" ]
python
train
saltstack/salt
salt/modules/win_iis.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_iis.py#L1749-L1784
def create_backup(name):
    r'''
    Backup an IIS Configuration on the System.

    .. versionadded:: 2017.7.0

    .. note::
        Backups are stored in the ``$env:Windir\System32\inetsrv\backup``
        folder.

    Args:
        name (str): The name to give the backup

    Returns:
        bool: True if successful, otherwise False

    CLI Example:

    .. code-block:: bash

        salt '*' win_iis.create_backup good_config_20170209
    '''
    if name in list_backups():
        raise CommandExecutionError('Backup already present: {0}'.format(name))

    # Run Backup-WebConfiguration through the server manager helper.
    cmd_ret = _srvmgr(['Backup-WebConfiguration', '-Name', "'{0}'".format(name)])

    if cmd_ret['retcode'] != 0:
        msg = 'Unable to backup web configuration: {0}\nError: {1}' \
              ''.format(name, cmd_ret['stderr'])
        raise CommandExecutionError(msg)

    # Confirm the backup now shows up in the backup list.
    return name in list_backups()
[ "def", "create_backup", "(", "name", ")", ":", "if", "name", "in", "list_backups", "(", ")", ":", "raise", "CommandExecutionError", "(", "'Backup already present: {0}'", ".", "format", "(", "name", ")", ")", "ps_cmd", "=", "[", "'Backup-WebConfiguration'", ",", ...
r''' Backup an IIS Configuration on the System. .. versionadded:: 2017.7.0 .. note:: Backups are stored in the ``$env:Windir\System32\inetsrv\backup`` folder. Args: name (str): The name to give the backup Returns: bool: True if successful, otherwise False CLI Example: .. code-block:: bash salt '*' win_iis.create_backup good_config_20170209
[ "r", "Backup", "an", "IIS", "Configuration", "on", "the", "System", "." ]
python
train
gear11/pypelogs
pypeout/mysql_out.py
https://github.com/gear11/pypelogs/blob/da5dc0fee5373a4be294798b5e32cd0a803d8bbe/pypeout/mysql_out.py#L43-L53
def get_existing_keys(self, events):
    """Returns the list of keys from the given event source that are already in the DB"""
    data = [event[self.key] for event in events]
    # One "%s" placeholder per key value.
    placeholders = ','.join('%s' for _ in data)
    query = 'SELECT %s FROM %s WHERE %s IN (%s)' % (self.key, self.table, self.key, placeholders)
    cursor = self.conn.conn.cursor()
    cursor.execute(query, data)
    LOG.info("%s (data: %s)", query, data)
    existing = [row[0] for row in cursor.fetchall()]
    LOG.info("Existing IDs: %s" % existing)
    return set(existing)
[ "def", "get_existing_keys", "(", "self", ",", "events", ")", ":", "data", "=", "[", "e", "[", "self", ".", "key", "]", "for", "e", "in", "events", "]", "ss", "=", "','", ".", "join", "(", "[", "'%s'", "for", "_", "in", "data", "]", ")", "query"...
Returns the list of keys from the given event source that are already in the DB
[ "Returns", "the", "list", "of", "keys", "from", "the", "given", "event", "source", "that", "are", "already", "in", "the", "DB" ]
python
train
grundic/yagocd
yagocd/resources/pipeline.py
https://github.com/grundic/yagocd/blob/4c75336ae6f107c8723d37b15e52169151822127/yagocd/resources/pipeline.py#L642-L653
def stages(self):
    """
    Method for getting stages from pipeline instance.

    :return: arrays of stages
    :rtype: list of yagocd.resources.stage.StageInstance
    """
    # Wrap each raw stage payload in a StageInstance bound to this pipeline.
    return [
        StageInstance(session=self._session, data=stage_data, pipeline=self)
        for stage_data in self.data.stages
    ]
[ "def", "stages", "(", "self", ")", ":", "stages", "=", "list", "(", ")", "for", "data", "in", "self", ".", "data", ".", "stages", ":", "stages", ".", "append", "(", "StageInstance", "(", "session", "=", "self", ".", "_session", ",", "data", "=", "d...
Method for getting stages from pipeline instance. :return: arrays of stages :rtype: list of yagocd.resources.stage.StageInstance
[ "Method", "for", "getting", "stages", "from", "pipeline", "instance", "." ]
python
train
bcbio/bcbio-nextgen
bcbio/distributed/ipython.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/ipython.py#L33-L65
def create(parallel, dirs, config): """Create a cluster based on the provided parallel arguments. Returns an IPython view on the cluster, enabling processing on jobs. Adds a mincores specification if he have machines with a larger number of cores to allow jobs to be batched together for shared memory usage. """ profile_dir = utils.safe_makedir(os.path.join(dirs["work"], get_log_dir(config), "ipython")) has_mincores = any(x.startswith("mincores=") for x in parallel["resources"]) cores = min(_get_common_cores(config["resources"]), parallel["system_cores"]) if cores > 1 and not has_mincores: adj_cores = max(1, int(math.floor(cores * float(parallel.get("mem_pct", 1.0))))) # if we have less scheduled cores than per machine, use the scheduled count if cores > parallel["cores"]: cores = parallel["cores"] # if we have less total cores required for the entire process, use that elif adj_cores > parallel["num_jobs"] * parallel["cores_per_job"]: cores = parallel["num_jobs"] * parallel["cores_per_job"] else: cores = adj_cores cores = per_machine_target_cores(cores, parallel["num_jobs"]) parallel["resources"].append("mincores=%s" % cores) return ipython_cluster.cluster_view(parallel["scheduler"].lower(), parallel["queue"], parallel["num_jobs"], parallel["cores_per_job"], profile=profile_dir, start_wait=parallel["timeout"], extra_params={"resources": parallel["resources"], "mem": parallel["mem"], "tag": parallel.get("tag"), "run_local": parallel.get("run_local"), "local_controller": parallel.get("local_controller")}, retries=parallel.get("retries"))
[ "def", "create", "(", "parallel", ",", "dirs", ",", "config", ")", ":", "profile_dir", "=", "utils", ".", "safe_makedir", "(", "os", ".", "path", ".", "join", "(", "dirs", "[", "\"work\"", "]", ",", "get_log_dir", "(", "config", ")", ",", "\"ipython\""...
Create a cluster based on the provided parallel arguments. Returns an IPython view on the cluster, enabling processing on jobs. Adds a mincores specification if he have machines with a larger number of cores to allow jobs to be batched together for shared memory usage.
[ "Create", "a", "cluster", "based", "on", "the", "provided", "parallel", "arguments", "." ]
python
train
DLR-RM/RAFCON
source/rafcon/gui/mygaphas/items/ports.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/mygaphas/items/ports.py#L856-L877
def _get_port_center_position(self, width): """Calculates the center position of the port rectangle The port itself can be positioned in the corner, the center of the port rectangle however is restricted by the width of the rectangle. This method therefore calculates the center, depending on the position of the port and the width of the rectangle. :param float width: The width of the rectangle :return: The center position of the rectangle :rtype: float, float """ x, y = self.pos.x.value, self.pos.y.value if self.side is SnappedSide.TOP or self.side is SnappedSide.BOTTOM: if x - width / 2. < 0: x = width / 2 elif x + width / 2. > self.parent.width: x = self.parent.width - width / 2. else: if y - width / 2. < 0: y = width / 2 elif y + width / 2. > self.parent.height: y = self.parent.height - width / 2. return x, y
[ "def", "_get_port_center_position", "(", "self", ",", "width", ")", ":", "x", ",", "y", "=", "self", ".", "pos", ".", "x", ".", "value", ",", "self", ".", "pos", ".", "y", ".", "value", "if", "self", ".", "side", "is", "SnappedSide", ".", "TOP", ...
Calculates the center position of the port rectangle The port itself can be positioned in the corner, the center of the port rectangle however is restricted by the width of the rectangle. This method therefore calculates the center, depending on the position of the port and the width of the rectangle. :param float width: The width of the rectangle :return: The center position of the rectangle :rtype: float, float
[ "Calculates", "the", "center", "position", "of", "the", "port", "rectangle" ]
python
train
bitcraze/crazyflie-lib-python
cflib/crazyflie/localization.py
https://github.com/bitcraze/crazyflie-lib-python/blob/f6ebb4eb315bbe6e02db518936ac17fb615b2af8/cflib/crazyflie/localization.py#L113-L122
def send_short_lpp_packet(self, dest_id, data): """ Send ultra-wide-band LPP packet to dest_id """ pk = CRTPPacket() pk.port = CRTPPort.LOCALIZATION pk.channel = self.GENERIC_CH pk.data = struct.pack('<BB', self.LPS_SHORT_LPP_PACKET, dest_id) + data self._cf.send_packet(pk)
[ "def", "send_short_lpp_packet", "(", "self", ",", "dest_id", ",", "data", ")", ":", "pk", "=", "CRTPPacket", "(", ")", "pk", ".", "port", "=", "CRTPPort", ".", "LOCALIZATION", "pk", ".", "channel", "=", "self", ".", "GENERIC_CH", "pk", ".", "data", "="...
Send ultra-wide-band LPP packet to dest_id
[ "Send", "ultra", "-", "wide", "-", "band", "LPP", "packet", "to", "dest_id" ]
python
train
swharden/SWHLab
scripts/old/helper.py
https://github.com/swharden/SWHLab/blob/a86c3c65323cec809a4bd4f81919644927094bf5/scripts/old/helper.py#L12-L25
def newVersion(): """increments version counter in swhlab/version.py""" version=None fname='../swhlab/version.py' with open(fname) as f: raw=f.read().split("\n") for i,line in enumerate(raw): if line.startswith("__counter__"): if version is None: version = int(line.split("=")[1]) raw[i]="__counter__=%d"%(version+1) with open(fname,'w') as f: f.write("\n".join(raw)) print("upgraded from version %03d to %03d"%(version,version+1))
[ "def", "newVersion", "(", ")", ":", "version", "=", "None", "fname", "=", "'../swhlab/version.py'", "with", "open", "(", "fname", ")", "as", "f", ":", "raw", "=", "f", ".", "read", "(", ")", ".", "split", "(", "\"\\n\"", ")", "for", "i", ",", "line...
increments version counter in swhlab/version.py
[ "increments", "version", "counter", "in", "swhlab", "/", "version", ".", "py" ]
python
valid
moralrecordings/mrcrowbar
mrcrowbar/utils.py
https://github.com/moralrecordings/mrcrowbar/blob/b1ed882c4555552e7656b2d84aca543184577fa3/mrcrowbar/utils.py#L592-L627
def put_bits( self, value, count ): """Push bits into the target. value Integer containing bits to push, ordered from least-significant bit to most-significant bit. count Number of bits to push to the target. """ for _ in range( count ): # bits are retrieved from the source LSB first bit = (value & 1) value >>= 1 # however, bits are put into the result based on the rule if self.bits_reverse: if self.insert_at_msb: self.current_bits |= (bit << (self.bits_remaining-1)) else: self.current_bits <<= 1 self.current_bits |= bit else: if self.insert_at_msb: self.current_bits >>= 1 self.current_bits |= (bit << 7) else: self.current_bits |= (bit << (8-self.bits_remaining)) self.bits_remaining -= 1 if self.bits_remaining <= 0: self.output.append( self.current_bits ) self.current_bits = 0 self.bits_remaining = 8
[ "def", "put_bits", "(", "self", ",", "value", ",", "count", ")", ":", "for", "_", "in", "range", "(", "count", ")", ":", "# bits are retrieved from the source LSB first", "bit", "=", "(", "value", "&", "1", ")", "value", ">>=", "1", "# however, bits are put ...
Push bits into the target. value Integer containing bits to push, ordered from least-significant bit to most-significant bit. count Number of bits to push to the target.
[ "Push", "bits", "into", "the", "target", "." ]
python
train
collectiveacuity/labPack
labpack/databases/sql.py
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/databases/sql.py#L574-L733
def list(self, query_criteria=None, order_criteria=None): ''' a generator method to list records in table which match query criteria :param query_criteria: dictionary with schema dot-path field names and query qualifiers :param order_criteria: list of single keypair dictionaries with field names to order by :return: generator object with string of primary key an example of how to construct the query_criteria argument: query_criteria = { '.path.to.number': { 'min_value': 4.5 }, '.path.to.string': { 'discrete_values': [ 'pond', 'lake', 'stream', 'brook' ] } } NOTE: sql only supports a limited number of query conditions and all list fields in a record are stored as a blob. this method constructs a sql query which contains clauses wherever the query conditions can be translated one-to-one into sql keywords and returns the entire record of each qualifying record. once sql returns its results, the remaining query conditions are applied to the record and only those results which match all conditions are yield by the generator. as such, depending upon the conditions selected, this method acts more or less like a SCAN of the entire database. if no sql supported conditions are provided, the method will look through all records. native SQL supported conditions float, integer & strings: value_exists equal_to discrete_values excluded_values greater_than less_than max_value min_value booleans: value_exists equal_to lists: value_exists NOTE: the full list of all criteria are found in the reference page for the jsonmodel module as well as the query-rules.json file included in the module. http://collectiveacuity.github.io/jsonModel/reference/#query-criteria an example of how to construct the order_criteria argument: order_criteria = [ { '.path.to.number': 'descend' }, { '.path.to.string': '' } ] NOTE: results can be ordered either by ascending or descending values. to order in ascending order, leave the value for the field empty. 
any value for the field key automatically is interpreted as descending order ''' title = '%s.list' % self.__class__.__name__ from sqlalchemy import desc as order_desc # validate inputs if query_criteria: self.model.query(query_criteria) else: query_criteria = {} if order_criteria: object_title = '%s(%s=%s)' % (title, 'order_criteria', str(order_criteria)) self.fields.validate(order_criteria, '.order_criteria', object_title) for i in range(len(order_criteria)): criterion = order_criteria[i] for key, value in criterion.items(): criteria_key = key if key.find('.') != 0: criteria_key = '.%s' % key if criteria_key not in self.model.keyMap.keys(): raise ValueError('%s(order_criteria=[...]) item %s key %s does not exist in record_schema.' % (title, i, key)) else: order_criteria = [] # construct select statement with sql supported conditions # http://docs.sqlalchemy.org/en/latest/orm/tutorial.html#common-filter-operators select_object = self.table.select() for key, value in query_criteria.items(): record_key = key map_key = key if key.find('.') == 0: record_key = key[1:] else: map_key = '.%s' % key if record_key: if self.item_key.findall(record_key): pass else: test_value = value if not isinstance(value, dict): test_value = { 'equal_to': value } column_object = getattr(self.table.c, record_key) for k, v in test_value.items(): if k == 'value_exists': if self.model.keyMap[map_key]['value_datatype'] in ('string', 'number', 'boolean', 'list'): if v: select_object = select_object.where(column_object!=None) else: select_object = select_object.where(column_object==None) else: if self.model.keyMap[map_key]['value_datatype'] in ('string', 'number', 'boolean'): if k == 'equal_to': select_object = select_object.where(column_object==v) elif k == 'discrete_values': select_object = select_object.where(column_object.in_(v)) elif k == 'excluded_values': select_object = select_object.where(~column_object.in_(v)) elif k == 'greater_than': select_object = 
select_object.where(column_object.__gt__(v)) elif k == 'less_than': select_object = select_object.where(column_object.__lt__(v)) elif k == 'max_value': select_object = select_object.where(column_object.__le__(v)) elif k == 'min_value': select_object = select_object.where(column_object.__ge__(v)) # add order criteria for criterion in order_criteria: key, value = next(iter(criterion.items())) record_key = key if key.find('.') == 0: record_key = key[1:] if record_key: if self.item_key.findall(record_key): pass else: column_object = getattr(self.table.c, record_key) if value: select_object = select_object.order_by(order_desc(column_object)) else: select_object = select_object.order_by(column_object) # execute query on database # print(select_object) for record in self.session.execute(select_object).fetchall(): record_details = self._reconstruct_record(record) # filter results with non-sql supported conditions if query_criteria: if self.model.query(query_criteria, record_details): yield record_details else: yield record_details
[ "def", "list", "(", "self", ",", "query_criteria", "=", "None", ",", "order_criteria", "=", "None", ")", ":", "title", "=", "'%s.list'", "%", "self", ".", "__class__", ".", "__name__", "from", "sqlalchemy", "import", "desc", "as", "order_desc", "# validate i...
a generator method to list records in table which match query criteria :param query_criteria: dictionary with schema dot-path field names and query qualifiers :param order_criteria: list of single keypair dictionaries with field names to order by :return: generator object with string of primary key an example of how to construct the query_criteria argument: query_criteria = { '.path.to.number': { 'min_value': 4.5 }, '.path.to.string': { 'discrete_values': [ 'pond', 'lake', 'stream', 'brook' ] } } NOTE: sql only supports a limited number of query conditions and all list fields in a record are stored as a blob. this method constructs a sql query which contains clauses wherever the query conditions can be translated one-to-one into sql keywords and returns the entire record of each qualifying record. once sql returns its results, the remaining query conditions are applied to the record and only those results which match all conditions are yield by the generator. as such, depending upon the conditions selected, this method acts more or less like a SCAN of the entire database. if no sql supported conditions are provided, the method will look through all records. native SQL supported conditions float, integer & strings: value_exists equal_to discrete_values excluded_values greater_than less_than max_value min_value booleans: value_exists equal_to lists: value_exists NOTE: the full list of all criteria are found in the reference page for the jsonmodel module as well as the query-rules.json file included in the module. http://collectiveacuity.github.io/jsonModel/reference/#query-criteria an example of how to construct the order_criteria argument: order_criteria = [ { '.path.to.number': 'descend' }, { '.path.to.string': '' } ] NOTE: results can be ordered either by ascending or descending values. to order in ascending order, leave the value for the field empty. any value for the field key automatically is interpreted as descending order
[ "a", "generator", "method", "to", "list", "records", "in", "table", "which", "match", "query", "criteria", ":", "param", "query_criteria", ":", "dictionary", "with", "schema", "dot", "-", "path", "field", "names", "and", "query", "qualifiers", ":", "param", ...
python
train
riga/tfdeploy
tfdeploy.py
https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L115-L123
def get(self, *names, **kwargs): """ get(*names, key=None) Returns one or more :py:class:`Tensor` instances given by *names* using a deep lookup within the model. If *key* is not *None*, only the root tensor with that *key* is traversed. *None* is returned when no tensor was found. In case a tensor is passed, it's name is used for the lookup. """ tensors = tuple(self._get(name, **kwargs) for name in names) return tensors[0] if len(names) == 1 else tensors
[ "def", "get", "(", "self", ",", "*", "names", ",", "*", "*", "kwargs", ")", ":", "tensors", "=", "tuple", "(", "self", ".", "_get", "(", "name", ",", "*", "*", "kwargs", ")", "for", "name", "in", "names", ")", "return", "tensors", "[", "0", "]"...
get(*names, key=None) Returns one or more :py:class:`Tensor` instances given by *names* using a deep lookup within the model. If *key* is not *None*, only the root tensor with that *key* is traversed. *None* is returned when no tensor was found. In case a tensor is passed, it's name is used for the lookup.
[ "get", "(", "*", "names", "key", "=", "None", ")", "Returns", "one", "or", "more", ":", "py", ":", "class", ":", "Tensor", "instances", "given", "by", "*", "names", "*", "using", "a", "deep", "lookup", "within", "the", "model", ".", "If", "*", "key...
python
train
xtuml/pyxtuml
xtuml/tools.py
https://github.com/xtuml/pyxtuml/blob/7dd9343b9a0191d1db1887ab9288d0a026608d9a/xtuml/tools.py#L227-L236
def default_accept(self, node, **kwargs): ''' The default accept behaviour is to decend into the iterable member *node.children* (if available). ''' if not hasattr(node, 'children'): return for child in node.children: self.accept(child, **kwargs)
[ "def", "default_accept", "(", "self", ",", "node", ",", "*", "*", "kwargs", ")", ":", "if", "not", "hasattr", "(", "node", ",", "'children'", ")", ":", "return", "for", "child", "in", "node", ".", "children", ":", "self", ".", "accept", "(", "child",...
The default accept behaviour is to decend into the iterable member *node.children* (if available).
[ "The", "default", "accept", "behaviour", "is", "to", "decend", "into", "the", "iterable", "member", "*", "node", ".", "children", "*", "(", "if", "available", ")", "." ]
python
test
saltstack/salt
salt/states/boto_vpc.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_vpc.py#L1649-L1736
def vpc_peering_connection_present(name, requester_vpc_id=None, requester_vpc_name=None, peer_vpc_id=None, peer_vpc_name=None, conn_name=None, peer_owner_id=None, peer_region=None, region=None, key=None, keyid=None, profile=None): ''' name Name of the state requester_vpc_id ID of the requesting VPC. Exclusive with requester_vpc_name. requester_vpc_name Name tag of the requesting VPC. Exclusive with requester_vpc_id. peer_vpc_id ID of the VPC tp crete VPC peering connection with. This can be a VPC in another account. Exclusive with peer_vpc_name. peer_vpc_name Name tag of the VPC tp crete VPC peering connection with. This can only be a VPC in the same account, else resolving it into a vpc ID will fail. Exclusive with peer_vpc_id. conn_name The name to use for this VPC peering connection. peer_owner_id ID of the owner of the peer VPC. Defaults to your account ID, so a value is required if peering with a VPC in a different account. peer_region Region of peer VPC. For inter-region vpc peering connections. Not required for intra-region peering connections. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. .. versionadded:: 2016.11.0 Example: .. 
code-block:: yaml ensure peering twixt local vpc and the other guys: boto_vpc.vpc_peering_connection_present: - requester_vpc_name: my_local_vpc - peer_vpc_name: some_other_guys_vpc - conn_name: peering_from_here_to_there - peer_owner_id: 012345654321 ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } if __salt__['boto_vpc.is_peering_connection_pending'](conn_name=conn_name, region=region, key=key, keyid=keyid, profile=profile): if __salt__['boto_vpc.peering_connection_pending_from_vpc'](conn_name=conn_name, vpc_id=requester_vpc_id, vpc_name=requester_vpc_name, region=region, key=key, keyid=keyid, profile=profile): ret['comment'] = ('VPC peering {0} already requested - pending ' 'acceptance by {1}'.format(conn_name, peer_owner_id or peer_vpc_name or peer_vpc_id)) log.info(ret['comment']) return ret return accept_vpc_peering_connection(name=name, conn_name=conn_name, region=region, key=key, keyid=keyid, profile=profile) return request_vpc_peering_connection(name=name, requester_vpc_id=requester_vpc_id, requester_vpc_name=requester_vpc_name, peer_vpc_id=peer_vpc_id, peer_vpc_name=peer_vpc_name, conn_name=conn_name, peer_owner_id=peer_owner_id, peer_region=peer_region, region=region, key=key, keyid=keyid, profile=profile)
[ "def", "vpc_peering_connection_present", "(", "name", ",", "requester_vpc_id", "=", "None", ",", "requester_vpc_name", "=", "None", ",", "peer_vpc_id", "=", "None", ",", "peer_vpc_name", "=", "None", ",", "conn_name", "=", "None", ",", "peer_owner_id", "=", "Non...
name Name of the state requester_vpc_id ID of the requesting VPC. Exclusive with requester_vpc_name. requester_vpc_name Name tag of the requesting VPC. Exclusive with requester_vpc_id. peer_vpc_id ID of the VPC tp crete VPC peering connection with. This can be a VPC in another account. Exclusive with peer_vpc_name. peer_vpc_name Name tag of the VPC tp crete VPC peering connection with. This can only be a VPC in the same account, else resolving it into a vpc ID will fail. Exclusive with peer_vpc_id. conn_name The name to use for this VPC peering connection. peer_owner_id ID of the owner of the peer VPC. Defaults to your account ID, so a value is required if peering with a VPC in a different account. peer_region Region of peer VPC. For inter-region vpc peering connections. Not required for intra-region peering connections. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. .. versionadded:: 2016.11.0 Example: .. code-block:: yaml ensure peering twixt local vpc and the other guys: boto_vpc.vpc_peering_connection_present: - requester_vpc_name: my_local_vpc - peer_vpc_name: some_other_guys_vpc - conn_name: peering_from_here_to_there - peer_owner_id: 012345654321
[ "name", "Name", "of", "the", "state" ]
python
train
dshean/demcoreg
demcoreg/dem_mask.py
https://github.com/dshean/demcoreg/blob/abd6be75d326b35f52826ee30dff01f9e86b4b52/demcoreg/dem_mask.py#L34-L49
def get_nlcd_fn(): """Calls external shell script `get_nlcd.sh` to fetch: 2011 Land Use Land Cover (nlcd) grids, 30 m http://www.mrlc.gov/nlcd11_leg.php """ #This is original filename, which requires ~17 GB #nlcd_fn = os.path.join(datadir, 'nlcd_2011_landcover_2011_edition_2014_10_10/nlcd_2011_landcover_2011_edition_2014_10_10.img') #get_nlcd.sh now creates a compressed GTiff, which is 1.1 GB nlcd_fn = os.path.join(datadir, 'nlcd_2011_landcover_2011_edition_2014_10_10/nlcd_2011_landcover_2011_edition_2014_10_10.tif') if not os.path.exists(nlcd_fn): cmd = ['get_nlcd.sh',] #subprocess.call(cmd) sys.exit("Missing nlcd data source. If already downloaded, specify correct datadir. If not, run `%s` to download" % cmd[0]) return nlcd_fn
[ "def", "get_nlcd_fn", "(", ")", ":", "#This is original filename, which requires ~17 GB", "#nlcd_fn = os.path.join(datadir, 'nlcd_2011_landcover_2011_edition_2014_10_10/nlcd_2011_landcover_2011_edition_2014_10_10.img')", "#get_nlcd.sh now creates a compressed GTiff, which is 1.1 GB", "nlcd_fn", "=...
Calls external shell script `get_nlcd.sh` to fetch: 2011 Land Use Land Cover (nlcd) grids, 30 m http://www.mrlc.gov/nlcd11_leg.php
[ "Calls", "external", "shell", "script", "get_nlcd", ".", "sh", "to", "fetch", ":" ]
python
train
JarryShaw/PyPCAPKit
src/const/ipv4/tos_del.py
https://github.com/JarryShaw/PyPCAPKit/blob/c7f0da9aebc2cf210bf8f8b912f7d3cbb98ca10e/src/const/ipv4/tos_del.py#L16-L22
def get(key, default=-1): """Backport support for original codes.""" if isinstance(key, int): return TOS_DEL(key) if key not in TOS_DEL._member_map_: extend_enum(TOS_DEL, key, default) return TOS_DEL[key]
[ "def", "get", "(", "key", ",", "default", "=", "-", "1", ")", ":", "if", "isinstance", "(", "key", ",", "int", ")", ":", "return", "TOS_DEL", "(", "key", ")", "if", "key", "not", "in", "TOS_DEL", ".", "_member_map_", ":", "extend_enum", "(", "TOS_D...
Backport support for original codes.
[ "Backport", "support", "for", "original", "codes", "." ]
python
train
saltstack/salt
salt/modules/tls.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/tls.py#L1857-L2035
def revoke_cert( ca_name, CN, cacert_path=None, ca_filename=None, cert_path=None, cert_filename=None, crl_file=None, digest='sha256', ): ''' Revoke a certificate. .. versionadded:: 2015.8.0 ca_name Name of the CA. CN Common name matching the certificate signing request. cacert_path Absolute path to ca certificates root directory. ca_filename Alternative filename for the CA. cert_path Path to the cert file. cert_filename Alternative filename for the certificate, useful when using special characters in the CN. crl_file Full path to the CRL file. digest The message digest algorithm. Must be a string describing a digest algorithm supported by OpenSSL (by EVP_get_digestbyname, specifically). For example, "md5" or "sha1". Default: 'sha256' CLI Example: .. code-block:: bash salt '*' tls.revoke_cert ca_name='koji' \ ca_filename='ca' \ crl_file='/etc/openvpn/team1/crl.pem' ''' set_ca_path(cacert_path) ca_dir = '{0}/{1}'.format(cert_base_path(), ca_name) if ca_filename is None: ca_filename = '{0}_ca_cert'.format(ca_name) if cert_path is None: cert_path = '{0}/{1}/certs'.format(_cert_base_path(), ca_name) if cert_filename is None: cert_filename = '{0}'.format(CN) try: with salt.utils.files.fopen('{0}/{1}/{2}.crt'.format( cert_base_path(), ca_name, ca_filename)) as fp_: ca_cert = OpenSSL.crypto.load_certificate( OpenSSL.crypto.FILETYPE_PEM, fp_.read() ) with salt.utils.files.fopen('{0}/{1}/{2}.key'.format( cert_base_path(), ca_name, ca_filename)) as fp_: ca_key = OpenSSL.crypto.load_privatekey( OpenSSL.crypto.FILETYPE_PEM, fp_.read() ) except IOError: return 'There is no CA named "{0}"'.format(ca_name) client_cert = _read_cert('{0}/{1}.crt'.format(cert_path, cert_filename)) if client_cert is None: return 'There is no client certificate named "{0}"'.format(CN) index_file, expire_date, serial_number, subject = _get_basic_info( ca_name, client_cert, ca_dir) index_serial_subject = '{0}\tunknown\t{1}'.format( serial_number, subject) index_v_data = 'V\t{0}\t\t{1}'.format( 
expire_date, index_serial_subject) index_r_data_pattern = re.compile( r"R\t" + expire_date + r"\t\d{12}Z\t" + re.escape(index_serial_subject)) index_r_data = 'R\t{0}\t{1}\t{2}'.format( expire_date, _four_digit_year_to_two_digit(datetime.utcnow()), index_serial_subject) ret = {} with salt.utils.files.fopen(index_file) as fp_: for line in fp_: line = salt.utils.stringutils.to_unicode(line) if index_r_data_pattern.match(line): revoke_date = line.split('\t')[2] try: datetime.strptime(revoke_date, two_digit_year_fmt) return ('"{0}/{1}.crt" was already revoked, ' 'serial number: {2}').format( cert_path, cert_filename, serial_number ) except ValueError: ret['retcode'] = 1 ret['comment'] = ("Revocation date '{0}' does not match" "format '{1}'".format( revoke_date, two_digit_year_fmt)) return ret elif index_serial_subject in line: __salt__['file.replace']( index_file, index_v_data, index_r_data, backup=False) break crl = OpenSSL.crypto.CRL() with salt.utils.files.fopen(index_file) as fp_: for line in fp_: line = salt.utils.stringutils.to_unicode(line) if line.startswith('R'): fields = line.split('\t') revoked = OpenSSL.crypto.Revoked() revoked.set_serial(salt.utils.stringutils.to_bytes(fields[3])) revoke_date_2_digit = datetime.strptime(fields[2], two_digit_year_fmt) revoked.set_rev_date(salt.utils.stringutils.to_bytes( revoke_date_2_digit.strftime(four_digit_year_fmt) )) crl.add_revoked(revoked) crl_text = crl.export(ca_cert, ca_key, digest=salt.utils.stringutils.to_bytes(digest)) if crl_file is None: crl_file = '{0}/{1}/crl.pem'.format( _cert_base_path(), ca_name ) if os.path.isdir(crl_file): ret['retcode'] = 1 ret['comment'] = 'crl_file "{0}" is an existing directory'.format( crl_file) return ret with salt.utils.files.fopen(crl_file, 'w') as fp_: fp_.write(salt.utils.stringutils.to_str(crl_text)) return ('Revoked Certificate: "{0}/{1}.crt", ' 'serial number: {2}').format( cert_path, cert_filename, serial_number )
[ "def", "revoke_cert", "(", "ca_name", ",", "CN", ",", "cacert_path", "=", "None", ",", "ca_filename", "=", "None", ",", "cert_path", "=", "None", ",", "cert_filename", "=", "None", ",", "crl_file", "=", "None", ",", "digest", "=", "'sha256'", ",", ")", ...
Revoke a certificate. .. versionadded:: 2015.8.0 ca_name Name of the CA. CN Common name matching the certificate signing request. cacert_path Absolute path to ca certificates root directory. ca_filename Alternative filename for the CA. cert_path Path to the cert file. cert_filename Alternative filename for the certificate, useful when using special characters in the CN. crl_file Full path to the CRL file. digest The message digest algorithm. Must be a string describing a digest algorithm supported by OpenSSL (by EVP_get_digestbyname, specifically). For example, "md5" or "sha1". Default: 'sha256' CLI Example: .. code-block:: bash salt '*' tls.revoke_cert ca_name='koji' \ ca_filename='ca' \ crl_file='/etc/openvpn/team1/crl.pem'
[ "Revoke", "a", "certificate", "." ]
python
train
facebook/pyre-check
sapp/sapp/models.py
https://github.com/facebook/pyre-check/blob/4a9604d943d28ef20238505a51acfb1f666328d7/sapp/sapp/models.py#L108-L171
def _merge_by_keys(cls, session, items, hash_item, *attrs): """An object can have multiple attributes as its key. This merges the items to be added with existing items in the database based on their key(s). session: Session object for querying the DB. items: Iterator of items to be added to the DB. hash_item: Function that takes as in put the item to be added and returns a hash of it. attrs: List of attributes of the object/class that represent the object's key. Returns the next item (in items) that is not already in the DB. """ # Note: items is an iterator, not an iterable, 'tee' is a must. items_iter1, items_iter2 = tee(items) keys = {} # map of hash -> keys of the item for i in items_iter1: # An item's key is a map of 'attr -> item[attr]' where attr is # usually a column name. # For 'SharedText', its key would look like: { # "kind": "feature", # "contents": "via tito", # } item_hash = hash_item(i) keys[item_hash] = {attr.key: getattr(i, attr.key) for attr in attrs} # Find existing items. 
existing_ids = {} # map of item_hash -> existing ID cls_attrs = [getattr(cls, attr.key) for attr in attrs] for fetch_keys in split_every(BATCH_SIZE, keys.values()): filters = [] for fetch_key in fetch_keys: # Sub-filters for checking if item with fetch_key is in the DB # Example: [ # SharedText.kind.__eq__("feature"), # SharedText.contents.__eq__("via tito"), # ] subfilter = [ getattr(cls, attr).__eq__(val) for attr, val in fetch_key.items() ] filters.append(and_(*subfilter)) existing_items = ( session.query(cls.id, *cls_attrs).filter(or_(*(filters))).all() ) for existing_item in existing_items: item_hash = hash_item(existing_item) existing_ids[item_hash] = existing_item.id # Now see if we can merge new_items = {} for i in items_iter2: item_hash = hash_item(i) if item_hash in existing_ids: # The key is already in the DB i.id.resolve(existing_ids[item_hash], is_new=False) elif item_hash in new_items: # The key is already in the list of new items i.id.resolve(new_items[item_hash].id, is_new=False) else: # The key is new new_items[item_hash] = i yield i
[ "def", "_merge_by_keys", "(", "cls", ",", "session", ",", "items", ",", "hash_item", ",", "*", "attrs", ")", ":", "# Note: items is an iterator, not an iterable, 'tee' is a must.", "items_iter1", ",", "items_iter2", "=", "tee", "(", "items", ")", "keys", "=", "{",...
An object can have multiple attributes as its key. This merges the items to be added with existing items in the database based on their key(s). session: Session object for querying the DB. items: Iterator of items to be added to the DB. hash_item: Function that takes as in put the item to be added and returns a hash of it. attrs: List of attributes of the object/class that represent the object's key. Returns the next item (in items) that is not already in the DB.
[ "An", "object", "can", "have", "multiple", "attributes", "as", "its", "key", ".", "This", "merges", "the", "items", "to", "be", "added", "with", "existing", "items", "in", "the", "database", "based", "on", "their", "key", "(", "s", ")", "." ]
python
train
mkouhei/tonicdnscli
src/tonicdnscli/command.py
https://github.com/mkouhei/tonicdnscli/blob/df2d6fb2104ae4d49fa89d1bba2f3ccd2fed388c/src/tonicdnscli/command.py#L485-L512
def conn_options(prs, conn): """Set options of connecting to TonicDNS API server Arguments: prs: parser object of argparse conn: dictionary of connection information """ if conn.get('server') and conn.get('username') and conn.get('password'): prs.set_defaults(server=conn.get('server'), username=conn.get('username'), password=conn.get('password')) elif conn.get('server') and conn.get('username'): prs.set_defaults(server=conn.get('server'), username=conn.get('username')) if conn.get('auto_update_soa'): prs.set_defaults(auto_update_soa=conn.get('auto_update_soa')) else: prs.set_defaults(auto_update_soa=False) if not conn.get('server'): set_option(prs, 'server') if not conn.get('username'): set_option(prs, 'username') if not conn.get('password'): set_option(prs, 'password')
[ "def", "conn_options", "(", "prs", ",", "conn", ")", ":", "if", "conn", ".", "get", "(", "'server'", ")", "and", "conn", ".", "get", "(", "'username'", ")", "and", "conn", ".", "get", "(", "'password'", ")", ":", "prs", ".", "set_defaults", "(", "s...
Set options of connecting to TonicDNS API server Arguments: prs: parser object of argparse conn: dictionary of connection information
[ "Set", "options", "of", "connecting", "to", "TonicDNS", "API", "server" ]
python
train
juju/python-libjuju
juju/model.py
https://github.com/juju/python-libjuju/blob/58f0011f4c57cd68830258952fa952eaadca6b38/juju/model.py#L924-L936
async def _wait_for_new(self, entity_type, entity_id): """Wait for a new object to appear in the Model and return it. Waits for an object of type ``entity_type`` with id ``entity_id`` to appear in the model. This is similar to watching for the object using ``block_until``, but uses the watcher rather than polling. """ # if the entity is already in the model, just return it if entity_id in self.state._live_entity_map(entity_type): return self.state._live_entity_map(entity_type)[entity_id] return await self._wait(entity_type, entity_id, None)
[ "async", "def", "_wait_for_new", "(", "self", ",", "entity_type", ",", "entity_id", ")", ":", "# if the entity is already in the model, just return it", "if", "entity_id", "in", "self", ".", "state", ".", "_live_entity_map", "(", "entity_type", ")", ":", "return", "...
Wait for a new object to appear in the Model and return it. Waits for an object of type ``entity_type`` with id ``entity_id`` to appear in the model. This is similar to watching for the object using ``block_until``, but uses the watcher rather than polling.
[ "Wait", "for", "a", "new", "object", "to", "appear", "in", "the", "Model", "and", "return", "it", "." ]
python
train
saltstack/salt
salt/modules/xapi_virt.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/xapi_virt.py#L761-L784
def is_hyper(): ''' Returns a bool whether or not this node is a hypervisor of any kind CLI Example: .. code-block:: bash salt '*' virt.is_hyper ''' try: if __grains__['virtual_subtype'] != 'Xen Dom0': return False except KeyError: # virtual_subtype isn't set everywhere. return False try: with salt.utils.files.fopen('/proc/modules') as fp_: if 'xen_' not in salt.utils.stringutils.to_unicode(fp_.read()): return False except (OSError, IOError): return False # there must be a smarter way... return 'xenstore' in __salt__['cmd.run'](__grains__['ps'])
[ "def", "is_hyper", "(", ")", ":", "try", ":", "if", "__grains__", "[", "'virtual_subtype'", "]", "!=", "'Xen Dom0'", ":", "return", "False", "except", "KeyError", ":", "# virtual_subtype isn't set everywhere.", "return", "False", "try", ":", "with", "salt", ".",...
Returns a bool whether or not this node is a hypervisor of any kind CLI Example: .. code-block:: bash salt '*' virt.is_hyper
[ "Returns", "a", "bool", "whether", "or", "not", "this", "node", "is", "a", "hypervisor", "of", "any", "kind" ]
python
train
sentinel-hub/sentinel2-cloud-detector
s2cloudless/PixelClassifier.py
https://github.com/sentinel-hub/sentinel2-cloud-detector/blob/7130a4a6af90a92f28592d11da692bbb0dc1dc01/s2cloudless/PixelClassifier.py#L81-L96
def image_predict_proba(self, X): """ Predicts class probabilities for the entire image. :param X: Array of images to be classified. :type X: numpy array, shape = [n_images, n_pixels_y, n_pixels_x, n_bands] :return: classification probability map :rtype: numpy array, [n_samples, n_pixels_y, n_pixels_x] """ pixels = self.extract_pixels(X) probabilities = self.classifier.predict_proba(pixels) return probabilities.reshape(X.shape[0], X.shape[1], X.shape[2], probabilities.shape[1])
[ "def", "image_predict_proba", "(", "self", ",", "X", ")", ":", "pixels", "=", "self", ".", "extract_pixels", "(", "X", ")", "probabilities", "=", "self", ".", "classifier", ".", "predict_proba", "(", "pixels", ")", "return", "probabilities", ".", "reshape", ...
Predicts class probabilities for the entire image. :param X: Array of images to be classified. :type X: numpy array, shape = [n_images, n_pixels_y, n_pixels_x, n_bands] :return: classification probability map :rtype: numpy array, [n_samples, n_pixels_y, n_pixels_x]
[ "Predicts", "class", "probabilities", "for", "the", "entire", "image", "." ]
python
train
mozilla/taar
taar/recommenders/ensemble_recommender.py
https://github.com/mozilla/taar/blob/4002eb395f0b7ad837f1578e92d590e2cf82bdca/taar/recommenders/ensemble_recommender.py#L81-L150
def _recommend(self, client_data, limit, extra_data={}): """ Ensemble recommendations are aggregated from individual recommenders. The ensemble recommender applies a weight to the recommendation outputs of each recommender to reorder the recommendations to be a better fit. The intuitive understanding is that the total space of recommended addons across all recommenders will include the 'true' addons that should be recommended better than any individual recommender. The ensemble method simply needs to weight each recommender appropriate so that the ordering is correct. """ self.logger.info("Ensemble recommend invoked") preinstalled_addon_ids = client_data.get("installed_addons", []) # Compute an extended limit by adding the length of # the list of any preinstalled addons. extended_limit = limit + len(preinstalled_addon_ids) flattened_results = [] ensemble_weights = self._weight_cache.getWeights() for rkey in self.RECOMMENDER_KEYS: recommender = self._recommender_map[rkey] if recommender.can_recommend(client_data): raw_results = recommender.recommend( client_data, extended_limit, extra_data ) reweighted_results = [] for guid, weight in raw_results: item = (guid, weight * ensemble_weights[rkey]) reweighted_results.append(item) flattened_results.extend(reweighted_results) # Sort the results by the GUID flattened_results.sort(key=lambda item: item[0]) # group by the guid, sum up the weights for recurring GUID # suggestions across all recommenders guid_grouper = itertools.groupby(flattened_results, lambda item: item[0]) ensemble_suggestions = [] for (guid, guid_group) in guid_grouper: weight_sum = sum([v for (g, v) in guid_group]) item = (guid, weight_sum) ensemble_suggestions.append(item) # Sort in reverse order (greatest weight to least) ensemble_suggestions.sort(key=lambda x: -x[1]) filtered_ensemble_suggestions = [ (guid, weight) for (guid, weight) in ensemble_suggestions if guid not in preinstalled_addon_ids ] results = filtered_ensemble_suggestions[:limit] log_data 
= ( client_data["client_id"], str(ensemble_weights), str([r[0] for r in results]), ) self.logger.info( "client_id: [%s], ensemble_weight: [%s], guids: [%s]" % log_data ) return results
[ "def", "_recommend", "(", "self", ",", "client_data", ",", "limit", ",", "extra_data", "=", "{", "}", ")", ":", "self", ".", "logger", ".", "info", "(", "\"Ensemble recommend invoked\"", ")", "preinstalled_addon_ids", "=", "client_data", ".", "get", "(", "\"...
Ensemble recommendations are aggregated from individual recommenders. The ensemble recommender applies a weight to the recommendation outputs of each recommender to reorder the recommendations to be a better fit. The intuitive understanding is that the total space of recommended addons across all recommenders will include the 'true' addons that should be recommended better than any individual recommender. The ensemble method simply needs to weight each recommender appropriate so that the ordering is correct.
[ "Ensemble", "recommendations", "are", "aggregated", "from", "individual", "recommenders", ".", "The", "ensemble", "recommender", "applies", "a", "weight", "to", "the", "recommendation", "outputs", "of", "each", "recommender", "to", "reorder", "the", "recommendations",...
python
train
deepmind/sonnet
sonnet/python/modules/util.py
https://github.com/deepmind/sonnet/blob/00612ca3178964d86b556e062694d808ff81fcca/sonnet/python/modules/util.py#L1039-L1099
def supports_kwargs(module_or_fn, kwargs_list): """Determines whether the provided callable supports all the kwargs. This is useful when you have a module that might or might not support a kwarg such as `is_training`. Rather than calling the module and catching the error, risking the potential modification of underlying state, this function introspects the module to see what kwargs are actually supported, using the python `inspect` module. Note that many TF functions do not export a valid argspec object, rather they have a generic *args, **kwargs signature due to various layers of wrapping (deprecation decorators, etc). In those circumstances we return MAYBE_SUPPORTED, and users will have to use another method to tell whether the kwargs are supported (e.g. by just calling the function). Args: module_or_fn: some callable, generally an object or a method of some object. If an object is provided, we check wither `module_or_fn.__call__` supports the provided kwargs, which for a Sonnet module will automatically check the signature of _build. If `module_or_fn` is a function/method, then we check its signature directly, so non-Sonnet functions can be used. kwargs_list: string or iterable of strings of keyword arg names to test for. If an empty iterable is provided this function will always return True. Raises: ValueError: if a non-string is provided in `kwargs_list`. Returns: a string, one of 'supported', 'not_supported' or 'maybe_supported'. """ if isinstance(kwargs_list, six.string_types): kwargs_list = [kwargs_list] # If it's not a function or method, then assume it's a module, so introspect # the __call__ method. wrapt ensures that for Sonnet modules the _build # signature is available here. if not (inspect.isfunction(module_or_fn) or inspect.ismethod(module_or_fn)): module_or_fn = module_or_fn.__call__ arg_spec = inspect.getargspec(module_or_fn) # If there is a keywords element, then an arbitrary kwargs will work, as far # as we can tell from here. 
takes_arbitrary_kwargs = (arg_spec.keywords is not None) for kwarg in kwargs_list: if not isinstance(kwarg, six.string_types): raise ValueError("kwargs should be strings, instead got {}".format( kwarg)) if kwarg not in arg_spec.args: if not takes_arbitrary_kwargs: # The function doesn't take **kwargs, and this name is not in the # regular args, so it would definitely cause an error to call this. return NOT_SUPPORTED else: # The function may accept the kwarg, but we can't say for sure. Even # though this is only one kwarg, we can't be certain about the whole # lot, so the combined answer is now "maybe". return MAYBE_SUPPORTED # All the kwargs must actually be present in the specific args list return SUPPORTED
[ "def", "supports_kwargs", "(", "module_or_fn", ",", "kwargs_list", ")", ":", "if", "isinstance", "(", "kwargs_list", ",", "six", ".", "string_types", ")", ":", "kwargs_list", "=", "[", "kwargs_list", "]", "# If it's not a function or method, then assume it's a module, s...
Determines whether the provided callable supports all the kwargs. This is useful when you have a module that might or might not support a kwarg such as `is_training`. Rather than calling the module and catching the error, risking the potential modification of underlying state, this function introspects the module to see what kwargs are actually supported, using the python `inspect` module. Note that many TF functions do not export a valid argspec object, rather they have a generic *args, **kwargs signature due to various layers of wrapping (deprecation decorators, etc). In those circumstances we return MAYBE_SUPPORTED, and users will have to use another method to tell whether the kwargs are supported (e.g. by just calling the function). Args: module_or_fn: some callable, generally an object or a method of some object. If an object is provided, we check wither `module_or_fn.__call__` supports the provided kwargs, which for a Sonnet module will automatically check the signature of _build. If `module_or_fn` is a function/method, then we check its signature directly, so non-Sonnet functions can be used. kwargs_list: string or iterable of strings of keyword arg names to test for. If an empty iterable is provided this function will always return True. Raises: ValueError: if a non-string is provided in `kwargs_list`. Returns: a string, one of 'supported', 'not_supported' or 'maybe_supported'.
[ "Determines", "whether", "the", "provided", "callable", "supports", "all", "the", "kwargs", "." ]
python
train
pysathq/pysat
pysat/solvers.py
https://github.com/pysathq/pysat/blob/522742e8f2d4c6ac50ecd9087f7a346206774c67/pysat/solvers.py#L2710-L2732
def solve_limited(self, assumptions=[]): """ Solve internal formula using given budgets for conflicts and propagations. """ if self.minicard: if self.use_timer: start_time = time.clock() # saving default SIGINT handler def_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_DFL) self.status = pysolvers.minicard_solve_lim(self.minicard, assumptions) # recovering default SIGINT handler def_sigint_handler = signal.signal(signal.SIGINT, def_sigint_handler) if self.use_timer: self.call_time = time.clock() - start_time self.accu_time += self.call_time return self.status
[ "def", "solve_limited", "(", "self", ",", "assumptions", "=", "[", "]", ")", ":", "if", "self", ".", "minicard", ":", "if", "self", ".", "use_timer", ":", "start_time", "=", "time", ".", "clock", "(", ")", "# saving default SIGINT handler", "def_sigint_handl...
Solve internal formula using given budgets for conflicts and propagations.
[ "Solve", "internal", "formula", "using", "given", "budgets", "for", "conflicts", "and", "propagations", "." ]
python
train
inveniosoftware-attic/invenio-utils
invenio_utils/shell.py
https://github.com/inveniosoftware-attic/invenio-utils/blob/9a1c6db4e3f1370901f329f510480dd8df188296/invenio_utils/shell.py#L309-L327
def escape_shell_arg(shell_arg): """Escape shell argument shell_arg by placing it within single-quotes. Any single quotes found within the shell argument string will be escaped. @param shell_arg: The shell argument to be escaped. @type shell_arg: string @return: The single-quote-escaped value of the shell argument. @rtype: string @raise TypeError: if shell_arg is not a string. @see: U{http://mail.python.org/pipermail/python-list/2005-October/346957.html} """ if isinstance(shell_arg, six.text_type): msg = "ERROR: escape_shell_arg() expected string argument but " \ "got '%s' of type '%s'." % (repr(shell_arg), type(shell_arg)) raise TypeError(msg) return "'%s'" % shell_arg.replace("'", r"'\''")
[ "def", "escape_shell_arg", "(", "shell_arg", ")", ":", "if", "isinstance", "(", "shell_arg", ",", "six", ".", "text_type", ")", ":", "msg", "=", "\"ERROR: escape_shell_arg() expected string argument but \"", "\"got '%s' of type '%s'.\"", "%", "(", "repr", "(", "shell_...
Escape shell argument shell_arg by placing it within single-quotes. Any single quotes found within the shell argument string will be escaped. @param shell_arg: The shell argument to be escaped. @type shell_arg: string @return: The single-quote-escaped value of the shell argument. @rtype: string @raise TypeError: if shell_arg is not a string. @see: U{http://mail.python.org/pipermail/python-list/2005-October/346957.html}
[ "Escape", "shell", "argument", "shell_arg", "by", "placing", "it", "within", "single", "-", "quotes", ".", "Any", "single", "quotes", "found", "within", "the", "shell", "argument", "string", "will", "be", "escaped", "." ]
python
train
tornadoweb/tornado
tornado/websocket.py
https://github.com/tornadoweb/tornado/blob/b8b481770bcdb333a69afde5cce7eaa449128326/tornado/websocket.py#L995-L1013
def _get_compressor_options( self, side: str, agreed_parameters: Dict[str, Any], compression_options: Dict[str, Any] = None, ) -> Dict[str, Any]: """Converts a websocket agreed_parameters set to keyword arguments for our compressor objects. """ options = dict( persistent=(side + "_no_context_takeover") not in agreed_parameters ) # type: Dict[str, Any] wbits_header = agreed_parameters.get(side + "_max_window_bits", None) if wbits_header is None: options["max_wbits"] = zlib.MAX_WBITS else: options["max_wbits"] = int(wbits_header) options["compression_options"] = compression_options return options
[ "def", "_get_compressor_options", "(", "self", ",", "side", ":", "str", ",", "agreed_parameters", ":", "Dict", "[", "str", ",", "Any", "]", ",", "compression_options", ":", "Dict", "[", "str", ",", "Any", "]", "=", "None", ",", ")", "->", "Dict", "[", ...
Converts a websocket agreed_parameters set to keyword arguments for our compressor objects.
[ "Converts", "a", "websocket", "agreed_parameters", "set", "to", "keyword", "arguments", "for", "our", "compressor", "objects", "." ]
python
train
GeospatialPython/pyshp
shapefile.py
https://github.com/GeospatialPython/pyshp/blob/71231ddc5aa54f155d4f0563c56006fffbfc84e7/shapefile.py#L820-L840
def __shapeIndex(self, i=None): """Returns the offset in a .shp file for a shape based on information in the .shx index file.""" shx = self.shx if not shx: return None if not self._offsets: # File length (16-bit word * 2 = bytes) - header length shx.seek(24) shxRecordLength = (unpack(">i", shx.read(4))[0] * 2) - 100 numRecords = shxRecordLength // 8 # Jump to the first record. shx.seek(100) shxRecords = _Array('i') # Each offset consists of two nrs, only the first one matters shxRecords.fromfile(shx, 2 * numRecords) if sys.byteorder != 'big': shxRecords.byteswap() self._offsets = [2 * el for el in shxRecords[::2]] if not i == None: return self._offsets[i]
[ "def", "__shapeIndex", "(", "self", ",", "i", "=", "None", ")", ":", "shx", "=", "self", ".", "shx", "if", "not", "shx", ":", "return", "None", "if", "not", "self", ".", "_offsets", ":", "# File length (16-bit word * 2 = bytes) - header length\r", "shx", "."...
Returns the offset in a .shp file for a shape based on information in the .shx index file.
[ "Returns", "the", "offset", "in", "a", ".", "shp", "file", "for", "a", "shape", "based", "on", "information", "in", "the", ".", "shx", "index", "file", "." ]
python
train
inveniosoftware/invenio-oaiserver
invenio_oaiserver/tasks.py
https://github.com/inveniosoftware/invenio-oaiserver/blob/eae765e32bd816ddc5612d4b281caf205518b512/invenio_oaiserver/tasks.py#L25-L29
def _records_commit(record_ids): """Commit all records.""" for record_id in record_ids: record = Record.get_record(record_id) record.commit()
[ "def", "_records_commit", "(", "record_ids", ")", ":", "for", "record_id", "in", "record_ids", ":", "record", "=", "Record", ".", "get_record", "(", "record_id", ")", "record", ".", "commit", "(", ")" ]
Commit all records.
[ "Commit", "all", "records", "." ]
python
train
PyThaiNLP/pythainlp
pythainlp/tokenize/longest.py
https://github.com/PyThaiNLP/pythainlp/blob/e9a300b8a99dfd1a67a955e7c06f62e4afe0fbca/pythainlp/tokenize/longest.py#L142-L150
def segment(text: str, custom_dict: Trie = None) -> List[str]: """ตัดคำภาษาไทยด้วยวิธี longest matching""" if not text or not isinstance(text, str): return [] if not custom_dict: custom_dict = DEFAULT_DICT_TRIE return LongestMatchTokenizer(custom_dict).tokenize(text)
[ "def", "segment", "(", "text", ":", "str", ",", "custom_dict", ":", "Trie", "=", "None", ")", "->", "List", "[", "str", "]", ":", "if", "not", "text", "or", "not", "isinstance", "(", "text", ",", "str", ")", ":", "return", "[", "]", "if", "not", ...
ตัดคำภาษาไทยด้วยวิธี longest matching
[ "ตัดคำภาษาไทยด้วยวิธี", "longest", "matching" ]
python
train
sveetch/boussole
boussole/conf/discovery.py
https://github.com/sveetch/boussole/blob/22cc644e9d633f41ebfc167d427a71c1726cee21/boussole/conf/discovery.py#L141-L191
def search(self, filepath=None, basedir=None, kind=None): """ Search for a settings file. Keyword Arguments: filepath (string): Path to a config file, either absolute or relative. If absolute set its directory as basedir (omitting given basedir argument). If relative join it to basedir. basedir (string): Directory path where to search for. kind (string): Backend engine kind name (value of attribute ``_kind_name``) to help discovering with empty or relative filepath. Also if explicit absolute filepath is given, this will enforce the backend engine (such as yaml kind will be forced for a ``foo.json`` file). Returns: tuple: Absolute filepath and backend engine class. """ # None values would cause trouble with path joining if filepath is None: filepath = '' if basedir is None: basedir = '.' if not basedir and not filepath: msg = "Either basedir or filepath is required for discovering" raise SettingsDiscoveryError(msg) if kind and kind not in self.engines: msg = "Given settings format is unknow: {}" raise SettingsDiscoveryError(msg.format(kind)) # Implicit filename to find from backend if not filepath: filename, engine = self.guess_filename(basedir, kind) filepath = os.path.join(basedir, filename) # Explicit filename dont have to search for default backend file and # blindly force given backend if any else: if os.path.isabs(filepath): basedir, filename = os.path.split(filepath) else: filepath = os.path.join(basedir, filepath) if not os.path.exists(filepath): msg = "Given settings file does not exists: {}" raise SettingsDiscoveryError(msg.format(filepath)) engine = self.get_engine(filepath, kind) return filepath, engine
[ "def", "search", "(", "self", ",", "filepath", "=", "None", ",", "basedir", "=", "None", ",", "kind", "=", "None", ")", ":", "# None values would cause trouble with path joining", "if", "filepath", "is", "None", ":", "filepath", "=", "''", "if", "basedir", "...
Search for a settings file. Keyword Arguments: filepath (string): Path to a config file, either absolute or relative. If absolute set its directory as basedir (omitting given basedir argument). If relative join it to basedir. basedir (string): Directory path where to search for. kind (string): Backend engine kind name (value of attribute ``_kind_name``) to help discovering with empty or relative filepath. Also if explicit absolute filepath is given, this will enforce the backend engine (such as yaml kind will be forced for a ``foo.json`` file). Returns: tuple: Absolute filepath and backend engine class.
[ "Search", "for", "a", "settings", "file", "." ]
python
train
zhanglab/psamm
psamm/util.py
https://github.com/zhanglab/psamm/blob/dc427848c4f9d109ca590f0afa024c63b685b3f4/psamm/util.py#L54-L58
def write(self, s): """Write message to logger.""" for line in re.split(r'\n+', s): if line != '': self._logger.log(self._level, line)
[ "def", "write", "(", "self", ",", "s", ")", ":", "for", "line", "in", "re", ".", "split", "(", "r'\\n+'", ",", "s", ")", ":", "if", "line", "!=", "''", ":", "self", ".", "_logger", ".", "log", "(", "self", ".", "_level", ",", "line", ")" ]
Write message to logger.
[ "Write", "message", "to", "logger", "." ]
python
train
loganasherjones/yapconf
yapconf/items.py
https://github.com/loganasherjones/yapconf/blob/d2970e6e7e3334615d4d978d8b0ca33006d79d16/yapconf/items.py#L256-L279
def migrate_config(self, current_config, config_to_migrate, always_update, update_defaults): """Migrate config value in current_config, updating config_to_migrate. Given the current_config object, it will attempt to find a value based on all the names given. If no name could be found, then it will simply set the value to the default. If a value is found and is in the list of previous_defaults, it will either update or keep the old value based on if update_defaults is set. If a non-default value is set it will either keep this value or update it based on if ``always_update`` is true. Args: current_config (dict): Current configuration. config_to_migrate (dict): Config to update. always_update (bool): Always update value. update_defaults (bool): Update values found in previous_defaults """ value = self._search_config_for_possible_names(current_config) self._update_config(config_to_migrate, value, always_update, update_defaults)
[ "def", "migrate_config", "(", "self", ",", "current_config", ",", "config_to_migrate", ",", "always_update", ",", "update_defaults", ")", ":", "value", "=", "self", ".", "_search_config_for_possible_names", "(", "current_config", ")", "self", ".", "_update_config", ...
Migrate config value in current_config, updating config_to_migrate. Given the current_config object, it will attempt to find a value based on all the names given. If no name could be found, then it will simply set the value to the default. If a value is found and is in the list of previous_defaults, it will either update or keep the old value based on if update_defaults is set. If a non-default value is set it will either keep this value or update it based on if ``always_update`` is true. Args: current_config (dict): Current configuration. config_to_migrate (dict): Config to update. always_update (bool): Always update value. update_defaults (bool): Update values found in previous_defaults
[ "Migrate", "config", "value", "in", "current_config", "updating", "config_to_migrate", "." ]
python
train
guaix-ucm/pyemir
emirdrp/instrument/dtu_configuration.py
https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/instrument/dtu_configuration.py#L208-L236
def average_dtu_configurations(list_of_objects): """Return DtuConfiguration instance with averaged values. Parameters ---------- list_of_objects : python list List of DtuConfiguration instances to be averaged. Returns ------- result : DtuConfiguration instance Object with averaged values. """ result = DtuConfiguration() if len(list_of_objects) == 0: return result list_of_members = result.__dict__.keys() # compute average of all the members of the class for member in list_of_members: result.__dict__[member] = np.mean( [tmp_dtu.__dict__[member] for tmp_dtu in list_of_objects] ) return result
[ "def", "average_dtu_configurations", "(", "list_of_objects", ")", ":", "result", "=", "DtuConfiguration", "(", ")", "if", "len", "(", "list_of_objects", ")", "==", "0", ":", "return", "result", "list_of_members", "=", "result", ".", "__dict__", ".", "keys", "(...
Return DtuConfiguration instance with averaged values. Parameters ---------- list_of_objects : python list List of DtuConfiguration instances to be averaged. Returns ------- result : DtuConfiguration instance Object with averaged values.
[ "Return", "DtuConfiguration", "instance", "with", "averaged", "values", "." ]
python
train
taxpon/pymesh
pymesh/base.py
https://github.com/taxpon/pymesh/blob/a90b3b2ed1408d793f3b5208dd8087b08fb7c92e/pymesh/base.py#L107-L119
def translate_y(self, d): """Translate mesh for y-direction :param float d: Amount to translate """ mat = numpy.array([ [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, d, 0, 1] ]) self.vectors = self.vectors.dot(mat) return self
[ "def", "translate_y", "(", "self", ",", "d", ")", ":", "mat", "=", "numpy", ".", "array", "(", "[", "[", "1", ",", "0", ",", "0", ",", "0", "]", ",", "[", "0", ",", "1", ",", "0", ",", "0", "]", ",", "[", "0", ",", "0", ",", "1", ",",...
Translate mesh for y-direction :param float d: Amount to translate
[ "Translate", "mesh", "for", "y", "-", "direction" ]
python
train
tanghaibao/goatools
goatools/go_enrichment.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/go_enrichment.py#L136-L143
def set_goterm(self, go2obj): """Set goterm and copy GOTerm's name and namespace.""" if self.GO in go2obj: goterm = go2obj[self.GO] self.goterm = goterm self.name = goterm.name self.depth = goterm.depth self.NS = self.namespace2NS[self.goterm.namespace]
[ "def", "set_goterm", "(", "self", ",", "go2obj", ")", ":", "if", "self", ".", "GO", "in", "go2obj", ":", "goterm", "=", "go2obj", "[", "self", ".", "GO", "]", "self", ".", "goterm", "=", "goterm", "self", ".", "name", "=", "goterm", ".", "name", ...
Set goterm and copy GOTerm's name and namespace.
[ "Set", "goterm", "and", "copy", "GOTerm", "s", "name", "and", "namespace", "." ]
python
train
CI-WATER/gsshapy
gsshapy/orm/prj.py
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/prj.py#L2024-L2051
def write(self, originalPrefix, newPrefix=None): """ Write project card to string. Args: originalPrefix (str): Original name to give to files that follow the project naming convention (e.g: prefix.gag). newPrefix (str, optional): If new prefix is desired, pass in this parameter. Defaults to None. Returns: str: Card and value as they would be written to the project file. """ # Determine number of spaces between card and value for nice alignment numSpaces = max(2, 25 - len(self.name)) # Handle special case of booleans if self.value is None: line = '%s\n' % self.name else: if self.name == 'WMS': line = '%s %s\n' % (self.name, self.value) elif newPrefix is None: line = '%s%s%s\n' % (self.name, ' ' * numSpaces, self.value) elif originalPrefix in self.value: line = '%s%s%s\n' % (self.name, ' ' * numSpaces, self.value.replace(originalPrefix, newPrefix)) else: line = '%s%s%s\n' % (self.name, ' ' * numSpaces, self.value) return line
[ "def", "write", "(", "self", ",", "originalPrefix", ",", "newPrefix", "=", "None", ")", ":", "# Determine number of spaces between card and value for nice alignment", "numSpaces", "=", "max", "(", "2", ",", "25", "-", "len", "(", "self", ".", "name", ")", ")", ...
Write project card to string. Args: originalPrefix (str): Original name to give to files that follow the project naming convention (e.g: prefix.gag). newPrefix (str, optional): If new prefix is desired, pass in this parameter. Defaults to None. Returns: str: Card and value as they would be written to the project file.
[ "Write", "project", "card", "to", "string", "." ]
python
train
bokeh/bokeh
bokeh/transform.py
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/transform.py#L127-L155
def factor_hatch(field_name, patterns, factors, start=0, end=None): ''' Create a ``DataSpec`` dict that applies a client-side ``CategoricalPatternMapper`` transformation to a ``ColumnDataSource`` column. Args: field_name (str) : a field name to configure ``DataSpec`` with patterns (seq[string]) : a list of hatch patterns to use to map to factors (seq) : a sequences of categorical factors corresponding to the palette start (int, optional) : a start slice index to apply when the column data has factors with multiple levels. (default: 0) end (int, optional) : an end slice index to apply when the column data has factors with multiple levels. (default: None) Returns: dict Added in version 1.1.1 ''' return field(field_name, CategoricalPatternMapper(patterns=patterns, factors=factors, start=start, end=end))
[ "def", "factor_hatch", "(", "field_name", ",", "patterns", ",", "factors", ",", "start", "=", "0", ",", "end", "=", "None", ")", ":", "return", "field", "(", "field_name", ",", "CategoricalPatternMapper", "(", "patterns", "=", "patterns", ",", "factors", "...
Create a ``DataSpec`` dict that applies a client-side ``CategoricalPatternMapper`` transformation to a ``ColumnDataSource`` column. Args: field_name (str) : a field name to configure ``DataSpec`` with patterns (seq[string]) : a list of hatch patterns to use to map to factors (seq) : a sequences of categorical factors corresponding to the palette start (int, optional) : a start slice index to apply when the column data has factors with multiple levels. (default: 0) end (int, optional) : an end slice index to apply when the column data has factors with multiple levels. (default: None) Returns: dict Added in version 1.1.1
[ "Create", "a", "DataSpec", "dict", "that", "applies", "a", "client", "-", "side", "CategoricalPatternMapper", "transformation", "to", "a", "ColumnDataSource", "column", "." ]
python
train
expfactory/expfactory
expfactory/experiment.py
https://github.com/expfactory/expfactory/blob/27ce6cc93e17231df8a8024f18e631336afd3501/expfactory/experiment.py#L84-L97
def load_experiment(folder, return_path=False): '''load_experiment: reads in the config.json for a folder, returns None if not found. :param folder: full path to experiment folder :param return_path: if True, don't load the config.json, but return it ''' fullpath = os.path.abspath(folder) config = "%s/config.json" %(fullpath) if not os.path.exists(config): bot.error("config.json could not be found in %s" %(folder)) config = None if return_path is False and config is not None: config = read_json(config) return config
[ "def", "load_experiment", "(", "folder", ",", "return_path", "=", "False", ")", ":", "fullpath", "=", "os", ".", "path", ".", "abspath", "(", "folder", ")", "config", "=", "\"%s/config.json\"", "%", "(", "fullpath", ")", "if", "not", "os", ".", "path", ...
load_experiment: reads in the config.json for a folder, returns None if not found. :param folder: full path to experiment folder :param return_path: if True, don't load the config.json, but return it
[ "load_experiment", ":", "reads", "in", "the", "config", ".", "json", "for", "a", "folder", "returns", "None", "if", "not", "found", ".", ":", "param", "folder", ":", "full", "path", "to", "experiment", "folder", ":", "param", "return_path", ":", "if", "T...
python
train
ttinies/sc2ladderMgmt
sc2ladderMgmt/ladders.py
https://github.com/ttinies/sc2ladderMgmt/blob/230292e18c54e43129c162116bbdf743b3e9dcf1/sc2ladderMgmt/ladders.py#L64-L69
def load(self, ladderName): """retrieve the ladder settings from saved disk file""" self.name = ladderName # preset value to load self.filename with open(self.filename, "rb") as f: data = f.read() self.__dict__.update( json.loads(data) )
[ "def", "load", "(", "self", ",", "ladderName", ")", ":", "self", ".", "name", "=", "ladderName", "# preset value to load self.filename", "with", "open", "(", "self", ".", "filename", ",", "\"rb\"", ")", "as", "f", ":", "data", "=", "f", ".", "read", "(",...
retrieve the ladder settings from saved disk file
[ "retrieve", "the", "ladder", "settings", "from", "saved", "disk", "file" ]
python
train
keenlabs/KeenClient-Python
keen/__init__.py
https://github.com/keenlabs/KeenClient-Python/blob/266387c3376d1e000d117e17c45045ae3439d43f/keen/__init__.py#L109-L139
def sum(event_collection, target_property, timeframe=None, timezone=None, interval=None, filters=None, group_by=None, order_by=None, max_age=None, limit=None): """ Performs a sum query Adds the values of a target property for events that meet the given criteria. :param event_collection: string, the name of the collection to query :param target_property: string, the name of the event property you would like use :param timeframe: string or dict, the timeframe in which the events happened example: "previous_7_days" :param timezone: int, the timezone you'd like to use for the timeframe and interval in seconds :param interval: string, the time interval used for measuring data over time example: "daily" :param filters: array of dict, contains the filters you'd like to apply to the data example: [{"property_name":"device", "operator":"eq", "property_value":"iPhone"}] :param group_by: string or array of strings, the name(s) of the properties you would like to group you results by. example: "customer.id" or ["browser","operating_system"] :param order_by: dictionary or list of dictionary objects containing the property_name(s) to order by and the desired direction(s) of sorting. Example: {"property_name":"result", "direction":keen.direction.DESCENDING} May not be used without a group_by specified. :param limit: positive integer limiting the displayed results of a query using order_by :param max_age: an integer, greater than 30 seconds, the maximum 'staleness' you're willing to trade for increased query performance, in seconds """ _initialize_client_from_environment() return _client.sum(event_collection=event_collection, timeframe=timeframe, timezone=timezone, interval=interval, filters=filters, group_by=group_by, order_by=order_by, target_property=target_property, max_age=max_age, limit=limit)
[ "def", "sum", "(", "event_collection", ",", "target_property", ",", "timeframe", "=", "None", ",", "timezone", "=", "None", ",", "interval", "=", "None", ",", "filters", "=", "None", ",", "group_by", "=", "None", ",", "order_by", "=", "None", ",", "max_a...
Performs a sum query Adds the values of a target property for events that meet the given criteria. :param event_collection: string, the name of the collection to query :param target_property: string, the name of the event property you would like use :param timeframe: string or dict, the timeframe in which the events happened example: "previous_7_days" :param timezone: int, the timezone you'd like to use for the timeframe and interval in seconds :param interval: string, the time interval used for measuring data over time example: "daily" :param filters: array of dict, contains the filters you'd like to apply to the data example: [{"property_name":"device", "operator":"eq", "property_value":"iPhone"}] :param group_by: string or array of strings, the name(s) of the properties you would like to group you results by. example: "customer.id" or ["browser","operating_system"] :param order_by: dictionary or list of dictionary objects containing the property_name(s) to order by and the desired direction(s) of sorting. Example: {"property_name":"result", "direction":keen.direction.DESCENDING} May not be used without a group_by specified. :param limit: positive integer limiting the displayed results of a query using order_by :param max_age: an integer, greater than 30 seconds, the maximum 'staleness' you're willing to trade for increased query performance, in seconds
[ "Performs", "a", "sum", "query" ]
python
train
limodou/uliweb
uliweb/core/SimpleFrame.py
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/core/SimpleFrame.py#L222-L261
def CORS(func=None): """ CORS support """ def w(r=None): from uliweb import request, response if request.method == 'OPTIONS': response = Response(status=204) response.headers['Access-Control-Allow-Credentials'] = 'true' response.headers['Access-Control-Allow-Origin'] = request.headers['Origin'] response.headers['Access-Control-Allow-Methods'] = 'GET, POST, PUT, DELETE, OPTIONS' response.headers['Access-Control-Allow-Headers'] = 'DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range' response.headers['Access-Control-Max-Age'] = 24*3600 response.headers['Content-Type'] = 'text/plain; charset=utf-8' response.headers['Content-Length'] = 0 return response elif request.method in ('GET', 'POST'): if isinstance(r, Response): response = r response.headers['Access-Control-Allow-Credentials'] = 'true' if 'Origin' in request.headers: response.headers['Access-Control-Allow-Origin'] = request.headers['Origin'] response.headers['Access-Control-Allow-Methods'] = 'GET, POST, PUT, DELETE, OPTIONS' response.headers['Access-Control-Allow-Headers'] = 'DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range' response.headers['Access-Control-Expose-Headers'] = 'Content-Length,Content-Range' if callable(func): @wraps(func) def f(*arg, **kwargs): if request.method == 'OPTIONS': return w() ret = func(*arg, **kwargs) w(ret) return ret return f else: w()
[ "def", "CORS", "(", "func", "=", "None", ")", ":", "def", "w", "(", "r", "=", "None", ")", ":", "from", "uliweb", "import", "request", ",", "response", "if", "request", ".", "method", "==", "'OPTIONS'", ":", "response", "=", "Response", "(", "status"...
CORS support
[ "CORS", "support" ]
python
train
qubell/contrib-python-qubell-client
qubell/api/private/platform.py
https://github.com/qubell/contrib-python-qubell-client/blob/4586ea11d5103c2ff9607d3ed922b5a0991b8845/qubell/api/private/platform.py#L95-L101
def get_organization(self, id=None, name=None): """ Gets existing and accessible organization :rtype: Organization """ log.info("Picking organization: %s (%s)" % (name, id)) return self.organizations[id or name]
[ "def", "get_organization", "(", "self", ",", "id", "=", "None", ",", "name", "=", "None", ")", ":", "log", ".", "info", "(", "\"Picking organization: %s (%s)\"", "%", "(", "name", ",", "id", ")", ")", "return", "self", ".", "organizations", "[", "id", ...
Gets existing and accessible organization :rtype: Organization
[ "Gets", "existing", "and", "accessible", "organization", ":", "rtype", ":", "Organization" ]
python
train
openvax/pyensembl
pyensembl/transcript.py
https://github.com/openvax/pyensembl/blob/4b995fb72e848206d6fbf11950cf30964cd9b3aa/pyensembl/transcript.py#L254-L267
def exon_intervals(self): """List of (start,end) tuples for each exon of this transcript, in the order specified by the 'exon_number' column of the exon table. """ results = self.db.query( select_column_names=["exon_number", "start", "end"], filter_column="transcript_id", filter_value=self.id, feature="exon") sorted_intervals = [None] * len(results) for (exon_number, start, end) in results: sorted_intervals[int(exon_number) - 1] = (start, end) return sorted_intervals
[ "def", "exon_intervals", "(", "self", ")", ":", "results", "=", "self", ".", "db", ".", "query", "(", "select_column_names", "=", "[", "\"exon_number\"", ",", "\"start\"", ",", "\"end\"", "]", ",", "filter_column", "=", "\"transcript_id\"", ",", "filter_value"...
List of (start,end) tuples for each exon of this transcript, in the order specified by the 'exon_number' column of the exon table.
[ "List", "of", "(", "start", "end", ")", "tuples", "for", "each", "exon", "of", "this", "transcript", "in", "the", "order", "specified", "by", "the", "exon_number", "column", "of", "the", "exon", "table", "." ]
python
train
google/grr
grr/server/grr_response_server/rdfvalues/objects.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/rdfvalues/objects.py#L573-L586
def ToURN(self): """Converts a reference into an URN.""" if self.path_type in [PathInfo.PathType.OS, PathInfo.PathType.TSK]: return rdfvalue.RDFURN(self.client_id).Add("fs").Add( self.path_type.name.lower()).Add("/".join(self.path_components)) elif self.path_type == PathInfo.PathType.REGISTRY: return rdfvalue.RDFURN(self.client_id).Add("registry").Add("/".join( self.path_components)) elif self.path_type == PathInfo.PathType.TEMP: return rdfvalue.RDFURN(self.client_id).Add("temp").Add("/".join( self.path_components)) raise ValueError("Unsupported path type: %s" % self.path_type)
[ "def", "ToURN", "(", "self", ")", ":", "if", "self", ".", "path_type", "in", "[", "PathInfo", ".", "PathType", ".", "OS", ",", "PathInfo", ".", "PathType", ".", "TSK", "]", ":", "return", "rdfvalue", ".", "RDFURN", "(", "self", ".", "client_id", ")",...
Converts a reference into an URN.
[ "Converts", "a", "reference", "into", "an", "URN", "." ]
python
train
umutbozkurt/django-rest-framework-mongoengine
rest_framework_mongoengine/serializers.py
https://github.com/umutbozkurt/django-rest-framework-mongoengine/blob/2fe6de53907b31a5e8b742e4c6b728942b5fa4f0/rest_framework_mongoengine/serializers.py#L457-L540
def get_customization_for_nested_field(self, field_name): """ Support of nested fields customization for: * EmbeddedDocumentField * NestedReference * Compound fields with EmbeddedDocument as a child: * ListField(EmbeddedDocument)/EmbeddedDocumentListField * MapField(EmbeddedDocument) Extracts fields, exclude, extra_kwargs and validate_*() attributes from parent serializer, related to attributes of field_name. """ # This method is supposed to be called after self.get_fields(), # thus it assumes that fields and exclude are mutually exclusive # and at least one of them is set. # # Also, all the sanity checks are left up to nested field's # get_fields() method, so if something is wrong with customization # nested get_fields() will report this. fields = getattr(self.Meta, 'fields', None) exclude = getattr(self.Meta, 'exclude', None) if fields and fields != ALL_FIELDS and not isinstance(fields, (list, tuple)): raise TypeError( 'The `fields` option must be a list or tuple or "__all__". ' 'Got %s.' % type(fields).__name__ ) if exclude and not isinstance(exclude, (list, tuple)): raise TypeError( 'The `exclude` option must be a list or tuple. Got %s.' % type(exclude).__name__ ) assert not (fields and exclude), ( "Cannot set both 'fields' and 'exclude' options on " "serializer {serializer_class}.".format( serializer_class=self.__class__.__name__ ) ) if fields is None and exclude is None: warnings.warn( "Creating a ModelSerializer without either the 'fields' " "attribute or the 'exclude' attribute is deprecated " "since 3.3.0. 
Add an explicit fields = '__all__' to the " "{serializer_class} serializer.".format( serializer_class=self.__class__.__name__ ), DeprecationWarning ) fields = ALL_FIELDS # assume that fields are ALL_FIELDS # TODO: validators # get nested_fields or nested_exclude (supposed to be mutually exclusive, assign the other one to None) if fields: if fields == ALL_FIELDS: nested_fields = ALL_FIELDS else: nested_fields = [field[len(field_name + '.'):] for field in fields if field.startswith(field_name + '.')] nested_exclude = None else: # leave all the sanity checks up to get_fields() method of nested field's serializer nested_fields = None nested_exclude = [field[len(field_name + '.'):] for field in exclude if field.startswith(field_name + '.')] # get nested_extra_kwargs (including read-only fields) # TODO: uniqueness extra kwargs extra_kwargs = self.get_extra_kwargs() nested_extra_kwargs = {key[len(field_name + '.'):]: value for key, value in extra_kwargs.items() if key.startswith(field_name + '.')} # get nested_validate_methods dict {name: function}, rename e.g. 'validate_author__age()' -> 'validate_age()' # so that we can add them to nested serializer's definition under this new name # validate_methods are normally checked in rest_framework.Serializer.to_internal_value() nested_validate_methods = {} for attr in dir(self.__class__): if attr.startswith('validate_%s__' % field_name.replace('.', '__')): method = get_unbound_function(getattr(self.__class__, attr)) method_name = 'validate_' + attr[len('validate_%s__' % field_name.replace('.', '__')):] nested_validate_methods[method_name] = method return Customization(nested_fields, nested_exclude, nested_extra_kwargs, nested_validate_methods)
[ "def", "get_customization_for_nested_field", "(", "self", ",", "field_name", ")", ":", "# This method is supposed to be called after self.get_fields(),", "# thus it assumes that fields and exclude are mutually exclusive", "# and at least one of them is set.", "#", "# Also, all the sanity che...
Support of nested fields customization for: * EmbeddedDocumentField * NestedReference * Compound fields with EmbeddedDocument as a child: * ListField(EmbeddedDocument)/EmbeddedDocumentListField * MapField(EmbeddedDocument) Extracts fields, exclude, extra_kwargs and validate_*() attributes from parent serializer, related to attributes of field_name.
[ "Support", "of", "nested", "fields", "customization", "for", ":", "*", "EmbeddedDocumentField", "*", "NestedReference", "*", "Compound", "fields", "with", "EmbeddedDocument", "as", "a", "child", ":", "*", "ListField", "(", "EmbeddedDocument", ")", "/", "EmbeddedDo...
python
train
myint/autoflake
autoflake.py
https://github.com/myint/autoflake/blob/68fea68646922b920d55975f9f2adaeafd84df4f/autoflake.py#L85-L94
def standard_package_names(): """Yield standard module names.""" for name in standard_paths(): if name.startswith('_') or '-' in name: continue if '.' in name and name.rsplit('.')[-1] not in ['so', 'py', 'pyc']: continue yield name.split('.')[0]
[ "def", "standard_package_names", "(", ")", ":", "for", "name", "in", "standard_paths", "(", ")", ":", "if", "name", ".", "startswith", "(", "'_'", ")", "or", "'-'", "in", "name", ":", "continue", "if", "'.'", "in", "name", "and", "name", ".", "rsplit",...
Yield standard module names.
[ "Yield", "standard", "module", "names", "." ]
python
test
SHTOOLS/SHTOOLS
examples/python/GlobalSpectralAnalysis/GlobalSpectralAnalysis.py
https://github.com/SHTOOLS/SHTOOLS/blob/9a115cf83002df2ddec6b7f41aeb6be688e285de/examples/python/GlobalSpectralAnalysis/GlobalSpectralAnalysis.py#L30-L63
def example(): """ example that plots the power spectrum of Mars topography data """ # --- input data filename --- infile = os.path.join(os.path.dirname(__file__), '../../ExampleDataFiles/MarsTopo719.shape') coeffs, lmax = shio.shread(infile) # --- plot grid --- grid = expand.MakeGridDH(coeffs, csphase=-1) fig_map = plt.figure() plt.imshow(grid) # ---- compute spectrum ---- ls = np.arange(lmax + 1) pspectrum = spectralanalysis.spectrum(coeffs, unit='per_l') pdensity = spectralanalysis.spectrum(coeffs, unit='per_lm') # ---- plot spectrum ---- fig_spectrum, ax = plt.subplots(1, 1) ax.set_xscale('log') ax.set_yscale('log') ax.set_xlabel('degree l') ax.grid(True, which='both') ax.plot(ls[1:], pspectrum[1:], label='power per degree l') ax.plot(ls[1:], pdensity[1:], label='power per degree l and order m') ax.legend() fig_map.savefig('SHRtopography_mars.png') fig_spectrum.savefig('SHRspectrum_mars.png') print('mars topography and spectrum saved')
[ "def", "example", "(", ")", ":", "# --- input data filename ---", "infile", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "'../../ExampleDataFiles/MarsTopo719.shape'", ")", "coeffs", ",", "lmax", "=", ...
example that plots the power spectrum of Mars topography data
[ "example", "that", "plots", "the", "power", "spectrum", "of", "Mars", "topography", "data" ]
python
train
secdev/scapy
scapy/automaton.py
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/automaton.py#L123-L127
def _release_all(self): """Releases all locks to kill all threads""" for i in self.inputs: i.call_release(True) self.available_lock.release()
[ "def", "_release_all", "(", "self", ")", ":", "for", "i", "in", "self", ".", "inputs", ":", "i", ".", "call_release", "(", "True", ")", "self", ".", "available_lock", ".", "release", "(", ")" ]
Releases all locks to kill all threads
[ "Releases", "all", "locks", "to", "kill", "all", "threads" ]
python
train
iamjarret/pystockfish
pystockfish.py
https://github.com/iamjarret/pystockfish/blob/ae34a4b4d29c577c888b72691fcf0cb5a89b1792/pystockfish.py#L280-L287
def _get_info_pv(info): """ Helper function for _bestmove_get_info. Extracts "pv" field from bestmove's info and returns move sequence in UCI notation. """ search = re.search(pattern=PV_REGEX, string=info) return {"pv": search.group("move_list")}
[ "def", "_get_info_pv", "(", "info", ")", ":", "search", "=", "re", ".", "search", "(", "pattern", "=", "PV_REGEX", ",", "string", "=", "info", ")", "return", "{", "\"pv\"", ":", "search", ".", "group", "(", "\"move_list\"", ")", "}" ]
Helper function for _bestmove_get_info. Extracts "pv" field from bestmove's info and returns move sequence in UCI notation.
[ "Helper", "function", "for", "_bestmove_get_info", "." ]
python
train
aboSamoor/polyglot
polyglot/mapping/base.py
https://github.com/aboSamoor/polyglot/blob/d0d2aa8d06cec4e03bd96618ae960030f7069a17/polyglot/mapping/base.py#L246-L255
def from_vocabfile(filename): """ Construct a CountedVocabulary out of a vocabulary file. Note: File has the following format word1 count1 word2 count2 """ word_count = [x.strip().split() for x in _open(filename, 'r').read().splitlines()] word_count = {w:int(c) for w,c in word_count} return CountedVocabulary(word_count=word_count)
[ "def", "from_vocabfile", "(", "filename", ")", ":", "word_count", "=", "[", "x", ".", "strip", "(", ")", ".", "split", "(", ")", "for", "x", "in", "_open", "(", "filename", ",", "'r'", ")", ".", "read", "(", ")", ".", "splitlines", "(", ")", "]",...
Construct a CountedVocabulary out of a vocabulary file. Note: File has the following format word1 count1 word2 count2
[ "Construct", "a", "CountedVocabulary", "out", "of", "a", "vocabulary", "file", "." ]
python
train
Autodesk/aomi
aomi/validation.py
https://github.com/Autodesk/aomi/blob/84da2dfb0424837adf9c4ddc1aa352e942bb7a4a/aomi/validation.py#L100-L111
def specific_path_check(path, opt): """Will make checks against include/exclude to determine if we actually care about the path in question.""" if opt.exclude: if path in opt.exclude: return False if opt.include: if path not in opt.include: return False return True
[ "def", "specific_path_check", "(", "path", ",", "opt", ")", ":", "if", "opt", ".", "exclude", ":", "if", "path", "in", "opt", ".", "exclude", ":", "return", "False", "if", "opt", ".", "include", ":", "if", "path", "not", "in", "opt", ".", "include", ...
Will make checks against include/exclude to determine if we actually care about the path in question.
[ "Will", "make", "checks", "against", "include", "/", "exclude", "to", "determine", "if", "we", "actually", "care", "about", "the", "path", "in", "question", "." ]
python
train
jonathf/chaospy
chaospy/descriptives/expected.py
https://github.com/jonathf/chaospy/blob/25ecfa7bf5608dc10c0b31d142ded0e3755f5d74/chaospy/descriptives/expected.py#L7-L69
def E(poly, dist=None, **kws): """ Expected value operator. 1st order statistics of a probability distribution or polynomial on a given probability space. Args: poly (Poly, Dist): Input to take expected value on. dist (Dist): Defines the space the expected value is taken on. It is ignored if ``poly`` is a distribution. Returns: (numpy.ndarray): The expected value of the polynomial or distribution, where ``expected.shape == poly.shape``. Examples: >>> dist = chaospy.J(chaospy.Gamma(1, 1), chaospy.Normal(0, 2)) >>> print(chaospy.E(dist)) [1. 0.] >>> x, y = chaospy.variable(2) >>> poly = chaospy.Poly([1, x, y, 10*x*y]) >>> print(chaospy.E(poly, dist)) [1. 1. 0. 0.] """ if not isinstance(poly, (distributions.Dist, polynomials.Poly)): print(type(poly)) print("Approximating expected value...") out = quadrature.quad(poly, dist, veceval=True, **kws) print("done") return out if isinstance(poly, distributions.Dist): dist, poly = poly, polynomials.variable(len(poly)) if not poly.keys: return numpy.zeros(poly.shape, dtype=int) if isinstance(poly, (list, tuple, numpy.ndarray)): return [E(_, dist, **kws) for _ in poly] if poly.dim < len(dist): poly = polynomials.setdim(poly, len(dist)) shape = poly.shape poly = polynomials.flatten(poly) keys = poly.keys mom = dist.mom(numpy.array(keys).T, **kws) A = poly.A if len(dist) == 1: mom = mom[0] out = numpy.zeros(poly.shape) for i in range(len(keys)): out += A[keys[i]]*mom[i] out = numpy.reshape(out, shape) return out
[ "def", "E", "(", "poly", ",", "dist", "=", "None", ",", "*", "*", "kws", ")", ":", "if", "not", "isinstance", "(", "poly", ",", "(", "distributions", ".", "Dist", ",", "polynomials", ".", "Poly", ")", ")", ":", "print", "(", "type", "(", "poly", ...
Expected value operator. 1st order statistics of a probability distribution or polynomial on a given probability space. Args: poly (Poly, Dist): Input to take expected value on. dist (Dist): Defines the space the expected value is taken on. It is ignored if ``poly`` is a distribution. Returns: (numpy.ndarray): The expected value of the polynomial or distribution, where ``expected.shape == poly.shape``. Examples: >>> dist = chaospy.J(chaospy.Gamma(1, 1), chaospy.Normal(0, 2)) >>> print(chaospy.E(dist)) [1. 0.] >>> x, y = chaospy.variable(2) >>> poly = chaospy.Poly([1, x, y, 10*x*y]) >>> print(chaospy.E(poly, dist)) [1. 1. 0. 0.]
[ "Expected", "value", "operator", "." ]
python
train
IdentityPython/SATOSA
src/satosa/frontends/saml2.py
https://github.com/IdentityPython/SATOSA/blob/49da5d4c0ac1a5ebf1a71b4f7aaf04f0e52d8fdb/src/satosa/frontends/saml2.py#L621-L635
def handle_authn_request(self, context, binding_in): """ Loads approved endpoints dynamically See super class satosa.frontends.saml2.SAMLFrontend#handle_authn_request :type context: satosa.context.Context :type binding_in: str :rtype: satosa.response.Response """ target_entity_id = context.target_entity_id_from_path() target_entity_id = urlsafe_b64decode(target_entity_id).decode() context.decorate(Context.KEY_TARGET_ENTITYID, target_entity_id) idp = self._load_idp_dynamic_endpoints(context) return self._handle_authn_request(context, binding_in, idp)
[ "def", "handle_authn_request", "(", "self", ",", "context", ",", "binding_in", ")", ":", "target_entity_id", "=", "context", ".", "target_entity_id_from_path", "(", ")", "target_entity_id", "=", "urlsafe_b64decode", "(", "target_entity_id", ")", ".", "decode", "(", ...
Loads approved endpoints dynamically See super class satosa.frontends.saml2.SAMLFrontend#handle_authn_request :type context: satosa.context.Context :type binding_in: str :rtype: satosa.response.Response
[ "Loads", "approved", "endpoints", "dynamically", "See", "super", "class", "satosa", ".", "frontends", ".", "saml2", ".", "SAMLFrontend#handle_authn_request" ]
python
train
pypa/pipenv
pipenv/vendor/six.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/six.py#L497-L505
def remove_move(name): """Remove item from six.moves.""" try: delattr(_MovedItems, name) except AttributeError: try: del moves.__dict__[name] except KeyError: raise AttributeError("no such move, %r" % (name,))
[ "def", "remove_move", "(", "name", ")", ":", "try", ":", "delattr", "(", "_MovedItems", ",", "name", ")", "except", "AttributeError", ":", "try", ":", "del", "moves", ".", "__dict__", "[", "name", "]", "except", "KeyError", ":", "raise", "AttributeError", ...
Remove item from six.moves.
[ "Remove", "item", "from", "six", ".", "moves", "." ]
python
train
kytos/python-openflow
pyof/foundation/basic_types.py
https://github.com/kytos/python-openflow/blob/4f2d0d08ab28e102ed88fe57a4ee17729f1e1bb7/pyof/foundation/basic_types.py#L724-L743
def append(self, item): """Append one item to the list. Args: item: Item to be appended. Raises: :exc:`~.exceptions.WrongListItemType`: If an item has a different type than the first item to be stored. """ if isinstance(item, list): self.extend(item) elif not self: list.append(self, item) elif item.__class__ == self[0].__class__: list.append(self, item) else: raise exceptions.WrongListItemType(item.__class__.__name__, self[0].__class__.__name__)
[ "def", "append", "(", "self", ",", "item", ")", ":", "if", "isinstance", "(", "item", ",", "list", ")", ":", "self", ".", "extend", "(", "item", ")", "elif", "not", "self", ":", "list", ".", "append", "(", "self", ",", "item", ")", "elif", "item"...
Append one item to the list. Args: item: Item to be appended. Raises: :exc:`~.exceptions.WrongListItemType`: If an item has a different type than the first item to be stored.
[ "Append", "one", "item", "to", "the", "list", "." ]
python
train
briancappello/flask-unchained
flask_unchained/bundles/security/commands/users.py
https://github.com/briancappello/flask-unchained/blob/4d536cb90e2cc4829c1c05f2c74d3e22901a1399/flask_unchained/bundles/security/commands/users.py#L168-L179
def add_role_to_user(user, role): """ Add a role to a user. """ user = _query_to_user(user) role = _query_to_role(role) if click.confirm(f'Are you sure you want to add {role!r} to {user!r}?'): user.roles.append(role) user_manager.save(user, commit=True) click.echo(f'Successfully added {role!r} to {user!r}') else: click.echo('Cancelled.')
[ "def", "add_role_to_user", "(", "user", ",", "role", ")", ":", "user", "=", "_query_to_user", "(", "user", ")", "role", "=", "_query_to_role", "(", "role", ")", "if", "click", ".", "confirm", "(", "f'Are you sure you want to add {role!r} to {user!r}?'", ")", ":"...
Add a role to a user.
[ "Add", "a", "role", "to", "a", "user", "." ]
python
train
bio2bel/bio2bel
src/bio2bel/cli.py
https://github.com/bio2bel/bio2bel/blob/d80762d891fa18b248709ff0b0f97ebb65ec64c2/src/bio2bel/cli.py#L45-L56
def _iterate_managers(connection, skip): """Iterate over instantiated managers.""" for idx, name, manager_cls in _iterate_manage_classes(skip): if name in skip: continue try: manager = manager_cls(connection=connection) except TypeError as e: click.secho(f'Could not instantiate {name}: {e}', fg='red') else: yield idx, name, manager
[ "def", "_iterate_managers", "(", "connection", ",", "skip", ")", ":", "for", "idx", ",", "name", ",", "manager_cls", "in", "_iterate_manage_classes", "(", "skip", ")", ":", "if", "name", "in", "skip", ":", "continue", "try", ":", "manager", "=", "manager_c...
Iterate over instantiated managers.
[ "Iterate", "over", "instantiated", "managers", "." ]
python
valid
insightindustry/validator-collection
validator_collection/checkers.py
https://github.com/insightindustry/validator-collection/blob/8c8047a0fa36cc88a021771279898278c4cc98e3/validator_collection/checkers.py#L1454-L1473
def is_ip_address(value, **kwargs): """Indicate whether ``value`` is a valid IP address (version 4 or version 6). :param value: The value to evaluate. :returns: ``True`` if ``value`` is valid, ``False`` if it is not. :rtype: :class:`bool <python:bool>` :raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters or duplicates keyword parameters passed to the underlying validator """ try: value = validators.ip_address(value, **kwargs) except SyntaxError as error: raise error except Exception: return False return True
[ "def", "is_ip_address", "(", "value", ",", "*", "*", "kwargs", ")", ":", "try", ":", "value", "=", "validators", ".", "ip_address", "(", "value", ",", "*", "*", "kwargs", ")", "except", "SyntaxError", "as", "error", ":", "raise", "error", "except", "Ex...
Indicate whether ``value`` is a valid IP address (version 4 or version 6). :param value: The value to evaluate. :returns: ``True`` if ``value`` is valid, ``False`` if it is not. :rtype: :class:`bool <python:bool>` :raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters or duplicates keyword parameters passed to the underlying validator
[ "Indicate", "whether", "value", "is", "a", "valid", "IP", "address", "(", "version", "4", "or", "version", "6", ")", "." ]
python
train
aeguana/PyFileMaker
PyFileMaker/FMServer.py
https://github.com/aeguana/PyFileMaker/blob/ef269b52a97e329d91da3c4851ddac800d7fd7e6/PyFileMaker/FMServer.py#L190-L208
def _addSortParam(self, field, order=''): """Adds a sort parameter, order have to be in ['ascend', 'ascending','descend', 'descending','custom']""" if order != '': validSortOrders = { 'ascend':'ascend', 'ascending':'ascend', '<':'ascend', 'descend':'descend', 'descending':'descend', '>':'descend' } if not string.lower(order) in validSortOrders.keys(): raise FMError, 'Invalid sort order for "' + field + '"' self._sortParams.append( [field, validSortOrders[string.lower(order)]] )
[ "def", "_addSortParam", "(", "self", ",", "field", ",", "order", "=", "''", ")", ":", "if", "order", "!=", "''", ":", "validSortOrders", "=", "{", "'ascend'", ":", "'ascend'", ",", "'ascending'", ":", "'ascend'", ",", "'<'", ":", "'ascend'", ",", "'des...
Adds a sort parameter, order have to be in ['ascend', 'ascending','descend', 'descending','custom']
[ "Adds", "a", "sort", "parameter", "order", "have", "to", "be", "in", "[", "ascend", "ascending", "descend", "descending", "custom", "]" ]
python
train
galaxy-genome-annotation/python-apollo
arrow/commands/cannedkeys/add_key.py
https://github.com/galaxy-genome-annotation/python-apollo/blob/2bc9991302abe4402ec2885dcaac35915475b387/arrow/commands/cannedkeys/add_key.py#L16-L23
def cli(ctx, key, metadata=""): """Add a canned key Output: A dictionnary containing canned key description """ return ctx.gi.cannedkeys.add_key(key, metadata=metadata)
[ "def", "cli", "(", "ctx", ",", "key", ",", "metadata", "=", "\"\"", ")", ":", "return", "ctx", ".", "gi", ".", "cannedkeys", ".", "add_key", "(", "key", ",", "metadata", "=", "metadata", ")" ]
Add a canned key Output: A dictionnary containing canned key description
[ "Add", "a", "canned", "key" ]
python
train
nyergler/hieroglyph
src/hieroglyph/html.py
https://github.com/nyergler/hieroglyph/blob/1ef062fad5060006566f8d6bd3b5a231ac7e0488/src/hieroglyph/html.py#L92-L100
def slide_path(builder, pagename=None): """Calculate the relative path to the Slides for pagename.""" return builder.get_relative_uri( pagename or builder.current_docname, os.path.join( builder.app.config.slide_relative_path, pagename or builder.current_docname, ))
[ "def", "slide_path", "(", "builder", ",", "pagename", "=", "None", ")", ":", "return", "builder", ".", "get_relative_uri", "(", "pagename", "or", "builder", ".", "current_docname", ",", "os", ".", "path", ".", "join", "(", "builder", ".", "app", ".", "co...
Calculate the relative path to the Slides for pagename.
[ "Calculate", "the", "relative", "path", "to", "the", "Slides", "for", "pagename", "." ]
python
train
portfors-lab/sparkle
sparkle/gui/stim/qauto_parameter_model.py
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/stim/qauto_parameter_model.py#L220-L223
def toggleSelection(self, index, comp): """Toggles a component in or out of the currently selected parameter's compnents list""" self.model.toggleSelection(index.row(), comp)
[ "def", "toggleSelection", "(", "self", ",", "index", ",", "comp", ")", ":", "self", ".", "model", ".", "toggleSelection", "(", "index", ".", "row", "(", ")", ",", "comp", ")" ]
Toggles a component in or out of the currently selected parameter's compnents list
[ "Toggles", "a", "component", "in", "or", "out", "of", "the", "currently", "selected", "parameter", "s", "compnents", "list" ]
python
train
bohea/sanic-limiter
sanic_limiter/extension.py
https://github.com/bohea/sanic-limiter/blob/54c9fc4a3a3f1a9bb69367262637d07701ae5694/sanic_limiter/extension.py#L318-L335
def shared_limit(self, limit_value, scope, key_func=None, error_message=None, exempt_when=None): """ decorator to be applied to multiple routes sharing the same rate limit. :param limit_value: rate limit string or a callable that returns a string. :ref:`ratelimit-string` for more details. :param scope: a string or callable that returns a string for defining the rate limiting scope. :param function key_func: function/lambda to extract the unique identifier for the rate limit. defaults to remote address of the request. :param error_message: string (or callable that returns one) to override the error message used in the response. """ return self.__limit_decorator( limit_value, key_func, True, scope, error_message=error_message, exempt_when=exempt_when )
[ "def", "shared_limit", "(", "self", ",", "limit_value", ",", "scope", ",", "key_func", "=", "None", ",", "error_message", "=", "None", ",", "exempt_when", "=", "None", ")", ":", "return", "self", ".", "__limit_decorator", "(", "limit_value", ",", "key_func",...
decorator to be applied to multiple routes sharing the same rate limit. :param limit_value: rate limit string or a callable that returns a string. :ref:`ratelimit-string` for more details. :param scope: a string or callable that returns a string for defining the rate limiting scope. :param function key_func: function/lambda to extract the unique identifier for the rate limit. defaults to remote address of the request. :param error_message: string (or callable that returns one) to override the error message used in the response.
[ "decorator", "to", "be", "applied", "to", "multiple", "routes", "sharing", "the", "same", "rate", "limit", "." ]
python
train
agile-geoscience/welly
welly/curve.py
https://github.com/agile-geoscience/welly/blob/ed4c991011d6290938fef365553041026ba29f42/welly/curve.py#L817-L836
def apply(self, window_length, samples=True, func1d=None): """ Runs any kind of function over a window. Args: window_length (int): the window length. Required. samples (bool): window length is in samples. Use False for a window length given in metres. func1d (function): a function that takes a 1D array and returns a scalar. Default: ``np.mean()``. Returns: Curve. """ window_length /= 1 if samples else self.step if func1d is None: func1d = np.mean params = self.__dict__.copy() out = self._rolling_window(int(window_length), func1d) return Curve(out, params=params)
[ "def", "apply", "(", "self", ",", "window_length", ",", "samples", "=", "True", ",", "func1d", "=", "None", ")", ":", "window_length", "/=", "1", "if", "samples", "else", "self", ".", "step", "if", "func1d", "is", "None", ":", "func1d", "=", "np", "....
Runs any kind of function over a window. Args: window_length (int): the window length. Required. samples (bool): window length is in samples. Use False for a window length given in metres. func1d (function): a function that takes a 1D array and returns a scalar. Default: ``np.mean()``. Returns: Curve.
[ "Runs", "any", "kind", "of", "function", "over", "a", "window", "." ]
python
train
RudolfCardinal/pythonlib
cardinal_pythonlib/sphinxtools.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sphinxtools.py#L709-L737
def get_sorted_source_files( self, source_filenames_or_globs: Union[str, List[str]], recursive: bool = True) -> List[str]: """ Returns a sorted list of filenames to process, from a filename, a glob string, or a list of filenames/globs. Args: source_filenames_or_globs: filename/glob, or list of them recursive: use :func:`glob.glob` in recursive mode? Returns: sorted list of files to process """ if isinstance(source_filenames_or_globs, str): source_filenames_or_globs = [source_filenames_or_globs] final_filenames = [] # type: List[str] for sfg in source_filenames_or_globs: sfg_expanded = expanduser(sfg) log.debug("Looking for: {!r}", sfg_expanded) for filename in glob.glob(sfg_expanded, recursive=recursive): log.debug("Trying: {!r}", filename) if self.should_exclude(filename): log.info("Skipping file {!r}", filename) continue final_filenames.append(filename) final_filenames.sort() return final_filenames
[ "def", "get_sorted_source_files", "(", "self", ",", "source_filenames_or_globs", ":", "Union", "[", "str", ",", "List", "[", "str", "]", "]", ",", "recursive", ":", "bool", "=", "True", ")", "->", "List", "[", "str", "]", ":", "if", "isinstance", "(", ...
Returns a sorted list of filenames to process, from a filename, a glob string, or a list of filenames/globs. Args: source_filenames_or_globs: filename/glob, or list of them recursive: use :func:`glob.glob` in recursive mode? Returns: sorted list of files to process
[ "Returns", "a", "sorted", "list", "of", "filenames", "to", "process", "from", "a", "filename", "a", "glob", "string", "or", "a", "list", "of", "filenames", "/", "globs", "." ]
python
train
PythonCharmers/python-future
src/future/backports/xmlrpc/server.py
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/xmlrpc/server.py#L128-L150
def resolve_dotted_attribute(obj, attr, allow_dotted_names=True): """resolve_dotted_attribute(a, 'b.c.d') => a.b.c.d Resolves a dotted attribute name to an object. Raises an AttributeError if any attribute in the chain starts with a '_'. If the optional allow_dotted_names argument is false, dots are not supported and this function operates similar to getattr(obj, attr). """ if allow_dotted_names: attrs = attr.split('.') else: attrs = [attr] for i in attrs: if i.startswith('_'): raise AttributeError( 'attempt to access private attribute "%s"' % i ) else: obj = getattr(obj,i) return obj
[ "def", "resolve_dotted_attribute", "(", "obj", ",", "attr", ",", "allow_dotted_names", "=", "True", ")", ":", "if", "allow_dotted_names", ":", "attrs", "=", "attr", ".", "split", "(", "'.'", ")", "else", ":", "attrs", "=", "[", "attr", "]", "for", "i", ...
resolve_dotted_attribute(a, 'b.c.d') => a.b.c.d Resolves a dotted attribute name to an object. Raises an AttributeError if any attribute in the chain starts with a '_'. If the optional allow_dotted_names argument is false, dots are not supported and this function operates similar to getattr(obj, attr).
[ "resolve_dotted_attribute", "(", "a", "b", ".", "c", ".", "d", ")", "=", ">", "a", ".", "b", ".", "c", ".", "d" ]
python
train
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/MSCommon/vc.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/MSCommon/vc.py#L302-L341
def find_batch_file(env,msvc_version,host_arch,target_arch): """ Find the location of the batch script which should set up the compiler for any TARGET_ARCH whose compilers were installed by Visual Studio/VCExpress """ pdir = find_vc_pdir(msvc_version) if pdir is None: raise NoVersionFound("No version of Visual Studio found") debug('vc.py: find_batch_file() pdir:{}'.format(pdir)) # filter out e.g. "Exp" from the version name msvc_ver_numeric = ''.join([x for x in msvc_version if x in string_digits + "."]) vernum = float(msvc_ver_numeric) if 7 <= vernum < 8: pdir = os.path.join(pdir, os.pardir, "Common7", "Tools") batfilename = os.path.join(pdir, "vsvars32.bat") elif vernum < 7: pdir = os.path.join(pdir, "Bin") batfilename = os.path.join(pdir, "vcvars32.bat") elif 8 <= vernum <= 14: batfilename = os.path.join(pdir, "vcvarsall.bat") else: # vernum >= 14.1 VS2017 and above batfilename = os.path.join(pdir, "Auxiliary", "Build", "vcvarsall.bat") if not os.path.exists(batfilename): debug("Not found: %s" % batfilename) batfilename = None installed_sdks=get_installed_sdks() for _sdk in installed_sdks: sdk_bat_file = _sdk.get_sdk_vc_script(host_arch,target_arch) if not sdk_bat_file: debug("vc.py:find_batch_file() not found:%s"%_sdk) else: sdk_bat_file_path = os.path.join(pdir,sdk_bat_file) if os.path.exists(sdk_bat_file_path): debug('vc.py:find_batch_file() sdk_bat_file_path:%s'%sdk_bat_file_path) return (batfilename,sdk_bat_file_path) return (batfilename,None)
[ "def", "find_batch_file", "(", "env", ",", "msvc_version", ",", "host_arch", ",", "target_arch", ")", ":", "pdir", "=", "find_vc_pdir", "(", "msvc_version", ")", "if", "pdir", "is", "None", ":", "raise", "NoVersionFound", "(", "\"No version of Visual Studio found\...
Find the location of the batch script which should set up the compiler for any TARGET_ARCH whose compilers were installed by Visual Studio/VCExpress
[ "Find", "the", "location", "of", "the", "batch", "script", "which", "should", "set", "up", "the", "compiler", "for", "any", "TARGET_ARCH", "whose", "compilers", "were", "installed", "by", "Visual", "Studio", "/", "VCExpress" ]
python
train
bede/tictax
tictax/cli.py
https://github.com/bede/tictax/blob/acc9811be3a8b5ad905daf4b5d413f2c5e6ad06c/tictax/cli.py#L66-L73
def matrix(fasta_path: 'path to tictax annotated fasta input', scafstats_path: 'path to BBMap scaftstats file'): ''' Generate taxonomic count matrix from tictax classified contigs ''' records = SeqIO.parse(fasta_path, 'fasta') df = tictax.matrix(records, scafstats_path) df.to_csv(sys.stdout)
[ "def", "matrix", "(", "fasta_path", ":", "'path to tictax annotated fasta input'", ",", "scafstats_path", ":", "'path to BBMap scaftstats file'", ")", ":", "records", "=", "SeqIO", ".", "parse", "(", "fasta_path", ",", "'fasta'", ")", "df", "=", "tictax", ".", "ma...
Generate taxonomic count matrix from tictax classified contigs
[ "Generate", "taxonomic", "count", "matrix", "from", "tictax", "classified", "contigs" ]
python
train
edx/edx-oauth2-provider
edx_oauth2_provider/oidc/handlers.py
https://github.com/edx/edx-oauth2-provider/blob/73e7569a8369e74c345022ccba634365e24befab/edx_oauth2_provider/oidc/handlers.py#L83-L88
def now(self): """ Capture time. """ if self._now is None: # Compute the current time only once per instance self._now = datetime.utcnow() return self._now
[ "def", "now", "(", "self", ")", ":", "if", "self", ".", "_now", "is", "None", ":", "# Compute the current time only once per instance", "self", ".", "_now", "=", "datetime", ".", "utcnow", "(", ")", "return", "self", ".", "_now" ]
Capture time.
[ "Capture", "time", "." ]
python
train
albu/albumentations
albumentations/augmentations/bbox_utils.py
https://github.com/albu/albumentations/blob/b31393cd6126516d37a84e44c879bd92c68ffc93/albumentations/augmentations/bbox_utils.py#L122-L152
def convert_bbox_from_albumentations(bbox, target_format, rows, cols, check_validity=False): """Convert a bounding box from the format used by albumentations to a format, specified in `target_format`. Args: bbox (list): bounding box with coordinates in the format used by albumentations target_format (str): required format of the output bounding box. Should be 'coco' or 'pascal_voc'. rows (int): image height cols (int): image width check_validity (bool): check if all boxes are valid boxes Note: The `coco` format of a bounding box looks like `[x_min, y_min, width, height]`, e.g. [97, 12, 150, 200]. The `pascal_voc` format of a bounding box looks like `[x_min, y_min, x_max, y_max]`, e.g. [97, 12, 247, 212]. Raises: ValueError: if `target_format` is not equal to `coco` or `pascal_voc`. """ if target_format not in {'coco', 'pascal_voc'}: raise ValueError( "Unknown target_format {}. Supported formats are: 'coco' and 'pascal_voc'".format(target_format) ) if check_validity: check_bbox(bbox) bbox = denormalize_bbox(bbox, rows, cols) if target_format == 'coco': x_min, y_min, x_max, y_max = bbox[:4] width = x_max - x_min height = y_max - y_min bbox = [x_min, y_min, width, height] + list(bbox[4:]) return bbox
[ "def", "convert_bbox_from_albumentations", "(", "bbox", ",", "target_format", ",", "rows", ",", "cols", ",", "check_validity", "=", "False", ")", ":", "if", "target_format", "not", "in", "{", "'coco'", ",", "'pascal_voc'", "}", ":", "raise", "ValueError", "(",...
Convert a bounding box from the format used by albumentations to a format, specified in `target_format`. Args: bbox (list): bounding box with coordinates in the format used by albumentations target_format (str): required format of the output bounding box. Should be 'coco' or 'pascal_voc'. rows (int): image height cols (int): image width check_validity (bool): check if all boxes are valid boxes Note: The `coco` format of a bounding box looks like `[x_min, y_min, width, height]`, e.g. [97, 12, 150, 200]. The `pascal_voc` format of a bounding box looks like `[x_min, y_min, x_max, y_max]`, e.g. [97, 12, 247, 212]. Raises: ValueError: if `target_format` is not equal to `coco` or `pascal_voc`.
[ "Convert", "a", "bounding", "box", "from", "the", "format", "used", "by", "albumentations", "to", "a", "format", "specified", "in", "target_format", "." ]
python
train
pypa/pipenv
pipenv/vendor/jinja2/compiler.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/jinja2/compiler.py#L353-L358
def start_write(self, frame, node=None): """Yield or write into the frame buffer.""" if frame.buffer is None: self.writeline('yield ', node) else: self.writeline('%s.append(' % frame.buffer, node)
[ "def", "start_write", "(", "self", ",", "frame", ",", "node", "=", "None", ")", ":", "if", "frame", ".", "buffer", "is", "None", ":", "self", ".", "writeline", "(", "'yield '", ",", "node", ")", "else", ":", "self", ".", "writeline", "(", "'%s.append...
Yield or write into the frame buffer.
[ "Yield", "or", "write", "into", "the", "frame", "buffer", "." ]
python
train
MechanicalSoup/MechanicalSoup
mechanicalsoup/form.py
https://github.com/MechanicalSoup/MechanicalSoup/blob/027a270febf5bcda6a75db60ea9838d631370f4b/mechanicalsoup/form.py#L239-L278
def set(self, name, value, force=False): """Set a form element identified by ``name`` to a specified ``value``. The type of element (input, textarea, select, ...) does not need to be given; it is inferred by the following methods: :func:`~Form.set_checkbox`, :func:`~Form.set_radio`, :func:`~Form.set_input`, :func:`~Form.set_textarea`, :func:`~Form.set_select`. If none of these methods find a matching element, then if ``force`` is True, a new element (``<input type="text" ...>``) will be added using :func:`~Form.new_control`. Example: filling-in a login/password form with EULA checkbox .. code-block:: python form.set("login", username) form.set("password", password) form.set("eula-checkbox", True) Example: uploading a file through a ``<input type="file" name="tagname">`` field (provide the path to the local file, and its content will be uploaded): .. code-block:: python form.set("tagname") = path_to_local_file """ for func in ("checkbox", "radio", "input", "textarea", "select"): try: getattr(self, "set_" + func)({name: value}) return except InvalidFormMethod: pass if force: self.new_control('text', name, value=value) return raise LinkNotFoundError("No valid element named " + name)
[ "def", "set", "(", "self", ",", "name", ",", "value", ",", "force", "=", "False", ")", ":", "for", "func", "in", "(", "\"checkbox\"", ",", "\"radio\"", ",", "\"input\"", ",", "\"textarea\"", ",", "\"select\"", ")", ":", "try", ":", "getattr", "(", "s...
Set a form element identified by ``name`` to a specified ``value``. The type of element (input, textarea, select, ...) does not need to be given; it is inferred by the following methods: :func:`~Form.set_checkbox`, :func:`~Form.set_radio`, :func:`~Form.set_input`, :func:`~Form.set_textarea`, :func:`~Form.set_select`. If none of these methods find a matching element, then if ``force`` is True, a new element (``<input type="text" ...>``) will be added using :func:`~Form.new_control`. Example: filling-in a login/password form with EULA checkbox .. code-block:: python form.set("login", username) form.set("password", password) form.set("eula-checkbox", True) Example: uploading a file through a ``<input type="file" name="tagname">`` field (provide the path to the local file, and its content will be uploaded): .. code-block:: python form.set("tagname") = path_to_local_file
[ "Set", "a", "form", "element", "identified", "by", "name", "to", "a", "specified", "value", ".", "The", "type", "of", "element", "(", "input", "textarea", "select", "...", ")", "does", "not", "need", "to", "be", "given", ";", "it", "is", "inferred", "b...
python
train
dask/dask-ml
dask_ml/model_selection/_search.py
https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/model_selection/_search.py#L225-L240
def normalize_params(params): """Take a list of dictionaries, and tokenize/normalize.""" # Collect a set of all fields fields = set() for p in params: fields.update(p) fields = sorted(fields) params2 = list(pluck(fields, params, MISSING)) # Non-basic types (including MISSING) are unique to their id tokens = [ tuple(x if isinstance(x, (int, float, str)) else id(x) for x in p) for p in params2 ] return fields, tokens, params2
[ "def", "normalize_params", "(", "params", ")", ":", "# Collect a set of all fields", "fields", "=", "set", "(", ")", "for", "p", "in", "params", ":", "fields", ".", "update", "(", "p", ")", "fields", "=", "sorted", "(", "fields", ")", "params2", "=", "li...
Take a list of dictionaries, and tokenize/normalize.
[ "Take", "a", "list", "of", "dictionaries", "and", "tokenize", "/", "normalize", "." ]
python
train
zerok/zs.bibtex
src/zs/bibtex/parser.py
https://github.com/zerok/zs.bibtex/blob/ac9594b36b9d4ec9a3593c0dc7c1a35e5aea04d9/src/zs/bibtex/parser.py#L37-L52
def parse_entry(source, loc, tokens): """ Converts the tokens of an entry into an Entry instance. If no applicable type is available, an UnsupportedEntryType exception is raised. """ type_ = tokens[1].lower() entry_type = structures.TypeRegistry.get_type(type_) if entry_type is None or not issubclass(entry_type, structures.Entry): raise exceptions.UnsupportedEntryType( "%s is not a supported entry type" % type_ ) new_entry = entry_type() new_entry.name = tokens[3] for key, value in [t for t in tokens[4:-1] if t != ',']: new_entry[key] = value return new_entry
[ "def", "parse_entry", "(", "source", ",", "loc", ",", "tokens", ")", ":", "type_", "=", "tokens", "[", "1", "]", ".", "lower", "(", ")", "entry_type", "=", "structures", ".", "TypeRegistry", ".", "get_type", "(", "type_", ")", "if", "entry_type", "is",...
Converts the tokens of an entry into an Entry instance. If no applicable type is available, an UnsupportedEntryType exception is raised.
[ "Converts", "the", "tokens", "of", "an", "entry", "into", "an", "Entry", "instance", ".", "If", "no", "applicable", "type", "is", "available", "an", "UnsupportedEntryType", "exception", "is", "raised", "." ]
python
train
Robpol86/terminaltables
terminaltables/base_table.py
https://github.com/Robpol86/terminaltables/blob/ad8f46e50afdbaea377fc1f713bc0e7a31c4fccc/terminaltables/base_table.py#L112-L169
def gen_row_lines(self, row, style, inner_widths, height): r"""Combine cells in row and group them into lines with vertical borders. Caller is expected to pass yielded lines to ''.join() to combine them into a printable line. Caller must append newline character to the end of joined line. In: ['Row One Column One', 'Two', 'Three'] Out: [ ('|', ' Row One Column One ', '|', ' Two ', '|', ' Three ', '|'), ] In: ['Row One\nColumn One', 'Two', 'Three'], Out: [ ('|', ' Row One ', '|', ' Two ', '|', ' Three ', '|'), ('|', ' Column One ', '|', ' ', '|', ' ', '|'), ] :param iter row: One row in the table. List of cells. :param str style: Type of border characters to use. :param iter inner_widths: List of widths (no padding) for each column. :param int height: Inner height (no padding) (number of lines) to expand row to. :return: Yields lines split into components in a list. Caller must ''.join() line. """ cells_in_row = list() # Resize row if it doesn't have enough cells. if len(row) != len(inner_widths): row = row + [''] * (len(inner_widths) - len(row)) # Pad and align each cell. Split each cell into lines to support multi-line cells. for i, cell in enumerate(row): align = (self.justify_columns.get(i),) inner_dimensions = (inner_widths[i], height) padding = (self.padding_left, self.padding_right, 0, 0) cells_in_row.append(align_and_pad_cell(cell, align, inner_dimensions, padding)) # Determine border characters. 
if style == 'heading': left = self.CHAR_H_OUTER_LEFT_VERTICAL if self.outer_border else '' center = self.CHAR_H_INNER_VERTICAL if self.inner_column_border else '' right = self.CHAR_H_OUTER_RIGHT_VERTICAL if self.outer_border else '' elif style == 'footing': left = self.CHAR_F_OUTER_LEFT_VERTICAL if self.outer_border else '' center = self.CHAR_F_INNER_VERTICAL if self.inner_column_border else '' right = self.CHAR_F_OUTER_RIGHT_VERTICAL if self.outer_border else '' else: left = self.CHAR_OUTER_LEFT_VERTICAL if self.outer_border else '' center = self.CHAR_INNER_VERTICAL if self.inner_column_border else '' right = self.CHAR_OUTER_RIGHT_VERTICAL if self.outer_border else '' # Yield each line. for line in build_row(cells_in_row, left, center, right): yield line
[ "def", "gen_row_lines", "(", "self", ",", "row", ",", "style", ",", "inner_widths", ",", "height", ")", ":", "cells_in_row", "=", "list", "(", ")", "# Resize row if it doesn't have enough cells.", "if", "len", "(", "row", ")", "!=", "len", "(", "inner_widths",...
r"""Combine cells in row and group them into lines with vertical borders. Caller is expected to pass yielded lines to ''.join() to combine them into a printable line. Caller must append newline character to the end of joined line. In: ['Row One Column One', 'Two', 'Three'] Out: [ ('|', ' Row One Column One ', '|', ' Two ', '|', ' Three ', '|'), ] In: ['Row One\nColumn One', 'Two', 'Three'], Out: [ ('|', ' Row One ', '|', ' Two ', '|', ' Three ', '|'), ('|', ' Column One ', '|', ' ', '|', ' ', '|'), ] :param iter row: One row in the table. List of cells. :param str style: Type of border characters to use. :param iter inner_widths: List of widths (no padding) for each column. :param int height: Inner height (no padding) (number of lines) to expand row to. :return: Yields lines split into components in a list. Caller must ''.join() line.
[ "r", "Combine", "cells", "in", "row", "and", "group", "them", "into", "lines", "with", "vertical", "borders", "." ]
python
train
clalancette/pycdlib
pycdlib/eltorito.py
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/eltorito.py#L873-L966
def hdmbrcheck(disk_mbr, sector_count, bootable):
    # type: (bytes, int, bool) -> int
    '''
    Sanity check an El Torito Hard Drive Master Boot Record (HDMBR).

    On success, return the system type (also known as the partition type)
    of the single used partition entry, suitable for feeding into the rest
    of the El Torito methods.  On failure, raise an exception.

    Parameters:
     disk_mbr - The 512-byte MBR data to inspect.
     sector_count - The number of sectors expected in the MBR.
     bootable - Whether this MBR is bootable.
    Returns:
     The system (or partition) type of the used partition entry.
    '''
    # Expected layout is a standard x86 MBR
    # (https://en.wikipedia.org/wiki/Master_boot_record#Sector_layout):
    # 446 bytes of bootstrap code, four 16-byte partition entries, then
    # the 0x55 0xAA signature.  Each partition entry is:
    #   status(1) CHS-start(3) type(1) CHS-end(3) start-LBA(4) sectors(4)
    # where a CHS triple is head, sector(bits 0-5)+cyl-high(bits 6-7),
    # cyl-low.
    PARTITION_TYPE_UNUSED = 0x0
    PARTITION_STATUS_ACTIVE = 0x80

    def _quiet_warning(text):
        # genisoimage prints a warning in these situations; we have no other
        # warning output in the codebase and an exception would be too
        # fragile, so the message is deliberately discarded.
        with open(os.devnull, 'w') as devnull:
            print(text, file=devnull)

    fields = struct.unpack_from('=446s16s16s16s16sBB', disk_mbr, 0)
    if fields[5] != 0x55 or fields[6] != 0xAA:
        raise pycdlibexception.PyCdlibInvalidInput('Invalid magic on HD MBR')

    system_type = PARTITION_TYPE_UNUSED
    for entry in fields[1:5]:
        (status, start_head, start_seccyl, start_cyl, parttype,
         end_head, end_seccyl, end_cyl,
         _lba, _num_sectors) = struct.unpack('=BBBBBBBBLL', entry)
        if parttype == PARTITION_TYPE_UNUSED:
            continue
        if system_type != PARTITION_TYPE_UNUSED:
            raise pycdlibexception.PyCdlibInvalidInput('Boot image has multiple partitions')

        if bootable and status != PARTITION_STATUS_ACTIVE:
            _quiet_warning('Warning: partition not marked active')

        start_cylinder = ((start_seccyl & 0xC0) << 10) | start_cyl
        start_sector = start_seccyl & 0x3f
        if start_cylinder != 0 or start_head != 1 or start_sector != 1:
            _quiet_warning('Warning: partition does not start at 0/1/1')

        end_cylinder = ((end_seccyl & 0xC0) << 10) | end_cyl
        end_sector = end_seccyl & 0x3f
        geometry_sectors = (end_cylinder + 1) * (end_head + 1) * end_sector
        if sector_count != geometry_sectors:
            _quiet_warning('Warning: image size does not match geometry')

        system_type = parttype

    if system_type == PARTITION_TYPE_UNUSED:
        raise pycdlibexception.PyCdlibInvalidInput('Boot image has no partitions')
    return system_type
[ "def", "hdmbrcheck", "(", "disk_mbr", ",", "sector_count", ",", "bootable", ")", ":", "# type: (bytes, int, bool) -> int", "# The MBR that we want to see to do hd emulation boot for El Torito is a standard", "# x86 MBR, documented here:", "# https://en.wikipedia.org/wiki/Master_boot_record...
A function to sanity check an El Torito Hard Drive Master Boot Record (HDMBR). On success, it returns the system_type (also known as the partition type) that should be fed into the rest of the El Torito methods. On failure, it raises an exception. Parameters: disk_mbr - The data to look in. sector_count - The number of sectors expected in the MBR. bootable - Whether this MBR is bootable. Returns: The system (or partition) type that should be fed into the rest of El Torito.
[ "A", "function", "to", "sanity", "check", "an", "El", "Torito", "Hard", "Drive", "Master", "Boot", "Record", "(", "HDMBR", ")", ".", "On", "success", "it", "returns", "the", "system_type", "(", "also", "known", "as", "the", "partition", "type", ")", "tha...
python
train
ArduPilot/MAVProxy
MAVProxy/modules/mavproxy_example.py
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_example.py#L74-L83
def idle_task(self):
    '''called rapidly by mavproxy'''
    now = time.time()
    # Only act once per boredom interval; bail out early otherwise.
    if now - self.last_bored <= self.boredom_interval:
        return
    self.last_bored = now
    message = self.boredom_message()
    self.say("%s: %s" % (self.name, message))
    # See if whatever we're connected to would like to play:
    self.master.mav.statustext_send(mavutil.mavlink.MAV_SEVERITY_NOTICE,
                                    message)
[ "def", "idle_task", "(", "self", ")", ":", "now", "=", "time", ".", "time", "(", ")", "if", "now", "-", "self", ".", "last_bored", ">", "self", ".", "boredom_interval", ":", "self", ".", "last_bored", "=", "now", "message", "=", "self", ".", "boredom...
called rapidly by mavproxy
[ "called", "rapidly", "by", "mavproxy" ]
python
train
trsqxyz/glitch
glitch/glitch.py
https://github.com/trsqxyz/glitch/blob/b2cef7700a09660ff8a79ae587480680bef65a88/glitch/glitch.py#L114-L123
def decrease(self, infile):
    '''Decrease: delete one byte at a random position in the payload.

    The first 31 bytes — presumably a fixed file header this glitcher
    protects (TODO confirm against the other glitch methods) — are kept
    untouched; a single byte is removed from the remainder.

    :param infile: the raw file contents (a bytes/str sequence).
    :returns: ``infile`` with one payload byte removed, or ``infile``
        unchanged when there is no payload byte to delete.
    '''
    gf = infile[31:]
    # Nothing after the protected header -- nothing we can delete.
    if not gf:
        return infile
    # Bug fix: the original called random.randint(len(gf) - 1, 31), whose
    # bounds are reversed -- for any payload longer than 32 bytes it raised
    # ValueError and silently returned the input unchanged (never
    # glitching), and when it did run the index could exceed len(gf),
    # making the deletion a no-op.  Pick a valid index instead.
    index = random.randint(0, len(gf) - 1)
    gf = gf[:index] + gf[index + 1:]
    return infile[:31] + gf
[ "def", "decrease", "(", "self", ",", "infile", ")", ":", "gf", "=", "infile", "[", "31", ":", "]", "try", ":", "index", "=", "random", ".", "randint", "(", "len", "(", "gf", ")", "-", "1", ",", "31", ")", "except", "ValueError", ":", "return", ...
Decrease: 任意の箇所のバイト列を 削除する
[ "Decrease", ":", "任意の箇所のバイト列を", "削除する" ]
python
train
kivy/python-for-android
pythonforandroid/bootstraps/pygame/build/buildlib/jinja2.egg/jinja2/ext.py
https://github.com/kivy/python-for-android/blob/8e0e8056bc22e4d5bd3398a6b0301f38ff167933/pythonforandroid/bootstraps/pygame/build/buildlib/jinja2.egg/jinja2/ext.py#L486-L545
def babel_extract(fileobj, keywords, comment_tags, options):
    """Babel extraction method for Jinja templates.

    .. versionchanged:: 2.3
       Basic support for translation comments was added.  If `comment_tags`
       is now set to a list of keywords for extraction, the extractor will
       try to find the best preceding comment that begins with one of the
       keywords.  For best results, make sure to not have more than one
       gettext call in one line of code and the matching comment in the
       same line or the line before.

    :param fileobj: the file-like object the messages should be extracted
                    from
    :param keywords: a list of keywords (i.e. function names) that should be
                     recognized as translation functions
    :param comment_tags: a list of translator tags to search for and include
                         in the results.
    :param options: a dictionary of additional options (optional)
    :return: an iterator over ``(lineno, funcname, message, comments)``
             tuples. (comments will be empty currently)
    """
    # Collect the extension classes named in the comma-separated
    # 'extensions' option; i18n support is always added so that
    # {% trans %} blocks parse.
    extensions = set()
    for extension in options.get('extensions', '').split(','):
        extension = extension.strip()
        if not extension:
            continue
        extensions.add(import_string(extension))
    if InternationalizationExtension not in extensions:
        extensions.add(InternationalizationExtension)

    # NOTE(review): the positional argument order here must match
    # get_spontaneous_environment's signature exactly -- keep in sync.
    environment = get_spontaneous_environment(
        options.get('block_start_string', BLOCK_START_STRING),
        options.get('block_end_string', BLOCK_END_STRING),
        options.get('variable_start_string', VARIABLE_START_STRING),
        options.get('variable_end_string', VARIABLE_END_STRING),
        options.get('comment_start_string', COMMENT_START_STRING),
        options.get('comment_end_string', COMMENT_END_STRING),
        options.get('line_statement_prefix') or LINE_STATEMENT_PREFIX,
        options.get('line_comment_prefix') or LINE_COMMENT_PREFIX,
        str(options.get('trim_blocks', TRIM_BLOCKS)).lower() in \
            ('1', 'on', 'yes', 'true'),
        NEWLINE_SEQUENCE, frozenset(extensions),
        # fill with defaults so that environments are shared
        # with other spontaneous environments.  The rest of the
        # arguments are optimizer, undefined, finalize, autoescape,
        # loader, cache size, auto reloading setting and the
        # bytecode cache
        True, Undefined, None, False, None, 0, False, None
    )

    source = fileobj.read().decode(options.get('encoding', 'utf-8'))
    try:
        node = environment.parse(source)
        tokens = list(environment.lex(environment.preprocess(source)))
    except TemplateSyntaxError as e:
        # skip templates with syntax errors (`e` is deliberately unused;
        # as a generator, a bare return simply ends extraction here)
        return

    finder = _CommentFinder(tokens, comment_tags)
    for lineno, func, message in extract_from_ast(node, keywords):
        yield lineno, func, message, finder.find_comments(lineno)
[ "def", "babel_extract", "(", "fileobj", ",", "keywords", ",", "comment_tags", ",", "options", ")", ":", "extensions", "=", "set", "(", ")", "for", "extension", "in", "options", ".", "get", "(", "'extensions'", ",", "''", ")", ".", "split", "(", "','", ...
Babel extraction method for Jinja templates. .. versionchanged:: 2.3 Basic support for translation comments was added. If `comment_tags` is now set to a list of keywords for extraction, the extractor will try to find the best preceeding comment that begins with one of the keywords. For best results, make sure to not have more than one gettext call in one line of code and the matching comment in the same line or the line before. :param fileobj: the file-like object the messages should be extracted from :param keywords: a list of keywords (i.e. function names) that should be recognized as translation functions :param comment_tags: a list of translator tags to search for and include in the results. :param options: a dictionary of additional options (optional) :return: an iterator over ``(lineno, funcname, message, comments)`` tuples. (comments will be empty currently)
[ "Babel", "extraction", "method", "for", "Jinja", "templates", "." ]
python
train
mk-fg/python-onedrive
onedrive/conf.py
https://github.com/mk-fg/python-onedrive/blob/74d3f6605b0e8a9031a2aab8092f551293ffb533/onedrive/conf.py#L36-L80
def from_conf(cls, path=None, **overrides):
    '''Initialize instance from YAML configuration file, writing updates
       (only to keys, specified by "conf_update_keys") back to it.

       :param path: path to the YAML state/configuration file; defaults
           to cls.conf_path_default ("~" is user-expanded).
       :param overrides: keyword overrides applied on top of the values
           read from the file (keys use the "<section>_<key>" form).
       :returns: a new class instance with conf_save set to the path.
       :raises KeyError: if a configuration section has an unexpected
           structure (when cls.conf_raise_structure_errors is true).'''
    from onedrive import portalocker
    import yaml
    if path is None:
        path = cls.conf_path_default
        log.debug('Using default state-file path: %r', path)
    path = os.path.expanduser(path)
    # Read under a shared lock so a concurrent writer can't hand us a
    # half-written file.
    with open(path, 'rb') as src:
        portalocker.lock(src, portalocker.LOCK_SH)
        yaml_str = src.read()
        portalocker.unlock(src)
    conf = yaml.safe_load(yaml_str)
    conf.setdefault('conf_save', path)

    # Flatten the "<section>: {<key>: <value>}" structure into
    # "<section>_<key>" keyword arguments for the class constructor.
    conf_cls = dict()
    for ns, keys in cls.conf_update_keys.viewitems():
        for k in keys:
            try:
                v = conf.get(ns, dict()).get(k)
            except AttributeError:
                # Section exists but is not a mapping (e.g. a scalar).
                if not cls.conf_raise_structure_errors:
                    raise
                raise KeyError((
                    'Unable to get value for configuration parameter'
                    ' "{k}" in section "{ns}", check configuration file (path: {path}) syntax'
                    ' near the aforementioned section/value.'
                ).format(ns=ns, k=k, path=path))
            if v is not None:
                conf_cls['{}_{}'.format(ns, k)] = conf[ns][k]
    conf_cls.update(overrides)

    # Hack to work around YAML parsing client_id of e.g. 000123 as an octal int
    if isinstance(conf.get('client', dict()).get('id'), (int, long)):
        log.warn(
            'Detected client_id being parsed as an integer (as per yaml), trying to un-mangle it.'
            ' If requests will still fail afterwards, please replace it in the configuration file (path: %r),'
            ' also putting single or double quotes (either one should work) around the value.', path)
        cid = conf['client']['id']
        # If the literal in the file matches the octal (but not the decimal)
        # rendering of the parsed int, the file held a zero-padded decimal
        # id that yaml read as octal -- recover it by re-reading the octal
        # digits as decimal.
        if not re.search(r'\b(0*)?{:d}\b'.format(cid), yaml_str)\
                and re.search(r'\b(0*)?{:o}\b'.format(cid), yaml_str):
            # Bug fix: this was int('{:0}'.format(cid)), but the "0" format
            # spec is a no-op (renders the same decimal digits back); "o"
            # re-renders the value in octal, actually undoing yaml's octal
            # parse as the branch above intends.
            cid = int('{:o}'.format(cid))
        conf['client']['id'] = '{:016d}'.format(cid)

    self = cls(**conf_cls)
    self.conf_save = conf['conf_save']
    return self
[ "def", "from_conf", "(", "cls", ",", "path", "=", "None", ",", "*", "*", "overrides", ")", ":", "from", "onedrive", "import", "portalocker", "import", "yaml", "if", "path", "is", "None", ":", "path", "=", "cls", ".", "conf_path_default", "log", ".", "d...
Initialize instance from YAML configuration file, writing updates (only to keys, specified by "conf_update_keys") back to it.
[ "Initialize", "instance", "from", "YAML", "configuration", "file", "writing", "updates", "(", "only", "to", "keys", "specified", "by", "conf_update_keys", ")", "back", "to", "it", "." ]
python
test
OnroerendErfgoed/pyramid_urireferencer
pyramid_urireferencer/protected_resources.py
https://github.com/OnroerendErfgoed/pyramid_urireferencer/blob/c6ee4ba863e32ced304b9cf00f3f5b450757a29a/pyramid_urireferencer/protected_resources.py#L83-L103
def protected_operation(fn):
    """
    Decorator that blocks the wrapped operation while the related uri
    resource is still in use.

    The parent_object must contain:
    * a request
    * with a registry.queryUtility(IReferencer)

    :raises pyramid.httpexceptions.HTTPConflict: Signals that we don't
        want to delete a certain URI because it's still in use somewhere
        else.
    :raises pyramid.httpexceptions.HTTPInternalServerError: Raised when
        we were unable to check that the URI is no longer being used.
    """
    @functools.wraps(fn)
    def advice(parent_object, *args, **kw):
        # A non-None response means the resource is still referenced (or
        # the check failed); hand that error response straight back
        # instead of running the protected operation.
        conflict_response = _advice(parent_object.request)
        if conflict_response is None:
            return fn(parent_object, *args, **kw)
        return conflict_response
    return advice
[ "def", "protected_operation", "(", "fn", ")", ":", "@", "functools", ".", "wraps", "(", "fn", ")", "def", "advice", "(", "parent_object", ",", "*", "args", ",", "*", "*", "kw", ")", ":", "response", "=", "_advice", "(", "parent_object", ".", "request",...
Use this decorator to prevent an operation from being executed when the related uri resource is still in use. The parent_object must contain: * a request * with a registry.queryUtility(IReferencer) :raises pyramid.httpexceptions.HTTPConflict: Signals that we don't want to delete a certain URI because it's still in use somewhere else. :raises pyramid.httpexceptions.HTTPInternalServerError: Raised when we were unable to check that the URI is no longer being used.
[ "Use", "this", "decorator", "to", "prevent", "an", "operation", "from", "being", "executed", "when", "the", "related", "uri", "resource", "is", "still", "in", "use", ".", "The", "parent_object", "must", "contain", ":", "*", "a", "request", "*", "with", "a"...
python
train
astrorafael/twisted-mqtt
mqtt/pdu.py
https://github.com/astrorafael/twisted-mqtt/blob/5b322f7c2b82a502b1e1b70703ae45f1f668d07d/mqtt/pdu.py#L511-L542
def encode(self):
    '''
    Encode and store a PUBLISH control packet.

    Builds the fixed header, variable header (topic, plus message id when
    QoS > 0) and payload, caches the full packet in ``self.encoded`` and
    returns it as a byte string.

    @raise e: C{ValueError} if encoded topic string exceeds 65535 bytes.
    @raise e: C{ValueError} if encoded packet size exceeds 268435455 bytes.
    @raise e: C{TypeError} if C{data} is not a string, bytearray, int,
              boolean or float.
    '''
    # Fixed header flags: 0x30 marks PUBLISH; QoS/dup bits only apply
    # when QoS is nonzero.
    fixed_flags = 0x30 | self.retain
    if self.qos:
        fixed_flags |= (self.qos << 1) | (self.dup << 3)

    # Variable header: topic name, then the message id for QoS 1/2.
    variable_header = bytearray(encodeString(self.topic))
    if self.qos:
        variable_header += encode16Int(self.msgId)  # msgId should not be None

    if isinstance(self.payload, bytearray):
        body = bytearray(self.payload)
    elif isinstance(self.payload, str):
        body = bytearray(self.payload, encoding='utf-8')
    else:
        raise PayloadTypeError(type(self.payload))

    remaining_length = len(variable_header) + len(body)
    if remaining_length > 268435455:
        raise PayloadValueError(remaining_length)

    packet = bytearray([fixed_flags])
    packet += encodeLength(remaining_length)
    packet += variable_header
    packet += body
    self.encoded = packet
    return str(packet) if PY2 else bytes(packet)
[ "def", "encode", "(", "self", ")", ":", "header", "=", "bytearray", "(", "1", ")", "varHeader", "=", "bytearray", "(", ")", "payload", "=", "bytearray", "(", ")", "if", "self", ".", "qos", ":", "header", "[", "0", "]", "=", "0x30", "|", "self", "...
Encode and store a PUBLISH control packet. @raise e: C{ValueError} if encoded topic string exceeds 65535 bytes. @raise e: C{ValueError} if encoded packet size exceeds 268435455 bytes. @raise e: C{TypeError} if C{data} is not a string, bytearray, int, boolean or float.
[ "Encode", "and", "store", "a", "PUBLISH", "control", "packet", "." ]
python
test