repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
PyCQA/pylint
pylint/checkers/utils.py
https://github.com/PyCQA/pylint/blob/2bf5c61a3ff6ae90613b81679de42c0f19aea600/pylint/checkers/utils.py#L370-L380
def is_default_argument(node: astroid.node_classes.NodeNG) -> bool:
    """Return True if the given Name node is used in a function's or
    lambda's default argument value.
    """
    scope = node.scope()
    if not isinstance(scope, (astroid.FunctionDef, astroid.Lambda)):
        return False
    # The node counts only if it is one of the Name nodes appearing
    # inside a default-value expression of the enclosing callable.
    return any(
        name_node is node
        for default in scope.args.defaults
        for name_node in default.nodes_of_class(astroid.Name)
    )
[ "def", "is_default_argument", "(", "node", ":", "astroid", ".", "node_classes", ".", "NodeNG", ")", "->", "bool", ":", "parent", "=", "node", ".", "scope", "(", ")", "if", "isinstance", "(", "parent", ",", "(", "astroid", ".", "FunctionDef", ",", "astroi...
return true if the given Name node is used in function or lambda default argument's value
[ "return", "true", "if", "the", "given", "Name", "node", "is", "used", "in", "function", "or", "lambda", "default", "argument", "s", "value" ]
python
test
GearPlug/payu-python
payu/recurring.py
https://github.com/GearPlug/payu-python/blob/47ec5c9fc89f1f89a53ec0a68c84f358bbe3394e/payu/recurring.py#L107-L117
def delete_plan(self, plan_code):
    """Delete an entire subscription plan associated with the merchant.

    Args:
        plan_code: Plan's identification code for the merchant.

    Returns:
        The response of the DELETE request issued by the client.
    """
    endpoint = self.url + 'plans/{}'.format(plan_code)
    return self.client._delete(endpoint, headers=self.get_headers())
[ "def", "delete_plan", "(", "self", ",", "plan_code", ")", ":", "return", "self", ".", "client", ".", "_delete", "(", "self", ".", "url", "+", "'plans/{}'", ".", "format", "(", "plan_code", ")", ",", "headers", "=", "self", ".", "get_headers", "(", ")",...
Delete an entire subscription plan associated with the merchant. Args: plan_code: Plan’s identification code for the merchant. Returns:
[ "Delete", "an", "entire", "subscription", "plan", "associated", "with", "the", "merchant", "." ]
python
train
stsouko/CIMtools
CIMtools/datasets/molconvert_chemaxon.py
https://github.com/stsouko/CIMtools/blob/cbb46e68eaa1fe7e7b6cb311fc7063e97096bdf3/CIMtools/datasets/molconvert_chemaxon.py#L27-L58
def molconvert_chemaxon(data):
    """molconvert wrapper.

    :param data: buffer or string or path to file
    :return: array of molecules or reactions
    :raises ValueError: if the input is not str, bytes, Path or a readable
        file-like object
    :raises ConfigurationError: if molconvert is not installed or exits
        with a non-zero status
    """
    if isinstance(data, Path):
        with data.open('rb') as f:
            data = f.read()
    elif isinstance(data, StringIO):
        data = data.read().encode()
    elif isinstance(data, BytesIO):
        data = data.read()
    elif hasattr(data, 'read'):  # check if data is open(filename, mode)
        data = data.read()
        if isinstance(data, str):  # text-mode file object: encode to bytes
            data = data.encode()
    elif isinstance(data, str):
        data = data.encode()
    elif not isinstance(data, bytes):
        raise ValueError('invalid input')

    try:
        # Bug fix: stderr must be captured too — it is decoded below on
        # failure, and without stderr=PIPE `p.stderr` is None, turning any
        # molconvert failure into an AttributeError.
        p = run(['molconvert', '-g', 'mrv'], input=data, stdout=PIPE, stderr=PIPE)
    except FileNotFoundError as e:
        raise ConfigurationError from e

    if p.returncode != 0:
        raise ConfigurationError(p.stderr.decode())

    with BytesIO(p.stdout) as f, MRVread(f) as r:
        return iter2array(r)
[ "def", "molconvert_chemaxon", "(", "data", ")", ":", "if", "isinstance", "(", "data", ",", "Path", ")", ":", "with", "data", ".", "open", "(", "'rb'", ")", "as", "f", ":", "data", "=", "f", ".", "read", "(", ")", "elif", "isinstance", "(", "data", ...
molconvert wrapper :param data: buffer or string or path to file :return: array of molecules of reactions
[ "molconvert", "wrapper", ":", "param", "data", ":", "buffer", "or", "string", "or", "path", "to", "file", ":", "return", ":", "array", "of", "molecules", "of", "reactions" ]
python
valid
GGiecold/Cluster_Ensembles
src/Cluster_Ensembles/Cluster_Ensembles.py
https://github.com/GGiecold/Cluster_Ensembles/blob/d1b1ce9f541fc937ac7c677e964520e0e9163dc7/src/Cluster_Ensembles/Cluster_Ensembles.py#L240-L315
def cluster_ensembles(cluster_runs, hdf5_file_name=None, verbose=False, N_clusters_max=None):
    """Call up to three different functions for heuristic ensemble clustering
    (namely CSPA, HGPA and MCLA) then select as the definitive consensus
    clustering the one with the highest average mutual information score
    between its vector of consensus labels and the vectors of labels
    associated to each partition from the ensemble.

    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)
        Each row of this matrix is such that the i-th entry corresponds to the
        cluster ID to which the i-th sample of the data-set has been
        classified by this particular clustering. Samples not selected for
        clustering in a given round are tagged by an NaN.
    hdf5_file_name : file object or string, optional (default = None)
        The handle or name of an HDF5 file where any array needed for
        consensus_clustering and too large to fit into memory is to be
        stored. Created if not specified at input.
    verbose : Boolean, optional (default = False)
        Specifies if messages concerning the status of the many functions
        subsequently called 'cluster_ensembles' will be displayed on the
        standard output.
    N_clusters_max : int, optional
        The number of clusters in which to partition the samples into a
        consensus clustering. This defaults to the highest number of clusters
        encountered in the sets of independent clusterings on subsamples of
        the data-set (i.e. the maximum of the entries in "cluster_runs").

    Returns
    -------
    cluster_ensemble : array of shape (n_samples,)
        For the final ensemble clustering, this vector contains the cluster
        IDs of each sample in the whole data-set.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """
    if hdf5_file_name is None:
        hdf5_file_name = './Cluster_Ensembles.h5'
    fileh = tables.open_file(hdf5_file_name, 'w')
    fileh.create_group(fileh.root, 'consensus_group')
    fileh.close()

    # With very many samples, CSPA's co-association matrix is too costly,
    # so fall back to the two cheaper consensus functions only.
    if cluster_runs.shape[1] > 10000:
        consensus_functions = [HGPA, MCLA]
        function_names = ['HGPA', 'MCLA']
        print("\nINFO: Cluster_Ensembles: cluster_ensembles: "
              "due to a rather large number of cells in your data-set, "
              "using only 'HyperGraph Partitioning Algorithm' (HGPA) "
              "and 'Meta-CLustering Algorithm' (MCLA) "
              "as ensemble consensus functions.\n")
    else:
        consensus_functions = [CSPA, HGPA, MCLA]
        function_names = ['CSPA', 'HGPA', 'MCLA']

    hypergraph_adjacency = build_hypergraph_adjacency(cluster_runs)
    store_hypergraph_adjacency(hypergraph_adjacency, hdf5_file_name)

    cluster_ensemble = []
    score = np.empty(0)
    # Run every candidate consensus function and score it by average
    # mutual information with the individual partitions.
    for name, consensus in zip(function_names, consensus_functions):
        cluster_ensemble.append(
            consensus(hdf5_file_name, cluster_runs, verbose, N_clusters_max))
        score = np.append(
            score, ceEvalMutual(cluster_runs, cluster_ensemble[-1], verbose))
        print("\nINFO: Cluster_Ensembles: cluster_ensembles: "
              "{0} at {1}.".format(name, score[-1]))
        print('*****')

    # Keep the consensus clustering with the best mutual-information score.
    return cluster_ensemble[np.argmax(score)]
[ "def", "cluster_ensembles", "(", "cluster_runs", ",", "hdf5_file_name", "=", "None", ",", "verbose", "=", "False", ",", "N_clusters_max", "=", "None", ")", ":", "if", "hdf5_file_name", "is", "None", ":", "hdf5_file_name", "=", "'./Cluster_Ensembles.h5'", "fileh", ...
Call up to three different functions for heuristic ensemble clustering (namely CSPA, HGPA and MCLA) then select as the definitive consensus clustering the one with the highest average mutual information score between its vector of consensus labels and the vectors of labels associated to each partition from the ensemble. Parameters ---------- cluster_runs : array of shape (n_partitions, n_samples) Each row of this matrix is such that the i-th entry corresponds to the cluster ID to which the i-th sample of the data-set has been classified by this particular clustering. Samples not selected for clustering in a given round are are tagged by an NaN. hdf5_file_name : file object or string, optional (default = None) The handle or name of an HDF5 file where any array needed for consensus_clustering and too large to fit into memory is to be stored. Created if not specified at input. verbose : Boolean, optional (default = False) Specifies if messages concerning the status of the many functions subsequently called 'cluster_ensembles' will be displayed on the standard output. N_clusters_max : int, optional The number of clusters in which to partition the samples into a consensus clustering. This defaults to the highest number of clusters encountered in the sets of independent clusterings on subsamples of the data-set (i.e. the maximum of the entries in "cluster_runs"). Returns ------- cluster_ensemble : array of shape (n_samples,) For the final ensemble clustering, this vector contains the cluster IDs of each sample in the whole data-set. Reference --------- A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework for Combining Multiple Partitions". In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
[ "Call", "up", "to", "three", "different", "functions", "for", "heuristic", "ensemble", "clustering", "(", "namely", "CSPA", "HGPA", "and", "MCLA", ")", "then", "select", "as", "the", "definitive", "consensus", "clustering", "the", "one", "with", "the", "highes...
python
train
3DLIRIOUS/MeshLabXML
meshlabxml/transform.py
https://github.com/3DLIRIOUS/MeshLabXML/blob/177cce21e92baca500f56a932d66bd9a33257af8/meshlabxml/transform.py#L817-L850
def deform2curve(script, curve=mp_func.torus_knot('t'), step=0.001):
    """Deform a mesh along a parametric curve function

    Provide a parametric curve function with z as the parameter. This will
    deform the xy cross section of the mesh along the curve as z increases.

    Source: http://blackpawn.com/texts/pqtorus/

    Methodology:
        T = P' - P
        N1 = P' + P
        B = T x N1
        N = B x T
        newPoint = point.x*N + point.y*B

    Args:
        script: the script/FilterScript object the vertex function is
            applied to (passed through to vert_function).
        curve (list): parametric curve functions of 't' for x, y and z.
        step (float): forward-difference step; P' is the curve evaluated
            at t + step to approximate the tangent.

    Returns:
        list: the substituted x, y and z deformation functions applied.
    """
    # Bug fix: the original wrote `curve[idx] = ...` inside a loop, mutating
    # the caller's list — and, worse, the shared mutable default argument —
    # so a second call with the default curve silently lost the 't'
    # substitution. Build new lists instead of mutating in place.
    curve_point = [val.replace('t', 'z') for val in curve]
    curve_step = [val.replace('t', 'z+{}'.format(step)) for val in curve]

    # T = P' - P ; N1 = P' + P ; B = T x N1 ; N = B x T
    tangent = mp_func.v_subtract(curve_step, curve_point)
    normal1 = mp_func.v_add(curve_step, curve_point)
    bee = mp_func.v_cross(tangent, normal1)
    normal = mp_func.v_cross(bee, tangent)
    bee = mp_func.v_normalize(bee)
    normal = mp_func.v_normalize(normal)

    # newPoint = point.x*N + point.y*B
    new_point = mp_func.v_add(mp_func.v_multiply('x', normal),
                              mp_func.v_multiply('y', bee))
    function = mp_func.v_add(curve_point, new_point)
    vert_function(script, x_func=function[0], y_func=function[1],
                  z_func=function[2])
    return function
[ "def", "deform2curve", "(", "script", ",", "curve", "=", "mp_func", ".", "torus_knot", "(", "'t'", ")", ",", "step", "=", "0.001", ")", ":", "curve_step", "=", "[", "]", "for", "idx", ",", "val", "in", "enumerate", "(", "curve", ")", ":", "curve", ...
Deform a mesh along a parametric curve function Provide a parametric curve function with z as the parameter. This will deform the xy cross section of the mesh along the curve as z increases. Source: http://blackpawn.com/texts/pqtorus/ Methodology: T = P' - P N1 = P' + P B = T x N1 N = B x T newPoint = point.x*N + point.y*B
[ "Deform", "a", "mesh", "along", "a", "parametric", "curve", "function" ]
python
test
google/grr
grr/server/grr_response_server/databases/mem_flows.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/databases/mem_flows.py#L760-L771
def WriteFlowLogEntries(self, entries):
    """Writes flow log entries for a given flow."""
    # Validate every referenced flow before mutating any state so a bad
    # batch writes nothing.
    keys = [(entry.client_id, entry.flow_id) for entry in entries]
    for key in keys:
        if key not in self.flows:
            raise db.AtLeastOneUnknownFlowError(keys)

    for entry in entries:
        bucket = self.flow_log_entries.setdefault(
            (entry.client_id, entry.flow_id), [])
        stamped = entry.Copy()
        stamped.timestamp = rdfvalue.RDFDatetime.Now()
        bucket.append(stamped)
[ "def", "WriteFlowLogEntries", "(", "self", ",", "entries", ")", ":", "flow_ids", "=", "[", "(", "e", ".", "client_id", ",", "e", ".", "flow_id", ")", "for", "e", "in", "entries", "]", "for", "f", "in", "flow_ids", ":", "if", "f", "not", "in", "self...
Writes flow output plugin log entries for a given flow.
[ "Writes", "flow", "output", "plugin", "log", "entries", "for", "a", "given", "flow", "." ]
python
train
pydata/xarray
xarray/coding/strings.py
https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/coding/strings.py#L178-L184
def _numpy_char_to_bytes(arr): """Like netCDF4.chartostring, but faster and more flexible. """ # based on: http://stackoverflow.com/a/10984878/809705 arr = np.array(arr, copy=False, order='C') dtype = 'S' + str(arr.shape[-1]) return arr.view(dtype).reshape(arr.shape[:-1])
[ "def", "_numpy_char_to_bytes", "(", "arr", ")", ":", "# based on: http://stackoverflow.com/a/10984878/809705", "arr", "=", "np", ".", "array", "(", "arr", ",", "copy", "=", "False", ",", "order", "=", "'C'", ")", "dtype", "=", "'S'", "+", "str", "(", "arr", ...
Like netCDF4.chartostring, but faster and more flexible.
[ "Like", "netCDF4", ".", "chartostring", "but", "faster", "and", "more", "flexible", "." ]
python
train
has2k1/plotnine
plotnine/positions/position_dodge.py
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/positions/position_dodge.py#L59-L98
def strategy(data, params):
    """
    Dodge overlapping interval

    Assumes that each set has the same horizontal position.
    """
    width = params['width']

    # A scalar width raises TypeError on iter() and is left as-is;
    # an iterable width is aligned with the data index.
    with suppress(TypeError):
        iter(width)
        width = np.asarray(width)
        width = width[data.index]

    udata_group = data['group'].drop_duplicates()

    n = params.get('n', None)
    if n is None:
        n = len(udata_group)
    if n == 1:
        return data

    # Ensure interval bounds exist; degenerate intervals collapse onto x.
    if not all(col in data.columns for col in ('xmin', 'xmax')):
        data['xmin'] = data['x']
        data['xmax'] = data['x']

    d_width = np.max(data['xmax'] - data['xmin'])

    # Have a new group index from 1 to number of groups.
    # This might be needed if the group numbers in this set don't
    # include all of 1:n
    udata_group = udata_group.sort_values()
    groupidx = np.asarray(match(data['group'], udata_group)) + 1

    # Find the center for each group, then use that to
    # calculate xmin and xmax
    data['x'] = data['x'] + width * ((groupidx - 0.5) / n - 0.5)
    data['xmin'] = data['x'] - (d_width / n) / 2
    data['xmax'] = data['x'] + (d_width / n) / 2

    return data
[ "def", "strategy", "(", "data", ",", "params", ")", ":", "width", "=", "params", "[", "'width'", "]", "with", "suppress", "(", "TypeError", ")", ":", "iter", "(", "width", ")", "width", "=", "np", ".", "asarray", "(", "width", ")", "width", "=", "w...
Dodge overlapping interval Assumes that each set has the same horizontal position.
[ "Dodge", "overlapping", "interval" ]
python
train
bokeh/bokeh
bokeh/command/util.py
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/command/util.py#L77-L139
def build_single_handler_application(path, argv=None):
    ''' Return a Bokeh application built using a single handler for a script,
    notebook, or directory.

    In general a Bokeh :class:`~bokeh.application.application.Application` may
    have any number of handlers to initialize :class:`~bokeh.document.Document`
    objects for new client sessions. However, in many cases only a single
    handler is needed. This function examines the ``path`` provided, and
    returns an ``Application`` initialized with one of the following handlers:

    * :class:`~bokeh.application.handlers.script.ScriptHandler` when ``path``
      is to a ``.py`` script.

    * :class:`~bokeh.application.handlers.notebook.NotebookHandler` when
      ``path`` is to an ``.ipynb`` Jupyter notebook.

    * :class:`~bokeh.application.handlers.directory.DirectoryHandler` when
      ``path`` is to a directory containing a ``main.py`` script.

    Args:
        path (str) : path to a file or directory for creating a Bokeh
            application.

        argv (seq[str], optional) : command line arguments to pass to the
            application handler

    Returns:
        :class:`~bokeh.application.application.Application`

    Raises:
        RuntimeError

    Notes:
        If ``path`` ends with a file ``main.py`` then a warning will be
        printed regarding running directory-style apps by passing the
        directory instead.

    '''
    argv = argv or []
    path = os.path.abspath(path)

    # There are certainly race conditions here if the file/directory is
    # deleted in between the isdir/isfile tests and subsequent code. But it
    # would be a failure if they were not there to begin with, too (just a
    # different error)
    if os.path.isdir(path):
        handler = DirectoryHandler(filename=path, argv=argv)
    elif os.path.isfile(path):
        if path.endswith(".ipynb"):
            handler = NotebookHandler(filename=path, argv=argv)
        elif path.endswith(".py"):
            # Running main.py directly usually means the user meant to run
            # the directory-style app; warn but proceed.
            if path.endswith("main.py"):
                warnings.warn(DIRSTYLE_MAIN_WARNING)
            handler = ScriptHandler(filename=path, argv=argv)
        else:
            raise ValueError("Expected a '.py' script or '.ipynb' notebook, got: '%s'" % path)
    else:
        raise ValueError("Path for Bokeh server application does not exist: %s" % path)

    if handler.failed:
        raise RuntimeError("Error loading %s:\n\n%s\n%s " % (path, handler.error, handler.error_detail))

    return Application(handler)
[ "def", "build_single_handler_application", "(", "path", ",", "argv", "=", "None", ")", ":", "argv", "=", "argv", "or", "[", "]", "path", "=", "os", ".", "path", ".", "abspath", "(", "path", ")", "# There are certainly race conditions here if the file/directory is ...
Return a Bokeh application built using a single handler for a script, notebook, or directory. In general a Bokeh :class:`~bokeh.application.application.Application` may have any number of handlers to initialize :class:`~bokeh.document.Document` objects for new client sessions. However, in many cases only a single handler is needed. This function examines the ``path`` provided, and returns an ``Application`` initialized with one of the following handlers: * :class:`~bokeh.application.handlers.script.ScriptHandler` when ``path`` is to a ``.py`` script. * :class:`~bokeh.application.handlers.notebook.NotebookHandler` when ``path`` is to an ``.ipynb`` Jupyter notebook. * :class:`~bokeh.application.handlers.directory.DirectoryHandler` when ``path`` is to a directory containing a ``main.py`` script. Args: path (str) : path to a file or directory for creating a Bokeh application. argv (seq[str], optional) : command line arguments to pass to the application handler Returns: :class:`~bokeh.application.application.Application` Raises: RuntimeError Notes: If ``path`` ends with a file ``main.py`` then a warning will be printed regarding running directory-style apps by passing the directory instead.
[ "Return", "a", "Bokeh", "application", "built", "using", "a", "single", "handler", "for", "a", "script", "notebook", "or", "directory", "." ]
python
train
JoeVirtual/KonFoo
konfoo/core.py
https://github.com/JoeVirtual/KonFoo/blob/0c62ef5c2bed4deaf908b34082e4de2544532fdc/konfoo/core.py#L2647-L2690
def _set_alignment(self, group_size, bit_offset=0, auto_align=False):
    """ Sets the alignment of the ``Decimal`` field.

    :param int group_size: size of the aligned `Field` group in bytes,
        can be between ``1`` and ``8``.
    :param int bit_offset: bit offset of the `Decimal` field within the
        aligned `Field` group, can be between ``0`` and ``63``.
    :param bool auto_align: if ``True`` the `Decimal` field aligns itself
        to the next matching byte size according to the *size* of the
        `Decimal` field.
    :raises FieldAlignmentError: if the resulting alignment size or offset
        is out of range, or the offset does not fit inside the group.
    """
    # Field alignment offset
    field_offset = int(bit_offset)

    if auto_align:
        # Auto alignment: smallest byte count that spans the bit offset.
        field_size, remainder = divmod(field_offset, 8)
        # Bug fix: was `remainder is not 0` — an identity comparison with
        # an int literal (CPython-detail-dependent, SyntaxWarning >= 3.8).
        if remainder != 0:
            field_size += 1
        field_size = max(field_size, 1)
    else:
        # Field alignment size
        field_size = int(group_size)

    # Field alignment
    alignment = Alignment(field_size, field_offset)

    # Invalid field alignment size.
    # Bug fix: range(1, 8) excluded 8 although the docstring allows group
    # sizes 1..8 and bit offsets up to 63 require an 8-byte group.
    if field_size not in range(1, 9):
        raise FieldAlignmentError(self, self.index, alignment)
    # Invalid field alignment offset
    if not (0 <= field_offset <= 63):
        raise FieldAlignmentError(self, self.index, alignment)
    # Invalid field alignment: offset must lie inside the group
    if field_offset >= field_size * 8:
        raise FieldAlignmentError(self, self.index, alignment)

    # Set field alignment
    self._align_to_byte_size = alignment.byte_size
    self._align_to_bit_offset = alignment.bit_offset
[ "def", "_set_alignment", "(", "self", ",", "group_size", ",", "bit_offset", "=", "0", ",", "auto_align", "=", "False", ")", ":", "# Field alignment offset", "field_offset", "=", "int", "(", "bit_offset", ")", "# Auto alignment", "if", "auto_align", ":", "# Field...
Sets the alignment of the ``Decimal`` field. :param int group_size: size of the aligned `Field` group in bytes, can be between ``1`` and ``8``. :param int bit_offset: bit offset of the `Decimal` field within the aligned `Field` group, can be between ``0`` and ``63``. :param bool auto_align: if ``True`` the `Decimal` field aligns itself to the next matching byte size according to the *size* of the `Decimal` field.
[ "Sets", "the", "alignment", "of", "the", "Decimal", "field", "." ]
python
train
voxpupuli/pypuppetdb
pypuppetdb/types.py
https://github.com/voxpupuli/pypuppetdb/blob/cedeecf48014b4ad5b8e2513ca8230c814f45603/pypuppetdb/types.py#L461-L464
def fact(self, name):
    """Get a single fact from this node."""
    # Return the first fact yielded by the (possibly lazy) facts query.
    return next(iter(self.facts(name=name)))
[ "def", "fact", "(", "self", ",", "name", ")", ":", "facts", "=", "self", ".", "facts", "(", "name", "=", "name", ")", "return", "next", "(", "fact", "for", "fact", "in", "facts", ")" ]
Get a single fact from this node.
[ "Get", "a", "single", "fact", "from", "this", "node", "." ]
python
valid
mitsei/dlkit
dlkit/json_/logging_/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/logging_/sessions.py#L390-L416
def get_log_entries_by_genus_type(self, log_entry_genus_type):
    """Gets a ``LogEntryList`` corresponding to the given log entry genus
    ``Type`` which does not include entries of genus types derived from the
    specified ``Type``.

    In plenary mode, the returned list contains all known entries or an
    error results. Otherwise, the returned list may contain only those
    entries that are accessible through this session.

    arg:    log_entry_genus_type (osid.type.Type): a log entry genus type
    return: (osid.logging.LogEntryList) - the returned ``LogEntry`` list
    raise:  NullArgument - ``log_entry_genus_type`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*

    """
    # Implemented from template for
    # osid.resource.ResourceLookupSession.get_resources_by_genus_type
    # NOTE: This implementation currently ignores plenary view
    collection = JSONClientValidated('logging',
                                    collection='LogEntry',
                                    runtime=self._runtime)
    # Exact genus-type match, restricted by the session's view filter.
    query = dict({'genusTypeId': str(log_entry_genus_type)},
                 **self._view_filter())
    result = collection.find(query).sort('_id', DESCENDING)
    return objects.LogEntryList(result, runtime=self._runtime, proxy=self._proxy)
[ "def", "get_log_entries_by_genus_type", "(", "self", ",", "log_entry_genus_type", ")", ":", "# Implemented from template for", "# osid.resource.ResourceLookupSession.get_resources_by_genus_type", "# NOTE: This implementation currently ignores plenary view", "collection", "=", "JSONClientVa...
Gets a ``LogEntryList`` corresponding to the given log entry genus ``Type`` which does not include entries of genus types derived from the specified ``Type``. In plenary mode, the returned list contains all known entries or an error results. Otherwise, the returned list may contain only those entries that are accessible through this session arg: log_entry_genus_type (osid.type.Type): a log entry genus type return: (osid.logging.LogEntryList) - the returned ``LogEntry`` list raise: NullArgument - ``log_entry_genus_type`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
[ "Gets", "a", "LogEntryList", "corresponding", "to", "the", "given", "log", "entry", "genus", "Type", "which", "doe", "snot", "include", "entries", "of", "genus", "types", "derived", "form", "the", "specified", "Type", "." ]
python
train
rpcope1/PythonConfluenceAPI
PythonConfluenceAPI/api.py
https://github.com/rpcope1/PythonConfluenceAPI/blob/b7f0ca2a390f964715fdf3a60b5b0c5ef7116d40/PythonConfluenceAPI/api.py#L707-L734
def get_space_content(self, space_key, depth=None, expand=None, start=None, limit=None, callback=None):
    """
    Returns the content in this given space.
    :param space_key (string): A string containing the key of the space.
    :param depth (string): OPTIONAL: A string indicating if all content, or just the root content of
                           the space is returned. Default: "all". Valid values: "all", "root".
    :param expand (string): OPTIONAL: A comma separated list of properties to expand on each piece
                            of content retrieved. Default: Empty.
    :param start (int): OPTIONAL: The start point of the collection to return. Default: 0.
    :param limit (int): OPTIONAL: The limit of the number of labels to return, this may be restricted by
                        fixed system limits. Default: 25.
    :param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns.
                     Default: None (no callback, raw data returned).
    :return: The JSON data returned from the space/{spaceKey}/content endpoint, or the results of the
             callback. Will raise requests.HTTPError on bad input, potentially.
    :raises ValueError: if `depth` is not one of "all" or "root".
    """
    params = {}
    if depth:
        # Explicit raise instead of `assert`: assertions are stripped when
        # Python runs with -O, which would silently send invalid input.
        if depth not in {"all", "root"}:
            raise ValueError("depth must be 'all' or 'root', got: {!r}".format(depth))
        params["depth"] = depth
    if expand:
        params["expand"] = expand
    if start is not None:
        params["start"] = int(start)
    if limit is not None:
        params["limit"] = int(limit)
    return self._service_get_request("rest/api/space/{key}/content".format(key=space_key),
                                     params=params, callback=callback)
[ "def", "get_space_content", "(", "self", ",", "space_key", ",", "depth", "=", "None", ",", "expand", "=", "None", ",", "start", "=", "None", ",", "limit", "=", "None", ",", "callback", "=", "None", ")", ":", "params", "=", "{", "}", "if", "depth", ...
Returns the content in this given space. :param space_key (string): A string containing the key of the space. :param depth (string): OPTIONAL: A string indicating if all content, or just the root content of the space is returned. Default: "all". Valid values: "all", "root". :param expand (string): OPTIONAL: A comma separated list of properties to expand on each piece of content retrieved. Default: Empty. :param start (int): OPTIONAL: The start point of the collection to return. Default: 0. :param limit (int): OPTIONAL: The limit of the number of labels to return, this may be restricted by fixed system limits. Default: 25. :param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns. Default: None (no callback, raw data returned). :return: The JSON data returned from the space/{spaceKey}/content endpoint, or the results of the callback. Will raise requests.HTTPError on bad input, potentially.
[ "Returns", "the", "content", "in", "this", "given", "space", ".", ":", "param", "space_key", "(", "string", ")", ":", "A", "string", "containing", "the", "key", "of", "the", "space", ".", ":", "param", "depth", "(", "string", ")", ":", "OPTIONAL", ":",...
python
train
dwwkelly/note
note/mongo_driver.py
https://github.com/dwwkelly/note/blob/b41d5fe1e4a3b67b50285168dd58f903bf219e6c/note/mongo_driver.py#L298-L328
def getByTime(self, startTime=None, endTime=None):
    """
    :desc: Get all the notes in the given time window
    :param int startTime: The beginning of the window
    :param int endTime: The end of the window
    :returns: A list of IDs
    :rval: list
    """
    collections = self.get_data_collections()

    if startTime is not None:
        startTime = float(startTime)
    if endTime is not None:
        endTime = float(endTime)

    if startTime is not None and endTime is not None:
        timeQuery = {"$and": [{"timestamps": {"$gt": startTime}},
                              {"timestamps": {"$lt": endTime}}]}
    elif startTime is not None:
        timeQuery = {"timestamps": {"$gt": startTime}}
    elif endTime is not None:
        timeQuery = {"timestamps": {"$lt": endTime}}
    else:
        # Bug fix: with neither bound given, timeQuery was previously
        # unbound and the find() below raised NameError. An empty query
        # matches all notes.
        timeQuery = {}

    IDs = []
    for coll in collections:
        docs = self.noteDB[coll].find(timeQuery, {"ID": 1, "_id": 0})
        for doc in docs:
            IDs.append(doc['ID'])

    return IDs
[ "def", "getByTime", "(", "self", ",", "startTime", "=", "None", ",", "endTime", "=", "None", ")", ":", "collections", "=", "self", ".", "get_data_collections", "(", ")", "if", "startTime", "is", "not", "None", ":", "startTime", "=", "float", "(", "startT...
:desc: Get all the notes in the given time window :param int startTime: The beginning of the window :param int endTime: The end of the window :returns: A list of IDs :rval: list
[ ":", "desc", ":", "Get", "all", "the", "notes", "in", "the", "given", "time", "window", ":", "param", "int", "startTime", ":", "The", "begining", "of", "the", "window", ":", "param", "int", "endTime", ":", "The", "end", "of", "the", "window", ":", "r...
python
train
rbuffat/pyepw
pyepw/epw.py
https://github.com/rbuffat/pyepw/blob/373d4d3c8386c8d35789f086ac5f6018c2711745/pyepw/epw.py#L2376-L2397
def hr_dp020(self, value=None):
    """ Corresponds to IDD Field `hr_dp020`
    humidity ratio corresponding to
    Dew-point temperature corresponding to 2.0% annual cumulative
    frequency of occurrence, calculated at the standard atmospheric
    pressure at elevation of station.

    Args:
        value (float): value for IDD Field `hr_dp020`
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    # None marks a missing value and is stored unchecked.
    if value is None:
        self._hr_dp020 = None
        return
    try:
        value = float(value)
    except ValueError:
        raise ValueError('value {} need to be of type float '
                         'for field `hr_dp020`'.format(value))
    self._hr_dp020 = value
[ "def", "hr_dp020", "(", "self", ",", "value", "=", "None", ")", ":", "if", "value", "is", "not", "None", ":", "try", ":", "value", "=", "float", "(", "value", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'value {} need to be of type float...
Corresponds to IDD Field `hr_dp020` humidity ratio corresponding to Dew-point temperature corresponding to 2.0% annual cumulative frequency of occurrence calculated at the standard atmospheric pressure at elevation of station Args: value (float): value for IDD Field `hr_dp020` if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
[ "Corresponds", "to", "IDD", "Field", "hr_dp020", "humidity", "ratio", "corresponding", "to", "Dew", "-", "point", "temperature", "corresponding", "to", "2", ".", "0%", "annual", "cumulative", "frequency", "of", "occurrence", "calculated", "at", "the", "standard", ...
python
train
maljovec/topopy
topopy/MorseComplex.py
https://github.com/maljovec/topopy/blob/4be598d51c4e4043b73d4ad44beed6d289e2f088/topopy/MorseComplex.py#L103-L157
def build(self, X, Y, w=None, edges=None): """ Assigns data to this object and builds the Morse-Smale Complex @ In, X, an m-by-n array of values specifying m n-dimensional samples @ In, Y, a m vector of values specifying the output responses corresponding to the m samples specified by X @ In, w, an optional m vector of values specifying the weights associated to each of the m samples used. Default of None means all points will be equally weighted @ In, edges, an optional list of custom edges to use as a starting point for pruning, or in place of a computed graph. """ super(MorseComplex, self).build(X, Y, w, edges) if self.debug: sys.stdout.write("Decomposition: ") start = time.clock() morse_complex = MorseComplexFloat( vectorFloat(self.Xnorm.flatten()), vectorFloat(self.Y), str(self.gradient), str(self.simplification), vectorFloat(self.w), self.graph_rep.full_graph(), self.debug, ) self.__amc = morse_complex self.persistences = [] self.merge_sequence = {} morse_complex_json = json.loads(morse_complex.to_json()) hierarchy = morse_complex_json["Hierarchy"] for merge in hierarchy: self.persistences.append(merge["Persistence"]) self.merge_sequence[merge["Dying"]] = ( merge["Persistence"], merge["Surviving"], merge["Saddle"], ) self.persistences = sorted(list(set(self.persistences))) partitions = morse_complex_json["Partitions"] self.base_partitions = {} for i, label in enumerate(partitions): if label not in self.base_partitions: self.base_partitions[label] = [] self.base_partitions[label].append(i) self.max_indices = list(self.base_partitions.keys()) if self.debug: end = time.clock() sys.stdout.write("%f s\n" % (end - start))
[ "def", "build", "(", "self", ",", "X", ",", "Y", ",", "w", "=", "None", ",", "edges", "=", "None", ")", ":", "super", "(", "MorseComplex", ",", "self", ")", ".", "build", "(", "X", ",", "Y", ",", "w", ",", "edges", ")", "if", "self", ".", "...
Assigns data to this object and builds the Morse-Smale Complex @ In, X, an m-by-n array of values specifying m n-dimensional samples @ In, Y, a m vector of values specifying the output responses corresponding to the m samples specified by X @ In, w, an optional m vector of values specifying the weights associated to each of the m samples used. Default of None means all points will be equally weighted @ In, edges, an optional list of custom edges to use as a starting point for pruning, or in place of a computed graph.
[ "Assigns", "data", "to", "this", "object", "and", "builds", "the", "Morse", "-", "Smale", "Complex" ]
python
train
dereneaton/ipyrad
ipyrad/core/assembly.py
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/core/assembly.py#L1398-L1433
def _get_samples(self, samples): """ Internal function. Prelude for each step() to read in perhaps non empty list of samples to process. Input is a list of sample names, output is a list of sample objects.""" ## if samples not entered use all samples if not samples: samples = self.samples.keys() ## Be nice and allow user to pass in only one sample as a string, ## rather than a one element list. When you make the string into a list ## you have to wrap it in square braces or else list makes a list of ## each character individually. if isinstance(samples, str): samples = list([samples]) ## if sample keys, replace with sample obj assert isinstance(samples, list), \ "to subselect samples enter as a list, e.g., [A, B]." newsamples = [self.samples.get(key) for key in samples \ if self.samples.get(key)] strnewsamples = [i.name for i in newsamples] ## are there any samples that did not make it into the dict? badsamples = set(samples).difference(set(strnewsamples)) if badsamples: outstring = ", ".join(badsamples) raise IPyradError(\ "Unrecognized Sample name(s) not linked to {}: {}"\ .format(self.name, outstring)) ## require Samples assert newsamples, \ "No Samples passed in and none in assembly {}".format(self.name) return newsamples
[ "def", "_get_samples", "(", "self", ",", "samples", ")", ":", "## if samples not entered use all samples", "if", "not", "samples", ":", "samples", "=", "self", ".", "samples", ".", "keys", "(", ")", "## Be nice and allow user to pass in only one sample as a string,", "#...
Internal function. Prelude for each step() to read in perhaps non empty list of samples to process. Input is a list of sample names, output is a list of sample objects.
[ "Internal", "function", ".", "Prelude", "for", "each", "step", "()", "to", "read", "in", "perhaps", "non", "empty", "list", "of", "samples", "to", "process", ".", "Input", "is", "a", "list", "of", "sample", "names", "output", "is", "a", "list", "of", "...
python
valid
Alignak-monitoring/alignak
alignak/objects/contactgroup.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/contactgroup.py#L110-L148
def get_contacts_by_explosion(self, contactgroups): # pylint: disable=access-member-before-definition """ Get contacts of this group :param contactgroups: Contactgroups object, use to look for a specific one :type contactgroups: alignak.objects.contactgroup.Contactgroups :return: list of contact of this group :rtype: list[alignak.objects.contact.Contact] """ # First we tag the hg so it will not be explode # if a son of it already call it self.already_exploded = True # Now the recursive part # rec_tag is set to False every CG we explode # so if True here, it must be a loop in HG # calls... not GOOD! if self.rec_tag: logger.error("[contactgroup::%s] got a loop in contactgroup definition", self.get_name()) if hasattr(self, 'members'): return self.members return '' # Ok, not a loop, we tag it and continue self.rec_tag = True cg_mbrs = self.get_contactgroup_members() for cg_mbr in cg_mbrs: contactgroup = contactgroups.find_by_name(cg_mbr.strip()) if contactgroup is not None: value = contactgroup.get_contacts_by_explosion(contactgroups) if value is not None: self.add_members(value) if hasattr(self, 'members'): return self.members return ''
[ "def", "get_contacts_by_explosion", "(", "self", ",", "contactgroups", ")", ":", "# pylint: disable=access-member-before-definition", "# First we tag the hg so it will not be explode", "# if a son of it already call it", "self", ".", "already_exploded", "=", "True", "# Now the recurs...
Get contacts of this group :param contactgroups: Contactgroups object, use to look for a specific one :type contactgroups: alignak.objects.contactgroup.Contactgroups :return: list of contact of this group :rtype: list[alignak.objects.contact.Contact]
[ "Get", "contacts", "of", "this", "group" ]
python
train
glormph/msstitch
src/app/actions/peptable/merge.py
https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/actions/peptable/merge.py#L81-L85
def get_protein_data(peptide, pdata, headerfields, accfield): """These fields are currently not pool dependent so headerfields is ignored""" report = get_proteins(peptide, pdata, headerfields) return get_cov_descriptions(peptide, pdata, report)
[ "def", "get_protein_data", "(", "peptide", ",", "pdata", ",", "headerfields", ",", "accfield", ")", ":", "report", "=", "get_proteins", "(", "peptide", ",", "pdata", ",", "headerfields", ")", "return", "get_cov_descriptions", "(", "peptide", ",", "pdata", ",",...
These fields are currently not pool dependent so headerfields is ignored
[ "These", "fields", "are", "currently", "not", "pool", "dependent", "so", "headerfields", "is", "ignored" ]
python
train
blockstack/blockstack-core
blockstack/lib/subdomains.py
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/subdomains.py#L260-L273
def parse_subdomain_missing_zonefiles_record(cls, rec): """ Parse a missing-zonefiles vector given by the domain. Returns the list of zone file indexes on success Raises ParseError on unparseable records """ txt_entry = rec['txt'] if isinstance(txt_entry, list): raise ParseError("TXT entry too long for a missing zone file list") try: return [int(i) for i in txt_entry.split(',')] if txt_entry is not None and len(txt_entry) > 0 else [] except ValueError: raise ParseError('Invalid integers')
[ "def", "parse_subdomain_missing_zonefiles_record", "(", "cls", ",", "rec", ")", ":", "txt_entry", "=", "rec", "[", "'txt'", "]", "if", "isinstance", "(", "txt_entry", ",", "list", ")", ":", "raise", "ParseError", "(", "\"TXT entry too long for a missing zone file li...
Parse a missing-zonefiles vector given by the domain. Returns the list of zone file indexes on success Raises ParseError on unparseable records
[ "Parse", "a", "missing", "-", "zonefiles", "vector", "given", "by", "the", "domain", ".", "Returns", "the", "list", "of", "zone", "file", "indexes", "on", "success", "Raises", "ParseError", "on", "unparseable", "records" ]
python
train
saltstack/salt
salt/client/__init__.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/__init__.py#L1294-L1348
def get_returns( self, jid, minions, timeout=None): ''' Get the returns for the command line interface via the event system ''' minions = set(minions) if timeout is None: timeout = self.opts['timeout'] start = int(time.time()) timeout_at = start + timeout log.debug( 'get_returns for jid %s sent to %s will timeout at %s', jid, minions, datetime.fromtimestamp(timeout_at).time() ) found = set() ret = {} # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') return ret except Exception as exc: raise SaltClientError('Master job cache returner [{0}] failed to verify jid. ' 'Exception details: {1}'.format(self.opts['master_job_cache'], exc)) # Wait for the hosts to check in while True: time_left = timeout_at - int(time.time()) wait = max(1, time_left) raw = self.event.get_event(wait, jid, auto_reconnect=self.auto_reconnect) if raw is not None and 'return' in raw: found.add(raw['id']) ret[raw['id']] = raw['return'] if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop log.debug('jid %s found all minions', jid) break continue # Then event system timeout was reached and nothing was returned if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop log.debug('jid %s found all minions', jid) break if int(time.time()) > timeout_at: log.info( 'jid %s minions %s did not return in time', jid, (minions - found) ) break time.sleep(0.01) return ret
[ "def", "get_returns", "(", "self", ",", "jid", ",", "minions", ",", "timeout", "=", "None", ")", ":", "minions", "=", "set", "(", "minions", ")", "if", "timeout", "is", "None", ":", "timeout", "=", "self", ".", "opts", "[", "'timeout'", "]", "start",...
Get the returns for the command line interface via the event system
[ "Get", "the", "returns", "for", "the", "command", "line", "interface", "via", "the", "event", "system" ]
python
train
roclark/sportsreference
sportsreference/nfl/schedule.py
https://github.com/roclark/sportsreference/blob/ea0bae432be76450e137671d2998eb38f962dffd/sportsreference/nfl/schedule.py#L623-L642
def _add_games_to_schedule(self, schedule, game_type, year): """ Add games instances to schedule. Create a Game instance for every applicable game in the season and append the instance to the '_game' property. Parameters ---------- schedule : PyQuery object A PyQuery object pertaining to a team's schedule table. game_type : string A string constant denoting whether the game is being played as part of the regular season or the playoffs. year : string The requested year to pull stats from. """ for item in schedule: game = Game(item, game_type, year) self._games.append(game)
[ "def", "_add_games_to_schedule", "(", "self", ",", "schedule", ",", "game_type", ",", "year", ")", ":", "for", "item", "in", "schedule", ":", "game", "=", "Game", "(", "item", ",", "game_type", ",", "year", ")", "self", ".", "_games", ".", "append", "(...
Add games instances to schedule. Create a Game instance for every applicable game in the season and append the instance to the '_game' property. Parameters ---------- schedule : PyQuery object A PyQuery object pertaining to a team's schedule table. game_type : string A string constant denoting whether the game is being played as part of the regular season or the playoffs. year : string The requested year to pull stats from.
[ "Add", "games", "instances", "to", "schedule", "." ]
python
train
benmoran56/esper
esper.py
https://github.com/benmoran56/esper/blob/5b6cd0c51718d5dcfa0e5613f824b5251cf092ac/esper.py#L199-L222
def remove_component(self, entity: int, component_type: Any) -> int: """Remove a Component instance from an Entity, by type. A Component instance can be removed by providing it's type. For example: world.delete_component(enemy_a, Velocity) will remove the Velocity instance from the Entity enemy_a. Raises a KeyError if either the given entity or Component type does not exist in the database. :param entity: The Entity to remove the Component from. :param component_type: The type of the Component to remove. """ self._components[component_type].discard(entity) if not self._components[component_type]: del self._components[component_type] del self._entities[entity][component_type] if not self._entities[entity]: del self._entities[entity] self.clear_cache() return entity
[ "def", "remove_component", "(", "self", ",", "entity", ":", "int", ",", "component_type", ":", "Any", ")", "->", "int", ":", "self", ".", "_components", "[", "component_type", "]", ".", "discard", "(", "entity", ")", "if", "not", "self", ".", "_component...
Remove a Component instance from an Entity, by type. A Component instance can be removed by providing it's type. For example: world.delete_component(enemy_a, Velocity) will remove the Velocity instance from the Entity enemy_a. Raises a KeyError if either the given entity or Component type does not exist in the database. :param entity: The Entity to remove the Component from. :param component_type: The type of the Component to remove.
[ "Remove", "a", "Component", "instance", "from", "an", "Entity", "by", "type", "." ]
python
train
edx/edx-val
edxval/api.py
https://github.com/edx/edx-val/blob/30df48061e77641edb5272895b7c7f7f25eb7aa7/edxval/api.py#L664-L700
def get_course_video_ids_with_youtube_profile(course_ids=None, offset=None, limit=None): """ Returns a list that contains all the course ids and video ids with the youtube profile Args: course_ids (list): valid course ids limit (int): batch records limit offset (int): an offset for selecting a batch Returns: (list): Tuples of course_id, edx_video_id and youtube video url """ course_videos = (CourseVideo.objects.select_related('video') .prefetch_related('video__encoded_videos', 'video__encoded_videos__profile') .filter(video__encoded_videos__profile__profile_name='youtube') .order_by('id') .distinct()) if course_ids: course_videos = course_videos.filter(course_id__in=course_ids) course_videos = course_videos.values_list('course_id', 'video__edx_video_id') if limit is not None and offset is not None: course_videos = course_videos[offset: offset+limit] course_videos_with_yt_profile = [] for course_id, edx_video_id in course_videos: yt_profile = EncodedVideo.objects.filter( video__edx_video_id=edx_video_id, profile__profile_name='youtube' ).first() if yt_profile: course_videos_with_yt_profile.append(( course_id, edx_video_id, yt_profile.url )) return course_videos_with_yt_profile
[ "def", "get_course_video_ids_with_youtube_profile", "(", "course_ids", "=", "None", ",", "offset", "=", "None", ",", "limit", "=", "None", ")", ":", "course_videos", "=", "(", "CourseVideo", ".", "objects", ".", "select_related", "(", "'video'", ")", ".", "pre...
Returns a list that contains all the course ids and video ids with the youtube profile Args: course_ids (list): valid course ids limit (int): batch records limit offset (int): an offset for selecting a batch Returns: (list): Tuples of course_id, edx_video_id and youtube video url
[ "Returns", "a", "list", "that", "contains", "all", "the", "course", "ids", "and", "video", "ids", "with", "the", "youtube", "profile" ]
python
train
emory-libraries/eulxml
eulxml/xmlmap/core.py
https://github.com/emory-libraries/eulxml/blob/17d71c7d98c0cebda9932b7f13e72093805e1fe2/eulxml/xmlmap/core.py#L61-L65
def parseString(string, uri=None): """Read an XML document provided as a byte string, and return a :mod:`lxml.etree` document. String cannot be a Unicode string. Base_uri should be provided for the calculation of relative URIs.""" return etree.fromstring(string, parser=_get_xmlparser(), base_url=uri)
[ "def", "parseString", "(", "string", ",", "uri", "=", "None", ")", ":", "return", "etree", ".", "fromstring", "(", "string", ",", "parser", "=", "_get_xmlparser", "(", ")", ",", "base_url", "=", "uri", ")" ]
Read an XML document provided as a byte string, and return a :mod:`lxml.etree` document. String cannot be a Unicode string. Base_uri should be provided for the calculation of relative URIs.
[ "Read", "an", "XML", "document", "provided", "as", "a", "byte", "string", "and", "return", "a", ":", "mod", ":", "lxml", ".", "etree", "document", ".", "String", "cannot", "be", "a", "Unicode", "string", ".", "Base_uri", "should", "be", "provided", "for"...
python
train
mogproject/mog-commons-python
src/mog_commons/terminal.py
https://github.com/mogproject/mog-commons-python/blob/951cf0fa9a56248b4d45be720be25f1d4b7e1bff/src/mog_commons/terminal.py#L151-L168
def getch(self): """ Read one character from stdin. If stdin is not a tty or set `getch_enabled`=False, read input as one line. :return: unicode: """ ch = self._get_one_char() if self.keep_input_clean: self.clear_input_buffer() try: # accept only unicode characters (for Python 2) uch = to_unicode(ch, 'ascii') except UnicodeError: return '' return uch if self._check_key_repeat(uch) else ''
[ "def", "getch", "(", "self", ")", ":", "ch", "=", "self", ".", "_get_one_char", "(", ")", "if", "self", ".", "keep_input_clean", ":", "self", ".", "clear_input_buffer", "(", ")", "try", ":", "# accept only unicode characters (for Python 2)", "uch", "=", "to_un...
Read one character from stdin. If stdin is not a tty or set `getch_enabled`=False, read input as one line. :return: unicode:
[ "Read", "one", "character", "from", "stdin", "." ]
python
train
PyHDI/Pyverilog
pyverilog/vparser/parser.py
https://github.com/PyHDI/Pyverilog/blob/b852cc5ed6a7a2712e33639f9d9782d0d1587a53/pyverilog/vparser/parser.py#L1679-L1682
def p_event_statement(self, p): 'event_statement : senslist SEMICOLON' p[0] = EventStatement(p[1], lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1))
[ "def", "p_event_statement", "(", "self", ",", "p", ")", ":", "p", "[", "0", "]", "=", "EventStatement", "(", "p", "[", "1", "]", ",", "lineno", "=", "p", ".", "lineno", "(", "1", ")", ")", "p", ".", "set_lineno", "(", "0", ",", "p", ".", "lin...
event_statement : senslist SEMICOLON
[ "event_statement", ":", "senslist", "SEMICOLON" ]
python
train
joferkington/mpldatacursor
mpldatacursor/pick_info.py
https://github.com/joferkington/mpldatacursor/blob/7dabc589ed02c35ac5d89de5931f91e0323aa795/mpldatacursor/pick_info.py#L356-L370
def get_xy(artist): """ Attempts to get the x,y data for individual items subitems of the artist. Returns None if this is not possible. At present, this only supports Line2D's and basic collections. """ xy = None if hasattr(artist, 'get_offsets'): xy = artist.get_offsets().T elif hasattr(artist, 'get_xydata'): xy = artist.get_xydata().T return xy
[ "def", "get_xy", "(", "artist", ")", ":", "xy", "=", "None", "if", "hasattr", "(", "artist", ",", "'get_offsets'", ")", ":", "xy", "=", "artist", ".", "get_offsets", "(", ")", ".", "T", "elif", "hasattr", "(", "artist", ",", "'get_xydata'", ")", ":",...
Attempts to get the x,y data for individual items subitems of the artist. Returns None if this is not possible. At present, this only supports Line2D's and basic collections.
[ "Attempts", "to", "get", "the", "x", "y", "data", "for", "individual", "items", "subitems", "of", "the", "artist", ".", "Returns", "None", "if", "this", "is", "not", "possible", "." ]
python
train
cslarsen/elv
elv/elv.py
https://github.com/cslarsen/elv/blob/4bacf2093a0dcbe6a2b4d79be0fe339bb2b99097/elv/elv.py#L367-L373
def group_by(self, key, field=lambda x: x.xfer): """Returns all transactions whose given ``field`` matches ``key``. Returns: A ``Transactions`` object. """ return Transactions([t for t in self.trans if field(t) == key])
[ "def", "group_by", "(", "self", ",", "key", ",", "field", "=", "lambda", "x", ":", "x", ".", "xfer", ")", ":", "return", "Transactions", "(", "[", "t", "for", "t", "in", "self", ".", "trans", "if", "field", "(", "t", ")", "==", "key", "]", ")" ...
Returns all transactions whose given ``field`` matches ``key``. Returns: A ``Transactions`` object.
[ "Returns", "all", "transactions", "whose", "given", "field", "matches", "key", "." ]
python
train
apache/airflow
airflow/contrib/operators/databricks_operator.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/operators/databricks_operator.py#L61-L92
def _handle_databricks_operator_execution(operator, hook, log, context): """ Handles the Airflow + Databricks lifecycle logic for a Databricks operator :param operator: Databricks operator being handled :param context: Airflow context """ if operator.do_xcom_push: context['ti'].xcom_push(key=XCOM_RUN_ID_KEY, value=operator.run_id) log.info('Run submitted with run_id: %s', operator.run_id) run_page_url = hook.get_run_page_url(operator.run_id) if operator.do_xcom_push: context['ti'].xcom_push(key=XCOM_RUN_PAGE_URL_KEY, value=run_page_url) log.info('View run status, Spark UI, and logs at %s', run_page_url) while True: run_state = hook.get_run_state(operator.run_id) if run_state.is_terminal: if run_state.is_successful: log.info('%s completed successfully.', operator.task_id) log.info('View run status, Spark UI, and logs at %s', run_page_url) return else: error_message = '{t} failed with terminal state: {s}'.format( t=operator.task_id, s=run_state) raise AirflowException(error_message) else: log.info('%s in run state: %s', operator.task_id, run_state) log.info('View run status, Spark UI, and logs at %s', run_page_url) log.info('Sleeping for %s seconds.', operator.polling_period_seconds) time.sleep(operator.polling_period_seconds)
[ "def", "_handle_databricks_operator_execution", "(", "operator", ",", "hook", ",", "log", ",", "context", ")", ":", "if", "operator", ".", "do_xcom_push", ":", "context", "[", "'ti'", "]", ".", "xcom_push", "(", "key", "=", "XCOM_RUN_ID_KEY", ",", "value", "...
Handles the Airflow + Databricks lifecycle logic for a Databricks operator :param operator: Databricks operator being handled :param context: Airflow context
[ "Handles", "the", "Airflow", "+", "Databricks", "lifecycle", "logic", "for", "a", "Databricks", "operator" ]
python
test
Unidata/MetPy
metpy/calc/kinematics.py
https://github.com/Unidata/MetPy/blob/16f68a94919b9a82dcf9cada2169cf039129e67b/metpy/calc/kinematics.py#L46-L84
def ensure_yx_order(func): """Wrap a function to ensure all array arguments are y, x ordered, based on kwarg.""" @functools.wraps(func) def wrapper(*args, **kwargs): # Check what order we're given dim_order = kwargs.pop('dim_order', None) x_first = _is_x_first_dim(dim_order) # If x is the first dimension, flip (transpose) every array within the function args. if x_first: args = tuple(_check_and_flip(arr) for arr in args) for k, v in kwargs: kwargs[k] = _check_and_flip(v) ret = func(*args, **kwargs) # If we flipped on the way in, need to flip on the way out so that output array(s) # match the dimension order of the original input. if x_first: return _check_and_flip(ret) else: return ret # Inject a docstring for the dim_order argument into the function's docstring. dim_order_doc = """ dim_order : str or ``None``, optional The ordering of dimensions in passed in arrays. Can be one of ``None``, ``'xy'``, or ``'yx'``. ``'xy'`` indicates that the dimension corresponding to x is the leading dimension, followed by y. ``'yx'`` indicates that x is the last dimension, preceded by y. ``None`` indicates that the default ordering should be assumed, which is 'yx'. Can only be passed as a keyword argument, i.e. func(..., dim_order='xy').""" # Find the first blank line after the start of the parameters section params = wrapper.__doc__.find('Parameters') blank = wrapper.__doc__.find('\n\n', params) wrapper.__doc__ = wrapper.__doc__[:blank] + dim_order_doc + wrapper.__doc__[blank:] return wrapper
[ "def", "ensure_yx_order", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# Check what order we're given", "dim_order", "=", "kwargs", ".", "pop", "(", "'dim...
Wrap a function to ensure all array arguments are y, x ordered, based on kwarg.
[ "Wrap", "a", "function", "to", "ensure", "all", "array", "arguments", "are", "y", "x", "ordered", "based", "on", "kwarg", "." ]
python
train
ambitioninc/rabbitmq-admin
rabbitmq_admin/api.py
https://github.com/ambitioninc/rabbitmq-admin/blob/ff65054115f19991da153f0e4f4e45e526545fea/rabbitmq_admin/api.py#L334-L343
def delete_user(self, name): """ Delete a user. :param name: The user's name :type name: str """ self._api_delete('/api/users/{0}'.format( urllib.parse.quote_plus(name) ))
[ "def", "delete_user", "(", "self", ",", "name", ")", ":", "self", ".", "_api_delete", "(", "'/api/users/{0}'", ".", "format", "(", "urllib", ".", "parse", ".", "quote_plus", "(", "name", ")", ")", ")" ]
Delete a user. :param name: The user's name :type name: str
[ "Delete", "a", "user", "." ]
python
train
bslatkin/dpxdt
dpxdt/client/workers.py
https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/client/workers.py#L74-L92
def _print_repr(self, depth): """Print this WorkItem to the given stack depth. The depth parameter ensures that we can print WorkItems in arbitrarily long chains without hitting the max stack depth. This can happen with WaitForUrlWorkflowItems, which create long chains of small waits. """ if depth <= 0: return '%s.%s#%d' % ( self.__class__.__module__, self.__class__.__name__, id(self)) return '%s.%s(%s)#%d' % ( self.__class__.__module__, self.__class__.__name__, self._print_tree(self._get_dict_for_repr(), depth - 1), id(self))
[ "def", "_print_repr", "(", "self", ",", "depth", ")", ":", "if", "depth", "<=", "0", ":", "return", "'%s.%s#%d'", "%", "(", "self", ".", "__class__", ".", "__module__", ",", "self", ".", "__class__", ".", "__name__", ",", "id", "(", "self", ")", ")",...
Print this WorkItem to the given stack depth. The depth parameter ensures that we can print WorkItems in arbitrarily long chains without hitting the max stack depth. This can happen with WaitForUrlWorkflowItems, which create long chains of small waits.
[ "Print", "this", "WorkItem", "to", "the", "given", "stack", "depth", "." ]
python
train
cenima-ibama/lc8_download
lc8_download/lc8.py
https://github.com/cenima-ibama/lc8_download/blob/d366e8b42b143597c71663ccb838bf8375c8d817/lc8_download/lc8.py#L205-L227
def download(self, bands, download_dir=None, metadata=False): """Download each specified band and metadata.""" super(AWSDownloader, self).validate_bands(bands) if download_dir is None: download_dir = DOWNLOAD_DIR dest_dir = check_create_folder(join(download_dir, self.sceneInfo.name)) downloaded = [] for band in bands: if band == 'BQA': filename = '%s_%s.%s' % (self.sceneInfo.name, band, self.__remote_file_ext) else: filename = '%s_B%s.%s' % (self.sceneInfo.name, band, self.__remote_file_ext) band_url = join(self.base_url, filename) downloaded.append(self.fetch(band_url, dest_dir, filename)) if metadata: filename = '%s_MTL.txt' % (self.sceneInfo.name) url = join(self.base_url, filename) self.fetch(url, dest_dir, filename) return downloaded
[ "def", "download", "(", "self", ",", "bands", ",", "download_dir", "=", "None", ",", "metadata", "=", "False", ")", ":", "super", "(", "AWSDownloader", ",", "self", ")", ".", "validate_bands", "(", "bands", ")", "if", "download_dir", "is", "None", ":", ...
Download each specified band and metadata.
[ "Download", "each", "specified", "band", "and", "metadata", "." ]
python
valid
robotools/fontParts
Lib/fontParts/base/image.py
https://github.com/robotools/fontParts/blob/d2ff106fe95f9d566161d936a645157626568712/Lib/fontParts/base/image.py#L199-L205
def _set_scale(self, value): """ Subclasses may override this method. """ sx, sxy, syx, sy, ox, oy = self.transformation sx, sy = value self.transformation = (sx, sxy, syx, sy, ox, oy)
[ "def", "_set_scale", "(", "self", ",", "value", ")", ":", "sx", ",", "sxy", ",", "syx", ",", "sy", ",", "ox", ",", "oy", "=", "self", ".", "transformation", "sx", ",", "sy", "=", "value", "self", ".", "transformation", "=", "(", "sx", ",", "sxy",...
Subclasses may override this method.
[ "Subclasses", "may", "override", "this", "method", "." ]
python
train
apriha/lineage
src/lineage/__init__.py
https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/__init__.py#L859-L882
def create_dir(path): """ Create directory specified by `path` if it doesn't already exist. Parameters ---------- path : str path to directory Returns ------- bool True if `path` exists """ # https://stackoverflow.com/a/5032238 try: os.makedirs(path, exist_ok=True) except Exception as err: print(err) return False if os.path.exists(path): return True else: return False
[ "def", "create_dir", "(", "path", ")", ":", "# https://stackoverflow.com/a/5032238", "try", ":", "os", ".", "makedirs", "(", "path", ",", "exist_ok", "=", "True", ")", "except", "Exception", "as", "err", ":", "print", "(", "err", ")", "return", "False", "i...
Create directory specified by `path` if it doesn't already exist. Parameters ---------- path : str path to directory Returns ------- bool True if `path` exists
[ "Create", "directory", "specified", "by", "path", "if", "it", "doesn", "t", "already", "exist", "." ]
python
train
ejeschke/ginga
ginga/Bindings.py
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/Bindings.py#L1994-L2014
def sc_pan(self, viewer, event, msg=True): """Interactively pan the image by scrolling motion. """ if not self.canpan: return True # User has "Pan Reverse" preference set? rev = self.settings.get('pan_reverse', False) direction = event.direction if rev: direction = math.fmod(direction + 180.0, 360.0) pan_accel = self.settings.get('scroll_pan_acceleration', 1.0) # Internal factor to adjust the panning speed so that user-adjustable # scroll_pan_acceleration is normalized to 1.0 for "normal" speed scr_pan_adj_factor = 1.4142135623730951 amount = (event.amount * scr_pan_adj_factor * pan_accel) / 360.0 self.pan_omni(viewer, direction, amount) return True
[ "def", "sc_pan", "(", "self", ",", "viewer", ",", "event", ",", "msg", "=", "True", ")", ":", "if", "not", "self", ".", "canpan", ":", "return", "True", "# User has \"Pan Reverse\" preference set?", "rev", "=", "self", ".", "settings", ".", "get", "(", "...
Interactively pan the image by scrolling motion.
[ "Interactively", "pan", "the", "image", "by", "scrolling", "motion", "." ]
python
train
ynop/audiomate
audiomate/corpus/subset/selection.py
https://github.com/ynop/audiomate/blob/61727920b23a708293c3d526fa3000d4de9c6c21/audiomate/corpus/subset/selection.py#L143-L190
def maximal_balanced_subset(self, by_duration=False, label_list_ids=None): """ Create a subset of the corpus as big as possible, so that the labels are balanced approximately. The label with the shortest duration (or with the fewest utterance if by_duration=False) is taken as reference. All other labels are selected so they match the shortest one as far as possible. Args: by_duration (bool): If True the size measure is the duration of all utterances in a subset/corpus. label_list_ids (list): List of label-list ids. If none is given, all label-lists are considered for balancing. Otherwise only the ones that are in the list are considered. Returns: Subview: The subview representing the subset. """ all_label_values = self.corpus.all_label_values(label_list_ids=label_list_ids) if by_duration: utterance_durations = {utt_idx: utt.duration for utt_idx, utt in self.corpus.utterances.items()} total_duration_per_label = self.corpus.label_durations(label_list_ids=label_list_ids) rarest_label_duration = sorted(total_duration_per_label.values())[0] target_duration = len(all_label_values) * rarest_label_duration label_durations_per_utterance = {} for utt_idx, utt in self.corpus.utterances.items(): label_durations_per_utterance[utt_idx] = utt.label_total_duration(label_list_ids) subset_utterance_ids = utils.select_balanced_subset(label_durations_per_utterance, target_duration, list(all_label_values), select_count_values=utterance_durations, seed=self.rand.random()) else: total_count_per_label = self.corpus.label_count(label_list_ids=label_list_ids) lowest_label_count = sorted(total_count_per_label.values())[0] target_label_count = lowest_label_count * len(all_label_values) utterance_with_label_counts = collections.defaultdict(dict) for utterance_idx, utterance in self.corpus.utterances.items(): utterance_with_label_counts[utterance_idx] = utterance.label_count(label_list_ids=label_list_ids) subset_utterance_ids = utils.select_balanced_subset(utterance_with_label_counts, 
target_label_count, list(all_label_values), seed=self.rand.random()) filter = subview.MatchingUtteranceIdxFilter(utterance_idxs=set(subset_utterance_ids)) return subview.Subview(self.corpus, filter_criteria=[filter])
[ "def", "maximal_balanced_subset", "(", "self", ",", "by_duration", "=", "False", ",", "label_list_ids", "=", "None", ")", ":", "all_label_values", "=", "self", ".", "corpus", ".", "all_label_values", "(", "label_list_ids", "=", "label_list_ids", ")", "if", "by_d...
Create a subset of the corpus as big as possible, so that the labels are balanced approximately. The label with the shortest duration (or with the fewest utterance if by_duration=False) is taken as reference. All other labels are selected so they match the shortest one as far as possible. Args: by_duration (bool): If True the size measure is the duration of all utterances in a subset/corpus. label_list_ids (list): List of label-list ids. If none is given, all label-lists are considered for balancing. Otherwise only the ones that are in the list are considered. Returns: Subview: The subview representing the subset.
[ "Create", "a", "subset", "of", "the", "corpus", "as", "big", "as", "possible", "so", "that", "the", "labels", "are", "balanced", "approximately", ".", "The", "label", "with", "the", "shortest", "duration", "(", "or", "with", "the", "fewest", "utterance", "...
python
train
eisensheng/kaviar
kaviar/adapter.py
https://github.com/eisensheng/kaviar/blob/77ab934a3dd7b1cfabc0ec96acc0b8ed26edcb3f/kaviar/adapter.py#L54-L69
def get_logger(cls, *name, **kwargs): """Construct a new :class:`KvLoggerAdapter` which encapsulates the :class:`logging.Logger` specified by ``name``. :param name: Any amount of symbols. Will be concatenated and normalized to form the logger name. Can also be empty. :param extra: Additional context relevant information. :return: A new :class:`KvLoggerAdapter` instance ready to use. :rtype: :class:`KvLoggerAdapter` """ return cls(getLogger(_normalize_name(name)), kwargs.get('extra', None))
[ "def", "get_logger", "(", "cls", ",", "*", "name", ",", "*", "*", "kwargs", ")", ":", "return", "cls", "(", "getLogger", "(", "_normalize_name", "(", "name", ")", ")", ",", "kwargs", ".", "get", "(", "'extra'", ",", "None", ")", ")" ]
Construct a new :class:`KvLoggerAdapter` which encapsulates the :class:`logging.Logger` specified by ``name``. :param name: Any amount of symbols. Will be concatenated and normalized to form the logger name. Can also be empty. :param extra: Additional context relevant information. :return: A new :class:`KvLoggerAdapter` instance ready to use. :rtype: :class:`KvLoggerAdapter`
[ "Construct", "a", "new", ":", "class", ":", "KvLoggerAdapter", "which", "encapsulates", "the", ":", "class", ":", "logging", ".", "Logger", "specified", "by", "name", "." ]
python
train
QUANTAXIS/QUANTAXIS
QUANTAXIS/QAData/base_datastruct.py
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAData/base_datastruct.py#L593-L596
def normalized(self): '归一化' res = self.groupby('code').apply(lambda x: x / x.iloc[0]) return res
[ "def", "normalized", "(", "self", ")", ":", "res", "=", "self", ".", "groupby", "(", "'code'", ")", ".", "apply", "(", "lambda", "x", ":", "x", "/", "x", ".", "iloc", "[", "0", "]", ")", "return", "res" ]
归一化
[ "归一化" ]
python
train
trailofbits/manticore
manticore/native/cpu/x86.py
https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/native/cpu/x86.py#L3497-L3503
def LJMP(cpu, cs_selector, target): """ We are just going to ignore the CS selector for now. """ logger.info("LJMP: Jumping to: %r:%r", cs_selector.read(), target.read()) cpu.CS = cs_selector.read() cpu.PC = target.read()
[ "def", "LJMP", "(", "cpu", ",", "cs_selector", ",", "target", ")", ":", "logger", ".", "info", "(", "\"LJMP: Jumping to: %r:%r\"", ",", "cs_selector", ".", "read", "(", ")", ",", "target", ".", "read", "(", ")", ")", "cpu", ".", "CS", "=", "cs_selector...
We are just going to ignore the CS selector for now.
[ "We", "are", "just", "going", "to", "ignore", "the", "CS", "selector", "for", "now", "." ]
python
valid
vtkiorg/vtki
vtki/renderer.py
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/renderer.py#L428-L431
def add_bounds_axes(self, *args, **kwargs): """Deprecated""" logging.warning('`add_bounds_axes` is deprecated. Use `show_bounds` or `show_grid`.') return self.show_bounds(*args, **kwargs)
[ "def", "add_bounds_axes", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "logging", ".", "warning", "(", "'`add_bounds_axes` is deprecated. Use `show_bounds` or `show_grid`.'", ")", "return", "self", ".", "show_bounds", "(", "*", "args", ",", ...
Deprecated
[ "Deprecated" ]
python
train
edx/edx-val
edxval/transcript_utils.py
https://github.com/edx/edx-val/blob/30df48061e77641edb5272895b7c7f7f25eb7aa7/edxval/transcript_utils.py#L50-L76
def generate_srt_from_sjson(sjson_subs): """ Generate transcripts from sjson to SubRip (*.srt). Arguments: sjson_subs (dict): `sjson` subs. Returns: Subtitles in SRT format. """ output = '' equal_len = len(sjson_subs['start']) == len(sjson_subs['end']) == len(sjson_subs['text']) if not equal_len: return output for i in range(len(sjson_subs['start'])): item = SubRipItem( index=i, start=SubRipTime(milliseconds=sjson_subs['start'][i]), end=SubRipTime(milliseconds=sjson_subs['end'][i]), text=sjson_subs['text'][i] ) output += (six.text_type(item)) output += '\n' return output
[ "def", "generate_srt_from_sjson", "(", "sjson_subs", ")", ":", "output", "=", "''", "equal_len", "=", "len", "(", "sjson_subs", "[", "'start'", "]", ")", "==", "len", "(", "sjson_subs", "[", "'end'", "]", ")", "==", "len", "(", "sjson_subs", "[", "'text'...
Generate transcripts from sjson to SubRip (*.srt). Arguments: sjson_subs (dict): `sjson` subs. Returns: Subtitles in SRT format.
[ "Generate", "transcripts", "from", "sjson", "to", "SubRip", "(", "*", ".", "srt", ")", "." ]
python
train
log2timeline/plaso
plaso/formatters/chrome.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/formatters/chrome.py#L89-L142
def GetMessages(self, formatter_mediator, event): """Determines the formatted message strings for an event object. Args: formatter_mediator (FormatterMediator): mediates the interactions between formatters and other components, such as storage and Windows EventLog resources. event (EventObject): event. Returns: tuple(str, str): formatted message string and short message string. Raises: WrongFormatter: if the event object cannot be formatted by the formatter. """ if self.DATA_TYPE != event.data_type: raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format( event.data_type)) event_values = event.CopyToDict() page_transition_type = event_values.get('page_transition_type', None) if page_transition_type is not None: page_transition, page_transition_long = self._PAGE_TRANSITIONS.get( page_transition_type, self._UNKNOWN_PAGE_TRANSITION) if page_transition_long: event_values['page_transition'] = '{0:s} - {1:s}'.format( page_transition, page_transition_long) else: event_values['page_transition'] = page_transition visit_source = event_values.get('visit_source', None) if visit_source is not None: event_values['visit_source'] = self._VISIT_SOURCE.get( visit_source, 'UNKNOWN') extras = [] url_hidden = event_values.get('url_hidden', False) if url_hidden: extras.append('(url hidden)') typed_count = event_values.get('typed_count', 0) if typed_count == 0: extras.append('(URL not typed directly - no typed count)') elif typed_count == 1: extras.append('(type count {0:d} time)'.format(typed_count)) else: extras.append('(type count {0:d} times)'.format(typed_count)) event_values['extra'] = ' '.join(extras) return self._ConditionalFormatMessages(event_values)
[ "def", "GetMessages", "(", "self", ",", "formatter_mediator", ",", "event", ")", ":", "if", "self", ".", "DATA_TYPE", "!=", "event", ".", "data_type", ":", "raise", "errors", ".", "WrongFormatter", "(", "'Unsupported data type: {0:s}.'", ".", "format", "(", "e...
Determines the formatted message strings for an event object. Args: formatter_mediator (FormatterMediator): mediates the interactions between formatters and other components, such as storage and Windows EventLog resources. event (EventObject): event. Returns: tuple(str, str): formatted message string and short message string. Raises: WrongFormatter: if the event object cannot be formatted by the formatter.
[ "Determines", "the", "formatted", "message", "strings", "for", "an", "event", "object", "." ]
python
train
blockstack/blockstack-core
blockstack/lib/atlas.py
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L352-L368
def atlas_peer_table_lock(): """ Lock the global health info table. Return the table. """ global PEER_TABLE_LOCK, PEER_TABLE, PEER_TABLE_LOCK_HOLDER, PEER_TABLE_LOCK_TRACEBACK if PEER_TABLE_LOCK_HOLDER is not None: assert PEER_TABLE_LOCK_HOLDER != threading.current_thread(), "DEADLOCK" # log.warning("\n\nPossible contention: lock from %s (but held by %s at)\n%s\n\n" % (threading.current_thread(), PEER_TABLE_LOCK_HOLDER, PEER_TABLE_LOCK_TRACEBACK)) PEER_TABLE_LOCK.acquire() PEER_TABLE_LOCK_HOLDER = threading.current_thread() PEER_TABLE_LOCK_TRACEBACK = traceback.format_stack() # log.debug("\n\npeer table lock held by %s at \n%s\n\n" % (PEER_TABLE_LOCK_HOLDER, PEER_TABLE_LOCK_TRACEBACK)) return PEER_TABLE
[ "def", "atlas_peer_table_lock", "(", ")", ":", "global", "PEER_TABLE_LOCK", ",", "PEER_TABLE", ",", "PEER_TABLE_LOCK_HOLDER", ",", "PEER_TABLE_LOCK_TRACEBACK", "if", "PEER_TABLE_LOCK_HOLDER", "is", "not", "None", ":", "assert", "PEER_TABLE_LOCK_HOLDER", "!=", "threading",...
Lock the global health info table. Return the table.
[ "Lock", "the", "global", "health", "info", "table", ".", "Return", "the", "table", "." ]
python
train
robertpeteuil/aws-shortcuts
awss/core.py
https://github.com/robertpeteuil/aws-shortcuts/blob/cf453ca996978a4d88015d1cf6125bce8ca4873b/awss/core.py#L160-L173
def cmd_list(options): """Gather data for instances matching args and call display func. Args: options (object): contains args and data from parser. """ (i_info, param_str) = gather_data(options) if i_info: awsc.get_all_aminames(i_info) param_str = "Instance List - " + param_str + "\n" list_instances(i_info, param_str) else: print("No instances found with parameters: {}".format(param_str))
[ "def", "cmd_list", "(", "options", ")", ":", "(", "i_info", ",", "param_str", ")", "=", "gather_data", "(", "options", ")", "if", "i_info", ":", "awsc", ".", "get_all_aminames", "(", "i_info", ")", "param_str", "=", "\"Instance List - \"", "+", "param_str", ...
Gather data for instances matching args and call display func. Args: options (object): contains args and data from parser.
[ "Gather", "data", "for", "instances", "matching", "args", "and", "call", "display", "func", "." ]
python
train
deontologician/restnavigator
restnavigator/halnav.py
https://github.com/deontologician/restnavigator/blob/453b9de4e70e602009d3e3ffafcf77d23c8b07c5/restnavigator/halnav.py#L447-L463
def _navigator_or_thunk(self, link): '''Crafts a navigator or from a hal-json link dict. If the link is relative, the returned navigator will have a uri that relative to this navigator's uri. If the link passed in is templated, a PartialNavigator will be returned instead. ''' # resolve relative uris against the current uri uri = urlparse.urljoin(self.uri, link['href']) link_obj = Link(uri=uri, properties=link) if link.get('templated'): # Can expand into a real HALNavigator return PartialNavigator(link_obj, core=self._core) else: return HALNavigator(link_obj, core=self._core)
[ "def", "_navigator_or_thunk", "(", "self", ",", "link", ")", ":", "# resolve relative uris against the current uri", "uri", "=", "urlparse", ".", "urljoin", "(", "self", ".", "uri", ",", "link", "[", "'href'", "]", ")", "link_obj", "=", "Link", "(", "uri", "...
Crafts a navigator or from a hal-json link dict. If the link is relative, the returned navigator will have a uri that relative to this navigator's uri. If the link passed in is templated, a PartialNavigator will be returned instead.
[ "Crafts", "a", "navigator", "or", "from", "a", "hal", "-", "json", "link", "dict", "." ]
python
train
kellerza/pyqwikswitch
pyqwikswitch/qwikswitch.py
https://github.com/kellerza/pyqwikswitch/blob/9d4f080048221eaee93e3eefcf641919ff1af586/pyqwikswitch/qwikswitch.py#L246-L251
def decode_pir(packet, channel=1): """Decode a PIR.""" val = str(packet.get(QSDATA, '')) if len(val) == 8 and val.startswith('0f') and channel == 1: return int(val[-4:], 16) > 0 return None
[ "def", "decode_pir", "(", "packet", ",", "channel", "=", "1", ")", ":", "val", "=", "str", "(", "packet", ".", "get", "(", "QSDATA", ",", "''", ")", ")", "if", "len", "(", "val", ")", "==", "8", "and", "val", ".", "startswith", "(", "'0f'", ")"...
Decode a PIR.
[ "Decode", "a", "PIR", "." ]
python
train
datadotworld/data.world-py
datadotworld/datadotworld.py
https://github.com/datadotworld/data.world-py/blob/ffaeb115f358731ab0b805b0c43b7ff2e3cf0a77/datadotworld/datadotworld.py#L116-L199
def load_dataset(self, dataset_key, force_update=False, auto_update=False): """Load a dataset from the local filesystem, downloading it from data.world first, if necessary. This function returns an object of type `LocalDataset`. The object allows access to metedata via it's `describe()` method and to all the data via three properties `raw_data`, `tables` and `dataframes`, all of which are mappings (dict-like structures). :param dataset_key: Dataset identifier, in the form of owner/id or of a url :type dataset_key: str :param force_update: Flag, indicating if a new copy of the dataset should be downloaded replacing any previously downloaded copy (Default value = False) :type force_update: bool :param auto_update: Flag, indicating that dataset be updated to the latest version :type auto_update: bool :returns: The object representing the dataset :rtype: LocalDataset :raises RestApiError: If a server error occurs """ owner_id, dataset_id = parse_dataset_key(dataset_key) cache_dir = path.join(self._config.cache_dir, owner_id, dataset_id, 'latest') backup_dir = None if path.isdir(cache_dir) and force_update: backup_dir = path.join(self._config.cache_dir, owner_id, dataset_id, 'backup') move_cache_dir_to_backup_dir(backup_dir, cache_dir) descriptor_file = path.join(cache_dir, 'datapackage.json') if not path.isfile(descriptor_file): try: descriptor_file = self.api_client.download_datapackage( dataset_key, cache_dir) except RestApiError as e: if backup_dir is not None: shutil.move(backup_dir, cache_dir) warn('Unable to download datapackage ({}). 
' 'Loading previously saved version.'.format(e.reason)) else: raise else: try: dataset_info = self.api_client.get_dataset(dataset_key) except RestApiError as e: return LocalDataset(descriptor_file) last_modified = datetime.strptime(dataset_info['updated'], '%Y-%m-%dT%H:%M:%S.%fZ') if (last_modified > datetime.utcfromtimestamp( path.getmtime(str(descriptor_file)))): if auto_update: try: backup_dir = path.join(self._config.cache_dir, owner_id, dataset_id, 'backup') move_cache_dir_to_backup_dir(backup_dir, cache_dir) descriptor_file = self.api_client. \ download_datapackage(dataset_key, cache_dir) except RestApiError as e: if backup_dir is not None: shutil.move(backup_dir, cache_dir) warn('Unable to auto update datapackage ({}). ' 'Loading previously saved version.' .format(e.reason)) else: raise else: filterwarnings('always', message='You are using an outdated copy') warn('You are using an outdated copy of {}. ' 'If you wish to use the latest version, call this ' 'function with the argument ' 'auto_update=True or ' 'force_update=True'.format(dataset_key)) if backup_dir is not None: shutil.rmtree(backup_dir, ignore_errors=True) return LocalDataset(descriptor_file)
[ "def", "load_dataset", "(", "self", ",", "dataset_key", ",", "force_update", "=", "False", ",", "auto_update", "=", "False", ")", ":", "owner_id", ",", "dataset_id", "=", "parse_dataset_key", "(", "dataset_key", ")", "cache_dir", "=", "path", ".", "join", "(...
Load a dataset from the local filesystem, downloading it from data.world first, if necessary. This function returns an object of type `LocalDataset`. The object allows access to metedata via it's `describe()` method and to all the data via three properties `raw_data`, `tables` and `dataframes`, all of which are mappings (dict-like structures). :param dataset_key: Dataset identifier, in the form of owner/id or of a url :type dataset_key: str :param force_update: Flag, indicating if a new copy of the dataset should be downloaded replacing any previously downloaded copy (Default value = False) :type force_update: bool :param auto_update: Flag, indicating that dataset be updated to the latest version :type auto_update: bool :returns: The object representing the dataset :rtype: LocalDataset :raises RestApiError: If a server error occurs
[ "Load", "a", "dataset", "from", "the", "local", "filesystem", "downloading", "it", "from", "data", ".", "world", "first", "if", "necessary", "." ]
python
train
Prev/shaman
shamanld/shaman.py
https://github.com/Prev/shaman/blob/82891c17c6302f7f9881a215789856d460a85f9c/shamanld/shaman.py#L43-L84
def detect(self, code) : """ Detect language with code """ keywords = KeywordFetcher.fetch( code ) probabilities = {} for keyword in keywords : if keyword not in self.trained_set['keywords'] : continue data = self.trained_set['keywords'][keyword] p_avg = sum(data.values()) / len(data) # Average probability of all languages for language, probability in data.items() : # By Naïve Bayes Classification p = probability / p_avg probabilities[ language ] = probabilities.get(language, 0) + math.log(1 + p) for pattern, data in self.trained_set['patterns'].items() : matcher = PatternMatcher(pattern) p0 = matcher.getratio(code) for language, p_avg in data.items() : if language not in probabilities : continue p = 1 - abs(p_avg - p0) probabilities[ language ] *= p # Convert `log` operated probability to percentile sum_val = 0 for language, p in probabilities.items() : sum_val += math.pow(math.e / 2, p) for language, p in probabilities.items() : probabilities[language] = math.pow(math.e / 2, p) / sum_val * 100 return sorted(probabilities.items(), key=lambda a: a[1], reverse=True)
[ "def", "detect", "(", "self", ",", "code", ")", ":", "keywords", "=", "KeywordFetcher", ".", "fetch", "(", "code", ")", "probabilities", "=", "{", "}", "for", "keyword", "in", "keywords", ":", "if", "keyword", "not", "in", "self", ".", "trained_set", "...
Detect language with code
[ "Detect", "language", "with", "code" ]
python
train
dnouri/nolearn
nolearn/lasagne/visualize.py
https://github.com/dnouri/nolearn/blob/2ef346c869e80fc90247916e4aea5cfa7cf2edda/nolearn/lasagne/visualize.py#L373-L385
def draw_to_notebook(layers, **kwargs): """ Draws a network diagram in an IPython notebook :parameters: - layers : list or NeuralNet instance List of layers or the neural net to draw. - **kwargs : see the docstring of make_pydot_graph for other options """ from IPython.display import Image layers = (layers.get_all_layers() if hasattr(layers, 'get_all_layers') else layers) dot = make_pydot_graph(layers, **kwargs) return Image(dot.create_png())
[ "def", "draw_to_notebook", "(", "layers", ",", "*", "*", "kwargs", ")", ":", "from", "IPython", ".", "display", "import", "Image", "layers", "=", "(", "layers", ".", "get_all_layers", "(", ")", "if", "hasattr", "(", "layers", ",", "'get_all_layers'", ")", ...
Draws a network diagram in an IPython notebook :parameters: - layers : list or NeuralNet instance List of layers or the neural net to draw. - **kwargs : see the docstring of make_pydot_graph for other options
[ "Draws", "a", "network", "diagram", "in", "an", "IPython", "notebook", ":", "parameters", ":", "-", "layers", ":", "list", "or", "NeuralNet", "instance", "List", "of", "layers", "or", "the", "neural", "net", "to", "draw", ".", "-", "**", "kwargs", ":", ...
python
train
MrYsLab/PyMata
PyMata/pymata.py
https://github.com/MrYsLab/PyMata/blob/7e0ec34670b5a0d3d6b74bcbe4f3808c845cc429/PyMata/pymata.py#L862-L897
def sonar_config(self, trigger_pin, echo_pin, cb=None, ping_interval=50, max_distance=200): """ Configure the pins,ping interval and maximum distance for an HC-SR04 type device. Single pin configuration may be used. To do so, set both the trigger and echo pins to the same value. Up to a maximum of 6 SONAR devices is supported If the maximum is exceeded a message is sent to the console and the request is ignored. NOTE: data is measured in centimeters :param trigger_pin: The pin number of for the trigger (transmitter). :param echo_pin: The pin number for the received echo. :param ping_interval: Minimum interval between pings. Lowest number to use is 33 ms.Max is 127 :param max_distance: Maximum distance in cm. Max is 200. :param cb: optional callback function to report sonar data changes """ if max_distance > 200: max_distance = 200 max_distance_lsb = max_distance & 0x7f max_distance_msb = (max_distance >> 7) & 0x7f data = [trigger_pin, echo_pin, ping_interval, max_distance_lsb, max_distance_msb] self.set_pin_mode(trigger_pin, self.SONAR, self.INPUT) self.set_pin_mode(echo_pin, self.SONAR, self.INPUT) # update the ping data map for this pin if len(self._command_handler.active_sonar_map) > 6: if self.verbose: print("sonar_config: maximum number of devices assigned - ignoring request") return else: with self.data_lock: # self._command_handler.active_sonar_map[trigger_pin] = self.IGNORE self._command_handler.active_sonar_map[trigger_pin] = [cb, [self.IGNORE]] self._command_handler.send_sysex(self._command_handler.SONAR_CONFIG, data)
[ "def", "sonar_config", "(", "self", ",", "trigger_pin", ",", "echo_pin", ",", "cb", "=", "None", ",", "ping_interval", "=", "50", ",", "max_distance", "=", "200", ")", ":", "if", "max_distance", ">", "200", ":", "max_distance", "=", "200", "max_distance_ls...
Configure the pins,ping interval and maximum distance for an HC-SR04 type device. Single pin configuration may be used. To do so, set both the trigger and echo pins to the same value. Up to a maximum of 6 SONAR devices is supported If the maximum is exceeded a message is sent to the console and the request is ignored. NOTE: data is measured in centimeters :param trigger_pin: The pin number of for the trigger (transmitter). :param echo_pin: The pin number for the received echo. :param ping_interval: Minimum interval between pings. Lowest number to use is 33 ms.Max is 127 :param max_distance: Maximum distance in cm. Max is 200. :param cb: optional callback function to report sonar data changes
[ "Configure", "the", "pins", "ping", "interval", "and", "maximum", "distance", "for", "an", "HC", "-", "SR04", "type", "device", ".", "Single", "pin", "configuration", "may", "be", "used", ".", "To", "do", "so", "set", "both", "the", "trigger", "and", "ec...
python
valid
VorskiImagineering/C3PO
c3po/mod/communicator.py
https://github.com/VorskiImagineering/C3PO/blob/e3e35835e5ac24158848afed4f905ca44ac3ae00/c3po/mod/communicator.py#L127-L146
def _download_csv_from_gdocs(self, trans_csv_path, meta_csv_path): """ Download csv from GDoc. :return: returns resource if worksheets are present :except: raises PODocsError with info if communication with GDocs lead to any errors """ try: entry = self.gd_client.GetResourceById(self.key) self.gd_client.DownloadResource( entry, trans_csv_path, extra_params={'gid': 0, 'exportFormat': 'csv'} ) self.gd_client.DownloadResource( entry, meta_csv_path, extra_params={'gid': 1, 'exportFormat': 'csv'} ) except (RequestError, IOError) as e: raise PODocsError(e) return entry
[ "def", "_download_csv_from_gdocs", "(", "self", ",", "trans_csv_path", ",", "meta_csv_path", ")", ":", "try", ":", "entry", "=", "self", ".", "gd_client", ".", "GetResourceById", "(", "self", ".", "key", ")", "self", ".", "gd_client", ".", "DownloadResource", ...
Download csv from GDoc. :return: returns resource if worksheets are present :except: raises PODocsError with info if communication with GDocs lead to any errors
[ "Download", "csv", "from", "GDoc", ".", ":", "return", ":", "returns", "resource", "if", "worksheets", "are", "present", ":", "except", ":", "raises", "PODocsError", "with", "info", "if", "communication", "with", "GDocs", "lead", "to", "any", "errors" ]
python
test
nicolargo/glances
glances/plugins/glances_plugin.py
https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/plugins/glances_plugin.py#L541-L617
def get_alert(self, current=0, minimum=0, maximum=100, highlight_zero=True, is_max=False, header="", action_key=None, log=False): """Return the alert status relative to a current value. Use this function for minor stats. If current < CAREFUL of max then alert = OK If current > CAREFUL of max then alert = CAREFUL If current > WARNING of max then alert = WARNING If current > CRITICAL of max then alert = CRITICAL If highlight=True than 0.0 is highlighted If defined 'header' is added between the plugin name and the status. Only useful for stats with several alert status. If defined, 'action_key' define the key for the actions. By default, the action_key is equal to the header. If log=True than add log if necessary elif log=False than do not log elif log=None than apply the config given in the conf file """ # Manage 0 (0.0) value if highlight_zero is not True if not highlight_zero and current == 0: return 'DEFAULT' # Compute the % try: value = (current * 100) / maximum except ZeroDivisionError: return 'DEFAULT' except TypeError: return 'DEFAULT' # Build the stat_name stat_name = self.get_stat_name(header=header) # Manage limits # If is_max is set then display the value in MAX ret = 'MAX' if is_max else 'OK' try: if value >= self.get_limit('critical', stat_name=stat_name): ret = 'CRITICAL' elif value >= self.get_limit('warning', stat_name=stat_name): ret = 'WARNING' elif value >= self.get_limit('careful', stat_name=stat_name): ret = 'CAREFUL' elif current < minimum: ret = 'CAREFUL' except KeyError: return 'DEFAULT' # Manage log log_str = "" if self.get_limit_log(stat_name=stat_name, default_action=log): # Add _LOG to the return string # So stats will be highlited with a specific color log_str = "_LOG" # Add the log to the list glances_events.add(ret, stat_name.upper(), value) # Manage threshold self.manage_threshold(stat_name, ret) # Manage action self.manage_action(stat_name, ret.lower(), header, action_key) # Default is 'OK' return ret + log_str
[ "def", "get_alert", "(", "self", ",", "current", "=", "0", ",", "minimum", "=", "0", ",", "maximum", "=", "100", ",", "highlight_zero", "=", "True", ",", "is_max", "=", "False", ",", "header", "=", "\"\"", ",", "action_key", "=", "None", ",", "log", ...
Return the alert status relative to a current value. Use this function for minor stats. If current < CAREFUL of max then alert = OK If current > CAREFUL of max then alert = CAREFUL If current > WARNING of max then alert = WARNING If current > CRITICAL of max then alert = CRITICAL If highlight=True than 0.0 is highlighted If defined 'header' is added between the plugin name and the status. Only useful for stats with several alert status. If defined, 'action_key' define the key for the actions. By default, the action_key is equal to the header. If log=True than add log if necessary elif log=False than do not log elif log=None than apply the config given in the conf file
[ "Return", "the", "alert", "status", "relative", "to", "a", "current", "value", "." ]
python
train
wkentaro/pytorch-fcn
torchfcn/ext/fcn.berkeleyvision.org/nyud_layers.py
https://github.com/wkentaro/pytorch-fcn/blob/97189cbccb2c9b8bd776b356a1fd4b6c03f67d79/torchfcn/ext/fcn.berkeleyvision.org/nyud_layers.py#L110-L123
def load_image(self, idx): """ Load input image and preprocess for Caffe: - cast to float - switch channels RGB -> BGR - subtract mean - transpose to channel x height x width order """ im = Image.open('{}/data/images/img_{}.png'.format(self.nyud_dir, idx)) in_ = np.array(im, dtype=np.float32) in_ = in_[:,:,::-1] in_ -= self.mean_bgr in_ = in_.transpose((2,0,1)) return in_
[ "def", "load_image", "(", "self", ",", "idx", ")", ":", "im", "=", "Image", ".", "open", "(", "'{}/data/images/img_{}.png'", ".", "format", "(", "self", ".", "nyud_dir", ",", "idx", ")", ")", "in_", "=", "np", ".", "array", "(", "im", ",", "dtype", ...
Load input image and preprocess for Caffe: - cast to float - switch channels RGB -> BGR - subtract mean - transpose to channel x height x width order
[ "Load", "input", "image", "and", "preprocess", "for", "Caffe", ":", "-", "cast", "to", "float", "-", "switch", "channels", "RGB", "-", ">", "BGR", "-", "subtract", "mean", "-", "transpose", "to", "channel", "x", "height", "x", "width", "order" ]
python
train
zeroSteiner/smoke-zephyr
smoke_zephyr/utilities.py
https://github.com/zeroSteiner/smoke-zephyr/blob/a6d2498aeacc72ee52e7806f783a4d83d537ffb2/smoke_zephyr/utilities.py#L693-L703
def random_string_alphanumeric(size): """ Generate a random string of *size* length consisting of mixed case letters and numbers. This function is not meant for cryptographic purposes. :param int size: The length of the string to return. :return: A string consisting of random characters. :rtype: str """ # requirements = random, string return ''.join(random.choice(string.ascii_letters + string.digits) for x in range(size))
[ "def", "random_string_alphanumeric", "(", "size", ")", ":", "# requirements = random, string", "return", "''", ".", "join", "(", "random", ".", "choice", "(", "string", ".", "ascii_letters", "+", "string", ".", "digits", ")", "for", "x", "in", "range", "(", ...
Generate a random string of *size* length consisting of mixed case letters and numbers. This function is not meant for cryptographic purposes. :param int size: The length of the string to return. :return: A string consisting of random characters. :rtype: str
[ "Generate", "a", "random", "string", "of", "*", "size", "*", "length", "consisting", "of", "mixed", "case", "letters", "and", "numbers", ".", "This", "function", "is", "not", "meant", "for", "cryptographic", "purposes", "." ]
python
train
klmitch/bark
bark/proxy.py
https://github.com/klmitch/bark/blob/6e0e002d55f01fee27e3e45bb86e30af1bfeef36/bark/proxy.py#L282-L304
def validate(self, proxy_ip, client_ip): """ Looks up the proxy identified by its IP, then verifies that the given client IP may be introduced by that proxy. :param proxy_ip: The IP address of the proxy. :param client_ip: The IP address of the supposed client. :returns: True if the proxy is permitted to introduce the client; False if the proxy doesn't exist or isn't permitted to introduce the client. """ # First, look up the proxy if self.pseudo_proxy: proxy = self.pseudo_proxy elif proxy_ip not in self.proxies: return False else: proxy = self.proxies[proxy_ip] # Now, verify that the client is valid return client_ip in proxy
[ "def", "validate", "(", "self", ",", "proxy_ip", ",", "client_ip", ")", ":", "# First, look up the proxy", "if", "self", ".", "pseudo_proxy", ":", "proxy", "=", "self", ".", "pseudo_proxy", "elif", "proxy_ip", "not", "in", "self", ".", "proxies", ":", "retur...
Looks up the proxy identified by its IP, then verifies that the given client IP may be introduced by that proxy. :param proxy_ip: The IP address of the proxy. :param client_ip: The IP address of the supposed client. :returns: True if the proxy is permitted to introduce the client; False if the proxy doesn't exist or isn't permitted to introduce the client.
[ "Looks", "up", "the", "proxy", "identified", "by", "its", "IP", "then", "verifies", "that", "the", "given", "client", "IP", "may", "be", "introduced", "by", "that", "proxy", "." ]
python
train
ciena/afkak
afkak/consumer.py
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/consumer.py#L652-L691
def _handle_commit_error(self, failure, retry_delay, attempt): """ Retry the commit request, depending on failure type Depending on the type of the failure, we retry the commit request with the latest processed offset, or callback/errback self._commit_ds """ # Check if we are stopping and the request was cancelled if self._stopping and failure.check(CancelledError): # Not really an error return self._deliver_commit_result(self._last_committed_offset) # Check that the failure type is a Kafka error...this could maybe be # a tighter check to determine whether a retry will succeed... if not failure.check(KafkaError): log.error("Unhandleable failure during commit attempt: %r\n\t%r", failure, failure.getBriefTraceback()) return self._deliver_commit_result(failure) # Do we need to abort? if (self.request_retry_max_attempts != 0 and attempt >= self.request_retry_max_attempts): log.debug("%r: Exhausted attempts: %d to commit offset: %r", self, self.request_retry_max_attempts, failure) return self._deliver_commit_result(failure) # Check the retry_delay to see if we should log at the higher level # Using attempts % 2 gets us 1-warn/minute with defaults timings if retry_delay < self.retry_max_delay or 0 == (attempt % 2): log.debug("%r: Failure committing offset to kafka: %r", self, failure) else: # We've retried until we hit the max delay, log alternately at warn log.warning("%r: Still failing committing offset to kafka: %r", self, failure) # Schedule a delayed call to retry the commit retry_delay = min(retry_delay * REQUEST_RETRY_FACTOR, self.retry_max_delay) self._commit_call = self.client.reactor.callLater( retry_delay, self._send_commit_request, retry_delay, attempt + 1)
[ "def", "_handle_commit_error", "(", "self", ",", "failure", ",", "retry_delay", ",", "attempt", ")", ":", "# Check if we are stopping and the request was cancelled", "if", "self", ".", "_stopping", "and", "failure", ".", "check", "(", "CancelledError", ")", ":", "# ...
Retry the commit request, depending on failure type Depending on the type of the failure, we retry the commit request with the latest processed offset, or callback/errback self._commit_ds
[ "Retry", "the", "commit", "request", "depending", "on", "failure", "type" ]
python
train
toomore/grs
grs/realtime.py
https://github.com/toomore/grs/blob/a1285cb57878284a886952968be9e31fbfa595dd/grs/realtime.py#L74-L148
def real(self): """ Real time data :rtype: dict :returns: :name: 股票名稱 Unicode :no: 股票代碼 :range: 漲跌價 :ranges: 漲跌判斷 True, False :time: 取得時間 :max: 漲停價 :min: 跌停價 :unch: 昨日收盤價 :pp: 漲跌幅 % :o: 開盤價 :h: 當日最高價 :l: 當日最低價 :c: 成交價/收盤價 :value: 累計成交量 :pvalue: 該盤成交量 :top5buy: 最佳五檔買進價量資訊 :top5sell: 最佳五檔賣出價量資訊 :crosspic: K線圖 by Google Chart """ try: unch = sum([covstr(self.__raw[3]), covstr(self.__raw[4])]) / 2 result = { 'name': unicode(self.__raw[36].replace(' ', ''), 'cp950'), 'no': self.__raw[0], 'range': self.__raw[1], # 漲跌價 'time': self.__raw[2], # 取得時間 'max': self.__raw[3], # 漲停價 'min': self.__raw[4], # 跌停價 'unch': '%.2f' % unch, # 昨日收盤價 'pp': '%.2f' % ((covstr(self.__raw[8]) - unch) / unch * 100), # 漲跌幅 % 'o': self.__raw[5], # 開盤價 'h': self.__raw[6], # 當日最高價 'l': self.__raw[7], # 當日最低價 'c': self.__raw[8], # 成交價/收盤價 'value': self.__raw[9], # 累計成交量 'pvalue': self.__raw[10], # 該盤成交量 'top5buy': [ (self.__raw[11], self.__raw[12]), (self.__raw[13], self.__raw[14]), (self.__raw[15], self.__raw[16]), (self.__raw[17], self.__raw[18]), (self.__raw[19], self.__raw[20]) ], 'top5sell': [ (self.__raw[21], self.__raw[22]), (self.__raw[23], self.__raw[24]), (self.__raw[25], self.__raw[26]), (self.__raw[27], self.__raw[28]), (self.__raw[29], self.__raw[30]) ] } if '-' in self.__raw[1]: # 漲跌判斷 True, False result['ranges'] = False # price down else: result['ranges'] = True # price up result['crosspic'] = ("http://chart.apis.google.com/chart?" + "chf=bg,s,ffffff&chs=20x50&cht=ls" + "&chd=t1:0,0,0|0,%(h)s,0|0,%(c)s,0|0,%(o)s,0|0,%(l)s,0" + "&chds=%(l)s,%(h)s&chm=F,,1,1:4,20") % result result['top5buy'].sort() result['top5sell'].sort() return result except (IndexError, ValueError): return False
[ "def", "real", "(", "self", ")", ":", "try", ":", "unch", "=", "sum", "(", "[", "covstr", "(", "self", ".", "__raw", "[", "3", "]", ")", ",", "covstr", "(", "self", ".", "__raw", "[", "4", "]", ")", "]", ")", "/", "2", "result", "=", "{", ...
Real time data :rtype: dict :returns: :name: 股票名稱 Unicode :no: 股票代碼 :range: 漲跌價 :ranges: 漲跌判斷 True, False :time: 取得時間 :max: 漲停價 :min: 跌停價 :unch: 昨日收盤價 :pp: 漲跌幅 % :o: 開盤價 :h: 當日最高價 :l: 當日最低價 :c: 成交價/收盤價 :value: 累計成交量 :pvalue: 該盤成交量 :top5buy: 最佳五檔買進價量資訊 :top5sell: 最佳五檔賣出價量資訊 :crosspic: K線圖 by Google Chart
[ "Real", "time", "data" ]
python
train
zomux/deepy
deepy/networks/network.py
https://github.com/zomux/deepy/blob/090fbad22a08a809b12951cd0d4984f5bd432698/deepy/networks/network.py#L106-L125
def register_layer(self, layer): """ Register the layer so that it's param will be trained. But the output of the layer will not be stacked. """ if type(layer) == Block: layer.fix() self.parameter_count += layer.parameter_count self.parameters.extend(layer.parameters) self.free_parameters.extend(layer.free_parameters) self.training_monitors.extend(layer.training_monitors) self.testing_monitors.extend(layer.testing_monitors) self.updates.extend(layer.updates) self.training_updates.extend(layer.training_updates) self.input_variables.extend(layer.external_inputs) self.target_variables.extend(layer.external_targets) self.training_callbacks.extend(layer.training_callbacks) self.testing_callbacks.extend(layer.testing_callbacks) self.epoch_callbacks.extend(layer.epoch_callbacks)
[ "def", "register_layer", "(", "self", ",", "layer", ")", ":", "if", "type", "(", "layer", ")", "==", "Block", ":", "layer", ".", "fix", "(", ")", "self", ".", "parameter_count", "+=", "layer", ".", "parameter_count", "self", ".", "parameters", ".", "ex...
Register the layer so that it's param will be trained. But the output of the layer will not be stacked.
[ "Register", "the", "layer", "so", "that", "it", "s", "param", "will", "be", "trained", ".", "But", "the", "output", "of", "the", "layer", "will", "not", "be", "stacked", "." ]
python
test
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/util/bottle2.py
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/util/bottle2.py#L647-L658
def header(self): ''' :class:`HeaderDict` filled with request headers. HeaderDict keys are case insensitive str.title()d ''' if self._header is None: self._header = HeaderDict() for key, value in self.environ.iteritems(): if key.startswith('HTTP_'): key = key[5:].replace('_','-').title() self._header[key] = value return self._header
[ "def", "header", "(", "self", ")", ":", "if", "self", ".", "_header", "is", "None", ":", "self", ".", "_header", "=", "HeaderDict", "(", ")", "for", "key", ",", "value", "in", "self", ".", "environ", ".", "iteritems", "(", ")", ":", "if", "key", ...
:class:`HeaderDict` filled with request headers. HeaderDict keys are case insensitive str.title()d
[ ":", "class", ":", "HeaderDict", "filled", "with", "request", "headers", "." ]
python
train
click-contrib/click-configfile
tasks/_vendor/path.py
https://github.com/click-contrib/click-configfile/blob/a616204cb9944125fd5051556f27a7ccef611e22/tasks/_vendor/path.py#L1357-L1366
def rmtree_p(self): """ Like :meth:`rmtree`, but does not raise an exception if the directory does not exist. """ try: self.rmtree() except OSError: _, e, _ = sys.exc_info() if e.errno != errno.ENOENT: raise return self
[ "def", "rmtree_p", "(", "self", ")", ":", "try", ":", "self", ".", "rmtree", "(", ")", "except", "OSError", ":", "_", ",", "e", ",", "_", "=", "sys", ".", "exc_info", "(", ")", "if", "e", ".", "errno", "!=", "errno", ".", "ENOENT", ":", "raise"...
Like :meth:`rmtree`, but does not raise an exception if the directory does not exist.
[ "Like", ":", "meth", ":", "rmtree", "but", "does", "not", "raise", "an", "exception", "if", "the", "directory", "does", "not", "exist", "." ]
python
train
hydraplatform/hydra-base
hydra_base/db/audit.py
https://github.com/hydraplatform/hydra-base/blob/9251ff7946505f7a272c87837390acd1c435bc6e/hydra_base/db/audit.py#L49-L89
def create_sqlite_backup_db(audit_tables): """ return an inspector object """ #we always want to create a whole new DB, so delete the old one first #if it exists. try: Popen("rm %s"%(config.get('sqlite', 'backup_url')), shell=True) logging.warn("Old sqlite backup DB removed") except Exception as e: logging.warn(e) try: aux_dir = config.get('DEFAULT', 'hydra_aux_dir') os.mkdir(aux_dir) logging.warn("%s created", aux_dir) except Exception as e: logging.warn(e) try: backup_dir = config.get('db', 'export_target') os.mkdir(backup_dir) logging.warn("%s created", backup_dir) except Exception as e: logging.warn(e) db = create_engine(sqlite_engine, echo=True) db.connect() metadata = MetaData(db) for main_audit_table in audit_tables: cols = [] for c in main_audit_table.columns: col = c.copy() if col.type.python_type == Decimal: col.type = DECIMAL() cols.append(col) Table(main_audit_table.name, metadata, *cols, sqlite_autoincrement=True) metadata.create_all(db)
[ "def", "create_sqlite_backup_db", "(", "audit_tables", ")", ":", "#we always want to create a whole new DB, so delete the old one first", "#if it exists.", "try", ":", "Popen", "(", "\"rm %s\"", "%", "(", "config", ".", "get", "(", "'sqlite'", ",", "'backup_url'", ")", ...
return an inspector object
[ "return", "an", "inspector", "object" ]
python
train
googledatalab/pydatalab
solutionbox/structured_data/mltoolbox/_structured_data/preprocess/local_preprocess.py
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/structured_data/mltoolbox/_structured_data/preprocess/local_preprocess.py#L69-L144
def run_numerical_categorical_analysis(args, schema_list): """Makes the numerical and categorical analysis files. Args: args: the command line args schema_list: python object of the schema json file. Raises: ValueError: if schema contains unknown column types. """ header = [column['name'] for column in schema_list] input_files = file_io.get_matching_files(args.input_file_pattern) # Check the schema is valid for col_schema in schema_list: col_type = col_schema['type'].lower() if col_type != 'string' and col_type != 'integer' and col_type != 'float': raise ValueError('Schema contains an unsupported type %s.' % col_type) # initialize the results def _init_numerical_results(): return {'min': float('inf'), 'max': float('-inf'), 'count': 0, 'sum': 0.0} numerical_results = collections.defaultdict(_init_numerical_results) categorical_results = collections.defaultdict(set) # for each file, update the numerical stats from that file, and update the set # of unique labels. for input_file in input_files: with file_io.FileIO(input_file, 'r') as f: for line in f: parsed_line = dict(zip(header, line.strip().split(','))) for col_schema in schema_list: col_name = col_schema['name'] col_type = col_schema['type'] if col_type.lower() == 'string': categorical_results[col_name].update([parsed_line[col_name]]) else: # numerical column. 
# if empty, skip if not parsed_line[col_name].strip(): continue numerical_results[col_name]['min'] = ( min(numerical_results[col_name]['min'], float(parsed_line[col_name]))) numerical_results[col_name]['max'] = ( max(numerical_results[col_name]['max'], float(parsed_line[col_name]))) numerical_results[col_name]['count'] += 1 numerical_results[col_name]['sum'] += float(parsed_line[col_name]) # Update numerical_results to just have min/min/mean for col_schema in schema_list: if col_schema['type'].lower() != 'string': col_name = col_schema['name'] mean = numerical_results[col_name]['sum'] / numerical_results[col_name]['count'] del numerical_results[col_name]['sum'] del numerical_results[col_name]['count'] numerical_results[col_name]['mean'] = mean # Write the numerical_results to a json file. file_io.write_string_to_file( os.path.join(args.output_dir, NUMERICAL_ANALYSIS_FILE), json.dumps(numerical_results, indent=2, separators=(',', ': '))) # Write the vocab files. Each label is on its own line. for name, unique_labels in six.iteritems(categorical_results): labels = '\n'.join(list(unique_labels)) file_io.write_string_to_file( os.path.join(args.output_dir, CATEGORICAL_ANALYSIS_FILE % name), labels)
[ "def", "run_numerical_categorical_analysis", "(", "args", ",", "schema_list", ")", ":", "header", "=", "[", "column", "[", "'name'", "]", "for", "column", "in", "schema_list", "]", "input_files", "=", "file_io", ".", "get_matching_files", "(", "args", ".", "in...
Makes the numerical and categorical analysis files. Args: args: the command line args schema_list: python object of the schema json file. Raises: ValueError: if schema contains unknown column types.
[ "Makes", "the", "numerical", "and", "categorical", "analysis", "files", "." ]
python
train
cloud9ers/gurumate
environment/share/doc/ipython/examples/parallel/pi/pidigits.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/share/doc/ipython/examples/parallel/pi/pidigits.py#L74-L82
def txt_file_to_digits(filename, the_type=str): """ Yield the digits of pi read from a .txt file. """ with open(filename, 'r') as f: for line in f.readlines(): for c in line: if c != '\n' and c!= ' ': yield the_type(c)
[ "def", "txt_file_to_digits", "(", "filename", ",", "the_type", "=", "str", ")", ":", "with", "open", "(", "filename", ",", "'r'", ")", "as", "f", ":", "for", "line", "in", "f", ".", "readlines", "(", ")", ":", "for", "c", "in", "line", ":", "if", ...
Yield the digits of pi read from a .txt file.
[ "Yield", "the", "digits", "of", "pi", "read", "from", "a", ".", "txt", "file", "." ]
python
test
nion-software/nionswift
nion/swift/model/NDataHandler.py
https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/model/NDataHandler.py#L425-L442
def write_data(self, data, file_datetime): """ Write data to the ndata file specified by reference. :param data: the numpy array data to write :param file_datetime: the datetime for the file """ with self.__lock: assert data is not None absolute_file_path = self.__file_path #logging.debug("WRITE data file %s for %s", absolute_file_path, key) make_directory_if_needed(os.path.dirname(absolute_file_path)) properties = self.read_properties() if os.path.exists(absolute_file_path) else dict() write_zip(absolute_file_path, data, properties) # convert to utc time. tz_minutes = Utility.local_utcoffset_minutes(file_datetime) timestamp = calendar.timegm(file_datetime.timetuple()) - tz_minutes * 60 os.utime(absolute_file_path, (time.time(), timestamp))
[ "def", "write_data", "(", "self", ",", "data", ",", "file_datetime", ")", ":", "with", "self", ".", "__lock", ":", "assert", "data", "is", "not", "None", "absolute_file_path", "=", "self", ".", "__file_path", "#logging.debug(\"WRITE data file %s for %s\", absolute_f...
Write data to the ndata file specified by reference. :param data: the numpy array data to write :param file_datetime: the datetime for the file
[ "Write", "data", "to", "the", "ndata", "file", "specified", "by", "reference", "." ]
python
train
gboeing/osmnx
osmnx/save_load.py
https://github.com/gboeing/osmnx/blob/be59fd313bcb68af8fc79242c56194f1247e26e2/osmnx/save_load.py#L309-L393
def load_graphml(filename, folder=None, node_type=int): """ Load a GraphML file from disk and convert the node/edge attributes to correct data types. Parameters ---------- filename : string the name of the graphml file (including file extension) folder : string the folder containing the file, if None, use default data folder node_type : type (Python type (default: int)) - Convert node ids to this type Returns ------- networkx multidigraph """ start_time = time.time() # read the graph from disk if folder is None: folder = settings.data_folder path = os.path.join(folder, filename) G = nx.MultiDiGraph(nx.read_graphml(path, node_type=node_type)) # convert graph crs attribute from saved string to correct dict data type G.graph['crs'] = ast.literal_eval(G.graph['crs']) if 'streets_per_node' in G.graph: G.graph['streets_per_node'] = ast.literal_eval(G.graph['streets_per_node']) # convert numeric node tags from string to numeric data types log('Converting node and edge attribute data types') for _, data in G.nodes(data=True): data['osmid'] = node_type(data['osmid']) data['x'] = float(data['x']) data['y'] = float(data['y']) # convert numeric, bool, and list node tags from string to correct data types for _, _, data in G.edges(data=True, keys=False): # first parse oneway to bool and length to float - they should always # have only 1 value each data['oneway'] = ast.literal_eval(data['oneway']) data['length'] = float(data['length']) # these attributes might have a single value, or a list if edge's # topology was simplified for attr in ['highway', 'name', 'bridge', 'tunnel', 'lanes', 'ref', 'maxspeed', 'service', 'access', 'area', 'landuse', 'width', 'est_width']: # if this edge has this attribute, and it starts with '[' and ends # with ']', then it's a list to be parsed if attr in data and data[attr][0] == '[' and data[attr][-1] == ']': # try to convert the string list to a list type, else leave as # single-value string (and leave as string if error) try: data[attr] = 
ast.literal_eval(data[attr]) except: pass # osmid might have a single value or a list if 'osmid' in data: if data['osmid'][0] == '[' and data['osmid'][-1] == ']': # if it's a list, eval the list then convert each element to node_type data['osmid'] = [node_type(i) for i in ast.literal_eval(data['osmid'])] else: # if it's not a list, convert it to the node_type data['osmid'] = node_type(data['osmid']) # if geometry attribute exists, load the string as well-known text to # shapely LineString if 'geometry' in data: data['geometry'] = wkt.loads(data['geometry']) # remove node_default and edge_default metadata keys if they exist if 'node_default' in G.graph: del G.graph['node_default'] if 'edge_default' in G.graph: del G.graph['edge_default'] log('Loaded graph with {:,} nodes and {:,} edges in {:,.2f} seconds from "{}"'.format(len(list(G.nodes())), len(list(G.edges())), time.time()-start_time, path)) return G
[ "def", "load_graphml", "(", "filename", ",", "folder", "=", "None", ",", "node_type", "=", "int", ")", ":", "start_time", "=", "time", ".", "time", "(", ")", "# read the graph from disk", "if", "folder", "is", "None", ":", "folder", "=", "settings", ".", ...
Load a GraphML file from disk and convert the node/edge attributes to correct data types. Parameters ---------- filename : string the name of the graphml file (including file extension) folder : string the folder containing the file, if None, use default data folder node_type : type (Python type (default: int)) - Convert node ids to this type Returns ------- networkx multidigraph
[ "Load", "a", "GraphML", "file", "from", "disk", "and", "convert", "the", "node", "/", "edge", "attributes", "to", "correct", "data", "types", "." ]
python
train
aouyar/PyMunin
pysysinfo/filesystem.py
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/filesystem.py#L67-L91
def getSpaceUse(self): """Get disk space usage. @return: Dictionary of filesystem space utilization stats for filesystems. """ stats = {} try: out = subprocess.Popen([dfCmd, "-Pk"], stdout=subprocess.PIPE).communicate()[0] except: raise Exception('Execution of command %s failed.' % dfCmd) lines = out.splitlines() if len(lines) > 1: for line in lines[1:]: fsstats = {} cols = line.split() fsstats['device'] = cols[0] fsstats['type'] = self._fstypeDict[cols[5]] fsstats['total'] = 1024 * int(cols[1]) fsstats['inuse'] = 1024 * int(cols[2]) fsstats['avail'] = 1024 * int(cols[3]) fsstats['inuse_pcent'] = int(cols[4][:-1]) stats[cols[5]] = fsstats return stats
[ "def", "getSpaceUse", "(", "self", ")", ":", "stats", "=", "{", "}", "try", ":", "out", "=", "subprocess", ".", "Popen", "(", "[", "dfCmd", ",", "\"-Pk\"", "]", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", ".", "communicate", "(", ")", "[", ...
Get disk space usage. @return: Dictionary of filesystem space utilization stats for filesystems.
[ "Get", "disk", "space", "usage", "." ]
python
train
googlefonts/fontbakery
Lib/fontbakery/profiles/googlefonts.py
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/googlefonts.py#L2002-L2022
def com_google_fonts_check_metadata_canonical_filename(font_metadata, canonical_filename, is_variable_font): """METADATA.pb: Filename is set canonically?""" if is_variable_font: valid_varfont_suffixes = [ ("Roman-VF", "Regular"), ("Italic-VF", "Italic"), ] for valid_suffix, style in valid_varfont_suffixes: if style in canonical_filename: canonical_filename = valid_suffix.join(canonical_filename.split(style)) if canonical_filename != font_metadata.filename: yield FAIL, ("METADATA.pb: filename field (\"{}\")" " does not match " "canonical name \"{}\".".format(font_metadata.filename, canonical_filename)) else: yield PASS, "Filename in METADATA.pb is set canonically."
[ "def", "com_google_fonts_check_metadata_canonical_filename", "(", "font_metadata", ",", "canonical_filename", ",", "is_variable_font", ")", ":", "if", "is_variable_font", ":", "valid_varfont_suffixes", "=", "[", "(", "\"Roman-VF\"", ",", "\"Regular\"", ")", ",", "(", "\...
METADATA.pb: Filename is set canonically?
[ "METADATA", ".", "pb", ":", "Filename", "is", "set", "canonically?" ]
python
train
happyleavesaoc/python-limitlessled
limitlessled/group/dimmer.py
https://github.com/happyleavesaoc/python-limitlessled/blob/70307c2bf8c91430a99579d2ad18b228ec7a8488/limitlessled/group/dimmer.py#L50-L63
def transition(self, duration, brightness=None): """ Transition wrapper. Short-circuit transition if necessary. :param duration: Duration of transition. :param brightness: Transition to this brightness. """ if duration == 0: if brightness is not None: self.brightness = brightness return if brightness != self.brightness: self._transition(duration, brightness)
[ "def", "transition", "(", "self", ",", "duration", ",", "brightness", "=", "None", ")", ":", "if", "duration", "==", "0", ":", "if", "brightness", "is", "not", "None", ":", "self", ".", "brightness", "=", "brightness", "return", "if", "brightness", "!=",...
Transition wrapper. Short-circuit transition if necessary. :param duration: Duration of transition. :param brightness: Transition to this brightness.
[ "Transition", "wrapper", "." ]
python
train
eandersson/amqpstorm
amqpstorm/management/queue.py
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/management/queue.py#L34-L50
def list(self, virtual_host='/', show_all=False): """List Queues. :param str virtual_host: Virtual host name :param bool show_all: List all Queues :raises ApiError: Raises if the remote server encountered an error. :raises ApiConnectionError: Raises if there was a connectivity issue. :rtype: list """ if show_all: return self.http_client.get(API_QUEUES) virtual_host = quote(virtual_host, '') return self.http_client.get( API_QUEUES_VIRTUAL_HOST % virtual_host )
[ "def", "list", "(", "self", ",", "virtual_host", "=", "'/'", ",", "show_all", "=", "False", ")", ":", "if", "show_all", ":", "return", "self", ".", "http_client", ".", "get", "(", "API_QUEUES", ")", "virtual_host", "=", "quote", "(", "virtual_host", ",",...
List Queues. :param str virtual_host: Virtual host name :param bool show_all: List all Queues :raises ApiError: Raises if the remote server encountered an error. :raises ApiConnectionError: Raises if there was a connectivity issue. :rtype: list
[ "List", "Queues", "." ]
python
train
angr/angr
angr/sim_manager.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/sim_manager.py#L171-L191
def use_technique(self, tech): """ Use an exploration technique with this SimulationManager. Techniques can be found in :mod:`angr.exploration_techniques`. :param tech: An ExplorationTechnique object that contains code to modify this SimulationManager's behavior. :type tech: ExplorationTechnique :return: The technique that was added, for convenience """ if not isinstance(tech, ExplorationTechnique): raise SimulationManagerError # XXX: as promised tech.project = self._project tech.setup(self) HookSet.install_hooks(self, **tech._get_hooks()) self._techniques.append(tech) return tech
[ "def", "use_technique", "(", "self", ",", "tech", ")", ":", "if", "not", "isinstance", "(", "tech", ",", "ExplorationTechnique", ")", ":", "raise", "SimulationManagerError", "# XXX: as promised", "tech", ".", "project", "=", "self", ".", "_project", "tech", "....
Use an exploration technique with this SimulationManager. Techniques can be found in :mod:`angr.exploration_techniques`. :param tech: An ExplorationTechnique object that contains code to modify this SimulationManager's behavior. :type tech: ExplorationTechnique :return: The technique that was added, for convenience
[ "Use", "an", "exploration", "technique", "with", "this", "SimulationManager", "." ]
python
train
pndurette/gTTS
gtts/tokenizer/pre_processors.py
https://github.com/pndurette/gTTS/blob/b01ac4eb22d40c6241202e202d0418ccf4f98460/gtts/tokenizer/pre_processors.py#L31-L48
def abbreviations(text): """Remove periods after an abbreviation from a list of known abbrevations that can be spoken the same without that period. This prevents having to handle tokenization of that period. Note: Could potentially remove the ending period of a sentence. Note: Abbreviations that Google Translate can't pronounce without (or even with) a period should be added as a word substitution with a :class:`PreProcessorSub` pre-processor. Ex.: 'Esq.', 'Esquire'. """ return PreProcessorRegex( search_args=symbols.ABBREVIATIONS, search_func=lambda x: r"(?<={})(?=\.).".format(x), repl='', flags=re.IGNORECASE).run(text)
[ "def", "abbreviations", "(", "text", ")", ":", "return", "PreProcessorRegex", "(", "search_args", "=", "symbols", ".", "ABBREVIATIONS", ",", "search_func", "=", "lambda", "x", ":", "r\"(?<={})(?=\\.).\"", ".", "format", "(", "x", ")", ",", "repl", "=", "''",...
Remove periods after an abbreviation from a list of known abbrevations that can be spoken the same without that period. This prevents having to handle tokenization of that period. Note: Could potentially remove the ending period of a sentence. Note: Abbreviations that Google Translate can't pronounce without (or even with) a period should be added as a word substitution with a :class:`PreProcessorSub` pre-processor. Ex.: 'Esq.', 'Esquire'.
[ "Remove", "periods", "after", "an", "abbreviation", "from", "a", "list", "of", "known", "abbrevations", "that", "can", "be", "spoken", "the", "same", "without", "that", "period", ".", "This", "prevents", "having", "to", "handle", "tokenization", "of", "that", ...
python
train
timster/peewee-validates
peewee_validates.py
https://github.com/timster/peewee-validates/blob/417f0fafb87fe9209439d65bc279d86a3d9e8028/peewee_validates.py#L206-L222
def validate_regexp(pattern, flags=0): """ Validate the field matches the given regular expression. Should work with anything that supports '==' operator. :param pattern: Regular expresion to match. String or regular expression instance. :param pattern: Flags for the regular expression. :raises: ``ValidationError('equal')`` """ regex = re.compile(pattern, flags) if isinstance(pattern, str) else pattern def regexp_validator(field, data): if field.value is None: return if regex.match(str(field.value)) is None: raise ValidationError('regexp', pattern=pattern) return regexp_validator
[ "def", "validate_regexp", "(", "pattern", ",", "flags", "=", "0", ")", ":", "regex", "=", "re", ".", "compile", "(", "pattern", ",", "flags", ")", "if", "isinstance", "(", "pattern", ",", "str", ")", "else", "pattern", "def", "regexp_validator", "(", "...
Validate the field matches the given regular expression. Should work with anything that supports '==' operator. :param pattern: Regular expresion to match. String or regular expression instance. :param pattern: Flags for the regular expression. :raises: ``ValidationError('equal')``
[ "Validate", "the", "field", "matches", "the", "given", "regular", "expression", ".", "Should", "work", "with", "anything", "that", "supports", "==", "operator", "." ]
python
train
msmbuilder/msmbuilder
msmbuilder/example_datasets/base.py
https://github.com/msmbuilder/msmbuilder/blob/556a93a170782f47be53f4a1e9d740fb1c8272b3/msmbuilder/example_datasets/base.py#L45-L49
def description(cls): """Get a description from the Notes section of the docstring.""" lines = [s.strip() for s in cls.__doc__.splitlines()] note_i = lines.index("Notes") return "\n".join(lines[note_i + 2:])
[ "def", "description", "(", "cls", ")", ":", "lines", "=", "[", "s", ".", "strip", "(", ")", "for", "s", "in", "cls", ".", "__doc__", ".", "splitlines", "(", ")", "]", "note_i", "=", "lines", ".", "index", "(", "\"Notes\"", ")", "return", "\"\\n\"",...
Get a description from the Notes section of the docstring.
[ "Get", "a", "description", "from", "the", "Notes", "section", "of", "the", "docstring", "." ]
python
train
molmod/molmod
molmod/pairff.py
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/pairff.py#L221-L238
def yield_pair_gradients(self, index1, index2): """Yields pairs ((s'(r_ij), grad_i v(bar{r}_ij))""" d_2 = 1/self.distances[index1, index2]**2 if self.charges is not None: c1 = self.charges[index1] c2 = self.charges[index2] yield -c1*c2*d_2, np.zeros(3) if self.dipoles is not None: d_4 = d_2**2 d_6 = d_2**3 delta = self.deltas[index1, index2] p1 = self.dipoles[index1] p2 = self.dipoles[index2] yield -3*d_4*np.dot(p1, p2), np.zeros(3) yield 15*d_6, p1*np.dot(p2, delta) + p2*np.dot(p1, delta) if self.charges is not None: yield -3*c1*d_4, p2 yield -3*c2*d_4, -p1
[ "def", "yield_pair_gradients", "(", "self", ",", "index1", ",", "index2", ")", ":", "d_2", "=", "1", "/", "self", ".", "distances", "[", "index1", ",", "index2", "]", "**", "2", "if", "self", ".", "charges", "is", "not", "None", ":", "c1", "=", "se...
Yields pairs ((s'(r_ij), grad_i v(bar{r}_ij))
[ "Yields", "pairs", "((", "s", "(", "r_ij", ")", "grad_i", "v", "(", "bar", "{", "r", "}", "_ij", "))" ]
python
train
softlayer/softlayer-python
SoftLayer/CLI/account/invoice_detail.py
https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/CLI/account/invoice_detail.py#L17-L56
def cli(env, identifier, details): """Invoices and all that mess""" manager = AccountManager(env.client) top_items = manager.get_billing_items(identifier) title = "Invoice %s" % identifier table = formatting.Table(["Item Id", "Category", "Description", "Single", "Monthly", "Create Date", "Location"], title=title) table.align['category'] = 'l' table.align['description'] = 'l' for item in top_items: fqdn = "%s.%s" % (item.get('hostName', ''), item.get('domainName', '')) # category id=2046, ram_usage doesn't have a name... category = utils.lookup(item, 'category', 'name') or item.get('categoryCode') description = nice_string(item.get('description')) if fqdn != '.': description = "%s (%s)" % (item.get('description'), fqdn) table.add_row([ item.get('id'), category, nice_string(description), "$%.2f" % float(item.get('oneTimeAfterTaxAmount')), "$%.2f" % float(item.get('recurringAfterTaxAmount')), utils.clean_time(item.get('createDate'), out_format="%Y-%m-%d"), utils.lookup(item, 'location', 'name') ]) if details: for child in item.get('children', []): table.add_row([ '>>>', utils.lookup(child, 'category', 'name'), nice_string(child.get('description')), "$%.2f" % float(child.get('oneTimeAfterTaxAmount')), "$%.2f" % float(child.get('recurringAfterTaxAmount')), '---', '---' ]) env.fout(table)
[ "def", "cli", "(", "env", ",", "identifier", ",", "details", ")", ":", "manager", "=", "AccountManager", "(", "env", ".", "client", ")", "top_items", "=", "manager", ".", "get_billing_items", "(", "identifier", ")", "title", "=", "\"Invoice %s\"", "%", "id...
Invoices and all that mess
[ "Invoices", "and", "all", "that", "mess" ]
python
train
richardchien/python-cqhttp
cqhttp_helper.py
https://github.com/richardchien/python-cqhttp/blob/1869819a8f89001e3f70668e31afc6c78f7f5bc2/cqhttp_helper.py#L513-L526
def set_friend_add_request(self, *, flag, approve=True, remark=None): """ 处理加好友请求 ------------ :param str flag: 加好友请求的 flag(需从上报的数据中获得) :param bool approve: 是否同意请求 :param str remark: 添加后的好友备注(仅在同意时有效) :return: None :rtype: None """ return super().__getattr__('set_friend_add_request') \ (flag=flag, approve=approve, remark=remark)
[ "def", "set_friend_add_request", "(", "self", ",", "*", ",", "flag", ",", "approve", "=", "True", ",", "remark", "=", "None", ")", ":", "return", "super", "(", ")", ".", "__getattr__", "(", "'set_friend_add_request'", ")", "(", "flag", "=", "flag", ",", ...
处理加好友请求 ------------ :param str flag: 加好友请求的 flag(需从上报的数据中获得) :param bool approve: 是否同意请求 :param str remark: 添加后的好友备注(仅在同意时有效) :return: None :rtype: None
[ "处理加好友请求" ]
python
valid
rr-/docstring_parser
docstring_parser/parser/common.py
https://github.com/rr-/docstring_parser/blob/389773f6790a84d33b10160589ce8591122e12bb/docstring_parser/parser/common.py#L51-L57
def arg_name(self) -> T.Optional[str]: """Return argument name associated with given param.""" if len(self.args) > 2: return self.args[2] elif len(self.args) > 1: return self.args[1] return None
[ "def", "arg_name", "(", "self", ")", "->", "T", ".", "Optional", "[", "str", "]", ":", "if", "len", "(", "self", ".", "args", ")", ">", "2", ":", "return", "self", ".", "args", "[", "2", "]", "elif", "len", "(", "self", ".", "args", ")", ">",...
Return argument name associated with given param.
[ "Return", "argument", "name", "associated", "with", "given", "param", "." ]
python
train
iwanbk/nyamuk
nyamuk/nyamuk.py
https://github.com/iwanbk/nyamuk/blob/ac4c6028de288a4c8e0b332ae16eae889deb643d/nyamuk/nyamuk.py#L335-L367
def handle_suback(self): """Handle incoming SUBACK packet.""" self.logger.info("SUBACK received") ret, mid = self.in_packet.read_uint16() if ret != NC.ERR_SUCCESS: return ret qos_count = self.in_packet.remaining_length - self.in_packet.pos granted_qos = bytearray(qos_count) if granted_qos is None: return NC.ERR_NO_MEM i = 0 while self.in_packet.pos < self.in_packet.remaining_length: ret, byte = self.in_packet.read_byte() if ret != NC.ERR_SUCCESS: granted_qos = None return ret granted_qos[i] = byte i += 1 evt = event.EventSuback(mid, list(granted_qos)) self.push_event(evt) granted_qos = None return NC.ERR_SUCCESS
[ "def", "handle_suback", "(", "self", ")", ":", "self", ".", "logger", ".", "info", "(", "\"SUBACK received\"", ")", "ret", ",", "mid", "=", "self", ".", "in_packet", ".", "read_uint16", "(", ")", "if", "ret", "!=", "NC", ".", "ERR_SUCCESS", ":", "retur...
Handle incoming SUBACK packet.
[ "Handle", "incoming", "SUBACK", "packet", "." ]
python
train
gwastro/pycbc-glue
pycbc_glue/ligolw/table.py
https://github.com/gwastro/pycbc-glue/blob/a3e906bae59fbfd707c3ff82e5d008d939ec5e24/pycbc_glue/ligolw/table.py#L168-L187
def StripTableName(name): """ Return the significant portion of a table name according to LIGO LW naming conventions. Example: >>> StripTableName("sngl_burst_group:sngl_burst:table") 'sngl_burst' >>> StripTableName("sngl_burst:table") 'sngl_burst' >>> StripTableName("sngl_burst") 'sngl_burst' """ if name.lower() != name: warnings.warn("table name \"%s\" is not lower case" % name) try: return TablePattern.search(name).group("Name") except AttributeError: return name
[ "def", "StripTableName", "(", "name", ")", ":", "if", "name", ".", "lower", "(", ")", "!=", "name", ":", "warnings", ".", "warn", "(", "\"table name \\\"%s\\\" is not lower case\"", "%", "name", ")", "try", ":", "return", "TablePattern", ".", "search", "(", ...
Return the significant portion of a table name according to LIGO LW naming conventions. Example: >>> StripTableName("sngl_burst_group:sngl_burst:table") 'sngl_burst' >>> StripTableName("sngl_burst:table") 'sngl_burst' >>> StripTableName("sngl_burst") 'sngl_burst'
[ "Return", "the", "significant", "portion", "of", "a", "table", "name", "according", "to", "LIGO", "LW", "naming", "conventions", "." ]
python
train
pantsbuild/pants
src/python/pants/reporting/plaintext_reporter.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/reporting/plaintext_reporter.py#L128-L147
def start_workunit(self, workunit): """Implementation of Reporter callback.""" if not self.is_under_main_root(workunit): return label_format = self._get_label_format(workunit) if label_format == LabelFormat.FULL: if not WorkUnitLabel.SUPPRESS_LABEL in workunit.labels: self._emit_indented_workunit_label(workunit) # Start output on a new line. tool_output_format = self._get_tool_output_format(workunit) if tool_output_format == ToolOutputFormat.INDENT: self.emit(self._prefix(workunit, '\n')) elif tool_output_format == ToolOutputFormat.UNINDENTED: self.emit('\n') elif label_format == LabelFormat.DOT: self.emit('.') self.flush()
[ "def", "start_workunit", "(", "self", ",", "workunit", ")", ":", "if", "not", "self", ".", "is_under_main_root", "(", "workunit", ")", ":", "return", "label_format", "=", "self", ".", "_get_label_format", "(", "workunit", ")", "if", "label_format", "==", "La...
Implementation of Reporter callback.
[ "Implementation", "of", "Reporter", "callback", "." ]
python
train
noxdafox/clipspy
clips/environment.py
https://github.com/noxdafox/clipspy/blob/b22d71a6da821c1715d8fa00d7d75cabc09ed364/clips/environment.py#L126-L139
def save(self, path, binary=False): """Save a set of constructs into the CLIPS data base. If binary is True, the constructs will be saved in binary format. The Python equivalent of the CLIPS load command. """ if binary: ret = lib.EnvBsave(self._env, path.encode()) else: ret = lib.EnvSave(self._env, path.encode()) if ret == 0: raise CLIPSError(self._env)
[ "def", "save", "(", "self", ",", "path", ",", "binary", "=", "False", ")", ":", "if", "binary", ":", "ret", "=", "lib", ".", "EnvBsave", "(", "self", ".", "_env", ",", "path", ".", "encode", "(", ")", ")", "else", ":", "ret", "=", "lib", ".", ...
Save a set of constructs into the CLIPS data base. If binary is True, the constructs will be saved in binary format. The Python equivalent of the CLIPS load command.
[ "Save", "a", "set", "of", "constructs", "into", "the", "CLIPS", "data", "base", "." ]
python
train
pingali/dgit
dgitcore/contrib/repomanagers/gitmanager.py
https://github.com/pingali/dgit/blob/ecde01f40b98f0719dbcfb54452270ed2f86686d/dgitcore/contrib/repomanagers/gitmanager.py#L73-L97
def _run_generic_command(self, repo, cmd): """ Run a generic command within the repo. Assumes that you are in the repo's root directory """ result = None with cd(repo.rootdir): # Dont use sh. It is not collecting the stdout of all # child processes. output = self._run(cmd) try: result = { 'cmd': cmd, 'status': 'success', 'message': output, } except Exception as e: result = { 'cmd': cmd, 'status': 'error', 'message': str(e) } return result
[ "def", "_run_generic_command", "(", "self", ",", "repo", ",", "cmd", ")", ":", "result", "=", "None", "with", "cd", "(", "repo", ".", "rootdir", ")", ":", "# Dont use sh. It is not collecting the stdout of all", "# child processes.", "output", "=", "self", ".", ...
Run a generic command within the repo. Assumes that you are in the repo's root directory
[ "Run", "a", "generic", "command", "within", "the", "repo", ".", "Assumes", "that", "you", "are", "in", "the", "repo", "s", "root", "directory" ]
python
valid
gem/oq-engine
openquake/hazardlib/shakemap.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/shakemap.py#L67-L86
def download_array(shakemap_id, shakemap_url=SHAKEMAP_URL): """ :param shakemap_id: USGS Shakemap ID :returns: an array with the shakemap """ url = shakemap_url.format(shakemap_id) logging.info('Downloading %s', url) contents = json.loads(urlopen(url).read())[ 'properties']['products']['shakemap'][-1]['contents'] grid = contents.get('download/grid.xml') if grid is None: raise MissingLink('Could not find grid.xml link in %s' % url) uncertainty = contents.get('download/uncertainty.xml.zip') if uncertainty is None: with urlopen(grid['url']) as f: return get_shakemap_array(f) else: with urlopen(grid['url']) as f1, urlextract( uncertainty['url'], 'uncertainty.xml') as f2: return get_shakemap_array(f1, f2)
[ "def", "download_array", "(", "shakemap_id", ",", "shakemap_url", "=", "SHAKEMAP_URL", ")", ":", "url", "=", "shakemap_url", ".", "format", "(", "shakemap_id", ")", "logging", ".", "info", "(", "'Downloading %s'", ",", "url", ")", "contents", "=", "json", "....
:param shakemap_id: USGS Shakemap ID :returns: an array with the shakemap
[ ":", "param", "shakemap_id", ":", "USGS", "Shakemap", "ID", ":", "returns", ":", "an", "array", "with", "the", "shakemap" ]
python
train
jkenlooper/chill
src/chill/app.py
https://github.com/jkenlooper/chill/blob/35360c17c2a3b769ecb5406c6dabcf4cc70bd76f/src/chill/app.py#L67-L87
def multiple_directory_files_loader(*args): """ Loads all the files in each directory as values in a dict with the key being the relative file path of the directory. Updates the value if subsequent file paths are the same. """ d = dict() def load_files(folder): for (dirpath, dirnames, filenames) in os.walk(folder): for f in filenames: filepath = os.path.join(dirpath, f) with open( filepath, 'r' ) as f: key = filepath[len(os.path.commonprefix([root, filepath]))+1:] d[ key ] = f.read() for foldername in dirnames: load_files(os.path.join(dirpath, foldername)) for root in args: load_files(root) return d
[ "def", "multiple_directory_files_loader", "(", "*", "args", ")", ":", "d", "=", "dict", "(", ")", "def", "load_files", "(", "folder", ")", ":", "for", "(", "dirpath", ",", "dirnames", ",", "filenames", ")", "in", "os", ".", "walk", "(", "folder", ")", ...
Loads all the files in each directory as values in a dict with the key being the relative file path of the directory. Updates the value if subsequent file paths are the same.
[ "Loads", "all", "the", "files", "in", "each", "directory", "as", "values", "in", "a", "dict", "with", "the", "key", "being", "the", "relative", "file", "path", "of", "the", "directory", ".", "Updates", "the", "value", "if", "subsequent", "file", "paths", ...
python
train
pypa/setuptools
setuptools/command/easy_install.py
https://github.com/pypa/setuptools/blob/83c667e0b2a98193851c07115d1af65011ed0fb6/setuptools/command/easy_install.py#L1912-L1919
def is_python(text, filename='<string>'): "Is this string a valid Python script?" try: compile(text, filename, 'exec') except (SyntaxError, TypeError): return False else: return True
[ "def", "is_python", "(", "text", ",", "filename", "=", "'<string>'", ")", ":", "try", ":", "compile", "(", "text", ",", "filename", ",", "'exec'", ")", "except", "(", "SyntaxError", ",", "TypeError", ")", ":", "return", "False", "else", ":", "return", ...
Is this string a valid Python script?
[ "Is", "this", "string", "a", "valid", "Python", "script?" ]
python
train
hobson/aima
aima/agents.py
https://github.com/hobson/aima/blob/3572b2fb92039b4a1abe384be8545560fbd3d470/aima/agents.py#L339-L345
def move_to(self, thing, destination): "Move a thing to a new location." thing.bump = self.some_things_at(destination, Obstacle) if not thing.bump: thing.location = destination for o in self.observers: o.thing_moved(thing)
[ "def", "move_to", "(", "self", ",", "thing", ",", "destination", ")", ":", "thing", ".", "bump", "=", "self", ".", "some_things_at", "(", "destination", ",", "Obstacle", ")", "if", "not", "thing", ".", "bump", ":", "thing", ".", "location", "=", "desti...
Move a thing to a new location.
[ "Move", "a", "thing", "to", "a", "new", "location", "." ]
python
valid
diux-dev/ncluster
ncluster/local_backend.py
https://github.com/diux-dev/ncluster/blob/2fd359621896717197b479c7174d06d80df1529b/ncluster/local_backend.py#L422-L426
def run(self, *args, **kwargs): """Runs command on every job in the run.""" for job in self.jobs: job.run(*args, **kwargs)
[ "def", "run", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "for", "job", "in", "self", ".", "jobs", ":", "job", ".", "run", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Runs command on every job in the run.
[ "Runs", "command", "on", "every", "job", "in", "the", "run", "." ]
python
train
sdispater/orator
orator/orm/relations/morph_to_many.py
https://github.com/sdispater/orator/blob/bd90bf198ee897751848f9a92e49d18e60a74136/orator/orm/relations/morph_to_many.py#L50-L59
def _set_where(self): """ Set the where clause for the relation query. :return: self :rtype: BelongsToMany """ super(MorphToMany, self)._set_where() self._query.where("%s.%s" % (self._table, self._morph_type), self._morph_name)
[ "def", "_set_where", "(", "self", ")", ":", "super", "(", "MorphToMany", ",", "self", ")", ".", "_set_where", "(", ")", "self", ".", "_query", ".", "where", "(", "\"%s.%s\"", "%", "(", "self", ".", "_table", ",", "self", ".", "_morph_type", ")", ",",...
Set the where clause for the relation query. :return: self :rtype: BelongsToMany
[ "Set", "the", "where", "clause", "for", "the", "relation", "query", "." ]
python
train
hsolbrig/pyjsg
pyjsg/parser_impl/jsg_pairdef_parser.py
https://github.com/hsolbrig/pyjsg/blob/9b2b8fa8e3b8448abe70b09f804a79f0f31b32b7/pyjsg/parser_impl/jsg_pairdef_parser.py#L43-L57
def members_entries(self, all_are_optional: Optional[bool] = False) -> List[Tuple[str, str]]: """ Generate a list quoted raw name, signature type entries for this pairdef, recursively traversing reference types :param all_are_optional: If true, all types are forced optional :return: raw name/ signature type for all elements in this pair """ if self._type_reference: rval: List[Tuple[str, str]] = [] for n, t in self._context.reference(self._type_reference).members_entries(all_are_optional): rval.append((n, self._ebnf.signature_cardinality(t, all_are_optional).format(name=n))) return rval else: sig = self._ebnf.signature_cardinality(self._typ.reference_type(), all_are_optional) return [(name, sig.format(name=name)) for name in self._names]
[ "def", "members_entries", "(", "self", ",", "all_are_optional", ":", "Optional", "[", "bool", "]", "=", "False", ")", "->", "List", "[", "Tuple", "[", "str", ",", "str", "]", "]", ":", "if", "self", ".", "_type_reference", ":", "rval", ":", "List", "...
Generate a list quoted raw name, signature type entries for this pairdef, recursively traversing reference types :param all_are_optional: If true, all types are forced optional :return: raw name/ signature type for all elements in this pair
[ "Generate", "a", "list", "quoted", "raw", "name", "signature", "type", "entries", "for", "this", "pairdef", "recursively", "traversing", "reference", "types" ]
python
train
blockstack/blockstack-core
blockstack/lib/atlas.py
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L641-L671
def atlasdb_get_zonefile( zonefile_hash, con=None, path=None ): """ Look up all information on this zonefile. Returns {'zonefile_hash': ..., 'indexes': [...], etc} Zonefile information will be ordered by inv_index """ ret = None with AtlasDBOpen(con=con, path=path) as dbcon: sql = "SELECT * FROM zonefiles WHERE zonefile_hash = ? ORDER BY inv_index;" args = (zonefile_hash,) cur = dbcon.cursor() res = atlasdb_query_execute( cur, sql, args ) ret = { 'zonefile_hash': zonefile_hash, 'indexes': [], 'block_heights': [], 'present': False, 'tried_storage': False } for zfinfo in res: ret['indexes'].append( zfinfo['inv_index'] ) ret['block_heights'].append( zfinfo['block_height'] ) ret['present'] = ret['present'] or zfinfo['present'] ret['tried_storage'] = ret['tried_storage'] or zfinfo['tried_storage'] return ret
[ "def", "atlasdb_get_zonefile", "(", "zonefile_hash", ",", "con", "=", "None", ",", "path", "=", "None", ")", ":", "ret", "=", "None", "with", "AtlasDBOpen", "(", "con", "=", "con", ",", "path", "=", "path", ")", "as", "dbcon", ":", "sql", "=", "\"SEL...
Look up all information on this zonefile. Returns {'zonefile_hash': ..., 'indexes': [...], etc} Zonefile information will be ordered by inv_index
[ "Look", "up", "all", "information", "on", "this", "zonefile", ".", "Returns", "{", "zonefile_hash", ":", "...", "indexes", ":", "[", "...", "]", "etc", "}", "Zonefile", "information", "will", "be", "ordered", "by", "inv_index" ]
python
train
awslabs/serverless-application-model
samtranslator/intrinsics/resolver.py
https://github.com/awslabs/serverless-application-model/blob/cccb0c96b5c91e53355ebc07e542467303a5eedd/samtranslator/intrinsics/resolver.py#L67-L88
def resolve_sam_resource_id_refs(self, input, supported_resource_id_refs): """ Some SAM resources have their logical ids mutated from the original id that the customer writes in the template. This method recursively walks the tree and updates these logical ids from the old value to the new value that is generated by SAM. Example: {"Ref": "MyLayer"} -> {"Ref": "MyLayerABC123"} This method does not attempt to validate a reference. If it is invalid or non-resolvable, it skips the occurrence and continues with the rest. It is recommended that you have an external process that detects and surfaces invalid references. For first call, it is recommended that `template` is the entire CFN template in order to handle references in Mapping or Output sections. :param dict input: CFN template that needs resolution. This method will modify the input directly resolving references. In subsequent recursions, this will be a fragment of the CFN template. :param dict supported_resource_id_refs: Dictionary that maps old logical ids to new ones. :return list errors: List of dictionary containing information about invalid reference. Empty list otherwise """ return self._traverse(input, supported_resource_id_refs, self._try_resolve_sam_resource_id_refs)
[ "def", "resolve_sam_resource_id_refs", "(", "self", ",", "input", ",", "supported_resource_id_refs", ")", ":", "return", "self", ".", "_traverse", "(", "input", ",", "supported_resource_id_refs", ",", "self", ".", "_try_resolve_sam_resource_id_refs", ")" ]
Some SAM resources have their logical ids mutated from the original id that the customer writes in the template. This method recursively walks the tree and updates these logical ids from the old value to the new value that is generated by SAM. Example: {"Ref": "MyLayer"} -> {"Ref": "MyLayerABC123"} This method does not attempt to validate a reference. If it is invalid or non-resolvable, it skips the occurrence and continues with the rest. It is recommended that you have an external process that detects and surfaces invalid references. For first call, it is recommended that `template` is the entire CFN template in order to handle references in Mapping or Output sections. :param dict input: CFN template that needs resolution. This method will modify the input directly resolving references. In subsequent recursions, this will be a fragment of the CFN template. :param dict supported_resource_id_refs: Dictionary that maps old logical ids to new ones. :return list errors: List of dictionary containing information about invalid reference. Empty list otherwise
[ "Some", "SAM", "resources", "have", "their", "logical", "ids", "mutated", "from", "the", "original", "id", "that", "the", "customer", "writes", "in", "the", "template", ".", "This", "method", "recursively", "walks", "the", "tree", "and", "updates", "these", ...
python
train
aht/stream.py
stream.py
https://github.com/aht/stream.py/blob/6a4945cbddaf74138eee5ba33eee3988cfceb84d/stream.py#L170-L179
def pipe(inpipe, outpipe): """Connect inpipe and outpipe. If outpipe is not a Stream instance, it should be an function callable on an iterable. """ if hasattr(outpipe, '__pipe__'): return outpipe.__pipe__(inpipe) elif hasattr(outpipe, '__call__'): return outpipe(inpipe) else: raise BrokenPipe('No connection mechanism defined')
[ "def", "pipe", "(", "inpipe", ",", "outpipe", ")", ":", "if", "hasattr", "(", "outpipe", ",", "'__pipe__'", ")", ":", "return", "outpipe", ".", "__pipe__", "(", "inpipe", ")", "elif", "hasattr", "(", "outpipe", ",", "'__call__'", ")", ":", "return", "o...
Connect inpipe and outpipe. If outpipe is not a Stream instance, it should be an function callable on an iterable.
[ "Connect", "inpipe", "and", "outpipe", ".", "If", "outpipe", "is", "not", "a", "Stream", "instance", "it", "should", "be", "an", "function", "callable", "on", "an", "iterable", "." ]
python
train
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_interface_ext.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_interface_ext.py#L1222-L1238
def get_interface_detail_output_interface_ifHCOutOctets(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_interface_detail = ET.Element("get_interface_detail") config = get_interface_detail output = ET.SubElement(get_interface_detail, "output") interface = ET.SubElement(output, "interface") interface_type_key = ET.SubElement(interface, "interface-type") interface_type_key.text = kwargs.pop('interface_type') interface_name_key = ET.SubElement(interface, "interface-name") interface_name_key.text = kwargs.pop('interface_name') ifHCOutOctets = ET.SubElement(interface, "ifHCOutOctets") ifHCOutOctets.text = kwargs.pop('ifHCOutOctets') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "get_interface_detail_output_interface_ifHCOutOctets", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_interface_detail", "=", "ET", ".", "Element", "(", "\"get_interface_detail\"", ")", "con...
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
hydpy-dev/hydpy
hydpy/models/lland/lland_states.py
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/models/lland/lland_states.py#L88-L101
def trim(self, lower=None, upper=None): """Trim values in accordance with :math:`BoWa \\leq NFk`. >>> from hydpy.models.lland import * >>> parameterstep('1d') >>> nhru(5) >>> nfk(200.) >>> states.bowa(-100.,0., 100., 200., 300.) >>> states.bowa bowa(0.0, 0.0, 100.0, 200.0, 200.0) """ if upper is None: upper = self.subseqs.seqs.model.parameters.control.nfk lland_sequences.State1DSequence.trim(self, lower, upper)
[ "def", "trim", "(", "self", ",", "lower", "=", "None", ",", "upper", "=", "None", ")", ":", "if", "upper", "is", "None", ":", "upper", "=", "self", ".", "subseqs", ".", "seqs", ".", "model", ".", "parameters", ".", "control", ".", "nfk", "lland_seq...
Trim values in accordance with :math:`BoWa \\leq NFk`. >>> from hydpy.models.lland import * >>> parameterstep('1d') >>> nhru(5) >>> nfk(200.) >>> states.bowa(-100.,0., 100., 200., 300.) >>> states.bowa bowa(0.0, 0.0, 100.0, 200.0, 200.0)
[ "Trim", "values", "in", "accordance", "with", ":", "math", ":", "BoWa", "\\\\", "leq", "NFk", "." ]
python
train
jaysonsantos/python-binary-memcached
bmemcached/client/distributed.py
https://github.com/jaysonsantos/python-binary-memcached/blob/6a792829349c69204d9c5045e5c34b4231216dd6/bmemcached/client/distributed.py#L108-L126
def replace(self, key, value, time=0, compress_level=-1): """ Replace a key/value to server ony if it does exist. :param key: Key's name :type key: six.string_types :param value: A value to be stored on server. :type value: object :param time: Time in seconds that your key will expire. :type time: int :param compress_level: How much to compress. 0 = no compression, 1 = fastest, 9 = slowest but best, -1 = default compression level. :type compress_level: int :return: True if key is replace False if key does not exists :rtype: bool """ server = self._get_server(key) return server.replace(key, value, time, compress_level)
[ "def", "replace", "(", "self", ",", "key", ",", "value", ",", "time", "=", "0", ",", "compress_level", "=", "-", "1", ")", ":", "server", "=", "self", ".", "_get_server", "(", "key", ")", "return", "server", ".", "replace", "(", "key", ",", "value"...
Replace a key/value to server ony if it does exist. :param key: Key's name :type key: six.string_types :param value: A value to be stored on server. :type value: object :param time: Time in seconds that your key will expire. :type time: int :param compress_level: How much to compress. 0 = no compression, 1 = fastest, 9 = slowest but best, -1 = default compression level. :type compress_level: int :return: True if key is replace False if key does not exists :rtype: bool
[ "Replace", "a", "key", "/", "value", "to", "server", "ony", "if", "it", "does", "exist", "." ]
python
train
zabertech/python-swampyer
swampyer/__init__.py
https://github.com/zabertech/python-swampyer/blob/31b040e7570455718709a496d6d9faacfb372a00/swampyer/__init__.py#L481-L504
def publish(self,topic,options=None,args=None,kwargs=None): """ Publishes a messages to the server """ topic = self.get_full_uri(topic) if options is None: options = {'acknowledge':True} if options.get('acknowledge'): request = PUBLISH( options=options or {}, topic=topic, args=args or [], kwargs=kwargs or {} ) result = self.send_and_await_response(request) return result else: request = PUBLISH( options=options or {}, topic=topic, args=args or [], kwargs=kwargs or {} ) self.send_message(request) return request.request_id
[ "def", "publish", "(", "self", ",", "topic", ",", "options", "=", "None", ",", "args", "=", "None", ",", "kwargs", "=", "None", ")", ":", "topic", "=", "self", ".", "get_full_uri", "(", "topic", ")", "if", "options", "is", "None", ":", "options", "...
Publishes a messages to the server
[ "Publishes", "a", "messages", "to", "the", "server" ]
python
train
BlackEarth/bl
bl/id.py
https://github.com/BlackEarth/bl/blob/edf6f37dac718987260b90ad0e7f7fe084a7c1a3/bl/id.py#L60-L82
def random_id(length=16, charset=alphanum_chars, first_charset=alpha_chars, sep='', group=0): """Creates a random id with the given length and charset. ## Parameters * length the number of characters in the id * charset what character set to use (a list of characters) * first_charset what character set for the first character * sep='' what character to insert between groups * group=0 how long the groups are (default 0 means no groups) """ t = [] first_chars = list(set(charset).intersection(first_charset)) if len(first_chars) == 0: first_chars = charset t.append(first_chars[random.randrange(len(first_chars))]) for i in range(len(t), length): if (group > 0) and (i % group == 0) and (i < length): t.append(sep) t.append(charset[random.randrange(len(charset))]) return ''.join(t)
[ "def", "random_id", "(", "length", "=", "16", ",", "charset", "=", "alphanum_chars", ",", "first_charset", "=", "alpha_chars", ",", "sep", "=", "''", ",", "group", "=", "0", ")", ":", "t", "=", "[", "]", "first_chars", "=", "list", "(", "set", "(", ...
Creates a random id with the given length and charset. ## Parameters * length the number of characters in the id * charset what character set to use (a list of characters) * first_charset what character set for the first character * sep='' what character to insert between groups * group=0 how long the groups are (default 0 means no groups)
[ "Creates", "a", "random", "id", "with", "the", "given", "length", "and", "charset", ".", "##", "Parameters", "*", "length", "the", "number", "of", "characters", "in", "the", "id", "*", "charset", "what", "character", "set", "to", "use", "(", "a", "list",...
python
train
openmicroscopy/yaclifw
yaclifw/main.py
https://github.com/openmicroscopy/yaclifw/blob/a01179fefb2c2c4260c75e6d1dc6e19de9979d64/yaclifw/main.py#L37-L59
def entry_point(items=tuple()): """ External entry point which calls main() and if Stop is raised, calls sys.exit() """ try: if not items: from .example import ExampleCommand from .version import Version items = [(ExampleCommand.NAME, ExampleCommand), (Version.NAME, Version)] main("yaclifw", items=items) except Stop as stop: print(stop) sys.exit(stop.rc) except SystemExit: raise except KeyboardInterrupt: print("Cancelled") sys.exit(1) except Exception: traceback.print_exc() sys.exit(1)
[ "def", "entry_point", "(", "items", "=", "tuple", "(", ")", ")", ":", "try", ":", "if", "not", "items", ":", "from", ".", "example", "import", "ExampleCommand", "from", ".", "version", "import", "Version", "items", "=", "[", "(", "ExampleCommand", ".", ...
External entry point which calls main() and if Stop is raised, calls sys.exit()
[ "External", "entry", "point", "which", "calls", "main", "()", "and", "if", "Stop", "is", "raised", "calls", "sys", ".", "exit", "()" ]
python
test