import pandas as pd

def numpymat2df(mat):
    """
    Sometimes (though not very often) it is useful to convert a numpy matrix
    which has no column names to a Pandas dataframe for use of the Pandas
    functions. This method converts a 2D numpy matrix to a Pandas dataframe
    with default column headers.

    Parameters
    ----------
    mat : The numpy matrix

    Returns
    -------
    A pandas dataframe with the same data as the input matrix but with
    columns named x0, x1, ... x[n-1] for the number of columns.
    """
    return pd.DataFrame(
        dict(('x%d' % i, mat[:, i]) for i in range(mat.shape[1])))
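A quick usage sketch of the function above (assuming numpy and pandas are installed):

import numpy as np

mat = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
df = numpymat2df(mat)
print(list(df.columns))  # ['x0', 'x1']
print(df.shape)          # (3, 2)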
def set_settings_env(executable_folder=None):
    """
    Add all application folders.

    :param executable_folder: the folder that contains local and external_app_repos
    :return:
    """
    executable_folder = executable_folder or get_executable_folder()
    if os.path.exists(os.path.join(executable_folder, "local/total_settings.py")):
        print("Using total settings")
        os.chdir(executable_folder)
        os.environ["DJANGO_SETTINGS_MODULE"] = "local.total_settings"
        os.environ["STATIC_ROOT"] = os.path.join(executable_folder, "static")
        os.environ["MEDIA_ROOT"] = os.path.join(executable_folder, "media")
    else:
        os.environ.setdefault('ROOT_DIR', get_folder(get_inspection_frame(2)))
        os.environ["DJANGO_SETTINGS_MODULE"] = "djangoautoconf.base_settings"
def build(self):
    """Builds this object into the desired output information."""
    signed = bool(self.options() & Builder.Options.Signed)

    # remove previous build information
    buildpath = self.buildPath()
    if not buildpath:
        raise errors.InvalidBuildPath(buildpath)

    # setup the environment
    for key, value in self.environment().items():
        log.info('SET {0}={1}'.format(key, value))
        os.environ[key] = value

    if os.path.exists(buildpath):
        shutil.rmtree(buildpath)

    # generate the build path for the installer
    os.makedirs(buildpath)

    # create the output path
    outpath = self.outputPath()
    if not os.path.exists(outpath):
        os.makedirs(outpath)

    # copy license information
    src = self.licenseFile()
    if src and os.path.exists(src):
        targ = os.path.join(buildpath, 'license.txt')
        shutil.copyfile(src, targ)

    # generate revision information
    if self.options() & Builder.Options.GenerateRevision:
        self.generateRevision()

    # generate documentation information
    if self.options() & Builder.Options.GenerateDocs:
        self.generateDocumentation(buildpath)

    # generate setup file
    if self.options() & Builder.Options.GenerateSetupFile:
        setuppath = os.path.join(self.sourcePath(), '..')
        egg = (self.options() & Builder.Options.GenerateEgg) != 0
        self.generateSetupFile(setuppath, egg=egg)

    # generate executable information
    if self.options() & Builder.Options.GenerateExecutable:
        if not self.generateExecutable(signed=signed):
            return

    # generate zipfile information
    if self.options() & Builder.Options.GenerateZipFile:
        self.generateZipFile(self.outputPath())

    # generate installer information
    if self.options() & Builder.Options.GenerateInstaller:
        self.generateInstaller(buildpath, signed=signed)
from hashlib import sha1

def generate_key_data_from_nonce(server_nonce, new_nonce):
    """Generates the key data corresponding to the given nonces"""
    server_nonce = server_nonce.to_bytes(16, 'little', signed=True)
    new_nonce = new_nonce.to_bytes(32, 'little', signed=True)
    hash1 = sha1(new_nonce + server_nonce).digest()
    hash2 = sha1(server_nonce + new_nonce).digest()
    hash3 = sha1(new_nonce + new_nonce).digest()
    key = hash1 + hash2[:12]
    iv = hash2[12:20] + hash3 + new_nonce[:4]
    return key, iv
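To see the sizes involved, here is a minimal sketch; the nonce values are arbitrary stand-ins, not real handshake traffic:

server_nonce = int.from_bytes(b'\x01' * 16, 'little', signed=True)
new_nonce = int.from_bytes(b'\x02' * 32, 'little', signed=True)
key, iv = generate_key_data_from_nonce(server_nonce, new_nonce)
print(len(key), len(iv))  # 32 32 -- a 256-bit key and a 256-bit IV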
def set_parent_on_new(self, parentrefobj):
    """Contextmanager that on close will get all new unwrapped refobjects,
    and for every refobject with no parent sets it to the given one.

    :returns: None
    :rtype: None
    :raises: None
    """
    refobjinter = self.get_refobjinter()
    # to make sure we only get the new ones,
    # we get all current unwrapped refobjects first
    old = self.get_unwrapped(self.get_root(), refobjinter)
    yield
    new = self.get_unwrapped(self.get_root(), refobjinter) - old
    for refobj in new:
        if refobjinter.get_parent(refobj) is None:
            refobjinter.set_parent(refobj, parentrefobj)
import numpy as np

def oggvorbis(s):
    """
    This is taken from the ogg vorbis spec
    (http://xiph.org/vorbis/doc/Vorbis_I_spec.html)

    :param s: the total length of the window, in samples
    """
    try:
        s = np.arange(s)
    except TypeError:
        s = np.arange(s[0])
    i = np.sin((s + .5) / len(s) * np.pi) ** 2
    f = np.sin(.5 * np.pi * i)
    return f * (1. / f.max())
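For intuition, the window can be generated and inspected directly; a small sketch using the function above:

win = oggvorbis(16)
print(len(win))                               # 16
print(round(float(win.max()), 6))             # 1.0 -- normalized to peak at 1
print(round(win[0], 3) == round(win[-1], 3))  # True -- the taper is symmetric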
def start(self, io_loop):
    """
    Run the ``before_run`` callbacks and queue the ``on_start`` callbacks.

    :param tornado.ioloop.IOLoop io_loop: loop to start the app on.
    """
    for callback in self.before_run_callbacks:
        try:
            callback(self.tornado_application, io_loop)
        except Exception:
            self.logger.error('before_run callback %r cancelled start',
                              callback, exc_info=1)
            self.stop(io_loop)
            raise
    for callback in self.on_start_callbacks:
        io_loop.spawn_callback(callback, self.tornado_application, io_loop)
def update(self, list_id, subscriber_hash, data):
    """
    Update tags for a specific subscriber.

    The documentation lists only the tags request body parameter, so it is
    being documented and error-checked as if it were required based on the
    description of the method.

    The data list needs to include a "status" key. This determines if the
    tag should be added or removed from the user:

    data = {
        'tags': [
            {'name': 'foo', 'status': 'active'},
            {'name': 'bar', 'status': 'inactive'}
        ]
    }

    :param list_id: The unique id for the list.
    :type list_id: :py:class:`str`
    :param subscriber_hash: The MD5 hash of the lowercase version of the
        list member's email address.
    :type subscriber_hash: :py:class:`str`
    :param data: The request body parameters
    :type data: :py:class:`dict`
        data = {
            "tags": list*
        }
    """
    subscriber_hash = check_subscriber_hash(subscriber_hash)
    self.list_id = list_id
    self.subscriber_hash = subscriber_hash
    if 'tags' not in data:
        raise KeyError('The list member tags must have a tag')
    response = self._mc_client._post(
        url=self._build_path(list_id, 'members', subscriber_hash, 'tags'),
        data=data)
    return response
def get_host_port_names(self, host_name):
    """Return a list of the port names of an XIV host."""
    port_names = list()
    host = self.get_hosts_by_name(host_name)
    fc_ports = host.fc_ports
    iscsi_ports = host.iscsi_ports
    port_names.extend(fc_ports.split(',') if fc_ports != '' else [])
    port_names.extend(iscsi_ports.split(',') if iscsi_ports != '' else [])
    return port_names
def parse(chord):
    """Parse a string to get its chord components.

    :param str chord: str expression of a chord
    :rtype: (str, pychord.Quality, str, str)
    :return: (root, quality, appended, on)
    """
    if len(chord) > 1 and chord[1] in ("b", "#"):
        root = chord[:2]
        rest = chord[2:]
    else:
        root = chord[:1]
        rest = chord[1:]
    check_note(root, chord)
    on_chord_idx = rest.find("/")
    if on_chord_idx >= 0:
        on = rest[on_chord_idx + 1:]
        rest = rest[:on_chord_idx]
        check_note(on, chord)
    else:
        on = None
    if rest in QUALITY_DICT:
        quality = Quality(rest)
    else:
        raise ValueError("Invalid chord {}: Unknown quality {}".format(chord, rest))
    # TODO: Implement parser for appended notes
    appended = []
    return root, quality, appended, on
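A hypothetical call, assuming pychord's QUALITY_DICT contains the quality name 'm7':

root, quality, appended, on = parse("Em7/G")
print(root, appended, on)  # E [] G -- quality is a pychord Quality('m7') instance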
def row_sparse_array(arg1, shape=None, ctx=None, dtype=None):
    """Creates a `RowSparseNDArray`, a multidimensional row sparse array with a set of \
    tensor slices at given indices.

    The RowSparseNDArray can be instantiated in several ways:

    - row_sparse_array(D):
        to construct a RowSparseNDArray with a dense ndarray ``D``
        - **D** (*array_like*) - An object exposing the array interface, an object whose \
          `__array__` method returns an array, or any (nested) sequence.
        - **ctx** (*Context, optional*) - Device context \
          (default is the current default context).
        - **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
          The default dtype is ``D.dtype`` if ``D`` is an NDArray or numpy.ndarray, \
          float32 otherwise.

    - row_sparse_array(S)
        to construct a RowSparseNDArray with a sparse ndarray ``S``
        - **S** (*RowSparseNDArray*) - A sparse ndarray.
        - **ctx** (*Context, optional*) - Device context \
          (default is the current default context).
        - **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
          The default dtype is ``S.dtype``.

    - row_sparse_array((D0, D1 .. Dn))
        to construct an empty RowSparseNDArray with shape ``(D0, D1, ... Dn)``
        - **D0, D1 .. Dn** (*int*) - The shape of the ndarray
        - **ctx** (*Context, optional*) - Device context \
          (default is the current default context).
        - **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
          The default dtype is float32.

    - row_sparse_array((data, indices))
        to construct a RowSparseNDArray based on the definition of row sparse format \
        using two separate arrays, where the `indices` stores the indices of the row
        slices with non-zeros, while the values are stored in `data`. The corresponding
        NDArray ``dense`` represented by RowSparseNDArray ``rsp`` has \
        ``dense[rsp.indices[i], :, :, :, ...] = rsp.data[i, :, :, :, ...]``
        The row indices are expected to be **sorted in ascending order.**
        - **data** (*array_like*) - An object exposing the array interface, which \
          holds all the non-zero row slices of the array.
        - **indices** (*array_like*) - An object exposing the array interface, which \
          stores the row index for each row slice with non-zero elements.
        - **shape** (*tuple of int, optional*) - The shape of the array. The default \
          shape is inferred from the indices and indptr arrays.
        - **ctx** (*Context, optional*) - Device context \
          (default is the current default context).
        - **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
          The default dtype is float32.

    Parameters
    ----------
    arg1 : NDArray, numpy.ndarray, RowSparseNDArray, tuple of int or tuple of array_like
        The argument to help instantiate the row sparse ndarray. See above for further details.
    shape : tuple of int, optional
        The shape of the row sparse ndarray. (Default value = None)
    ctx : Context, optional
        Device context (default is the current default context).
    dtype : str or numpy.dtype, optional
        The data type of the output array. (Default value = None)

    Returns
    -------
    RowSparseNDArray
        An `RowSparseNDArray` with the `row_sparse` storage representation.

    Examples
    --------
    >>> a = mx.nd.sparse.row_sparse_array(([[1, 2], [3, 4]], [1, 4]), shape=(6, 2))
    >>> a.asnumpy()
    array([[ 0.,  0.],
           [ 1.,  2.],
           [ 0.,  0.],
           [ 0.,  0.],
           [ 3.,  4.],
           [ 0.,  0.]], dtype=float32)

    See Also
    --------
    RowSparseNDArray : MXNet NDArray in row sparse format.
    """
    # construct a row sparse array from (D0, D1 ..) or (data, indices)
    if isinstance(arg1, tuple):
        arg_len = len(arg1)
        if arg_len < 2:
            raise ValueError("Unexpected length of input tuple: " + str(arg_len))
        elif arg_len > 2:
            # empty ndarray with shape
            _check_shape(arg1, shape)
            return empty('row_sparse', arg1, ctx=ctx, dtype=dtype)
        else:
            # len(arg1) = 2, is either shape or (data, indices)
            if isinstance(arg1[0], integer_types) and isinstance(arg1[1], integer_types):
                # empty ndarray with shape
                _check_shape(arg1, shape)
                return empty('row_sparse', arg1, ctx=ctx, dtype=dtype)
            else:
                # data, indices, indptr
                return _row_sparse_ndarray_from_definition(
                    arg1[0], arg1[1], shape=shape, ctx=ctx, dtype=dtype)
    else:
        # construct a row sparse ndarray from a dense / sparse array
        if isinstance(arg1, RowSparseNDArray):
            # construct a row sparse ndarray from RowSparseNDArray
            _check_shape(arg1.shape, shape)
            return array(arg1, ctx=ctx, dtype=dtype)
        elif isinstance(arg1, CSRNDArray):
            raise ValueError("Unexpected input type: CSRNDArray")
        else:
            # construct a row sparse ndarray from a dense one
            # prepare default dtype since mx.nd.array doesn't use default values
            # based on source_array
            dtype = _prepare_default_dtype(arg1, dtype)
            # create dns array with provided dtype. ctx is not passed since copy across
            # ctx requires dtype to be the same
            dns = _array(arg1, dtype=dtype)
            if ctx is not None and dns.context != ctx:
                dns = dns.as_in_context(ctx)
            _check_shape(dns.shape, shape)
            return dns.tostype('row_sparse')
def storage_at_hvmv_substation(mv_grid, parameters, mode=None):
    """
    Place storage at HV/MV substation bus bar.

    Parameters
    ----------
    mv_grid : :class:`~.grid.grids.MVGrid`
        MV grid instance
    parameters : :obj:`dict`
        Dictionary with storage parameters. Must at least contain
        'nominal_power'. See :class:`~.grid.network.StorageControl` for
        more information.
    mode : :obj:`str`, optional
        Operational mode. See :class:`~.grid.network.StorageControl` for
        possible options and more information. Default: None.

    Returns
    -------
    :class:`~.grid.components.Storage`, :class:`~.grid.components.Line`
        Created storage instance and newly added line to connect storage.
    """
    storage = set_up_storage(node=mv_grid.station, parameters=parameters,
                             operational_mode=mode)
    line = connect_storage(storage, mv_grid.station)
    return storage, line
def dict(self):
    """A dict that holds key/values for all of the properties in the object.

    :return:
    """
    SKIP_KEYS = ('_source_table', '_dest_table', 'd_vid', 't_vid', 'st_id',
                 'dataset', 'hash', 'process_records')
    return OrderedDict(
        [(k, getattr(self, k)) for k in self.properties if k not in SKIP_KEYS])
def save(self, *args, **kwargs):
    """
    **uid**: :code:`{office.uid}_{cycle.uid}_race`
    """
    self.uid = '{}_{}_race'.format(self.office.uid, self.cycle.uid)
    name_label = '{0} {1}'.format(self.cycle.name, self.office.label)
    if self.special:
        self.uid = '{}:special'.format(self.uid)
        name_label = '{} Special'.format(name_label)
    self.label = name_label
    self.name = name_label
    if not self.slug:
        self.slug = uuslug(
            name_label,
            instance=self,
            max_length=100,
            separator='-',
            start_no=2
        )
    super(Race, self).save(*args, **kwargs)
def apply_inverse(self, y):
    """
    Self-consistently apply the inverse of the computed kernel matrix to
    some vector or matrix of samples. This method subtracts the mean,
    sorts the samples, then returns the samples in the correct (unsorted)
    order.

    :param y: ``(nsamples, )`` or ``(nsamples, K)``
        The vector (or matrix) of sample values.
    """
    self.recompute(quiet=False)
    r = np.array(y, dtype=np.float64, order="F")
    r = self._check_dimensions(r, check_dim=False)

    # Broadcast the mean function (a tuple index, since indexing with a
    # plain list of slices is deprecated in recent numpy)
    m = (slice(None),) + (np.newaxis,) * (len(r.shape) - 1)
    r -= self._call_mean(self._x)[m]

    # Do the solve
    if len(r.shape) == 1:
        b = self.solver.apply_inverse(r, in_place=True).flatten()
    else:
        b = self.solver.apply_inverse(r, in_place=True)
    return b
import networkx as nx
from matplotlib.pylab import axis, cm, show

def draw_graph(matrix, clusters, **kwargs):
    """
    Visualize the clustering

    :param matrix: The unprocessed adjacency matrix
    :param clusters: list of tuples containing clusters as returned
                     by 'get_clusters'
    :param kwargs: Additional keyword arguments to be passed to
                   networkx.draw_networkx
    """
    # make a networkx graph from the adjacency matrix
    graph = nx.Graph(matrix)

    # map node to cluster id for colors
    cluster_map = {node: i for i, cluster in enumerate(clusters) for node in cluster}
    colors = [cluster_map[i] for i in range(len(graph.nodes()))]

    # if colormap not specified in kwargs, use a default
    if not kwargs.get("cmap", False):
        kwargs["cmap"] = cm.tab20

    # draw
    nx.draw_networkx(graph, node_color=colors, **kwargs)
    axis("off")
    show(block=False)
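A minimal sketch of calling draw_graph on a toy graph; the adjacency matrix and clusters below are hand-made stand-ins for real clustering output:

import numpy as np

# two triangles joined by a single bridge edge
matrix = np.array([[0, 1, 1, 0, 0, 0],
                   [1, 0, 1, 0, 0, 0],
                   [1, 1, 0, 1, 0, 0],
                   [0, 0, 1, 0, 1, 1],
                   [0, 0, 0, 1, 0, 1],
                   [0, 0, 0, 1, 1, 0]])
clusters = [(0, 1, 2), (3, 4, 5)]  # same shape as 'get_clusters' output
draw_graph(matrix, clusters, node_size=300, with_labels=True)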
def makedirs(path, mode=0o777, exist_ok=False):
    """A wrapper of os.makedirs()."""
    os.makedirs(path, mode, exist_ok)
def create_run(cls, *args, **kwargs):
    """
    :return:
        a delegator function that calls the ``cls`` constructor whose
        arguments are a seed tuple followed by the supplied ``*args`` and
        ``**kwargs``, then returns the object's ``run`` method. By default,
        a thread wrapping that ``run`` method is spawned.
    """
    def f(seed_tuple):
        j = cls(seed_tuple, *args, **kwargs)
        return j.run
    return f
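A sketch of the delegator pattern; EchoHandler and the three-element seed tuple are made up for illustration:

class EchoHandler:
    # hypothetical handler: only the constructor signature and run() matter
    def __init__(self, seed_tuple, prefix):
        self.msg = seed_tuple[1]
        self.prefix = prefix

    def run(self):
        print(self.prefix, self.msg)

delegate = create_run(EchoHandler, 'got:')
run_method = delegate((None, 'hello', None))  # seed tuple
run_method()  # prints: got: hello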
def set_pump_status(self, status):
    """
    Updates pump status and logs update to console.
    """
    self.pump_status = status
    _logger.info("%r partition %r", status, self.lease.partition_id)
def feed_data(self, data: bytes) -> None:
    """Proxy feed_data to the wrapped parser."""
    if self._parser is not None:
        self._parser.feed_data(data)
def expectedLabelPosition(peptide, labelStateInfo, sequence=None,
                          modPositions=None):
    """Returns a modification description of a certain label state of a peptide.

    :param peptide: Peptide sequence used to calculate the expected label state
        modifications
    :param labelStateInfo: An entry of :attr:`LabelDescriptor.labels` that
        describes a label state
    :param sequence: unmodified amino acid sequence of :var:`peptide`, if None
        it is generated by :func:`maspy.peptidemethods.removeModifications()`
    :param modPositions: dictionary describing the modification state of
        "peptide", if None it is generated by
        :func:`maspy.peptidemethods.returnModPositions()`

    :returns: {sequence position: sorted list of expected label modifications
        on that position, ...}
    """
    if modPositions is None:
        modPositions = maspy.peptidemethods.returnModPositions(peptide,
                                                               indexStart=0)
    if sequence is None:
        sequence = maspy.peptidemethods.removeModifications(peptide)

    currLabelMods = dict()
    for labelPosition, labelSymbols in viewitems(labelStateInfo['aminoAcidLabels']):
        labelSymbols = aux.toList(labelSymbols)
        if labelSymbols == ['']:
            pass
        elif labelPosition == 'nTerm':
            currLabelMods.setdefault(0, list())
            currLabelMods[0].extend(labelSymbols)
        else:
            for sequencePosition in aux.findAllSubstrings(sequence, labelPosition):
                currLabelMods.setdefault(sequencePosition, list())
                currLabelMods[sequencePosition].extend(labelSymbols)

    if labelStateInfo['excludingModifications'] is not None:
        for excludingMod, excludedLabelSymbol in viewitems(labelStateInfo['excludingModifications']):
            if excludingMod not in modPositions:
                continue
            for excludingModPos in modPositions[excludingMod]:
                if excludingModPos not in currLabelMods:
                    continue
                if excludedLabelSymbol not in currLabelMods[excludingModPos]:
                    continue
                if len(currLabelMods[excludingModPos]) == 1:
                    del currLabelMods[excludingModPos]
                else:
                    excludedModIndex = currLabelMods[excludingModPos].index(excludedLabelSymbol)
                    currLabelMods[excludingModPos].pop(excludedModIndex)

    for sequencePosition in list(viewkeys(currLabelMods)):
        currLabelMods[sequencePosition] = sorted(currLabelMods[sequencePosition])
    return currLabelMods
def ask_confirmation():
    """Ask the user for confirmation. Return True if the user confirmed
    the execution, False otherwise.

    :returns: bool
    """
    while True:
        print("Do you want to restart these brokers? ", end="")
        choice = input().lower()
        if choice in ['yes', 'y']:
            return True
        elif choice in ['no', 'n']:
            return False
        else:
            print("Please respond with 'yes' or 'no'")
def purge(self):
    """Delete PARTIAL data files and remove torrent from client."""
    def partial_file(item):
        "Filter out partial files"
        return item.completed_chunks < item.size_chunks

    self.cull(file_filter=partial_file,
              attrs=["get_completed_chunks", "get_size_chunks"])
def QueueResponse(self, response, timestamp=None):
    """Queues the message on the flow's state."""
    if timestamp is None:
        timestamp = self.frozen_timestamp
    self.response_queue.append((response, timestamp))
def _connect_to_ec2(region, credentials):
    """
    :param region: The region of AWS to connect to.
    :param EC2Credentials credentials: The credentials to use to authenticate
        with EC2.
    :return: a connection object to AWS EC2
    """
    conn = boto.ec2.connect_to_region(
        region,
        aws_access_key_id=credentials.access_key_id,
        aws_secret_access_key=credentials.secret_access_key
    )
    if conn:
        return conn
    else:
        log_red('Failure to authenticate to EC2.')
        return False
def is_continuous(docgraph, dominating_node):
    """return True, if the tokens dominated by the given node are all adjacent"""
    first_onset, last_offset = get_span_offsets(docgraph, dominating_node)
    span_range = xrange(first_onset, last_offset + 1)
    token_offsets = (docgraph.get_offsets(tok)
                     for tok in get_span(docgraph, dominating_node))
    char_positions = set(itertools.chain.from_iterable(
        xrange(on, off + 1) for on, off in token_offsets))
    for item in span_range:
        if item not in char_positions:
            return False
    return True
def load(self, id=None):
    """Load from database. Old values will be discarded."""
    if id is not None:
        # We are asked to change our ID to something else
        self.reset()
        self._setID(id)
    if not self._new and self._validID():
        self._loadDB()
        self._updated = time.time()
def as_array(self, transpose=False, items=None):
    """Convert the blockmanager data into a numpy array.

    Parameters
    ----------
    transpose : boolean, default False
        If True, transpose the return array
    items : list of strings or None
        Names of block items that will be included in the returned
        array. ``None`` means that all block items will be used

    Returns
    -------
    arr : ndarray
    """
    if len(self.blocks) == 0:
        arr = np.empty(self.shape, dtype=float)
        return arr.transpose() if transpose else arr

    if items is not None:
        mgr = self.reindex_axis(items, axis=0)
    else:
        mgr = self

    if self._is_single_block and mgr.blocks[0].is_datetimetz:
        # TODO(Block.get_values): Make DatetimeTZBlock.get_values
        # always be object dtype. Some callers seem to want the
        # DatetimeArray (previously DTI)
        arr = mgr.blocks[0].get_values(dtype=object)
    elif self._is_single_block or not self.is_mixed_type:
        arr = np.asarray(mgr.blocks[0].get_values())
    else:
        arr = mgr._interleave()

    return arr.transpose() if transpose else arr
def build_authorization_endpoint(self, request, disable_sso=None):
    """
    This function returns the ADFS authorization URL.

    Args:
        request(django.http.request.HttpRequest): A django Request object
        disable_sso(bool): Whether to disable single sign-on and force the
            ADFS server to show a login prompt.

    Returns:
        str: The redirect URI
    """
    self.load_config()
    redirect_to = request.GET.get(REDIRECT_FIELD_NAME, None)
    if not redirect_to:
        redirect_to = django_settings.LOGIN_REDIRECT_URL
    redirect_to = base64.urlsafe_b64encode(redirect_to.encode()).decode()
    query = QueryDict(mutable=True)
    query.update({
        "response_type": "code",
        "client_id": settings.CLIENT_ID,
        "resource": settings.RELYING_PARTY_ID,
        "redirect_uri": self.redirect_uri(request),
        "state": redirect_to,
    })
    if self._mode == "openid_connect":
        query["scope"] = "openid"
    if (disable_sso is None and settings.DISABLE_SSO) or disable_sso is True:
        query["prompt"] = "login"
    return "{0}?{1}".format(self.authorization_endpoint, query.urlencode())
def rotate(self, count=1, with_pane_before_only=False, with_pane_after_only=False):
    """
    Rotate panes.
    When `with_pane_before_only` or `with_pane_after_only` is True, only
    rotate with the pane before/after the active pane.
    """
    # Create (split, index, pane, weight) tuples.
    items = []
    current_pane_index = None

    for s in self.splits:
        for index, item in enumerate(s):
            if isinstance(item, Pane):
                items.append((s, index, item, s.weights[item]))
                if item == self.active_pane:
                    current_pane_index = len(items) - 1

    # Only before/after? Reduce list of panes.
    if with_pane_before_only:
        items = items[current_pane_index - 1:current_pane_index + 1]
    elif with_pane_after_only:
        items = items[current_pane_index:current_pane_index + 2]

    # Rotate positions.
    for i, triple in enumerate(items):
        split, index, pane, weight = triple
        new_item = items[(i + count) % len(items)][2]
        split[index] = new_item
        split.weights[new_item] = weight
def db_from_dataframes(
        db_filename,
        dataframes,
        primary_keys={},
        indices={},
        subdir=None,
        overwrite=False,
        version=1):
    """
    Create a sqlite3 database from a collection of DataFrame objects.

    Parameters
    ----------
    db_filename : str
        Name of database file to create
    dataframes : dict
        Dictionary from table names to DataFrame objects
    primary_keys : dict, optional
        Name of primary key column for each table
    indices : dict, optional
        Dictionary from table names to list of column name tuples
    subdir : str, optional
    overwrite : bool, optional
        If the database already exists, overwrite it?
    version : int, optional
    """
    if not (subdir is None or isinstance(subdir, str)):
        raise TypeError("Expected subdir to be None or str, got %s : %s" % (
            subdir, type(subdir)))
    db_path = build_path(db_filename, subdir)
    return db_from_dataframes_with_absolute_path(
        db_path,
        table_names_to_dataframes=dataframes,
        table_names_to_primary_keys=primary_keys,
        table_names_to_indices=indices,
        overwrite=overwrite,
        version=version)
def print_yielded(func):
    """
    Convert a generator into a function that prints all yielded elements

    >>> @print_yielded
    ... def x():
    ...     yield 3; yield None
    >>> x()
    3
    None
    """
    print_all = functools.partial(map, print)
    print_results = compose(more_itertools.recipes.consume, print_all, func)
    return functools.wraps(func)(print_results)
def add_permission(self):
    """Add permission to Lambda for the API Trigger."""
    statement_id = '{}_api_{}'.format(self.app_name,
                                      self.trigger_settings['api_name'])
    principal = 'apigateway.amazonaws.com'
    lambda_alias_arn = get_lambda_alias_arn(self.app_name, self.env, self.region)
    lambda_unqualified_arn = get_lambda_arn(self.app_name, self.env, self.region)
    resource_name = self.trigger_settings.get('resource', '')
    resource_name = resource_name.replace('/', '')
    method_api_source_arn = 'arn:aws:execute-api:{}:{}:{}/{}/{}/{}'.format(
        self.region, self.account_id, self.api_id, self.env,
        self.trigger_settings['method'], resource_name)
    global_api_source_arn = 'arn:aws:execute-api:{}:{}:{}/*/*/{}'.format(
        self.region, self.account_id, self.api_id, resource_name)

    add_lambda_permissions(
        function=lambda_alias_arn,
        statement_id=statement_id + self.trigger_settings['method'],
        action='lambda:InvokeFunction',
        principal=principal,
        env=self.env,
        region=self.region,
        source_arn=method_api_source_arn)
    add_lambda_permissions(
        function=lambda_alias_arn,
        statement_id=statement_id,
        action='lambda:InvokeFunction',
        principal=principal,
        env=self.env,
        region=self.region,
        source_arn=global_api_source_arn)
    add_lambda_permissions(
        function=lambda_unqualified_arn,
        statement_id=statement_id + self.trigger_settings['method'],
        action='lambda:InvokeFunction',
        principal=principal,
        env=self.env,
        region=self.region,
        source_arn=method_api_source_arn)
    add_lambda_permissions(
        function=lambda_unqualified_arn,
        statement_id=statement_id,
        action='lambda:InvokeFunction',
        principal=principal,
        env=self.env,
        region=self.region,
        source_arn=global_api_source_arn)
def _storage_attach(self, params):
    """
    Change storage medium in this VM.

    :param params: params to use with sub-command storageattach
    """
    args = shlex.split(params)
    yield from self.manager.execute("storageattach", [self._vmname] + args)
def pool_define(name,
                ptype,
                target=None,
                permissions=None,
                source_devices=None,
                source_dir=None,
                source_adapter=None,
                source_hosts=None,
                source_auth=None,
                source_name=None,
                source_format=None,
                transient=False,
                start=True,  # pylint: disable=redefined-outer-name
                **kwargs):
    '''
    Create libvirt pool.

    :param name: Pool name
    :param ptype:
        Pool type. See `libvirt documentation <https://libvirt.org/storage.html>`_
        for the possible values.
    :param target: Pool full path target
    :param permissions:
        Permissions to set on the target folder. This is mostly used for
        filesystem-based pool types. See pool-define-permissions_ for more
        details on this structure.
    :param source_devices:
        List of source devices for pools backed by physical devices.
        (Default: ``None``)

        Each item in the list is a dictionary with ``path`` and optionally
        ``part_separator`` keys. The path is the qualified name for iSCSI
        devices. Refer to `this libvirt page
        <https://libvirt.org/formatstorage.html#StoragePool>`_ for more
        information on the use of ``part_separator``
    :param source_dir:
        Path to the source directory for pools of type ``dir``, ``netfs`` or
        ``gluster``. (Default: ``None``)
    :param source_adapter:
        SCSI source definition. The value is a dictionary with ``type``,
        ``name``, ``parent``, ``managed``, ``parent_wwnn``, ``parent_wwpn``,
        ``parent_fabric_wwn``, ``wwnn``, ``wwpn`` and ``parent_address`` keys.

        The ``parent_address`` value is a dictionary with ``unique_id`` and
        ``address`` keys. The address represents a PCI address and is itself
        a dictionary with ``domain``, ``bus``, ``slot`` and ``function``
        properties. Refer to `this libvirt page
        <https://libvirt.org/formatstorage.html#StoragePool>`_ for the meaning
        and possible values of these properties.
    :param source_hosts:
        List of sources for pools backed by storage from remote servers. Each
        item is the hostname optionally followed by the port separated by a
        colon. (Default: ``None``)
    :param source_auth:
        Source authentication details. (Default: ``None``)

        The value is a dictionary with ``type``, ``username`` and ``secret``
        keys. The type can be one of ``ceph`` for Ceph RBD or ``chap`` for
        iSCSI sources.

        The ``secret`` value links to a libvirt secret object. It is a
        dictionary with ``type`` and ``value`` keys. The type value can be
        either ``uuid`` or ``usage``.

        Examples:

        .. code-block:: python

            source_auth={
                'type': 'ceph',
                'username': 'admin',
                'secret': {
                    'type': 'uuid',
                    'uuid': '2ec115d7-3a88-3ceb-bc12-0ac909a6fd87'
                }
            }

        .. code-block:: python

            source_auth={
                'type': 'chap',
                'username': 'myname',
                'secret': {
                    'type': 'usage',
                    'uuid': 'mycluster_myname'
                }
            }

    :param source_name: Identifier of name-based sources.
    :param source_format:
        String representing the source format. The possible values depend on
        the source type. See `libvirt documentation
        <https://libvirt.org/storage.html>`_ for the possible values.
    :param start: Pool start (default True)
    :param transient:
        When ``True``, the pool will be automatically undefined after being
        stopped. Note that a transient pool will force ``start`` to ``True``.
        (Default: ``False``)
    :param connection: libvirt connection URI, overriding defaults
    :param username: username to connect with, overriding defaults
    :param password: password to connect with, overriding defaults

    .. _pool-define-permissions:

    **Permissions definition**

    The permissions are described by a dictionary containing the following keys:

    mode
        The octal representation of the permissions. (Default: `0711`)

    owner
        the numeric user ID of the owner. (Default: from the parent folder)

    group
        the numeric ID of the group. (Default: from the parent folder)

    label
        the SELinux label. (Default: `None`)

    CLI Example:

    Local folder pool:

    .. code-block:: bash

        salt '*' virt.pool_define somepool dir target=/srv/mypool \
                permissions="{'mode': '0744', 'owner': 107, 'group': 107 }"

    CIFS backed pool:

    .. code-block:: bash

        salt '*' virt.pool_define myshare netfs source_format=cifs \
                source_dir=samba_share source_hosts="['example.com']" target=/mnt/cifs

    .. versionadded:: 2019.2.0
    '''
    conn = __get_conn(**kwargs)
    pool_xml = _gen_pool_xml(
        name,
        ptype,
        target,
        permissions=permissions,
        source_devices=source_devices,
        source_dir=source_dir,
        source_adapter=source_adapter,
        source_hosts=source_hosts,
        source_auth=source_auth,
        source_name=source_name,
        source_format=source_format
    )
    try:
        if transient:
            pool = conn.storagePoolCreateXML(pool_xml)
        else:
            pool = conn.storagePoolDefineXML(pool_xml)
            if start:
                pool.create()
    except libvirtError as err:
        raise err  # a real error we should report upwards
    finally:
        conn.close()

    # libvirt function will raise a libvirtError in case of failure
    return True
def dump(self, indentation=0):
    """Returns a string representation of the structure."""
    dump = []
    dump.append('[{0}]'.format(self.name))
    printable_bytes = [ord(i) for i in string.printable
                       if i not in string.whitespace]

    # Refer to the __set_format__ method for an explanation
    # of the following construct.
    for keys in self.__keys__:
        for key in keys:
            val = getattr(self, key)
            if isinstance(val, (int, long)):
                if key.startswith('Signature_'):
                    val_str = '%-8X' % (val)
                else:
                    val_str = '0x%-8X' % (val)
                if key == 'TimeDateStamp' or key == 'dwTimeStamp':
                    try:
                        val_str += ' [%s UTC]' % time.asctime(time.gmtime(val))
                    except ValueError as e:
                        val_str += ' [INVALID TIME]'
            else:
                val_str = bytearray(val)
                if key.startswith('Signature'):
                    val_str = ''.join(
                        ['{:02X}'.format(i) for i in val_str.rstrip(b'\x00')])
                else:
                    val_str = ''.join(
                        [chr(i) if (i in printable_bytes) else
                         '\\x{0:02x}'.format(i)
                         for i in val_str.rstrip(b'\x00')])

            dump.append('0x%-8X 0x%-3X %-30s %s' % (
                self.__field_offsets__[key] + self.__file_offset__,
                self.__field_offsets__[key], key + ':', val_str))

    return dump
def getData(self, exten=None):
    """
    Return just the data array from the specified extension.

    fileutil is used instead of fits to account for non-FITS input images.
    openImage returns a fits object.
    """
    if exten.lower().find('sci') > -1:
        # For SCI extensions, the current file will have the data
        fname = self._filename
    else:
        # otherwise, the data being requested may need to come from a
        # separate file, as is the case with WFPC2 DQ data.
        #
        # convert exten to 'sci',extver to get the DQ info for that chip
        extn = exten.split(',')
        sci_chip = self._image[self.scienceExt, int(extn[1])]
        fname = sci_chip.dqfile

    extnum = self._interpretExten(exten)
    if self._image[extnum].data is None:
        if os.path.exists(fname):
            _image = fileutil.openImage(fname, clobber=False, memmap=False)
            _data = fileutil.getExtn(_image, extn=exten).data
            _image.close()
            del _image
            self._image[extnum].data = _data
        else:
            _data = None
    else:
        _data = self._image[extnum].data
    return _data
def next_retrieve_group_item(self, last_item=None, entry=None):
    """Return the item to start from in the next reviews group."""
    next_item = None
    gerrit_version = self.version
    if gerrit_version[0] == 2 and gerrit_version[1] > 9:
        if last_item is None:
            next_item = 0
        else:
            next_item = last_item
    elif gerrit_version[0] == 2 and gerrit_version[1] == 9:
        # https://groups.google.com/forum/#!topic/repo-discuss/yQgRR5hlS3E
        cause = "Gerrit 2.9.0 does not support pagination"
        raise BackendError(cause=cause)
    else:
        if entry is not None:
            next_item = entry['sortKey']
    return next_item
def GetData(fitsfile, EPIC, campaign, clobber=False,
            saturation_tolerance=-0.1,
            bad_bits=[1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 16, 17],
            get_hires=False, get_nearby=False, aperture=None, **kwargs):
    '''
    Returns a :py:obj:`DataContainer` instance with the raw data for the target.

    :param str fitsfile: The full raw target pixel file path
    :param bool clobber: Overwrite existing files? Default :py:obj:`False`
    :param float saturation_tolerance: Target is considered saturated \
        if flux is within this fraction of the pixel well depth. \
        Default -0.1
    :param array_like bad_bits: Flagged :py:obj:`QUALITY` bits to consider \
        outliers when computing the model. \
        Default `[1,2,3,4,5,6,7,8,9,11,12,13,14,16,17]`
    :param bool get_hires: Download a high resolution image of the target? \
        Default :py:obj:`False`
    :param bool get_nearby: Retrieve location of nearby sources? \
        Default :py:obj:`False`
    '''
    # Get the npz file name
    filename = os.path.join(EVEREST_DAT, 'k2', 'c%02d' % campaign,
                            ('%09d' % EPIC)[:4] + '00000',
                            ('%09d' % EPIC)[4:], 'data.npz')

    # Create the dir
    if not os.path.exists(os.path.dirname(filename)):
        os.makedirs(os.path.dirname(filename))

    # Check for saved data
    if not os.path.exists(filename) or clobber:
        log.info("Fetching data for target...")

        # Load the tpf
        with pyfits.open(fitsfile) as f:
            qdata = f[1].data

        # Get the header info
        fitsheader = [pyfits.getheader(fitsfile, 0).cards,
                      pyfits.getheader(fitsfile, 1).cards,
                      pyfits.getheader(fitsfile, 2).cards]

        # Get a hi res image of the target
        if get_hires:
            try:
                hires = GetHiResImage(EPIC)
            except ValueError:
                hires = None
        else:
            hires = None

        # Get nearby sources
        if get_nearby:
            try:
                nearby = GetSources(EPIC)
            except ValueError:
                nearby = []
        else:
            nearby = []

        # Get the arrays
        cadn = np.array(qdata.field('CADENCENO'), dtype='int32')
        time = np.array(qdata.field('TIME'), dtype='float64')
        fpix = np.array(qdata.field('FLUX'), dtype='float64')
        fpix_err = np.array(qdata.field('FLUX_ERR'), dtype='float64')
        qual = np.array(qdata.field('QUALITY'), dtype=int)

        # Get rid of NaNs in the time array by interpolating
        naninds = np.where(np.isnan(time))
        time = Interpolate(np.arange(0, len(time)), naninds, time)

        # Get the motion vectors (if available!)
        pc1 = np.array(qdata.field('POS_CORR1'), dtype='float64')
        pc2 = np.array(qdata.field('POS_CORR2'), dtype='float64')
        if not np.all(np.isnan(pc1)) and not np.all(np.isnan(pc2)):
            pc1 = Interpolate(time, np.where(np.isnan(pc1)), pc1)
            pc2 = Interpolate(time, np.where(np.isnan(pc2)), pc2)
        else:
            pc1 = None
            pc2 = None

        # Get the static pixel images for plotting
        pixel_images = [fpix[0], fpix[len(fpix) // 2], fpix[len(fpix) - 1]]

        # Get the aperture interactively
        if aperture is None:
            aperture = ApertureSelector(time[::10], fpix[::10],
                                        title='EPIC %d' % EPIC).aperture
        if np.sum(aperture) == 0:
            raise ValueError("Empty aperture!")

        # Atomically write to disk.
        # http://stackoverflow.com/questions/2333872/
        # atomic-writing-to-file-with-python
        if not os.path.exists(os.path.dirname(filename)):
            os.makedirs(os.path.dirname(filename))
        f = NamedTemporaryFile("wb", delete=False)
        np.savez_compressed(f, cadn=cadn, time=time, fpix=fpix,
                            fpix_err=fpix_err, qual=qual, aperture=aperture,
                            pc1=pc1, pc2=pc2, fitsheader=fitsheader,
                            pixel_images=pixel_images, nearby=nearby,
                            hires=hires)
        f.flush()
        os.fsync(f.fileno())
        f.close()
        shutil.move(f.name, filename)

    # Load
    data = np.load(filename)
    aperture = data['aperture'][()]
    pixel_images = data['pixel_images']
    nearby = data['nearby'][()]
    hires = data['hires'][()]
    fitsheader = data['fitsheader']
    cadn = data['cadn']
    time = data['time']
    fpix = data['fpix']
    fpix_err = data['fpix_err']
    qual = data['qual']
    pc1 = data['pc1']
    pc2 = data['pc2']

    # Compute the saturation flux and the 97.5th percentile
    # flux in each pixel of the aperture. We're going
    # to compare these to decide if the star is saturated.
    satflx = SaturationFlux(EPIC, campaign=campaign) * \
        (1. + saturation_tolerance)
    f97 = np.zeros((fpix.shape[1], fpix.shape[2]))
    for i in range(fpix.shape[1]):
        for j in range(fpix.shape[2]):
            if aperture[i, j]:
                # Let's remove NaNs...
                tmp = np.delete(fpix[:, i, j],
                                np.where(np.isnan(fpix[:, i, j])))
                # ... and really bad outliers...
                if len(tmp):
                    f = SavGol(tmp)
                    med = np.nanmedian(f)
                    MAD = 1.4826 * np.nanmedian(np.abs(f - med))
                    bad = np.where((f > med + 10. * MAD) |
                                   (f < med - 10. * MAD))[0]
                    np.delete(tmp, bad)
                    # ... so we can compute the 97.5th percentile flux
                    i97 = int(0.975 * len(tmp))
                    tmp = tmp[np.argsort(tmp)[i97]]
                    f97[i, j] = tmp

    # Check if any of the pixels are actually saturated
    if np.nanmax(f97) <= satflx:
        log.info("No saturated columns detected.")
        saturated = False
        aperture[np.isnan(fpix[0])] = 0
        ap = np.where(aperture & 1)
        fpix2D = np.array([f[ap] for f in fpix], dtype='float64')
        fpix_err2D = np.array([p[ap] for p in fpix_err], dtype='float64')
    else:
        # We need to collapse the saturated columns
        saturated = True
        ncol = 0
        fpixnew = []
        ferrnew = []
        for j in range(aperture.shape[1]):
            if np.any(f97[:, j] > satflx):
                marked = False
                collapsed = np.zeros(len(fpix[:, 0, 0]))
                collapsed_err2 = np.zeros(len(fpix[:, 0, 0]))
                for i in range(aperture.shape[0]):
                    if aperture[i, j]:
                        if not marked:
                            aperture[i, j] = AP_COLLAPSED_PIXEL
                            marked = True
                        else:
                            aperture[i, j] = AP_SATURATED_PIXEL
                        collapsed += fpix[:, i, j]
                        collapsed_err2 += fpix_err[:, i, j] ** 2
                if np.any(collapsed):
                    fpixnew.append(collapsed)
                    ferrnew.append(np.sqrt(collapsed_err2))
                    ncol += 1
            else:
                for i in range(aperture.shape[0]):
                    if aperture[i, j]:
                        fpixnew.append(fpix[:, i, j])
                        ferrnew.append(fpix_err[:, i, j])
        fpix2D = np.array(fpixnew).T
        fpix_err2D = np.array(ferrnew).T
        log.info("Collapsed %d saturated column(s)." % ncol)

    # Compute the background
    binds = np.where(aperture ^ 1)
    if RemoveBackground(EPIC, campaign=campaign) and (len(binds[0]) > 0):
        bkg = np.nanmedian(np.array([f[binds] for f in fpix],
                                    dtype='float64'), axis=1)
        # Uncertainty of the median:
        # http://davidmlane.com/hyperstat/A106993.html
        bkg_err = 1.253 * np.nanmedian(np.array([e[binds] for e in fpix_err],
                                                dtype='float64'), axis=1) \
            / np.sqrt(len(binds[0]))
        bkg = bkg.reshape(-1, 1)
        bkg_err = bkg_err.reshape(-1, 1)
    else:
        bkg = 0.
        bkg_err = 0.

    # Make everything 2D and remove the background
    fpix = fpix2D - bkg
    fpix_err = np.sqrt(fpix_err2D ** 2 + bkg_err ** 2)
    flux = np.sum(fpix, axis=1)

    # Get NaN data points
    nanmask = np.where(np.isnan(flux) | (flux == 0))[0]

    # Get flagged data points -- we won't train our model on them
    badmask = []
    for b in bad_bits:
        badmask += list(np.where(qual & 2 ** (b - 1))[0])

    # Flag >10 sigma outliers -- same thing
    tmpmask = np.array(list(set(np.concatenate([badmask, nanmask]))))
    t = np.delete(time, tmpmask)
    f = np.delete(flux, tmpmask)
    f = SavGol(f)
    med = np.nanmedian(f)
    MAD = 1.4826 * np.nanmedian(np.abs(f - med))
    bad = np.where((f > med + 10. * MAD) | (f < med - 10. * MAD))[0]
    badmask.extend([np.argmax(time == t[i]) for i in bad])

    # Campaign 2 hack: the first day or two are screwed up
    if campaign == 2:
        badmask.extend(np.where(time < 2061.5)[0])

    # Finalize the mask
    badmask = np.array(sorted(list(set(badmask))))

    # Interpolate the nans
    fpix = Interpolate(time, nanmask, fpix)
    fpix_err = Interpolate(time, nanmask, fpix_err)

    # Return
    data = DataContainer()
    data.ID = EPIC
    data.campaign = campaign
    data.cadn = cadn
    data.time = time
    data.fpix = fpix
    data.fpix_err = fpix_err
    data.nanmask = nanmask
    data.badmask = badmask
    data.aperture = aperture
    data.aperture_name = 'custom'
    data.apertures = dict(custom=aperture)
    data.quality = qual
    data.Xpos = pc1
    data.Ypos = pc2
    data.meta = fitsheader
    data.mag = fitsheader[0]['KEPMAG'][1]
    if type(data.mag) is pyfits.card.Undefined:
        data.mag = np.nan
    data.pixel_images = pixel_images
    data.nearby = nearby
    data.hires = hires
    data.saturated = saturated
    data.bkg = bkg
    return data
def direct_perms_for_user(cls, instance, user, db_session=None):
    """
    returns permissions that given user has for this resource
    without ones inherited from groups that user belongs to

    :param instance:
    :param user:
    :param db_session:
    :return:
    """
    db_session = get_db_session(db_session, instance)
    query = db_session.query(
        cls.models_proxy.UserResourcePermission.user_id,
        cls.models_proxy.UserResourcePermission.perm_name,
    )
    query = query.filter(
        cls.models_proxy.UserResourcePermission.user_id == user.id)
    query = query.filter(
        cls.models_proxy.UserResourcePermission.resource_id == instance.resource_id)

    perms = [
        PermissionTuple(user, row.perm_name, "user", None, instance, False, True)
        for row in query
    ]

    # include all perms if user is the owner of this resource
    if instance.owner_user_id == user.id:
        perms.append(
            PermissionTuple(user, ALL_PERMISSIONS, "user", None, instance, True)
        )
    return perms
def get_decimal_time(self):
    '''
    Returns the time of the catalogue as a decimal
    '''
    return decimal_time(self.data['year'],
                        self.data['month'],
                        self.data['day'],
                        self.data['hour'],
                        self.data['minute'],
                        self.data['second'])
def is_location(v) -> (bool, str):
    """
    Boolean function for checking if v is in a location format

    Args:
        v:

    Returns: bool
    """
    def convert2float(value):
        try:
            float_num = float(value)
            return float_num
        except ValueError:
            return False

    if not isinstance(v, str):
        return False, v
    split_lst = v.split(":")
    if len(split_lst) != 5:
        return False, v
    # the fourth field must be a valid latitude (|lat| <= 90) ...
    if convert2float(split_lst[3]):
        latitude = abs(convert2float(split_lst[3]))
        if latitude > 90:
            return False, v
    # ... and the fifth a valid longitude (|long| <= 180); the original
    # code re-read field 3 here, which is fixed to field 4
    if convert2float(split_lst[4]):
        longitude = abs(convert2float(split_lst[4]))
        if longitude > 180:
            return False, v
    return True, v
Boolean function for checking whether v is in the expected location format

Args:
    v:

Returns: bool
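A quick sanity check of the accepted format (five colon-separated fields, with latitude in field 3 and longitude in field 4); the values below are made-up examples:

print(is_location("Boston:MA:USA:42.36:-71.06"))  # (True, ...)
print(is_location("Oslo:Norway:59.91"))           # (False, ...) -- only 3 fields
print(is_location("a:b:c:95.0:10.0"))             # (False, ...) -- |latitude| > 90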
def winapi(context, names): """Query Win32 API declarations. Windows database must be prepared before using this. """ logging.info(_('Entering winapi mode')) sense = context.obj['sense'] none = True for name in names: code = sense.query_args(name) if code: none = False print(stylify_code(code)) else: logging.warning(_('Function not found: %s'), name) sys.exit(1 if none else 0)
Query Win32 API declarations. Windows database must be prepared before using this.
def _uniform_dist(self, spread, total):
    """
    Produce a uniform distribution of `total` across a list
    of `spread` size. The result is non-random and uniform.
    """
    fraction, fixed_increment = math.modf(total / spread)
    fixed_increment = int(fixed_increment)
    balance = 0
    dist = []

    for _ in range(spread):
        balance += fraction
        withdrawal = 1 if balance > 0.5 else 0
        if withdrawal:
            balance -= withdrawal
        dist.append(fixed_increment + withdrawal)
    return dist
Produce a uniform distribution of `total` across a list of `spread` size. The result is non-random and uniform.
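Tracing the fractional-balance logic with a standalone copy of the method body (`self` is unused, so it can run directly):

import math

def uniform_dist(spread, total):
    fraction, fixed_increment = math.modf(total / spread)
    fixed_increment = int(fixed_increment)
    balance = 0
    dist = []
    for _ in range(spread):
        # accumulate the fractional part; pay out an extra unit once it exceeds 0.5
        balance += fraction
        withdrawal = 1 if balance > 0.5 else 0
        if withdrawal:
            balance -= withdrawal
        dist.append(fixed_increment + withdrawal)
    return dist

print(uniform_dist(4, 10))  # [2, 3, 2, 3] -- sums to 10, each slot within 1 of total/spread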
def maybe_dotted(module, throw=True): """ If ``module`` is a dotted string pointing to the module, imports and returns the module object. """ try: return Configurator().maybe_dotted(module) except ImportError as e: err = '%s not found. %s' % (module, e) if throw: raise ImportError(err) else: log.error(err) return None
If ``module`` is a dotted string pointing to the module, imports and returns the module object.
def state_size(self): """State size of the LSTMStateTuple.""" return (LSTMStateTuple(self._num_units, self._num_units) if self._state_is_tuple else 2 * self._num_units)
State size of the LSTMStateTuple.
def blpop(self, keys, timeout=0): """ LPOP a value off of the first non-empty list named in the ``keys`` list. If none of the lists in ``keys`` has a value to LPOP, then block for ``timeout`` seconds, or until a value gets pushed on to one of the lists. If timeout is 0, then block indefinitely. """ if timeout is None: timeout = 0 keys = list_or_args(keys, None) keys.append(timeout) return self.execute_command('BLPOP', *keys)
LPOP a value off of the first non-empty list named in the ``keys`` list. If none of the lists in ``keys`` has a value to LPOP, then block for ``timeout`` seconds, or until a value gets pushed on to one of the lists. If timeout is 0, then block indefinitely.
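A hedged usage sketch, assuming a connected redis-py style client `r` exposing this method:

# Pop from the first non-empty queue, waiting up to 5 seconds.
item = r.blpop(['jobs:high', 'jobs:low'], timeout=5)
if item is not None:
    queue_name, payload = item  # BLPOP also reports which list the value came from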
def putout(ofile, keylist, Rec):
    """
    writes out a magic format record to ofile
    """
    with open(ofile, 'a') as pmag_out:
        outstring = ""
        for key in keylist:
            try:
                outstring = outstring + '\t' + str(Rec[key]).strip()
            except KeyError:
                print('key not found in record:', key)
        outstring = outstring + '\n'
        pmag_out.write(outstring[1:])
writes out a magic format record to ofile
def cmd(self, cmd, verbose=False): """Executes the specified command on the remote host. The cmd must be format safe, this means { and } must be doubled, thusly: echo /var/local/maildir/{{cur,new}} the cmd can include the format word 'maildir' to be replaced by self.directory. eg: echo {maildir}/{{cur,new}} """ command = cmd.format(maildir=self.directory) if verbose: print(command) p = Popen([ "ssh", "-T", self.host, command ], stdin=PIPE, stdout=PIPE, stderr=PIPE) stdout,stderr = p.communicate() return stdout
Executes the specified command on the remote host. The cmd must be format safe, this means { and } must be doubled, thusly: echo /var/local/maildir/{{cur,new}} the cmd can include the format word 'maildir' to be replaced by self.directory. eg: echo {maildir}/{{cur,new}}
def coords2px(y, x):
    """ Transforming coordinates to pixels.

    Arguments:
        y : np array
            vector in which (y[0], y[1]) and (y[2], y[3]) are the
            corners of a bounding box.
        x : image
            an image
    Returns:
        Y : image of shape x.shape
    """
    rows = np.rint([y[0], y[0], y[2], y[2]]).astype(int)
    cols = np.rint([y[1], y[3], y[1], y[3]]).astype(int)
    r, c, *_ = x.shape
    Y = np.zeros((r, c))
    Y[rows, cols] = 1
    return Y
Transforming coordinates to pixels.

Arguments:
    y : np array
        vector in which (y[0], y[1]) and (y[2], y[3]) are the
        corners of a bounding box.
    x : image
        an image
Returns:
    Y : image of shape x.shape
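A small worked example: with corners (0, 0) and (2, 3) on a 4x5 image, exactly the four corner pixels of the box are set:

import numpy as np

y = np.array([0, 0, 2, 3])   # corners (y, x) = (0, 0) and (2, 3)
x = np.zeros((4, 5, 3))      # dummy 4x5 RGB image
mask = coords2px(y, x)
# mask is (4, 5) with ones at (0, 0), (0, 3), (2, 0) and (2, 3)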
def exclusive_match(self, field, value):
    """Match exactly the given value(s), with no other data in the field.

    Arguments:
        field (str): The field to check for the value.
                The field must be namespaced according to Elasticsearch rules
                using the dot syntax.
                For example, ``"mdf.source_name"`` is the ``source_name`` field
                of the ``mdf`` dictionary.
        value (str or list of str): The value(s) to match exactly.

    Returns:
        SearchHelper: Self
    """
    if isinstance(value, str):
        value = [value]

    # Hacky way to get ES to do exclusive search
    # Essentially have a big range search that matches NOT anything
    # Except for the actual values
    # Example: [foo, bar, baz] =>
    #   (NOT {* TO foo} AND [foo TO foo] AND NOT {foo to bar} AND [bar TO bar]
    #    AND NOT {bar TO baz} AND [baz TO baz] AND NOT {baz TO *})
    # Except it must be sorted to not overlap
    value.sort()

    # Start with removing everything before first value
    self.exclude_range(field, "*", value[0], inclusive=False, new_group=True)
    # Select first value
    self.match_range(field, value[0], value[0])
    # Do the rest of the values
    for index, val in enumerate(value[1:]):
        # value[index] is the previous element, since we enumerate value[1:]
        self.exclude_range(field, value[index], val, inclusive=False)
        self.match_range(field, val, val)
    # Add end
    self.exclude_range(field, value[-1], "*", inclusive=False)
    # Done
    return self
Match exactly the given value(s), with no other data in the field. Arguments: field (str): The field to check for the value. The field must be namespaced according to Elasticsearch rules using the dot syntax. For example, ``"mdf.source_name"`` is the ``source_name`` field of the ``mdf`` dictionary. value (str or list of str): The value(s) to match exactly. Returns: SearchHelper: Self
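A hedged usage sketch of the range chain this method builds; `sh` stands for a SearchHelper instance and the field and values are made-up examples:

# Hypothetical field and values; values are sorted internally.
sh.exclusive_match("mdf.source_name", ["nist_xps_db", "oqmd"])
# Builds, per the comment in the method:
#   NOT {* TO nist_xps_db} AND [nist_xps_db TO nist_xps_db]
#   AND NOT {nist_xps_db TO oqmd} AND [oqmd TO oqmd] AND NOT {oqmd TO *}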
def tags(self):
    '''Display tag information for all samples in database'''
    tags = self.workbench.get_all_tags()
    if not tags:
        return
    tag_df = pd.DataFrame(tags)
    tag_df = self.vectorize(tag_df, 'tags')
    print('\n%sSamples in Database%s' % (color.LightPurple, color.Normal))
    self.top_corr(tag_df)
Display tag information for all samples in database
def get_local_version(sigdir, sig): """Get the local version of a signature""" version = None filename = os.path.join(sigdir, '%s.cvd' % sig) if os.path.exists(filename): cmd = ['sigtool', '-i', filename] sigtool = Popen(cmd, stdout=PIPE, stderr=PIPE) while True: line = sigtool.stdout.readline() if line and line.startswith('Version:'): version = line.split()[1] break if not line: break sigtool.wait() return version
Get the local version of a signature
def _netinfo_freebsd_netbsd(): ''' Get process information for network connections using sockstat ''' ret = {} # NetBSD requires '-n' to disable port-to-service resolution out = __salt__['cmd.run']( 'sockstat -46 {0} | tail -n+2'.format( '-n' if __grains__['kernel'] == 'NetBSD' else '' ), python_shell=True ) for line in out.splitlines(): user, cmd, pid, _, proto, local_addr, remote_addr = line.split() local_addr = '.'.join(local_addr.rsplit(':', 1)) remote_addr = '.'.join(remote_addr.rsplit(':', 1)) ret.setdefault( local_addr, {}).setdefault( remote_addr, {}).setdefault( proto, {}).setdefault( pid, {})['user'] = user ret[local_addr][remote_addr][proto][pid]['cmd'] = cmd return ret
Get process information for network connections using sockstat
def write_tsv(self, path, encoding='UTF-8'): """Write expression matrix to a tab-delimited text file. Parameters ---------- path: str The path of the output file. encoding: str, optional The file encoding. ("UTF-8") Returns ------- None """ assert isinstance(path, (str, _oldstr)) assert isinstance(encoding, (str, _oldstr)) sep = '\t' if six.PY2: sep = sep.encode('UTF-8') self.to_csv( path, sep=sep, float_format='%.5f', mode='w', encoding=encoding, header=True ) logger.info('Wrote expression profile "%s" with %d genes to "%s".', self.name, self.p, path)
Write expression matrix to a tab-delimited text file. Parameters ---------- path: str The path of the output file. encoding: str, optional The file encoding. ("UTF-8") Returns ------- None
def handle_cmd_options():
    '''
    Get the options from the command line.
    '''
    parser = OptionParser()
    parser.add_option("-s", "--silent",
                      action="store_true", dest="silent",
                      help="suppress any warnings", default=False)
    (options, args) = parser.parse_args()
    return options, args
Get the options from the command line.
def visibility_changed(self, enable): """ Dock widget visibility has changed. """ if self.dockwidget is None: return if enable: self.dockwidget.raise_() widget = self.get_focus_widget() if widget is not None and self.undocked_window is not None: widget.setFocus() visible = self.dockwidget.isVisible() or self.ismaximized if self.DISABLE_ACTIONS_WHEN_HIDDEN: toggle_actions(self.plugin_actions, visible) self.isvisible = enable and visible if self.isvisible: self.refresh_plugin()
Dock widget visibility has changed.
def FromTXOutputsConfirmed(outputs):
    """
    Get unspent outputs from a list of transaction outputs.

    Args:
        outputs (list): of neo.Core.TX.Transaction.TransactionOutput items.

    Returns:
        UnspentCoinState:
    """
    uns = UnspentCoinState()
    uns.Items = [int(CoinState.Confirmed)] * len(outputs)
    return uns
Get unspent outputs from a list of transaction outputs. Args: outputs (list): of neo.Core.TX.Transaction.TransactionOutput items. Returns: UnspentCoinState:
def _get_best_prediction(self, record, train=True):
    """
    Gets the prediction from the tree with the lowest mean absolute error.
    """
    if not self.trees:
        return
    # Select the tree whose running mean absolute error is lowest.
    best_tree = min(self.trees, key=lambda tree: tree.mae.mean)
    prediction, tree_mae = best_tree.predict(record, train=train)
    return prediction.mean
Gets the prediction from the tree with the lowest mean absolute error.
def finish(self):
    """Wait for checker threads to finish."""
    if not self.urlqueue.empty():
        # This happens when all checker threads died.
        self.cancel()
    for t in self.threads:
        t.stop()
Wait for checker threads to finish.
def get_formatter(name): """Return the named formatter function. See the function "set_formatter" for details. """ if name in ('self', 'instance', 'this'): return af_self elif name == 'class': return af_class elif name in ('named', 'param', 'parameter'): return af_named elif name in ('default', 'optional'): return af_default # elif name in ('anonymous', 'arbitrary', 'unnamed'): # return af_anonymous elif name in ('keyword', 'pair', 'pairs'): return af_keyword else: raise ValueError('unknown trace formatter %r' % name)
Return the named formatter function. See the function "set_formatter" for details.
def get_info(df, group, info=['mean', 'std']): """ Aggregate mean and std with the given group. """ agg = df.groupby(group).agg(info) agg.columns = agg.columns.droplevel(0) return agg
Aggregate mean and std with the given group.
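A minimal pandas illustration, assuming a single value column so that dropping the top column level leaves just the statistic names:

import pandas as pd

df = pd.DataFrame({'group': ['a', 'a', 'b', 'b'],
                   'value': [1.0, 3.0, 5.0, 9.0]})
print(get_info(df, 'group'))
#            mean       std
# group
# a           2.0  1.414214
# b           7.0  2.828427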
def get_option(env_name, section, opt_name, default=None): """Return a configuration setting from environment var or .pyftpsyncrc""" val = os.environ.get(env_name) if val is None: try: val = _pyftpsyncrc_parser.get(section, opt_name) except (compat.configparser.NoSectionError, compat.configparser.NoOptionError): pass if val is None: val = default return val
Return a configuration setting from environment var or .pyftpsyncrc
def remove_project(self, path): """ Removes a project. :param path: Project path. :type path: unicode :return: Method success. :rtype: bool """ project_node = foundations.common.get_first_item(self.__model.get_project_nodes(path)) if not project_node: self.__engine.notifications_manager.warnify( "{0} | '{1}' project is not opened!".format(self.__class__.__name__, path)) return False LOGGER.info("{0} | Removing '{1}' project!".format(self.__class__.__name__, path)) self.__model.delete_project_nodes(project_node) return True
Removes a project. :param path: Project path. :type path: unicode :return: Method success. :rtype: bool
def image_load_time(self): """ Returns aggregate image load time for all pages. """ load_times = self.get_load_times('image') return round(mean(load_times), self.decimal_precision)
Returns aggregate image load time for all pages.
def load(self,
         source_list: Iterable[List[str]],
         target_sentences: Iterable[List[Any]],
         num_samples_per_bucket: List[int]) -> 'ParallelDataSet':
    """
    Creates a parallel dataset based on a source list of strings and target sentences.
    Returns a `sockeye.data_io.ParallelDataSet`.

    :param source_list: Source list of strings (e.g., filenames).
    :param target_sentences: Target sentences used to do bucketing.
    :param num_samples_per_bucket: Number of samples per bucket.
    :return: Returns a parallel dataset `sockeye.data_io.ParallelDataSet`.
    """
    assert len(num_samples_per_bucket) == len(self.buckets)

    data_source = [np.full((num_samples,), self.pad_id, dtype=object)
                   for num_samples in num_samples_per_bucket]
    # data_source is a List[np.array[str]] indexed as [bucket][sample] -> str.
    # Loading into memory is deferred to the iterator, since the full data
    # is not expected to fit in memory.
    data_target = [np.full((num_samples, target_len), self.pad_id, dtype=self.dtype)
                   for (source_len, target_len), num_samples in zip(self.buckets, num_samples_per_bucket)]
    data_label = [np.full((num_samples, target_len), self.pad_id, dtype=self.dtype)
                  for (source_len, target_len), num_samples in zip(self.buckets, num_samples_per_bucket)]

    bucket_sample_index = [0 for buck in self.buckets]

    # track amount of padding introduced through bucketing
    num_tokens_target = 0
    num_pad_target = 0

    # Bucket sentences as padded np arrays
    for source, target in zip(source_list, target_sentences):
        target_len = len(target)
        buck_index, buck = get_target_bucket(self.buckets, target_len)
        if buck is None:
            continue  # skip this sentence pair

        num_tokens_target += buck[1]
        num_pad_target += buck[1] - target_len

        sample_index = bucket_sample_index[buck_index]
        data_source[buck_index][sample_index] = source
        data_target[buck_index][sample_index, :target_len] = target
        # NOTE(fhieber): while this is wasteful w.r.t memory, we need to explicitly create the label sequence
        # with the EOS symbol here sentence-wise and not per-batch due to variable sequence length within a batch.
        # Once MXNet allows item assignments given a list of indices (probably MXNet 1.0): e.g a[[0,1,5,2]] = x,
        # we can try again to compute the label sequence on the fly in next().
        data_label[buck_index][sample_index, :target_len] = target[1:] + [self.eos_id]

        bucket_sample_index[buck_index] += 1

    for i in range(len(data_source)):
        data_target[i] = mx.nd.array(data_target[i], dtype=self.dtype)
        data_label[i] = mx.nd.array(data_label[i], dtype=self.dtype)

    if num_tokens_target > 0:
        logger.info("Created bucketed parallel data set. Introduced padding: target=%.1f%%",
                    num_pad_target / num_tokens_target * 100)

    return ParallelDataSet(data_source, data_target, data_label)
Creates a parallel dataset based on a source list of strings and target sentences.
Returns a `sockeye.data_io.ParallelDataSet`.

:param source_list: Source list of strings (e.g., filenames).
:param target_sentences: Target sentences used to do bucketing.
:param num_samples_per_bucket: Number of samples per bucket.
:return: Returns a parallel dataset `sockeye.data_io.ParallelDataSet`.
def __construct_list(self, list_value):
    """
    Loop list/set/tuple and parse values
    """
    return [self.__iterate_value(value) for value in list_value]
Loop list/set/tuple and parse values
def datetime_to_numeric(array, offset=None, datetime_unit=None, dtype=float):
    """Convert an array containing datetime-like data to an array of floats.

    Parameters
    ----------
    array : np.array
        Input data
    offset : Scalar with the same type of array or None
        If None, subtract minimum values to reduce round off error
    datetime_unit : None or any of {'Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms',
        'us', 'ns', 'ps', 'fs', 'as'}
    dtype : target dtype

    Returns
    -------
    array
    """
    # TODO: make this function dask-compatible?
    if offset is None:
        offset = array.min()
    array = array - offset

    if not hasattr(array, 'dtype'):  # scalar is converted to 0d-array
        array = np.array(array)

    if array.dtype.kind in 'O':
        # possibly convert object array containing datetime.timedelta
        array = np.asarray(pd.Series(array.ravel())).reshape(array.shape)

    if datetime_unit:
        array = array / np.timedelta64(1, datetime_unit)

    # convert np.NaT to np.nan
    if array.dtype.kind in 'mM':
        return np.where(isnull(array), np.nan, array.astype(dtype))
    return array.astype(dtype)
Convert an array containing datetime-like data to an array of floats.

Parameters
----------
array : np.array
    Input data
offset : Scalar with the same type of array or None
    If None, subtract minimum values to reduce round off error
datetime_unit : None or any of {'Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms',
    'us', 'ns', 'ps', 'fs', 'as'}
dtype : target dtype

Returns
-------
array
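A brief numpy illustration (assuming `isnull` in the surrounding module behaves like pandas.isnull): datetimes become float offsets from the array minimum, here in days:

import numpy as np

times = np.array(['2000-01-01', '2000-01-02', '2000-01-04'],
                 dtype='datetime64[ns]')
print(datetime_to_numeric(times, datetime_unit='D'))
# [0. 1. 3.]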
def _field_value_html(self, field): """Return the html representation of the value of the given field""" if field in self.fields: return unicode(self.get(field)) else: return self.get_timemachine_instance(field)._object_name_html()
Return the html representation of the value of the given field
def tlg_plaintext_cleanup(text, rm_punctuation=False, rm_periods=False):
    r"""Remove and substitute post-processing for Greek TLG text.
    TODO: Surely more junk to pull out. Please submit bugs!
    TODO: {.+?}|\(.+?\) working?
    TODO: This is rather slow now; help in speeding it up is welcome.
    """
    remove_comp = regex.compile(r'-\n|«|»|<|>|\.\.\.|‘|’|_|{.+?}|\(.+?\)|[a-zA-Z0-9]',
                                flags=regex.VERSION1)
    text = remove_comp.sub('', text)

    if rm_punctuation:
        punctuation = [',', '·', ':', '"', "'", '?', '-', '!', '*', '[', ']', '{', '}']
        if rm_periods:
            punctuation += ['.', ';']
        # second try at removing some punctuation; merge with above regex
        new_text = ''.join(char for char in text if char not in punctuation)
        if new_text:
            text = new_text

    # replace line breaks w/ space
    replace_comp = regex.compile(r'\n')
    text = replace_comp.sub(' ', text)

    comp_space = regex.compile(r'\s+')
    text = comp_space.sub(' ', text)

    return text
Remove and substitute post-processing for Greek TLG text.
TODO: Surely more junk to pull out. Please submit bugs!
TODO: {.+?}|\(.+?\) working?
TODO: This is rather slow now; help in speeding it up is welcome.
def has_space(self, length=1, offset=0): """Returns boolean if self.pos + length < working string length.""" return self.pos + (length + offset) - 1 < self.length
Returns boolean if self.pos + length < working string length.
def assert_all_of_selectors(self, selector, *locators, **kwargs): """ Asserts that all of the provided selectors are present on the given page or descendants of the current node. If options are provided, the assertion will check that each locator is present with those options as well (other than ``wait``). :: page.assert_all_of_selectors("custom", "Tom", "Joe", visible="all") page.assert_all_of_selectors("css", "#my_dif", "a.not_clicked") It accepts all options that :meth:`find_all` accepts, such as ``text`` and ``visible``. The ``wait`` option applies to all of the selectors as a group, so all of the locators must be present within ``wait`` (defaults to :data:`capybara.default_max_wait_time`) seconds. If the given selector is not a valid selector, the first argument is assumed to be a locator and the default selector will be used. Args: selector (str, optional): The name of the selector to use. Defaults to :data:`capybara.default_selector`. *locators (str): Variable length list of locators. **kwargs: Arbitrary keyword arguments for :class:`SelectorQuery`. """ wait = kwargs['wait'] if 'wait' in kwargs else capybara.default_max_wait_time if not isinstance(selector, Hashable) or selector not in selectors: locators = (selector,) + locators selector = capybara.default_selector @self.synchronize(wait=wait) def assert_all_of_selectors(): for locator in locators: self.assert_selector(selector, locator, **kwargs) return True return assert_all_of_selectors()
Asserts that all of the provided selectors are present on the given page or descendants of the current node. If options are provided, the assertion will check that each locator is present with those options as well (other than ``wait``). :: page.assert_all_of_selectors("custom", "Tom", "Joe", visible="all") page.assert_all_of_selectors("css", "#my_dif", "a.not_clicked") It accepts all options that :meth:`find_all` accepts, such as ``text`` and ``visible``. The ``wait`` option applies to all of the selectors as a group, so all of the locators must be present within ``wait`` (defaults to :data:`capybara.default_max_wait_time`) seconds. If the given selector is not a valid selector, the first argument is assumed to be a locator and the default selector will be used. Args: selector (str, optional): The name of the selector to use. Defaults to :data:`capybara.default_selector`. *locators (str): Variable length list of locators. **kwargs: Arbitrary keyword arguments for :class:`SelectorQuery`.
def delete(self, id): """ Delete the specified label :param id: the label's ID :type id: str :raises: This will raise a :class:`ServerException<logentries_api.exceptions.ServerException>` if there is an error from Logentries """ return self._post( request=ApiActions.DELETE.value, uri=ApiUri.TAGS.value, params={'id': id} )
Delete the specified label :param id: the label's ID :type id: str :raises: This will raise a :class:`ServerException<logentries_api.exceptions.ServerException>` if there is an error from Logentries
def transform(self, X): ''' :X: numpy ndarray ''' noise = self._noise_func(*self._args, size=X.shape) results = X + noise self.relative_noise_size_ = self.relative_noise_size(X, results) return results
:X: numpy ndarray
def get_expected_bindings(self): """Query the neutron DB for SG->switch interface bindings Bindings are returned as a dict of bindings for each switch: {<switch1>: set([(intf1, acl_name, direction), (intf2, acl_name, direction)]), <switch2>: set([(intf1, acl_name, direction)]), ..., } """ sg_bindings = db_lib.get_baremetal_sg_bindings() all_expected_bindings = collections.defaultdict(set) for sg_binding, port_binding in sg_bindings: sg_id = sg_binding['security_group_id'] try: binding_profile = json.loads(port_binding.profile) except ValueError: binding_profile = {} switchports = self._get_switchports(binding_profile) for switch, intf in switchports: ingress_name = self._acl_name(sg_id, n_const.INGRESS_DIRECTION) egress_name = self._acl_name(sg_id, n_const.EGRESS_DIRECTION) all_expected_bindings[switch].add( (intf, ingress_name, a_const.INGRESS_DIRECTION)) all_expected_bindings[switch].add( (intf, egress_name, a_const.EGRESS_DIRECTION)) return all_expected_bindings
Query the neutron DB for SG->switch interface bindings Bindings are returned as a dict of bindings for each switch: {<switch1>: set([(intf1, acl_name, direction), (intf2, acl_name, direction)]), <switch2>: set([(intf1, acl_name, direction)]), ..., }
def Log(self, format_str, *args): """Logs the message using the flow's standard logging. Args: format_str: Format string *args: arguments to the format string """ log_entry = rdf_flow_objects.FlowLogEntry( client_id=self.rdf_flow.client_id, flow_id=self.rdf_flow.flow_id, hunt_id=self.rdf_flow.parent_hunt_id, message=format_str % args) data_store.REL_DB.WriteFlowLogEntries([log_entry]) if self.rdf_flow.parent_hunt_id: db_compat.ProcessHuntFlowLog(self.rdf_flow, format_str % args)
Logs the message using the flow's standard logging. Args: format_str: Format string *args: arguments to the format string
def _private_packages_allowed(): """ Checks if the current user is allowed to create private packages. In the public cloud, the user needs to be on a paid plan. There are no restrictions in other deployments. """ if not HAVE_PAYMENTS or TEAM_ID: return True customer = _get_or_create_customer() plan = _get_customer_plan(customer) return plan != PaymentPlan.FREE
Checks if the current user is allowed to create private packages. In the public cloud, the user needs to be on a paid plan. There are no restrictions in other deployments.
def _get_provider_manager(self, osid, local=False): """Gets the most appropriate provider manager depending on config.""" return get_provider_manager(osid, runtime=self._runtime, proxy=getattr(self, '_proxy', None), local=local)
Gets the most appropriate provider manager depending on config.
def stationary_distribution_sensitivity(T, j): r"""Calculate the sensitivity matrix for entry j the stationary distribution vector given transition matrix T. Parameters ---------- T : numpy.ndarray shape = (n, n) Transition matrix j : int entry of stationary distribution for which the sensitivity is to be computed Returns ------- x : ndarray, shape=(n, n) Sensitivity matrix for entry index around transition matrix T. Reversibility is not assumed. Remark ------ Note, that this function uses a different normalization convention for the sensitivity compared to eigenvector_sensitivity. See there for further information. """ n = len(T) lEV = numpy.ones(n) rEV = stationary_distribution(T) eVal = 1.0 T = numpy.transpose(T) vecA = numpy.zeros(n) vecA[j] = 1.0 matA = T - eVal * numpy.identity(n) # normalize s.t. sum is one using rEV which is constant matA = numpy.concatenate((matA, [lEV])) phi = numpy.linalg.lstsq(numpy.transpose(matA), vecA, rcond=-1) phi = numpy.delete(phi[0], -1) sensitivity = -numpy.outer(rEV, phi) + numpy.dot(phi, rEV) * numpy.outer(rEV, lEV) return sensitivity
r"""Calculate the sensitivity matrix for entry j the stationary distribution vector given transition matrix T. Parameters ---------- T : numpy.ndarray shape = (n, n) Transition matrix j : int entry of stationary distribution for which the sensitivity is to be computed Returns ------- x : ndarray, shape=(n, n) Sensitivity matrix for entry index around transition matrix T. Reversibility is not assumed. Remark ------ Note, that this function uses a different normalization convention for the sensitivity compared to eigenvector_sensitivity. See there for further information.
def load(source, semi=None): """ Read a variable-property mapping from *source* and return the VPM. Args: source: a filename or file-like object containing the VPM definitions semi (:class:`~delphin.mrs.semi.SemI`, optional): if provided, it is passed to the VPM constructor Returns: a :class:`VPM` instance """ if hasattr(source, 'read'): return _load(source, semi) else: with open(source, 'r') as fh: return _load(fh, semi)
Read a variable-property mapping from *source* and return the VPM. Args: source: a filename or file-like object containing the VPM definitions semi (:class:`~delphin.mrs.semi.SemI`, optional): if provided, it is passed to the VPM constructor Returns: a :class:`VPM` instance
def convert(self, obj): """Takes a dict corresponding to the honeybadgerfish JSON blob of the 1.2.* type and converts it to DIRECT_HONEY_BADGERFISH version. The object is modified in place and returned. """ if self.pristine_if_invalid: raise NotImplementedError('pristine_if_invalid option is not supported yet') nex = get_nexml_el(obj) assert nex # Create the new objects as locals. This section should not # mutate obj, so that if there is an exception the object # is unchanged on the error exit otusById = nex['otusById'] otusElementOrder = nex['^ot:otusElementOrder'] otus = self.convert_otus(otusById, otusElementOrder) nex['otus'] = otus treesById = nex['treesById'] treesElementOrder = nex['^ot:treesElementOrder'] trees = self.convert_trees(treesById, treesElementOrder) # add the locals to the object nex['trees'] = trees nex['@nexml2json'] = str(DIRECT_HONEY_BADGERFISH) # Make the struct leaner if self.remove_old_structs: del nex['otusById'] del nex['^ot:otusElementOrder'] del nex['treesById'] del nex['^ot:treesElementOrder'] return obj
Takes a dict corresponding to the honeybadgerfish JSON blob of the 1.2.* type and converts it to DIRECT_HONEY_BADGERFISH version. The object is modified in place and returned.
def get_best(self): """Finds the optimal number of features :return: optimal number of features and ranking """ svc = SVC(kernel="linear") rfecv = RFECV( estimator=svc, step=1, cv=StratifiedKFold(self.y_train, 2), scoring="log_loss" ) rfecv.fit(self.x_train, self.y_train) return rfecv.n_features_, rfecv.ranking_
Finds the optimal number of features :return: optimal number of features and ranking
def consume(exchange, queue_name, routing_key, callback, app_name): """Consume messages from an AMQP queue using a Python callback.""" # The configuration validates these are not null and contain all required keys # when it is loaded. bindings = config.conf["bindings"] queues = config.conf["queues"] # The CLI and config.DEFAULTS have different defaults for the queue # settings at the moment. We should select a universal default in the # future and remove this. Unfortunately that will break backwards compatibility. if queues == config.DEFAULTS["queues"]: queues[config._default_queue_name]["durable"] = True queues[config._default_queue_name]["auto_delete"] = False if queue_name: queues = {queue_name: config.conf["queues"][config._default_queue_name]} for binding in bindings: binding["queue"] = queue_name if exchange: for binding in bindings: binding["exchange"] = exchange if routing_key: for binding in bindings: binding["routing_keys"] = routing_key callback_path = callback or config.conf["callback"] if not callback_path: raise click.ClickException( "A Python path to a callable object that accepts the message must be provided" ' with the "--callback" command line option or in the configuration file' ) try: module, cls = callback_path.strip().split(":") except ValueError: raise click.ClickException( "Unable to parse the callback path ({}); the " 'expected format is "my_package.module:' 'callable_object"'.format(callback_path) ) try: module = importlib.import_module(module) except ImportError as e: provider = "--callback argument" if callback else "configuration file" raise click.ClickException( "Failed to import the callback module ({}) provided in the {}".format( str(e), provider ) ) try: callback = getattr(module, cls) except AttributeError as e: raise click.ClickException( "Unable to import {} ({}); is the package installed? The python path should " 'be in the format "my_package.module:callable_object"'.format( callback_path, str(e) ) ) if app_name: config.conf["client_properties"]["app"] = app_name _log.info("Starting consumer with %s callback", callback_path) try: deferred_consumers = api.twisted_consume( callback, bindings=bindings, queues=queues ) deferred_consumers.addCallback(_consume_callback) deferred_consumers.addErrback(_consume_errback) except ValueError as e: click_version = pkg_resources.get_distribution("click").parsed_version if click_version < pkg_resources.parse_version("7.0"): raise click.exceptions.BadOptionUsage(str(e)) else: raise click.exceptions.BadOptionUsage("callback", str(e)) reactor.run() sys.exit(_exit_code)
Consume messages from an AMQP queue using a Python callback.
def ensure_caches_alive(max_retries: int = 100, retry_timeout: int = 5, exit_on_failure: bool = True) -> bool: """ Checks every cache backend alias in ``settings.CACHES`` until it becomes available. After ``max_retries`` attempts to reach any backend are failed it returns ``False``. If ``exit_on_failure`` is set it shuts down with ``exit(1)``. It sets the ``django-docker-helpers:available-check`` key for every cache backend to ensure it's receiving connections. If check is passed the key is deleted. :param exit_on_failure: set to ``True`` if there's no sense to continue :param int max_retries: a number of attempts to reach cache backend, default is ``100`` :param int retry_timeout: a timeout in seconds between attempts, default is ``5`` :return: ``True`` if all backends are available ``False`` if any backend check failed """ for cache_alias in settings.CACHES.keys(): cache = caches[cache_alias] wf('Checking if the cache backed is accessible for the alias `%s`... ' % cache_alias, False) for i in range(max_retries): try: cache.set('django-docker-helpers:available-check', '1') assert cache.get('django-docker-helpers:available-check') == '1' cache.delete('django-docker-helpers:available-check') wf('[+]\n') break except Exception as e: wf(str(e) + '\n') sleep(retry_timeout) else: wf('Tried %s time(s). Shutting down.\n' % max_retries) exit_on_failure and exit(1) return False return True
Checks every cache backend alias in ``settings.CACHES`` until it becomes available. After ``max_retries`` attempts to reach any backend are failed it returns ``False``. If ``exit_on_failure`` is set it shuts down with ``exit(1)``. It sets the ``django-docker-helpers:available-check`` key for every cache backend to ensure it's receiving connections. If check is passed the key is deleted. :param exit_on_failure: set to ``True`` if there's no sense to continue :param int max_retries: a number of attempts to reach cache backend, default is ``100`` :param int retry_timeout: a timeout in seconds between attempts, default is ``5`` :return: ``True`` if all backends are available ``False`` if any backend check failed
def plot_results(fout_img, goea_results, **kws): """Given a list of GOEA results, plot result GOs up to top.""" if "{NS}" not in fout_img: plt_goea_results(fout_img, goea_results, **kws) else: # Plot separately by NS: BP, MF, CC ns2goea_results = cx.defaultdict(list) for rec in goea_results: ns2goea_results[rec.NS].append(rec) for ns_name, ns_res in ns2goea_results.items(): fout = fout_img.format(NS=ns_name) plt_goea_results(fout, ns_res, **kws)
Given a list of GOEA results, plot result GOs up to top.
def get_attachment_content(self, request, queryset): """ Returns the generated file content. :param request: The request being processed. :param queryset: The model class being processed. :return: The report content (usually expressed in raw bytes but could be unicode as well). """ return self.dump_report_content(request, self.get_report_data_rows(request, queryset))
Returns the generated file content. :param request: The request being processed. :param queryset: The model class being processed. :return: The report content (usually expressed in raw bytes but could be unicode as well).
def findFileParam(self, comp): """Finds the filename auto-parameter that component *comp* is in, and returns all the filenames for that parameter. Notes this assumes that *comp* will only be in a single filename auto-parameter. :param comp: Component to search parameter membership for :type comp: :class:`AbstractStimulusComponent<sparkle.stim.abstract_component.AbstractStimulusComponent>` :returns: list<str> -- filenames the found parameter will loop through """ for p in self._parameters: if p['parameter'] == 'filename' and comp in p['selection']: return p['names']
Finds the filename auto-parameter that component *comp* is in, and returns all the filenames for that parameter. Note: this assumes that *comp* will only be in a single filename auto-parameter.

:param comp: Component to search parameter membership for
:type comp: :class:`AbstractStimulusComponent<sparkle.stim.abstract_component.AbstractStimulusComponent>`
:returns: list<str> -- filenames the found parameter will loop through
def _round(self, number): """ Helper function for rounding-as-taught-in-school (X.5 rounds to X+1 if positive). Python 3 now rounds 0.5 to whichever side is even (i.e. 2.5 rounds to 2). :param int number: a float to round. :return: closest integer to number, rounding ties away from 0. """ sign = 1 if number >= 0 else -1 rounded = int(round(number)) nextRounded = int(round(number + 1 * sign)) if nextRounded == rounded: # We rounded X.5 to even, and it was also away from 0. return rounded elif nextRounded == rounded + 1 * sign: # We rounded normally (we are in Python 2) return rounded elif nextRounded == rounded + 2 * sign: # We rounded X.5 to even, but it was towards 0. # Go away from 0 instead. return rounded + 1 * sign else: # If we get here, something has gone wrong. raise RuntimeError("Could not round {}".format(number))
Helper function for rounding-as-taught-in-school (X.5 rounds to X+1 if positive). Python 3 now rounds 0.5 to whichever side is even (i.e. 2.5 rounds to 2). :param int number: a float to round. :return: closest integer to number, rounding ties away from 0.
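Tracing the tie-breaking behaviour with a standalone copy of the method body (`self` is unused, so it can run directly):

def round_half_away(number):
    # Round half away from zero, regardless of Python's banker's rounding.
    sign = 1 if number >= 0 else -1
    rounded = int(round(number))
    next_rounded = int(round(number + 1 * sign))
    if next_rounded == rounded:
        return rounded
    elif next_rounded == rounded + 1 * sign:
        return rounded
    elif next_rounded == rounded + 2 * sign:
        return rounded + 1 * sign
    else:
        raise RuntimeError("Could not round {}".format(number))

print(round_half_away(2.5))   # 3, whereas Python 3's round(2.5) gives 2
print(round_half_away(-2.5))  # -3
print(round_half_away(2.4))   # 2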
def get_sdf(identifier, namespace='cid', domain='compound', operation=None,
            searchtype=None, **kwargs):
    """Request wrapper that automatically parses SDF response and suppresses NotFoundError."""
    try:
        return get(identifier, namespace, domain, operation, 'SDF', searchtype, **kwargs).decode()
    except NotFoundError as e:
        log.info(e)
        return None
Request wrapper that automatically parses SDF response and suppresses NotFoundError.
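A hedged usage sketch in the PubChem-style namespaces this wrapper appears to accept ('cid', 'name', ...); the identifier is an example:

sdf = get_sdf('aspirin', namespace='name')  # None if the record is not found
if sdf is not None:
    with open('aspirin.sdf', 'w') as f:
        f.write(sdf)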
def rs(data, n, unbiased=True):
    """
    Calculates an individual R/S value in the rescaled range approach for
    a given n.

    Note: This is just a helper function for hurst_rs and should not be called
    directly.

    Args:
        data (array-like of float):
            time series
        n (float):
            size of the subseries in which data should be split

    Kwargs:
        unbiased (boolean):
            if True, the standard deviation based on the unbiased variance
            (1/(N-1) instead of 1/N) will be used. This should be the default choice,
            since the true mean of the sequences is not known. This parameter should
            only be changed to recreate results of other implementations.

    Returns:
        float:
            (R/S)_n
    """
    data = np.asarray(data)
    total_N = len(data)
    m = total_N // n  # number of sequences
    # cut values at the end of data to make the array divisible by n
    data = data[:total_N - (total_N % n)]
    # split remaining data into subsequences of length n
    seqs = np.reshape(data, (m, n))
    # calculate means of subsequences
    means = np.mean(seqs, axis=1)
    # normalize subsequences by subtracting mean
    y = seqs - means.reshape((m, 1))
    # build cumulative sum of subsequences
    y = np.cumsum(y, axis=1)
    # find ranges
    r = np.max(y, axis=1) - np.min(y, axis=1)
    # find standard deviation
    # we should use the unbiased estimator, since we do not know the true mean
    s = np.std(seqs, axis=1, ddof=1 if unbiased else 0)
    # some ranges may be zero and have to be excluded from the analysis
    idx = np.where(r != 0)
    r = r[idx]
    s = s[idx]
    # it may happen that all ranges are zero (if all values in data are equal)
    if len(r) == 0:
        return np.nan
    else:
        # return mean of r/s along subsequence index
        return np.mean(r / s)
Calculates an individual R/S value in the rescaled range approach for a given n. Note: This is just a helper function for hurst_rs and should not be called directly. Args: data (array-like of float): time series n (float): size of the subseries in which data should be split Kwargs: unbiased (boolean): if True, the standard deviation based on the unbiased variance (1/(N-1) instead of 1/N) will be used. This should be the default choice, since the true mean of the sequences is not known. This parameter should only be changed to recreate results of other implementations. Returns: float: (R/S)_n
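A quick demonstration on Gaussian white noise, whose Hurst exponent is 0.5, so (R/S)_n should grow roughly like sqrt(n); the seed is arbitrary:

import numpy as np

np.random.seed(0)
noise = np.random.randn(10000)
for n in (64, 256, 1024):
    print(n, rs(noise, n))
# successive values should scale roughly like n ** 0.5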
def page(request, slug, template=u"pages/page.html", extra_context=None):
    """
    Select a template for a page and render it. The request
    object should have a ``page`` attribute that's added via
    ``yacms.pages.middleware.PageMiddleware``. The page is loaded
    earlier via middleware to perform various other functions.
    The urlpattern that maps to this view is a catch-all pattern, in
    which case the page attribute won't exist, so raise a 404 then.

    For template selection, a list of possible templates is built up
    based on the current page. This list is ordered from most granular
    match, starting with a custom template for the exact page, then
    adding templates based on the page's parent page, that could be
    used for sections of a site (eg all children of the parent).
    Finally at the broadest level, a template for the page's content
    type (its model class) is checked for, and then if none of these
    templates match, the default pages/page.html is used.
    """
    from yacms.pages.middleware import PageMiddleware
    if not PageMiddleware.installed():
        raise ImproperlyConfigured("yacms.pages.middleware.PageMiddleware "
                                   "(or a subclass of it) is missing from " +
                                   "settings.MIDDLEWARE_CLASSES or " +
                                   "settings.MIDDLEWARE")

    if not hasattr(request, "page") or request.page.slug != slug:
        raise Http404

    # Check for a template name matching the page's slug. If the homepage
    # is configured as a page instance, the template "pages/index.html" is
    # used, since the slug "/" won't match a template name.
    template_name = str(slug) if slug != home_slug() else "index"
    templates = [u"pages/%s.html" % template_name]
    method_template = request.page.get_content_model().get_template_name()
    if method_template:
        templates.insert(0, method_template)
    if request.page.content_model is not None:
        templates.append(u"pages/%s/%s.html" % (template_name,
                                                request.page.content_model))
    for parent in request.page.get_ascendants(for_user=request.user):
        parent_template_name = str(parent.slug)
        # Check for a parent template matching the page's content model.
        if request.page.content_model is not None:
            templates.append(u"pages/%s/%s.html" % (parent_template_name,
                                                    request.page.content_model))
    # Check for a template matching the page's content model.
    if request.page.content_model is not None:
        templates.append(u"pages/%s.html" % request.page.content_model)
    templates.append(template)

    return TemplateResponse(request, templates, extra_context or {})
Select a template for a page and render it. The request
object should have a ``page`` attribute that's added via
``yacms.pages.middleware.PageMiddleware``. The page is loaded
earlier via middleware to perform various other functions.
The urlpattern that maps to this view is a catch-all pattern, in
which case the page attribute won't exist, so raise a 404 then.

For template selection, a list of possible templates is built up
based on the current page. This list is ordered from most granular
match, starting with a custom template for the exact page, then
adding templates based on the page's parent page, that could be
used for sections of a site (eg all children of the parent).
Finally at the broadest level, a template for the page's content
type (its model class) is checked for, and then if none of these
templates match, the default pages/page.html is used.
def do_request(self, line): """request <peer> <method> <params> send a msgpack-rpc request and print a response. <params> is a python code snippet, it should be eval'ed to a list. """ def f(p, method, params): result = p.call(method, params) print("RESULT %s" % result) self._request(line, f)
request <peer> <method> <params> send a msgpack-rpc request and print a response. <params> is a python code snippet, it should be eval'ed to a list.
def draw_text(self, video_name, out, start, end, x, y,
              text, color='0xFFFFFF', show_background=0,
              background_color='0x000000', size=16):
    """
    Draws text over a video

    @param video_name : name of video input file
    @param out : name of video output file
    @param start : start timecode to draw text hh:mm:ss
    @param end : end timecode to draw text hh:mm:ss
    @param x : x position of text (px)
    @param y : y position of text (px)
    @param text : text content to draw
    @param color : text color
    @param show_background : boolean to show a background box behind the text
    @param background_color : color of background box
    """
    cfilter = (r"[0:0]drawtext=fontfile=/Library/Fonts/AppleGothic.ttf:"
               r"x={x}:y={y}:fontcolor='{font_color}':"
               r"box={show_background}:"
               r"boxcolor='{background_color}':"
               r"text='{text}':fontsize={size}:"
               r"enable='between(t,{start},{end})'[vout];"
               r"[0:1]apad=pad_len=0[aout]")\
        .format(x=x, y=y, font_color=color,
                show_background=show_background,
                background_color=background_color,
                text=text, start=start, end=end, size=size)

    command = ['ffmpeg', '-i', video_name, '-c:v', 'huffyuv', '-y',
               '-filter_complex', cfilter, '-an', '-y',
               '-map', '[vout]', '-map', '[aout]', out]
    if self.verbose:
        print('Drawing text "{0}" onto {1} output as {2}'.format(
            text, video_name, out))
        print(' '.join(command))
    call(command)
Draws text over a video @param video_name : name of video input file @param out : name of video output file @param start : start timecode to draw text hh:mm:ss @param end : end timecode to draw text hh:mm:ss @param x : x position of text (px) @param y : y position of text (px) @param text : text content to draw @param color : text color @param show_background : boolean to show a background box behind the text @param background_color : color of background box
def is_ancestor(self, ancestor_rev, rev):
    """Check if a commit is an ancestor of another

    :param ancestor_rev: Rev which should be an ancestor
    :param rev: Rev to test against ancestor_rev
    :return: ``True`` if ancestor_rev is an ancestor of rev.
    """
    try:
        self.git.merge_base(ancestor_rev, rev, is_ancestor=True)
    except GitCommandError as err:
        if err.status == 1:
            return False
        raise
    return True
Check if a commit is an ancestor of another

:param ancestor_rev: Rev which should be an ancestor
:param rev: Rev to test against ancestor_rev
:return: ``True`` if ancestor_rev is an ancestor of rev.
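A hedged usage sketch, assuming this is the GitPython `Repo` method it appears to be; the revs are examples:

from git import Repo

repo = Repo('.')
if repo.is_ancestor('v1.0', 'HEAD'):  # is tag v1.0 reachable from HEAD?
    print('HEAD contains v1.0')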
def get_conf(cls, builder, doctree=None): """Return a dictionary of slide configuration for this doctree.""" # set up the default conf result = { 'theme': builder.config.slide_theme, 'autoslides': builder.config.autoslides, 'slide_classes': [], } # now look for a slideconf node in the doctree and update the conf if doctree: conf_node = cls.get(doctree) if conf_node: result.update(conf_node.attributes) return result
Return a dictionary of slide configuration for this doctree.
def _get_error_page_callback(self): """Return an error page for the current response status.""" if self.response.status in self._error_handlers: return self._error_handlers[self.response.status] elif None in self._error_handlers: return self._error_handlers[None] else: # Rudimentary error handler if no error handler was found self.response.media_type = 'text/plain' return lambda: self.response.status_line
Return an error page for the current response status.
def l2traceroute_result_output_l2traceroutedone(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") l2traceroute_result = ET.Element("l2traceroute_result") config = l2traceroute_result output = ET.SubElement(l2traceroute_result, "output") l2traceroutedone = ET.SubElement(output, "l2traceroutedone") l2traceroutedone.text = kwargs.pop('l2traceroutedone') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
def _registerPickleType(name, typedef):
    '''
    Register a type with the specified name. After registration,
    NamedStruct with this type (and any sub-types) can be
    successfully pickled and transferred.
    '''
    NamedStruct._pickleNames[typedef] = name
    NamedStruct._pickleTypes[name] = typedef
Register a type with the specified name. After registration, NamedStruct with this type (and any sub-types) can be successfully pickled and transferred.
def rename(self, oldkey, newkey):
    """
    Change a keyname to another, without changing position in sequence.

    Implemented so that transformations can be made on keys,
    as well as on values. (used by encode and decode)

    Also renames comments.
    """
    if oldkey in self.scalars:
        the_list = self.scalars
    elif oldkey in self.sections:
        the_list = self.sections
    else:
        raise KeyError('Key "%s" not found.' % oldkey)
    pos = the_list.index(oldkey)
    # save the value before removing the old key
    val = self[oldkey]
    dict.__delitem__(self, oldkey)
    dict.__setitem__(self, newkey, val)
    the_list.remove(oldkey)
    the_list.insert(pos, newkey)
    comm = self.comments[oldkey]
    inline_comment = self.inline_comments[oldkey]
    del self.comments[oldkey]
    del self.inline_comments[oldkey]
    self.comments[newkey] = comm
    self.inline_comments[newkey] = inline_comment
Change a keyname to another, without changing position in sequence. Implemented so that transformations can be made on keys, as well as on values. (used by encode and decode) Also renames comments.
def reload_input_standby(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") reload = ET.Element("reload") config = reload input = ET.SubElement(reload, "input") standby = ET.SubElement(input, "standby") callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code