code: string, lengths 75 to 104k
docstring: string, lengths 1 to 46.9k
def main():
    """
    Get an info report for a tile.  Format is same as input tile but with
    min/max values for values under 'data'.
    """
    arguments = docopt(__doc__, version='tileinfo 0.1')
    src_name = arguments['SOURCE']
    src_format = arguments['--srcformat']
    indent = arguments['--indent']
    if isinstance(indent, str) and indent.lower() == 'none':
        indent = None
    elif isinstance(indent, str):
        indent = int(indent)
    else:
        indent = 4

    with sys.stdin if src_name in ('-', None) else open(src_name, 'rb') as f:

        # Guess input format if not given
        if src_format is None:
            if '.json' == f.name[-5:]:
                src_format = 'json'
            else:
                src_format = 'tile'

        if src_format == 'tile':
            header, data = Tile(f.read()).unpack()
        else:
            header = json.loads(f.read())
            data = header.pop('data')

        # Generate the info report
        report = info(data, header['cols'])

        # Merge report with other tile attributes
        out = {k: v for k, v in header.items() if k != 'data'}
        out['data'] = {}
        for field, vals in report.items():
            out['data'][field + '_min'] = vals['min']
            out['data'][field + '_max'] = vals['max']

        print(json.dumps(out, indent=indent, sort_keys=True))
Get an info report for a tile. Format is same as input tile but with min/max values for values under 'data'.
def init_app(self, app):
    """Flask application initialization.

    The initialization will:

    * Set default values for the configuration variables.
    * Initialise the Flask mail extension.
    * Configure the extension to avoid sending emails when the application
      is in debug mode or when the ``MAIL_SUPPRESS_SEND`` config variable
      is set. In this case, the email is written to the stream configured
      in the extension.

    :param app: Flask application object.
    """
    self.init_config(app)
    if 'mail' not in app.extensions:
        Mail(app)
    if app.config.get('MAIL_SUPPRESS_SEND', False) or app.debug:
        email_dispatched.connect(print_email)
    app.extensions['invenio-mail'] = self
Flask application initialization. The initialization will: * Set default values for the configuration variables. * Initialise the Flask mail extension. * Configure the extension to avoid sending emails when the application is in debug mode or when the ``MAIL_SUPPRESS_SEND`` config variable is set. In this case, the email is written to the stream configured in the extension. :param app: Flask application object.
def _tarjan_head(ctx, v):
    """
    Used by @tarjan and @tarjan_iter.

    This is the head of the main iteration
    """
    ctx.index[v] = len(ctx.index)
    ctx.lowlink[v] = ctx.index[v]
    ctx.S.append(v)
    ctx.S_set.add(v)
    it = iter(ctx.g.get(v, ()))
    ctx.T.append((it, False, v, None))
Used by @tarjan and @tarjan_iter. This is the head of the main iteration
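A minimal sketch of the mutable traversal context this helper assumes; the field names are taken from the code above, but the class itself is hypothetical:

class TarjanContext:
    def __init__(self, g):
        self.g = g            # adjacency: node -> iterable of successors
        self.S = []           # stack of nodes in the current traversal
        self.S_set = set()    # fast membership test for S
        self.index = {}       # node -> discovery index
        self.lowlink = {}     # node -> lowest reachable discovery index
        self.T = []           # explicit iteration stack: (iterator, recursed_flag, node, child)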
def htmlABF(ID, group, d, folder, overwrite=False):
    """given an ID and the dict of files, generate a static html for that abf."""
    fname = folder + "/swhlab4/%s_index.html" % ID
    if overwrite is False and os.path.exists(fname):
        return
    html = TEMPLATES['abf']
    html = html.replace("~ID~", ID)
    html = html.replace("~CONTENT~", htmlABFcontent(ID, group, d))
    print(" <- writing [%s]" % os.path.basename(fname))
    with open(fname, 'w') as f:
        f.write(html)
    return
given an ID and the dict of files, generate a static html for that abf.
def translate_file(
    estimator, subtokenizer, input_file, output_file=None,
    print_all_translations=True):
  """Translate lines in file, and save to output file if specified.

  Args:
    estimator: tf.Estimator used to generate the translations.
    subtokenizer: Subtokenizer object for encoding and decoding source and
      translated lines.
    input_file: file containing lines to translate
    output_file: file that stores the generated translations.
    print_all_translations: If true, all translations are printed to stdout.

  Raises:
    ValueError: if output file is invalid.
  """
  batch_size = _DECODE_BATCH_SIZE

  # Read and sort inputs by length. Keep dictionary (original index-->new index
  # in sorted list) to write translations in the original order.
  sorted_inputs, sorted_keys = _get_sorted_inputs(input_file)
  num_decode_batches = (len(sorted_inputs) - 1) // batch_size + 1

  def input_generator():
    """Yield encoded strings from sorted_inputs."""
    for i, line in enumerate(sorted_inputs):
      if i % batch_size == 0:
        batch_num = (i // batch_size) + 1
        print("Decoding batch %d out of %d." % (batch_num, num_decode_batches))
      yield _encode_and_add_eos(line, subtokenizer)

  def input_fn():
    """Create batched dataset of encoded inputs."""
    ds = tf.data.Dataset.from_generator(
        input_generator, tf.int64, tf.TensorShape([None]))
    ds = ds.padded_batch(batch_size, [None])
    return ds

  translations = []
  for i, prediction in enumerate(estimator.predict(input_fn)):
    translation = _trim_and_decode(prediction["outputs"], subtokenizer)
    translations.append(translation)

    if print_all_translations:
      print("Translating:")
      print("\tInput: %s" % sorted_inputs[i])
      print("\tOutput: %s\n" % translation)
      print("=" * 100)

  # Write translations in the order they appeared in the original file.
  if output_file is not None:
    if tf.gfile.IsDirectory(output_file):
      raise ValueError("File output is a directory, will not save outputs to "
                       "file.")
    tf.logging.info("Writing to file %s" % output_file)
    with tf.gfile.Open(output_file, "w") as f:
      for index in xrange(len(sorted_keys)):
        f.write("%s\n" % translations[sorted_keys[index]])
Translate lines in file, and save to output file if specified. Args: estimator: tf.Estimator used to generate the translations. subtokenizer: Subtokenizer object for encoding and decoding source and translated lines. input_file: file containing lines to translate output_file: file that stores the generated translations. print_all_translations: If true, all translations are printed to stdout. Raises: ValueError: if output file is invalid.
def info(self):
    """Get coordinates, image info, and unit."""
    return {'coordinates': self._coordinates(),
            'imageinfo': self._imageinfo(),
            'miscinfo': self._miscinfo(),
            'unit': self._unit()
            }
Get coordinates, image info, and unit.
def download_remote_script(url):
    """Download the content of a remote script to a local temp file."""
    temp_fh = tempfile.NamedTemporaryFile('wt', encoding='utf8',
                                          suffix=".py", delete=False)
    downloader = _ScriptDownloader(url)
    logger.info(
        "Downloading remote script from %r using (%r downloader) to %r",
        url, downloader.name, temp_fh.name)
    content = downloader.get()
    temp_fh.write(content)
    temp_fh.close()
    return temp_fh.name
Download the content of a remote script to a local temp file.
def match(self, sentence, start=0, _v=None, _u=None):
    """ Returns the first match found in the given sentence, or None.
    """
    if sentence.__class__.__name__ == "Sentence":
        pass
    elif isinstance(sentence, list) or sentence.__class__.__name__ == "Text":
        return find(lambda m, s: m is not None,
                    ((self.match(s, start, _v), s) for s in sentence))[0]
    elif isinstance(sentence, basestring):
        sentence = Sentence(sentence)
    elif isinstance(sentence, Match) and len(sentence) > 0:
        sentence = sentence[0].sentence.slice(
            sentence[0].index, sentence[-1].index + 1)
    # Variations (_v) further down the list may match words more to the front.
    # We need to check all of them. Unmatched variations are blacklisted (_u).
    # Pattern.search() calls Pattern.match() with a persistent blacklist
    # (1.5x faster).
    a = []
    for sequence in (_v is not None and _v or self._variations()):
        if _u is not None and id(sequence) in _u:
            continue
        m = self._match(sequence, sentence, start)
        if m is not None:
            a.append((m.words[0].index, len(m.words), m))
        if m is not None and m.words[0].index == start:
            return m
        if m is None and _u is not None:
            _u[id(sequence)] = False
    # Return the leftmost-longest.
    if len(a) > 0:
        return sorted(a)[0][-1]
Returns the first match found in the given sentence, or None.
def decode(self, encoded):
    """ Decodes a tensor into a sequence.

    Args:
        encoded (torch.Tensor): Encoded sequence.

    Returns:
        str: Sequence decoded from ``encoded``.
    """
    encoded = super().decode(encoded)
    tokens = [self.itos[index] for index in encoded]
    return self.detokenize(tokens)
Decodes a tensor into a sequence. Args: encoded (torch.Tensor): Encoded sequence. Returns: str: Sequence decoded from ``encoded``.
def max_projection(self, axis=2):
    """
    Compute maximum projections of images along a dimension.

    Parameters
    ----------
    axis : int, optional, default = 2
        Which axis to compute projection along.
    """
    if axis >= size(self.value_shape):
        raise Exception('Axis for projection (%s) exceeds '
                        'image dimensions (%s-%s)'
                        % (axis, 0, size(self.value_shape) - 1))

    new_value_shape = list(self.value_shape)
    del new_value_shape[axis]
    return self.map(lambda x: amax(x, axis), value_shape=new_value_shape)
Compute maximum projections of images along a dimension. Parameters ---------- axis : int, optional, default = 2 Which axis to compute projection along.
def get_storage(self, id_or_uri):
    """
    Get storage details of an OS Volume.

    Args:
        id_or_uri: ID or URI of the OS Volume.

    Returns:
        dict: Storage details
    """
    uri = self.URI + "/{}/storage".format(extract_id_from_uri(id_or_uri))
    return self._client.get(uri)
Get storage details of an OS Volume. Args: id_or_uri: ID or URI of the OS Volume. Returns: dict: Storage details
def execute(self):
    """
    Execute the actions necessary to perform a `molecule converge` and
    returns None.

    :return: None
    """
    self.print_info()
    self._config.provisioner.converge()
    self._config.state.change_state('converged', True)
Execute the actions necessary to perform a `molecule converge` and returns None. :return: None
def main():
    """
    Main entry point for the script.

    Create a parser, process the command line, and run it
    """
    parser = cli.Cli()
    parser.parse(sys.argv[1:])
    return parser.run()
Main entry point for the script. Create a parser, process the command line, and run it
def state_angle(ket0: State, ket1: State) -> bk.BKTensor:
    """The Fubini-Study angle between states.

    Equal to the Bures angle for pure states.
    """
    return fubini_study_angle(ket0.vec, ket1.vec)
The Fubini-Study angle between states. Equal to the Bures angle for pure states.
def _parse_tile_part_bit_stream(self, fptr, sod_marker, tile_length):
    """Parse the tile part bit stream for SOP, EPH marker segments."""
    read_buffer = fptr.read(tile_length)
    # The tile length could possibly be too large and extend past
    # the end of file.  We need to be a bit resilient.
    count = min(tile_length, len(read_buffer))
    packet = np.frombuffer(read_buffer, dtype=np.uint8, count=count)

    indices = np.where(packet == 0xff)
    for idx in indices[0]:
        try:
            if packet[idx + 1] == 0x91 and (idx < (len(packet) - 5)):
                offset = sod_marker.offset + 2 + idx
                length = 4
                nsop = packet[(idx + 4):(idx + 6)].view('uint16')[0]
                if sys.byteorder == 'little':
                    nsop = nsop.byteswap()
                segment = SOPsegment(nsop, length, offset)
                self.segment.append(segment)
            elif packet[idx + 1] == 0x92:
                offset = sod_marker.offset + 2 + idx
                length = 0
                segment = EPHsegment(length, offset)
                self.segment.append(segment)
        except IndexError:
            continue
Parse the tile part bit stream for SOP, EPH marker segments.
def expectation(pyquil_prog: Program,
                pauli_sum: Union[PauliSum, PauliTerm, np.ndarray],
                samples: int,
                qc: QuantumComputer) -> float:
    """
    Compute the expectation value of pauli_sum over the distribution generated from
    pyquil_prog.

    :param pyquil_prog: The state preparation Program to calculate the expectation value of.
    :param pauli_sum: PauliSum representing the operator of which to calculate the expectation
        value or a numpy matrix representing the Hamiltonian tensored up to the appropriate
        size.
    :param samples: The number of samples used to calculate the expectation value. If samples
        is None then the expectation value is calculated by calculating <psi|O|psi>. Error
        models will not work if samples is None.
    :param qc: The QuantumComputer object.
    :return: A float representing the expectation value of pauli_sum given the distribution
        generated from quil_prog.
    """
    if isinstance(pauli_sum, np.ndarray):
        # debug mode by passing an array
        wf = WavefunctionSimulator().wavefunction(pyquil_prog)
        wf = np.reshape(wf.amplitudes, (-1, 1))
        average_exp = np.conj(wf).T.dot(pauli_sum.dot(wf)).real
        return average_exp
    else:
        if not isinstance(pauli_sum, (PauliTerm, PauliSum)):
            raise TypeError("pauli_sum variable must be a PauliTerm or PauliSum object")

        if isinstance(pauli_sum, PauliTerm):
            pauli_sum = PauliSum([pauli_sum])

        if samples is None:
            operator_progs = []
            operator_coeffs = []
            for p_term in pauli_sum.terms:
                op_prog = Program()
                for qindex, op in p_term:
                    op_prog.inst(STANDARD_GATES[op](qindex))
                operator_progs.append(op_prog)
                operator_coeffs.append(p_term.coefficient)

            result_overlaps = WavefunctionSimulator().expectation(pyquil_prog,
                                                                  pauli_sum.terms)
            result_overlaps = list(result_overlaps)
            assert len(result_overlaps) == len(operator_progs), \
                """Somehow we didn't get the correct number of results back from the QVM"""
            expectation = sum(list(map(lambda x: x[0] * x[1],
                                       zip(result_overlaps, operator_coeffs))))
            return expectation.real
        else:
            if not isinstance(samples, int):
                raise TypeError("samples variable must be an integer")
            if samples <= 0:
                raise ValueError("samples variable must be a positive integer")

            # normal execution via fake sampling
            # stores the sum of contributions to the energy from each operator term
            expectation = 0.0
            for j, term in enumerate(pauli_sum.terms):
                meas_basis_change = Program()
                qubits_to_measure = []
                if term.id() == "":
                    meas_outcome = 1.0
                else:
                    for index, gate in term:
                        qubits_to_measure.append(index)
                        if gate == 'X':
                            meas_basis_change.inst(RY(-np.pi / 2, index))
                        elif gate == 'Y':
                            meas_basis_change.inst(RX(np.pi / 2, index))

                    meas_outcome = \
                        expectation_from_sampling(pyquil_prog + meas_basis_change,
                                                  qubits_to_measure,
                                                  qc,
                                                  samples)

                expectation += term.coefficient * meas_outcome

            return expectation.real
Compute the expectation value of pauli_sum over the distribution generated from pyquil_prog. :param pyquil_prog: The state preparation Program to calculate the expectation value of. :param pauli_sum: PauliSum representing the operator of which to calculate the expectation value or a numpy matrix representing the Hamiltonian tensored up to the appropriate size. :param samples: The number of samples used to calculate the expectation value. If samples is None then the expectation value is calculated by calculating <psi|O|psi>. Error models will not work if samples is None. :param qc: The QuantumComputer object. :return: A float representing the expectation value of pauli_sum given the distribution generated from quil_prog.
def p_created_1(self, p):
    """created : CREATED DATE"""
    try:
        if six.PY2:
            value = p[2].decode(encoding='utf-8')
        else:
            value = p[2]
        self.builder.set_created_date(self.document, value)
    except CardinalityError:
        self.more_than_one_error('Created', p.lineno(1))
created : CREATED DATE
def parse_item(lines):
    """
    Given the lines that form a subtag entry (after joining wrapped lines
    back together), parse the data they contain.

    Returns a generator that yields once if there was any data there
    (and an empty generator if this was just the header).
    """
    info = {}
    for line in lines:
        key, value = line.split(': ', 1)
        if key in LIST_KEYS:
            info.setdefault(key, []).append(value)
        else:
            assert key not in info
            info[key] = value

    if 'Subtag' in info or 'Tag' in info:
        yield info
Given the lines that form a subtag entry (after joining wrapped lines back together), parse the data they contain. Returns a generator that yields once if there was any data there (and an empty generator if this was just the header).
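A hedged usage sketch against an IANA language-subtag-registry style entry; the LIST_KEYS value here is an assumption for illustration:

LIST_KEYS = {'Description', 'Comments', 'Prefix'}  # assumed multi-valued keys

lines = [
    'Type: language',
    'Subtag: en',
    'Description: English',
    'Added: 2005-10-16',
]
print(next(parse_item(lines)))
# {'Type': 'language', 'Subtag': 'en', 'Description': ['English'], 'Added': '2005-10-16'}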
def compute_training_sizes(train_perc, class_sizes, stratified=True):
    """Computes the maximum training size that the smallest class can provide """
    size_per_class = np.int64(np.around(train_perc * class_sizes))

    if stratified:
        print("Different classes in training set are stratified "
              "to match smallest class!")

        # per-class
        size_per_class = np.minimum(np.min(size_per_class), size_per_class)

        # single number
        reduced_sizes = np.unique(size_per_class)
        if len(reduced_sizes) != 1:  # they must all be the same
            raise ValueError("Error in stratification of training set "
                             "based on smallest class!")

    total_test_samples = np.int64(np.sum(class_sizes) - sum(size_per_class))

    return size_per_class, total_test_samples
Computes the maximum training size that the smallest class can provide
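A quick numeric check of the stratified path (class sizes are invented):

import numpy as np

size_per_class, n_test = compute_training_sizes(0.8, np.array([100, 200]))
print(size_per_class)  # [80 80], capped at 80% of the smallest class
print(n_test)          # 140, i.e. the remaining 300 - 160 samples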
def _close_and_clean(self, cleanup):
    """
    Closes the project, and cleanup the disk if cleanup is True

    :param cleanup: Whether to delete the project directory
    """
    tasks = []
    for node in self._nodes:
        # asyncio.async() is a syntax error on Python 3.7+ ("async" became a
        # keyword); ensure_future() is the equivalent call.
        tasks.append(asyncio.ensure_future(node.manager.close_node(node.id)))
    if tasks:
        done, _ = yield from asyncio.wait(tasks)
        for future in done:
            try:
                future.result()
            except (Exception, GeneratorExit) as e:
                log.error("Could not close node {}".format(e), exc_info=1)
    if cleanup and os.path.exists(self.path):
        self._deleted = True
        try:
            yield from wait_run_in_executor(shutil.rmtree, self.path)
            log.info("Project {id} with path '{path}' deleted".format(
                path=self._path, id=self._id))
        except OSError as e:
            raise aiohttp.web.HTTPInternalServerError(
                text="Could not delete the project directory: {}".format(e))
    else:
        log.info("Project {id} with path '{path}' closed".format(
            path=self._path, id=self._id))

    if self._used_tcp_ports:
        log.warning("Project {} has TCP ports still in use: {}".format(
            self.id, self._used_tcp_ports))
    if self._used_udp_ports:
        log.warning("Project {} has UDP ports still in use: {}".format(
            self.id, self._used_udp_ports))

    # clean the remaining ports that have not been cleaned by their
    # respective node.
    port_manager = PortManager.instance()
    for port in self._used_tcp_ports.copy():
        port_manager.release_tcp_port(port, self)
    for port in self._used_udp_ports.copy():
        port_manager.release_udp_port(port, self)
Closes the project, and cleanup the disk if cleanup is True :param cleanup: Whether to delete the project directory
def section_path_lengths(neurites, neurite_type=NeuriteType.all):
    '''Path lengths of a collection of neurites '''
    # Calculates and stores the section lengths in one pass,
    # then queries the lengths in the path length iterations.
    # This avoids repeatedly calculating the lengths of the
    # same sections.
    dist = {}
    neurite_filter = is_type(neurite_type)

    for s in iter_sections(neurites, neurite_filter=neurite_filter):
        dist[s] = s.length

    def pl2(node):
        '''Calculate the path length using cached section lengths'''
        return sum(dist[n] for n in node.iupstream())

    return map_sections(pl2, neurites, neurite_type=neurite_type)
Path lengths of a collection of neurites
def _populate_issue(self, graph: TraceGraph, instance_id: int) -> None:
    """Adds an issue to the trace graph along with relevant information
    pertaining to the issue (e.g. instance, fix_info, sources/sinks)

    The issue is identified by its corresponding instance's ID in the input
    trace graph.
    """
    instance = graph._issue_instances[instance_id]
    issue = graph._issues[instance.issue_id.local_id]
    self._populate_shared_text(graph, instance.message_id)
    self._populate_shared_text(graph, instance.filename_id)
    self._populate_shared_text(graph, instance.callable_id)

    self.add_issue_instance(instance)
    self.add_issue(issue)

    if instance_id in graph._issue_instance_fix_info:
        issue_fix_info = graph._issue_instance_fix_info[instance_id]
        self.add_issue_instance_fix_info(instance, issue_fix_info)

    for shared_text_id in graph._issue_instance_shared_text_assoc[instance_id]:
        shared_text = graph._shared_texts[shared_text_id]
        if shared_text_id not in self._shared_texts:
            self.add_shared_text(shared_text)
        self.add_issue_instance_shared_text_assoc(instance, shared_text)
Adds an issue to the trace graph along with relevant information pertaining to the issue (e.g. instance, fix_info, sources/sinks) The issue is identified by its corresponding instance's ID in the input trace graph.
def duration_to_string(duration):
    """
    Converts a duration to a string

    Args:
        duration (int): The duration in seconds to convert

    Returns:
        s (str): The duration as a string
    """
    m, s = divmod(duration, 60)
    h, m = divmod(m, 60)
    return "%d:%02d:%02d" % (h, m, s)
Converts a duration to a string Args: duration (int): The duration in seconds to convert Returns s (str): The duration as a string
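A quick sanity check of the formatting:

print(duration_to_string(3725))  # '1:02:05', since 3725 s = 1 h, 2 min, 5 s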
def _delete(self, obj, **kwargs):
    """
    Delete the object directly.

    .. code-block:: python

        DBSession.sacrud(Users)._delete(UserObj)

    If you don't need the session to be committed:

    .. code-block:: python

        DBSession.sacrud(Users, commit=False)._delete(UserObj)
    """
    if isinstance(obj, sqlalchemy.orm.query.Query):
        obj = obj.one()
    obj = self.preprocessing(obj=obj).delete()
    self.session.delete(obj)
    if kwargs.get('commit', self.commit) is True:
        try:
            self.session.commit()
        except AssertionError:
            transaction.commit()
    return True
Delete the object directly. .. code-block:: python DBSession.sacrud(Users)._delete(UserObj) If you don't need the session to be committed .. code-block:: python DBSession.sacrud(Users, commit=False)._delete(UserObj)
def check_assets(self):
    """
    Throws an exception if the assets file is not configured or cannot be
    found.
    """
    if not self.assets_file:
        raise ImproperlyConfigured(
            "You must specify the path to the assets.json file "
            "via WEBPACK_ASSETS_FILE")
    elif not os.path.exists(self.assets_file):
        raise ImproperlyConfigured(
            "The file `{file}` was not found, make sure to run the webpack "
            "build before the collectstatic command".format(
                file=self.assets_file))
Throws an exception if the assets file is not configured or cannot be found.
def waypoint_set_current_send(self, seq):
    '''wrapper for waypoint_set_current_send'''
    if self.mavlink10():
        self.mav.mission_set_current_send(
            self.target_system, self.target_component, seq)
    else:
        self.mav.waypoint_set_current_send(
            self.target_system, self.target_component, seq)
wrapper for waypoint_set_current_send
def show(self, xlim=None, ylim=None, units="thz"):
    """
    Show the plot using matplotlib.

    Args:
        xlim: Specifies the x-axis limits. Set to None for automatic
            determination.
        ylim: Specifies the y-axis limits.
        units: units for the frequencies. Accepted values thz, ev, mev,
            ha, cm-1, cm^-1.
    """
    plt = self.get_plot(xlim, ylim, units=units)
    plt.show()
Show the plot using matplotlib. Args: xlim: Specifies the x-axis limits. Set to None for automatic determination. ylim: Specifies the y-axis limits. units: units for the frequencies. Accepted values thz, ev, mev, ha, cm-1, cm^-1.
def color_array_by_hue_mix(value, palette):
    """
    Figure out the appropriate color for a binary string value by averaging
    the colors corresponding to the indices of each one that it contains.
    Makes for visualizations that intuitively show patch overlap.
    """
    if int(value, 2) > 0:
        # Convert bits to list and reverse order to avoid issues with
        # differing lengths
        int_list = [int(i) for i in list(value[2:])]
        int_list.reverse()

        # since this is a 1D array, we need the zeroth elements
        # of np.nonzero.
        locs = np.nonzero(int_list)[0]

        rgb_vals = [palette[i] for i in locs]
        rgb = [0] * len(rgb_vals[0])  # We don't know if it's rgb or rgba

        for val in rgb_vals:
            for index in range(len(val)):
                rgb[index] += val[index]

        for i in range(len(rgb)):
            rgb[i] /= len(locs)

        return tuple(rgb)

    if int(value, 2) == 0:
        return (1, 1, 1) if len(palette[0]) == 3 else (1, 1, 1, 1)

    return -1
Figure out the appropriate color for a binary string value by averaging the colors corresponding to the indices of each one that it contains. Makes for visualizations that intuitively show patch overlap.
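Tracing the averaging with an invented three-color palette:

palette = [(1, 0, 0), (0, 1, 0), (0, 0, 1)]
print(color_array_by_hue_mix('0b011', palette))  # (0.5, 0.5, 0.0), the mean of red and green
print(color_array_by_hue_mix('0b0', palette))    # (1, 1, 1), white for an all-zero value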
def format_obj_name(obj, delim="<>"):
    """ Formats the object name in a pretty way

        @obj: any python object
        @delim: the characters to wrap a parent object name in

        -> #str formatted name

        ..
            from vital.debug import format_obj_name
            format_obj_name(vital.debug.Timer)
            # -> 'Timer<vital.debug>'
            format_obj_name(vital.debug)
            # -> 'debug<vital>'
            format_obj_name(vital.debug.Timer.time)
            # -> 'time<vital.debug.Timer>'
        ..
    """
    pname = ""
    parent_name = get_parent_name(obj)
    if parent_name:
        pname = "{}{}{}".format(delim[0], get_parent_name(obj), delim[1])
    return "{}{}".format(get_obj_name(obj), pname)
Formats the object name in a pretty way @obj: any python object @delim: the characters to wrap a parent object name in -> #str formatted name .. from vital.debug import format_obj_name format_obj_name(vital.debug.Timer) # -> 'Timer<vital.debug>' format_obj_name(vital.debug) # -> 'debug<vital>' format_obj_name(vital.debug.Timer.time) # -> 'time<vital.debug.Timer>' ..
def dump(self, obj, key=None):
    """Write a pickled representation of obj to the open TFile."""
    if key is None:
        key = '_pickle'
    with preserve_current_directory():
        self.__file.cd()
        if sys.version_info[0] < 3:
            pickle.Pickler.dump(self, obj)
        else:
            super(Pickler, self).dump(obj)
        s = ROOT.TObjString(self.__io.getvalue())
        self.__io.reopen()
        s.Write(key)
        self.__file.GetFile().Flush()
        self.__pmap.clear()
Write a pickled representation of obj to the open TFile.
def gradients_X(self, dL_dK, X, X2=None):
    """
    Given the derivative of the objective wrt K (dL_dK), compute the
    derivative wrt X
    """
    if use_stationary_cython:
        return self._gradients_X_cython(dL_dK, X, X2)
    else:
        return self._gradients_X_pure(dL_dK, X, X2)
Given the derivative of the objective wrt K (dL_dK), compute the derivative wrt X
def get(self, action, default=None):
    """
    Returns given action value.

    :param action: Action name.
    :type action: unicode
    :param default: Default value if action is not found.
    :type default: object
    :return: Action.
    :rtype: QAction
    """
    try:
        return self.__getitem__(action)
    except KeyError:
        return default
Returns given action value. :param action: Action name. :type action: unicode :param default: Default value if action is not found. :type default: object :return: Action. :rtype: QAction
def moveGamepadFocusToNeighbor(self, eDirection, ulFrom):
    """
    Changes the Gamepad focus from one overlay to one of its neighbors.
    Returns VROverlayError_NoNeighbor if there is no neighbor in that
    direction
    """
    fn = self.function_table.moveGamepadFocusToNeighbor
    result = fn(eDirection, ulFrom)
    return result
Changes the Gamepad focus from one overlay to one of its neighbors. Returns VROverlayError_NoNeighbor if there is no neighbor in that direction
def load_all(self, workers=None, limit=None, n_expected=None):
    """Load all instances with multiple threads.

    :param workers: number of workers to use to load instances, which
                    defaults to what was given in the class initializer
    :param limit: return a maximum, which defaults to no limit
    :param n_expected: rerun the iteration on the data if we didn't find
                       enough data, or more specifically, number of found
                       data points is less than ``n_expected``; defaults
                       to all
    """
    if not self.has_data:
        self._preempt(True)
        # we did the best we could (avoid repeat later in this method)
        n_expected = 0
    keys = tuple(self.delegate.keys())
    if n_expected is not None and len(keys) < n_expected:
        self._preempt(True)
        keys = self.delegate.keys()
    # itertools.islice takes (iterable, stop); the original call had the
    # arguments swapped, which raises a TypeError for non-integer limits
    keys = it.islice(keys, limit) if limit is not None else keys
    pool = self._create_thread_pool(workers)
    logger.debug(f'workers={workers}, keys: {keys}')
    try:
        return iter(pool.map(self.delegate.load, keys))
    finally:
        pool.close()
Load all instances with multiple threads. :param workers: number of workers to use to load instances, which defaults to what was given in the class initializer :param limit: return a maximum, which defaults to no limit :param n_expected: rerun the iteration on the data if we didn't find enough data, or more specifically, number of found data points is less than ``n_expected``; defaults to all
def get_whoami(self):
    """
    A convenience function used in the event that you need to confirm that
    the broker thinks you are who you think you are.

    :returns dict whoami: Dict structure contains:

        * administrator: whether the user has admin privileges
        * name: user name
        * auth_backend: backend used to determine admin rights
    """
    path = Client.urls['whoami']
    whoami = self._call(path, 'GET')
    return whoami
A convenience function used in the event that you need to confirm that the broker thinks you are who you think you are. :returns dict whoami: Dict structure contains: * administrator: whether the user has admin privileges * name: user name * auth_backend: backend used to determine admin rights
def return_estimator(self):
    """Returns base learner using its origin and the given hyperparameters

    Returns:
        est (estimator): Estimator object
    """
    estimator = self.base_learner_origin.return_estimator()
    estimator = estimator.set_params(**self.hyperparameters)
    return estimator
Returns base learner using its origin and the given hyperparameters Returns: est (estimator): Estimator object
def _sideral(date, longitude=0., model='mean', eop_correction=True, terms=106):
    """Get the sidereal time at a defined date

    Args:
        date (Date):
        longitude (float): Longitude of the observer (in degrees)
            East positive/West negative.
        model (str): 'mean' or 'apparent' for GMST and GAST respectively

    Return:
        float: Sidereal time in degrees

    GMST: Greenwich Mean Sidereal Time
    LST: Local Sidereal Time (Mean)
    GAST: Greenwich Apparent Sidereal Time
    """
    t = date.change_scale('UT1').julian_century

    # Compute GMST in seconds
    theta = 67310.54841 + (876600 * 3600 + 8640184.812866) * t \
        + 0.093104 * t ** 2 - 6.2e-6 * t ** 3

    # Conversion from second (time) to degrees (angle)
    theta /= 240.

    if model == 'apparent':
        theta += equinox(date, eop_correction, terms)

    # Add local longitude to the sidereal time
    theta += longitude

    # Force to 0-360 degrees range
    theta %= 360.

    return theta
Get the sidereal time at a defined date Args: date (Date): longitude (float): Longitude of the observer (in degrees) East positive/West negative. model (str): 'mean' or 'apparent' for GMST and GAST respectively Return: float: Sidereal time in degrees GMST: Greenwich Mean Sidereal Time LST: Local Sidereal Time (Mean) GAST: Greenwich Apparent Sidereal Time
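The GMST polynomial is self-contained enough to check in isolation; a standalone sketch, assuming t is Julian centuries of UT1 since J2000.0:

def gmst_degrees(t):
    # Same polynomial as above: seconds of time, converted to degrees (1 s = 1/240 deg)
    theta = 67310.54841 + (876600 * 3600 + 8640184.812866) * t \
        + 0.093104 * t ** 2 - 6.2e-6 * t ** 3
    return (theta / 240.) % 360.

print(gmst_degrees(0.0))  # ~280.46 degrees at the J2000.0 epoch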
def num_mode_groups(self):
    """Most devices only provide a single mode group, however devices
    such as the Wacom Cintiq 22HD provide two mode groups.

    If multiple mode groups are available, a caller should use
    :meth:`~libinput.define.TabletPadModeGroup.has_button`,
    :meth:`~libinput.define.TabletPadModeGroup.has_ring`
    and :meth:`~libinput.define.TabletPadModeGroup.has_strip` to associate
    each button, ring and strip with the correct mode group.

    Returns:
        int: The number of mode groups available on this device.

    Raises:
        AttributeError
    """
    num = self._libinput.libinput_device_tablet_pad_get_num_mode_groups(
        self._handle)
    if num < 0:
        raise AttributeError('This device is not a tablet pad device')
    return num
Most devices only provide a single mode group, however devices such as the Wacom Cintiq 22HD provide two mode groups. If multiple mode groups are available, a caller should use :meth:`~libinput.define.TabletPadModeGroup.has_button`, :meth:`~libinput.define.TabletPadModeGroup.has_ring` and :meth:`~libinput.define.TabletPadModeGroup.has_strip` to associate each button, ring and strip with the correct mode group. Returns: int: The number of mode groups available on this device. Raises: AttributeError
def p_im(p):
    """ asm : IM expr
    """
    val = p[2].eval()
    if val not in (0, 1, 2):
        error(p.lineno(1), 'Invalid IM number %i' % val)
        p[0] = None
        return

    p[0] = Asm(p.lineno(1), 'IM %i' % val)
asm : IM expr
def istoken(docgraph, node_id, namespace=None):
    """returns true, iff the given node ID belongs to a token node.

    Parameters
    ----------
    node_id : str
        the node to be checked
    namespace : str or None
        If a namespace is given, only look for tokens in the given namespace.
        Otherwise, look for tokens in the default namespace of the given
        document graph.
    """
    if namespace is None:
        namespace = docgraph.ns
    return namespace + ':token' in docgraph.node[node_id]
returns true, iff the given node ID belongs to a token node. Parameters ---------- node_id : str the node to be checked namespace : str or None If a namespace is given, only look for tokens in the given namespace. Otherwise, look for tokens in the default namespace of the given document graph.
def serialize_oaipmh(self, pid, record):
    """Serialize a single record for OAI-PMH."""
    root = etree.Element(
        'oai_datacite',
        nsmap={
            None: 'http://schema.datacite.org/oai/oai-1.0/',
            'xsi': 'http://www.w3.org/2001/XMLSchema-instance',
            'xml': 'xml',
        },
        attrib={
            '{http://www.w3.org/2001/XMLSchema-instance}schemaLocation':
                'http://schema.datacite.org/oai/oai-1.0/ oai_datacite.xsd',
        }
    )
    root.append(E.isReferenceQuality(self.is_reference_quality))
    root.append(E.schemaVersion(self.serializer.version))
    root.append(E.datacentreSymbol(self.datacentre))
    root.append(E.payload(
        self.serializer.serialize_oaipmh(pid, record)
    ))
    return root
Serialize a single record for OAI-PMH.
def save(self, path):
    """Saves this catalogue's data to `path`.

    :param path: file path to save catalogue data to
    :type path: `str`
    """
    # Use a context manager so the file is flushed and closed; the original
    # left the handle returned by open() dangling.
    with open(path, 'w', newline='') as fh:
        writer = csv.writer(fh, delimiter=' ')
        rows = list(self.items())
        rows.sort(key=lambda x: x[0])
        writer.writerows(rows)
Saves this catalogue's data to `path`. :param path: file path to save catalogue data to :type path: `str`
def _match(self, **kwargs):
    """Method which indicates if the object matches specified criteria.

    Match accepts criteria as kwargs and looks them up on attributes.
    Actual matching is performed with fnmatch, so shell-like wildcards
    work within match strings. Examples:

    obj._match(AXTitle='Terminal*')
    obj._match(AXRole='TextField', AXRoleDescription='search text field')
    """
    for k in kwargs.keys():
        try:
            val = getattr(self, k)
        except _a11y.Error:
            return False
        # Not all values may be strings (e.g. size, position)
        if sys.version_info[:2] <= (2, 6):
            if isinstance(val, basestring):
                if not fnmatch.fnmatch(unicode(val), kwargs[k]):
                    return False
            else:
                if val != kwargs[k]:
                    return False
        elif sys.version_info[0] == 3:
            if isinstance(val, str):
                if not fnmatch.fnmatch(val, str(kwargs[k])):
                    return False
            else:
                if val != kwargs[k]:
                    return False
        else:
            if isinstance(val, str) or isinstance(val, unicode):
                if not fnmatch.fnmatch(val, kwargs[k]):
                    return False
            else:
                if val != kwargs[k]:
                    return False
    return True
Method which indicates if the object matches specified criteria. Match accepts criteria as kwargs and looks them up on attributes. Actual matching is performed with fnmatch, so shell-like wildcards work within match strings. Examples: obj._match(AXTitle='Terminal*') obj._match(AXRole='TextField', AXRoleDescription='search text field')
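A hedged usage sketch; the element and its attribute values are invented, assuming an accessibility object exposing AX* attributes as above:

# Suppose `window` has AXTitle='Terminal - bash' and AXRole='AXWindow':
window._match(AXTitle='Terminal*')                     # True, the wildcard matches the title
window._match(AXTitle='Terminal*', AXRole='AXButton')  # False, the role differs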
def url(self):
    """
    Returns url for accessing pipeline entity.
    """
    return self.get_url(server_url=self._session.server_url,
                        pipeline_name=self.data.name)
Returns url for accessing pipeline entity.
def version(self, id, expand=None):
    """Get a version Resource.

    :param id: ID of the version to get
    :type id: str
    :param expand: extra information to fetch inside each resource
    :type expand: Optional[Any]

    :rtype: Version
    """
    version = Version(self._options, self._session)
    params = {}
    if expand is not None:
        params['expand'] = expand
    version.find(id, params=params)
    return version
Get a version Resource. :param id: ID of the version to get :type id: str :param expand: extra information to fetch inside each resource :type expand: Optional[Any] :rtype: Version
def start(self, work):
    """
    Hand the main thread to the window and continue work in the provided
    function. A state is passed as the first argument that contains a
    `running` flag. The function is expected to exit if the flag becomes
    false. The flag can also be set to false to stop the window event loop
    and continue in the main thread after the `start()` call.
    """
    assert threading.current_thread() == threading.main_thread()
    assert not self.state.running
    self.state.running = True
    self.thread = threading.Thread(target=work, args=(self.state,))
    self.thread.start()
    while self.state.running:
        try:
            before = time.time()
            self.update()
            duration = time.time() - before
            plt.pause(max(0.001, self.refresh - duration))
        except KeyboardInterrupt:
            self.state.running = False
            self.thread.join()
            return
Hand the main thread to the window and continue work in the provided function. A state is passed as the first argument that contains a `running` flag. The function is expected to exit if the flag becomes false. The flag can also be set to false to stop the window event loop and continue in the main thread after the `start()` call.
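A minimal worker that cooperates with the `running` flag (the window object and everything else here is invented for illustration):

import time

def work(state):
    while state.running:      # exit promptly when the window stops
        time.sleep(0.1)       # stand-in for real computation
    # setting state.running = False from inside work would instead stop
    # the event loop and return control after the start() call

# window.start(work)  # blocks in the plot loop until interrupted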
def results_class_wise_average_metrics(self):
    """Class-wise averaged metrics

    Returns
    -------
    dict
        results in a dictionary format
    """
    event_wise_results = self.results_class_wise_metrics()

    event_wise_f_measure = []
    event_wise_precision = []
    event_wise_recall = []
    event_wise_error_rate = []
    event_wise_deletion_rate = []
    event_wise_insertion_rate = []
    event_wise_sensitivity = []
    event_wise_specificity = []
    event_wise_balanced_accuracy = []
    event_wise_accuracy = []

    for event_label in event_wise_results:
        # F-measure
        event_wise_f_measure.append(
            event_wise_results[event_label]['f_measure']['f_measure'])
        event_wise_precision.append(
            event_wise_results[event_label]['f_measure']['precision'])
        event_wise_recall.append(
            event_wise_results[event_label]['f_measure']['recall'])

        # Error rate
        event_wise_error_rate.append(
            event_wise_results[event_label]['error_rate']['error_rate'])
        event_wise_deletion_rate.append(
            event_wise_results[event_label]['error_rate']['deletion_rate'])
        event_wise_insertion_rate.append(
            event_wise_results[event_label]['error_rate']['insertion_rate'])

        # Accuracy
        if 'sensitivity' in event_wise_results[event_label]['accuracy']:
            event_wise_sensitivity.append(
                event_wise_results[event_label]['accuracy']['sensitivity'])
        if 'specificity' in event_wise_results[event_label]['accuracy']:
            event_wise_specificity.append(
                event_wise_results[event_label]['accuracy']['specificity'])
        if 'balanced_accuracy' in event_wise_results[event_label]['accuracy']:
            event_wise_balanced_accuracy.append(
                event_wise_results[event_label]['accuracy']['balanced_accuracy'])
        if 'accuracy' in event_wise_results[event_label]['accuracy']:
            event_wise_accuracy.append(
                event_wise_results[event_label]['accuracy']['accuracy'])

    if event_wise_f_measure:
        event_wise_f_measure_dict = {
            'f_measure': float(numpy.nanmean(event_wise_f_measure)),
            'precision': float(numpy.nanmean(event_wise_precision)),
            'recall': float(numpy.nanmean(event_wise_recall))
        }
    else:
        event_wise_f_measure_dict = {}

    if event_wise_error_rate:
        event_wise_error_rate_dict = {
            'error_rate': float(numpy.nanmean(event_wise_error_rate)),
            'deletion_rate': float(numpy.nanmean(event_wise_deletion_rate)),
            'insertion_rate': float(numpy.nanmean(event_wise_insertion_rate))
        }
    else:
        event_wise_error_rate_dict = {}

    if event_wise_accuracy:
        event_wise_accuracy_dict = {
            'sensitivity': float(numpy.nanmean(event_wise_sensitivity)),
            'specificity': float(numpy.nanmean(event_wise_specificity)),
            'balanced_accuracy': float(numpy.nanmean(event_wise_balanced_accuracy)),
            'accuracy': float(numpy.nanmean(event_wise_accuracy))
        }
    else:
        event_wise_accuracy_dict = {}

    return {
        'f_measure': event_wise_f_measure_dict,
        'error_rate': event_wise_error_rate_dict,
        'accuracy': event_wise_accuracy_dict
    }
Class-wise averaged metrics Returns ------- dict results in a dictionary format
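The macro-averaging above leans on numpy.nanmean, which drops NaN-valued classes from the average:

import numpy

per_class_f = [0.8, 0.6, numpy.nan]  # invented per-class F-measures
print(float(numpy.nanmean(per_class_f)))  # 0.7, the NaN class is excluded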
def design_stat_cooling(self, value="Cooling"):
    """Corresponds to IDD Field `design_stat_cooling`

    Args:
        value (str): value for IDD Field `design_stat_cooling`
            Accepted values are:
                - Cooling
            Default value: Cooling
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is not None:
        try:
            value = str(value)
        except ValueError:
            raise ValueError(
                'value {} need to be of type str '
                'for field `design_stat_cooling`'.format(value))
        if ',' in value:
            raise ValueError('value should not contain a comma '
                             'for field `design_stat_cooling`')
        vals = set()
        vals.add("Cooling")
        if value not in vals:
            raise ValueError('value {} is not an accepted value for '
                             'field `design_stat_cooling`'.format(value))

    self._design_stat_cooling = value
Corresponds to IDD Field `design_stat_cooling` Args: value (str): value for IDD Field `design_stat_cooling` Accepted values are: - Cooling Default value: Cooling if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
async def unmount(self):
    """Unmount this block device."""
    self._data = await self._handler.unmount(
        system_id=self.node.system_id, id=self.id)
Unmount this block device.
def findXdk(self, name):
    """
    Looks up the xdk item based on the current name.

    :param      name | <str>

    :return     <XdkItem> || None
    """
    for i in range(self.uiContentsTREE.topLevelItemCount()):
        item = self.uiContentsTREE.topLevelItem(i)
        if item.text(0) == name:
            return item
    return None
Looks up the xdk item based on the current name. :param name | <str> :return <XdkItem> || None
def return_videos(self, begtime, endtime):
    """It returns the videos and beginning and end time of the video segment.

    The MFF video format is not well documented. As far as I can see, the
    manual 20150805 says that there might be multiple .mp4 files but I only
    see one .mov file (and no way to specify which video file to read). In
    addition, there is a file "po_videoSyncups.xml" which seems to contain
    some time information, but the sampleTime does not start at zero, but at
    a large number. I don't know how to use the info in po_videoSyncups.xml.

    Parameters
    ----------
    begtime : float
        start time of the period of interest
    endtime : float
        end time of the period of interest

    Returns
    -------
    list of one path
        list with only one element
    float
        start time of the video
    float
        end time of the video
    """
    try:
        self._orig['po_videoSyncups']
    except KeyError:
        raise OSError('No po_videoSyncups.xml in folder to sync videos')

    if not self._videos:
        raise OSError('No mp4 video files')

    mp4_file = self._videos[:1]  # make clear we only use the first video
    return mp4_file, begtime, endtime
It returns the videos and beginning and end time of the video segment. The MFF video format is not well documented. As far as I can see, the manual 20150805 says that there might be multiple .mp4 files but I only see one .mov file (and no way to specify which video file to read). In addition, there is a file "po_videoSyncups.xml" which seems to contain some time information, but the sampleTime does not start at zero, but at a large number. I don't know how to use the info in po_videoSyncups.xml. Parameters ---------- begtime : float start time of the period of interest endtime : float end time of the period of interest Returns ------- list of one path list with only one element float start time of the video float end time of the video
def add_dhcp_interface(self, interface_id, dynamic_index, zone_ref=None,
                       vlan_id=None, comment=None):
    """
    Add a DHCP interface on a single FW

    :param int interface_id: interface id
    :param int dynamic_index: index number for dhcp interface
    :param bool primary_mgt: whether to make this primary mgt
    :param str zone_ref: zone reference, can be name, href or Zone
    :raises EngineCommandFailed: failure creating interface
    :return: None

    See :class:`~DHCPInterface` for more information
    """
    _interface = {
        'interface_id': interface_id,
        'interfaces': [{'nodes': [
            {'dynamic': True, 'dynamic_index': dynamic_index}],
            'vlan_id': vlan_id}],
        'comment': comment,
        'zone_ref': zone_ref}

    if 'single_fw' in self._engine.type:
        _interface.update(interface='single_node_interface')

    try:
        interface = self._engine.interface.get(interface_id)
        vlan = interface.vlan_interface.get(vlan_id)
        # Interface exists, so we need to update but check if VLAN
        # already exists
        if vlan is None:
            interface._add_interface(**_interface)
            interface.update()
    except InterfaceNotFound:
        interface = Layer3PhysicalInterface(**_interface)
        return self._engine.add_interface(interface)
Add a DHCP interface on a single FW :param int interface_id: interface id :param int dynamic_index: index number for dhcp interface :param bool primary_mgt: whether to make this primary mgt :param str zone_ref: zone reference, can be name, href or Zone :raises EngineCommandFailed: failure creating interface :return: None See :class:`~DHCPInterface` for more information
def set_updated(self):
    """
    Mark the module as updated.

    We check if the actual content has changed and if so we trigger an
    update in py3status.
    """
    # get latest output
    output = []
    for method in self.methods.values():
        data = method["last_output"]
        if isinstance(data, list):
            if self.testing and data:
                data[0]["cached_until"] = method.get("cached_until")
            output.extend(data)
        else:
            # if the output is not 'valid' then don't add it.
            if data.get("full_text") or "separator" in data:
                if self.testing:
                    data["cached_until"] = method.get("cached_until")
                output.append(data)

    # if changed store and force display update.
    if output != self.last_output:
        # has the modules output become urgent?
        # we only care the update that this happens
        # not any after then.
        urgent = True in [x.get("urgent") for x in output]
        if urgent != self.urgent:
            self.urgent = urgent
        else:
            urgent = False
        self.last_output = output
        self._py3_wrapper.notify_update(self.module_full_name, urgent)
Mark the module as updated. We check if the actual content has changed and if so we trigger an update in py3status.
def _subtask_result(self, idx, value):
    """Receive a result from a single subtask."""
    self._results[idx] = value
    if len(self._results) == self._num_tasks:
        self.set_result([
            self._results[i] for i in range(self._num_tasks)
        ])
Receive a result from a single subtask.
def get_doc(project, source_code, offset, resource=None, maxfixes=1):
    """Get the pydoc"""
    fixer = fixsyntax.FixSyntax(project, source_code, resource, maxfixes)
    pyname = fixer.pyname_at(offset)
    if pyname is None:
        return None
    pyobject = pyname.get_object()
    return PyDocExtractor().get_doc(pyobject)
Get the pydoc
def find_usage(self):
    """
    Determine the current usage for each limit of this service,
    and update corresponding Limit via
    :py:meth:`~.AwsLimit._add_current_usage`.
    """
    logger.debug("Checking usage for service %s", self.service_name)
    self.connect()
    for lim in self.limits.values():
        lim._reset_usage()
    self._find_cluster_manual_snapshots()
    self._find_cluster_subnet_groups()
    self._have_usage = True
    logger.debug("Done checking usage.")
Determine the current usage for each limit of this service, and update corresponding Limit via :py:meth:`~.AwsLimit._add_current_usage`.
def unrecognized_arguments_error(self, args, parsed, extras):
    """
    This exists because argparser is dumb and naive and doesn't fail
    unrecognized arguments early.
    """
    # loop variants
    kwargs = vars(parsed)
    failed = list(extras)
    # initial values
    runtime, subparser, idx = (self, self.argparser, 0)

    # recursion not actually needed when it can be flattened.
    while isinstance(runtime, Runtime):
        cmd = kwargs.pop(runtime.action_key)
        # can happen if it wasn't set, or is set but from a default
        # value (thus not provided by args)
        action_idx = None if cmd not in args else args.index(cmd)
        if cmd not in args and cmd is not None:
            # this normally shouldn't happen, and the test case
            # showed that the parsing will not flip down to the
            # forced default subparser - this can remain a debug
            # message until otherwise.
            logger.debug(
                "command for prog=%r is set to %r without being specified "
                "as part of the input arguments - the following error "
                "message may contain misleading references",
                subparser.prog, cmd
            )
        subargs = args[idx:action_idx]
        subparsed, subextras = subparser.parse_known_args(subargs)
        if subextras:
            subparser.unrecognized_arguments_error(subextras)
            # since the failed arguments are in order
            failed = failed[len(subextras):]
        if not failed:
            # have taken everything, quit now.
            # also note that if cmd was really None it would
            # cause KeyError below, but fortunately it also
            # forced action_idx to be None which took all
            # remaining tokens from failed, so definitely get
            # out of here.
            break
        # advance the values
        # note that any internal consistency will almost certainly
        # result in KeyError being raised.
        details = runtime.get_argparser_details(subparser)
        runtime = details.runtimes[cmd]
        subparser = details.subparsers[cmd]
        idx = action_idx + 1

    if failed:
        subparser.unrecognized_arguments_error(failed)
    sys.exit(2)
This exists because argparser is dumb and naive and doesn't fail unrecognized arguments early.
def remote_file_exists(self):
    """Verify whether the file (scene) exists on AWS Storage."""
    url = join(self.base_url, 'index.html')
    return super(AWSDownloader, self).remote_file_exists(url)
Verify whether the file (scene) exists on AWS Storage.
def _posterior(self, x):
    """Internal function to calculate and cache the posterior """
    yedge = self._nuis_pdf.marginalization_bins()
    yc = 0.5 * (yedge[1:] + yedge[:-1])
    yw = yedge[1:] - yedge[:-1]
    like_array = self.like(x[:, np.newaxis], yc[np.newaxis, :]) * yw
    like_array /= like_array.sum()
    self._post = like_array.sum(1)
    self._post_interp = castro.Interpolator(x, self._post)
    return self._post
Internal function to calculate and cache the posterior
def remove_stale_indexes_from_bika_catalog(portal):
    """Removes stale indexes and metadata from bika_catalog.

    Most of these indexes and metadata were used for Samples, but they are
    no longer used.
    """
    logger.info("Removing stale indexes and metadata from bika_catalog ...")
    cat_id = "bika_catalog"

    indexes_to_remove = [
        "getAnalyst",
        "getAnalysts",
        "getAnalysisService",
        "getClientOrderNumber",
        "getClientReference",
        "getClientSampleID",
        "getContactTitle",
        "getDateDisposed",
        "getDateExpired",
        "getDateOpened",
        "getDatePublished",
        "getInvoiced",
        "getPreserver",
        "getSamplePointTitle",
        "getSamplePointUID",
        "getSampler",
        "getScheduledSamplingSampler",
        "getSamplingDate",
        "getWorksheetTemplateTitle",
        "BatchUID",
    ]

    metadata_to_remove = [
        "getAnalysts",
        "getClientOrderNumber",
        "getClientReference",
        "getClientSampleID",
        "getContactTitle",
        "getSamplePointTitle",
        "getAnalysisService",
        "getDatePublished",
    ]

    for index in indexes_to_remove:
        del_index(portal, cat_id, index)

    for metadata in metadata_to_remove:
        del_metadata(portal, cat_id, metadata)

    commit_transaction(portal)
Removes stale indexes and metadata from bika_catalog. Most of these indexes and metadata were used for Samples, but they are no longer used.
def get_dataset(self, key, info):
    """Load dataset designated by the given key from file"""
    logger.debug('Reading dataset {}'.format(key.name))

    # Read data from file and calibrate if necessary
    if 'longitude' in key.name:
        data = self.geo_data['lon']
    elif 'latitude' in key.name:
        data = self.geo_data['lat']
    else:
        tic = datetime.now()
        data = self.calibrate(self.nc['data'].isel(time=0),
                              calibration=key.calibration,
                              channel=key.name)
        logger.debug('Calibration time: {}'.format(datetime.now() - tic))

    # Mask space pixels
    data = data.where(self.meta['earth_mask'])

    # Set proper dimension names
    data = data.rename({'xc': 'x', 'yc': 'y'})

    # Update metadata
    self._update_metadata(data, ds_info=info)

    return data
Load dataset designated by the given key from file
def locate(desktop_filename_or_name):
    '''Locate a .desktop from the standard locations.

    Find the path to the .desktop file of a given .desktop filename or
    application name.

    Standard locations:

    - ``~/.local/share/applications/``
    - ``/usr/share/applications``

    Args:
        desktop_filename_or_name (str): Either the filename of a .desktop
            file or the name of an application.

    Returns:
        list: A list of all matching .desktop files found.
    '''
    paths = [
        os.path.expanduser('~/.local/share/applications'),
        '/usr/share/applications']
    result = []
    for path in paths:
        for file in os.listdir(path):
            if desktop_filename_or_name in file.split(
                    '.') or desktop_filename_or_name == file:
                # Example: org.gnome.gedit
                result.append(os.path.join(path, file))
            else:
                file_parsed = parse(os.path.join(path, file))
                try:
                    if desktop_filename_or_name.lower() == file_parsed[
                            'Name'].lower():
                        result.append(file)
                    elif desktop_filename_or_name.lower() == file_parsed[
                            'Exec'].split(' ')[0]:
                        result.append(file)
                except KeyError:
                    pass
    # Rebuild the list instead of calling result.remove() while iterating,
    # which skips elements.
    result = [res for res in result if res.endswith('.desktop')]
    # The original tested `result.endswith(...)`, an AttributeError on a
    # list; the intended check is on the query string.
    if not result and not desktop_filename_or_name.endswith('.desktop'):
        result.extend(locate(desktop_filename_or_name + '.desktop'))
    return result
Locate a .desktop from the standard locations. Find the path to the .desktop file of a given .desktop filename or application name. Standard locations: - ``~/.local/share/applications/`` - ``/usr/share/applications`` Args: desktop_filename_or_name (str): Either the filename of a .desktop file or the name of an application. Returns: list: A list of all matching .desktop files found.
def _get_channel_state_statelessly(self, grpc_channel, channel_id):
    """
    We get state of the channel (nonce, amount, unspent_amount).
    We do it by securely combining information from the server and the
    blockchain:
    https://github.com/singnet/wiki/blob/master/multiPartyEscrowContract/MultiPartyEscrow_stateless_client.md
    """
    server = self._get_channel_state_from_server(grpc_channel, channel_id)
    blockchain = self._get_channel_state_from_blockchain(channel_id)

    if server["current_nonce"] == blockchain["nonce"]:
        unspent_amount = blockchain["value"] - server["current_signed_amount"]
    else:
        unspent_amount = None  # in this case we cannot securely define unspent_amount yet

    return (server["current_nonce"],
            server["current_signed_amount"],
            unspent_amount)
We get state of the channel (nonce, amount, unspent_amount). We do it by securely combining information from the server and the blockchain: https://github.com/singnet/wiki/blob/master/multiPartyEscrowContract/MultiPartyEscrow_stateless_client.md
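A toy trace of the consistency check (all numbers invented):

server = {"current_nonce": 7, "current_signed_amount": 40}
blockchain = {"nonce": 7, "value": 100}

if server["current_nonce"] == blockchain["nonce"]:
    unspent = blockchain["value"] - server["current_signed_amount"]
else:
    unspent = None  # nonce mismatch: the amount cannot be trusted yet

print(unspent)  # 60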
def privileges(
    state, host, user, privileges,
    user_hostname='localhost', database='*', table='*',
    present=True, flush=True,
    # Details for speaking to MySQL via `mysql` CLI
    mysql_user=None, mysql_password=None,
    mysql_host=None, mysql_port=None,
):
    '''
    Add/remove MySQL privileges for a user, either global, database or
    table specific.

    + user: name of the user to manage privileges for
    + privileges: list of privileges the user should have
    + user_hostname: the hostname of the user
    + database: name of the database to grant privileges to (defaults to all)
    + table: name of the table to grant privileges to (defaults to all)
    + present: whether these privileges should exist (``False`` to ``REVOKE``)
    + flush: whether to flush (and update) the privileges table after any changes
    + mysql_*: global module arguments, see above
    '''
    # Ensure we have a list
    if isinstance(privileges, six.string_types):
        privileges = [privileges]

    if database != '*':
        database = '`{0}`'.format(database)

    if table != '*':
        table = '`{0}`'.format(table)

        # We can't set privileges on *.tablename as MySQL won't allow it
        if database == '*':
            raise OperationError((
                'Cannot apply MySQL privileges on {0}.{1}, no database provided'
            ).format(database, table))

    database_table = '{0}.{1}'.format(database, table)

    user_grants = host.fact.mysql_user_grants(
        user, user_hostname,
        mysql_user, mysql_password,
        mysql_host, mysql_port,
    )

    has_privileges = False

    if database_table in user_grants:
        existing_privileges = [
            'ALL' if privilege == 'ALL PRIVILEGES' else privilege
            for privilege in user_grants[database_table]['privileges']
        ]

        has_privileges = (
            database_table in user_grants
            and all(
                privilege in existing_privileges
                for privilege in privileges
            )
        )

    target = action = None

    # No privilege and we want it
    if not has_privileges and present:
        action = 'GRANT'
        target = 'TO'

    # Permission we don't want
    elif has_privileges and not present:
        action = 'REVOKE'
        target = 'FROM'

    if target and action:
        command = (
            '{action} {privileges} '
            'ON {database}.{table} '
            '{target} "{user}"@"{user_hostname}"'
        ).format(
            privileges=', '.join(privileges),
            action=action,
            target=target,
            database=database,
            table=table,
            user=user,
            user_hostname=user_hostname,
        ).replace('`', '\\`')

        yield make_execute_mysql_command(
            command,
            user=mysql_user,
            password=mysql_password,
            host=mysql_host,
            port=mysql_port,
        )

        if flush:
            yield make_execute_mysql_command(
                'FLUSH PRIVILEGES',
                user=mysql_user,
                password=mysql_password,
                host=mysql_host,
                port=mysql_port,
            )
Add/remove MySQL privileges for a user, either global, database or table specific. + user: name of the user to manage privileges for + privileges: list of privileges the user should have + user_hostname: the hostname of the user + database: name of the database to grant privileges to (defaults to all) + table: name of the table to grant privileges to (defaults to all) + present: whether these privileges should exist (``False`` to ``REVOKE``) + flush: whether to flush (and update) the privileges table after any changes + mysql_*: global module arguments, see above
def create_server(AssociatePublicIpAddress=None, DisableAutomatedBackup=None,
                  Engine=None, EngineModel=None, EngineVersion=None,
                  EngineAttributes=None, BackupRetentionCount=None,
                  ServerName=None, InstanceProfileArn=None, InstanceType=None,
                  KeyPair=None, PreferredMaintenanceWindow=None,
                  PreferredBackupWindow=None, SecurityGroupIds=None,
                  ServiceRoleArn=None, SubnetIds=None, BackupId=None):
    """
    Creates and immediately starts a new server. The server is ready to use
    when it is in the HEALTHY state. By default, you can create a maximum of
    10 servers.

    This operation is asynchronous.

    A LimitExceededException is thrown when you have created the maximum
    number of servers (10). A ResourceAlreadyExistsException is thrown when a
    server with the same name already exists in the account. A
    ResourceNotFoundException is thrown when you specify a backup ID that is
    not valid or is for a backup that does not exist. A ValidationException
    is thrown when parameters of the request are not valid.

    If you do not specify a security group by adding the SecurityGroupIds
    parameter, AWS OpsWorks creates a new security group. The default
    security group opens the Chef server to the world on TCP port 443. If a
    KeyName is present, AWS OpsWorks enables SSH access. SSH is also open to
    the world on TCP port 22.

    By default, the Chef Server is accessible from any IP address. We
    recommend that you update your security group rules to allow access from
    known IP addresses and address ranges only. To edit security group rules,
    open Security Groups in the navigation pane of the EC2 management console.

    See also: AWS API Documentation

    :example: response = client.create_server(
        AssociatePublicIpAddress=True|False,
        DisableAutomatedBackup=True|False,
        Engine='string',
        EngineModel='string',
        EngineVersion='string',
        EngineAttributes=[
            {
                'Name': 'string',
                'Value': 'string'
            },
        ],
        BackupRetentionCount=123,
        ServerName='string',
        InstanceProfileArn='string',
        InstanceType='string',
        KeyPair='string',
        PreferredMaintenanceWindow='string',
        PreferredBackupWindow='string',
        SecurityGroupIds=[
            'string',
        ],
        ServiceRoleArn='string',
        SubnetIds=[
            'string',
        ],
        BackupId='string'
    )

    :type AssociatePublicIpAddress: boolean
    :param AssociatePublicIpAddress: Associate a public IP address with a
        server that you are launching. Valid values are true or false. The
        default value is true.

    :type DisableAutomatedBackup: boolean
    :param DisableAutomatedBackup: Enable or disable scheduled backups. Valid
        values are true or false. The default value is true.

    :type Engine: string
    :param Engine: The configuration management engine to use. Valid values
        include Chef.

    :type EngineModel: string
    :param EngineModel: The engine model, or option. Valid values include
        Single.

    :type EngineVersion: string
    :param EngineVersion: The major release version of the engine that you
        want to use. Values depend on the engine that you choose.

    :type EngineAttributes: list
    :param EngineAttributes: Optional engine attributes on a specified
        server. Attributes accepted in a createServer request:

        CHEF_PIVOTAL_KEY: A base64-encoded RSA private key that is not
        stored by AWS OpsWorks for Chef. This private key is required to
        access the Chef API. When no CHEF_PIVOTAL_KEY is set, one is
        generated and returned in the response.

        CHEF_DELIVERY_ADMIN_PASSWORD: The password for the administrative
        user in the Chef Automate GUI. The password length is a minimum of
        eight characters, and a maximum of 32. The password can contain
        letters, numbers, and special characters (!/@#$%^+=_). The password
        must contain at least one lower case letter, one upper case letter,
        one number, and one special character. When no
        CHEF_DELIVERY_ADMIN_PASSWORD is set, one is generated and returned
        in the response.

        (dict) --A name and value pair that is specific to the engine of
        the server.

            Name (string) --The name of the engine attribute.
            Value (string) --The value of the engine attribute.

    :type BackupRetentionCount: integer
    :param BackupRetentionCount: The number of automated backups that you
        want to keep. Whenever a new backup is created, AWS OpsWorks for
        Chef Automate deletes the oldest backups if this number is exceeded.
        The default value is 1.

    :type ServerName: string
    :param ServerName: [REQUIRED]
        The name of the server. The server name must be unique within your
        AWS account, within each region. Server names must start with a
        letter; then letters, numbers, or hyphens (-) are allowed, up to a
        maximum of 40 characters.

    :type InstanceProfileArn: string
    :param InstanceProfileArn: [REQUIRED]
        The ARN of the instance profile that your Amazon EC2 instances use.
        Although the AWS OpsWorks console typically creates the instance
        profile for you, if you are using API commands instead, run the
        service-role-creation.yaml AWS CloudFormation template, located at
        https://s3.amazonaws.com/opsworks-cm-us-east-1-prod-default-assets/misc/opsworks-cm-roles.yaml.
        This template creates a CloudFormation stack that includes the
        instance profile you need.

    :type InstanceType: string
    :param InstanceType: [REQUIRED]
        The Amazon EC2 instance type to use. Valid values must be specified
        in the following format: ^([cm][34]|t2).* For example, m4.large.
        Valid values are t2.medium, m4.large, or m4.2xlarge.

    :type KeyPair: string
    :param KeyPair: The Amazon EC2 key pair to set for the instance. This
        parameter is optional; if desired, you may specify this parameter to
        connect to your instances by using SSH.

    :type PreferredMaintenanceWindow: string
    :param PreferredMaintenanceWindow: The start time for a one-hour period
        each week during which AWS OpsWorks for Chef Automate performs
        maintenance on the instance. Valid values must be specified in the
        following format: DDD:HH:MM. The specified time is in coordinated
        universal time (UTC). The default value is a random one-hour period
        on Tuesday, Wednesday, or Friday. See TimeWindowDefinition for more
        information.
        Example: Mon:08:00, which represents a start time of every Monday
        at 08:00 UTC. (8:00 a.m.)

    :type PreferredBackupWindow: string
    :param PreferredBackupWindow: The start time for a one-hour period
        during which AWS OpsWorks for Chef Automate backs up
        application-level data on your server if automated backups are
        enabled. Valid values must be specified in one of the following
        formats:

        HH:MM for daily backups
        DDD:HH:MM for weekly backups

        The specified time is in coordinated universal time (UTC). The
        default value is a random, daily start time.
        Example: 08:00, which represents a daily start time of 08:00 UTC.
        Example: Mon:08:00, which represents a start time of every Monday
        at 08:00 UTC. (8:00 a.m.)

    :type SecurityGroupIds: list
    :param SecurityGroupIds: A list of security group IDs to attach to the
        Amazon EC2 instance. If you add this parameter, the specified
        security groups must be within the VPC that is specified by
        SubnetIds. If you do not specify this parameter, AWS OpsWorks for
        Chef Automate creates one new security group that uses TCP ports 22
        and 443, open to 0.0.0.0/0 (everyone).

        (string) --

    :type ServiceRoleArn: string
    :param ServiceRoleArn: [REQUIRED]
        The service role that the AWS OpsWorks for Chef Automate service
        backend uses to work with your account. Although the AWS OpsWorks
        management console typically creates the service role for you, if
        you are using the AWS CLI or API commands, run the
        service-role-creation.yaml AWS CloudFormation template, located at
        https://s3.amazonaws.com/opsworks-stuff/latest/service-role-creation.yaml.
        This template creates a CloudFormation stack that includes the
        service role that you need.

    :type SubnetIds: list
    :param SubnetIds: The IDs of subnets in which to launch the server EC2
        instance.
        Amazon EC2-Classic customers: This field is required. All servers
        must run within a VPC. The VPC must have 'Auto Assign Public IP'
        enabled.
        EC2-VPC customers: This field is optional. If you do not specify
        subnet IDs, your EC2 instances are created in a default subnet that
        is selected by Amazon EC2. If you specify subnet IDs, the VPC must
        have 'Auto Assign Public IP' enabled.
        For more information about supported Amazon EC2 platforms, see
        Supported Platforms.

        (string) --

    :type BackupId: string
    :param BackupId: If you specify this field, AWS OpsWorks for Chef
        Automate creates the server by using the backup represented by
        BackupId.

    :rtype: dict
    :return: {
        'Server': {
            'AssociatePublicIpAddress': True|False,
            'BackupRetentionCount': 123,
            'ServerName': 'string',
            'CreatedAt': datetime(2015, 1, 1),
            'CloudFormationStackArn': 'string',
            'DisableAutomatedBackup': True|False,
            'Endpoint': 'string',
            'Engine': 'string',
            'EngineModel': 'string',
            'EngineAttributes': [
                {
                    'Name': 'string',
                    'Value': 'string'
                },
            ],
            'EngineVersion': 'string',
            'InstanceProfileArn': 'string',
            'InstanceType': 'string',
            'KeyPair': 'string',
            'MaintenanceStatus': 'SUCCESS'|'FAILED',
            'PreferredMaintenanceWindow': 'string',
            'PreferredBackupWindow': 'string',
            'SecurityGroupIds': [
                'string',
            ],
            'ServiceRoleArn': 'string',
            'Status': 'BACKING_UP'|'CONNECTION_LOST'|'CREATING'|'DELETING'|'MODIFYING'|'FAILED'|'HEALTHY'|'RUNNING'|'RESTORING'|'SETUP'|'UNDER_MAINTENANCE'|'UNHEALTHY'|'TERMINATED',
            'StatusReason': 'string',
            'SubnetIds': [
                'string',
            ],
            'ServerArn': 'string'
        }
    }

    :returns:
        CHEF_PIVOTAL_KEY: A base64-encoded RSA private key that is generated
        by AWS OpsWorks for Chef Automate. This private key is required to
        access the Chef API.
        CHEF_STARTER_KIT: A base64-encoded ZIP file. The ZIP file contains a
        Chef starter kit, which includes a README, a configuration file, and
        the required RSA private key. Save this file, unzip it, and then
        change to the directory where you've unzipped the file contents.
        From this directory, you can run Knife commands.
    """
    pass
Creates and immedately starts a new server. The server is ready to use when it is in the HEALTHY state. By default, you can create a maximum of 10 servers. This operation is asynchronous. A LimitExceededException is thrown when you have created the maximum number of servers (10). A ResourceAlreadyExistsException is thrown when a server with the same name already exists in the account. A ResourceNotFoundException is thrown when you specify a backup ID that is not valid or is for a backup that does not exist. A ValidationException is thrown when parameters of the request are not valid. If you do not specify a security group by adding the SecurityGroupIds parameter, AWS OpsWorks creates a new security group. The default security group opens the Chef server to the world on TCP port 443. If a KeyName is present, AWS OpsWorks enables SSH access. SSH is also open to the world on TCP port 22. By default, the Chef Server is accessible from any IP address. We recommend that you update your security group rules to allow access from known IP addresses and address ranges only. To edit security group rules, open Security Groups in the navigation pane of the EC2 management console. See also: AWS API Documentation :example: response = client.create_server( AssociatePublicIpAddress=True|False, DisableAutomatedBackup=True|False, Engine='string', EngineModel='string', EngineVersion='string', EngineAttributes=[ { 'Name': 'string', 'Value': 'string' }, ], BackupRetentionCount=123, ServerName='string', InstanceProfileArn='string', InstanceType='string', KeyPair='string', PreferredMaintenanceWindow='string', PreferredBackupWindow='string', SecurityGroupIds=[ 'string', ], ServiceRoleArn='string', SubnetIds=[ 'string', ], BackupId='string' ) :type AssociatePublicIpAddress: boolean :param AssociatePublicIpAddress: Associate a public IP address with a server that you are launching. Valid values are true or false . The default value is true . :type DisableAutomatedBackup: boolean :param DisableAutomatedBackup: Enable or disable scheduled backups. Valid values are true or false . The default value is true . :type Engine: string :param Engine: The configuration management engine to use. Valid values include Chef . :type EngineModel: string :param EngineModel: The engine model, or option. Valid values include Single . :type EngineVersion: string :param EngineVersion: The major release version of the engine that you want to use. Values depend on the engine that you choose. :type EngineAttributes: list :param EngineAttributes: Optional engine attributes on a specified server. Attributes accepted in a createServer request: CHEF_PIVOTAL_KEY : A base64-encoded RSA private key that is not stored by AWS OpsWorks for Chef. This private key is required to access the Chef API. When no CHEF_PIVOTAL_KEY is set, one is generated and returned in the response. CHEF_DELIVERY_ADMIN_PASSWORD : The password for the administrative user in the Chef Automate GUI. The password length is a minimum of eight characters, and a maximum of 32. The password can contain letters, numbers, and special characters (!/@#$%^+=_). The password must contain at least one lower case letter, one upper case letter, one number, and one special character. When no CHEF_DELIVERY_ADMIN_PASSWORD is set, one is generated and returned in the response. (dict) --A name and value pair that is specific to the engine of the server. Name (string) --The name of the engine attribute. Value (string) --The value of the engine attribute. 
:type BackupRetentionCount: integer :param BackupRetentionCount: The number of automated backups that you want to keep. Whenever a new backup is created, AWS OpsWorks for Chef Automate deletes the oldest backups if this number is exceeded. The default value is 1 . :type ServerName: string :param ServerName: [REQUIRED] The name of the server. The server name must be unique within your AWS account, within each region. Server names must start with a letter; then letters, numbers, or hyphens (-) are allowed, up to a maximum of 40 characters. :type InstanceProfileArn: string :param InstanceProfileArn: [REQUIRED] The ARN of the instance profile that your Amazon EC2 instances use. Although the AWS OpsWorks console typically creates the instance profile for you, if you are using API commands instead, run the service-role-creation.yaml AWS CloudFormation template, located at https://s3.amazonaws.com/opsworks-cm-us-east-1-prod-default-assets/misc/opsworks-cm-roles.yaml. This template creates a CloudFormation stack that includes the instance profile you need. :type InstanceType: string :param InstanceType: [REQUIRED] The Amazon EC2 instance type to use. Valid values must be specified in the following format: ^([cm][34]|t2).* For example, m4.large . Valid values are t2.medium , m4.large , or m4.2xlarge . :type KeyPair: string :param KeyPair: The Amazon EC2 key pair to set for the instance. This parameter is optional; if desired, you may specify this parameter to connect to your instances by using SSH. :type PreferredMaintenanceWindow: string :param PreferredMaintenanceWindow: The start time for a one-hour period each week during which AWS OpsWorks for Chef Automate performs maintenance on the instance. Valid values must be specified in the following format: DDD:HH:MM . The specified time is in coordinated universal time (UTC). The default value is a random one-hour period on Tuesday, Wednesday, or Friday. See TimeWindowDefinition for more information. Example: Mon:08:00 , which represents a start time of every Monday at 08:00 UTC. (8:00 a.m.) :type PreferredBackupWindow: string :param PreferredBackupWindow: The start time for a one-hour period during which AWS OpsWorks for Chef Automate backs up application-level data on your server if automated backups are enabled. Valid values must be specified in one of the following formats: HH:MM for daily backups DDD:HH:MM for weekly backups The specified time is in coordinated universal time (UTC). The default value is a random, daily start time. Example: 08:00 , which represents a daily start time of 08:00 UTC.Example: Mon:08:00 , which represents a start time of every Monday at 08:00 UTC. (8:00 a.m.) :type SecurityGroupIds: list :param SecurityGroupIds: A list of security group IDs to attach to the Amazon EC2 instance. If you add this parameter, the specified security groups must be within the VPC that is specified by SubnetIds . If you do not specify this parameter, AWS OpsWorks for Chef Automate creates one new security group that uses TCP ports 22 and 443, open to 0.0.0.0/0 (everyone). (string) -- :type ServiceRoleArn: string :param ServiceRoleArn: [REQUIRED] The service role that the AWS OpsWorks for Chef Automate service backend uses to work with your account. Although the AWS OpsWorks management console typically creates the service role for you, if you are using the AWS CLI or API commands, run the service-role-creation.yaml AWS CloudFormation template, located at https://s3.amazonaws.com/opsworks-stuff/latest/service-role-creation.yaml. 
This template creates a CloudFormation stack that includes the service role that you need. :type SubnetIds: list :param SubnetIds: The IDs of subnets in which to launch the server EC2 instance. Amazon EC2-Classic customers: This field is required. All servers must run within a VPC. The VPC must have 'Auto Assign Public IP' enabled. EC2-VPC customers: This field is optional. If you do not specify subnet IDs, your EC2 instances are created in a default subnet that is selected by Amazon EC2. If you specify subnet IDs, the VPC must have 'Auto Assign Public IP' enabled. For more information about supported Amazon EC2 platforms, see Supported Platforms . (string) -- :type BackupId: string :param BackupId: If you specify this field, AWS OpsWorks for Chef Automate creates the server by using the backup represented by BackupId. :rtype: dict :return: { 'Server': { 'AssociatePublicIpAddress': True|False, 'BackupRetentionCount': 123, 'ServerName': 'string', 'CreatedAt': datetime(2015, 1, 1), 'CloudFormationStackArn': 'string', 'DisableAutomatedBackup': True|False, 'Endpoint': 'string', 'Engine': 'string', 'EngineModel': 'string', 'EngineAttributes': [ { 'Name': 'string', 'Value': 'string' }, ], 'EngineVersion': 'string', 'InstanceProfileArn': 'string', 'InstanceType': 'string', 'KeyPair': 'string', 'MaintenanceStatus': 'SUCCESS'|'FAILED', 'PreferredMaintenanceWindow': 'string', 'PreferredBackupWindow': 'string', 'SecurityGroupIds': [ 'string', ], 'ServiceRoleArn': 'string', 'Status': 'BACKING_UP'|'CONNECTION_LOST'|'CREATING'|'DELETING'|'MODIFYING'|'FAILED'|'HEALTHY'|'RUNNING'|'RESTORING'|'SETUP'|'UNDER_MAINTENANCE'|'UNHEALTHY'|'TERMINATED', 'StatusReason': 'string', 'SubnetIds': [ 'string', ], 'ServerArn': 'string' } } :returns: CHEF_PIVOTAL_KEY : A base64-encoded RSA private key that is generated by AWS OpsWorks for Chef Automate. This private key is required to access the Chef API. CHEF_STARTER_KIT : A base64-encoded ZIP file. The ZIP file contains a Chef starter kit, which includes a README, a configuration file, and the required RSA private key. Save this file, unzip it, and then change to the directory where you've unzipped the file contents. From this directory, you can run Knife commands.
def make_operatorsetid( domain, # type: Text version, # type: int ): # type: (...) -> OperatorSetIdProto """Construct an OperatorSetIdProto. Arguments: domain (string): The domain of the operator set id version (integer): Version of operator set id """ operatorsetid = OperatorSetIdProto() operatorsetid.domain = domain operatorsetid.version = version return operatorsetid
Construct an OperatorSetIdProto. Arguments: domain (string): The domain of the operator set id version (integer): Version of operator set id
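A minimal usage sketch; the empty-string domain (the default ONNX domain) and opset version 12 are illustrative values, not taken from the source:

from onnx import OperatorSetIdProto

opset = make_operatorsetid('', 12)   # '' is the default ONNX domain
assert isinstance(opset, OperatorSetIdProto)
assert opset.version == 12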
def to_OrderedDict(self, include_null=True): """ Convert to OrderedDict. """ if include_null: return OrderedDict(self.items()) else: items = list() for c in self.__table__._columns: try: items.append((c.name, self.__dict__[c.name])) except KeyError: pass return OrderedDict(items)
Convert to OrderedDict.
def entries(self): """A list of :class:`PasswordEntry` objects.""" passwords = [] for store in self.stores: passwords.extend(store.entries) return natsort(passwords, key=lambda e: e.name)
A list of :class:`PasswordEntry` objects.
def _fromJSON(cls, jsonobject): """Generates a new instance of :class:`maspy.core.Si` from a decoded JSON object (as generated by :func:`maspy.core.Si._reprJSON()`). :param jsonobject: decoded JSON object :returns: a new instance of :class:`Si` """ newInstance = cls(None, None) newInstance.__dict__.update(jsonobject) return newInstance
Generates a new instance of :class:`maspy.core.Si` from a decoded JSON object (as generated by :func:`maspy.core.Si._reprJSON()`). :param jsonobject: decoded JSON object :returns: a new instance of :class:`Si`
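A hypothetical round trip, assuming an existing Si instance ``si`` and its ``_reprJSON()`` counterpart:

import json

serialized = json.dumps(si._reprJSON())          # encode an existing item
restored = Si._fromJSON(json.loads(serialized))  # rebuild it from the decoded object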
def load_script(zap_helper, **options): """Load a script from a file.""" with zap_error_handler(): if not os.path.isfile(options['file_path']): raise ZAPError('No file found at "{0}", cannot load script.'.format(options['file_path'])) if not _is_valid_script_engine(zap_helper.zap, options['engine']): engines = zap_helper.zap.script.list_engines raise ZAPError('Invalid script engine provided. Valid engines are: {0}'.format(', '.join(engines))) console.debug('Loading script "{0}" from "{1}"'.format(options['name'], options['file_path'])) result = zap_helper.zap.script.load(options['name'], options['script_type'], options['engine'], options['file_path'], scriptdescription=options['description']) if result != 'OK': raise ZAPError('Error loading script: {0}'.format(result)) console.info('Script "{0}" loaded'.format(options['name']))
Load a script from a file.
def pop(self, queue_name): """ Pops a task off the queue. :param queue_name: The name of the queue. Usually handled by the ``Gator`` instance. :type queue_name: string :returns: The data for the task. :rtype: string """ self._only_watch_from(queue_name) job = self.conn.reserve(timeout=0) job.delete() return job.body
Pops a task off the queue. :param queue_name: The name of the queue. Usually handled by the ``Gator`` instance. :type queue_name: string :returns: The data for the task. :rtype: string
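A usage sketch; the backend class name, connection settings and queue name are illustrative, not from the source:

backend = BeanstalkBackend(host='localhost', port=11300)
task_data = backend.pop('downloads')   # reserves the next job, deletes it, returns its body
print(task_data)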
def font_to_wx_font(font):
    """
    Convert a font string/tuple into a wx.Font object

    :param font: "Arial 10 Bold" or ('Arial', 10, 'Bold')
    :return: wx.Font built from the family, point size and style flags
    """
    if font is None:
        return ''

    if type(font) is str:
        _font = font.split(' ')
    else:
        _font = font

    family = _font[0]
    point_size = int(_font[1])
    # Style flags are matched case-insensitively so "Bold" and "bold" both work.
    styles = [str(style).lower() for style in _font[2:]]
    underline = 'underline' in styles
    bold = 'bold' in styles
    wxfont = wx.Font(point_size,
                     wx.FONTFAMILY_DEFAULT,
                     wx.FONTSTYLE_NORMAL,
                     wx.FONTWEIGHT_BOLD if bold else wx.FONTWEIGHT_NORMAL,
                     underline,
                     faceName=family)

    return wxfont
Convert a font string/tuple into a wx.Font object :param font: "Arial 10 Bold" or ('Arial', 10, 'Bold') :return: wx.Font built from the family, point size and style flags
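A short usage sketch; wx GUI objects generally require a wx.App to exist first, and the font names below are examples:

import wx

app = wx.App(False)
heading_font = font_to_wx_font('Arial 14 bold')
body_font = font_to_wx_font(('Arial', 10, 'underline'))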
def dump_data(data, filename=None, file_type='json', klazz=YapconfError, open_kwargs=None, dump_kwargs=None): """Dump data given to file or stdout in file_type. Args: data (dict): The dictionary to dump. filename (str, optional): Defaults to None. The filename to write the data to. If none is provided, it will be written to STDOUT. file_type (str, optional): Defaults to 'json'. Can be any of yapconf.FILE_TYPES klazz (optional): Defaults to YapconfError a special error to throw when something goes wrong. open_kwargs (dict, optional): Keyword arguments to open. dump_kwargs (dict, optional): Keyword arguments to dump. """ _check_file_type(file_type, klazz) open_kwargs = open_kwargs or {'encoding': 'utf-8'} dump_kwargs = dump_kwargs or {} if filename: with open(filename, 'w', **open_kwargs) as conf_file: _dump(data, conf_file, file_type, **dump_kwargs) else: _dump(data, sys.stdout, file_type, **dump_kwargs)
Dump data given to file or stdout in file_type. Args: data (dict): The dictionary to dump. filename (str, optional): Defaults to None. The filename to write the data to. If none is provided, it will be written to STDOUT. file_type (str, optional): Defaults to 'json'. Can be any of yapconf.FILE_TYPES klazz (optional): Defaults to YapconfError a special error to throw when something goes wrong. open_kwargs (dict, optional): Keyword arguments to open. dump_kwargs (dict, optional): Keyword arguments to dump.
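A minimal sketch of both output modes; the filename is illustrative and 'yaml' is assumed to be among yapconf.FILE_TYPES:

config = {'host': 'localhost', 'port': 5000}

dump_data(config, filename='app_config.json', file_type='json')  # write to a file
dump_data(config, file_type='yaml')                              # print YAML to stdout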
def coordinate_reproject(x, y, s_crs, t_crs):
    """
    reproject a coordinate from one CRS to another

    Parameters
    ----------
    x: int or float
        the X coordinate component
    y: int or float
        the Y coordinate component
    s_crs: int, str or :osgeo:class:`osr.SpatialReference`
        the source CRS. See :func:`~spatialist.auxil.crsConvert` for options.
    t_crs: int, str or :osgeo:class:`osr.SpatialReference`
        the target CRS. See :func:`~spatialist.auxil.crsConvert` for options.

    Returns
    -------
    tuple
        the reprojected coordinate as (X, Y)
    """
    source = crsConvert(s_crs, 'osr')
    target = crsConvert(t_crs, 'osr')
    transform = osr.CoordinateTransformation(source, target)
    point = transform.TransformPoint(x, y)[:2]
    return point
reproject a coordinate from one CRS to another Parameters ---------- x: int or float the X coordinate component y: int or float the Y coordinate component s_crs: int, str or :osgeo:class:`osr.SpatialReference` the source CRS. See :func:`~spatialist.auxil.crsConvert` for options. t_crs: int, str or :osgeo:class:`osr.SpatialReference` the target CRS. See :func:`~spatialist.auxil.crsConvert` for options. Returns ------- tuple the reprojected coordinate as (X, Y)
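A usage sketch assuming EPSG codes are accepted by crsConvert (as its documentation suggests); 4326 is WGS84 and 32632 is UTM zone 32N:

x_utm, y_utm = coordinate_reproject(11.64, 48.26, s_crs=4326, t_crs=32632)
print(x_utm, y_utm)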
def rebalance(self): """The genetic rebalancing algorithm runs for a fixed number of generations. Each generation has two phases: exploration and pruning. In exploration, a large set of possible states are found by randomly applying assignment changes to the existing states. In pruning, each state is given a score based on the balance of the cluster and the states with the highest scores are chosen as the starting states for the next generation. """ if self.args.num_gens < self.args.max_partition_movements: self.log.warning( "num-gens ({num_gens}) is less than max-partition-movements" " ({max_partition_movements}). max-partition-movements will" " never be reached.".format( num_gens=self.args.num_gens, max_partition_movements=self.args.max_partition_movements, ) ) if self.args.replication_groups: self.log.info("Rebalancing replicas across replication groups...") rg_movement_count, rg_movement_size = self.rebalance_replicas( max_movement_count=self.args.max_partition_movements, max_movement_size=self.args.max_movement_size, ) self.log.info( "Done rebalancing replicas. %d partitions moved.", rg_movement_count, ) else: rg_movement_size = 0 rg_movement_count = 0 # Use a fixed random seed to make results reproducible. random.seed(RANDOM_SEED) # NOTE: only active brokers are considered when rebalancing state = _State( self.cluster_topology, brokers=self.cluster_topology.active_brokers ) state.movement_size = rg_movement_size pop = {state} do_rebalance = self.args.brokers or self.args.leaders # Cannot rebalance when all partitions have zero weight because the # score function is undefined. if do_rebalance and not state.total_weight: self.log.error( "Rebalance impossible. All partitions have zero weight.", ) do_rebalance = False if do_rebalance: self.log.info("Rebalancing with genetic algorithm.") # Run the genetic algorithm for a fixed number of generations. for i in range(self.args.num_gens): start = time.time() pop_candidates = self._explore(pop) pop = self._prune(pop_candidates) end = time.time() self.log.debug( "Generation %d: keeping %d of %d assignment(s) in %f seconds", i, len(pop), len(pop_candidates), end - start, ) # Choose the state with the greatest score. state = sorted(pop, key=self._score, reverse=True)[0] self.log.info( "Done rebalancing. %d partitions moved.", state.movement_count, ) self.log.info("Total movement size: %f", state.movement_size) assignment = state.assignment # Since only active brokers are considered when rebalancing, inactive # brokers need to be added back to the new assignment. all_brokers = set(self.cluster_topology.brokers.values()) inactive_brokers = all_brokers - set(state.brokers) for partition_name, replicas in assignment: for broker in inactive_brokers: if broker in self.cluster_topology.partitions[partition_name].replicas: replicas.append(broker.id) self.cluster_topology.update_cluster_topology(assignment)
The genetic rebalancing algorithm runs for a fixed number of generations. Each generation has two phases: exploration and pruning. In exploration, a large set of possible states are found by randomly applying assignment changes to the existing states. In pruning, each state is given a score based on the balance of the cluster and the states with the highest scores are chosen as the starting states for the next generation.
def present(name, user=None, password=None, auth='password', encoding='UTF8', locale=None, runas=None, waldir=None, checksums=False): ''' Initialize the PostgreSQL data directory name The name of the directory to initialize user The database superuser name password The password to set for the postgres user auth The default authentication method for local connections encoding The default encoding for new databases locale The default locale for new databases waldir The transaction log (WAL) directory (default is to keep WAL inside the data directory) .. versionadded:: 2019.2.0 checksums If True, the cluster will be created with data page checksums. .. note:: Data page checksums are supported since PostgreSQL 9.3. .. versionadded:: 2019.2.0 runas The system user the operation should be performed on behalf of ''' _cmt = 'Postgres data directory {0} is already present'.format(name) ret = { 'name': name, 'changes': {}, 'result': True, 'comment': _cmt} if not __salt__['postgres.datadir_exists'](name=name): if __opts__['test']: ret['result'] = None _cmt = 'Postgres data directory {0} is set to be initialized'\ .format(name) ret['comment'] = _cmt return ret kwargs = dict( user=user, password=password, auth=auth, encoding=encoding, locale=locale, waldir=waldir, checksums=checksums, runas=runas) if __salt__['postgres.datadir_init'](name, **kwargs): _cmt = 'Postgres data directory {0} has been initialized'\ .format(name) ret['comment'] = _cmt ret['changes'][name] = 'Present' else: _cmt = 'Postgres data directory {0} initialization failed'\ .format(name) ret['result'] = False ret['comment'] = _cmt return ret
Initialize the PostgreSQL data directory name The name of the directory to initialize user The database superuser name password The password to set for the postgres user auth The default authentication method for local connections encoding The default encoding for new databases locale The default locale for new databases waldir The transaction log (WAL) directory (default is to keep WAL inside the data directory) .. versionadded:: 2019.2.0 checksums If True, the cluster will be created with data page checksums. .. note:: Data page checksums are supported since PostgreSQL 9.3. .. versionadded:: 2019.2.0 runas The system user the operation should be performed on behalf of
def is_parent_of_family(self, id_, family_id): """Tests if an ``Id`` is a direct parent of a family. arg: id (osid.id.Id): an ``Id`` arg: family_id (osid.id.Id): the ``Id`` of a family return: (boolean) - ``true`` if this ``id`` is a parent of ``family_id,`` ``false`` otherwise raise: NotFound - ``family_id`` is not found raise: NullArgument - ``id`` or ``family_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* *implementation notes*: If ``id`` not found return ``false``. """ # Implemented from template for # osid.resource.BinHierarchySession.is_parent_of_bin if self._catalog_session is not None: return self._catalog_session.is_parent_of_catalog(id_=id_, catalog_id=family_id) return self._hierarchy_session.is_parent(id_=family_id, parent_id=id_)
Tests if an ``Id`` is a direct parent of a family. arg: id (osid.id.Id): an ``Id`` arg: family_id (osid.id.Id): the ``Id`` of a family return: (boolean) - ``true`` if this ``id`` is a parent of ``family_id,`` ``false`` otherwise raise: NotFound - ``family_id`` is not found raise: NullArgument - ``id`` or ``family_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* *implementation notes*: If ``id`` not found return ``false``.
def set_static_ip_address(self, context, msg):
    """Process a request for setting rules in iptables.

    When a static IP address is assigned to a VM, the iptables rule for
    that address needs to be updated.
    """
    args = jsonutils.loads(msg)
    macaddr = args.get('mac')
    ipaddr = args.get('ip')
    LOG.debug('set_static_ip_address received: %(mac)s %(ip)s', (
        {'mac': macaddr, 'ip': ipaddr}))

    # Add the request into the queue for processing.
    event_type = 'cli.static_ip.set'
    payload = {'mac': macaddr, 'ip': ipaddr}
    timestamp = time.ctime()
    data = (event_type, payload)
    pri = self.obj.PRI_LOW_START
    self.obj.pqueue.put((pri, timestamp, data))
    LOG.debug('Added request to add static ip into queue.')
    return 0
Process a request for setting rules in iptables. When a static IP address is assigned to a VM, the iptables rule for that address needs to be updated.
def set_filetype(self, filetype, bufnr=None): """Set filetype for a buffer. Note: it's a quirk of Vim's Python API that using the buffer.options dictionary to set filetype does not trigger ``FileType`` autocommands, hence this implementation executes as a command instead. Args: filetype (str): The filetype to set. bufnr (Optional[int]): A Vim buffer number, current if ``None``. """ if bufnr: self._vim.command(str(bufnr) + 'bufdo set filetype=' + filetype) else: self._vim.command('set filetype=' + filetype)
Set filetype for a buffer. Note: it's a quirk of Vim's Python API that using the buffer.options dictionary to set filetype does not trigger ``FileType`` autocommands, hence this implementation executes as a command instead. Args: filetype (str): The filetype to set. bufnr (Optional[int]): A Vim buffer number, current if ``None``.
def _check_row_table_name(table_name, row): """Checks that a row belongs to a table. :type table_name: str :param table_name: The name of the table. :type row: :class:`~google.cloud.bigtable.row.Row` :param row: An instance of :class:`~google.cloud.bigtable.row.Row` subclasses. :raises: :exc:`~.table.TableMismatchError` if the row does not belong to the table. """ if row.table is not None and row.table.name != table_name: raise TableMismatchError( "Row %s is a part of %s table. Current table: %s" % (row.row_key, row.table.name, table_name) )
Checks that a row belongs to a table. :type table_name: str :param table_name: The name of the table. :type row: :class:`~google.cloud.bigtable.row.Row` :param row: An instance of :class:`~google.cloud.bigtable.row.Row` subclasses. :raises: :exc:`~.table.TableMismatchError` if the row does not belong to the table.
def maskAt(self, index):
    """ Returns the mask at the index.

        If the mask is a boolean it is returned directly, since this boolean
        represents the mask for all array elements.
    """
    if isinstance(self.mask, bool):
        return self.mask
    else:
        return self.mask[index]
Returns the mask at the index. If the mask is a boolean it is returned directly, since this boolean represents the mask for all array elements.
def froze_it(cls):
    """
    Decorator to prevent creating attributes on an object outside __init__().

    This decorator must be applied to the final class (it doesn't work if a
    decorated class is inherited).

    Yoann's answer at http://stackoverflow.com/questions/3603502
    """
    cls._frozen = False

    def frozensetattr(self, key, value):
        if self._frozen and not hasattr(self, key):
            raise AttributeError("Attribute '{}' of class '{}' does not exist!"
                                 .format(key, cls.__name__))
        else:
            object.__setattr__(self, key, value)

    def init_decorator(func):
        @wraps(func)
        def wrapper(self, *args, **kwargs):
            func(self, *args, **kwargs)
            self._frozen = True
        return wrapper

    cls.__setattr__ = frozensetattr
    cls.__init__ = init_decorator(cls.__init__)

    return cls
Decorator to prevent creating attributes on an object outside __init__(). This decorator must be applied to the final class (it doesn't work if a decorated class is inherited). Yoann's answer at http://stackoverflow.com/questions/3603502
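A small sketch of the intended behaviour, using a throwaway class:

@froze_it
class Point:
    def __init__(self, x, y):
        self.x = x
        self.y = y

p = Point(1, 2)
p.x = 10            # fine: the attribute was created in __init__
try:
    p.z = 3         # never created in __init__, so this raises
except AttributeError as err:
    print(err)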
def getoutputfiles(self, loadmetadata=True, client=None, requiremetadata=False):
    """Iterates over all output files and their output template. Yields (CLAMOutputFile, str:outputtemplate_id) tuples. The last three arguments are passed to its constructor."""
    for outputfilename, outputtemplate in self.outputpairs():
        yield CLAMOutputFile(self.projectpath, outputfilename, loadmetadata, client, requiremetadata), outputtemplate
Iterates over all output files and their output template. Yields (CLAMOutputFile, str:outputtemplate_id) tuples. The last three arguments are passed to its constructor.
def warning_handler(self, handler): """Setter for the warning handler function. If the DLL is open, this function is a no-op, so it should be called prior to calling ``open()``. Args: self (JLink): the ``JLink`` instance handler (function): function to call on warning messages Returns: ``None`` """ if not self.opened(): handler = handler or util.noop self._warning_handler = enums.JLinkFunctions.LOG_PROTOTYPE(handler) self._dll.JLINKARM_SetWarnOutHandler(self._warning_handler)
Setter for the warning handler function. If the DLL is open, this function is a no-op, so it should be called prior to calling ``open()``. Args: self (JLink): the ``JLink`` instance handler (function): function to call on warning messages Returns: ``None``
def yield_for_all_futures(result): """ Converts result into a Future by collapsing any futures inside result. If result is a Future we yield until it's done, then if the value inside the Future is another Future we yield until it's done as well, and so on. """ while True: # This is needed for Tornado >= 4.5 where convert_yielded will no # longer raise BadYieldError on None if result is None: break try: future = gen.convert_yielded(result) except gen.BadYieldError: # result is not a yieldable thing, we are done break else: result = yield future raise gen.Return(result)
Converts result into a Future by collapsing any futures inside result. If result is a Future we yield until it's done, then if the value inside the Future is another Future we yield until it's done as well, and so on.
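A sketch of how this is typically consumed from inside another Tornado coroutine; the surrounding coroutine is hypothetical:

from tornado import gen

@gen.coroutine
def use_result(result):
    # result may be a plain value, a Future, or a Future that resolves to
    # another Future; all layers are collapsed before the value is used.
    value = yield yield_for_all_futures(result)
    raise gen.Return(value)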
def _cpu(self): """Record CPU usage.""" value = int(psutil.cpu_percent()) set_metric("cpu", value, category=self.category) gauge("cpu", value)
Record CPU usage.
def input_streams(self): """Return a list of DataStream objects for all singular input streams. This function only returns individual streams, not the streams that would be selected from a selector like 'all outputs' for example. Returns: list(DataStream): A list of all of the individual DataStreams that are inputs of the node. Input selectors that select multiple streams are not included """ streams = [] for walker, _trigger in self.inputs: if walker.selector is None or not walker.selector.singular: continue streams.append(walker.selector.as_stream()) return streams
Return a list of DataStream objects for all singular input streams. This function only returns individual streams, not the streams that would be selected from a selector like 'all outputs' for example. Returns: list(DataStream): A list of all of the individual DataStreams that are inputs of the node. Input selectors that select multiple streams are not included
def tag_file(filename, artist, title, year=None, genre=None, artwork_url=None, album=None, track_number=None, url=None): """ Attempt to put ID3 tags on a file. Args: artist (str): title (str): year (int): genre (str): artwork_url (str): album (str): track_number (str): filename (str): url (str): """ try: audio = EasyMP3(filename) audio.tags = None audio["artist"] = artist audio["title"] = title if year: audio["date"] = str(year) if album: audio["album"] = album if track_number: audio["tracknumber"] = track_number if genre: audio["genre"] = genre if url: # saves the tag as WOAR audio["website"] = url audio.save() if artwork_url: artwork_url = artwork_url.replace('https', 'http') mime = 'image/jpeg' if '.jpg' in artwork_url: mime = 'image/jpeg' if '.png' in artwork_url: mime = 'image/png' if '-large' in artwork_url: new_artwork_url = artwork_url.replace('-large', '-t500x500') try: image_data = requests.get(new_artwork_url).content except Exception as e: # No very large image available. image_data = requests.get(artwork_url).content else: image_data = requests.get(artwork_url).content audio = MP3(filename, ID3=OldID3) audio.tags.add( APIC( encoding=3, # 3 is for utf-8 mime=mime, type=3, # 3 is for the cover image desc='Cover', data=image_data ) ) audio.save() # because there is software that doesn't seem to use WOAR we save url tag again as WXXX if url: audio = MP3(filename, ID3=OldID3) audio.tags.add( WXXX( encoding=3, url=url ) ) audio.save() return True except Exception as e: puts(colored.red("Problem tagging file: ") + colored.white("Is this file a WAV?")) return False
Attempt to put ID3 tags on a file. Args: artist (str): title (str): year (int): genre (str): artwork_url (str): album (str): track_number (str): filename (str): url (str):
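A usage sketch; the file path, metadata and URL below are made up:

ok = tag_file(
    'downloads/track.mp3',
    artist='Some Artist',
    title='Some Track',
    year=2016,
    genre='Electronic',
    album='Some Album',
    track_number='3',
    url='https://soundcloud.com/some-artist/some-track',
)
if not ok:
    print('Tagging failed; the file may not actually be an MP3.')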
def num2hexstring(number, size=1, little_endian=False):
    """
    Converts a number to a big-endian hexstring of the given byte size, optionally encoded as little endian

    :param {number} number
    :param {number} size - The required size in bytes, e.g. 1 for Uint8, 2 for Uint16. Defaults to 1.
    :param {boolean} little_endian - Encode the hex in little endian form
    :return {string}
    """
    # Unlike the JavaScript original, no type or range validation is performed here.
    # Each byte is two hex characters.
    size = size * 2
    hexstring = hex(number)[2:]
    if len(hexstring) % size != 0:
        # Zero-pad on the left so the result is exactly `size` hex characters.
        hexstring = ('0' * size + hexstring)[len(hexstring):]
    if little_endian:
        hexstring = reverse_hex(hexstring)
    return hexstring
Converts a number to a big-endian hexstring of the given byte size, optionally encoded as little endian :param {number} number :param {number} size - The required size in bytes, e.g. 1 for Uint8, 2 for Uint16. Defaults to 1. :param {boolean} little_endian - Encode the hex in little endian form :return {string}
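Worked examples of the conversion (assuming reverse_hex swaps byte order, which is what the little-endian branch relies on):

assert num2hexstring(255) == 'ff'                              # 1 byte -> 2 hex chars
assert num2hexstring(1, size=2) == '0001'                      # zero-padded to 2 bytes
assert num2hexstring(1, size=2, little_endian=True) == '0100'  # byte order reversed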
def deriv(self, p): """ Derivative of the power transform Parameters ---------- p : array-like Mean parameters Returns -------- g'(p) : array Derivative of power transform of `p` Notes ----- g'(`p`) = `power` * `p`**(`power` - 1) """ return self.power * np.power(p, self.power - 1)
Derivative of the power transform Parameters ---------- p : array-like Mean parameters Returns -------- g'(p) : array Derivative of power transform of `p` Notes ----- g'(`p`) = `power` * `p`**(`power` - 1)
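A small numeric check of the formula g'(p) = power * p**(power - 1); the power value is illustrative and the snippet mirrors the method body rather than calling it:

import numpy as np

power = 2
p = np.array([1.0, 2.0, 4.0])
deriv = power * np.power(p, power - 1)   # -> array([2., 4., 8.])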
def plot(self, numPoints=100): """ Specific plotting method for cylinders. """ fig = plt.figure() ax = fig.add_subplot(111, projection='3d') # generate cylinder x = np.linspace(- self.radius, self.radius, numPoints) z = np.linspace(- self.height / 2., self.height / 2., numPoints) Xc, Zc = np.meshgrid(x, z) Yc = np.sqrt(self.radius ** 2 - Xc ** 2) # plot ax.plot_surface(Xc, Yc, Zc, alpha=0.2, rstride=20, cstride=10) ax.plot_surface(Xc, -Yc, Zc, alpha=0.2, rstride=20, cstride=10) ax.set_xlabel("X") ax.set_ylabel("Y") ax.set_zlabel("Z") plt.title("{}".format(self)) return fig, ax
Specific plotting method for cylinders.
def variable(self, var_name, shape, init, dt=tf.float32, train=None): """Adds a named variable to this bookkeeper or returns an existing one. Variables marked train are returned by the training_variables method. If the requested name already exists and it is compatible (same shape, dt and train) then it is returned. In case of an incompatible type, an exception is thrown. Args: var_name: The unique name of this variable. If a variable with the same name exists, then it is returned. shape: The shape of the variable. init: The init function to use or a Tensor to copy. dt: The datatype, defaults to float. This will automatically extract the base dtype. train: Whether or not the variable should be trained; defaults to True unless a default_scope has overridden it. Returns: A TensorFlow tensor. Raises: ValueError: if reuse is False (or unspecified and allow_reuse is False) and the variable already exists or if the specification of a reused variable does not match the original. """ # Make sure it is a TF dtype and convert it into a base dtype. dt = tf.as_dtype(dt).base_dtype if var_name in self.vars: v = self.vars[var_name] if v.get_shape() != shape: raise ValueError( 'Shape mismatch: %s vs %s. Perhaps a UnboundVariable had ' 'incompatible values within a graph.' % (v.get_shape(), shape)) return v elif callable(init): if train is None: train = _defaults.get('trainable_variables', True) variable_collections = _defaults.get('variable_collections', ()) if tf.GraphKeys.GLOBAL_VARIABLES not in variable_collections: variable_collections = list(variable_collections) + [ tf.GraphKeys.GLOBAL_VARIABLES] v = tf.get_variable(var_name, shape=shape, dtype=dt, initializer=init, trainable=train, collections=variable_collections) self.vars[var_name] = v return v else: v = tf.convert_to_tensor(init, name=var_name, dtype=dt) v.get_shape().assert_is_compatible_with(shape) self.vars[var_name] = v return v
Adds a named variable to this bookkeeper or returns an existing one. Variables marked train are returned by the training_variables method. If the requested name already exists and it is compatible (same shape, dt and train) then it is returned. In case of an incompatible type, an exception is thrown. Args: var_name: The unique name of this variable. If a variable with the same name exists, then it is returned. shape: The shape of the variable. init: The init function to use or a Tensor to copy. dt: The datatype, defaults to float. This will automatically extract the base dtype. train: Whether or not the variable should be trained; defaults to True unless a default_scope has overridden it. Returns: A TensorFlow tensor. Raises: ValueError: if reuse is False (or unspecified and allow_reuse is False) and the variable already exists or if the specification of a reused variable does not match the original.
def cols_to_numeric(df, col_list,dest = False): """ Coerces a list of columns to numeric Parameters: df - DataFrame DataFrame to operate on col_list - list of strings names of columns to coerce dest - bool, default False Whether to apply the result to the DataFrame or return it. True is apply, False is return. """ if not dest: return _pd.DataFrame({col_name:col_to_numeric(df,col_name) for col_name in col_list}) for col_name in col_list: col_to_numeric(df,col_name,dest)
Coerces a list of columns to numeric Parameters: df - DataFrame DataFrame to operate on col_list - list of strings names of columns to coerce dest - bool, default False Whether to apply the result to the DataFrame or return it. True is apply, False is return.
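A usage sketch, assuming the companion col_to_numeric helper coerces a single column (e.g. via pd.to_numeric):

import pandas as pd

df = pd.DataFrame({'a': ['1', '2'], 'b': ['3.5', '4'], 'c': ['keep', 'me']})

numeric = cols_to_numeric(df, ['a', 'b'])        # returns coerced copies
cols_to_numeric(df, ['a', 'b'], dest=True)       # or coerce in place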
def read_name(self):
    """Reads a domain name from the packet"""
    result = ''
    off = self.offset
    next_off = -1
    first = off

    while True:
        length = ord(self.data[off])
        off += 1
        if length == 0:
            break
        t = length & 0xC0
        if t == 0x00:
            # Plain label: read `length` bytes and append as a dot-terminated label.
            result = ''.join((result, self.read_utf(off, length) + '.'))
            off += length
        elif t == 0xC0:
            # Compression pointer: jump to the offset encoded in the lower 14 bits.
            if next_off < 0:
                next_off = off + 1
            off = ((length & 0x3F) << 8) | ord(self.data[off])
            if off >= first:
                raise Exception(
                    "Bad domain name (circular) at " + str(off))
            first = off
        else:
            raise Exception("Bad domain name at " + str(off))

    if next_off >= 0:
        self.offset = next_off
    else:
        self.offset = off

    return result
Reads a domain name from the packet
def sync_handler(self, args):
    '''Handler for the sync command.

    XXX Here we emulate the sync command with get/put -r -f --sync-check,
    so it does not provide a delete operation.
    '''
    self.opt.recursive = True
    self.opt.sync_check = True
    self.opt.force = True
    self.validate('cmd|s3,local|s3,local', args)
    source = args[1]
    target = args[2]
    self.s3handler().sync_files(source, target)
Handler for the sync command. XXX Here we emulate the sync command with get/put -r -f --sync-check, so it does not provide a delete operation.
def best_structures(uniprot_id, outname=None, outdir=None, seq_ident_cutoff=0.0, force_rerun=False): """Use the PDBe REST service to query for the best PDB structures for a UniProt ID. More information found here: https://www.ebi.ac.uk/pdbe/api/doc/sifts.html Link used to retrieve results: https://www.ebi.ac.uk/pdbe/api/mappings/best_structures/:accession The list of PDB structures mapping to a UniProt accession sorted by coverage of the protein and, if the same, resolution. Here is the ranking algorithm described by the PDB paper: https://nar.oxfordjournals.org/content/44/D1/D385.full "Finally, a single quality indicator is also calculated for each entry by taking the harmonic average of all the percentile scores representing model and model-data-fit quality measures and then subtracting 10 times the numerical value of the resolution (in Angstrom) of the entry to ensure that resolution plays a role in characterising the quality of a structure. This single empirical 'quality measure' value is used by the PDBe query system to sort results and identify the 'best' structure in a given context. At present, entries determined by methods other than X-ray crystallography do not have similar data quality information available and are not considered as 'best structures'." Args: uniprot_id (str): UniProt Accession ID outname (str): Basename of the output file of JSON results outdir (str): Path to output directory of JSON results seq_ident_cutoff (float): Cutoff results based on percent coverage (in decimal form) force_rerun (bool): Obtain best structures mapping ignoring previously downloaded results Returns: list: Rank-ordered list of dictionaries representing chain-specific PDB entries. Keys are: * pdb_id: the PDB ID which maps to the UniProt ID * chain_id: the specific chain of the PDB which maps to the UniProt ID * coverage: the percent coverage of the entire UniProt sequence * resolution: the resolution of the structure * start: the structure residue number which maps to the start of the mapped sequence * end: the structure residue number which maps to the end of the mapped sequence * unp_start: the sequence residue number which maps to the structure start * unp_end: the sequence residue number which maps to the structure end * experimental_method: type of experiment used to determine structure * tax_id: taxonomic ID of the protein's original organism """ outfile = '' if not outdir: outdir = '' # if output dir is specified but not outname, use the uniprot if not outname and outdir: outname = uniprot_id if outname: outname = op.join(outdir, outname) outfile = '{}.json'.format(outname) # Load a possibly existing json file if not ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile): with open(outfile, 'r') as f: raw_data = json.load(f) log.debug('{}: loaded existing json file'.format(uniprot_id)) # Otherwise run the web request else: # TODO: add a checker for a cached file of uniprot -> PDBs - can be generated within gempro pipeline and stored response = requests.get('https://www.ebi.ac.uk/pdbe/api/mappings/best_structures/{}'.format(uniprot_id), data={'key': 'value'}) if response.status_code == 404: log.debug('{}: 404 returned, probably no structures available.'.format(uniprot_id)) raw_data = {uniprot_id: {}} else: log.debug('{}: Obtained best structures'.format(uniprot_id)) raw_data = response.json() # Write the json file if specified if outfile: with open(outfile, 'w') as f: json.dump(raw_data, f) log.debug('{}: Saved json file of best structures'.format(uniprot_id)) data = 
dict(raw_data)[uniprot_id] # Filter for sequence identity percentage if seq_ident_cutoff != 0: for result in data: if result['coverage'] < seq_ident_cutoff: data.remove(result) return data
Use the PDBe REST service to query for the best PDB structures for a UniProt ID. More information found here: https://www.ebi.ac.uk/pdbe/api/doc/sifts.html Link used to retrieve results: https://www.ebi.ac.uk/pdbe/api/mappings/best_structures/:accession The list of PDB structures mapping to a UniProt accession sorted by coverage of the protein and, if the same, resolution. Here is the ranking algorithm described by the PDB paper: https://nar.oxfordjournals.org/content/44/D1/D385.full "Finally, a single quality indicator is also calculated for each entry by taking the harmonic average of all the percentile scores representing model and model-data-fit quality measures and then subtracting 10 times the numerical value of the resolution (in Angstrom) of the entry to ensure that resolution plays a role in characterising the quality of a structure. This single empirical 'quality measure' value is used by the PDBe query system to sort results and identify the 'best' structure in a given context. At present, entries determined by methods other than X-ray crystallography do not have similar data quality information available and are not considered as 'best structures'." Args: uniprot_id (str): UniProt Accession ID outname (str): Basename of the output file of JSON results outdir (str): Path to output directory of JSON results seq_ident_cutoff (float): Cutoff results based on percent coverage (in decimal form) force_rerun (bool): Obtain best structures mapping ignoring previously downloaded results Returns: list: Rank-ordered list of dictionaries representing chain-specific PDB entries. Keys are: * pdb_id: the PDB ID which maps to the UniProt ID * chain_id: the specific chain of the PDB which maps to the UniProt ID * coverage: the percent coverage of the entire UniProt sequence * resolution: the resolution of the structure * start: the structure residue number which maps to the start of the mapped sequence * end: the structure residue number which maps to the end of the mapped sequence * unp_start: the sequence residue number which maps to the structure start * unp_end: the sequence residue number which maps to the structure end * experimental_method: type of experiment used to determine structure * tax_id: taxonomic ID of the protein's original organism
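A usage sketch; 'P0ABP8' stands in for any valid UniProt accession and the output paths are examples:

ranked = best_structures('P0ABP8', outname='P0ABP8_best_structures',
                         outdir='/tmp', seq_ident_cutoff=0.3)
if ranked:
    top = ranked[0]
    print(top['pdb_id'], top['chain_id'], top['coverage'], top['resolution'])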
def get_intent(self, name, language_code=None, intent_view=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None): """ Retrieves the specified intent. Example: >>> import dialogflow_v2 >>> >>> client = dialogflow_v2.IntentsClient() >>> >>> name = client.intent_path('[PROJECT]', '[INTENT]') >>> >>> response = client.get_intent(name) Args: name (str): Required. The name of the intent. Format: ``projects/<Project ID>/agent/intents/<Intent ID>``. language_code (str): Optional. The language to retrieve training phrases, parameters and rich messages for. If not specified, the agent's default language is used. [More than a dozen languages](https://dialogflow.com/docs/reference/language) are supported. Note: languages must be enabled in the agent, before they can be used. intent_view (~google.cloud.dialogflow_v2.types.IntentView): Optional. The resource view to apply to the returned intent. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.dialogflow_v2.types.Intent` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if 'get_intent' not in self._inner_api_calls: self._inner_api_calls[ 'get_intent'] = google.api_core.gapic_v1.method.wrap_method( self.transport.get_intent, default_retry=self._method_configs['GetIntent'].retry, default_timeout=self._method_configs['GetIntent'].timeout, client_info=self._client_info, ) request = intent_pb2.GetIntentRequest( name=name, language_code=language_code, intent_view=intent_view, ) return self._inner_api_calls['get_intent']( request, retry=retry, timeout=timeout, metadata=metadata)
Retrieves the specified intent. Example: >>> import dialogflow_v2 >>> >>> client = dialogflow_v2.IntentsClient() >>> >>> name = client.intent_path('[PROJECT]', '[INTENT]') >>> >>> response = client.get_intent(name) Args: name (str): Required. The name of the intent. Format: ``projects/<Project ID>/agent/intents/<Intent ID>``. language_code (str): Optional. The language to retrieve training phrases, parameters and rich messages for. If not specified, the agent's default language is used. [More than a dozen languages](https://dialogflow.com/docs/reference/language) are supported. Note: languages must be enabled in the agent, before they can be used. intent_view (~google.cloud.dialogflow_v2.types.IntentView): Optional. The resource view to apply to the returned intent. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.dialogflow_v2.types.Intent` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid.
def _Open(self, path_spec, mode='rb'): """Opens the file system defined by path specification. Args: path_spec (PathSpec): path specification. mode (Optional[str]): file access mode. Raises: AccessError: if the access to open the file was denied. IOError: if the APFS volume could not be retrieved or unlocked. OSError: if the APFS volume could not be retrieved or unlocked. PathSpecError: if the path specification is incorrect. ValueError: if the path specification is invalid. """ if not path_spec.HasParent(): raise errors.PathSpecError( 'Unsupported path specification without parent.') if path_spec.parent.type_indicator != ( definitions.TYPE_INDICATOR_APFS_CONTAINER): raise errors.PathSpecError( 'Unsupported path specification not type APFS container.') apfs_container_file_system = resolver.Resolver.OpenFileSystem( path_spec.parent, resolver_context=self._resolver_context) fsapfs_volume = apfs_container_file_system.GetAPFSVolumeByPathSpec( path_spec.parent) if not fsapfs_volume: raise IOError('Unable to retrieve APFS volume') try: is_locked = not apfs_helper.APFSUnlockVolume( fsapfs_volume, path_spec.parent, resolver.Resolver.key_chain) except IOError as exception: raise IOError('Unable to unlock APFS volume with error: {0!s}'.format( exception)) if is_locked: raise IOError('Unable to unlock APFS volume.') self._fsapfs_volume = fsapfs_volume
Opens the file system defined by path specification. Args: path_spec (PathSpec): path specification. mode (Optional[str]): file access mode. Raises: AccessError: if the access to open the file was denied. IOError: if the APFS volume could not be retrieved or unlocked. OSError: if the APFS volume could not be retrieved or unlocked. PathSpecError: if the path specification is incorrect. ValueError: if the path specification is invalid.
def rename_pickled_ontology(filename, newname): """ try to rename a cached ontology """ pickledfile = ONTOSPY_LOCAL_CACHE + "/" + filename + ".pickle" newpickledfile = ONTOSPY_LOCAL_CACHE + "/" + newname + ".pickle" if os.path.isfile(pickledfile) and not GLOBAL_DISABLE_CACHE: os.rename(pickledfile, newpickledfile) return True else: return None
try to rename a cached ontology
def dirname(hdfs_path): """ Return the directory component of ``hdfs_path``. """ scheme, netloc, path = parse(hdfs_path) return unparse(scheme, netloc, os.path.dirname(path))
Return the directory component of ``hdfs_path``.
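Illustrative calls, assuming parse/unparse split and rejoin the scheme, netloc and path components:

dirname('hdfs://namenode:8020/user/me/data.txt')   # -> 'hdfs://namenode:8020/user/me'
dirname('/user/me/data.txt')                       # -> '/user/me'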