def paint(str, color='r'):
    '''Utility function for printing colorful logs in the console.

    Args:
        str: the string to be modified.
        color: color code to apply to the string; defaults to 'r' (red).

    Returns:
        The modified string with the requested foreground color.
    '''
    # `switcher` is a module-level mapping from color codes to colorama codes.
    if color in switcher:
        str = switcher[color] + str + colorama.Style.RESET_ALL
    return str
Utility function for printing colorful logs in the console. Args: str: the string to be modified. color: color code to apply to the string; defaults to 'r' (red). Returns: The modified string with the requested foreground color.
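A minimal usage sketch for the helper above; `switcher` is defined elsewhere in the original module, so the mapping shown here is an assumption for illustration:

import colorama

# Assumed module-level mapping from color codes to colorama foreground codes.
switcher = {
    'r': colorama.Fore.RED,
    'g': colorama.Fore.GREEN,
    'y': colorama.Fore.YELLOW,
}

print(paint('something went wrong'))   # red text
print(paint('all good', color='g'))    # green text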
def lomb_scargle_fast(t, y, dy=1, f0=0, df=None, Nf=None, center_data=True, fit_offset=True, use_fft=True, freq_oversampling=5, nyquist_factor=2, trig_sum_kwds=None): """Compute a lomb-scargle periodogram for the given data This implements both an O[N^2] method if use_fft==False, or an O[NlogN] method if use_fft==True. Parameters ---------- t, y, dy : array_like times, values, and errors of the data points. These should be broadcastable to the same shape. If dy is not specified, a constant error will be used. f0, df, Nf : (float, float, int) parameters describing the frequency grid, f = f0 + df * arange(Nf). Defaults, with T = t.max() - t.min(): - f0 = 0 - df is set such that there are ``freq_oversampling`` points per peak width. ``freq_oversampling`` defaults to 5. - Nf is set such that the highest frequency is ``nyquist_factor`` times the so-called "average Nyquist frequency". ``nyquist_factor`` defaults to 2. Note that for unevenly-spaced data, the periodogram can be sensitive to frequencies far higher than the average Nyquist frequency. center_data : bool (default=True) Specify whether to subtract the mean of the data before the fit fit_offset : bool (default=True) If True, then compute the floating-mean periodogram; i.e. let the mean vary with the fit. use_fft : bool (default=True) If True, then use the Press & Rybicki O[NlogN] algorithm to compute the result. Otherwise, use a slower O[N^2] algorithm Other Parameters ---------------- freq_oversampling : float (default=5) Oversampling factor for the frequency bins. Only referenced if ``df`` is not specified nyquist_factor : float (default=2) Parameter controlling the highest probed frequency. Only referenced if ``Nf`` is not specified. trig_sum_kwds : dict or None (optional) extra keyword arguments to pass to the ``trig_sum`` utility. Options are ``oversampling`` and ``Mfft``. See documentation of ``trig_sum`` for details. Notes ----- Note that the ``use_fft=True`` algorithm is an approximation to the true Lomb-Scargle periodogram, and as the number of points grows this approximation improves. On the other hand, for very small datasets (<~50 points or so) this approximation may not be useful. References ---------- .. [1] Press W.H. and Rybicki, G.B, "Fast algorithm for spectral analysis of unevenly sampled data". ApJ 1:338, p277, 1989 .. [2] M. Zechmeister and M. Kurster, A&A 496, 577-584 (2009) .. [3] W. Press et al, Numerical Recipies in C (2002) """ # Validate and setup input data t, y, dy = map(np.ravel, np.broadcast_arrays(t, y, dy)) w = 1. / (dy ** 2) w /= w.sum() # Validate and setup frequency grid if df is None: peak_width = 1. / (t.max() - t.min()) df = peak_width / freq_oversampling if Nf is None: avg_Nyquist = 0.5 * len(t) / (t.max() - t.min()) Nf = max(16, (nyquist_factor * avg_Nyquist - f0) / df) Nf = int(Nf) assert(df > 0) assert(Nf > 0) freq = f0 + df * np.arange(Nf) # Center the data. Even if we're fitting the offset, # this step makes the expressions below more succinct if center_data or fit_offset: y = y - np.dot(w, y) # set up arguments to trig_sum kwargs = dict.copy(trig_sum_kwds or {}) kwargs.update(f0=f0, df=df, use_fft=use_fft, N=Nf) #---------------------------------------------------------------------- # 1. 
compute functions of the time-shift tau at each frequency Sh, Ch = trig_sum(t, w * y, **kwargs) S2, C2 = trig_sum(t, w, freq_factor=2, **kwargs) if fit_offset: S, C = trig_sum(t, w, **kwargs) with warnings.catch_warnings(): # Filter "invalid value in divide" warnings for zero-frequency if f0 == 0: warnings.simplefilter("ignore") tan_2omega_tau = (S2 - 2 * S * C) / (C2 - (C * C - S * S)) # fix NaN at zero frequency if np.isnan(tan_2omega_tau[0]): tan_2omega_tau[0] = 0 else: tan_2omega_tau = S2 / C2 # slower/less stable way: we'll use trig identities instead # omega_tau = 0.5 * np.arctan(tan_2omega_tau) # S2w, C2w = np.sin(2 * omega_tau), np.cos(2 * omega_tau) # Sw, Cw = np.sin(omega_tau), np.cos(omega_tau) S2w = tan_2omega_tau / np.sqrt(1 + tan_2omega_tau * tan_2omega_tau) C2w = 1 / np.sqrt(1 + tan_2omega_tau * tan_2omega_tau) Cw = np.sqrt(0.5) * np.sqrt(1 + C2w) Sw = np.sqrt(0.5) * np.sign(S2w) * np.sqrt(1 - C2w) #---------------------------------------------------------------------- # 2. Compute the periodogram, following Zechmeister & Kurster # and using tricks from Press & Rybicki. YY = np.dot(w, y ** 2) YC = Ch * Cw + Sh * Sw YS = Sh * Cw - Ch * Sw CC = 0.5 * (1 + C2 * C2w + S2 * S2w) SS = 0.5 * (1 - C2 * C2w - S2 * S2w) if fit_offset: CC -= (C * Cw + S * Sw) ** 2 SS -= (S * Cw - C * Sw) ** 2 with warnings.catch_warnings(): # Filter "invalid value in divide" warnings for zero-frequency if fit_offset and f0 == 0: warnings.simplefilter("ignore") power = (YC * YC / CC + YS * YS / SS) / YY # fix NaN and INF at zero frequency if np.isnan(power[0]) or np.isinf(power[0]): power[0] = 0 return freq, power
Compute a Lomb-Scargle periodogram for the given data. This implements either an O[N^2] method if use_fft==False, or an O[NlogN] method if use_fft==True. Parameters ---------- t, y, dy : array_like times, values, and errors of the data points. These should be broadcastable to the same shape. If dy is not specified, a constant error will be used. f0, df, Nf : (float, float, int) parameters describing the frequency grid, f = f0 + df * arange(Nf). Defaults, with T = t.max() - t.min(): - f0 = 0 - df is set such that there are ``freq_oversampling`` points per peak width. ``freq_oversampling`` defaults to 5. - Nf is set such that the highest frequency is ``nyquist_factor`` times the so-called "average Nyquist frequency". ``nyquist_factor`` defaults to 2. Note that for unevenly-spaced data, the periodogram can be sensitive to frequencies far higher than the average Nyquist frequency. center_data : bool (default=True) Specify whether to subtract the mean of the data before the fit fit_offset : bool (default=True) If True, then compute the floating-mean periodogram; i.e. let the mean vary with the fit. use_fft : bool (default=True) If True, then use the Press & Rybicki O[NlogN] algorithm to compute the result. Otherwise, use a slower O[N^2] algorithm Other Parameters ---------------- freq_oversampling : float (default=5) Oversampling factor for the frequency bins. Only referenced if ``df`` is not specified nyquist_factor : float (default=2) Parameter controlling the highest probed frequency. Only referenced if ``Nf`` is not specified. trig_sum_kwds : dict or None (optional) extra keyword arguments to pass to the ``trig_sum`` utility. Options are ``oversampling`` and ``Mfft``. See documentation of ``trig_sum`` for details. Notes ----- Note that the ``use_fft=True`` algorithm is an approximation to the true Lomb-Scargle periodogram, and as the number of points grows this approximation improves. On the other hand, for very small datasets (<~50 points or so) this approximation may not be useful. References ---------- .. [1] Press W.H. and Rybicki, G.B., "Fast algorithm for spectral analysis of unevenly sampled data". ApJ 1:338, p277, 1989 .. [2] M. Zechmeister and M. Kurster, A&A 496, 577-584 (2009) .. [3] W. Press et al, Numerical Recipes in C (2002)
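A quick usage sketch on synthetic, unevenly sampled data (assuming lomb_scargle_fast and its trig_sum helper are importable from the surrounding module; the signal parameters are made up):

import numpy as np

rng = np.random.RandomState(0)
t = 100 * rng.rand(300)                      # irregular sampling times
y = np.sin(2 * np.pi * 0.7 * t) + 0.1 * rng.randn(300)

freq, power = lomb_scargle_fast(t, y, dy=0.1, nyquist_factor=4)
print(freq[np.argmax(power)])                # expect a peak near 0.7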
def open_repository(path, spor_dir='.spor'):
    """Open an existing repository.

    Args:
        path: Path to any file or directory within the repository.
        spor_dir: The name of the directory containing spor data.

    Returns:
        A `Repository` instance.

    Raises:
        ValueError: No repository is found.
    """
    root = _find_root_dir(path, spor_dir)
    return Repository(root, spor_dir)
Open an existing repository. Args: path: Path to any file or directory within the repository. spor_dir: The name of the directory containing spor data. Returns: A `Repository` instance. Raises: ValueError: No repository is found.
def draw(self, drawDC=None):
    """
    Render the figure using a RendererWx instance, or using a
    previously defined renderer if none is specified.
    """
    DEBUG_MSG("draw()", 1, self)
    self.renderer = RendererWx(self.bitmap, self.figure.dpi)
    self.figure.draw(self.renderer)
    self._isDrawn = True
    self.gui_repaint(drawDC=drawDC)
Render the figure using a RendererWx instance, or using a previously defined renderer if none is specified.
def get_username(details, backend, response, *args, **kwargs):
    """Sets the `username` argument.

    If the user exists already, use the existing username. Otherwise
    generate username from the `new_uuid` using the
    `helusers.utils.uuid_to_username` function.
    """
    user = details.get('user')
    if not user:
        user_uuid = kwargs.get('uid')
        if not user_uuid:
            return
        username = uuid_to_username(user_uuid)
    else:
        username = user.username
    return {
        'username': username
    }
Sets the `username` argument. If the user exists already, use the existing username. Otherwise generate username from the `new_uuid` using the `helusers.utils.uuid_to_username` function.
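This function is shaped like a python-social-auth pipeline step; a plausible settings entry that would run it before user creation might look as follows (the 'helusers.pipeline.get_username' path is an assumption for illustration):

SOCIAL_AUTH_PIPELINE = (
    'social_core.pipeline.social_auth.social_details',
    'social_core.pipeline.social_auth.social_uid',
    'social_core.pipeline.social_auth.social_user',
    'helusers.pipeline.get_username',   # assumed module path for this step
    'social_core.pipeline.user.create_user',
)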
def lf_empirical_accuracies(L, Y):
    """Return the **empirical accuracy** against a set of labels Y
    (e.g. dev set) for each LF.

    Args:
        L: an n x m scipy.sparse matrix where L_{i,j} is the label given
            by the jth LF to the ith candidate
        Y: an [n] or [n, 1] np.ndarray of gold labels
    """
    # Assume labeled set is small, work with dense matrices
    Y = arraylike_to_numpy(Y)
    L = L.toarray()
    X = np.where(L == 0, 0, np.where(L == np.vstack([Y] * L.shape[1]).T, 1, -1))
    return 0.5 * (X.sum(axis=0) / (L != 0).sum(axis=0) + 1)
Return the **empirical accuracy** against a set of labels Y (e.g. dev set) for each LF. Args: L: an n x m scipy.sparse matrix where L_{i,j} is the label given by the jth LF to the ith candidate Y: an [n] or [n, 1] np.ndarray of gold labels
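A tiny worked example with made-up labels (assuming the function and its arraylike_to_numpy helper are in scope): the result is, for each LF, the fraction of its non-abstain votes that match the gold labels.

import numpy as np
from scipy import sparse

# 4 candidates, 2 LFs; 0 means the LF abstained
L = sparse.csr_matrix([[1, 2],
                       [1, 0],
                       [2, 2],
                       [0, 1]])
Y = np.array([1, 1, 2, 2])
print(lf_empirical_accuracies(L, Y))   # [1.0, 0.333...]: LF1 got 3/3, LF2 got 1/3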
def vcenter_activate(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    vcenter = ET.SubElement(config, "vcenter",
                            xmlns="urn:brocade.com:mgmt:brocade-vswitch")
    id_key = ET.SubElement(vcenter, "id")
    id_key.text = kwargs.pop('id')
    activate = ET.SubElement(vcenter, "activate")
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
def opt_with_frequency_flattener(cls, qchem_command, multimode="openmp", input_file="mol.qin", output_file="mol.qout", qclog_file="mol.qclog", max_iterations=10, max_molecule_perturb_scale=0.3, check_connectivity=True, **QCJob_kwargs): """ Optimize a structure and calculate vibrational frequencies to check if the structure is in a true minima. If a frequency is negative, iteratively perturbe the geometry, optimize, and recalculate frequencies until all are positive, aka a true minima has been found. Args: qchem_command (str): Command to run QChem. multimode (str): Parallelization scheme, either openmp or mpi. input_file (str): Name of the QChem input file. output_file (str): Name of the QChem output file. max_iterations (int): Number of perturbation -> optimization -> frequency iterations to perform. Defaults to 10. max_molecule_perturb_scale (float): The maximum scaled perturbation that can be applied to the molecule. Defaults to 0.3. check_connectivity (bool): Whether to check differences in connectivity introduced by structural perturbation. Defaults to True. **QCJob_kwargs: Passthrough kwargs to QCJob. See :class:`custodian.qchem.jobs.QCJob`. """ min_molecule_perturb_scale = 0.1 scale_grid = 10 perturb_scale_grid = ( max_molecule_perturb_scale - min_molecule_perturb_scale ) / scale_grid if not os.path.exists(input_file): raise AssertionError('Input file must be present!') orig_opt_input = QCInput.from_file(input_file) orig_opt_rem = copy.deepcopy(orig_opt_input.rem) orig_freq_rem = copy.deepcopy(orig_opt_input.rem) orig_freq_rem["job_type"] = "freq" first = True reversed_direction = False num_neg_freqs = [] for ii in range(max_iterations): yield (QCJob( qchem_command=qchem_command, multimode=multimode, input_file=input_file, output_file=output_file, qclog_file=qclog_file, suffix=".opt_" + str(ii), backup=first, **QCJob_kwargs)) first = False opt_outdata = QCOutput(output_file + ".opt_" + str(ii)).data if opt_outdata["structure_change"] == "unconnected_fragments" and not opt_outdata["completion"]: print("Unstable molecule broke into unconnected fragments which failed to optimize! Exiting...") break else: freq_QCInput = QCInput( molecule=opt_outdata.get("molecule_from_optimized_geometry"), rem=orig_freq_rem, opt=orig_opt_input.opt, pcm=orig_opt_input.pcm, solvent=orig_opt_input.solvent) freq_QCInput.write_file(input_file) yield (QCJob( qchem_command=qchem_command, multimode=multimode, input_file=input_file, output_file=output_file, qclog_file=qclog_file, suffix=".freq_" + str(ii), backup=first, **QCJob_kwargs)) outdata = QCOutput(output_file + ".freq_" + str(ii)).data errors = outdata.get("errors") if len(errors) != 0: raise AssertionError('No errors should be encountered while flattening frequencies!') if outdata.get('frequencies')[0] > 0.0: print("All frequencies positive!") break else: num_neg_freqs += [sum(1 for freq in outdata.get('frequencies') if freq < 0)] if len(num_neg_freqs) > 1: if num_neg_freqs[-1] == num_neg_freqs[-2] and not reversed_direction: reversed_direction = True elif num_neg_freqs[-1] == num_neg_freqs[-2] and reversed_direction: if len(num_neg_freqs) < 3: raise AssertionError("ERROR: This should only be possible after at least three frequency flattening iterations! Exiting...") else: raise Exception("ERROR: Reversing the perturbation direction still could not flatten any frequencies. 
Exiting...") elif num_neg_freqs[-1] != num_neg_freqs[-2] and reversed_direction: reversed_direction = False negative_freq_vecs = outdata.get("frequency_mode_vectors")[0] structure_successfully_perturbed = False for molecule_perturb_scale in np.arange( max_molecule_perturb_scale, min_molecule_perturb_scale, -perturb_scale_grid): new_coords = perturb_coordinates( old_coords=outdata.get("initial_geometry"), negative_freq_vecs=negative_freq_vecs, molecule_perturb_scale=molecule_perturb_scale, reversed_direction=reversed_direction) new_molecule = Molecule( species=outdata.get('species'), coords=new_coords, charge=outdata.get('charge'), spin_multiplicity=outdata.get('multiplicity')) if check_connectivity: old_molgraph = MoleculeGraph.with_local_env_strategy(outdata.get("initial_molecule"), OpenBabelNN(), reorder=False, extend_structure=False) new_molgraph = MoleculeGraph.with_local_env_strategy(new_molecule, OpenBabelNN(), reorder=False, extend_structure=False) if old_molgraph.isomorphic_to(new_molgraph): structure_successfully_perturbed = True break if not structure_successfully_perturbed: raise Exception( "ERROR: Unable to perturb coordinates to remove negative frequency without changing the connectivity! Exiting..." ) new_opt_QCInput = QCInput( molecule=new_molecule, rem=orig_opt_rem, opt=orig_opt_input.opt, pcm=orig_opt_input.pcm, solvent=orig_opt_input.solvent) new_opt_QCInput.write_file(input_file)
Optimize a structure and calculate vibrational frequencies to check if the structure is at a true minimum. If a frequency is negative, iteratively perturb the geometry, optimize, and recalculate frequencies until all are positive, i.e. a true minimum has been found. Args: qchem_command (str): Command to run QChem. multimode (str): Parallelization scheme, either openmp or mpi. input_file (str): Name of the QChem input file. output_file (str): Name of the QChem output file. max_iterations (int): Number of perturbation -> optimization -> frequency iterations to perform. Defaults to 10. max_molecule_perturb_scale (float): The maximum scaled perturbation that can be applied to the molecule. Defaults to 0.3. check_connectivity (bool): Whether to check differences in connectivity introduced by structural perturbation. Defaults to True. **QCJob_kwargs: Passthrough kwargs to QCJob. See :class:`custodian.qchem.jobs.QCJob`.
def _assertIndex(self, index):
    """Raise TypeError or IndexError if index is not an integer or out
    of range for the number of elements in this array, respectively.
    """
    if type(index) is not int:
        raise TypeError('list indices must be integers')
    if index < 0 or index >= self.nelems:
        raise IndexError('list index out of range')
Raise TypeError or IndexError if index is not an integer or out of range for the number of elements in this array, respectively.
def main():
    """
    This function obtains hosts from core and starts a nessus scan on
    these hosts. The nessus tag is appended to the host tags.
    """
    config = Config()
    core = HostSearch()
    hosts = core.get_hosts(tags=['!nessus'], up=True)
    hosts = list(hosts)  # materialize the generator
    host_ips = ",".join([str(host.address) for host in hosts])
    url = config.get('nessus', 'host')
    access = config.get('nessus', 'access_key')
    secret = config.get('nessus', 'secret_key')
    template_name = config.get('nessus', 'template_name')
    nessus = Nessus(access, secret, url, template_name)
    scan_id = nessus.create_scan(host_ips)
    nessus.start_scan(scan_id)
    for host in hosts:
        host.add_tag('nessus')
        host.save()
    Logger().log(
        "nessus",
        "Nessus scan started on {} hosts".format(len(hosts)),
        {'scanned_hosts': len(hosts)})
This function obtains hosts from core and starts a nessus scan on these hosts. The nessus tag is appended to the host tags.
def get_examples(self, compact=False): """ Returns an OrderedDict mapping labels to Example objects. Args: compact (bool): If True, union members of void type are converted to their compact representation: no ".tag" key or containing dict, just the tag as a string. """ # Copy it just in case the caller wants to mutate the object. examples = copy.deepcopy(self._examples) if not compact: return examples def make_compact(d): # Traverse through dicts looking for ones that have a lone .tag # key, which can be converted into the compact form. if not isinstance(d, dict): return for key in d: if isinstance(d[key], dict): inner_d = d[key] if len(inner_d) == 1 and '.tag' in inner_d: d[key] = inner_d['.tag'] else: make_compact(inner_d) if isinstance(d[key], list): for item in d[key]: make_compact(item) for example in examples.values(): if (isinstance(example.value, dict) and len(example.value) == 1 and '.tag' in example.value): # Handle the case where the top-level of the example can be # made compact. example.value = example.value['.tag'] else: make_compact(example.value) return examples
Returns an OrderedDict mapping labels to Example objects. Args: compact (bool): If True, union members of void type are converted to their compact representation: no ".tag" key or containing dict, just the tag as a string.
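As a concrete (hypothetical) illustration of the compact conversion described above, a void union member collapses from its tagged-dict form to a bare tag string:

# compact=False: the void union member keeps its '.tag' wrapper
example_value = {'sharing_policy': {'.tag': 'public'}}

# compact=True: the lone '.tag' dict is replaced by the tag itself
example_value = {'sharing_policy': 'public'}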
def assign_statement(self):
    """
    assign smt : variable ASSIGN expression(;)

    Feature Type Array adds:
               | variable SETITEM expression(;)
    """
    left = self.variable()
    op = self.cur_token
    self.eat(TokenTypes.ASSIGN)
    right = self.expression()
    smt = None
    if Features.TYPE_ARRAY in self.features and isinstance(left, GetArrayItem):
        # Remake this as a setitem.
        smt = SetArrayItem(left.left, left.right, right)
    else:
        smt = Assign(op, left, right)
    if self.cur_token.type == TokenTypes.SEMI_COLON:
        self.eat(TokenTypes.SEMI_COLON)
    return smt
assign smt : variable ASSIGN expression(;) Feature Type Array adds: | variable SETITEM expression(;)
def mail_session(self, name, host, username, mail_from, props):
    """
    Domain mail session.

    :param str name: Mail session name.
    :param str host: Mail host.
    :param str username: Mail username.
    :param str mail_from: Mail "from" address.
    :param dict props: Extra properties.
    :rtype: MailSession
    """
    return MailSession(self.__endpoint, name, host, username, mail_from, props)
Domain mail session. :param str name: Mail session name. :param str host: Mail host. :param str username: Mail username. :param str mail_from: Mail "from" address. :param dict props: Extra properties. :rtype: MailSession
def help_center_article_translation_update(self, article_id, locale, data, **kwargs):
    "https://developer.zendesk.com/rest_api/docs/help_center/translations#update-translation"
    api_path = "/api/v2/help_center/articles/{article_id}/translations/{locale}.json"
    api_path = api_path.format(article_id=article_id, locale=locale)
    return self.call(api_path, method="PUT", data=data, **kwargs)
https://developer.zendesk.com/rest_api/docs/help_center/translations#update-translation
def _init_data_map(self):
    """ Default data map initialization: MUST be overridden in children """
    if self._data_map is None:
        self._data_map = {'_root': None}
        self._data_map.update({}.fromkeys(self._metadata_props))
Default data map initialization: MUST be overridden in children
def adb_cmd(self, command, **kwargs):
    '''
    Run adb command, for example: adb(['pull', '/data/local/tmp/a.png'])

    Args:
        command: string or list of string

    Returns:
        command output
    '''
    kwargs['timeout'] = kwargs.get('timeout', self._adb_shell_timeout)
    if isinstance(command, (list, tuple)):
        return self.adb_device.run_cmd(*list(command), **kwargs)
    return self.adb_device.run_cmd(command, **kwargs)
Run adb command, for example: adb(['pull', '/data/local/tmp/a.png']) Args: command: string or list of string Returns: command output
def dos_plot_data(self, yscale=1, xmin=-6., xmax=6., colours=None, plot_total=True, legend_cutoff=3, subplot=False, zero_to_efermi=True, cache=None): """Get the plotting data. Args: yscale (:obj:`float`, optional): Scaling factor for the y-axis. xmin (:obj:`float`, optional): The minimum energy to mask the energy and density of states data (reduces plotting load). xmax (:obj:`float`, optional): The maximum energy to mask the energy and density of states data (reduces plotting load). colours (:obj:`dict`, optional): Use custom colours for specific element and orbital combinations. Specified as a :obj:`dict` of :obj:`dict` of the colours. For example:: { 'Sn': {'s': 'r', 'p': 'b'}, 'O': {'s': '#000000'} } The colour can be a hex code, series of rgb value, or any other format supported by matplotlib. plot_total (:obj:`bool`, optional): Plot the total density of states. Defaults to ``True``. legend_cutoff (:obj:`float`, optional): The cut-off (in % of the maximum density of states within the plotting range) for an elemental orbital to be labelled in the legend. This prevents the legend from containing labels for orbitals that have very little contribution in the plotting range. subplot (:obj:`bool`, optional): Plot the density of states for each element on separate subplots. Defaults to ``False``. zero_to_efermi (:obj:`bool`, optional): Normalise the plot such that the Fermi level is set as 0 eV. cache (:obj:`dict`, optional): Cache object tracking how colours have been assigned to orbitals. The format is the same as the "colours" dict. This defaults to the module-level sumo.plotting.colour_cache object, but an empty dict can be used as a fresh cache. This object will be modified in-place. Returns: dict: The plotting data. Formatted with the following keys: "energies" (:obj:`numpy.ndarray`) The energies. "mask" (:obj:`numpy.ndarray`) A mask used to trim the density of states data and prevent unwanted data being included in the output file. "lines" (:obj:`list`) A :obj:`list` of :obj:`dict` containing the density data and some metadata. Each line :obj:`dict` contains the keys: "label" (:obj:`str`) The label for the legend. "dens" (:obj:`numpy.ndarray`) The density of states data. "colour" (:obj:`str`) The colour of the line. "alpha" (:obj:`float`) The alpha value for line fill. "ymin" (:obj:`float`) The minimum y-axis limit. "ymax" (:obj:`float`) The maximum y-axis limit. """ if cache is None: cache = colour_cache # mask needed to prevent unwanted data in pdf and for finding y limit dos = self._dos pdos = self._pdos eners = dos.energies - dos.efermi if zero_to_efermi else dos.energies mask = (eners >= xmin - 0.05) & (eners <= xmax + 0.05) plot_data = {'mask': mask, 'energies': eners} spins = dos.densities.keys() ymax = 0 if plot_total: if 'text.color' in matplotlib.rcParams: tdos_colour = matplotlib.rcParams['text.color'] if tdos_colour is None: tdos_colour = 'k' else: tdos_colour = 'k' lines = [] tdos = {'label': 'Total DOS', 'dens': dos.densities, 'colour': tdos_colour, 'alpha': 0.15} # subplot data formatted as a list of lists of dicts, with each # list of dicts being plotted on a separate graph, if only one list # then solo plot lines.append([tdos]) dmax = max([max(d[mask]) for d in dos.densities.values()]) ymax = dmax if dmax > ymax else ymax elif not subplot: lines = [[]] # need a blank list to add lines into else: lines = [] # TODO: Fix broken behaviour if plot_total is off cutoff = (legend_cutoff / 100.) 
* (ymax / 1.05) for el, el_pdos in pdos.items(): el_lines = [] for orb in sort_orbitals(el_pdos): dmax = max([max(d[mask]) for d in el_pdos[orb].densities.values()]) ymax = dmax if dmax > ymax else ymax label = None if dmax < cutoff else '{} ({})'.format(el, orb) colour, cache = get_cached_colour(el, orb, colours, cache=cache) el_lines.append({'label': label, 'alpha': 0.25, 'colour': colour, 'dens': el_pdos[orb].densities}) if subplot: lines.append(el_lines) else: lines[0].extend(el_lines) ymax = ymax * empty_space / yscale ymin = 0 if len(spins) == 1 else -ymax plot_data.update({'lines': lines, 'ymax': ymax, 'ymin': ymin}) return plot_data
Get the plotting data. Args: yscale (:obj:`float`, optional): Scaling factor for the y-axis. xmin (:obj:`float`, optional): The minimum energy to mask the energy and density of states data (reduces plotting load). xmax (:obj:`float`, optional): The maximum energy to mask the energy and density of states data (reduces plotting load). colours (:obj:`dict`, optional): Use custom colours for specific element and orbital combinations. Specified as a :obj:`dict` of :obj:`dict` of the colours. For example:: { 'Sn': {'s': 'r', 'p': 'b'}, 'O': {'s': '#000000'} } The colour can be a hex code, series of rgb value, or any other format supported by matplotlib. plot_total (:obj:`bool`, optional): Plot the total density of states. Defaults to ``True``. legend_cutoff (:obj:`float`, optional): The cut-off (in % of the maximum density of states within the plotting range) for an elemental orbital to be labelled in the legend. This prevents the legend from containing labels for orbitals that have very little contribution in the plotting range. subplot (:obj:`bool`, optional): Plot the density of states for each element on separate subplots. Defaults to ``False``. zero_to_efermi (:obj:`bool`, optional): Normalise the plot such that the Fermi level is set as 0 eV. cache (:obj:`dict`, optional): Cache object tracking how colours have been assigned to orbitals. The format is the same as the "colours" dict. This defaults to the module-level sumo.plotting.colour_cache object, but an empty dict can be used as a fresh cache. This object will be modified in-place. Returns: dict: The plotting data. Formatted with the following keys: "energies" (:obj:`numpy.ndarray`) The energies. "mask" (:obj:`numpy.ndarray`) A mask used to trim the density of states data and prevent unwanted data being included in the output file. "lines" (:obj:`list`) A :obj:`list` of :obj:`dict` containing the density data and some metadata. Each line :obj:`dict` contains the keys: "label" (:obj:`str`) The label for the legend. "dens" (:obj:`numpy.ndarray`) The density of states data. "colour" (:obj:`str`) The colour of the line. "alpha" (:obj:`float`) The alpha value for line fill. "ymin" (:obj:`float`) The minimum y-axis limit. "ymax" (:obj:`float`) The maximum y-axis limit.
async def shuffle_participants(self):
    """ Shuffle participants' seeds

    |methcoro|

    Note:
        |from_api| Randomize seeds among participants. Only applicable
        before a tournament has started.

    Raises:
        APIException
    """
    res = await self.connection('POST', 'tournaments/{}/participants/randomize'.format(self._id))
    self._refresh_participants_from_json(res)
Shuffle participants' seeds |methcoro| Note: |from_api| Randomize seeds among participants. Only applicable before a tournament has started. Raises: APIException
def process_response(self, request, response):
    """Sets the cache, if needed."""
    # never cache headers + ETag
    add_never_cache_headers(response)

    if not hasattr(request, '_cache_update_cache') or not request._cache_update_cache:
        # We don't need to update the cache, just return.
        return response
    if request.method != 'GET':
        # This is a stronger requirement than above. It is needed
        # because of interactions between this middleware and the
        # HTTPMiddleware, which throws the body of a HEAD-request
        # away before this middleware gets a chance to cache it.
        return response
    if not response.status_code == 200:
        return response

    # use the precomputed cache_key
    if request._cache_middleware_key:
        cache_key = request._cache_middleware_key
    else:
        cache_key = learn_cache_key(request, response, self.cache_timeout, self.key_prefix)

    # include the orig_time information within the cache
    cache.set(cache_key, (time.time(), response), self.cache_timeout)
    return response
Sets the cache, if needed.
def cbpdnmd_xstep(k):
    """Do the X step of the cbpdn stage. The only parameter is the slice
    index `k` and there are no return values; all inputs and outputs are
    from and to global variables.
    """
    YU0 = mp_Z_Y0[k] + mp_S[k] - mp_Z_U0[k]
    YU1 = mp_Z_Y1[k] - mp_Z_U1[k]
    if mp_cri.Cd == 1:
        b = np.conj(mp_Df) * sl.rfftn(YU0, None, mp_cri.axisN) + \
            sl.rfftn(YU1, None, mp_cri.axisN)
        Xf = sl.solvedbi_sm(mp_Df, 1.0, b, axis=mp_cri.axisM)
    else:
        b = sl.inner(np.conj(mp_Df), sl.rfftn(YU0, None, mp_cri.axisN),
                     axis=mp_cri.axisC) + \
            sl.rfftn(YU1, None, mp_cri.axisN)
        Xf = sl.solvemdbi_ism(mp_Df, 1.0, b, mp_cri.axisM, mp_cri.axisC)
    mp_Z_X[k] = sl.irfftn(Xf, mp_cri.Nv, mp_cri.axisN)
    mp_DX[k] = sl.irfftn(sl.inner(mp_Df, Xf), mp_cri.Nv, mp_cri.axisN)
Do the X step of the cbpdn stage. The only parameter is the slice index `k` and there are no return values; all inputs and outputs are from and to global variables.
def stops(freq, interval=1, count=None, wkst=None, bysetpos=None,
          bymonth=None, bymonthday=None, byyearday=None, byeaster=None,
          byweekno=None, byweekday=None, byhour=None, byminute=None,
          bysecond=None, timezone='UTC', start=None, stop=None):
    """
    This will create a generator of Delorean objects that apply to the
    settings passed in.
    """
    # check to see if datetimes passed in are naive; if so process them
    # with the given timezone.
    if all([(start is None or is_datetime_naive(start)),
            (stop is None or is_datetime_naive(stop))]):
        pass
    else:
        raise DeloreanInvalidDatetime('Provide a naive datetime object')

    # if no datetimes are passed in create a proper datetime object for
    # start default because default in dateutil is datetime.now() :(
    if start is None:
        start = datetime_timezone(timezone)

    for dt in rrule(freq, interval=interval, count=count, wkst=wkst,
                    bysetpos=bysetpos, bymonth=bymonth, bymonthday=bymonthday,
                    byyearday=byyearday, byeaster=byeaster, byweekno=byweekno,
                    byweekday=byweekday, byhour=byhour, byminute=byminute,
                    bysecond=bysecond, until=stop, dtstart=start):
        # make the Delorean object and yield it; doing this to make sure
        # Delorean receives a naive datetime.
        dt = dt.replace(tzinfo=None)
        d = Delorean(datetime=dt, timezone=timezone)
        yield d
This will create a generator of Delorean objects that apply to the settings passed in.
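A short usage sketch (assuming stops is importable from the delorean package, alongside dateutil's frequency constants):

from delorean import stops
from dateutil.rrule import DAILY

# Yields three Delorean objects, one per day, localized to US/Pacific.
for d in stops(freq=DAILY, count=3, timezone='US/Pacific'):
    print(d.datetime)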
def ch_duration(self, *channels: Channel) -> int:
    """Return duration of supplied channels.

    Args:
        *channels: Supplied channels
    """
    return self.timeslots.ch_duration(*channels)
Return duration of supplied channels. Args: *channels: Supplied channels
def tracked(self, tag=None, fromdate=None, todate=None):
    """
    Gets a total count of emails you’ve sent with open tracking or link
    tracking enabled.
    """
    return self.call(
        "GET", "/stats/outbound/tracked",
        tag=tag, fromdate=fromdate, todate=todate)
Gets a total count of emails you’ve sent with open tracking or link tracking enabled.
def _copy_hdxobjects(self, hdxobjects, hdxobjectclass, attribute_to_copy=None):
    # type: (List[HDXObjectUpperBound], type, Optional[str]) -> List[HDXObjectUpperBound]
    """Helper function to make a deep copy of a supplied list of HDX objects

    Args:
        hdxobjects (List[T <= HDXObject]): list of HDX objects to copy
        hdxobjectclass (type): Type of the HDX Objects to be copied
        attribute_to_copy (Optional[str]): An attribute to copy over from the HDX object. Defaults to None.

    Returns:
        List[T <= HDXObject]: Deep copy of list of HDX objects
    """
    newhdxobjects = list()
    for hdxobject in hdxobjects:
        newhdxobjectdata = copy.deepcopy(hdxobject.data)
        newhdxobject = hdxobjectclass(newhdxobjectdata, configuration=self.configuration)
        if attribute_to_copy:
            value = getattr(hdxobject, attribute_to_copy)
            setattr(newhdxobject, attribute_to_copy, value)
        newhdxobjects.append(newhdxobject)
    return newhdxobjects
Helper function to make a deep copy of a supplied list of HDX objects Args: hdxobjects (List[T <= HDXObject]): list of HDX objects to copy hdxobjectclass (type): Type of the HDX Objects to be copied attribute_to_copy (Optional[str]): An attribute to copy over from the HDX object. Defaults to None. Returns: List[T <= HDXObject]: Deep copy of list of HDX objects
def _gatherLookupIndexes(gpos): """ Gather a mapping of script to lookup indexes referenced by the kern feature for each script. Returns a dictionary of this structure: { "latn" : [0], "DFLT" : [0] } """ # gather the indexes of the kern features kernFeatureIndexes = [index for index, featureRecord in enumerate(gpos.FeatureList.FeatureRecord) if featureRecord.FeatureTag == "kern"] # find scripts and languages that have kern features scriptKernFeatureIndexes = {} for scriptRecord in gpos.ScriptList.ScriptRecord: script = scriptRecord.ScriptTag thisScriptKernFeatureIndexes = [] defaultLangSysRecord = scriptRecord.Script.DefaultLangSys if defaultLangSysRecord is not None: f = [] for featureIndex in defaultLangSysRecord.FeatureIndex: if featureIndex not in kernFeatureIndexes: continue f.append(featureIndex) if f: thisScriptKernFeatureIndexes.append((None, f)) if scriptRecord.Script.LangSysRecord is not None: for langSysRecord in scriptRecord.Script.LangSysRecord: langSys = langSysRecord.LangSysTag f = [] for featureIndex in langSysRecord.LangSys.FeatureIndex: if featureIndex not in kernFeatureIndexes: continue f.append(featureIndex) if f: thisScriptKernFeatureIndexes.append((langSys, f)) scriptKernFeatureIndexes[script] = thisScriptKernFeatureIndexes # convert the feature indexes to lookup indexes scriptLookupIndexes = {} for script, featureDefinitions in scriptKernFeatureIndexes.items(): lookupIndexes = scriptLookupIndexes[script] = [] for language, featureIndexes in featureDefinitions: for featureIndex in featureIndexes: featureRecord = gpos.FeatureList.FeatureRecord[featureIndex] for lookupIndex in featureRecord.Feature.LookupListIndex: if lookupIndex not in lookupIndexes: lookupIndexes.append(lookupIndex) # done return scriptLookupIndexes
Gather a mapping of script to lookup indexes referenced by the kern feature for each script. Returns a dictionary of this structure: { "latn" : [0], "DFLT" : [0] }
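A sketch of how a helper like this would typically be driven from fontTools (the font path is hypothetical):

from fontTools.ttLib import TTFont

font = TTFont('MyFont.ttf')            # hypothetical path
if 'GPOS' in font:
    gpos = font['GPOS'].table          # the GPOS table object expected above
    print(_gatherLookupIndexes(gpos))  # e.g. {'latn': [0], 'DFLT': [0]}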
def series64bitto32bit(s):
    """
    Convert a Pandas series from 64 bit types to 32 bit types to save
    memory or disk space.

    Parameters
    ----------
    s : The series to convert

    Returns
    -------
    The converted series
    """
    if s.dtype == np.float64:
        return s.astype('float32')
    elif s.dtype == np.int64:
        return s.astype('int32')
    return s
Convert a Pandas series from 64 bit types to 32 bit types to save memory or disk space. Parameters ---------- s : The series to convert Returns ------- The converted series
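For example, converting an int64 series roughly halves its memory footprint (a self-contained sketch):

import numpy as np
import pandas as pd

s = pd.Series(np.arange(1000000, dtype=np.int64))
s32 = series64bitto32bit(s)
print(s.memory_usage(), s32.memory_usage())   # ~8 MB vs ~4 MB of values
print(s32.dtype)                              # int32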
def makestate(im, pos, rad, slab=None, mem_level='hi'): """ Workhorse for creating & optimizing states with an initial centroid guess. This is an example function that works for a particular microscope. For your own microscope, you'll need to change particulars such as the psf type and the orders of the background and illumination. Parameters ---------- im : :class:`~peri.util.RawImage` A RawImage of the data. pos : [N,3] element numpy.ndarray. The initial guess for the N particle positions. rad : N element numpy.ndarray. The initial guess for the N particle radii. slab : :class:`peri.comp.objs.Slab` or None, optional If not None, a slab corresponding to that in the image. Default is None. mem_level : {'lo', 'med-lo', 'med', 'med-hi', 'hi'}, optional A valid memory level for the state to control the memory overhead at the expense of accuracy. Default is `'hi'` Returns ------- :class:`~peri.states.ImageState` An ImageState with a linked z-scale, a ConfocalImageModel, and all the necessary components with orders at which are useful for my particular test case. """ if slab is not None: o = comp.ComponentCollection( [ objs.PlatonicSpheresCollection(pos, rad, zscale=zscale), slab ], category='obj' ) else: o = objs.PlatonicSpheresCollection(pos, rad, zscale=zscale) p = exactpsf.FixedSSChebLinePSF() npts, iorder = _calc_ilm_order(im.get_image().shape) i = ilms.BarnesStreakLegPoly2P1D(npts=npts, zorder=iorder) b = ilms.LegendrePoly2P1D(order=(9 ,3, 5), category='bkg') c = comp.GlobalScalar('offset', 0.0) s = states.ImageState(im, [o, i, b, c, p]) runner.link_zscale(s) if mem_level != 'hi': s.set_mem_level(mem_level) opt.do_levmarq(s, ['ilm-scale'], max_iter=1, run_length=6, max_mem=1e4) return s
Workhorse for creating & optimizing states with an initial centroid guess. This is an example function that works for a particular microscope. For your own microscope, you'll need to change particulars such as the psf type and the orders of the background and illumination. Parameters ---------- im : :class:`~peri.util.RawImage` A RawImage of the data. pos : [N,3] element numpy.ndarray. The initial guess for the N particle positions. rad : N element numpy.ndarray. The initial guess for the N particle radii. slab : :class:`peri.comp.objs.Slab` or None, optional If not None, a slab corresponding to that in the image. Default is None. mem_level : {'lo', 'med-lo', 'med', 'med-hi', 'hi'}, optional A valid memory level for the state to control the memory overhead at the expense of accuracy. Default is `'hi'` Returns ------- :class:`~peri.states.ImageState` An ImageState with a linked z-scale, a ConfocalImageModel, and all the necessary components with orders at which are useful for my particular test case.
def get_personal_module(self):
    """
    Instantiate the :class:`~fluent_dashboard.modules.PersonalModule` for use in the dashboard.
    """
    return PersonalModule(
        layout='inline',
        draggable=False,
        deletable=False,
        collapsible=False,
    )
Instantiate the :class:`~fluent_dashboard.modules.PersonalModule` for use in the dashboard.
def evalSamples(self, x):
    '''Evaluates the samples of the quantity of interest and its gradient
    (if supplied) at the given values of the design variables

    :param iterable x: values of the design variables, this is passed as
        the first argument to the function fqoi

    :return: (values of the quantity of interest, values of the gradient)
    :rtype: Tuple
    '''
    # Make sure dimensions are correct
    # u_sample_dimensions = self._processDimensions()
    self._N_dv = len(_makeIter(x))

    if self.verbose:
        print('Evaluating surrogate')
    if self.surrogate is None:
        def fqoi(u):
            return self.fqoi(x, u)
        def fgrad(u):
            return self.jac(x, u)
        jac = self.jac
    else:
        fqoi, fgrad, surr_jac = self._makeSurrogates(x)
        jac = surr_jac
    u_samples = self._getParameterSamples()

    if self.verbose:
        print('Evaluating quantity of interest at samples')
    q_samples, grad_samples = self._evalSamples(u_samples, fqoi, fgrad, jac)
    return q_samples, grad_samples
Evaluates the samples of the quantity of interest and its gradient (if supplied) at the given values of the design variables :param iterable x: values of the design variables, this is passed as the first argument to the function fqoi :return: (values of the quantity of interest, values of the gradient) :rtype: Tuple
def user_admin_urlname(action):
    """
    Return the admin URL name for the user model in use.
    """
    user = get_user_model()
    return 'admin:%s_%s_%s' % (
        user._meta.app_label, user._meta.model_name, action)
Return the admin URL name for the user model in use.
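Typical use is to build reverse() lookups that keep working when a custom user model replaces django.contrib.auth's default (the pk value is illustrative):

from django.urls import reverse

url = reverse(user_admin_urlname('change'), args=[42])
# with the default user model: 'admin:auth_user_change' -> '/admin/auth/user/42/change/'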
def train_local(self, closest_point, label_vector_description=None, N=None,
                pivot=True, **kwargs):
    """
    Train the model in a Cannon-like fashion using the grid points as
    labels and the intensities as normalised rest-frame fluxes within
    some local regime.
    """
    lv = self._cannon_label_vector if label_vector_description is None else \
        self._interpret_label_vector(label_vector_description)

    # By default we will train to the nearest 10% of the grid.
    # If grid subset is a fraction, scale it to real numbers.
    if N is None:
        N = self._configuration.get("settings", {}).get("grid_subset", 0.10)
    if 1 >= N > 0:
        N = int(np.round(N * self.grid_points.size))

    logger.debug("Using {} nearest points for local Cannon model".format(N))

    # Use closest N points.
    dtype = [(name, '<f8') for name in self.grid_points.dtype.names]
    grid_points \
        = self.grid_points.astype(dtype).view(float).reshape(-1, len(dtype))
    distance = np.sum(np.abs(grid_points - np.array(closest_point)) /
                      np.ptp(grid_points, axis=0), axis=1)
    grid_indices = np.argsort(distance)[:N]

    lv_array, _, offsets = _build_label_vector_array(
        self.grid_points[grid_indices], lv, pivot=pivot)
    return self._train(lv_array, grid_indices, offsets, lv, **kwargs)
Train the model in a Cannon-like fashion using the grid points as labels and the intensities as normalised rest-frame fluxes within some local regime.
def loadFromDisk(self, calculation): """ Read the spectra from the files generated by Quanty and store them as a list of spectum objects. """ suffixes = { 'Isotropic': 'iso', 'Circular Dichroism (R-L)': 'cd', 'Right Polarized (R)': 'r', 'Left Polarized (L)': 'l', 'Linear Dichroism (V-H)': 'ld', 'Vertical Polarized (V)': 'v', 'Horizontal Polarized (H)': 'h', } self.raw = list() for spectrumName in self.toPlot: suffix = suffixes[spectrumName] path = '{}_{}.spec'.format(calculation.baseName, suffix) try: data = np.loadtxt(path, skiprows=5) except (OSError, IOError) as e: raise e rows, columns = data.shape if calculation.experiment in ['XAS', 'XPS', 'XES']: xMin = calculation.xMin xMax = calculation.xMax xNPoints = calculation.xNPoints if calculation.experiment == 'XES': x = np.linspace(xMin, xMax, xNPoints + 1) x = x[::-1] y = data[:, 2] y = y / np.abs(y.max()) else: x = np.linspace(xMin, xMax, xNPoints + 1) y = data[:, 2::2].flatten() spectrum = Spectrum1D(x, y) spectrum.name = spectrumName if len(suffix) > 2: spectrum.shortName = suffix.title() else: spectrum.shortName = suffix.upper() if calculation.experiment in ['XAS', ]: spectrum.xLabel = 'Absorption Energy (eV)' elif calculation.experiment in ['XPS', ]: spectrum.xLabel = 'Binding Energy (eV)' elif calculation.experiment in ['XES', ]: spectrum.xLabel = 'Emission Energy (eV)' spectrum.yLabel = 'Intensity (a.u.)' self.broadenings = {'gaussian': (calculation.xGaussian, ), } else: xMin = calculation.xMin xMax = calculation.xMax xNPoints = calculation.xNPoints yMin = calculation.yMin yMax = calculation.yMax yNPoints = calculation.yNPoints x = np.linspace(xMin, xMax, xNPoints + 1) y = np.linspace(yMin, yMax, yNPoints + 1) z = data[:, 2::2] spectrum = Spectrum2D(x, y, z) spectrum.name = spectrumName if len(suffix) > 2: spectrum.shortName = suffix.title() else: spectrum.shortName = suffix.upper() spectrum.xLabel = 'Incident Energy (eV)' spectrum.yLabel = 'Energy Transfer (eV)' self.broadenings = {'gaussian': (calculation.xGaussian, calculation.yGaussian), } self.raw.append(spectrum) # Process the spectra once they where read from disk. self.process()
Read the spectra from the files generated by Quanty and store them as a list of spectrum objects.
def get_template(self, R): """Compute a template on latent factors Parameters ---------- R : 2D array, in format [n_voxel, n_dim] The scanner coordinate matrix of one subject's fMRI data Returns ------- template_prior : 1D array The template prior. template_centers_cov: 2D array, in shape [n_dim, n_dim] The template on centers' covariance. template_widths_var: float The template on widths' variance """ centers, widths = self.init_centers_widths(R) template_prior =\ np.zeros(self.K * (self.n_dim + 2 + self.cov_vec_size)) # template centers cov and widths var are const template_centers_cov = np.cov(R.T) * math.pow(self.K, -2 / 3.0) template_widths_var = self._get_max_sigma(R) centers_cov_all = np.tile(from_sym_2_tri(template_centers_cov), self.K) widths_var_all = np.tile(template_widths_var, self.K) # initial mean of centers' mean self.set_centers(template_prior, centers) self.set_widths(template_prior, widths) self.set_centers_mean_cov(template_prior, centers_cov_all) self.set_widths_mean_var(template_prior, widths_var_all) return template_prior, template_centers_cov, template_widths_var
Compute a template on latent factors Parameters ---------- R : 2D array, in format [n_voxel, n_dim] The scanner coordinate matrix of one subject's fMRI data Returns ------- template_prior : 1D array The template prior. template_centers_cov: 2D array, in shape [n_dim, n_dim] The template on centers' covariance. template_widths_var: float The template on widths' variance
def visdom_send_metrics(vis, metrics, update='replace'): """ Send set of metrics to visdom """ visited = {} sorted_metrics = sorted(metrics.columns, key=_column_original_name) for metric_basename, metric_list in it.groupby(sorted_metrics, key=_column_original_name): metric_list = list(metric_list) for metric in metric_list: if vis.win_exists(metric_basename) and (not visited.get(metric, False)): update = update elif not vis.win_exists(metric_basename): update = None else: update = 'append' vis.line( metrics[metric].values, metrics.index.values, win=metric_basename, name=metric, opts={ 'title': metric_basename, 'showlegend': True }, update=update ) if metric_basename != metric and len(metric_list) > 1: if vis.win_exists(metric): update = update else: update = None vis.line( metrics[metric].values, metrics.index.values, win=metric, name=metric, opts={ 'title': metric, 'showlegend': True }, update=update )
Send set of metrics to visdom
def bookmark(ctx): """Bookmark group. Uses [Caching](/references/polyaxon-cli/#caching) Examples: \b ```bash $ polyaxon group bookmark ``` \b ```bash $ polyaxon group -g 2 bookmark ``` """ user, project_name, _group = get_project_group_or_local(ctx.obj.get('project'), ctx.obj.get('group')) try: PolyaxonClient().experiment_group.bookmark(user, project_name, _group) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not bookmark group `{}`.'.format(_group)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) Printer.print_success("Experiments group is bookmarked.")
Bookmark group. Uses [Caching](/references/polyaxon-cli/#caching) Examples: \b ```bash $ polyaxon group bookmark ``` \b ```bash $ polyaxon group -g 2 bookmark ```
def maskedEqual(array, missingValue):
    """ Mask an array where equal to a given (missing)value.

    Unfortunately ma.masked_equal does not work with structured arrays. See:
    https://mail.scipy.org/pipermail/numpy-discussion/2011-July/057669.html

    If the data is a structured array the mask is applied for every field
    (i.e. forming a logical-and). Otherwise ma.masked_equal is called.
    """
    if array_is_structured(array):
        # Enforce the array to be masked
        if not isinstance(array, ma.MaskedArray):
            array = ma.MaskedArray(array)

        # Set the mask separately per field
        for nr, field in enumerate(array.dtype.names):
            if hasattr(missingValue, '__len__'):
                fieldMissingValue = missingValue[nr]
            else:
                fieldMissingValue = missingValue
            array[field] = ma.masked_equal(array[field], fieldMissingValue)

        check_class(array, ma.MaskedArray)  # post-condition check
        return array
    else:
        # masked_equal works with missing is None
        result = ma.masked_equal(array, missingValue, copy=False)
        check_class(result, ma.MaskedArray)  # post-condition check
        return result
Mask an array where equal to a given (missing)value. Unfortunately ma.masked_equal does not work with structured arrays. See: https://mail.scipy.org/pipermail/numpy-discussion/2011-July/057669.html If the data is a structured array the mask is applied for every field (i.e. forming a logical-and). Otherwise ma.masked_equal is called.
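A self-contained sketch of the structured-array branch, with made-up field names and per-field missing values:

import numpy as np
import numpy.ma as ma

arr = np.array([(1, 2.0), (-99, 2.0), (3, -1.0)],
               dtype=[('a', 'i4'), ('b', 'f4')])
masked = maskedEqual(arr, missingValue=(-99, -1.0))
print(masked['a'])   # [1 -- 3]
print(masked['b'])   # [2.0 2.0 --]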
def __decrypt_assertion(self, dom): """ Decrypts the Assertion :raises: Exception if no private key available :param dom: Encrypted Assertion :type dom: Element :returns: Decrypted Assertion :rtype: Element """ key = self.__settings.get_sp_key() debug = self.__settings.is_debug_active() if not key: raise OneLogin_Saml2_Error( 'No private key available to decrypt the assertion, check settings', OneLogin_Saml2_Error.PRIVATE_KEY_NOT_FOUND ) encrypted_assertion_nodes = OneLogin_Saml2_Utils.query(dom, '/samlp:Response/saml:EncryptedAssertion') if encrypted_assertion_nodes: encrypted_data_nodes = OneLogin_Saml2_Utils.query(encrypted_assertion_nodes[0], '//saml:EncryptedAssertion/xenc:EncryptedData') if encrypted_data_nodes: keyinfo = OneLogin_Saml2_Utils.query(encrypted_assertion_nodes[0], '//saml:EncryptedAssertion/xenc:EncryptedData/ds:KeyInfo') if not keyinfo: raise OneLogin_Saml2_ValidationError( 'No KeyInfo present, invalid Assertion', OneLogin_Saml2_ValidationError.KEYINFO_NOT_FOUND_IN_ENCRYPTED_DATA ) keyinfo = keyinfo[0] children = keyinfo.getchildren() if not children: raise OneLogin_Saml2_ValidationError( 'KeyInfo has no children nodes, invalid Assertion', OneLogin_Saml2_ValidationError.CHILDREN_NODE_NOT_FOUND_IN_KEYINFO ) for child in children: if 'RetrievalMethod' in child.tag: if child.attrib['Type'] != 'http://www.w3.org/2001/04/xmlenc#EncryptedKey': raise OneLogin_Saml2_ValidationError( 'Unsupported Retrieval Method found', OneLogin_Saml2_ValidationError.UNSUPPORTED_RETRIEVAL_METHOD ) uri = child.attrib['URI'] if not uri.startswith('#'): break uri = uri.split('#')[1] encrypted_key = OneLogin_Saml2_Utils.query(encrypted_assertion_nodes[0], './xenc:EncryptedKey[@Id=$tagid]', None, uri) if encrypted_key: keyinfo.append(encrypted_key[0]) encrypted_data = encrypted_data_nodes[0] decrypted = OneLogin_Saml2_Utils.decrypt_element(encrypted_data, key, debug=debug, inplace=True) dom.replace(encrypted_assertion_nodes[0], decrypted) return dom
Decrypts the Assertion :raises: Exception if no private key available :param dom: Encrypted Assertion :type dom: Element :returns: Decrypted Assertion :rtype: Element
def properties_from_mapping(self, bt_addr):
    """Retrieve properties (namespace, instance) for the specified bt address."""
    for addr, properties in self.eddystone_mappings:
        if addr == bt_addr:
            return properties
    return None
Retrieve properties (namespace, instance) for the specified bt address.
def Decode(self, encoded_data):
    """Decode the encoded data.

    Args:
        encoded_data (bytes): encoded data.

    Returns:
        tuple(bytes, bytes): decoded data and remaining encoded data.

    Raises:
        BackEndError: if the base64 stream cannot be decoded.
    """
    try:
        # TODO: replace by libuna implementation or equivalent. The behavior of
        # base64.b64decode() does not raise TypeError for certain invalid base64
        # data e.g. b'\x01\x02\x03\x04\x05\x06\x07\x08' these are silently
        # ignored.
        decoded_data = base64.b64decode(encoded_data)
    except (TypeError, binascii.Error) as exception:
        raise errors.BackEndError(
            'Unable to decode base64 stream with error: {0!s}.'.format(
                exception))

    return decoded_data, b''
Decode the encoded data. Args: encoded_data (bytes): encoded data. Returns: tuple(bytes, bytes): decoded data and remaining encoded data. Raises: BackEndError: if the base64 stream cannot be decoded.
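For instance (a minimal sketch; `decoder` stands in for an instance of the class this method belongs to):

import base64

encoded = base64.b64encode(b'\x00\x01\x02\x03')   # b'AAECAw=='
decoded, remainder = decoder.Decode(encoded)
assert decoded == b'\x00\x01\x02\x03' and remainder == b''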
def fromfuncs(funcs, n_sessions, eqdata, **kwargs): """ Generate features using a list of functions to apply to input data Parameters ---------- funcs : list of function Functions to apply to eqdata. Each function is expected to output a dataframe with index identical to a slice of `eqdata`. The slice must include at least `eqdata.index[skipatstart + n_sessions - 1:]`. Each function is also expected to have a function attribute `title`, which is used to generate the column names of the output features. n_sessions : int Number of sessions over which to create features. eqdata : DataFrame Data from which to generate features. The data will often be retrieved using `pn.get()`. constfeat : bool, optional Whether or not the returned features will have the constant feature. skipatstart : int, optional Number of rows to omit at the start of the output DataFrame. This parameter is necessary if any of the functions requires a rampup period before returning valid results, e.g. `sma()` or functions calculating volume relative to a past baseline. Defaults to 0. Returns ---------- features : DataFrame """ _skipatstart = kwargs.get('skipatstart', 0) _constfeat = kwargs.get('constfeat', True) _outcols = ['Constant'] if _constfeat else [] _n_allrows = len(eqdata.index) _n_featrows = _n_allrows - _skipatstart - n_sessions + 1 for _func in funcs: _outcols += map(partial(_concat, strval=' ' + _func.title), range(-n_sessions + 1, 1)) _features = pd.DataFrame(index=eqdata.index[_skipatstart + n_sessions - 1:], columns=_outcols, dtype=np.float64) _offset = 0 if _constfeat: _features.iloc[:, 0] = 1. _offset += 1 for _func in funcs: _values = _func(eqdata).values _n_values = len(_values) for i in range(n_sessions): _val_end = _n_values - n_sessions + i + 1 _features.iloc[:, _offset + i] = _values[_val_end - _n_featrows:_val_end] _offset += n_sessions return _features
Generate features using a list of functions to apply to input data Parameters ---------- funcs : list of function Functions to apply to eqdata. Each function is expected to output a dataframe with index identical to a slice of `eqdata`. The slice must include at least `eqdata.index[skipatstart + n_sessions - 1:]`. Each function is also expected to have a function attribute `title`, which is used to generate the column names of the output features. n_sessions : int Number of sessions over which to create features. eqdata : DataFrame Data from which to generate features. The data will often be retrieved using `pn.get()`. constfeat : bool, optional Whether or not the returned features will have the constant feature. skipatstart : int, optional Number of rows to omit at the start of the output DataFrame. This parameter is necessary if any of the functions requires a rampup period before returning valid results, e.g. `sma()` or functions calculating volume relative to a past baseline. Defaults to 0. Returns ---------- features : DataFrame
def _F(self, X):
    """
    analytic solution of the projection integral

    :param X: R/Rs
    :type X: float >0
    """
    if isinstance(X, int) or isinstance(X, float):
        if X < 1 and X > 0:
            a = 1 / (X**2 - 1) * (1 - 2 / np.sqrt(1 - X**2) *
                                  np.arctanh(np.sqrt((1 - X) / (1 + X))))
        elif X == 1:
            a = 1. / 3
        elif X > 1:
            a = 1 / (X**2 - 1) * (1 - 2 / np.sqrt(X**2 - 1) *
                                  np.arctan(np.sqrt((X - 1) / (1 + X))))
        else:  # X == 0
            c = 0.0000001
            a = 1 / (-1) * (1 - 2 / np.sqrt(1) *
                            np.arctanh(np.sqrt((1 - c) / (1 + c))))
    else:
        a = np.empty_like(X)
        x = X[(X < 1) & (X > 0)]
        a[(X < 1) & (X > 0)] = 1 / (x**2 - 1) * (
            1 - 2 / np.sqrt(1 - x**2) * np.arctanh(np.sqrt((1 - x) / (1 + x))))
        a[X == 1] = 1. / 3.
        x = X[X > 1]
        a[X > 1] = 1 / (x**2 - 1) * (
            1 - 2 / np.sqrt(x**2 - 1) * np.arctan(np.sqrt((x - 1) / (1 + x))))
        # a[X>y] = 0
        c = 0.0000001
        a[X == 0] = 1 / (-1) * (1 - 2 / np.sqrt(1) *
                                np.arctanh(np.sqrt((1 - c) / (1 + c))))
    return a
analytic solution of the projection integral :param X: R/Rs :type X: float >0
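Written out, the branches of the code correspond to the standard projected NFW expression (transcribed directly from the conditionals above, with x = R/Rs):

F(x) =
\begin{cases}
\frac{1}{x^2 - 1}\left(1 - \frac{2}{\sqrt{1 - x^2}}\,\operatorname{artanh}\sqrt{\frac{1 - x}{1 + x}}\right) & 0 < x < 1,\\
\frac{1}{3} & x = 1,\\
\frac{1}{x^2 - 1}\left(1 - \frac{2}{\sqrt{x^2 - 1}}\,\arctan\sqrt{\frac{x - 1}{1 + x}}\right) & x > 1.
\end{cases}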
def _is_unpacked_egg(path):
    """
    Determine if given path appears to be an unpacked egg.
    """
    return (
        _is_egg_path(path) and
        os.path.isfile(os.path.join(path, 'EGG-INFO', 'PKG-INFO'))
    )
Determine if given path appears to be an unpacked egg.
def remove(self): ''' a method to remove all records in the collection NOTE: this method removes all the files in the collection, but the collection folder itself created by oauth2 cannot be removed. only the user can remove access to the app folder :return: string with confirmation of deletion ''' title = '%s.remove' % self.__class__.__name__ # get contents of root for id, name, mimetype in self._list_directory(): try: self.drive.delete(fileId=id).execute() except Exception as err: if str(err).find('File not found') > -1: pass else: raise DriveConnectionError(title) # return outcome insert = 'collection' if self.collection_name: insert = self.collection_name exit_msg = 'Contents of %s will be removed from Google Drive.' % insert return exit_msg
a method to remove all records in the collection NOTE: this method removes all the files in the collection, but the collection folder itself created by oauth2 cannot be removed. only the user can remove access to the app folder :return: string with confirmation of deletion
def load_raw(path): """ Load image using PIL/Pillow without any processing. This is particularly useful for palette images, which will be loaded using their palette index values as opposed to `load` which will convert them to RGB. Parameters ---------- path : str Path to image file. """ _import_pil() from PIL import Image return np.array(Image.open(path))
Load image using PIL/Pillow without any processing. This is particularly useful for palette images, which will be loaded using their palette index values as opposed to `load` which will convert them to RGB. Parameters ---------- path : str Path to image file.
def _broadcast_arg(U, arg, argtype, name):
    """Broadcasts plotting option `arg` to all factors.

    Args:
        U : KTensor
        arg : argument provided by the user
        argtype : expected type for arg
        name : name of the variable, used for error handling

    Returns:
        iterable version of arg of length U.ndim
    """

    # if input is not iterable, broadcast it to all dimensions of the tensor
    if arg is None or isinstance(arg, argtype):
        return [arg for _ in range(U.ndim)]

    # check if iterable input is valid
    elif np.iterable(arg):
        if len(arg) != U.ndim:
            raise ValueError('Parameter {} was specified as a sequence of '
                             'incorrect length. The length must match the '
                             'number of tensor dimensions '
                             '(U.ndim={})'.format(name, U.ndim))
        elif not all([isinstance(a, argtype) for a in arg]):
            raise TypeError('Parameter {} specified as a sequence of '
                            'incorrect type. '
                            'Expected {}.'.format(name, argtype))
        else:
            return arg

    # input is not iterable and is not the correct type.
    else:
        raise TypeError('Parameter {} specified as a {}.'
                        ' Expected {}.'.format(name, type(arg), argtype))
Broadcasts plotting option `arg` to all factors. Args: U : KTensor arg : argument provided by the user argtype : expected type for arg name : name of the variable, used for error handling Returns: iterable version of arg of length U.ndim
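A minimal sketch of the broadcasting behavior; the KTensor is faked with a stand-in exposing only `ndim`, which is all the function reads:

import numpy as np  # the function itself relies on np.iterable

class FakeKTensor:
    ndim = 3

U = FakeKTensor()
print(_broadcast_arg(U, 'r', str, 'color'))              # ['r', 'r', 'r']
print(_broadcast_arg(U, ('r', 'g', 'b'), str, 'color'))  # ('r', 'g', 'b')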
def to_decimal(self): """Returns an instance of :class:`decimal.Decimal` for this :class:`Decimal128`. """ high = self.__high low = self.__low sign = 1 if (high & _SIGN) else 0 if (high & _SNAN) == _SNAN: return decimal.Decimal((sign, (), 'N')) elif (high & _NAN) == _NAN: return decimal.Decimal((sign, (), 'n')) elif (high & _INF) == _INF: return decimal.Decimal((sign, (), 'F')) if (high & _EXPONENT_MASK) == _EXPONENT_MASK: exponent = ((high & 0x1fffe00000000000) >> 47) - _EXPONENT_BIAS return decimal.Decimal((sign, (0,), exponent)) else: exponent = ((high & 0x7fff800000000000) >> 49) - _EXPONENT_BIAS arr = bytearray(15) mask = 0x00000000000000ff for i in range(14, 6, -1): arr[i] = (low & mask) >> ((14 - i) << 3) mask = mask << 8 mask = 0x00000000000000ff for i in range(6, 0, -1): arr[i] = (high & mask) >> ((6 - i) << 3) mask = mask << 8 mask = 0x0001000000000000 arr[0] = (high & mask) >> 48 # Have to convert bytearray to bytes for python 2.6. # cdecimal only accepts a tuple for digits. digits = tuple( int(digit) for digit in str(_from_bytes(bytes(arr), 'big'))) with decimal.localcontext(_DEC128_CTX) as ctx: return ctx.create_decimal((sign, digits, exponent))
Returns an instance of :class:`decimal.Decimal` for this :class:`Decimal128`.
def text(self): """Formatted param definition Equivalent to ``self.template.format(name=self.name, type=self.type)``. """ return self.template.format(name=self.name, type=self.type)
Formatted param definition Equivalent to ``self.template.format(name=self.name, type=self.type)``.
def _inputrc_enables_vi_mode(): ''' Emulate a small bit of readline behavior. Returns: (bool) True if current user enabled vi mode ("set editing-mode vi") in .inputrc ''' for filepath in (os.path.expanduser('~/.inputrc'), '/etc/inputrc'): try: with open(filepath) as f: for line in f: if _setre.fullmatch(line): return True except IOError: continue return False
Emulate a small bit of readline behavior. Returns: (bool) True if current user enabled vi mode ("set editing-mode vi") in .inputrc
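The module-level `_setre` pattern is not shown here; a plausible definition consistent with the `fullmatch` call above would be the following (an assumption, not necessarily the module's actual regex):

import re

# matches a whole line such as "set editing-mode vi\n" (\s* absorbs the newline)
_setre = re.compile(r'\s*set\s+editing-mode\s+vi\s*')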
def _get_tcntobj(goids, go2obj, **kws): """Get a TermCounts object if the user provides an annotation file, otherwise None.""" # kws: gaf (gene2go taxid) if 'gaf' in kws or 'gene2go' in kws: # Get a reduced go2obj set for TermCounts _gosubdag = GoSubDag(goids, go2obj, rcntobj=False, prt=None) return get_tcntobj(_gosubdag.go2obj, **kws)
Get a TermCounts object if the user provides an annotation file, otherwise None.
def find_vasp_calculations(): """ Returns a list of all subdirectories that contain either a vasprun.xml file or a compressed vasprun.xml.gz file. Args: None Returns: (List): list of all VASP calculation subdirectories. """ dir_list = [ './' + re.sub( r'vasprun\.xml', '', path ) for path in glob.iglob( '**/vasprun.xml', recursive=True ) ] gz_dir_list = [ './' + re.sub( r'vasprun\.xml\.gz', '', path ) for path in glob.iglob( '**/vasprun.xml.gz', recursive=True ) ] return dir_list + gz_dir_list
Returns a list of all subdirectories that contain either a vasprun.xml file or a compressed vasprun.xml.gz file. Args: None Returns: (List): list of all VASP calculation subdirectories.
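For instance, a match at `scf/run1/vasprun.xml` maps to the directory string `./scf/run1/`:

import re

path = 'scf/run1/vasprun.xml'
print('./' + re.sub(r'vasprun\.xml', '', path))  # ./scf/run1/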
def encodeMotorInput(self, motorInput): """ Encode motor command to bit vector. @param motorInput (1D numpy.array) Motor command to be encoded. @return (1D numpy.array) Encoded motor command. """ if not hasattr(motorInput, "__iter__"): motorInput = list([motorInput]) return self.motorEncoder.encode(motorInput)
Encode motor command to bit vector. @param motorInput (1D numpy.array) Motor command to be encoded. @return (1D numpy.array) Encoded motor command.
def db_dp990(self, value=None): """ Corresponds to IDD Field `db_dp990` mean coincident drybulb temperature corresponding to Dew-point temperature corresponding to 90.0% annual cumulative frequency of occurrence (cold conditions) Args: value (float): value for IDD Field `db_dp990` Unit: C if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value """ if value is not None: try: value = float(value) except ValueError: raise ValueError('value {} need to be of type float ' 'for field `db_dp990`'.format(value)) self._db_dp990 = value
Corresponds to IDD Field `db_dp990` mean coincident drybulb temperature corresponding to Dew-point temperature corresponding to 90.0% annual cumulative frequency of occurrence (cold conditions) Args: value (float): value for IDD Field `db_dp990` Unit: C if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
def get_token_settings(cls, token, default=None): """ Get the value for a specific token as a dictionary or replace with default :param token: str, token to query the nomenclate for :param default: object, substitution if the token is not found :return: (dict, object, None), token setting dictionary or default """ setting_dict = {} for key, value in iteritems(cls.__dict__): if '%s_' % token in key and not callable(key) and not isinstance(value, tokens.TokenAttr): setting_dict[key] = cls.__dict__.get(key, default) return setting_dict
Get the value for a specific token as a dictionary or replace with default :param token: str, token to query the nomenclate for :param default: object, substitution if the token is not found :return: (dict, object, None), token setting dictionary or default
def to_equivalent(self, unit, equivalence, **kwargs):
    """
    Return a copy of the unyt_array in the specified units, assuming the
    given equivalency. The dimensions of the specified units and the
    dimensions of the original array need not match so long as there is an
    appropriate conversion in the specified equivalency.

    Parameters
    ----------
    unit : string
        The unit that you wish to convert to.
    equivalence : string
        The equivalence you wish to use. To see which equivalencies
        are supported for this unitful quantity, try the
        :meth:`list_equivalencies` method.

    Examples
    --------
    >>> from unyt import K
    >>> a = 1.0e7*K
    >>> print(a.to_equivalent("keV", "thermal"))
    0.8617332401096504 keV
    """
    conv_unit = Unit(unit, registry=self.units.registry)
    if self.units.same_dimensions_as(conv_unit):
        return self.in_units(conv_unit)
    this_equiv = equivalence_registry[equivalence]()
    if self.has_equivalent(equivalence):
        new_arr = this_equiv.convert(
            self, conv_unit.dimensions, **kwargs)
        return new_arr.in_units(conv_unit)
    else:
        raise InvalidUnitEquivalence(equivalence, self.units, unit)
Return a copy of the unyt_array in the specified units, assuming the given equivalency. The dimensions of the specified units and the dimensions of the original array need not match so long as there is an appropriate conversion in the specified equivalency. Parameters ---------- unit : string The unit that you wish to convert to. equivalence : string The equivalence you wish to use. To see which equivalencies are supported for this unitful quantity, try the :meth:`list_equivalencies` method. Examples -------- >>> from unyt import K >>> a = 1.0e7*K >>> print(a.to_equivalent("keV", "thermal")) 0.8617332401096504 keV
def get_likes(self, likable_type, likable_id):
    """
    likable_type: 'Comment', 'Press', 'Review', 'StartupRole', 'StatusUpdate'

    likable_id: id of the object whose likes you are interested in
    """
    return _get_request(_LIKES.format(c_api=_C_API_BEGINNING,
                                      api=_API_VERSION,
                                      lt=likable_type,
                                      li=likable_id,
                                      at=self.access_token))
likable_type: 'Comment', 'Press', 'Review', 'StartupRole', 'StatusUpdate' likable_id: id of the object whose likes you are interested in
def start(self, interval=None, iterations=None): """Start the timer. A timeout event will be generated every *interval* seconds. If *interval* is None, then self.interval will be used. If *iterations* is specified, the timer will stop after emitting that number of events. If unspecified, then the previous value of self.iterations will be used. If the value is negative, then the timer will continue running until stop() is called. If the timer is already running when this function is called, nothing happens (timer continues running as it did previously, without changing the interval, number of iterations, or emitting a timer start event). """ if self.running: return # don't do anything if already running self.iter_count = 0 if interval is not None: self.interval = interval if iterations is not None: self.max_iterations = iterations self._backend._vispy_start(self.interval) self._running = True self._first_emit_time = precision_time() self._last_emit_time = precision_time() self.events.start(type='timer_start')
Start the timer. A timeout event will be generated every *interval* seconds. If *interval* is None, then self.interval will be used. If *iterations* is specified, the timer will stop after emitting that number of events. If unspecified, then the previous value of self.iterations will be used. If the value is negative, then the timer will continue running until stop() is called. If the timer is already running when this function is called, nothing happens (timer continues running as it did previously, without changing the interval, number of iterations, or emitting a timer start event).
def Laliberte_density_i(T, w_w, c0, c1, c2, c3, c4):
    r'''Calculate the density of a solute using the form proposed by Laliberte [1]_.
    Five fit parameters, the temperature, and the weight fraction of water
    are required. Temperature is in Kelvin; the returned partial density is
    in kg/m^3.

    .. math::
        \rho_{app,i} = \frac{(c_0[1-w_w]+c_1)\exp(10^{-6}[t+c_4]^2)}
        {(1-w_w) + c_2 + c_3 t}

    Parameters
    ----------
    T : float
        Temperature of fluid [K]
    w_w : float
        Weight fraction of water in the solution
    c0-c4 : floats
        Function fit parameters

    Returns
    -------
    rho_i : float
        Solute partial density, [kg/m^3]

    Notes
    -----
    Temperature range check is TODO

    Examples
    --------
    >>> d = _Laliberte_Density_ParametersDict['7647-14-5']
    >>> Laliberte_density_i(273.15+0, 1-0.0037838838, d["C0"], d["C1"], d["C2"], d["C3"], d["C4"])
    3761.8917585699983

    References
    ----------
    .. [1] Laliberte, Marc. "A Model for Calculating the Heat Capacity of
       Aqueous Solutions, with Updated Density and Viscosity Data." Journal of
       Chemical & Engineering Data 54, no. 6 (June 11, 2009): 1725-60.
       doi:10.1021/je8008123
    '''
    t = T - 273.15
    return ((c0*(1 - w_w)+c1)*exp(1E-6*(t + c4)**2))/((1 - w_w) + c2 + c3*t)
r'''Calculate the density of a solute using the form proposed by Laliberte [1]_. Five fit parameters, the temperature, and the weight fraction of water are required. Temperature is in Kelvin; the returned partial density is in kg/m^3. .. math:: \rho_{app,i} = \frac{(c_0[1-w_w]+c_1)\exp(10^{-6}[t+c_4]^2)} {(1-w_w) + c_2 + c_3 t} Parameters ---------- T : float Temperature of fluid [K] w_w : float Weight fraction of water in the solution c0-c4 : floats Function fit parameters Returns ------- rho_i : float Solute partial density, [kg/m^3] Notes ----- Temperature range check is TODO Examples -------- >>> d = _Laliberte_Density_ParametersDict['7647-14-5'] >>> Laliberte_density_i(273.15+0, 1-0.0037838838, d["C0"], d["C1"], d["C2"], d["C3"], d["C4"]) 3761.8917585699983 References ---------- .. [1] Laliberte, Marc. "A Model for Calculating the Heat Capacity of Aqueous Solutions, with Updated Density and Viscosity Data." Journal of Chemical & Engineering Data 54, no. 6 (June 11, 2009): 1725-60. doi:10.1021/je8008123
def version(self):
    """
    :return: The version of the server when initialized as 'auto',
        otherwise the version passed in at initialization
    """
    if self._version != 'auto':
        return self._version

    # at this point the version is still 'auto': fetch it once and cache it
    try:
        data = self.execute_get('configuration/about/')
        self._version = data['configuration'][0]['version']
    except GhostException:
        return self.DEFAULT_VERSION

    return self._version
:return: The version of the server when initialized as 'auto', otherwise the version passed in at initialization
def instruction_DEC_register(self, opcode, register): """ Decrement accumulator """ a = register.value r = self.DEC(a) # log.debug("$%x DEC %s value $%x -1 = $%x" % ( # self.program_counter, # register.name, a, r # )) register.set(r)
Decrement accumulator
def create_client_socket(self, config): """ Create client broadcast socket :param config: client configuration :return: socket.socket """ client_socket = WUDPNetworkNativeTransport.create_client_socket(self, config) client_socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) return client_socket
Create client broadcast socket :param config: client configuration :return: socket.socket
def _drop_oldest_chunk(self):
    '''
    Handle the case where more items come into the chunk than its maximum
    capacity allows: drop the oldest chunk so that new items can keep
    flowing in.

    >>> s = StreamCounter(5,5)
    >>> data_stream = ['a','b','c','d']
    >>> for item in data_stream:
    ...     s.add(item)

    >>> min(s.chunked_counts.keys())
    0
    >>> s.chunked_counts
    {0: {'a': 1, 'b': 1, 'c': 1, 'd': 1}}

    >>> data_stream = ['a','b','c','d','a','e','f']
    >>> for item in data_stream:
    ...     s.add(item)

    >>> min(s.chunked_counts.keys())
    2
    >>> s.chunked_counts
    {2: {'f': 1}}
    '''
    chunk_id = min(self.chunked_counts.keys())
    chunk = self.chunked_counts.pop(chunk_id)
    self.n_counts -= len(chunk)

    for k, v in list(chunk.items()):
        self.counts[k] -= v
        self.counts_total -= v
Handle the case where more items come into the chunk than its maximum capacity allows: drop the oldest chunk so that new items can keep flowing in. >>> s = StreamCounter(5,5) >>> data_stream = ['a','b','c','d'] >>> for item in data_stream: ... s.add(item) >>> min(s.chunked_counts.keys()) 0 >>> s.chunked_counts {0: {'a': 1, 'b': 1, 'c': 1, 'd': 1}} >>> data_stream = ['a','b','c','d','a','e','f'] >>> for item in data_stream: ... s.add(item) >>> min(s.chunked_counts.keys()) 2 >>> s.chunked_counts {2: {'f': 1}}
def Place(self, x, flags): """ Place prepends a value specified by `flags` to the Builder, without checking for available space. """ N.enforce_number(x, flags) self.head = self.head - flags.bytewidth encode.Write(flags.packer_type, self.Bytes, self.Head(), x)
Place prepends a value specified by `flags` to the Builder, without checking for available space.
def getParameters(self): """ Get all the parameters declared. """ parameters = lock_and_call( lambda: self._impl.getParameters(), self._lock ) return EntityMap(parameters, Parameter)
Get all the parameters declared.
def get_sockaddr(host, port, family): """Return a fully qualified socket address that can be passed to :func:`socket.bind`.""" if family == af_unix: return host.split("://", 1)[1] try: res = socket.getaddrinfo( host, port, family, socket.SOCK_STREAM, socket.IPPROTO_TCP ) except socket.gaierror: return host, port return res[0][4]
Return a fully qualified socket address that can be passed to :func:`socket.bind`.
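A usage sketch; `af_unix` is a module-level constant not shown above, assumed here to be `socket.AF_UNIX` (POSIX only):

import socket

print(get_sockaddr('127.0.0.1', 8000, socket.AF_INET))
# ('127.0.0.1', 8000)
print(get_sockaddr('unix:///tmp/app.sock', 0, socket.AF_UNIX))
# '/tmp/app.sock'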
def _aggregations(search, definitions): """Add aggregations to query.""" if definitions: for name, agg in definitions.items(): search.aggs[name] = agg if not callable(agg) else agg() return search
Add aggregations to query.
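A sketch in elasticsearch-dsl style, which the `search.aggs[name] = ...` assignment suggests the surrounding module uses (treat the import path as an assumption). A callable value is invoked lazily, per the `callable(agg)` branch:

from elasticsearch_dsl import A, Search

search = Search(index='records')
search = _aggregations(search, {
    'types': A('terms', field='type'),
    'years': lambda: A('date_histogram', field='created', interval='year'),
})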
def categorical_to_numeric(table): """Encode categorical columns to numeric by converting each category to an integer value. Parameters ---------- table : pandas.DataFrame Table with categorical columns to encode. Returns ------- encoded : pandas.DataFrame Table with categorical columns encoded as numeric. Numeric columns in the input table remain unchanged. """ def transform(column): if is_categorical_dtype(column.dtype): return column.cat.codes if column.dtype.char == "O": try: nc = column.astype(numpy.int64) except ValueError: classes = column.dropna().unique() classes.sort(kind="mergesort") nc = column.replace(classes, numpy.arange(classes.shape[0])) return nc elif column.dtype == bool: return column.astype(numpy.int64) return column if isinstance(table, pandas.Series): return pandas.Series(transform(table), name=table.name, index=table.index) else: if _pandas_version_under0p23: return table.apply(transform, axis=0, reduce=False) else: return table.apply(transform, axis=0, result_type='reduce')
Encode categorical columns to numeric by converting each category to an integer value. Parameters ---------- table : pandas.DataFrame Table with categorical columns to encode. Returns ------- encoded : pandas.DataFrame Table with categorical columns encoded as numeric. Numeric columns in the input table remain unchanged.
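A small usage sketch (run where the function's module-level imports, `is_categorical_dtype` and the pandas version flag, are available):

import pandas

table = pandas.DataFrame({
    'grade': pandas.Categorical(['low', 'high', 'low']),  # codes: high=0, low=1
    'label': ['b', 'a', 'b'],                             # object -> 1, 0, 1
    'flag': [True, False, True],                          # bool -> 1, 0, 1
    'value': [1.5, 2.5, 3.5],                             # numeric: unchanged
})
print(categorical_to_numeric(table))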
def update_distribution( name, config, tags=None, region=None, key=None, keyid=None, profile=None, ): ''' Update the config (and optionally tags) for the CloudFront distribution with the given name. name Name of the CloudFront distribution config Configuration for the distribution tags Tags to associate with the distribution region Region to connect to key Secret key to use keyid Access key to use profile A dict with region, key, and keyid, or a pillar key (string) that contains such a dict. CLI Example: .. code-block:: bash salt myminion boto_cloudfront.update_distribution name=mydistribution profile=awsprofile \ config='{"Comment":"partial configuration","Enabled":true}' ''' ### FIXME - BUG. This function can NEVER work as written... ### Obviously it was never actually tested. distribution_ret = get_distribution( name, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in distribution_ret: return distribution_ret dist_with_tags = distribution_ret['result'] current_distribution = dist_with_tags['distribution'] current_config = current_distribution['DistributionConfig'] current_tags = dist_with_tags['tags'] etag = dist_with_tags['etag'] config_diff = __utils__['dictdiffer.deep_diff'](current_config, config) if tags: tags_diff = __utils__['dictdiffer.deep_diff'](current_tags, tags) conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: if 'old' in config_diff or 'new' in config_diff: conn.update_distribution( DistributionConfig=config, Id=current_distribution['Id'], IfMatch=etag, ) if tags: arn = current_distribution['ARN'] if 'new' in tags_diff: tags_to_add = { 'Items': [ {'Key': k, 'Value': v} for k, v in six.iteritems(tags_diff['new']) ], } conn.tag_resource( Resource=arn, Tags=tags_to_add, ) if 'old' in tags_diff: tags_to_remove = { 'Items': list(tags_diff['old'].keys()), } conn.untag_resource( Resource=arn, TagKeys=tags_to_remove, ) except botocore.exceptions.ClientError as err: return {'error': __utils__['boto3.get_error'](err)} finally: _cache_id( 'cloudfront', sub_resource=name, invalidate=True, region=region, key=key, keyid=keyid, profile=profile, ) return {'result': True}
Update the config (and optionally tags) for the CloudFront distribution with the given name. name Name of the CloudFront distribution config Configuration for the distribution tags Tags to associate with the distribution region Region to connect to key Secret key to use keyid Access key to use profile A dict with region, key, and keyid, or a pillar key (string) that contains such a dict. CLI Example: .. code-block:: bash salt myminion boto_cloudfront.update_distribution name=mydistribution profile=awsprofile \ config='{"Comment":"partial configuration","Enabled":true}'
def write_err(self, text): """Write error text in the terminal without breaking the spinner.""" stderr = self.stderr if self.stderr.closed: stderr = sys.stderr stderr.write(decode_output(u"\r", target_stream=stderr)) stderr.write(decode_output(CLEAR_LINE, target_stream=stderr)) if text is None: text = "" text = decode_output(u"{0}\n".format(text), target_stream=stderr) self.stderr.write(text) self.out_buff.write(decode_output(text, target_stream=self.out_buff))
Write error text in the terminal without breaking the spinner.
def ris(self): """Bibliographic entry in RIS (Research Information System Format) format. Returns ------- ris : str The RIS string representing an item. Raises ------ ValueError : If the item's aggregationType is not Journal. """ if self.aggregationType != 'Journal': raise ValueError('Only Journal articles supported.') template = u'''TY - JOUR TI - {title} JO - {journal} VL - {volume} DA - {date} SP - {pages} PY - {year} DO - {doi} UR - https://doi.org/{doi} ''' ris = template.format( title=self.title, journal=self.publicationName, volume=self.volume, date=self.coverDate, pages=self.pageRange, year=self.coverDate[0:4], doi=self.doi) for au in self.authors: ris += 'AU - {}\n'.format(au.indexed_name) if self.issueIdentifier is not None: ris += 'IS - {}\n'.format(self.issueIdentifier) ris += 'ER - \n\n' return ris
Bibliographic entry in RIS (Research Information System Format) format. Returns ------- ris : str The RIS string representing an item. Raises ------ ValueError : If the item's aggregationType is not Journal.
def _parse_line(self, line): """ Parsed result:: {'timestamp':'May 18 14:24:14', 'procname': 'kernel', 'hostname':'lxc-rhel68-sat56', 'message': '...', 'raw_message': '...: ...' } """ msg_info = {'raw_message': line} if ': ' in line: info, msg = [i.strip() for i in line.split(': ', 1)] msg_info['message'] = msg info_splits = info.split() if len(info_splits) == 5: msg_info['timestamp'] = ' '.join(info_splits[:3]) msg_info['hostname'] = info_splits[3] msg_info['procname'] = info_splits[4] return msg_info
Parsed result:: {'timestamp':'May 18 14:24:14', 'procname': 'kernel', 'hostname':'lxc-rhel68-sat56', 'message': '...', 'raw_message': '...: ...' }
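For example (`parser` stands for any instance of the enclosing class; `self` is only needed for method dispatch):

line = 'May 18 14:24:14 lxc-rhel68-sat56 kernel: Linux version 2.6.32'
print(parser._parse_line(line))
# {'raw_message': line, 'message': 'Linux version 2.6.32',
#  'timestamp': 'May 18 14:24:14', 'hostname': 'lxc-rhel68-sat56',
#  'procname': 'kernel'}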
def _traverse(summary, function, *args): """Traverse all objects of a summary and call function with each as a parameter. Using this function, the following objects will be traversed: - the summary - each row - each item of a row """ function(summary, *args) for row in summary: function(row, *args) for item in row: function(item, *args)
Traverse all objects of a summary and call function with each as a parameter. Using this function, the following objects will be traversed: - the summary - each row - each item of a row
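A self-contained sketch: counting every traversed object with a mutable counter gives 1 (summary) + 2 (rows) + 6 (items) = 9 calls:

def count(obj, counter):
    counter[0] += 1

summary = [['str', 3, 120], ['int', 5, 80]]  # two rows of (type, count, size)
counter = [0]
_traverse(summary, count, counter)
print(counter[0])  # 9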
def create_couchdb_admin(username, password):
    ''' Create a CouchDB admin user '''
    curl_couchdb('/_config/admins/{}'.format(username),
                 method='PUT',
                 data='"{}"'.format(password))
Create a CouchDB admin user
def destroy(self): """ A reimplemented destructor. This destructor will clear the reference to the toolkit widget and set its parent to None. """ widget = self.widget if widget is not None: del self.widget super(UiKitToolkitObject, self).destroy()
A reimplemented destructor. This destructor will clear the reference to the toolkit widget and set its parent to None.
def search(self, query, category, uid=None, latitude=None, longitude=None, city=None, region=None):
    """
    Send a semantic understanding request

    For details see
    http://mp.weixin.qq.com/wiki/0/0ce78b3c9524811fee34aba3e33f3448.html

    :param query: input text string
    :param category: service type(s) to use; multiple types may be passed as a list
    :param uid: optional, unique user id (not the developer id), used to
                distinguish different users of the official account
                (passing the user's openid is recommended)
    :param latitude: optional, latitude; pass together with longitude,
                     or pass a city instead
    :param longitude: optional, longitude; pass together with latitude,
                      or pass a city instead
    :param city: optional, city name; pass either this or coordinates
    :param region: optional, region name, may be omitted when a city is
                   given; pass either this or coordinates
    :return: the JSON data returned by the API

    Usage::

        from wechatpy import WeChatClient

        client = WeChatClient('appid', 'secret')
        res = client.semantic.search(
            '查一下明天从北京到上海的南航机票',
            'flight,hotel',
            city='北京'
        )

    """
    if isinstance(category, (tuple, list)):
        category = ','.join(category)

    data = optionaldict()
    data['query'] = query
    data['category'] = category
    data['uid'] = uid
    data['latitude'] = latitude
    data['longitude'] = longitude
    data['city'] = city
    data['region'] = region
    data['appid'] = self._client.appid

    return self._post(
        url='https://api.weixin.qq.com/semantic/semproxy/search',
        data=data
    )
Send a semantic understanding request. For details see http://mp.weixin.qq.com/wiki/0/0ce78b3c9524811fee34aba3e33f3448.html :param query: input text string :param category: service type(s) to use; multiple types may be passed as a list :param uid: optional, unique user id (not the developer id), used to distinguish different users of the official account (passing the user's openid is recommended) :param latitude: optional, latitude; pass together with longitude, or pass a city instead :param longitude: optional, longitude; pass together with latitude, or pass a city instead :param city: optional, city name; pass either this or coordinates :param region: optional, region name, may be omitted when a city is given; pass either this or coordinates :return: the JSON data returned by the API Usage:: from wechatpy import WeChatClient client = WeChatClient('appid', 'secret') res = client.semantic.search( '查一下明天从北京到上海的南航机票', 'flight,hotel', city='北京' )
def synchelp(f):
    '''
    The synchelp decorator allows the transparent execution of a coroutine
    using the global loop from a thread other than the event loop.  In both
    use cases, the actual work is done by the global event loop.

    Examples:

        Use as a decorator::

            @s_glob.synchelp
            async def stuff(x, y):
                await dostuff()

        Calling the stuff function as regular async code using the standard await syntax::

            valu = await stuff(x, y)

        Calling the stuff function as regular sync code outside of the event loop thread::

            valu = stuff(x, y)

    '''
    def wrap(*args, **kwargs):
        coro = f(*args, **kwargs)
        if not iAmLoop():
            return sync(coro)
        return coro

    return wrap
The synchelp decorator allows the transparent execution of a coroutine using the global loop from a thread other than the event loop. In both use cases, the actual work is done by the global event loop. Examples: Use as a decorator:: @s_glob.synchelp async def stuff(x, y): await dostuff() Calling the stuff function as regular async code using the standard await syntax:: valu = await stuff(x, y) Calling the stuff function as regular sync code outside of the event loop thread:: valu = stuff(x, y)
def get_level(level_string): """ Returns an appropriate logging level integer from a string name """ levels = {'debug': logging.DEBUG, 'info': logging.INFO, 'warning': logging.WARNING, 'error': logging.ERROR, 'critical': logging.CRITICAL} try: level = levels[level_string.lower()] except KeyError: sys.exit('{0} is not a recognized logging level'.format(level_string)) else: return level
Returns an appropriate logging level integer from a string name
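Usage is case-insensitive; unknown names terminate via sys.exit:

import logging

assert get_level('WARNING') == logging.WARNING
assert get_level('debug') == logging.DEBUG
# get_level('loud') would call sys.exit with an explanatory message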
def is_excluded_path(args, filepath):
    """Returns true if the filepath is under one of the exclude paths."""
    # Try regular expressions first.
    for regexp_exclude_path in args.regexp:
        if re.match(regexp_exclude_path, filepath):
            return True
    abspath = os.path.abspath(filepath)
    if args.include:
        # If the file is outside of any include directories.
        out_of_include_dirs = True
        for incl_path in args.include:
            absolute_include_path = os.path.abspath(os.path.join(args.root, incl_path))
            if is_child_dir(absolute_include_path, abspath):
                out_of_include_dirs = False
                break
        if out_of_include_dirs:
            return True

    excl_rules = create_exclude_rules(args)
    for i, rule in enumerate(excl_rules):
        if rule[0] == abspath:
            return rule[1]
        if is_child_dir(rule[0], abspath):
            # Continue to try the longest match.
            last_result = rule[1]
            for j in range(i + 1, len(excl_rules)):
                rule_deep = excl_rules[j]
                if not is_child_dir(rule_deep[0], abspath):
                    break
                last_result = rule_deep[1]
            return last_result
    return False
Returns true if the filepath is under one of the exclude paths.
def add(self, transport, address=None): """ add a new recipient to be addressable by this MessageDispatcher generate a new uuid address if one is not specified """ if not address: address = str(uuid.uuid1()) if address in self.recipients: self.recipients[address].add(transport) else: self.recipients[address] = RecipientManager(transport, address) return address
add a new recipient to be addressable by this MessageDispatcher generate a new uuid address if one is not specified
def set_options(pool_or_cursor,row_instance): "for connection-level options that need to be set on Row instances" # todo: move around an Options object instead for option in ('JSON_READ',): setattr(row_instance,option,getattr(pool_or_cursor,option,None)) return row_instance
for connection-level options that need to be set on Row instances
def get_viewer(self, v_id, viewer_class=None, width=512, height=512, force_new=False): """ Get an existing viewer by viewer id. If the viewer does not yet exist, make a new one. """ if not force_new: try: return self.viewers[v_id] except KeyError: pass # create top level window window = self.app.make_window("Viewer %s" % v_id, wid=v_id) # We get back a record with information about the viewer v_info = self.make_viewer(window, viewer_class=viewer_class, width=width, height=height) # Save it under this viewer id self.viewers[v_id] = v_info return v_info
Get an existing viewer by viewer id. If the viewer does not yet exist, make a new one.
def clean_columns(columns, valid_regex=r'\w', lower=True, max_len=32):
    """ Ensure all column name strings are valid python variable/attribute names

    >>> df = pd.DataFrame(np.zeros((2, 3)), columns=['WAT??', "Don't do th!s, way too long. ya-think????", 'ok-this123.456'])
    >>> df.columns = clean_columns(df.columns, max_len=12)
    >>> df.head()
       wat  dont_do_ths_  okthis123456
    0  0.0           0.0           0.0
    1  0.0           0.0           0.0
    """
    rettype = None
    if isinstance(columns, str):
        rettype = type(columns)
        columns = [columns]
    columns = [c.strip() for c in columns]
    # # unnecessary because these are invalid characters removed below
    # columns = [(c[1:-1] if c[0] in '\'"' and c[-1] == c[0] else c) for c in columns]
    # columns = [(c[1:-1] if c[0] in '{([<' and c[-1] in '})]>' else c) for c in columns]
    columns = [re.sub(r'\s+', '_', c).lower() for c in columns]  # raw string avoids an invalid escape sequence
    columns = remove_invalid_chars(columns, valid_regex=r'\w')
    columns = [c[:max_len] for c in columns]
    columns = np.array(columns) if rettype is None else rettype(columns[0])
    return columns
Ensure all column name strings are valid python variable/attribute names >>> df = pd.DataFrame(np.zeros((2, 3)), columns=['WAT??', "Don't do th!s, way too long. ya-think????", 'ok-this123.456']) >>> df.columns = clean_columns(df.columns, max_len=12) >>> df.head() wat dont_do_ths_ okthis123456 0 0.0 0.0 0.0 1 0.0 0.0 0.0
def compress_to(self, archive_path=None):
    """ Compress the directory with gzip using tarlib.

    :type archive_path: str
    :param archive_path: Path to the archive, if None, a tempfile is created
    """
    if archive_path is None:
        archive = tempfile.NamedTemporaryFile(delete=False)
        tar_args = ()
        tar_kwargs = {'fileobj': archive}
        _return = archive.name
    else:
        tar_args = (archive_path,)  # trailing comma: a 1-tuple, not a parenthesized string
        tar_kwargs = {}
        _return = archive_path
    tar_kwargs.update({'mode': 'w:gz'})
    with closing(tarfile.open(*tar_args, **tar_kwargs)) as tar:
        tar.add(self.path, arcname=self.file)
    return _return
Compress the directory with gzip using tarlib. :type archive_path: str :param archive_path: Path to the archive, if None, a tempfile is created
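A hedged usage sketch; `Directory` is a hypothetical stand-in for however the enclosing class (with its `path` and `file` attributes) is constructed:

d = Directory('/tmp/mydata')                 # hypothetical constructor
tmp_archive = d.compress_to()                # path of a NamedTemporaryFile
named = d.compress_to('/tmp/mydata.tar.gz')  # returns '/tmp/mydata.tar.gz'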
def member(self, phlo_id, node_id, member_id, action, node_type='conference_bridge'): """ :param phlo_id: :param node_id: :param member_id: :param action: :param node_type: default value `conference_bridge` :return: """ data = { 'member_id': member_id, 'phlo_id': phlo_id, 'node_id': node_id, 'node_type': node_type } member = Member(self.client, data) return getattr(member, action)()
:param phlo_id: :param node_id: :param member_id: :param action: :param node_type: default value `conference_bridge` :return:
def iter_list_market_book(self, market_ids, chunk_size, **kwargs): """Split call to `list_market_book` into separate requests. :param list market_ids: List of market IDs :param int chunk_size: Number of records per chunk :param dict kwargs: Arguments passed to `list_market_book` """ return itertools.chain(*( self.list_market_book(market_chunk, **kwargs) for market_chunk in utils.get_chunks(market_ids, chunk_size) ))
Split call to `list_market_book` into separate requests. :param list market_ids: List of market IDs :param int chunk_size: Number of records per chunk :param dict kwargs: Arguments passed to `list_market_book`
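The chunking helper behaves like the following sketch (an assumption about `utils.get_chunks`, not its verbatim source):

def get_chunks(items, chunk_size):
    for i in range(0, len(items), chunk_size):
        yield items[i:i + chunk_size]

print(list(get_chunks(['1.001', '1.002', '1.003'], 2)))
# [['1.001', '1.002'], ['1.003']]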
def project(self, project, entity=None):
    """Retrieve project

    Args:
        project (str): The project to get details for
        entity (str, optional): The entity to scope this project to.

    Returns:
        [{"id","name","repo","dockerImage","description"}]
    """
    query = gql('''
    query Models($entity: String, $project: String!) {
        model(name: $project, entityName: $entity) {
            id
            name
            repo
            dockerImage
            description
        }
    }
    ''')
    return self.gql(query, variable_values={
        'entity': entity, 'project': project})['model']
Retrieve project Args: project (str): The project to get details for entity (str, optional): The entity to scope this project to. Returns: [{"id","name","repo","dockerImage","description"}]
def _importSNPs_CasavaSNP(setName, species, genomeSource, snpsFile) :
    "This function will also create an index on start->chromosomeNumber->setName. Warning : pyGeno positions are 0 based"
    printf('importing SNP set %s for species %s...' % (setName, species))
    snpData = SNPsTxtFile(snpsFile)
    CasavaSNP.dropIndex(('start', 'chromosomeNumber', 'setName'))
    conf.db.beginTransaction()
    pBar = ProgressBar(len(snpData))
    pLabel = ''
    currChrNumber = None
    for snpEntry in snpData :
        tmpChr = snpEntry['chromosomeNumber']
        if tmpChr != currChrNumber :
            currChrNumber = tmpChr
            pLabel = 'Chr %s...' % currChrNumber
        snp = CasavaSNP()
        snp.species = species
        snp.setName = setName
        for f in snp.getFields() :
            try :
                setattr(snp, f, snpEntry[f])
            except KeyError :
                if f != 'species' and f != 'setName' :
                    printf("Warning: filetype has no key %s" % f)
        snp.start -= 1
        snp.end -= 1
        snp.save()
        pBar.update(label = pLabel)
    pBar.close()
    snpMaster = SNPMaster()
    snpMaster.set(setName = setName, SNPType = 'CasavaSNP', species = species)
    snpMaster.save()
    printf('saving...')
    conf.db.endTransaction()
    printf('creating indexes...')
    CasavaSNP.ensureGlobalIndex(('start', 'chromosomeNumber', 'setName'))
    printf('importation of SNP set %s for species %s done.' % (setName, species))
    return True
This function will also create an index on start->chromosomeNumber->setName. Warning : pyGeno positions are 0 based
def __store_record(self, record):
    """ Save record in an internal storage

    :param record: record to save

    :return: None
    """
    if isinstance(record, WSimpleTrackerStorage.Record) is False:
        raise TypeError('Invalid record type was given')

    limit = self.record_limit()
    if limit is not None and len(self.__registry) >= limit:
        self.__registry.pop(0)
    self.__registry.append(record)
Save record in an internal storage :param record: record to save :return: None
def generate_matches(self, nodes):
    """
    Generator yielding matches for a sequence of nodes.

    Args:
        nodes: sequence of nodes

    Yields:
        (count, results) tuples where:
        count: the match comprises nodes[:count];
        results: dict containing named submatches.
    """
    if self.content is None:
        # Shortcut for special case (see __init__.__doc__)
        for count in xrange(self.min, 1 + min(len(nodes), self.max)):
            r = {}
            if self.name:
                r[self.name] = nodes[:count]
            yield count, r
    elif self.name == "bare_name":
        yield self._bare_name_matches(nodes)
    else:
        # The reason for this is that hitting the recursion limit usually
        # results in some ugly messages about how RuntimeErrors are being
        # ignored. We don't do this on non-CPython implementations because
        # they don't have this problem.
        if hasattr(sys, "getrefcount"):
            save_stderr = sys.stderr
            sys.stderr = StringIO()
        try:
            for count, r in self._recursive_matches(nodes, 0):
                if self.name:
                    r[self.name] = nodes[:count]
                yield count, r
        except RuntimeError:
            # We fall back to the iterative pattern matching scheme if the
            # recursive scheme hits the recursion limit.
            for count, r in self._iterative_matches(nodes):
                if self.name:
                    r[self.name] = nodes[:count]
                yield count, r
        finally:
            if hasattr(sys, "getrefcount"):
                sys.stderr = save_stderr
Generator yielding matches for a sequence of nodes. Args: nodes: sequence of nodes Yields: (count, results) tuples where: count: the match comprises nodes[:count]; results: dict containing named submatches.
def create_alarm(deployment_id, metric_name, data, api_key=None, profile="telemetry"):
    '''
    Create a telemetry alarm.

    data is a dict of alert configuration data.

    Returns (bool success, str message) tuple.

    CLI Example:

        salt myminion telemetry.create_alarm rs-ds033197 {} profile=telemetry
    '''
    auth = _auth(api_key, profile)
    request_uri = _get_telemetry_base(profile) + "/alerts"

    key = "telemetry.{0}.alerts".format(deployment_id)

    # set the notification channels if not already set
    post_body = {
        "deployment": deployment_id,
        "filter": data.get('filter'),
        "notificationChannel": get_notification_channel_id(data.get('escalate_to')).split(),
        "condition": {
            "metric": metric_name,
            "max": data.get('max'),
            "min": data.get('min')
        }
    }

    try:
        response = requests.post(request_uri, data=salt.utils.json.dumps(post_body), headers=auth)
    except requests.exceptions.RequestException as e:
        # TODO: Maybe we should retry?
        log.error(six.text_type(e))
        # Without a response there is nothing further to inspect.
        return False, six.text_type(e)

    if response.status_code >= 200 and response.status_code < 300:
        # update cache
        log.info('Created alarm on metric: %s in deployment: %s', metric_name, deployment_id)
        log.debug('Updating cache for metric %s in deployment %s: %s',
                  metric_name, deployment_id, response.json())
        _update_cache(deployment_id, metric_name, response.json())
    else:
        log.error(
            'Failed to create alarm on metric: %s in '
            'deployment %s: payload: %s',
            metric_name, deployment_id, salt.utils.json.dumps(post_body)
        )

    return response.status_code >= 200 and response.status_code < 300, response.json()
Create a telemetry alarm. data is a dict of alert configuration data. Returns (bool success, str message) tuple. CLI Example: salt myminion telemetry.create_alarm rs-ds033197 {} profile=telemetry
def get_current(self, channel, unit='A'): '''Reading current ''' values = self._get_adc_value(address=self._ch_map[channel]['ADCI']['address']) raw = values[self._ch_map[channel]['ADCI']['adc_ch']] dac_offset = self._ch_cal[channel]['ADCI']['offset'] dac_gain = self._ch_cal[channel]['ADCI']['gain'] if 'PWR' in channel: current = ((raw - dac_offset) / dac_gain) if unit == 'raw': return raw elif unit == 'A': return current / 1000 elif unit == 'mA': return current elif unit == 'uA': return current * 1000 else: raise TypeError("Invalid unit type.") else: voltage = values[self._ch_map[channel]['ADCV']['adc_ch']] current = (((raw - voltage) - dac_offset) / dac_gain) if unit == 'raw': return raw elif unit == 'A': return current / 1000000 elif unit == 'mA': return current / 1000 elif unit == 'uA': return current else: raise TypeError("Invalid unit type.")
Reading current
def validate_allowed_to_pay(self): ''' Passes cleanly if we're allowed to pay, otherwise raise a ValidationError. ''' self._refresh() if not self.invoice.is_unpaid: raise ValidationError("You can only pay for unpaid invoices.") if not self.invoice.cart: return if not self._invoice_matches_cart(): raise ValidationError("The registration has been amended since " "generating this invoice.") CartController(self.invoice.cart).validate_cart()
Passes cleanly if we're allowed to pay, otherwise raise a ValidationError.
def fromdeltas(cls, deltas): """ Construct an offsetvector from a dictionary of offset deltas as returned by the .deltas attribute. Example: >>> x = offsetvector({"H1": 0, "L1": 10, "V1": 20}) >>> y = offsetvector.fromdeltas(x.deltas) >>> y offsetvector({'V1': 20, 'H1': 0, 'L1': 10}) >>> y == x True See also .deltas, .fromkeys() """ return cls((key, value) for (refkey, key), value in deltas.items())
Construct an offsetvector from a dictionary of offset deltas as returned by the .deltas attribute. Example: >>> x = offsetvector({"H1": 0, "L1": 10, "V1": 20}) >>> y = offsetvector.fromdeltas(x.deltas) >>> y offsetvector({'V1': 20, 'H1': 0, 'L1': 10}) >>> y == x True See also .deltas, .fromkeys()
def license_is_oa(license): """Return True if license is compatible with Open Access""" for oal in OA_LICENSES: if re.search(oal, license): return True return False
Return True if license is compatible with Open Access
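A standalone sketch; the module-level `OA_LICENSES` patterns below are illustrative assumptions, not the module's actual list:

import re

OA_LICENSES = (r'^CC-BY', r'^CC0')

print(license_is_oa('CC-BY-4.0'))            # True
print(license_is_oa('All rights reserved'))  # False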
def nodal_production_balance(network, snapshot='all', scaling=0.00001, filename=None):
    """
    Plots the nodal difference between generation and consumption.

    Parameters
    ----------
    network : PyPSA network container
        Holds topology of grid including results from powerflow analysis
    snapshot : int or 'all'
        Snapshot to plot. default 'all'
    scaling : float
        Scaling to change plot sizes. default 0.00001
    filename : str
        Path of the output file. If given, the plot is saved there instead
        of being shown.
    """
    fig, ax = plt.subplots(1, 1)
    gen = network.generators_t.p.groupby(network.generators.bus, axis=1).sum()
    load = network.loads_t.p.groupby(network.loads.bus, axis=1).sum()
    if snapshot == 'all':
        diff = (gen - load).sum()
    else:
        timestep = network.snapshots[snapshot]
        diff = (gen - load).loc[timestep]

    colors = {s[0]: 'green' if s[1] > 0 else 'red' for s in diff.iteritems()}
    subcolors = {'Net Consumer': 'red', 'Net Producer': 'green'}
    diff = diff.abs()
    network.plot(
        bus_sizes=diff * scaling,
        bus_colors=colors,
        line_widths=0.2,
        margin=0.01,
        ax=ax)

    patchList = []
    for key in subcolors:
        data_key = mpatches.Patch(color=subcolors[key], label=key)
        patchList.append(data_key)

    ax.legend(handles=patchList, loc='upper left')
    ax.autoscale()
    if filename:
        plt.savefig(filename)
        plt.close()

    return
Plots the nodal difference between generation and consumption. Parameters ---------- network : PyPSA network container Holds topology of grid including results from powerflow analysis snapshot : int or 'all' Snapshot to plot. default 'all' scaling : float Scaling to change plot sizes. default 0.00001 filename : str Path of the output file. If given, the plot is saved there instead of being shown.
def can_update_topics_to_sticky_topics(self, forum, user): """ Given a forum, checks whether the user can change its topic types to sticky topics. """ return ( self._perform_basic_permission_check(forum, user, 'can_edit_posts') and self._perform_basic_permission_check(forum, user, 'can_post_stickies') )
Given a forum, checks whether the user can change its topic types to sticky topics.
def _check_rules(browser, rules_js, config): """ Run an accessibility audit on the page using the axe-core ruleset. Args: browser: a browser instance. rules_js: the ruleset JavaScript as a string. config: an AxsAuditConfig instance. Returns: A list of violations. Related documentation: https://github.com/dequelabs/axe-core/blob/master/doc/API.md#results-object __Caution__: You probably don't really want to call this method directly! It will be used by `AxeCoreAudit.do_audit`. """ audit_run_script = dedent(u""" {rules_js} {custom_rules} axe.configure(customRules); var callback = function(err, results) {{ if (err) throw err; window.a11yAuditResults = JSON.stringify(results); window.console.log(window.a11yAuditResults); }} axe.run({context}, {options}, callback); """).format( rules_js=rules_js, custom_rules=config.custom_rules, context=config.context, options=config.rules ) audit_results_script = dedent(u""" window.console.log(window.a11yAuditResults); return window.a11yAuditResults; """) browser.execute_script(audit_run_script) def audit_results_check_func(): """ A method to check that the audit has completed. Returns: (True, results) if the results are available. (False, None) if the results aren't available. """ unicode_results = browser.execute_script(audit_results_script) try: results = json.loads(unicode_results) except (TypeError, ValueError): results = None if results: return True, results return False, None result = Promise( audit_results_check_func, "Timed out waiting for a11y audit results.", timeout=5, ).fulfill() # audit_results is report of accessibility violations for that session # Note that this ruleset doesn't have distinct error/warning levels. audit_results = result.get('violations') return audit_results
Run an accessibility audit on the page using the axe-core ruleset. Args: browser: a browser instance. rules_js: the ruleset JavaScript as a string. config: an AxsAuditConfig instance. Returns: A list of violations. Related documentation: https://github.com/dequelabs/axe-core/blob/master/doc/API.md#results-object __Caution__: You probably don't really want to call this method directly! It will be used by `AxeCoreAudit.do_audit`.
def ahrs_send(self, omegaIx, omegaIy, omegaIz, accel_weight, renorm_val, error_rp, error_yaw, force_mavlink1=False): ''' Status of DCM attitude estimator omegaIx : X gyro drift estimate rad/s (float) omegaIy : Y gyro drift estimate rad/s (float) omegaIz : Z gyro drift estimate rad/s (float) accel_weight : average accel_weight (float) renorm_val : average renormalisation value (float) error_rp : average error_roll_pitch value (float) error_yaw : average error_yaw value (float) ''' return self.send(self.ahrs_encode(omegaIx, omegaIy, omegaIz, accel_weight, renorm_val, error_rp, error_yaw), force_mavlink1=force_mavlink1)
Status of DCM attitude estimator omegaIx : X gyro drift estimate rad/s (float) omegaIy : Y gyro drift estimate rad/s (float) omegaIz : Z gyro drift estimate rad/s (float) accel_weight : average accel_weight (float) renorm_val : average renormalisation value (float) error_rp : average error_roll_pitch value (float) error_yaw : average error_yaw value (float)
def is_enum_type(type_):
    """ Checks if the given type is an enum type.

    :param type_: The type to check
    :return: True if the type is an enum type, otherwise False
    :rtype: bool
    """
    return isinstance(type_, type) and issubclass(type_, tuple(_get_types(Types.ENUM)))
Checks if the given type is an enum type. :param type_: The type to check :return: True if the type is an enum type, otherwise False :rtype: bool
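Inside the defining module, where `_get_types(Types.ENUM)` resolves, and assuming it yields `enum.Enum`, the check behaves like this:

import enum

class Color(enum.Enum):
    RED = 1

print(is_enum_type(Color))      # True
print(is_enum_type(Color.RED))  # False: an instance, not a type
print(is_enum_type(int))        # False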
def fms(x, y, z, context=None): """ Return (x * y) - z, with a single rounding according to the current context. """ return _apply_function_in_current_context( BigFloat, mpfr.mpfr_fms, ( BigFloat._implicit_convert(x), BigFloat._implicit_convert(y), BigFloat._implicit_convert(z), ), context, )
Return (x * y) - z, with a single rounding according to the current context.
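A usage sketch, assuming the enclosing package is the `bigfloat` MPFR binding and that `fms` is exported at the top level:

from bigfloat import fms

print(fms(2, 3, 1))  # 2 * 3 - 1 == 5, computed with a single rounding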
def actions_delete(): """ DEPRECATED (v 1.9.4) delete an ontology from the local repo """ filename = action_listlocal() ONTOSPY_LOCAL_MODELS = get_home_location() if filename: fullpath = ONTOSPY_LOCAL_MODELS + filename if os.path.exists(fullpath): var = input("Are you sure you want to delete this file? (y/n)") if var == "y": os.remove(fullpath) printDebug("Deleted %s" % fullpath, "important") cachepath = ONTOSPY_LOCAL_CACHE + filename + ".pickle" # @todo: do this operation in /cache... if os.path.exists(cachepath): os.remove(cachepath) printDebug("---------") printDebug("File deleted [%s]" % cachepath, "important") return True else: printDebug("Goodbye") return False
DEPRECATED (v 1.9.4) delete an ontology from the local repo