def find_overlaps(self, index=False):
    """
    Find overlaps in a striplog.

    Args:
        index (bool): If True, returns indices of intervals with
            overlaps after them.

    Returns:
        Striplog: A striplog of all the overlaps as intervals.
    """
    return self.__find_incongruities(op=operator.gt, index=index)
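# Hedged usage sketch for the method above. The striplog package is real,
# but the CSV filename and call pattern here are illustrative assumptions:
# from striplog import Striplog
# s = Striplog.from_csv('tops.csv')
# overlaps = s.find_overlaps()           # Striplog of overlapping intervals
# indices = s.find_overlaps(index=True)  # indices of the offending intervals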
def rgb_color(self):
    """Return the color property as a list of [R, G, B], each 0-255."""
    self.update()
    return [self._red, self._green, self._blue]
def _append_count_to_matrix(qtl_matrixfile, lod_threshold):
    """
    Append an extra column at the end of the matrix file containing,
    for each row (marker), the number of QTLs found if the marker is
    known, i.e. Locus != ''.

    :arg qtl_matrixfile: the matrix file in which to save the output.
    :arg lod_threshold: threshold used to determine whether a given LOD
        value reflects the presence of a QTL.
    """
    if not os.path.exists(qtl_matrixfile):  # pragma: no cover
        raise MQ2Exception('File not found: "%s"' % qtl_matrixfile)
    matrix = read_input_file(qtl_matrixfile, sep=',')
    tmp = list(matrix[0])
    tmp.append('# QTLs')
    matrix[0] = tmp
    cnt = 1
    while cnt < len(matrix):
        row = list(matrix[cnt])
        nr_qtl = 0
        for cel in row[3:]:
            if cel and float(cel) > float(lod_threshold):
                nr_qtl = nr_qtl + 1
        row.append(str(nr_qtl))
        matrix[cnt] = row
        cnt = cnt + 1
    write_matrix(qtl_matrixfile, matrix)
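# Illustrative before/after for the column appended above, assuming
# lod_threshold=3. The code skips the first three columns, so the exact
# column names below are an assumption for illustration only:
# header row: ['Locus', 'Group', 'Position', 'trait1', 'trait2']  ->  [..., '# QTLs']
# data row:   ['m1',    '1',     '12.3',     '4.1',    '2.0']     ->  [..., '1']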
def get_hours_description(self):
    """Generates a description for only the HOUR portion of the expression.

    Returns:
        The HOUR description
    """
    expression = self._expression_parts[2]
    return self.get_segment_description(
        expression,
        _("every hour"),
        lambda s: self.format_time(s, "0"),
        lambda s: _("every {0} hours").format(s),
        lambda s: _("between {0} and {1}"),
        lambda s: _("at {0}")
    )
def compute_layer(cls, data, params, layout):
    """
    Compute position for the layer in all panels

    Positions can override this function instead of
    `compute_panel` if the position computations are
    independent of the panel, i.e. when not colliding.
    """
    def fn(pdata):
        """Helper compute function"""
        # Given data belonging to a specific panel, grab
        # the corresponding scales and call the method
        # that does the real computation
        if len(pdata) == 0:
            return pdata
        scales = layout.get_scales(pdata['PANEL'].iat[0])
        return cls.compute_panel(pdata, scales, params)

    return groupby_apply(data, 'PANEL', fn)
def fast_hamiltonian_terms(Ep, epsilonp, detuning_knob, omega_level, rm, xi,
                           theta, unfolding, matrix_form=False,
                           file_name=None, return_code=False):
    r"""Return a fast function that returns the Hamiltonian terms.

    We test a basic two-level system.

    >>> import numpy as np
    >>> from scipy.constants import physical_constants
    >>> from sympy import Matrix, symbols
    >>> from fast.electric_field import electric_field_amplitude_top
    >>> from fast.symbolic import define_laser_variables, polarization_vector

    >>> Ne = 2
    >>> Nl = 1
    >>> a0 = physical_constants["Bohr radius"][0]
    >>> rm = [np.array([[0, 0], [a0, 0]]),
    ...       np.array([[0, 0], [0, 0]]),
    ...       np.array([[0, 0], [0, 0]])]
    >>> xi = np.array([[[0, 1], [1, 0]]])
    >>> omega_level = [0, 1.0e9]
    >>> theta = phase_transformation(Ne, Nl, rm, xi)

    We define symbolic variables to be used as token arguments.

    >>> Ep, omega_laser = define_laser_variables(Nl)
    >>> epsilonps = [polarization_vector(0, 0, 0, 0, 1)]
    >>> detuning_knob = [symbols("delta1", real=True)]

    A map to unfold the density matrix.

    >>> unfolding = Unfolding(Ne, True, True, True)

    We obtain a function to calculate Hamiltonian terms.

    >>> aux = (Ep, epsilonps, detuning_knob, omega_level, rm, xi, theta,
    ...        unfolding, False, None)
    >>> hamiltonian_terms = fast_hamiltonian_terms(*aux)

    Apply this to a density matrix.

    >>> rhos = np.array([[0.6, 3+2j],
    ...                  [3-2j, 0.4]])
    >>> rhosv = unfolding(rhos)

    We specify values for the variables.

    >>> detuning_knobs = [100e6]
    >>> Eps = electric_field_amplitude_top(1e-3, 1e-3, 1, "SI")
    >>> Eps *= np.exp(1j*np.pi)
    >>> Eps = [Eps]

    >>> print(hamiltonian_terms(rhosv, Eps, detuning_knobs))
    [5.5681e+07 2.0000e+08 2.9722e+08]

    """
    if not unfolding.lower_triangular:
        mes = "It is very inefficient to solve using all components of the "
        mes += "density matrix. Better set lower_triangular=True in Unfolding."
        raise NotImplementedError(mes)
    if matrix_form and (not unfolding.real) and (unfolding.lower_triangular):
        mes = "It is not possible to express the equations in matrix form "
        mes += "for complex lower triangular components only."
        raise ValueError(mes)
    Nl = len(Ep)
    Nrho = unfolding.Nrho

    # We determine which arguments are constants.
    if True:
        try:
            Ep = np.array([complex(Ep[l]) for l in range(Nl)])
            variable_Ep = False
        except:
            variable_Ep = True

        try:
            epsilonp = [np.array([complex(epsilonp[l][i]) for i in range(3)])
                        for l in range(Nl)]
            variable_epsilonp = False
        except:
            variable_epsilonp = True

        try:
            detuning_knob = np.array([float(detuning_knob[l])
                                      for l in range(Nl)])
            variable_detuning_knob = False
        except:
            variable_detuning_knob = True

    # We obtain code for the two parts.
    if True:
        if file_name is not None:
            file_name_rabi = file_name + "_rabi"
            file_name_detuning = file_name + "_detuning"
        else:
            file_name_rabi = file_name
            file_name_detuning = file_name

        rabi_terms = fast_rabi_terms(Ep, epsilonp, rm, xi, theta,
                                     unfolding, matrix_form=matrix_form,
                                     file_name=file_name_rabi,
                                     return_code=True)
        detuning_terms = fast_detuning_terms(detuning_knob, omega_level,
                                             xi, theta, unfolding,
                                             matrix_form=matrix_form,
                                             file_name=file_name_detuning,
                                             return_code=True)

        code = rabi_terms + "\n\n" + detuning_terms + "\n\n"
        # If these functions have 0 arguments, we call them only once!
        if not variable_Ep and not variable_epsilonp and matrix_form:
            code += "rabi_terms = rabi_terms()\n\n"
        if not variable_detuning_knob and matrix_form:
            code += "detuning_terms = detuning_terms()\n\n"

    # We establish the arguments of the output function.
    if True:
        code += "def hamiltonian_terms("
        if not matrix_form:
            code += "rho, "
        if variable_Ep:
            code += "Ep, "
        if variable_epsilonp:
            code += "epsilonp, "
        if variable_detuning_knob:
            code += "detuning_knob, "
        code += "rabi_terms=rabi_terms, detuning_terms=detuning_terms"
        #
        if code[-2:] == ", ":
            code = code[:-2]
        code += "):\n"
        code += '    r"""A fast calculation of the hamiltonian terms."""\n'

    # We initialize the output and auxiliaries.
    if True:
        # We introduce the factor that multiplies all terms.
        if matrix_form:
            code += "    A = np.zeros((" + str(Nrho) + ", " + str(Nrho)
            if not unfolding.real:
                code += "), complex)\n\n"
            else:
                code += "))\n\n"
            if unfolding.normalized:
                code += "    b = np.zeros((" + str(Nrho)
                if not unfolding.real:
                    code += "), complex)\n\n"
                else:
                    code += "))\n\n"
        else:
            code += "    rhs = np.zeros((" + str(Nrho)
            if not unfolding.real:
                code += "), complex)\n\n"
            else:
                code += "))\n\n"

    # We call the Rabi terms.
    if True:
        if not variable_Ep and not variable_epsilonp and matrix_form:
            aux_code = "rabi_terms\n"
        else:
            aux_code = "rabi_terms("
            if not matrix_form:
                aux_code += "rho, "
            if variable_Ep:
                aux_code += "Ep, "
            if variable_epsilonp:
                aux_code += "epsilonp, "
            if aux_code[-2:] == ", ":
                aux_code = aux_code[:-2]
            aux_code += ")\n"

        if matrix_form:
            if unfolding.normalized:
                code += "    aux = " + aux_code
                code += "    A += aux[0]\n"
                code += "    b += aux[1]\n"
            else:
                code += "    A = " + aux_code
        else:
            code += "    rhs = " + aux_code

    # We call the detuning terms.
    if True:
        if not variable_detuning_knob and matrix_form:
            aux_code = "detuning_terms\n"
        else:
            aux_code = "detuning_terms("
            if not matrix_form:
                aux_code += "rho, "
            if variable_detuning_knob:
                aux_code += "detuning_knob, "
            if aux_code[-2:] == ", ":
                aux_code = aux_code[:-2]
            aux_code += ")\n"

        if matrix_form:
            if unfolding.normalized:
                code += "    aux = " + aux_code
                code += "    A += aux[0]\n"
                code += "    b += aux[1]\n"
            else:
                code += "    A += " + aux_code
        else:
            code += "    rhs += " + aux_code

    # We finish the code.
    if True:
        if matrix_form:
            if unfolding.normalized:
                code += "    return A, b\n"
            else:
                code += "    return A\n"
        else:
            code += "    return rhs\n"

    # We write the code to file if provided, and execute it.
    if True:
        if file_name is not None:
            f = file(file_name + ".py", "w")
            f.write(code)
            f.close()

        hamiltonian_terms = code
        if not return_code:
            exec hamiltonian_terms
    return hamiltonian_terms
r"""Return a fast function that returns the Hamiltonian terms. We test a basic two-level system. >>> import numpy as np >>> from scipy.constants import physical_constants >>> from sympy import Matrix, symbols >>> from fast.electric_field import electric_field_amplitude_top >>> from fast.symbolic import define_laser_variables, polarization_vector >>> Ne = 2 >>> Nl = 1 >>> a0 = physical_constants["Bohr radius"][0] >>> rm = [np.array([[0, 0], [a0, 0]]), ... np.array([[0, 0], [0, 0]]), ... np.array([[0, 0], [0, 0]])] >>> xi = np.array([[[0, 1], [1, 0]]]) >>> omega_level = [0, 1.0e9] >>> theta = phase_transformation(Ne, Nl, rm, xi) We define symbolic variables to be used as token arguments. >>> Ep, omega_laser = define_laser_variables(Nl) >>> epsilonps = [polarization_vector(0, 0, 0, 0, 1)] >>> detuning_knob = [symbols("delta1", real=True)] An map to unfold the density matrix. >>> unfolding = Unfolding(Ne, True, True, True) We obtain a function to calculate Hamiltonian terms. >>> aux = (Ep, epsilonps, detuning_knob, omega_level, rm, xi, theta, ... unfolding, False, None) >>> hamiltonian_terms = fast_hamiltonian_terms(*aux) Apply this to a density matrix. >>> rhos = np.array([[0.6, 3+2j], ... [3-2j, 0.4]]) >>> rhosv = unfolding(rhos) We specify values for the variables >>> detuning_knobs = [100e6] >>> Eps = electric_field_amplitude_top(1e-3, 1e-3, 1, "SI") >>> Eps *= np.exp(1j*np.pi) >>> Eps = [Eps] >>> print(hamiltonian_terms(rhosv, Eps, detuning_knobs)) [5.5681e+07 2.0000e+08 2.9722e+08]
def _fast_write(self, outfile, value):
    """Function for fast writing to motor files."""
    outfile.truncate(0)
    outfile.write(str(int(value)))
    outfile.flush()
def pfm_to_pwm(self, pfm, pseudo=0.001):
    """Convert PFM with counts to a PFM with fractions.

    Parameters
    ----------
    pfm : list
        2-dimensional list with counts.
    pseudo : float
        Pseudocount used in conversion.

    Returns
    -------
    pwm : list
        2-dimensional list with fractions.
    """
    return [[(x + pseudo) / (float(np.sum(row)) + pseudo * 4)
             for x in row] for row in pfm]
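# A quick worked example of the conversion above, outside the class: a count
# row [8, 0, 2, 0] with the default pseudocount becomes fractions summing to 1.
import numpy as np

pfm = [[8, 0, 2, 0]]
pseudo = 0.001
pwm = [[(x + pseudo) / (float(np.sum(row)) + pseudo * 4) for x in row]
       for row in pfm]
# pwm[0] is approximately [0.7998, 0.0001, 0.2000, 0.0001]; sum(pwm[0]) == 1.0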
def enumerate_builtins(tokens):
    """
    Returns a list of all the builtins being used in *tokens*.
    """
    out = []
    for index, tok in enumerate(tokens):
        token_type = tok[0]
        token_string = tok[1]
        if token_string in builtins:
            # Note: I need to test if print can be replaced in Python 3
            special_special = ['print']  # Print is special in Python 2
            if py3:
                special_special = []
            if token_string not in special_special:
                if not token_string.startswith('__'):  # Don't count magic funcs
                    if tokens[index-1][1] != '.' and tokens[index+1][1] != '=':
                        if token_string not in out:
                            out.append(token_string)
    return out
def zendesk_ticket(self):
    """
    | Description:
       The ID of the Zendesk Support ticket created from this chat.
       Available only if using version 2 of the Zendesk Chat-Support
       integration
    """
    if self.api and self.zendesk_ticket_id:
        return self.api._get_zendesk_ticket(self.zendesk_ticket_id)
def setup_request_sessions(self):
    """
    Sets up a requests.Session object for sharing headers across API
    requests.
    """
    self.req_session = requests.Session()
    self.req_session.headers.update(self.headers)
def create_process_work_item_type(self, work_item_type, process_id):
    """CreateProcessWorkItemType.
    [Preview API] Creates a work item type in the process.
    :param :class:`<CreateProcessWorkItemTypeRequest> <azure.devops.v5_0.work_item_tracking_process.models.CreateProcessWorkItemTypeRequest>` work_item_type:
    :param str process_id: The ID of the process on which to create work item type.
    :rtype: :class:`<ProcessWorkItemType> <azure.devops.v5_0.work_item_tracking_process.models.ProcessWorkItemType>`
    """
    route_values = {}
    if process_id is not None:
        route_values['processId'] = self._serialize.url('process_id', process_id, 'str')
    content = self._serialize.body(work_item_type, 'CreateProcessWorkItemTypeRequest')
    response = self._send(http_method='POST',
                          location_id='e2e9d1a6-432d-4062-8870-bfcb8c324ad7',
                          version='5.0-preview.2',
                          route_values=route_values,
                          content=content)
    return self._deserialize('ProcessWorkItemType', response)
def RemoveClientLabels(self, client_id, owner, labels, cursor=None):
    """Removes a list of user labels from a given client."""
    query = ("DELETE FROM client_labels "
             "WHERE client_id = %s AND owner_username_hash = %s "
             "AND label IN ({})").format(", ".join(["%s"] * len(labels)))
    args = [db_utils.ClientIDToInt(client_id),
            mysql_utils.Hash(owner)] + labels
    cursor.execute(query, args)
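# For two labels, the statement built above expands to (illustrative):
# DELETE FROM client_labels
# WHERE client_id = %s AND owner_username_hash = %s AND label IN (%s, %s)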
def raw_chroma_accuracy(ref_voicing, ref_cent, est_voicing, est_cent,
                        cent_tolerance=50):
    """Compute the raw chroma accuracy given two pitch (frequency) sequences
    in cents and matching voicing indicator sequences. The first pitch and
    voicing arrays are treated as the reference (truth), and the second two
    as the estimate (prediction). All 4 sequences must be of the same length.

    Examples
    --------
    >>> ref_time, ref_freq = mir_eval.io.load_time_series('ref.txt')
    >>> est_time, est_freq = mir_eval.io.load_time_series('est.txt')
    >>> (ref_v, ref_c,
    ...  est_v, est_c) = mir_eval.melody.to_cent_voicing(ref_time,
    ...                                                  ref_freq,
    ...                                                  est_time,
    ...                                                  est_freq)
    >>> raw_chroma = mir_eval.melody.raw_chroma_accuracy(ref_v, ref_c,
    ...                                                  est_v, est_c)

    Parameters
    ----------
    ref_voicing : np.ndarray
        Reference boolean voicing array
    ref_cent : np.ndarray
        Reference pitch sequence in cents
    est_voicing : np.ndarray
        Estimated boolean voicing array
    est_cent : np.ndarray
        Estimate pitch sequence in cents
    cent_tolerance : float
        Maximum absolute deviation for a cent value to be considered correct
        (Default value = 50)

    Returns
    -------
    raw_chroma : float
        Raw chroma accuracy, the fraction of voiced frames in ref_cent for
        which est_cent provides a correct frequency value (within
        cent_tolerance cents), ignoring octave errors

    References
    ----------
    .. [#] J. Salamon, E. Gomez, D. P. W. Ellis and G. Richard, "Melody
        Extraction from Polyphonic Music Signals: Approaches, Applications
        and Challenges", IEEE Signal Processing Magazine, 31(2):118-134,
        Mar. 2014.

    .. [#] G. E. Poliner, D. P. W. Ellis, A. F. Ehmann, E. Gomez, S.
        Streich, and B. Ong. "Melody transcription from music audio:
        Approaches and evaluation", IEEE Transactions on Audio, Speech, and
        Language Processing, 15(4):1247-1256, 2007.
    """
    validate_voicing(ref_voicing, est_voicing)
    validate(ref_voicing, ref_cent, est_voicing, est_cent)
    ref_voicing = ref_voicing.astype(bool)
    est_voicing = est_voicing.astype(bool)
    # When input arrays are empty, return 0 by special case
    if ref_voicing.size == 0 or est_voicing.size == 0 \
       or ref_cent.size == 0 or est_cent.size == 0:
        return 0.

    # If there are no voiced frames in reference, metric is 0
    if ref_voicing.sum() == 0:
        return 0.

    # Raw chroma = same as raw pitch except that octave errors are ignored.
    cent_diff = np.abs(ref_cent - est_cent)
    octave = 1200 * np.floor(cent_diff / 1200.0 + 0.5)
    matching_voicing = ref_voicing * (est_cent > 0)
    cent_diff = np.abs(cent_diff - octave)[matching_voicing]
    frame_correct = (cent_diff < cent_tolerance)
    n_voiced = float(ref_voicing.sum())
    raw_chroma = (frame_correct).sum() / n_voiced
    return raw_chroma
def loop(self):
    '''Tracks the time in a loop. The estimated time to completion
    can be calculated, and if verbose is set to *True*, the object will
    print the estimated time to completion and percent complete.
    Activate in every loop to keep track.'''
    self.count += 1
    self.tf = time.time()
    self.elapsed = self.tf - self.ti
    if self.verbose:
        displayAll(self.elapsed, self.display_amt, self.est_end,
                   self.nLoops, self.count, self.numPrints)
def init_config(self, **kw):
    """Get a configuration object for this type of YubiKey."""
    return YubiKeyConfigUSBHID(ykver=self.version_num(),
                               capabilities=self.capabilities,
                               **kw)
def write_if_allowed(filename: str,
                     content: str,
                     overwrite: bool = False,
                     mock: bool = False) -> None:
    """
    Writes the contents to a file, if permitted.

    Args:
        filename: filename to write
        content: contents to write
        overwrite: permit overwrites?
        mock: pretend to write, but don't

    Raises:
        RuntimeError: if file exists but overwriting not permitted
    """
    # Check we're allowed
    if not overwrite and exists(filename):
        fail("File exists, not overwriting: {!r}".format(filename))

    # Make the directory, if necessary
    directory = dirname(filename)
    if not mock:
        mkdir_p(directory)

    # Write the file
    log.info("Writing to {!r}", filename)
    if mock:
        log.warning("Skipping writes as in mock mode")
    else:
        with open(filename, "wt") as outfile:
            outfile.write(content)
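# Usage sketch; write_if_allowed depends on the module's own fail(),
# mkdir_p() and log helpers, so these calls are illustrative only:
# write_if_allowed("/tmp/out.txt", "hello")                  # creates file
# write_if_allowed("/tmp/out.txt", "hello")                  # fails: exists
# write_if_allowed("/tmp/out.txt", "hello", overwrite=True)  # overwrites
# write_if_allowed("/tmp/out.txt", "hello", mock=True)       # logs only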
def local_moe_tpu(inputs, hidden_size, output_size, num_experts,
                  loss_coef=1e-3, overhead=1.0):
    """Local mixture of experts that works well on TPU.

    See https://arxiv.org/abs/1701.06538

    There are num_experts expert networks, each containing a relu-activated
    hidden layer of size hidden_size, followed by an output projection.

    The number of parameters is thus:
      num_experts * (input_size * hidden_size + hidden_size * output_size)

    The input is 3d: [batch, length, depth], consisting of the representations
    of all positions in a batch of sequences.

    Each position of each sequence is sent to 0-2 experts. The expert choices
    and the combination weights are determined by a learned gating function.

    This function returns a small auxiliary loss that should be added to the
    training loss of the model. This loss helps to balance expert usage.
    Without the loss, it is very likely that a few experts will be trained
    and the rest will starve.

    Several hacks are necessary to get around current TPU limitations:

    - To ensure static shapes, we enforce (by truncation/padding) that each
      sequence sends the same number of elements to each expert.

      It would make more sense to enforce this equality over the entire
      batch, as opposed to on individual sequences. This would allow more
      freedom for individual sequences to be unbalanced. Unfortunately, that
      would slow down our hacked-up gather-by-matmul implementation.

      TODO(noam): There is no real reason for a single sequence to be the
        unit of equal allocation. Reshaping the inputs would allow us to
        pick a different unit of equal allocation.

    TODO(noam): Factor this code better. We want to be able to substitute
    different code for the experts themselves. We also want to integrate
    this gating/dispatching logic into multi-device mixtures-of-experts.

    Args:
      inputs: a Tensor with shape [batch, length, depth]
      hidden_size: an integer
      output_size: an integer
      num_experts: an integer
      loss_coef: a float scalar
      overhead: multiplicative factor of how much spare capacity to assign

    Returns:
      outputs: a Tensor with shape [batch, length, output_size]
      loss: a scalar
    """
    batch, length, input_size = common_layers.shape_list(inputs)[:]
    # Each sequence sends expert_capacity positions to each expert.
    if isinstance(length, int):
        expert_capacity = min(
            length, int((length * 2 * overhead) / num_experts))
    else:
        expert_capacity = tf.minimum(
            length, tf.to_int32(
                tf.to_float(length) * 2 * overhead / num_experts))
    expert_capacity_f = tf.to_float(expert_capacity)

    # This is the learned gating function.
    gates = tf.nn.softmax(
        tf.to_float(common_layers.dense(inputs, num_experts, name="logits")))

    # Find the top expert for each position.
    gate_1, index_1 = common_layers.top_1_tpu(gates)
    # [batch, length, num_experts]
    mask_1 = tf.one_hot(index_1, num_experts)
    # [batch, length, num_experts]
    # This is the position within the expert's mini-batch for this sequence
    position_in_expert_1 = common_layers.cumsum(
        mask_1, axis=1, exclusive=True) * mask_1
    # Remove the elements that don't fit.
    mask_1 *= tf.to_float(tf.less(position_in_expert_1, expert_capacity_f))
    # [batch, 1, num_experts]
    # How many examples in this sequence go to this expert
    mask_1_count = tf.reduce_sum(mask_1, axis=1, keepdims=True)
    # [batch, length] - mostly ones, but zeros where something didn't fit
    mask_1_flat = tf.reduce_sum(mask_1, axis=2)
    position_in_expert_1 = tf.reduce_sum(position_in_expert_1, axis=2)
    # Weight assigned to first expert.
    gate_1 *= mask_1_flat

    # Pick a second-place expert for each position.
    # We first mask out the experts that we expect to be over-capacity.
    space_remaining = expert_capacity_f - mask_1_count
    use_rate = (mask_1_count + 1.0) / tf.to_float(length)
    # At what point in the sequence do we expect the expert to be full.
    expected_exhaustion_pos = space_remaining / use_rate
    # A Tensor with shape [batch, length, num_experts] representing a boolean
    #   - whether we expect that the expert will already be full.
    expected_exhausted = tf.to_float(tf.greater(
        tf.reshape(tf.to_float(tf.range(length)), [1, length, 1]),
        expected_exhaustion_pos))
    masked_gates = gates - mask_1 - expected_exhausted
    # This section is similar to the section above.
    gate_2, index_2 = common_layers.top_1_tpu(masked_gates)
    # [batch, length, num_experts]
    mask_2 = tf.one_hot(index_2, num_experts)
    position_in_expert_2 = (
        common_layers.cumsum(mask_2, axis=1, exclusive=True) + mask_1_count)
    position_in_expert_2 *= mask_2
    mask_2 *= tf.to_float(tf.less(position_in_expert_2, expert_capacity_f))
    mask_2_count = tf.reduce_sum(mask_2, axis=1, keepdims=True)
    mask_2_flat = tf.reduce_sum(mask_2, axis=2)
    position_in_expert_2 = tf.reduce_sum(position_in_expert_2, axis=2)
    gate_2 *= mask_2_flat

    # What fraction didn't fit - show summaries
    miss_rate_1 = 1.0 - tf.reduce_sum(mask_1_count) / tf.to_float(
        batch * length)
    miss_rate_2 = 1.0 - tf.reduce_sum(mask_2_count) / tf.to_float(
        batch * length)
    tf.summary.scalar("miss_rate_1", miss_rate_1)
    tf.summary.scalar("miss_rate_2", miss_rate_2)

    # renormalize the two gate values to add up to 1
    denom = gate_1 + gate_2 + 1e-9
    gate_1 /= denom
    gate_2 /= denom

    # inputs: [batch, length, input_size]
    # forward_assignment: [batch, length, num_experts * expert_capacity]
    # expert_inputs: [batch, num_experts * expert_capacity, input_size]
    segment_ids_forward_1 = (
        (index_1 * expert_capacity) +
        tf.to_int32(position_in_expert_1) +
        tf.to_int32(1.0 - mask_1_flat) * (num_experts * expert_capacity))
    segment_ids_forward_2 = (
        (index_2 * expert_capacity) +
        tf.to_int32(position_in_expert_2) +
        tf.to_int32(1.0 - mask_2_flat) * (num_experts * expert_capacity))
    # Gather and scatter are painfully slow on TPU.
    # We will use one_hot and matmul instead.
    # [batch, length, num_experts * expert_capacity]
    one_hot_1 = tf.one_hot(
        segment_ids_forward_1, num_experts * expert_capacity,
        dtype=inputs.dtype)
    one_hot_2 = tf.one_hot(
        segment_ids_forward_2, num_experts * expert_capacity,
        dtype=inputs.dtype)
    forward_assignment = (one_hot_1 + one_hot_2)
    # [batch, num_experts * expert_capacity, input_size]
    expert_inputs = tf.matmul(forward_assignment, inputs, transpose_a=True)
    # [batch, num_experts, expert_capacity, input_size]
    expert_inputs = tf.reshape(
        expert_inputs, [batch, num_experts, expert_capacity, input_size])
    # [num_experts, batch, expert_capacity, input_size]
    expert_inputs = tf.transpose(expert_inputs, [1, 0, 2, 3])
    # [num_experts, batch * expert_capacity, input_size]
    expert_inputs = tf.reshape(
        expert_inputs, [num_experts, batch * expert_capacity, input_size])
    # Now feed the expert inputs through the experts.
    h = common_layers.batch_dense(
        expert_inputs, hidden_size, activation=tf.nn.relu, name="x0")
    expert_output = common_layers.batch_dense(h, output_size, name="x1")
    expert_output = tf.reshape(
        expert_output, [num_experts, batch, expert_capacity, output_size])
    # [batch, num_experts, expert_capacity, output_size]
    expert_output = tf.transpose(expert_output, [1, 0, 2, 3])
    expert_output = tf.reshape(
        expert_output, [batch, num_experts * expert_capacity, output_size])
    # Again, use matmul instead of unsorted_segment_sum. This time, we need
    # to multiply by the combination weights gate_1 and gate_2.
    # expert_output: [batch, num_experts * expert_capacity, output_size]
    # backward_assignment: [batch, length, num_experts * expert_capacity]
    # output: [batch, length, output_size]
    backward_assignment = (
        one_hot_1 * tf.cast(tf.expand_dims(gate_1, 2), inputs.dtype) +
        one_hot_2 * tf.cast(tf.expand_dims(gate_2, 2), inputs.dtype))
    output = tf.matmul(backward_assignment, expert_output)

    # Compute a loss equal to the coefficient of variation of the
    # total gate value per expert per sequence.
    # This loss causes the experts to be used about equally per sequence.
    importance = tf.reduce_sum(gates * (mask_1 + mask_2), 1)
    loss = loss_coef * cv_squared(importance)
    return output, loss
def calculate(self, T, P, zs, ws, method):
    r'''Method to calculate heat capacity of a liquid mixture at
    temperature `T`, pressure `P`, mole fractions `zs` and weight fractions
    `ws` with a given method.

    This method has no exception handling; see `mixture_property`
    for that.

    Parameters
    ----------
    T : float
        Temperature at which to calculate the property, [K]
    P : float
        Pressure at which to calculate the property, [Pa]
    zs : list[float]
        Mole fractions of all species in the mixture, [-]
    ws : list[float]
        Weight fractions of all species in the mixture, [-]
    method : str
        Name of the method to use

    Returns
    -------
    Cplm : float
        Molar heat capacity of the liquid mixture at the given conditions,
        [J/mol]
    '''
    if method == SIMPLE:
        Cplms = [i(T) for i in self.HeatCapacityLiquids]
        return mixing_simple(zs, Cplms)
    elif method == LALIBERTE:
        ws = list(ws)
        ws.pop(self.index_w)
        Cpl = Laliberte_heat_capacity(T, ws, self.wCASs)
        MW = mixing_simple(zs, self.MWs)
        return property_mass_to_molar(Cpl, MW)
    else:
        raise Exception('Method not valid')
def wc(filename, contents, parsed=None, is_jekyll=False):
    """Count the words, characters, and paragraphs in a string.

    Args:
        contents: the original string to count
        filename (optional): the filename as provided to the CLI
        parsed (optional): a parsed string, expected to be plaintext only
        is_jekyll: whether the original contents were from a Jekyll file

    Returns:
        An object containing the various counts
    """
    if is_jekyll:
        fmt = 'jekyll'
    else:
        fmt = 'md/txt'

    body = parsed.strip() if parsed else contents.strip()

    # Strip the body down to just words. Note: re.MULTILINE must be passed
    # via `flags`; the fourth positional argument of re.sub is `count`.
    words = re.sub(r'\s+', ' ', body, flags=re.MULTILINE)
    for punctuation in INTERSTITIAL_PUNCTUATION:
        words = re.sub(punctuation, ' ', words)
    punct = re.compile(r'[^\w\s]', re.U)
    words = punct.sub('', words)

    # Retrieve only non-space characters
    real_characters = re.sub(r'\s', '', words)

    # Count paragraphs in an intelligent way
    paragraphs = [1 if len(x) == 0 else 0
                  for x in contents.strip().splitlines()]
    for index, paragraph in enumerate(paragraphs):
        if paragraph == 1 and paragraphs[index + 1] == 1:
            paragraphs[index] = 0

    return {
        'counts': {
            'file': filename,
            'type': fmt,
            'paragraphs': sum(paragraphs) + 1,
            'words': len(re.split(r'\s+', words)),
            'characters_real': len(real_characters),
            'characters_total': len(words),
        }
    }
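# Worked example of the counting logic (as comments, because
# INTERSTITIAL_PUNCTUATION is defined elsewhere in the module): for
# contents = "One two.\n\nThree four five." the result is paragraphs=2,
# words=5, characters_real=19, characters_total=23, assuming no
# interstitial punctuation pattern matches.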
def remove_output_data_port(self, data_port_id, force=False, destroy=True):
    """Remove an output data port from the state

    :param int data_port_id: the id of the output data port to remove
    :raises exceptions.AttributeError: if the specified output data port
        does not exist
    """
    if data_port_id in self._output_data_ports:
        if destroy:
            self.remove_data_flows_with_data_port_id(data_port_id)
        self._output_data_ports[data_port_id].parent = None
        return self._output_data_ports.pop(data_port_id)
    else:
        raise AttributeError("output data port with id %s does not exist"
                             % data_port_id)
def do_json_set(self, params):
    """
    \x1b[1mNAME\x1b[0m
            json_set - Sets the value for the given (possibly nested) key
            on a JSON object serialized in the given path

    \x1b[1mSYNOPSIS\x1b[0m
            json_set <path> <keys> <value> <value_type> [confirm]

    \x1b[1mDESCRIPTION\x1b[0m
            If the key exists and the value is different, the znode will
            be updated with the key set to its new value. If the key does
            not exist, it'll be created and the znode will be updated with
            the serialized version of the new object. The value's type
            will be determined by the value_type parameter.

    \x1b[1mEXAMPLES\x1b[0m
            > create /props '{"a": {"b": 4}}'
            > json_cat /props
            {
                "a": {
                    "b": 4
                }
            }
            > json_set /props a.b 5 int
            > json_cat /props
            {
                "a": {
                    "b": 5
                }
            }
            > json_set /props a.c.d true bool
            > json_cat /props
            {
                "a": {
                    "c": {
                        "d": true
                    },
                    "b": 5
                }
            }
    """
    try:
        Keys.validate(params.keys)
    except Keys.Bad as ex:
        self.show_output(str(ex))
        return

    try:
        jstr, stat = self._zk.get(params.path)
        obj_src = json_deserialize(jstr)
        obj_dst = copy.deepcopy(obj_src)

        # Cast value to its given type.
        value = to_type(params.value, params.value_type)
        Keys.set(obj_dst, params.keys, value)

        if params.confirm:
            a = json.dumps(obj_src, sort_keys=True, indent=4)
            b = json.dumps(obj_dst, sort_keys=True, indent=4)
            diff = difflib.unified_diff(a.split("\n"), b.split("\n"))
            self.show_output("\n".join(diff))
            if not self.prompt_yes_no("Apply update?"):
                return

        # Pass along the read version, to ensure we are updating
        # what we read.
        self.set(params.path, json.dumps(obj_dst), version=stat.version)
    except BadJSON:
        self.show_output("Path %s has bad JSON.", params.path)
    except Keys.Missing as ex:
        self.show_output("Path %s is missing key %s.", params.path, ex)
    except ValueError:
        self.show_output("Bad value_type")
def get(location, **kwargs):
    """Get Geocode

    :param ``location``: Your search location you want geocoded.
    :param ``provider``: The geocoding engine you want to use.
    :param ``method``: The method to use (e.g. ``geocode``, ``reverse``).
    """
    provider = kwargs.get('provider', 'bing').lower().strip()
    method = kwargs.get('method', 'geocode').lower().strip()
    if isinstance(location, (list, dict)) and method == 'geocode':
        raise ValueError("Location should be a string")

    if provider not in options:
        raise ValueError("Invalid provider")
    else:
        if method not in options[provider]:
            raise ValueError("Invalid method")
    return options[provider][method](location, **kwargs)
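# Hypothetical usage; which providers and methods are available depends on
# the module's `options` registry:
# result = get("Ottawa, ON", provider="bing", method="geocode")
# result = get([45.42, -75.69], provider="google", method="reverse")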
def control_loop():
    '''Main loop, retrieving the schedule.
    '''
    set_service_status(Service.SCHEDULE, ServiceStatus.BUSY)
    notify.notify('READY=1')
    while not terminate():
        notify.notify('WATCHDOG=1')

        # Try getting an updated schedule
        get_schedule()
        session = get_session()
        next_event = session.query(UpcomingEvent)\
                            .filter(UpcomingEvent.end > timestamp())\
                            .order_by(UpcomingEvent.start)\
                            .first()
        if next_event:
            logger.info('Next scheduled recording: %s',
                        datetime.fromtimestamp(next_event.start))
            notify.notify('STATUS=Next scheduled recording: %s' %
                          datetime.fromtimestamp(next_event.start))
        else:
            logger.info('No scheduled recording')
            notify.notify('STATUS=No scheduled recording')
        session.close()

        next_update = timestamp() + config()['agent']['update_frequency']
        while not terminate() and timestamp() < next_update:
            time.sleep(0.1)

    logger.info('Shutting down schedule service')
    set_service_status(Service.SCHEDULE, ServiceStatus.STOPPED)
def _whitespace_tokenize(self, text):
    """Runs basic whitespace cleaning and splitting on a piece of text."""
    text = text.strip()
    tokens = text.split()
    return tokens
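# The behaviour relies entirely on str.split() with no argument, which
# collapses runs of any whitespace and drops the leading/trailing parts:
assert " a  b\tc \n".strip().split() == ["a", "b", "c"]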
def PopEvents(self):
    """Pops events from the heap.

    Yields:
        EventObject: event.
    """
    event = self.PopEvent()
    while event:
        yield event
        event = self.PopEvent()
def describe_load_balancers(names=None,
                            load_balancer_arns=None,
                            region=None,
                            key=None,
                            keyid=None,
                            profile=None):
    '''
    Describes the specified load balancer or all of your load balancers.

    Returns: list

    CLI example:

    .. code-block:: bash

        salt myminion boto_elbv2.describe_load_balancers
        salt myminion boto_elbv2.describe_load_balancers alb_name
        salt myminion boto_elbv2.describe_load_balancers "[alb_name,alb_name]"
    '''
    if names and load_balancer_arns:
        raise SaltInvocationError('At most one of names or '
                                  'load_balancer_arns may be provided')
    if names:
        albs = names
    elif load_balancer_arns:
        albs = load_balancer_arns
    else:
        albs = None

    albs_list = []
    if albs:
        if isinstance(albs, (str, six.text_type)):
            albs_list.append(albs)
        else:
            for alb in albs:
                albs_list.append(alb)

    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        if names:
            ret = conn.describe_load_balancers(
                Names=albs_list)['LoadBalancers']
        elif load_balancer_arns:
            ret = conn.describe_load_balancers(
                LoadBalancerArns=albs_list)['LoadBalancers']
        else:
            ret = []
            next_marker = ''
            while True:
                r = conn.describe_load_balancers(Marker=next_marker)
                for alb in r['LoadBalancers']:
                    ret.append(alb)
                if 'NextMarker' in r:
                    next_marker = r['NextMarker']
                else:
                    break
        return ret if ret else []
    except ClientError as error:
        log.warning(error)
        return False
def set_scrollbars_cb(self, w, tf):
    """This callback is invoked when the user checks the 'Use Scrollbars'
    box in the preferences pane."""
    scrollbars = 'on' if tf else 'off'
    self.t_.set(scrollbars=scrollbars)
def shell(self, *args, **kwargs):
    """
    Run command `adb shell`
    """
    args = ['shell'] + list(args)
    return self.run_cmd(*args, **kwargs)
def build_self_reference(filename, clean_wcs=False):
    """
    This function creates a reference, undistorted WCS that can be used to
    apply a correction to the WCS of the input file.

    Parameters
    ----------
    filename : str
        Filename of image which will be corrected, and which will form the
        basis of the undistorted WCS.

    clean_wcs : bool
        Specify whether or not to return the WCS object without any
        distortion information, or any history of the original input image.
        This converts the output from `utils.output_wcs()` into a pristine
        `~stwcs.wcsutils.HSTWCS` object.

    Returns
    -------
    customwcs : `stwcs.wcsutils.HSTWCS`
        HSTWCS object which contains the undistorted WCS representing the
        entire field-of-view for the input image.

    Examples
    --------
    This function can be used with the following syntax to apply a
    shift/rot/scale change to the same image:

    >>> import buildref
    >>> from drizzlepac import updatehdr
    >>> filename = "jce501erq_flc.fits"
    >>> wcslin = buildref.build_self_reference(filename)
    >>> updatehdr.updatewcs_with_shift(filename, wcslin, xsh=49.5694,
    ...     ysh=19.2203, rot=359.998, scale=0.9999964)

    """
    if 'sipwcs' in filename:
        sciname = 'sipwcs'
    else:
        sciname = 'sci'

    wcslin = build_reference_wcs([filename], sciname=sciname)

    if clean_wcs:
        wcsbase = wcslin.wcs
        customwcs = build_hstwcs(wcsbase.crval[0], wcsbase.crval[1],
                                 wcsbase.crpix[0], wcsbase.crpix[1],
                                 wcslin._naxis1, wcslin._naxis2,
                                 wcslin.pscale, wcslin.orientat)
    else:
        customwcs = wcslin
    return customwcs
def fft(fEM, time, freq, ftarg):
    r"""Fourier Transform using the Fast Fourier Transform.

    The function is called from one of the modelling routines in
    :mod:`model`. Consult these modelling routines for a description of the
    input and output parameters.

    Returns
    -------
    tEM : array
        Returns time-domain EM response of ``fEM`` for given ``time``.

    conv : bool
        Only relevant for QWE/QUAD.
    """
    # Get ftarg values
    dfreq, nfreq, ntot, pts_per_dec = ftarg

    # If pts_per_dec, we have first to interpolate fEM to required freqs
    if pts_per_dec:
        sfEMr = iuSpline(np.log(freq), fEM.real)
        sfEMi = iuSpline(np.log(freq), fEM.imag)
        freq = np.arange(1, nfreq+1)*dfreq
        fEM = sfEMr(np.log(freq)) + 1j*sfEMi(np.log(freq))

    # Pad the frequency result
    fEM = np.pad(fEM, (0, ntot-nfreq), 'linear_ramp')

    # Carry out FFT
    ifftEM = fftpack.ifft(np.r_[fEM[1:], 0, fEM[::-1].conj()]).real
    stEM = 2*ntot*fftpack.fftshift(ifftEM*dfreq, 0)

    # Interpolate in time domain
    dt = 1/(2*ntot*dfreq)
    ifEM = iuSpline(np.linspace(-ntot, ntot-1, 2*ntot)*dt, stEM)
    tEM = ifEM(time)/2*np.pi  # (Multiplication of 2/pi in model.tem)

    # Return the electromagnetic time domain field
    # (Second argument is only for QWE)
    return tEM, True
r"""Fourier Transform using the Fast Fourier Transform. The function is called from one of the modelling routines in :mod:`model`. Consult these modelling routines for a description of the input and output parameters. Returns ------- tEM : array Returns time-domain EM response of ``fEM`` for given ``time``. conv : bool Only relevant for QWE/QUAD.
def system(self):
    """returns an object to work with the site system"""
    if self._resources is None:
        self.__init()
    if "system" in self._resources:
        url = self._url + "/system"
        return _system.System(url=url,
                              securityHandler=self._securityHandler,
                              proxy_url=self._proxy_url,
                              proxy_port=self._proxy_port,
                              initialize=True)
    else:
        return None
def getParent(self, returned_properties=None):
    """Get the parent workitem of this workitem

    If no parent, None will be returned.

    :param returned_properties: the returned properties that you want.
        Refer to :class:`rtcclient.client.RTCClient` for more explanations
    :return: a :class:`rtcclient.workitem.Workitem` object
    :rtype: rtcclient.workitem.Workitem
    """
    parent_tag = ("rtc_cm:com.ibm.team.workitem.linktype."
                  "parentworkitem.parent")
    rp = returned_properties
    parent = (self.rtc_obj
                  ._get_paged_resources("Parent",
                                        workitem_id=self.identifier,
                                        customized_attr=parent_tag,
                                        page_size="5",
                                        returned_properties=rp))
    # No more than one parent
    if parent:
        # only one element
        return parent[0]
    return None
def get_objects(self):
    """Return iterable of "object descriptions", which are tuples with
    these items:

    * `name`
    * `dispname`
    * `type`
    * `docname`
    * `anchor`
    * `priority`

    For details on each item, see
    :py:meth:`~sphinx.domains.Domain.get_objects`.
    """
    for modname, info in self.data['modules'].items():
        yield (modname, modname, 'module', info[0], 'module-' + modname, 0)
    for refname, (docname, type_name) in self.data['objects'].items():
        if type_name != 'module':  # modules are already handled
            yield (refname, refname, type_name, docname, refname, 1)
Return iterable of "object descriptions", which are tuple with these items: * `name` * `dispname` * `type` * `docname` * `anchor` * `priority` For details on each item, see :py:meth:`~sphinx.domains.Domain.get_objects`.
def ext_pillar(minion_id,  # pylint: disable=W0613
               pillar,  # pylint: disable=W0613
               conf,
               nesting_key=None):
    '''
    Get pillar data from Vault for the configuration ``conf``.
    '''
    comps = conf.split()

    paths = [comp for comp in comps if comp.startswith('path=')]
    if not paths:
        log.error('"%s" is not a valid Vault ext_pillar config', conf)
        return {}

    vault_pillar = {}

    try:
        path = paths[0].replace('path=', '')
        path = path.format(**{'minion': minion_id})
        url = 'v1/{0}'.format(path)
        response = __utils__['vault.make_request']('GET', url)
        if response.status_code == 200:
            vault_pillar = response.json().get('data', {})
        else:
            log.info('Vault secret not found for: %s', path)
    except KeyError:
        log.error('No such path in Vault: %s', path)

    if nesting_key:
        vault_pillar = {nesting_key: vault_pillar}
    return vault_pillar
def accessible(self, fromstate, tostate):
    """Is state tostate directly accessible (in one step) from state
    fromstate? (i.e. is there an edge between the nodes). If so, return
    the probability, else zero."""
    if (fromstate not in self.nodes or tostate not in self.nodes
            or fromstate not in self.edges_out):
        return 0
    if tostate in self.edges_out[fromstate]:
        return self.edges_out[fromstate][tostate]
    else:
        return 0
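# Minimal self-contained illustration of the lookup above, with plain dicts
# standing in for the object's attributes (names are illustrative):
nodes = {"a", "b", "c"}
edges_out = {"a": {"b": 0.7, "c": 0.3}}

def accessible_demo(fromstate, tostate):
    if fromstate not in nodes or tostate not in nodes \
            or fromstate not in edges_out:
        return 0
    return edges_out[fromstate].get(tostate, 0)

assert accessible_demo("a", "b") == 0.7
assert accessible_demo("b", "a") == 0  # "b" has no outgoing edges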
def revoke_access_token(self):
    """
    Revoke the access token currently in use.
    """
    if not self._access_token:
        return
    self.execute_post('authentication/revoke', json=dict(
        token_type_hint='access_token',
        token=self._access_token
    ))
    self._access_token = None
def set_layer(self, layer=None):
    """Set layer and update UI accordingly.

    :param layer: A QgsVectorLayer.
    :type layer: QgsVectorLayer
    """
    if layer:
        self.layer = layer
    else:
        self.layer = self.input_layer_combo_box.currentLayer()
    if not self.layer:
        return

    try:
        # Read keywords from the resolved layer; the `layer` argument may
        # be None when the layer comes from the combo box.
        keywords = self.keyword_io.read_keywords(self.layer)
        self.show_current_metadata()
    except NoKeywordsFoundError:
        # FIXME (elpaso)
        keywords = {}

    # TODO(IS): Show only possible exposure target
    if keywords.get('layer_purpose', False) == layer_purpose_hazard['key']:
        self.target_exposure_label.setEnabled(True)
        self.target_exposure_combo_box.setEnabled(True)
        self.target_exposure_combo_box.clear()
        for exposure in exposure_all:
            # Only show exposure that has active classification
            if active_classification(keywords, exposure['key']):
                self.target_exposure_combo_box.addItem(
                    exposure['name'], exposure['key'])
    else:
        self.target_exposure_label.setEnabled(False)
        self.target_exposure_combo_box.setEnabled(False)
        self.target_exposure_combo_box.clear()
        self.target_exposure_combo_box.addItem(tr("Not Applicable"))
def _varLib_finder(source, directory="", ext="ttf"):
    """Finder function to be used with varLib.build to find master TTFs
    given the filename of the source UFO master as specified in the
    designspace. It replaces the UFO directory with the one specified in
    'directory' argument, and replaces the file extension with 'ext'.
    """
    fname = os.path.splitext(os.path.basename(source))[0] + "." + ext
    return os.path.join(directory, fname)
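# The path rewrite is pure string manipulation, so it can be checked
# directly (the filenames are made up for illustration):
import os

def _demo_finder(source, directory="", ext="ttf"):
    fname = os.path.splitext(os.path.basename(source))[0] + "." + ext
    return os.path.join(directory, fname)

assert (_demo_finder("masters/MyFont-Regular.ufo", "build")
        == os.path.join("build", "MyFont-Regular.ttf"))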
def Stat(self, urns):
    """Returns metadata about all urns.

    Currently the metadata include type, and last update time.

    Args:
        urns: The urns of the objects to open.

    Yields:
        A dict of metadata.

    Raises:
        ValueError: A string was passed instead of an iterable.
    """
    if isinstance(urns, string_types):
        raise ValueError("Expected an iterable, not string.")
    for subject, values in data_store.DB.MultiResolvePrefix(
            urns, ["aff4:type", "metadata:last"]):
        res = dict(urn=rdfvalue.RDFURN(subject))
        for v in values:
            if v[0] == "aff4:type":
                res["type"] = v
            elif v[0] == "metadata:last":
                res["last"] = rdfvalue.RDFDatetime(v[1])
        yield res
def _get_team_stats_table(self, selector):
    """Helper function for stats tables on season pages.

    Returns a DataFrame."""
    doc = self.get_main_doc()
    table = doc(selector)
    df = sportsref.utils.parse_table(table)
    df.set_index('team_id', inplace=True)
    return df
def write(self, data, assert_ss=True, deassert_ss=True):
    """Half-duplex SPI write. If assert_ss is True, the SS line will be
    asserted low, the specified bytes will be clocked out the MOSI line,
    and if deassert_ss is True the SS line will be put back high.
    """
    # Fail if MOSI is not specified.
    if self._mosi is None:
        raise RuntimeError('Write attempted with no MOSI pin specified.')
    if assert_ss and self._ss is not None:
        self._gpio.set_low(self._ss)
    for byte in data:
        for i in range(8):
            # Write bit to MOSI.
            if self._write_shift(byte, i) & self._mask:
                self._gpio.set_high(self._mosi)
            else:
                self._gpio.set_low(self._mosi)
            # Flip clock off base.
            self._gpio.output(self._sclk, not self._clock_base)
            # Return clock to base.
            self._gpio.output(self._sclk, self._clock_base)
    if deassert_ss and self._ss is not None:
        self._gpio.set_high(self._ss)
def authentication_url(self):
    """Redirect your users to here to authenticate them."""
    params = {
        'client_id': self.client_id,
        'response_type': self.type,
        'redirect_uri': self.callback_url
    }
    return AUTHENTICATION_URL + "?" + urlencode(params)
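# Sketch of the URL this property produces; the endpoint and credentials
# below are placeholders, not values from the source:
from urllib.parse import urlencode

params = {'client_id': 'abc123',
          'response_type': 'code',
          'redirect_uri': 'https://app.example.com/callback'}
print("https://auth.example.com/authorize" + "?" + urlencode(params))
# https://auth.example.com/authorize?client_id=abc123&response_type=code&...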
def get_text_rule_groups(declarations):
    """ Given a list of declarations, return a list of output.Rule objects.
    """
    property_map = {'text-anchor-dx': 'anchor_dx',  # does nothing
                    'text-anchor-dy': 'anchor_dy',  # does nothing
                    'text-align': 'horizontal_alignment',
                    'text-allow-overlap': 'allow_overlap',
                    'text-avoid-edges': 'avoid_edges',
                    'text-character-spacing': 'character_spacing',
                    'text-dx': 'dx',
                    'text-dy': 'dy',
                    'text-face-name': 'face_name',
                    'text-fill': 'fill',
                    'text-fontset': 'fontset',
                    'text-halo-fill': 'halo_fill',
                    'text-halo-radius': 'halo_radius',
                    'text-justify-align': 'justify_alignment',
                    'text-label-position-tolerance': 'label_position_tolerance',
                    'text-line-spacing': 'line_spacing',
                    'text-max-char-angle-delta': 'max_char_angle_delta',
                    'text-min-distance': 'minimum_distance',
                    'text-placement': 'label_placement',
                    'text-ratio': 'text_ratio',
                    'text-size': 'size',
                    'text-spacing': 'spacing',
                    'text-transform': 'text_convert',
                    'text-vertical-align': 'vertical_alignment',
                    'text-wrap-width': 'wrap_width',
                    'text-meta-output': 'meta-output',
                    'text-meta-writer': 'meta-writer'}

    property_names = property_map.keys()

    # pull out all the names
    text_names = [dec.selector.elements[1].names[0]
                  for dec in declarations
                  if len(dec.selector.elements) == 2
                  and len(dec.selector.elements[1].names) == 1]

    # a place to put groups
    groups = []

    # a separate style element for each text name
    for text_name in set(text_names):
        # just the ones we care about here.
        # the complicated conditional means: get all declarations that
        # apply to this text_name specifically, or text in general.
        name_declarations = [dec for dec in declarations
                             if dec.property.name in property_map
                             and (len(dec.selector.elements) == 1
                                  or (len(dec.selector.elements) == 2
                                      and dec.selector.elements[1].names[0]
                                      in (text_name, '*')))]

        # a place to put rules
        rules = []

        for (filter, values) in filtered_property_declarations(
                name_declarations, property_names):
            def get(key):
                # `x and y or None` preserves the original semantics:
                # missing (or falsy) values become None.
                return key in values and values[key].value or None

            face_name = get('text-face-name')
            fontset = get('text-fontset')
            size = 'text-size' in values and values['text-size'].value
            color = 'text-fill' in values and values['text-fill'].value

            ratio = get('text-ratio')
            wrap_width = get('text-wrap-width')
            label_spacing = get('text-spacing')
            label_position_tolerance = get('text-label-position-tolerance')
            max_char_angle_delta = get('text-max-char-angle-delta')
            halo_color = get('text-halo-fill')
            halo_radius = get('text-halo-radius')
            dx = get('text-dx')
            dy = get('text-dy')
            avoid_edges = get('text-avoid-edges')
            minimum_distance = get('text-min-distance')
            allow_overlap = get('text-allow-overlap')
            label_placement = get('text-placement')
            text_transform = get('text-transform')
            anchor_dx = get('text-anchor-dx')
            anchor_dy = get('text-anchor-dy')
            horizontal_alignment = get('text-horizontal-align')
            vertical_alignment = get('text-vertical-align')
            justify_alignment = get('text-justify-align')
            line_spacing = get('text-line-spacing')
            character_spacing = get('text-character-spacing')

            if (face_name or fontset) and size and color:
                symbolizer = output.TextSymbolizer(
                    text_name, face_name, size, color,
                    wrap_width, label_spacing, label_position_tolerance,
                    max_char_angle_delta, halo_color, halo_radius, dx, dy,
                    avoid_edges, minimum_distance, allow_overlap,
                    label_placement, line_spacing, character_spacing,
                    text_transform, fontset, anchor_dx, anchor_dy,
                    horizontal_alignment, vertical_alignment,
                    justify_alignment)
                rules.append(make_rule(filter, symbolizer))

        groups.append((text_name, rules))

    return dict(groups)
Given a list of declarations, return a list of output.Rule objects.
def rotate_z(self, angle): """ Rotates mesh about the z-axis. Parameters ---------- angle : float Angle in degrees to rotate about the z-axis. """ axis_rotation(self.points, angle, inplace=True, axis='z')
Rotates mesh about the z-axis. Parameters ---------- angle : float Angle in degrees to rotate about the z-axis.
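As context for what `axis_rotation` is assumed to do here, a minimal standalone sketch of rotating a point array about the z-axis with the standard rotation matrix; the real helper may differ in conventions (degrees vs. radians, in-place vs. copy):

import numpy as np

def rotate_points_z(points, angle_deg):
    # Build the standard 3x3 rotation matrix about the z-axis.
    theta = np.deg2rad(angle_deg)
    c, s = np.cos(theta), np.sin(theta)
    rot = np.array([[c, -s, 0.0],
                    [s,  c, 0.0],
                    [0.0, 0.0, 1.0]])
    # Rotate each (x, y, z) row vector.
    return points @ rot.T

pts = np.array([[1.0, 0.0, 0.0]])
print(rotate_points_z(pts, 90.0))  # approximately [[0., 1., 0.]]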
def _get_organisations(self): """ :returns: list of organisations, sorted alphabetically :rtype: list(list(str)) """ organisations = [] for child in self.vcard.getChildren(): if child.name == "ORG": organisations.append(child.value) return sorted(organisations)
:returns: list of organisations, sorted alphabetically :rtype: list(list(str))
def getPlainText(self, identify=None): """ Convenience function for templates which want access to the raw text, without XML tags. """ frags = getattr(self, 'frags', None) if frags: plains = [] for frag in frags: if hasattr(frag, 'text'): plains.append(frag.text) return ''.join(plains) elif identify: text = getattr(self, 'text', None) if text is None: text = repr(self) return text else: return ''
Convenience function for templates which want access to the raw text, without XML tags.
def add_common_files_to_file_list(self): ''' The (several thousands) common-disease files from the repo tarball are added to the files object. try adding the 'common-disease-mondo' files as well? ''' repo_dir = '/'.join((self.rawdir, 'git')) common_disease_dir = '/'.join(( repo_dir, 'monarch-initiative-hpo-annotation-*', 'common-diseases-mondo/*.tab')) # add the files to the self.files object filelist = glob.glob(common_disease_dir) fcount = 0 for small_file in filelist: if small_file[-4:] == '.tab': fcount += 1 self.files[ 'common' + str(fcount).zfill(7)] = { 'file': '/'.join((common_disease_dir, small_file)), } LOG.info("Found %d common disease files", fcount) return
The (several thousands) common-disease files from the repo tarball are added to the files object. try adding the 'common-disease-mondo' files as well?
def import_locations(self, data, index='WMO'): """Parse NOAA weather station data files. ``import_locations()`` returns a dictionary with keys containing either the WMO or ICAO identifier, and values that are ``Station`` objects that describes the large variety of data exported by NOAA_. It expects data files in one of the following formats:: 00;000;PABL;Buckland, Buckland Airport;AK;United States;4;65-58-56N;161-09-07W;;;7;; 01;001;ENJA;Jan Mayen;;Norway;6;70-56N;008-40W;70-56N;008-40W;10;9;P 01;002;----;Grahuken;;Norway;6;79-47N;014-28E;;;;15; or:: AYMD;94;014;Madang;;Papua New Guinea;5;05-13S;145-47E;05-13S;145-47E;3;5;P AYMO;--;---;Manus Island/Momote;;Papua New Guinea;5;02-03-43S;147-25-27E;;;4;; AYPY;94;035;Moresby;;Papua New Guinea;5;09-26S;147-13E;09-26S;147-13E;38;49;P Files containing the data in this format can be downloaded from the :abbr:`NOAA (National Oceanographic and Atmospheric Administration)`'s site in their `station location page`_. WMO indexed files downloaded from the :abbr:`NOAA (National Oceanographic and Atmospheric Administration)` site when processed by ``import_locations()`` will return ``dict`` object of the following style:: {'00000': Station('PABL', 'Buckland, Buckland Airport', 'AK', 'United States', 4, 65.982222. -160.848055, None, None, 7, False), '01001'; Station('ENJA', Jan Mayen, None, 'Norway', 6, 70.933333, -7.333333, 70.933333, -7.333333, 10, 9, True), '01002': Station(None, 'Grahuken', None, 'Norway', 6, 79.783333, 13.533333, None, None, 15, False)} And ``dict`` objects such as the following will be created when ICAO indexed data files are processed:: {'AYMD': Station("94", "014", "Madang", None, "Papua New Guinea", 5, -5.216666, 145.783333, -5.216666, 145.78333333333333, 3, 5, True, 'AYMO': Station(None, None, "Manus Island/Momote", None, "Papua New Guinea", 5, -2.061944, 147.424166, None, None, 4, False, 'AYPY': Station("94", "035", "Moresby", None, "Papua New Guinea", 5, -9.433333, 147.216667, -9.433333, 147.216667, 38, 49, True} Args: data (iter): NOAA station data to read index (str): The identifier type used in the file Returns: dict: WMO locations with `Station` objects Raises: FileFormatError: Unknown file format .. _NOAA: http://weather.noaa.gov/ .. _station location page: http://weather.noaa.gov/tg/site.shtml """ self._data = data data = utils.prepare_read(data) for line in data: line = line.strip() chunk = line.split(';') if not len(chunk) == 14: if index == 'ICAO': # Some entries only have 12 or 13 elements, so we assume 13 # and 14 are None. Of the entries I've hand checked this # assumption would be correct. logging.debug('Extending ICAO %r entry, because it is ' 'too short to process' % line) chunk.extend(['', '']) elif index == 'WMO' and len(chunk) == 13: # A few of the WMO indexed entries are missing their RBSN # fields, hand checking the entries for 71046 and 71899 # shows that they are correct if we just assume RBSN is # false. 
logging.debug('Extending WMO %r entry, because it is ' 'too short to process' % line) chunk.append('') else: raise utils.FileFormatError('NOAA') if index == 'WMO': identifier = ''.join(chunk[:2]) alt_id = chunk[2] elif index == 'ICAO': identifier = chunk[0] alt_id = ''.join(chunk[1:3]) else: raise ValueError('Unknown format %r' % index) if alt_id in ('----', '-----'): alt_id = None name = chunk[3] state = chunk[4] if chunk[4] else None country = chunk[5] wmo = int(chunk[6]) if chunk[6] else None point_data = [] for i in chunk[7:11]: if not i: point_data.append(None) continue # Some entries in nsd_cccc.txt are of the format "DD-MM- # N", so we just take the spaces to mean 0 seconds. if ' ' in i: logging.debug('Fixing unpadded location data in %r entry' % line) i = i.replace(' ', '0') values = map(int, i[:-1].split('-')) if i[-1] in ('S', 'W'): values = [-x for x in values] point_data.append(point.utils.to_dd(*values)) latitude, longitude, ua_latitude, ua_longitude = point_data altitude = int(chunk[11]) if chunk[11] else None ua_altitude = int(chunk[12]) if chunk[12] else None rbsn = False if not chunk[13] else True self[identifier] = Station(alt_id, name, state, country, wmo, latitude, longitude, ua_latitude, ua_longitude, altitude, ua_altitude, rbsn)
Parse NOAA weather station data files. ``import_locations()`` returns a dictionary with keys containing either the WMO or ICAO identifier, and values that are ``Station`` objects that describes the large variety of data exported by NOAA_. It expects data files in one of the following formats:: 00;000;PABL;Buckland, Buckland Airport;AK;United States;4;65-58-56N;161-09-07W;;;7;; 01;001;ENJA;Jan Mayen;;Norway;6;70-56N;008-40W;70-56N;008-40W;10;9;P 01;002;----;Grahuken;;Norway;6;79-47N;014-28E;;;;15; or:: AYMD;94;014;Madang;;Papua New Guinea;5;05-13S;145-47E;05-13S;145-47E;3;5;P AYMO;--;---;Manus Island/Momote;;Papua New Guinea;5;02-03-43S;147-25-27E;;;4;; AYPY;94;035;Moresby;;Papua New Guinea;5;09-26S;147-13E;09-26S;147-13E;38;49;P Files containing the data in this format can be downloaded from the :abbr:`NOAA (National Oceanographic and Atmospheric Administration)`'s site in their `station location page`_. WMO indexed files downloaded from the :abbr:`NOAA (National Oceanographic and Atmospheric Administration)` site when processed by ``import_locations()`` will return ``dict`` object of the following style:: {'00000': Station('PABL', 'Buckland, Buckland Airport', 'AK', 'United States', 4, 65.982222. -160.848055, None, None, 7, False), '01001'; Station('ENJA', Jan Mayen, None, 'Norway', 6, 70.933333, -7.333333, 70.933333, -7.333333, 10, 9, True), '01002': Station(None, 'Grahuken', None, 'Norway', 6, 79.783333, 13.533333, None, None, 15, False)} And ``dict`` objects such as the following will be created when ICAO indexed data files are processed:: {'AYMD': Station("94", "014", "Madang", None, "Papua New Guinea", 5, -5.216666, 145.783333, -5.216666, 145.78333333333333, 3, 5, True, 'AYMO': Station(None, None, "Manus Island/Momote", None, "Papua New Guinea", 5, -2.061944, 147.424166, None, None, 4, False, 'AYPY': Station("94", "035", "Moresby", None, "Papua New Guinea", 5, -9.433333, 147.216667, -9.433333, 147.216667, 38, 49, True} Args: data (iter): NOAA station data to read index (str): The identifier type used in the file Returns: dict: WMO locations with `Station` objects Raises: FileFormatError: Unknown file format .. _NOAA: http://weather.noaa.gov/ .. _station location page: http://weather.noaa.gov/tg/site.shtml
def BLX(self, params): """ BLX Rj Branch to the address in Rj, storing the address of the next instruction in the Link Register """ Rj = self.get_one_parameter(self.ONE_PARAMETER, params) self.check_arguments(LR_or_general_purpose_registers=(Rj,)) def BLX_func(): self.register['LR'] = self.register['PC'] # No need for the + 1, PC already points to the next instruction self.register['PC'] = self.register[Rj] return BLX_func
BLX Rj Branch to the address in Rj, storing the address of the next instruction in the Link Register
def cleanup(self, sched, coro): """Remove this coro from the waiting for signal queue.""" try: sched.sigwait[self.name].remove((self, coro)) except ValueError: pass return True
Remove this coro from the waiting for signal queue.
def init(cls, repo_dir=None, temp=False, initial_commit=False): """Run `git init` in the repo_dir. Defaults to current working directory if repo_dir is not supplied. If 'temp' is True, a temporary directory will be created for you and the repository will be initialized. The tempdir is scheduled for deletion (when the process exits) through an exit function registered with the atexit module. If 'temp' is True, repo_dir is ignored. """ if temp: suffix = '.temp_simpl_GitRepo' repo_dir = create_tempdir(suffix=suffix, delete=True) else: repo_dir = repo_dir or os.getcwd() git_init(repo_dir) instance = cls(repo_dir) # NOTE(larsbutler): If we wanted to be defensive about this and favor # compatibility over elegance, we could just automatically add a # `git commit` (empty, no message) after every `git init`. I would # recommend doing this in the :class:`GitRepo` class, not in the # module-level util functions. Adding an extra commit shouldn't cause # any problems. if initial_commit: # unknown revision, needs a commit to run most commands instance.commit( message='Initial commit', amend=False, stage=False) return instance
Run `git init` in the repo_dir. Defaults to current working directory if repo_dir is not supplied. If 'temp' is True, a temporary directory will be created for you and the repository will be initialized. The tempdir is scheduled for deletion (when the process exits) through an exit function registered with the atexit module. If 'temp' is True, repo_dir is ignored.
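A brief usage sketch; the directory path is hypothetical:

# Initialize a repository in an existing directory, no bootstrap commit:
repo = GitRepo.init('/tmp/myrepo')
# Or let the helper create a temporary repo (deleted at process exit)
# with an initial empty commit so that most git commands work right away:
tmp_repo = GitRepo.init(temp=True, initial_commit=True)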
def parse(self, fd): """very simple parser - but why would we want it to be complex?""" def resolve_args(args): # FIXME break this out, it's in common with the templating stuff elsewhere root = self.sections[0] val_dict = dict(('<' + t + '>', u) for (t, u) in root.get_variables().items()) resolved_args = [] for arg in args: for subst, value in val_dict.items(): arg = arg.replace(subst, value) resolved_args.append(arg) return resolved_args def handle_section_defn(keyword, parts): if keyword == '@HostAttrs': if len(parts) != 1: raise ParserException('usage: @HostAttrs <hostname>') if self.sections[0].has_pending_with(): raise ParserException('@with not supported with @HostAttrs') self.sections.append(HostAttrs(parts[0])) return True if keyword == 'Host': if len(parts) != 1: raise ParserException('usage: Host <hostname>') self.sections.append(Host(parts[0], self.sections[0].pop_pending_with())) return True def handle_vardef(root, keyword, parts): if keyword == '@with': root.add_pending_with(parts) return True def handle_set_args(_, parts): if len(parts) == 0: raise ParserException('usage: @args arg-name ...') if not self.is_include(): return if self._args is None or len(self._args) != len(parts): raise ParserException('required arguments not passed to include {url} ({parts})'.format( url=self._url, parts=', '.join(parts)) ) root = self.sections[0] for key, value in zip(parts, self._args): root.set_value(key, value) def handle_set_value(_, parts): if len(parts) != 2: raise ParserException('usage: @set <key> <value>') root = self.sections[0] root.set_value(*resolve_args(parts)) def handle_add_type(section, parts): if len(parts) != 1: raise ParserException('usage: @is <HostAttrName>') section.add_type(parts[0]) def handle_via(section, parts): if len(parts) != 1: raise ParserException('usage: @via <Hostname>') section.add_line( 'ProxyCommand', ('ssh {args} nc %h %p 2> /dev/null'.format(args=pipes.quote(resolve_args(parts)[0])), ) ) def handle_identity(section, parts): if len(parts) != 1: raise ParserException('usage: @identity <name>') section.add_identity(resolve_args(parts)[0]) def handle_include(_, parts): if len(parts) == 0: raise ParserException('usage: @include <https://...|/path/to/file.sedge> [arg ...]') url = parts[0] parsed_url = urllib.parse.urlparse(url) if parsed_url.scheme == 'https': req = requests.get(url, verify=self._verify_ssl) text = req.text elif parsed_url.scheme == 'file': with open(parsed_url.path) as fd: text = fd.read() elif parsed_url.scheme == '': path = os.path.expanduser(url) with open(path) as fd: text = fd.read() else: raise SecurityException('error: @includes may only use paths or https:// or file:// URLs') subconfig = SedgeEngine( self._key_library, StringIO(text), self._verify_ssl, url=url, args=resolve_args(parts[1:]), parent_keydefs=self.keydefs, via_include=True) self.includes.append((url, subconfig)) def handle_keydef(_, parts): if len(parts) < 2: raise ParserException('usage: @key <name> [fingerprint]...') name = parts[0] fingerprints = parts[1:] self.keydefs[name] = fingerprints def handle_keyword(section, keyword, parts): handlers = { '@set': handle_set_value, '@args': handle_set_args, '@is': handle_add_type, '@via': handle_via, '@include': handle_include, '@key': handle_keydef, '@identity': handle_identity } if keyword in handlers: handlers[keyword](section, parts) return True for line in (t.strip() for t in fd): if line.startswith('#') or line == '': continue keyword, parts = SedgeEngine.parse_config_line(line) if handle_section_defn(keyword, parts): 
continue if handle_vardef(self.sections[0], keyword, parts): continue current_section = self.sections[-1] if handle_keyword(current_section, keyword, parts): continue if keyword.startswith('@'): raise ParserException("unknown expansion keyword {}".format(keyword)) # use other rather than parts to avoid messing up user # whitespace; we don't handle quotes in here as we don't # need to current_section.add_line(keyword, parts)
very simple parser - but why would we want it to be complex?
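To make the grammar concrete, here is an illustrative config this parser should accept, reconstructed from the keyword handlers above; every hostname and value is made up. `@set` defines a variable later referenced as `<user>`, and `@via` expands into a ProxyCommand through the named host:

@set user deploy

Host bastion
    HostName bastion.example.com
    User <user>

Host internal-db
    @via bastion
    HostName 10.0.0.5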
def untag(self): """ Copies the object, removing any special tagging from it :return: An Asn1Value object """ new_obj = self.__class__() new_obj._copy(self, copy.deepcopy) return new_obj
Copies the object, removing any special tagging from it :return: An Asn1Value object
def main(arguments): """Parse arguments, request the urls, notify if different.""" formatter_class = argparse.ArgumentDefaultsHelpFormatter parser = argparse.ArgumentParser(description=__doc__, formatter_class=formatter_class) parser.add_argument('infile', help="Input file", type=argparse.FileType('r')) parser.add_argument('-o', '--outfile', help="Output file", default=sys.stdout, type=argparse.FileType('w')) args = parser.parse_args(arguments) urls = args.infile.read().splitlines() api_token = keyring.get_password('pushover', 'api_token') pushover_user = keyring.get_password('pushover', 'user') pushover = Pushover(api_token, pushover_user) for url in urls: domain = urlparse(url).netloc urlpath = urlparse(url).path url_dashes = re.sub(r'/', '-', urlpath) cache = os.path.expanduser("~/.urlmon-cache") if not os.path.isdir(cache): os.mkdir(cache, mode=0o755) filename = domain + url_dashes + '.html' filepath = os.path.join(cache, filename) html = requests.get(url).text if os.path.isfile(filepath): with open(filepath) as r: before = r.read() if html == before: logger.info("{} is unchanged".format(url)) else: msg = "{} changed".format(url) logger.info(msg) logger.debug(diff(before, html)) response = pushover.push(msg) logger.debug("Pushover notification sent: " "{}".format(response.status_code)) else: logger.info("New url: {}".format(filename)) with open(filepath, 'w') as w: w.write(html) logger.info("Wrote file to cache: {}".format(filename))
Parse arguments, request the urls, notify if different.
def local_manager_is_default(self, adm_gid, gid): """Check whether gid is default group for local manager group. """ config = self.root['settings']['ugm_localmanager'].attrs rule = config[adm_gid] if gid not in rule['target']: raise Exception(u"group '%s' not managed by '%s'" % (gid, adm_gid)) return gid in rule['default']
Check whether gid is default group for local manager group.
def _InstallInstallers(self): """Install the installer built by RepackTemplates.""" # 32 bit binary will refuse to install on a 64bit system so we only install # the 64 bit version installer_amd64 = glob.glob( os.path.join(args.output_dir, "dbg_*_amd64.exe")).pop() self._CleanupInstall() # The exit code is always 0, test to see if install was actually successful. subprocess.check_call([installer_amd64]) self._CheckInstallSuccess()
Install the installer built by RepackTemplates.
def deserialize_date(string): """ Deserializes string to date. :param string: str. :type string: str :return: date. :rtype: date """ try: from dateutil.parser import parse return parse(string).date() except ImportError: return string
Deserializes string to date. :param string: str. :type string: str :return: date. :rtype: date
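A quick usage sketch; note the fallback quietly returns the raw string when python-dateutil is not installed, so callers may want to handle both types:

from datetime import date

d = deserialize_date("2019-07-16")
# With dateutil available: datetime.date(2019, 7, 16)
# Without it: the original string "2019-07-16"
if isinstance(d, date):
    print(d.isoformat())
else:
    print("dateutil missing, got raw string:", d)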
def clean_report(self, options, sosreport): # pragma: no cover '''this is the primary function, to put everything together and analyze an sosreport''' if options.report_dir: # override the default location for artifacts (/tmp) if os.path.isdir(options.report_dir): self.report_dir = options.report_dir self.origin_path, self.dir_path, self.session, self.logfile, self.uuid = self._prep_environment() self._start_logging(self.logfile) self._get_disclaimer() if options.domains: self.domains = options.domains if options.keywords: self.keywords = options.keywords self._keywords2db() if not sosreport: if not options.files: raise Exception("Error: You must supply either an sosreport and/or files to process") self.logger.con_out("No sosreport supplied. Only processing specific files") self._clean_files_only(options.files) else: # we DO have an sosreport to analyze self.report = self._extract_sosreport(sosreport) self._make_dest_env() # create the working directory if options.hostname_path: self.hostname, self.domainname = self._get_hostname(options.hostname_path) else: self.hostname, self.domainname = self._get_hostname() if options.files: self._add_extra_files(options.files) if self.hostname: # if we have a hostname that's not a None type self.hn_db['host0'] = self.hostname # we'll prime the hostname pump to clear out a ton of useless logic later self._process_hosts_file() # we'll take a dig through the hosts file and make sure it is as scrubbed as possible self._domains2db() files = self._file_list(self.dir_path) self.logger.con_out("IP Obfuscation Start Address - %s", self.start_ip) self.logger.con_out("*** SOSCleaner Processing ***") self.logger.info("Working Directory - %s", self.dir_path) for f in files: self.logger.debug("Cleaning %s", f) self._clean_file(f) self.logger.con_out("*** SOSCleaner Statistics ***") self.logger.con_out("IP Addresses Obfuscated - %s", len(self.ip_db)) self.logger.con_out("Hostnames Obfuscated - %s" , len(self.hn_db)) self.logger.con_out("Domains Obfuscated - %s" , len(self.dn_db)) self.logger.con_out("Total Files Analyzed - %s", self.file_count) self.logger.con_out("*** SOSCleaner Artifacts ***") self._create_reports() self._create_archive() return_data = [self.archive_path, self.logfile, self.ip_report] if self.hostname: return_data.append(self.hn_report) if len(self.dn_db) >= 1: return_data.append(self.dn_report) return return_data
this is the primary function, to put everything together and analyze an sosreport
def ucas_download_single(url, output_dir = '.', merge = False, info_only = False, **kwargs): '''video page''' html = get_content(url) # resourceID is UUID resourceID = re.findall( r'resourceID":"([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})', html)[0] assert resourceID != '', 'Cannot find resourceID!' title = match1(html, r'<div class="bc-h">(.+)</div>') url_lists = _ucas_get_url_lists_by_resourceID(resourceID) assert url_lists, 'Cannot find any URL of such class!' for k, part in enumerate(url_lists): part_title = title + '_' + str(k) print_info(site_info, part_title, 'flv', 0) if not info_only: download_urls(part, part_title, 'flv', total_size=None, output_dir=output_dir, merge=merge)
video page
def add_url_rule(self, rule, endpoint=None, view_func=None, **options): """ A helper method to register a rule (and optionally a view function) to the application. The endpoint is automatically prefixed with the blueprint's name. """ if self.url_prefix: rule = self.url_prefix + rule options.setdefault('subdomain', self.subdomain) if endpoint is None: endpoint = _endpoint_from_view_func(view_func) defaults = self.url_defaults if 'defaults' in options: defaults = dict(defaults, **options.pop('defaults')) self.app.add_url_rule(rule, endpoint, view_func, defaults=defaults, **options)
A helper method to register a rule (and optionally a view function) to the application. The endpoint is automatically prefixed with the blueprint's name.
def bounds(filename, start_re, end_re, encoding='utf8'): """ Compute chunk bounds from text file according to start_re and end_re: yields (start_match, Bounds) tuples. """ start_re, end_re = re.compile(start_re), re.compile(end_re) mo, line_start, line_end, byte_start, byte_end = [None]*5 offset = 0 with open(filename, 'rb') as f: for index, line in enumerate(f): line_text = line.decode(encoding) start_match = re.match(start_re, line_text) if start_match: mo, line_start, byte_start = start_match, index, offset offset += len(line) end_match = re.match(end_re, line_text) if end_match: yield mo, Bounds(line_start, index, byte_start, offset) mo, line_start, line_end, byte_start, byte_end = [None]*5
Compute chunk bounds from text file according to start_re and end_re: yields (start_match, Bounds) tuples.
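A hedged usage sketch; `Bounds` is assumed to be a 4-field namedtuple of (line_start, line_end, byte_start, byte_end), matching the constructor call above, and the file name and patterns are made up:

# chunks.txt holds blocks delimited by lines like "BEGIN foo" ... "END"
for match, b in bounds('chunks.txt', r'BEGIN (\w+)', r'END'):
    print(match.group(1), b.line_start, b.line_end, b.byte_start, b.byte_end)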
def _interpret_oserror(exc, cwd, cmd): """Interpret an OSError exc and raise the appropriate dbt exception. """ if len(cmd) == 0: raise dbt.exceptions.CommandError(cwd, cmd) # all of these functions raise unconditionally if os.name == 'nt': _handle_windows_error(exc, cwd, cmd) else: _handle_posix_error(exc, cwd, cmd) # this should not be reachable, raise _something_ at least! raise dbt.exceptions.InternalException( 'Unhandled exception in _interpret_oserror: {}'.format(exc) )
Interpret an OSError exc and raise the appropriate dbt exception.
def get(quantity, min_type=EventType.firstevent, max_type=EventType.lastevent): """Return events at the front of the event queue, within the specified minimum and maximum type, and remove them from the queue. Args: quantity (int): The maximum number of events to return. min_type (int): The minimum value for the event type of the returned events. max_type (int): The maximum value for the event type of the returned events. Returns: List[Event]: Events from the front of the event queue. Raises: SDLError: If there was an error retrieving the events. """ return _peep(quantity, lib.SDL_GETEVENT, min_type, max_type)
Return events at the front of the event queue, within the specified minimum and maximum type, and remove them from the queue. Args: quantity (int): The maximum number of events to return. min_type (int): The minimum value for the event type of the returned events. max_type (int): The maximum value for the event type of the returned events. Returns: List[Event]: Events from the front of the event queue. Raises: SDLError: If there was an error retrieving the events.
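A short usage sketch, relying only on the defaults shown in the signature:

# Drain up to 32 pending events of any type from the front of the queue.
for event in get(32):
    print(event)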
def _delete(self, **kwargs): """wrapped with delete, override that in a subclass to customize """ requests_params = self._handle_requests_params(kwargs) delete_uri = self._meta_data['uri'] session = self._meta_data['bigip']._meta_data['icr_session'] # Check the generation for match before delete force = self._check_force_arg(kwargs.pop('force', True)) if not force: self._check_generation() response = session.delete(delete_uri, **requests_params) if response.status_code == 200: self.__dict__ = {'deleted': True}
wrapped with delete, override that in a subclass to customize
def get_role_secret_id_accessor(self, role_name, secret_id_accessor, mount_point='approle'): """POST /auth/<mount_point>/role/<role name>/secret-id-accessor/lookup :param role_name: :type role_name: :param secret_id_accessor: :type secret_id_accessor: :param mount_point: :type mount_point: :return: :rtype: """ url = '/v1/auth/{0}/role/{1}/secret-id-accessor/lookup'.format(mount_point, role_name) params = {'secret_id_accessor': secret_id_accessor} return self._adapter.post(url, json=params).json()
POST /auth/<mount_point>/role/<role name>/secret-id-accessor/lookup :param role_name: :type role_name: :param secret_id_accessor: :type secret_id_accessor: :param mount_point: :type mount_point: :return: :rtype:
def Md5Hex(filename=None, contents=None): ''' :param unicode filename: The file from which the md5 should be calculated. If the filename is given, the contents should NOT be given. :param unicode contents: The contents for which the md5 should be calculated. If the contents are given, the filename should NOT be given. :rtype: unicode :returns: Returns a string with the hex digest of the stream. ''' import io import hashlib md5 = hashlib.md5() if filename: stream = io.open(filename, 'rb') try: while True: data = stream.read(md5.block_size * 128) if not data: break md5.update(data) finally: stream.close() else: md5.update(contents) return six.text_type(md5.hexdigest())
:param unicode filename: The file from which the md5 should be calculated. If the filename is given, the contents should NOT be given. :param unicode contents: The contents for which the md5 should be calculated. If the contents are given, the filename should NOT be given. :rtype: unicode :returns: Returns a string with the hex digest of the stream.
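Usage sketch; note that under Python 3 the `contents` argument must be bytes, since `hashlib.md5().update()` rejects text strings:

print(Md5Hex(contents=b'hello world'))
# -> '5eb63bbbe01eeed093cb22bb8f5acdc3'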
def query_folder(self, dir_name):
    """Query directory attributes (https://www.qcloud.com/document/product/436/6063)

    :param dir_name: name of the directory to query
    :return: the query result, in JSON format
    """
    if dir_name[0] == '/':
        dir_name = dir_name[1:len(dir_name)]
    self.url = 'http://' + self.config.region + '.file.myqcloud.com' + '/files/v2/' + str(self.config.app_id) + '/' + self.config.bucket + '/' + dir_name + '/?op=stat'
    self.headers['Authorization'] = CosAuth(self.config).sign_more(self.config.bucket, '', 30)
    response, content = self.http.request(uri=self.url, method='GET', headers=self.headers)
    return content.decode("utf8")
Query directory attributes (https://www.qcloud.com/document/product/436/6063) :param dir_name: name of the directory to query :return: the query result, in JSON format
def repair(self, volume_id_or_uri, timeout=-1): """ Removes extra presentations from a specified volume on the storage system. Args: volume_id_or_uri: Can be either the volume id or the volume uri. timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView, just stops waiting for its completion. Returns: dict: Storage volume. """ data = { "type": "ExtraManagedStorageVolumePaths", "resourceUri": self._client.build_uri(volume_id_or_uri) } custom_headers = {'Accept-Language': 'en_US'} uri = self.URI + '/repair' return self._client.create(data, uri=uri, timeout=timeout, custom_headers=custom_headers)
Removes extra presentations from a specified volume on the storage system. Args: volume_id_or_uri: Can be either the volume id or the volume uri. timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView, just stops waiting for its completion. Returns: dict: Storage volume.
def get_fields_dict(self, row): """ Returns a dict of field name and cleaned value pairs to initialize the model. Beware, it aligns the lists of fields and row values with Nones to allow for adding fields not found in the CSV. Whitespace around the value of the cell is stripped. """ return {k: getattr(self, 'clean_{}'.format(k), lambda x: x)(v.strip() if isinstance(v, str) else None) for k, v in zip_longest(self.get_fields(), row)}
Returns a dict of field name and cleaned value pairs to initialize the model. Beware, it aligns the lists of fields and row values with Nones to allow for adding fields not found in the CSV. Whitespace around the value of the cell is stripped.
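A self-contained sketch of the same pattern, showing how `zip_longest` pads missing cells with None and how per-field `clean_<name>` hooks are resolved by name; the class and field names here are hypothetical:

from itertools import zip_longest

class RowImporter:
    def get_fields(self):
        return ['name', 'age', 'city']

    def clean_age(self, value):
        # Per-field cleaner, looked up as 'clean_' + field name.
        return int(value) if value else None

    def get_fields_dict(self, row):
        return {k: getattr(self, 'clean_{}'.format(k), lambda x: x)(
                    v.strip() if isinstance(v, str) else None)
                for k, v in zip_longest(self.get_fields(), row)}

print(RowImporter().get_fields_dict(['Ada ', '36']))
# {'name': 'Ada', 'age': 36, 'city': None}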
def _commit(self, config_text=None, force=False, reload_original_config=True): """ This method is the same as the :py:method:`commit`: method, however, it has an extra command that will trigger the reload of the running config. The reason behind this is that in some circumstances you don´ want to reload the running config, for example, when doing a rollback. See :py:method:`commit`: for more details. """ def _execute(config_text): if config_text is None: config_text = self.compare_config() config_vdom = "" if self.vdom is None: pre = '' elif not 'global' in self.vdom: config_vdom = 'conf vdom\n edit %s\n' % self.vdom pre = 'conf global\n ' else: pre = 'conf global\n ' cmd = '%sexecute batch start\n' % pre cmd += config_vdom cmd += config_text cmd += '\nexecute batch end\n' self.execute_command(cmd) last_log = self.execute_command('%sexecute batch lastlog' % pre) return self._parse_batch_lastlog(last_log) logger.info('Committing config ') wrong_commands = _execute(config_text) self._reload_config(reload_original_config) retry_codes = [-3, -23] retries = 5 while retries > 0: retries -= 1 for wc in wrong_commands: if int(wc[0]) in retry_codes: if config_text is None: config_text = self.compare_config() wrong_commands = _execute(config_text) self._reload_config(reload_original_config=False) break if len(wrong_commands) > 0: exit_code = -2 logging.debug('List of commands that failed: %s' % wrong_commands) if not force: exit_code = -1 self.rollback() if exit_code < 0: raise exceptions.FailedCommit(wrong_commands)
This method is the same as the :py:method:`commit`: method, however, it has an extra command that will trigger the reload of the running config. The reason behind this is that in some circumstances you don't want to reload the running config, for example, when doing a rollback. See :py:method:`commit`: for more details.
def search_files(self, search=None): """ Search for files, returning a FileRecord for each result. FileRecords have two additional methods patched into them, get_url() and download_to(file_name), which will retrieve the URL for the file content and download that content to a named file on disk, respectively. :param FileRecordSearch search: an instance of :class:`meteorpi_model.FileRecordSearch` - see the model docs for details on how to construct this :return: an object containing 'count' and 'files'. 'files' is a sequence of FileRecord objects containing the results of the search, and 'count' is the total number of results which would be returned if no result limit was in place (i.e. if the number of FileRecords in the 'files' part is less than 'count' you have more records which weren't returned because of a query limit. Note that the default query limit is 100). """ if search is None: search = model.FileRecordSearch() search_string = _to_encoded_string(search) url = self.base_url + '/files/{0}'.format(search_string) # print url response = requests.get(url) response_object = safe_load(response.text) file_dicts = response_object['files'] file_count = response_object['count'] return {'count': file_count, 'files': list((self._augment_file(f) for f in (model.FileRecord.from_dict(d) for d in file_dicts)))}
Search for files, returning a FileRecord for each result. FileRecords have two additional methods patched into them, get_url() and download_to(file_name), which will retrieve the URL for the file content and download that content to a named file on disk, respectively. :param FileRecordSearch search: an instance of :class:`meteorpi_model.FileRecordSearch` - see the model docs for details on how to construct this :return: an object containing 'count' and 'files'. 'files' is a sequence of FileRecord objects containing the results of the search, and 'count' is the total number of results which would be returned if no result limit was in place (i.e. if the number of FileRecords in the 'files' part is less than 'count' you have more records which weren't returned because of a query limit. Note that the default query limit is 100).
def set_iter_mesh(self, mesh, shift=None, is_time_reversal=True, is_mesh_symmetry=True, is_eigenvectors=False, is_gamma_center=False):
    """Create an IterMesh instance

    Attributes
    ----------
    See set_mesh method.

    """
    warnings.warn("Phonopy.set_iter_mesh is deprecated. "
                  "Use Phonopy.run_mesh with use_iter_mesh=True.",
                  DeprecationWarning)
    self.run_mesh(mesh=mesh,
                  shift=shift,
                  is_time_reversal=is_time_reversal,
                  is_mesh_symmetry=is_mesh_symmetry,
                  with_eigenvectors=is_eigenvectors,
                  is_gamma_center=is_gamma_center,
                  use_iter_mesh=True)
Create an IterMesh instance Attributes ---------- See set_mesh method.
def stacks_2_eqns(self,stacks): """returns equation strings from stacks""" if stacks: return list(map(lambda p: self.stack_2_eqn(p), stacks)) else: return []
returns equation strings from stacks
def get_all_reserved_instances_offerings(self, reserved_instances_id=None, instance_type=None, availability_zone=None, product_description=None, filters=None): """ Describes Reserved Instance offerings that are available for purchase. :type reserved_instances_id: str :param reserved_instances_id: Displays Reserved Instances with the specified offering IDs. :type instance_type: str :param instance_type: Displays Reserved Instances of the specified instance type. :type availability_zone: str :param availability_zone: Displays Reserved Instances within the specified Availability Zone. :type product_description: str :param product_description: Displays Reserved Instances with the specified product description. :type filters: dict :param filters: Optional filters that can be used to limit the results returned. Filters are provided in the form of a dictionary consisting of filter names as the key and filter values as the value. The set of allowable filter names/values is dependent on the request being performed. Check the EC2 API guide for details. :rtype: list :return: A list of :class:`boto.ec2.reservedinstance.ReservedInstancesOffering` """ params = {} if reserved_instances_id: params['ReservedInstancesId'] = reserved_instances_id if instance_type: params['InstanceType'] = instance_type if availability_zone: params['AvailabilityZone'] = availability_zone if product_description: params['ProductDescription'] = product_description if filters: self.build_filter_params(params, filters) return self.get_list('DescribeReservedInstancesOfferings', params, [('item', ReservedInstancesOffering)], verb='POST')
Describes Reserved Instance offerings that are available for purchase. :type reserved_instances_id: str :param reserved_instances_id: Displays Reserved Instances with the specified offering IDs. :type instance_type: str :param instance_type: Displays Reserved Instances of the specified instance type. :type availability_zone: str :param availability_zone: Displays Reserved Instances within the specified Availability Zone. :type product_description: str :param product_description: Displays Reserved Instances with the specified product description. :type filters: dict :param filters: Optional filters that can be used to limit the results returned. Filters are provided in the form of a dictionary consisting of filter names as the key and filter values as the value. The set of allowable filter names/values is dependent on the request being performed. Check the EC2 API guide for details. :rtype: list :return: A list of :class:`boto.ec2.reservedinstance.ReservedInstancesOffering`
def delete(self): """Delete the message.""" url = self._imgur._base_url + "/3/message/{0}".format(self.id) return self._imgur._send_request(url, method='DELETE')
Delete the message.
def fb_github_project_workdir(self, project_and_path, github_org='facebook'):
    'This helper lets Facebook-internal CI special-case FB projects'
    project, path = project_and_path.split('/', 1)
    return self.github_project_workdir(github_org + '/' + project, path)
This helper lets Facebook-internal CI special-case FB projects
def timestamp(self, message="", checkpoint=None, finished=False, raise_error=True): """ Print message, time, and time elapsed, perhaps creating checkpoint. This prints your given message, along with the current time, and time elapsed since the previous timestamp() call. If you specify a HEADING by beginning the message with "###", it surrounds the message with newlines for easier readability in the log file. If a checkpoint is designated, an empty file is created corresponding to the name given. Depending on how this manager's been configured, the value of the checkpoint, and whether this timestamp indicates initiation or completion of a group of pipeline steps, this call may stop the pipeline's execution. :param str message: Message to timestamp. :param str checkpoint: Name of checkpoint; this tends to be something that reflects the processing logic about to be or having just been completed. Provision of an argument to this parameter means that a checkpoint file will be created, facilitating arbitrary starting and stopping point for the pipeline as desired. :param bool finished: Whether this call represents the completion of a conceptual unit of a pipeline's processing :param raise_error: Whether to raise exception if checkpoint or current state indicates that a halt should occur. """ # Halt if the manager's state has been set such that this call # should halt the pipeline. if self.halt_on_next: self.halt(checkpoint, finished, raise_error=raise_error) # Determine action to take with respect to halting if needed. if checkpoint: if finished: # Write the file. self._checkpoint(checkpoint) self.prev_checkpoint = checkpoint self.curr_checkpoint = None else: self.prev_checkpoint = self.curr_checkpoint self.curr_checkpoint = checkpoint self._checkpoint(self.prev_checkpoint) # Handle the two halting conditions. if (finished and checkpoint == self.stop_after) or (not finished and checkpoint == self.stop_before): self.halt(checkpoint, finished, raise_error=raise_error) # Determine if we've started executing. elif checkpoint == self.start_point: self._active = True # If this is a prospective checkpoint, set the current checkpoint # accordingly and whether we should halt the pipeline on the # next timestamp call. if not finished and checkpoint == self.stop_after: self.halt_on_next = True elapsed = self.time_elapsed(self.last_timestamp) t = time.strftime("%m-%d %H:%M:%S") if checkpoint is None: msg = "{m} ({t}) elapsed: {delta_t} _TIME_".\ format(m=message, t=t, delta_t=elapsed) else: msg = "{m} ({t}) ({status} {stage}) elapsed: {delta_t} _TIME_".\ format(m=message, t=t, status="finished" if finished else "starting", stage=checkpoint, delta_t=elapsed) if re.match("^###", message): msg = "\n{}\n".format(msg) print(msg) self.last_timestamp = time.time()
Print message, time, and time elapsed, perhaps creating checkpoint. This prints your given message, along with the current time, and time elapsed since the previous timestamp() call. If you specify a HEADING by beginning the message with "###", it surrounds the message with newlines for easier readability in the log file. If a checkpoint is designated, an empty file is created corresponding to the name given. Depending on how this manager's been configured, the value of the checkpoint, and whether this timestamp indicates initiation or completion of a group of pipeline steps, this call may stop the pipeline's execution. :param str message: Message to timestamp. :param str checkpoint: Name of checkpoint; this tends to be something that reflects the processing logic about to be or having just been completed. Provision of an argument to this parameter means that a checkpoint file will be created, facilitating arbitrary starting and stopping point for the pipeline as desired. :param bool finished: Whether this call represents the completion of a conceptual unit of a pipeline's processing :param raise_error: Whether to raise exception if checkpoint or current state indicates that a halt should occur.
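A hedged usage sketch of the checkpoint semantics described above; `pm` stands for an instance of the enclosing manager class and the stage name is made up:

pm.timestamp("### Alignment")                       # "###" prefix renders as a heading
pm.timestamp("Aligning reads", checkpoint="align")  # marks the start of the 'align' stage
# ... run the alignment steps ...
pm.timestamp("Done aligning", checkpoint="align", finished=True)  # writes the checkpoint file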
def _register_handler(event, fun, external=False): """Register a function to be an event handler""" registry = core.HANDLER_REGISTRY if external: registry = core.EXTERNAL_HANDLER_REGISTRY if not isinstance(event, basestring): # If not basestring, it is a BaseEvent subclass. # This occurs when class methods are registered as handlers event = core.parse_event_to_name(event) if event in registry: registry[event].append(fun) else: registry[event] = [fun] return fun
Register a function to be an event handler
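A minimal standalone sketch of the registry idea; the real `core.HANDLER_REGISTRY` and event-name parsing are not reproduced here:

HANDLER_REGISTRY = {}

def register_handler(event, fun):
    # Append to the event's handler list, creating it on first use.
    HANDLER_REGISTRY.setdefault(event, []).append(fun)
    return fun

register_handler('user.created', lambda payload: print('welcome', payload))
for handler in HANDLER_REGISTRY.get('user.created', []):
    handler({'id': 42})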
def get_range(self): """Return the bounds currently visible.""" p, z = np.asarray(self.pan), np.asarray(self.zoom) x0, y0 = -1. / z - p x1, y1 = +1. / z - p return (x0, y0, x1, y1)
Return the bounds currently visible.
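A worked example of the arithmetic: with pan = (0.25, 0) and zoom = 2, the visible window has half-width 1/z = 0.5 and is shifted by the pan:

import numpy as np

p, z = np.array([0.25, 0.0]), np.array([2.0, 2.0])
x0, y0 = -1. / z - p
x1, y1 = +1. / z - p
print((x0, y0, x1, y1))  # (-0.75, -0.5, 0.25, 0.5)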
def cache_distribution(cls, zf, source, target_dir): """Possibly cache an egg from within a zipfile into target_cache. Given a zipfile handle and a filename corresponding to an egg distribution within that zip, maybe write to the target cache and return a Distribution.""" dependency_basename = os.path.basename(source) if not os.path.exists(target_dir): target_dir_tmp = target_dir + '.' + uuid.uuid4().hex for name in zf.namelist(): if name.startswith(source) and not name.endswith('/'): zf.extract(name, target_dir_tmp) os.rename(os.path.join(target_dir_tmp, source), os.path.join(target_dir_tmp, dependency_basename)) rename_if_empty(target_dir_tmp, target_dir) dist = DistributionHelper.distribution_from_path(target_dir) assert dist is not None, 'Failed to cache distribution %s' % source return dist
Possibly cache an egg from within a zipfile into target_cache. Given a zipfile handle and a filename corresponding to an egg distribution within that zip, maybe write to the target cache and return a Distribution.
def set_chat_description(self, chat_id, description): """ Use this method to change the description of a supergroup or a channel. The bot must be an administrator in the chat for this to work and must have the appropriate admin rights. Returns True on success. :param chat_id: Int or Str: Unique identifier for the target chat or username of the target channel (in the format @channelusername) :param description: Str: New chat description, 0-255 characters :return: """ return apihelper.set_chat_description(self.token, chat_id, description)
Use this method to change the description of a supergroup or a channel. The bot must be an administrator in the chat for this to work and must have the appropriate admin rights. Returns True on success. :param chat_id: Int or Str: Unique identifier for the target chat or username of the target channel (in the format @channelusername) :param description: Str: New chat description, 0-255 characters :return:
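Usage sketch; the chat identifier and text are placeholders, and `bot` stands for an instance of the enclosing class:

ok = bot.set_chat_description('@examplechannel', 'Nightly build announcements')
print(ok)  # True on success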
def update_external_link(self, id, **kwargs): # noqa: E501 """Update a specific external link # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.update_external_link(id, async_req=True) >>> result = thread.get() :param async_req bool :param str id: (required) :param ExternalLink body: Example Body: <pre>{ \"name\": \"External Link API Example\", \"template\": \"https://example.com/{{source}}\", \"description\": \"External Link Description\" }</pre> :return: ResponseContainerExternalLink If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.update_external_link_with_http_info(id, **kwargs) # noqa: E501 else: (data) = self.update_external_link_with_http_info(id, **kwargs) # noqa: E501 return data
Update a specific external link # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.update_external_link(id, async_req=True) >>> result = thread.get() :param async_req bool :param str id: (required) :param ExternalLink body: Example Body: <pre>{ \"name\": \"External Link API Example\", \"template\": \"https://example.com/{{source}}\", \"description\": \"External Link Description\" }</pre> :return: ResponseContainerExternalLink If the method is called asynchronously, returns the request thread.
def announcements_view(request):
    ''' The view of manager announcements. '''
    page_name = "Manager Announcements"
    userProfile = UserProfile.objects.get(user=request.user)
    announcement_form = None
    manager_positions = Manager.objects.filter(incumbent=userProfile)
    if manager_positions:
        announcement_form = AnnouncementForm(
            request.POST if "post_announcement" in request.POST else None,
            profile=userProfile,
        )
    if announcement_form and announcement_form.is_valid():
        announcement_form.save(request)
        return HttpResponseRedirect(reverse('managers:announcements'))
    # A pseudo-dictionary, actually a list with items of form:
    # (announcement, announcement_pin_form)
    announcements_dict = list()
    for a in Announcement.objects.filter(pinned=True):
        pin_form = None
        if (a.manager.incumbent == userProfile) or request.user.is_superuser:
            pin_form = PinForm(
                request.POST if "pin-{0}".format(a.pk) in request.POST else None,
                instance=a,
            )
            if pin_form.is_valid():
                pin_form.save()
                return HttpResponseRedirect(reverse('managers:announcements'))
        announcements_dict.append((a, pin_form))
    # Oldest genesis of a pinned announcement to be displayed.
    within_life = now() - timedelta(hours=settings.ANNOUNCEMENT_LIFE)
    for a in Announcement.objects.filter(pinned=False, post_date__gte=within_life):
        pin_form = None
        if request.user.is_superuser or (a.manager.incumbent == userProfile):
            pin_form = PinForm(
                request.POST if "pin-{0}".format(a.pk) in request.POST else None,
                instance=a,
            )
        announcements_dict.append((a, pin_form))
    return render_to_response('announcements.html', {
        'page_name': page_name,
        'manager_positions': manager_positions,
        'announcements_dict': announcements_dict,
        'announcement_form': announcement_form,
    }, context_instance=RequestContext(request))
The view of manager announcements.
def transformation_get(node_id):
    """Get all the transformations of a node.

    The node id must be specified in the url.

    You can also pass transformation_type.
    """
    exp = experiment(session)

    # get the parameters
    transformation_type = request_parameter(parameter="transformation_type",
                                            parameter_type="known_class",
                                            default=models.Transformation)
    if type(transformation_type) == Response:
        return transformation_type

    # check the node exists
    node = models.Node.query.get(node_id)
    if node is None:
        return error_response(
            error_type="/node/transformations, node does not exist")

    # execute the request
    transformations = node.transformations(
        transformation_type=transformation_type)
    try:
        # ping the experiment
        exp.transformation_get_request(node=node,
                                       transformations=transformations)
        session.commit()
    except:
        return error_response(error_type="/node/transformations GET failed",
                              participant=node.participant)

    # return the data
    return success_response(field="transformations",
                            data=[t.__json__() for t in transformations],
                            request_type="transformations")
Get all the transformations of a node. The node id must be specified in the url. You can also pass transformation_type.
def call_hook(self, hook, *args, **kwargs): """ Calls each registered hook """ for function in self.hooks[hook]: function.__call__(*args, **kwargs)
Calls each registered hook
def sweep(self, min_freq, max_freq, bins, repeats, runs=0, time_limit=0, overlap=0, fft_window='hann', fft_overlap=0.5, crop=False, log_scale=True, remove_dc=False, detrend=None, lnb_lo=0, tune_delay=0, reset_stream=False, base_buffer_size=0, max_buffer_size=0, max_threads=0, max_queue_size=0): """Sweep spectrum using frequency hopping""" self.setup( bins, repeats, base_buffer_size, max_buffer_size, fft_window=fft_window, fft_overlap=fft_overlap, crop_factor=overlap if crop else 0, log_scale=log_scale, remove_dc=remove_dc, detrend=detrend, lnb_lo=lnb_lo, tune_delay=tune_delay, reset_stream=reset_stream, max_threads=max_threads, max_queue_size=max_queue_size ) try: freq_list = self.freq_plan(min_freq - lnb_lo, max_freq - lnb_lo, bins, overlap) t_start = time.time() run = 0 while not _shutdown and (runs == 0 or run < runs): run += 1 t_run_start = time.time() logger.debug('Run: {}'.format(run)) for freq in freq_list: # Tune to new frequency, acquire samples and compute Power Spectral Density psd_future, acq_time_start, acq_time_stop = self.psd(freq) # Write PSD to stdout (in another thread) self._writer.write_async(psd_future, acq_time_start, acq_time_stop, len(self._buffer) * self._buffer_repeats) if _shutdown: break # Write end of measurement marker (in another thread) write_next_future = self._writer.write_next_async() t_run = time.time() logger.debug(' Total run time: {:.3f} s'.format(t_run - t_run_start)) # End measurement if time limit is exceeded if time_limit and (time.time() - t_start) >= time_limit: logger.info('Time limit of {} s exceeded, completed {} runs'.format(time_limit, run)) break # Wait for last write to be finished write_next_future.result() # Debug thread pool queues logging.debug('Number of USB buffer overflow errors: {}'.format(self.device.buffer_overflow_count)) logging.debug('PSD worker threads: {}'.format(self._psd._executor._max_workers)) logging.debug('Max. PSD queue size: {} / {}'.format(self._psd._executor.max_queue_size_reached, self._psd._executor.max_queue_size)) logging.debug('Writer worker threads: {}'.format(self._writer._executor._max_workers)) logging.debug('Max. Writer queue size: {} / {}'.format(self._writer._executor.max_queue_size_reached, self._writer._executor.max_queue_size)) finally: # Shutdown SDR self.stop() t_stop = time.time() logger.info('Total time: {:.3f} s'.format(t_stop - t_start))
Sweep spectrum using frequency hopping
def dump_grammar(self, out=sys.stdout): """ Print grammar rules """ for rule in sorted(self.rule2name.items()): out.write("%s\n" % rule2str(rule[0])) return
Print grammar rules
def get_requirement_from_url(url): """Get a requirement from the URL, if possible. This looks for #egg in the URL""" link = Link(url) egg_info = link.egg_fragment if not egg_info: egg_info = splitext(link.filename)[0] return package_to_requirement(egg_info)
Get a requirement from the URL, if possible. This looks for #egg in the URL
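A self-contained sketch of the same extraction without the pip internals (`Link` and `package_to_requirement` are not reproduced; the URL is hypothetical):

from posixpath import basename, splitext
from urllib.parse import urlparse

def egg_name_from_url(url):
    parsed = urlparse(url)
    # Prefer the #egg fragment, fall back to the file name stem.
    if parsed.fragment.startswith('egg='):
        return parsed.fragment[4:]
    return splitext(basename(parsed.path))[0]

print(egg_name_from_url('https://example.com/dl/foo-1.0.tar.gz#egg=foo'))  # 'foo'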
def symbol(self): ''' Gets the symbol `p`, `l`, `n`, etc. ''' if self.color == BLACK: return PIECE_SYMBOLS[self.piece_type].upper() else: return PIECE_SYMBOLS[self.piece_type]
Gets the symbol `p`, `l`, `n`, etc.
def from_tibiadata(cls, content): """Parses a TibiaData.com response into a :class:`World` Parameters ---------- content: :class:`str` The raw JSON content from TibiaData Returns ------- :class:`World` The World described in the page, or ``None``. Raises ------ InvalidContent If the provided content is not a TibiaData world response. """ json_data = parse_json(content) try: world_data = json_data["world"] world_info = world_data["world_information"] world = cls(world_info["name"]) if "location" not in world_info: return None world.online_count = world_info["players_online"] world.status = "Online" if world.online_count > 0 else "Offline" world.record_count = world_info["online_record"]["players"] world.record_date = parse_tibiadata_datetime(world_info["online_record"]["date"]) world.creation_date = world_info["creation_date"] world.location = try_enum(WorldLocation, world_info["location"]) world.pvp_type = try_enum(PvpType, world_info["pvp_type"]) world.transfer_type = try_enum(TransferType, world_info.get("transfer_type"), TransferType.REGULAR) world.premium_only = "premium_type" in world_info world.world_quest_titles = world_info.get("world_quest_titles", []) world._parse_battleye_status(world_info.get("battleye_status", "")) world.experimental = world_info.get("Game World Type:", "Regular") != "Regular" for player in world_data.get("players_online", []): world.online_players.append(OnlineCharacter(player["name"], world.name, player["level"], player["vocation"])) return world except KeyError: raise InvalidContent("content is not a world json response from TibiaData")
Parses a TibiaData.com response into a :class:`World` Parameters ---------- content: :class:`str` The raw JSON content from TibiaData Returns ------- :class:`World` The World described in the page, or ``None``. Raises ------ InvalidContent If the provided content is not a TibiaData world response.
def get_dataset(catalog, identifier=None, title=None):
    """Returns a Dataset from the catalog."""
    msg = "An 'identifier' or 'title' is required to look up the dataset."
    assert identifier or title, msg

    catalog = read_catalog_obj(catalog)

    # optimized lookup by identifier
    if identifier:
        try:
            return _get_dataset_by_identifier(catalog, identifier)
        except BaseException:
            try:
                catalog._build_index()
                return _get_dataset_by_identifier(catalog, identifier)
            except BaseException:
                filtered_datasets = get_datasets(
                    catalog, {"dataset": {"identifier": identifier}})
    elif title:
        # TODO: is this required?
        filtered_datasets = get_datasets(
            catalog, {"dataset": {"title": title}})

    if len(filtered_datasets) > 1:
        if identifier:
            raise ce.DatasetIdRepetitionError(
                identifier, filtered_datasets)
        elif title:
            # TODO: Improve exceptions module!
            raise ce.DatasetTitleRepetitionError(title, filtered_datasets)
    elif len(filtered_datasets) == 0:
        return None
    else:
        return filtered_datasets[0]
Returns a Dataset from the catalog.
def GetRootFileEntry(self): """Retrieves the root file entry. Returns: EncodedStreamFileEntry: a file entry or None if not available. """ path_spec = encoded_stream_path_spec.EncodedStreamPathSpec( encoding_method=self._encoding_method, parent=self._path_spec.parent) return self.GetFileEntryByPathSpec(path_spec)
Retrieves the root file entry. Returns: EncodedStreamFileEntry: a file entry or None if not available.
def get(self, **url_params): """ Makes the HTTP GET to the url. """ if url_params: self.http_method_args["params"].update(url_params) return self.http_method("GET")
Makes the HTTP GET to the url.
def get_ontology(self, id=None, uri=None, match=None): """ get the saved-ontology with given ID or via other methods... """ if not id and not uri and not match: return None if type(id) == type("string"): uri = id id = None if not is_http(uri): match = uri uri = None if match: if type(match) != type("string"): return [] res = [] for x in self.all_ontologies: if match.lower() in x.uri.lower(): res += [x] return res else: for x in self.all_ontologies: if id and x.id == id: return x if uri and x.uri.lower() == uri.lower(): return x return None
get the saved-ontology with given ID or via other methods...
def add_loss(self, loss, name=None): """Adds a loss and returns a wrapper for that loss.""" self.bookkeeper.add_loss(loss, name=name) return Loss(self.bookkeeper, tensor=loss, name=name)
Adds a loss and returns a wrapper for that loss.
def _increment(self, n=1):
    """Move forward n tokens in the stream."""
    if self._cur_position >= self.num_tokens-1:
        self._cur_position = self.num_tokens - 1
        self._finished = True
    else:
        self._cur_position += n
Move forward n tokens in the stream.
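A minimal token-stream sketch around the (now typo-fixed) method; the attribute initialization is a hypothetical reconstruction:

class TokenStream:
    def __init__(self, tokens):
        self.tokens = list(tokens)
        self.num_tokens = len(self.tokens)
        self._cur_position = 0
        self._finished = False

    def _increment(self, n=1):
        """Move forward n tokens in the stream."""
        if self._cur_position >= self.num_tokens - 1:
            self._cur_position = self.num_tokens - 1
            self._finished = True
        else:
            self._cur_position += n

ts = TokenStream(['a', 'b', 'c'])
ts._increment(); ts._increment(); ts._increment()
print(ts._cur_position, ts._finished)  # 2 True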
def _get_url(url): """Retrieve requested URL""" try: data = HTTP_SESSION.get(url, stream=True) data.raise_for_status() except requests.exceptions.RequestException as exc: raise FetcherException(exc) return data
Retrieve requested URL
def getPreferenceCounts(self): """ Returns a list of the number of times each preference is given. """ preferenceCounts = [] for preference in self.preferences: preferenceCounts.append(preference.count) return preferenceCounts
Returns a list of the number of times each preference is given.
def default_fields(cls, include_virtual=True, **kwargs): """The default fields and their dtypes. By default, this returns whatever the class's ``_staticfields`` and ``_virtualfields`` is set to as a dictionary of fieldname, dtype (the dtype of virtualfields is given by VIRTUALFIELD_DTYPE). This function should be overridden by subclasses to add dynamic fields; i.e., fields that require some input parameters at initialization. Keyword arguments can be passed to this to set such dynamic fields. """ output = cls._staticfields.copy() if include_virtual: output.update({name: VIRTUALFIELD_DTYPE for name in cls._virtualfields}) return output
The default fields and their dtypes. By default, this returns whatever the class's ``_staticfields`` and ``_virtualfields`` is set to as a dictionary of fieldname, dtype (the dtype of virtualfields is given by VIRTUALFIELD_DTYPE). This function should be overridden by subclasses to add dynamic fields; i.e., fields that require some input parameters at initialization. Keyword arguments can be passed to this to set such dynamic fields.
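An illustrative subclass overriding the hook to add a dynamic field, with made-up field names and dtypes:

VIRTUALFIELD_DTYPE = object  # stand-in for the module-level constant

class Base:
    _staticfields = {'mass1': float, 'mass2': float}
    _virtualfields = ['mchirp']

    @classmethod
    def default_fields(cls, include_virtual=True, **kwargs):
        output = cls._staticfields.copy()
        if include_virtual:
            output.update({name: VIRTUALFIELD_DTYPE
                           for name in cls._virtualfields})
        return output

class WithSpins(Base):
    @classmethod
    def default_fields(cls, nspins=1, **kwargs):
        output = super().default_fields(**kwargs)
        # Dynamic fields whose count depends on an init-time parameter.
        output.update({'spin{}'.format(i): float for i in range(nspins)})
        return output

print(sorted(WithSpins.default_fields(nspins=2)))
# ['mass1', 'mass2', 'mchirp', 'spin0', 'spin1']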