def ProcesarPlantillaPDF(self, num_copias=1, lineas_max=24, qty_pos='izq', clave=''):
    "Generate the PDF according to the created invoice and the loaded template"
    try:
        f = self.template
        liq = self.params_out
        # update the fields according to the key (debit / credit adjustment)
        if clave and clave in liq:
            liq = liq.copy()
            liq.update(liq[clave])  # unify with AnalizarAjusteCredito/Debito
        if HOMO:
            self.AgregarDatoPDF("homo", u"HOMOLOGACIÓN")
        copias = {1: 'Original', 2: 'Duplicado', 3: 'Triplicado',
                  4: 'Cuadruplicado', 5: 'Quintuplicado'}

        # convert the interchange format to render the values:
        fmt_encabezado = dict([(v[0], v[1:]) for v in ENCABEZADO])
        fmt_deduccion = dict([(v[0], v[1:]) for v in DEDUCCION])
        fmt_retencion = dict([(v[0], v[1:]) for v in RETENCION])

        def formatear(campo, valor, formato):
            "Convert the value to a properly formatted string ($ % ...)"
            if campo in formato and valor is not None:
                fmt = formato[campo]
                if fmt[1] == N:
                    if 'cuit' in campo:
                        c = str(valor)
                        if len(c) == 11:
                            valor = "%s-%s-%s" % (c[0:2], c[2:10], c[10:])
                        else:
                            valor = ""
                    elif 'peso' in campo:
                        valor = "%s Kg" % valor
                    elif valor is not None and valor != "":
                        valor = "%d" % int(valor)
                    else:
                        valor = ""
                elif fmt[1] == I:
                    valor = ("%%0.%df" % fmt[2]) % valor
                    if 'alic' in campo or 'comision' in campo:
                        valor = valor + " %"
                    elif 'factor' in campo or 'cont' in campo or 'cant' in campo:
                        pass
                    else:
                        valor = "$ " + valor
                elif 'fecha' in campo:
                    d = valor
                    if isinstance(d, (datetime.date, datetime.datetime)):
                        valor = d.strftime("%d/%m/%Y")
                    else:
                        valor = "%s/%s/%s" % (d[8:10], d[5:7], d[0:4])
            return valor

        def buscar_localidad_provincia(cod_prov, cod_localidad):
            "get the province/locality description (uses a cache)"
            cod_prov = int(cod_prov)
            cod_localidad = str(cod_localidad)
            provincia = datos.PROVINCIAS[cod_prov]
            localidad = self.BuscarLocalidades(cod_prov, cod_localidad)
            return localidad, provincia

        # split the additional data (rows 1 through 9 must exist):
        if liq.get('datos_adicionales') and f.has_key('datos_adicionales1'):
            d = liq.get('datos_adicionales')
            for i, ds in enumerate(f.split_multicell(d, 'datos_adicionales1')):
                liq['datos_adicionales%s' % (i + 1)] = ds

        for copia in range(1, num_copias + 1):
            # fill in fields and pages
            f.add_page()
            f.set('copia', copias.get(copia, "Adicional %s" % copia))
            f.set('anulado', {'AC': '', '': 'SIN ESTADO',
                              'AN': "ANULADO"}.get(liq['estado'], "ERROR"))
            try:
                cod_tipo_ajuste = int(liq["cod_tipo_ajuste"] or '0')
            except:
                cod_tipo_ajuste = None
            f.set('tipo_ajuste', {3: u'Liquidación de Débito',
                                  4: u'Liquidación de Crédito',
                                  }.get(cod_tipo_ajuste, ''))

            # clear broker data when it does not apply:
            if liq.get('actua_corredor', 'N') == 'N':
                if liq.get('cuit_corredor', None) == 0:
                    del liq['cuit_corredor']

            # set fields according to the header table:
            for k, v in liq.items():
                v = formatear(k, v, fmt_encabezado)
                if isinstance(v, (basestring, int, long, float)):
                    f.set(k, v)
                elif isinstance(v, decimal.Decimal):
                    f.set(k, str(v))
                elif isinstance(v, datetime.datetime):
                    f.set(k, str(v))

            import wslpg_datos as datos

            campania = int(liq.get('campania_ppal') or 0)
            f.set("campania_ppal", datos.CAMPANIAS.get(campania, campania))
            f.set("tipo_operacion",
                  datos.TIPOS_OP.get(int(liq.get('cod_tipo_operacion') or 0), ""))
            f.set("actividad",
                  datos.ACTIVIDADES.get(int(liq.get('nro_act_comprador') or 0), ""))
            if 'cod_grano' in liq and liq['cod_grano']:
                cod_grano = int(liq['cod_grano'])
            else:
                cod_grano = int(self.datos.get('cod_grano') or 0)
            f.set("grano", datos.GRANOS.get(cod_grano, ""))
            cod_puerto = int(liq.get('cod_puerto', self.datos.get('cod_puerto')) or 0)
            if cod_puerto in datos.PUERTOS:
                f.set("des_puerto_localidad", datos.PUERTOS[cod_puerto])

            cod_grado_ref = liq.get('cod_grado_ref', self.datos.get('cod_grado_ref')) or ""
            if cod_grado_ref in datos.GRADOS_REF:
                f.set("des_grado_ref", datos.GRADOS_REF[cod_grado_ref])
            else:
                f.set("des_grado_ref", cod_grado_ref)
            cod_grado_ent = liq.get('cod_grado_ent', self.datos.get('cod_grado_ent'))
            if 'val_grado_ent' in liq and int(liq.get('val_grado_ent') or 0):
                val_grado_ent = liq['val_grado_ent']
            elif 'val_grado_ent' in self.datos:
                val_grado_ent = self.datos.get('val_grado_ent')
            elif cod_grano in datos.GRADO_ENT_VALOR:
                valores = datos.GRADO_ENT_VALOR[cod_grano]
                if cod_grado_ent in valores:
                    val_grado_ent = valores[cod_grado_ent]
                else:
                    val_grado_ent = ""
            else:
                val_grado_ent = ""
            f.set("valor_grado_ent", "%s %s" % (cod_grado_ent or "", val_grado_ent or ""))
            f.set("cont_proteico",
                  liq.get('cont_proteico', self.datos.get('cont_proteico', "")))

            if liq.get('certificados'):
                # use the origin from the deposit certificate
                cert = liq['certificados'][0]
                localidad, provincia = buscar_localidad_provincia(
                    cert['cod_prov_procedencia'],
                    cert['cod_localidad_procedencia'])
            elif liq.get('cod_prov_procedencia_sin_certificado'):
                localidad, provincia = buscar_localidad_provincia(
                    liq['cod_prov_procedencia_sin_certificado'],
                    liq['cod_localidad_procedencia_sin_certificado'])
            else:
                localidad, provincia = "", ""
            f.set("procedencia", "%s - %s" % (localidad, provincia))

            # if not specified, use the origin for the place
            if not self.datos.get('lugar_y_fecha'):
                localidad, provincia = buscar_localidad_provincia(
                    liq['cod_prov_procedencia'],
                    liq['cod_localidad_procedencia'])
                lugar = "%s - %s " % (localidad, provincia)
                fecha = datetime.datetime.today().strftime("%d/%m/%Y")
                f.set("lugar_y_fecha", "%s, %s" % (fecha, lugar))
                if 'lugar_y_fecha' in self.datos:
                    del self.datos['lugar_y_fecha']

            if HOMO:
                homo = "(pruebas)"
            else:
                homo = ""
            if int(liq['cod_tipo_operacion'] or 0) == 1:
                f.set("comprador.L", "COMPRADOR:")
                f.set("vendedor.L", "VENDEDOR:")
                f.set("formulario", u"Form. Electrónico 1116 B %s" % homo)
            else:
                f.set("comprador.L", "MANDATARIO/CONSIGNATARIO:")
                f.set("vendedor.L", "MANDANTE/COMITENTE:")
                f.set("formulario", u"Form. Electrónico 1116 C %s" % homo)

            if int(liq.get("coe_ajustado") or 0) or int(liq.get("nro_contrato") or 0):
                f.set("formulario", u"Ajuste Unificado %s" % homo)

            certs = []
            for cert in liq.get('certificados', []):
                certs.append(u"%s Nº %s" % (
                    datos.TIPO_CERT_DEP[int(cert['tipo_certificado_deposito'])],
                    cert['nro_certificado_deposito']))
            f.set("certificados_deposito", ', '.join(certs))

            for i, deduccion in enumerate(liq.get('deducciones', [])):
                for k, v in deduccion.items():
                    v = formatear(k, v, fmt_deduccion)
                    f.set("deducciones_%s_%02d" % (k, i + 1), v)

            for i, retencion in enumerate(liq.get('retenciones', [])):
                for k, v in retencion.items():
                    v = formatear(k, v, fmt_retencion)
                    f.set("retenciones_%s_%02d" % (k, i + 1), v)
                if retencion['importe_certificado_retencion']:
                    d = retencion['fecha_certificado_retencion']
                    f.set('retenciones_cert_retencion_%02d' % (i + 1),
                          "%s $ %0.2f %s" % (
                              retencion['nro_certificado_retencion'] or '',
                              retencion['importe_certificado_retencion'],
                              "%s/%s/%s" % (d[8:10], d[5:7], d[2:4]),
                          ))

            # load additional fields ([PDF] section in .ini and AgregarDatoPDF)
            for k, v in self.datos.items():
                f.set(k, v)

            # Adjustments:
            if clave:
                f.set('subtipo_ajuste', {'ajuste_debito': u'AJUSTE DÉBITO',
                                         'ajuste_credito': u'AJUSTE CRÉDITO'}[clave])
            if int(liq.get('coe_ajustado') or 0):
                f.set("leyenda_coe_nro", "COE Ajustado:")
                f.set("nro_contrato_o_coe_ajustado", liq['coe_ajustado'])
                f.set("coe_relacionados.L", "")
                f.set("coe_relacionados", "")
            elif liq.get('nro_contrato'):
                f.set("leyenda_coe_nro", "Contrato Ajustado:")
                f.set("nro_contrato_o_coe_ajustado", liq['nro_contrato'])
                ##f.set("coe_relacionados", TODO)
        return True
    except Exception, e:
        ex = utils.exception_info()
        try:
            f.set('anulado', "%(name)s:%(lineno)s" % ex)
        except:
            pass
        self.Excepcion = ex['msg']
        self.Traceback = ex['tb']
        if DEBUG:
            print self.Excepcion
            print self.Traceback
        return False
def convert(self, to_mag, from_mag=None):
    """
    Converts magnitudes using UBVRIJHKLMNQ photometry in Taurus-Auriga
    (Kenyon+ 1995) ReadMe+ftp1995ApJS..101..117K Colors for main-sequence
    stars.

    If from_mag isn't specified the program will cycle through provided
    magnitudes and choose one. Note that all magnitudes are first converted
    to V, and then to the requested magnitude.

    :param to_mag: magnitude to convert to
    :param from_mag: magnitude to convert from
    :return:
    """
    allowed_mags = "UBVJIHKLMN"
    if from_mag:
        if to_mag == 'V':  # If V mag is requested (1/3) - from mag specified
            return self._convert_to_from('V', from_mag)
        if from_mag == 'V':
            magV = self.magV
        else:
            magV = self._convert_to_from('V', from_mag)
        return self._convert_to_from(to_mag, 'V', magV)
    elif not isNanOrNone(self.magV):  # if we can convert from any magnitude, try V first
        if to_mag == 'V':  # If V mag is requested (2/3) - no need to convert
            return self.magV
        else:
            return self._convert_to_from(to_mag, 'V', self.magV)
    else:  # Otherwise lets try all other magnitudes in turn
        # V is the intermediate step from the others, done by default if possible
        order = "UBJHKLMN"
        for mag_letter in order:
            try:
                magV = self._convert_to_from('V', mag_letter)
                if to_mag == 'V':  # If V mag is requested (3/3) - try all other mags to convert
                    logging.debug('Converted to magV from {0} got {1}'.format(mag_letter, magV))
                    return magV
                else:
                    mag_val = self._convert_to_from(to_mag, 'V', magV)
                    logging.debug('Converted to mag{0} from {1} got {2}'.format(to_mag, mag_letter, mag_val))
                    return mag_val
            except ValueError:
                continue  # this conversion may not be possible, try another
        raise ValueError('Could not convert from any provided magnitudes')
def parse_interval(interval):
    """
    Attempt to parse an ISO 8601 formatted ``interval``.

    Returns a tuple of ``datetime.datetime`` and ``datetime.timedelta``
    objects, order dependent on ``interval``.
    """
    a, b = str(interval).upper().strip().split('/')
    if a[0] == 'P' and b[0] == 'P':
        # a duration/duration pair is not a valid interval
        raise ParseError()
    if a[0] != 'P' and b[0] != 'P':
        return parse_date(a), parse_date(b)
    if a[0] == 'P':
        a = parse_duration(a)
    else:
        a = parse_date(a)
    if b[0] == 'P':
        b = parse_duration(b)
    else:
        b = parse_date(b)
    return a, b
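A quick usage sketch of the three accepted interval shapes; the module name in the import is hypothetical, and `parse_date`/`parse_duration` are assumed to be this module's own helpers:

# Hypothetical import path for the function above.
from iso8601_interval import parse_interval

# datetime/duration -> (datetime.datetime, datetime.timedelta)
start, dur = parse_interval("2007-03-01T13:00:00Z/P1Y2M10DT2H30M")

# duration/datetime -> (datetime.timedelta, datetime.datetime)
dur, end = parse_interval("P1Y2M10DT2H30M/2008-05-11T15:30:00Z")

# datetime/datetime -> (datetime.datetime, datetime.datetime)
start, end = parse_interval("2007-03-01T13:00:00Z/2008-05-11T15:30:00Z")

# A duration/duration pair is rejected with ParseError.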
def rdl_decomposition(T, k=None, reversible=False, norm='standard', mu=None):
    r"""Compute the decomposition into left and right eigenvectors.

    Parameters
    ----------
    T : (M, M) ndarray
        Transition matrix
    k : int (optional)
        Number of eigenvector/eigenvalue pairs
    norm : {'standard', 'reversible', 'auto'}
        standard: (L'R) = Id, L[:,0] is a probability distribution,
            the stationary distribution mu of T. Right eigenvectors
            R have a 2-norm of 1.
        reversible: R and L are related via L=L[:,0]*R.
        auto: will be reversible if T is reversible, otherwise standard
    reversible : bool, optional
        Indicate that transition matrix is reversible
    mu : (d,) ndarray, optional
        Stationary distribution of T

    Returns
    -------
    R : (M, M) ndarray
        The normalized (with respect to L) right eigenvectors, such that the
        column R[:,i] is the right eigenvector corresponding to the
        eigenvalue w[i], dot(T,R[:,i])=w[i]*R[:,i]
    D : (M, M) ndarray
        A diagonal matrix containing the eigenvalues, each repeated
        according to its multiplicity
    L : (M, M) ndarray
        The normalized (with respect to `R`) left eigenvectors, such that
        the row ``L[i, :]`` is the left eigenvector corresponding to the
        eigenvalue ``w[i]``, ``dot(L[i, :], T) = w[i]*L[i, :]``

    Notes
    -----
    If reversible=True the eigenvalues and eigenvectors of the similar
    symmetric matrix `\sqrt(\mu_i / \mu_j) p_{ij}` will be used to compute
    the eigenvalues and eigenvectors of T.

    The precomputed stationary distribution will only be used if
    reversible=True.
    """
    # auto-set norm
    if norm == 'auto':
        if is_reversible(T):
            norm = 'reversible'
        else:
            norm = 'standard'

    if reversible:
        R, D, L = rdl_decomposition_rev(T, norm=norm, mu=mu)
    else:
        R, D, L = rdl_decomposition_nrev(T, norm=norm)

    if k is None:
        return R, D, L
    else:
        return R[:, 0:k], D[0:k, 0:k], L[0:k, :]
r"""Compute the decomposition into left and right eigenvectors. Parameters ---------- T : (M, M) ndarray Transition matrix k : int (optional) Number of eigenvector/eigenvalue pairs norm: {'standard', 'reversible', 'auto'} standard: (L'R) = Id, L[:,0] is a probability distribution, the stationary distribution mu of T. Right eigenvectors R have a 2-norm of 1. reversible: R and L are related via L=L[:,0]*R. auto: will be reversible if T is reversible, otherwise standard reversible : bool, optional Indicate that transition matrix is reversible mu : (d,) ndarray, optional Stationary distribution of T Returns ------- R : (M, M) ndarray The normalized (with respect to L) right eigenvectors, such that the column R[:,i] is the right eigenvector corresponding to the eigenvalue w[i], dot(T,R[:,i])=w[i]*R[:,i] D : (M, M) ndarray A diagonal matrix containing the eigenvalues, each repeated according to its multiplicity L : (M, M) ndarray The normalized (with respect to `R`) left eigenvectors, such that the row ``L[i, :]`` is the left eigenvector corresponding to the eigenvalue ``w[i]``, ``dot(L[i, :], T)``=``w[i]*L[i, :]`` Notes ----- If reversible=True the the eigenvalues and eigenvectors of the similar symmetric matrix `\sqrt(\mu_i / \mu_j) p_{ij}` will be used to compute the eigenvalues and eigenvectors of T. The precomputed stationary distribution will only be used if reversible=True.
def describe_table(self, table_name):
    """
    Polls until the table is ready, then returns the description received
    once the table was ready.

    The returned dict is standardized to ensure all fields are present,
    even when empty or across different DynamoDB API versions.
    TTL information is also inserted.

    :param table_name: The name of the table to describe
    :return: The (sanitized) result of DescribeTable["Table"]
    :rtype: dict
    """
    if table_name in self._tables:
        return self._tables[table_name]
    status, description = None, {}
    calls = 0
    while status is not ready:
        calls += 1
        try:
            description = self.dynamodb_client.describe_table(TableName=table_name)["Table"]
        except botocore.exceptions.ClientError as error:
            raise BloopException("Unexpected error while describing table.") from error
        status = simple_table_status(description)
    logger.debug("describe_table: table \"{}\" was in ACTIVE state after {} calls".format(table_name, calls))
    try:
        ttl = self.dynamodb_client.describe_time_to_live(TableName=table_name)
    except botocore.exceptions.ClientError as error:
        raise BloopException("Unexpected error while describing ttl.") from error
    try:
        backups = self.dynamodb_client.describe_continuous_backups(TableName=table_name)
    except botocore.exceptions.ClientError as error:
        raise BloopException("Unexpected error while describing continuous backups.") from error

    description["TimeToLiveDescription"] = {
        "AttributeName": _read_field(ttl, None, "TimeToLiveDescription", "AttributeName"),
        "TimeToLiveStatus": _read_field(ttl, None, "TimeToLiveDescription", "TimeToLiveStatus"),
    }
    description["ContinuousBackupsDescription"] = {
        "ContinuousBackupsStatus": _read_field(
            backups, None, "ContinuousBackupsDescription", "ContinuousBackupsStatus"),
    }

    table = self._tables[table_name] = sanitize_table_description(description)
    return table
def _parse_abbreviation(uri_link):
    """
    Returns a team's abbreviation.

    A school or team's abbreviation is generally embedded in a URI link
    which contains other relative link information. For example, the URI
    for the New England Patriots for the 2017 season is
    "/teams/nwe/2017.htm". This function strips all of the contents before
    and after "nwe" and converts it to uppercase and returns "NWE".

    Parameters
    ----------
    uri_link : string
        A URI link which contains a team's abbreviation within other link
        contents.

    Returns
    -------
    string
        The shortened uppercase abbreviation for a given team.
    """
    abbr = re.sub(r'/[0-9]+\..*htm.*', '', uri_link('a').attr('href'))
    abbr = re.sub(r'/.*/schools/', '', abbr)
    abbr = re.sub(r'/teams/', '', abbr)
    return abbr.upper()
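The same three substitutions traced on a bare href string (in the real function the href is first extracted from a PyQuery node via uri_link('a').attr('href')):

import re

href = "/teams/nwe/2017.htm"
abbr = re.sub(r'/[0-9]+\..*htm.*', '', href)  # -> "/teams/nwe"
abbr = re.sub(r'/.*/schools/', '', abbr)      # no-op for team (non-school) pages
abbr = re.sub(r'/teams/', '', abbr)           # -> "nwe"
print(abbr.upper())                           # -> "NWE"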
def get(img, cache_dir=CACHE_DIR, iterative=False):
    """Validate image input."""
    if os.path.isfile(img):
        wal_img = img
    elif os.path.isdir(img):
        if iterative:
            wal_img = get_next_image(img)
        else:
            wal_img = get_random_image(img)
    else:
        logging.error("No valid image file found.")
        sys.exit(1)

    wal_img = os.path.abspath(wal_img)

    # Cache the image file path.
    util.save_file(wal_img, os.path.join(cache_dir, "wal"))

    logging.info("Using image \033[1;37m%s\033[0m.", os.path.basename(wal_img))
    return wal_img
def run_algorithms(file_struct, boundaries_id, labels_id, config,
                   annotator_id=0):
    """Runs the algorithms with the specified identifiers on the audio_file.

    Parameters
    ----------
    file_struct: `msaf.io.FileStruct`
        Object with the file paths.
    boundaries_id: str
        Identifier of the boundaries algorithm to use ("gt" for ground
        truth).
    labels_id: str
        Identifier of the labels algorithm to use (None for not labeling).
    config: dict
        Dictionary containing the custom parameters of the algorithms to
        use.
    annotator_id: int
        Annotator identifier in the ground truth.

    Returns
    -------
    est_times: np.array or list
        List of estimated times for the segment boundaries.
        If `list`, it will be a list of np.arrays, sorted by segmentation
        layer.
    est_labels: np.array or list
        List of all the labels associated segments.
        If `list`, it will be a list of np.arrays, sorted by segmentation
        layer.
    """
    # Check that there are enough audio frames
    if config["features"].features.shape[0] <= msaf.config.minimum_frames:
        logging.warning("Audio file too short, or too few beats "
                        "estimated. Returning empty estimations.")
        return np.asarray([0, config["features"].dur]), \
            np.asarray([0], dtype=int)

    # Get the corresponding modules
    bounds_module = get_boundaries_module(boundaries_id)
    labels_module = get_labels_module(labels_id)

    # Get the correct frame times
    frame_times = config["features"].frame_times

    # Segment audio based on type of segmentation
    run_fun = run_hierarchical if config["hier"] else run_flat
    est_times, est_labels = run_fun(file_struct, bounds_module,
                                    labels_module, frame_times, config,
                                    annotator_id)

    return est_times, est_labels
def requirements_check():
    """
    Ensure we have programs needed to download/manipulate the data
    """
    required_programs = [
        ('samtools', 'http://samtools.sourceforge.net/'),
        ('bedtools', 'http://bedtools.readthedocs.org/en/latest/'),
        ('bigWigToBedGraph',
         'http://hgdownload.cse.ucsc.edu/admin/exe/linux.x86_64/'),
        ('bedGraphToBigWig',
         'http://hgdownload.cse.ucsc.edu/admin/exe/linux.x86_64/'),
    ]
    for req, url in required_programs:
        try:
            p = subprocess.Popen(
                [req], stderr=subprocess.PIPE, stdout=subprocess.PIPE)
            stdout, stderr = p.communicate()
        except OSError:
            raise ValueError("Please install %s (%s)" % (req, url))
def list(self, identity=values.unset, limit=None, page_size=None):
    """
    Lists MemberInstance records from the API as a list.
    Unlike stream(), this operation is eager and will load `limit` records
    into memory before returning.

    :param unicode identity: The `identity` value of the resources to read
    :param int limit: Upper limit for the number of records to return.
                      list() guarantees never to return more than limit.
                      Default is no limit
    :param int page_size: Number of records to fetch per request, when not
                          set will use the default value of 50 records. If
                          no page_size is defined but a limit is defined,
                          list() will attempt to read the limit with the
                          most efficient page size, i.e. min(limit, 1000)

    :returns: A list of up to `limit` results
    :rtype: list[twilio.rest.chat.v2.service.channel.member.MemberInstance]
    """
    return list(self.stream(identity=identity, limit=limit, page_size=page_size, ))
def make_image(location, size, fmt):
    '''
    Create a blank virtual machine image file of the specified size in
    megabytes. The image can be created in any format supported by qemu.

    CLI Example:

    .. code-block:: bash

        salt '*' qemu_img.make_image /tmp/image.qcow 2048 qcow2
        salt '*' qemu_img.make_image /tmp/image.raw 10240 raw
    '''
    if not os.path.isabs(location):
        return ''

    if not os.path.isdir(os.path.dirname(location)):
        return ''

    if not __salt__['cmd.retcode'](
            'qemu-img create -f {0} {1} {2}M'.format(fmt, location, size),
            python_shell=False):
        return location
    return ''
def create_content_type(json):
    """Create :class:`.resource.ContentType` from JSON.

    :param json: JSON dict.
    :return: ContentType instance.
    """
    result = ContentType(json['sys'])
    for field in json['fields']:
        field_id = field['id']
        del field['id']
        result.fields[field_id] = field
    result.name = json['name']
    result.display_field = json.get('displayField')
    return result
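A minimal, hypothetical payload in the shape this factory expects, assuming create_content_type and the ContentType resource class are importable from the surrounding module:

json = {
    'sys': {'id': 'cat', 'type': 'ContentType'},
    'name': 'Cat',
    'displayField': 'name',
    'fields': [
        {'id': 'name', 'name': 'Name', 'type': 'Text'},
        {'id': 'lives', 'name': 'Lives', 'type': 'Integer'},
    ],
}

ct = create_content_type(json)
print(ct.name)                    # 'Cat'
print(ct.display_field)           # 'name'
print(ct.fields['name']['type'])  # 'Text'

Note that the loop mutates its input: each field dict loses its 'id' key, which becomes the key in ct.fields instead.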
def make_primitive(cas_coords, window_length=3):
    """Calculates running average of cas_coords with a fixed averaging
    window_length.

    Parameters
    ----------
    cas_coords : list(numpy.array or float or tuple)
        Each element of the list must have length 3.
    window_length : int, optional
        The number of coordinate sets to average each time.

    Returns
    -------
    s_primitive : list(numpy.array)
        Each array has length 3.

    Raises
    ------
    ValueError
        If the length of cas_coords is smaller than the window_length.
    """
    if len(cas_coords) >= window_length:
        primitive = []
        count = 0
        for _ in cas_coords[:-(window_length - 1)]:
            group = cas_coords[count:count + window_length]
            average_x = sum([x[0] for x in group]) / window_length
            average_y = sum([y[1] for y in group]) / window_length
            average_z = sum([z[2] for z in group]) / window_length
            primitive.append(numpy.array([average_x, average_y, average_z]))
            count += 1
    else:
        raise ValueError(
            'A primitive cannot be generated for {0} atoms using a (too large) '
            'averaging window_length of {1}.'.format(
                len(cas_coords), window_length))
    return primitive
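A runnable sketch of the running average on five collinear points, assuming make_primitive (and its module-level numpy import) is importable:

cas_coords = [(0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (2.0, 0.0, 0.0),
              (3.0, 0.0, 0.0), (4.0, 0.0, 0.0)]

primitive = make_primitive(cas_coords, window_length=3)
# Three averaged points remain:
# [1. 0. 0.], [2. 0. 0.], [3. 0. 0.]
for point in primitive:
    print(point)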
def base_warfare(name, bases, attributes):
    """
    Adds any number of attributes to an existing class.

    :param name: Name.
    :type name: unicode
    :param bases: Bases.
    :type bases: list
    :param attributes: Attributes.
    :type attributes: dict
    :return: Base.
    :rtype: object
    """
    assert len(bases) == 1, "{0} | '{1}' object has multiple bases!".format(__name__, name)

    base = foundations.common.get_first_item(bases)
    for name, value in attributes.iteritems():
        if name != "__metaclass__":
            setattr(base, name, value)

    return base
def _translate_string(self, data, length):
    """Translate string into character texture positions"""
    for index, char in enumerate(data):
        if index == length:
            break
        yield self._meta.characters - 1 - self._ct[char]
def publish(self, topic, data, defer=None):
    """Publish a message to the given topic over tcp.

    :param topic: the topic to publish to
    :param data: bytestring data to publish
    :param defer: duration in milliseconds to defer before publishing
        (requires nsq 0.3.6)
    """
    if defer is None:
        self.send(nsq.publish(topic, data))
    else:
        self.send(nsq.deferpublish(topic, data, defer))
def Cps(self):
    r'''Solid-phase heat capacity of the chemical at its current
    temperature, in units of [J/kg/K]. For calculation of this property at
    other temperatures, or specifying manually the method used to calculate
    it, and more - see the object oriented interface
    :obj:`thermo.heat_capacity.HeatCapacitySolid`; each Chemical instance
    creates one to actually perform the calculations. Note that that
    interface provides output in molar units.

    Examples
    --------
    >>> Chemical('palladium', T=400).Cps
    241.63563239992484
    >>> Pd = Chemical('palladium', T=400)
    >>> Cpsms = [Pd.HeatCapacitySolid.T_dependent_property(T) for T in np.linspace(300, 500, 5)]
    >>> [property_molar_to_mass(Cps, Pd.MW) for Cps in Cpsms]
    [234.40150347679008, 238.01856793835751, 241.63563239992484, 245.25269686149224, 248.86976132305958]
    '''
    Cpsm = self.HeatCapacitySolid(self.T)
    if Cpsm:
        return property_molar_to_mass(Cpsm, self.MW)
    return None
def mk_external_entity(metamodel, s_ee):
    '''
    Create a python object from a BridgePoint external entity with bridges
    realized as python member functions.
    '''
    bridges = many(s_ee).S_BRG[19]()
    names = [brg.Name for brg in bridges]
    EE = collections.namedtuple(s_ee.Key_Lett, names)

    # iterate the same bridge list so functions line up with their names
    funcs = list()
    for s_brg in bridges:
        fn = mk_bridge(metamodel, s_brg)
        funcs.append(fn)

    return EE(*funcs)
def callback(self, *incoming):
    """
    Gets called by the CallbackManager if a new message was received
    """
    message = incoming[0]
    if message:
        address, command = message[0], message[2]
        profile = self.get_profile(address)
        if profile is not None:
            try:
                getattr(profile, command)(self, message)
            except AttributeError:
                pass
def _updateToAdded(self, directory, fn, dentry, db, service):
    """Changes the status to 'A' as long as a handler exists; also
    generates a hash.

    directory - dir where stuff is happening
    fn        - file name to be added
    dentry    - dictionary entry as returned by GetStatus for this file
    db        - pusher DB for this directory
    service   - None means all services, otherwise looks for service
    """
    services = self.sman.GetServices(fn)
    # If nobody manages this file, just skip it
    if services is None:
        print("%s - No services handle this file" % (fn))
        return
    # Build up list of names
    servicenames = []
    for s in services:
        servicenames.append(s.GetName())
    if service is not None and service not in servicenames:
        print("%s - Requested service (%s) not available for this file"
              % (fn, service))
        return
    # If service is none, build list of all services
    # to perform this action on
    if service is None:
        servicelist = servicenames
    else:
        servicelist = [service]

    if not db.has_key(fn):
        # Since this is a new entry, populate with stuff
        # we got from GetStatus for this file (usually mtime)
        db[fn] = dentry
        del db[fn]['status']      # Delete this key we're not using
        db[fn]['services'] = {}   # Empty dictionary of services
                                  # that manages this file + status
        # Now add the hash
        db[fn]['hash'] = self._hashfile(os.path.join(directory, fn))

    # Now run through services and see if we should
    # perform actions
    for service in servicelist:
        if not db[fn]['services'].has_key(service):
            db[fn]['services'][service] = {}
            db[fn]['services'][service]['status'] = self.ST_ADDED
        else:
            print("%s - Already managed by service %s, maybe do a 'push'?"
                  % (fn, service))
    logger.info('%s - managers: %s' % (fn, db[fn]['services'].keys()))
    return
def out_of_bag_mae(self):
    """
    Returns the mean absolute error for predictions on the out-of-bag
    samples.
    """
    if not self._out_of_bag_mae_clean:
        try:
            self._out_of_bag_mae = self.test(self.out_of_bag_samples)
            self._out_of_bag_mae_clean = True
        except NodeNotReadyToPredict:
            return
    return self._out_of_bag_mae.copy()
def _load_metadata(self):
    """load metadata only if needed"""
    if self._schema is None:
        self._schema = self._get_schema()
        self.datashape = self._schema.datashape
        self.dtype = self._schema.dtype
        self.shape = self._schema.shape
        self.npartitions = self._schema.npartitions
        self.metadata.update(self._schema.extra_metadata)
def message(self, data):
    """Function to display messages to the user
    """
    msg = gtk.MessageDialog(None, gtk.DIALOG_MODAL, gtk.MESSAGE_INFO,
                            gtk.BUTTONS_CLOSE, data)
    msg.set_resizable(1)
    msg.set_title(self.dialog_title)
    self.img.set_from_file(self.sun_icon)
    msg.set_image(self.img)
    msg.show_all()
    msg.run()
    msg.destroy()
def custom_field_rendering(context, field, *args, **kwargs):
    """ Wrapper for rendering the field via an external renderer """
    if CUSTOM_FIELD_RENDERER:
        mod, cls = CUSTOM_FIELD_RENDERER.rsplit(".", 1)
        field_renderer = getattr(import_module(mod), cls)
        if field_renderer:
            return field_renderer(field, **kwargs).render()
    return field
def action(route, template='', methods=['GET']):
    """Decorator to create an action"""
    def real_decorator(function):
        function.pi_api_action = True
        function.pi_api_route = route
        function.pi_api_template = template
        function.pi_api_methods = methods

        if hasattr(function, 'pi_api_crossdomain'):
            if not function.pi_api_crossdomain_data['methods']:
                function.pi_api_crossdomain_data['methods'] = methods
            if 'OPTIONS' not in function.pi_api_methods:
                # build a new list rather than mutating the shared
                # (default) `methods` list in place
                function.pi_api_methods = function.pi_api_methods + ['OPTIONS']

        return function
    return real_decorator
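A hedged sketch of how a handler might be declared with this decorator (the route and body are illustrative only); the decorator merely annotates the function, and a framework is expected to discover the pi_api_* attributes later:

@action('/status', template='status.html', methods=['GET', 'POST'])
def status():
    return {'ok': True}

assert status.pi_api_action is True
assert status.pi_api_route == '/status'
assert status.pi_api_methods == ['GET', 'POST']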
def _parse_tables(cls, parsed_content):
    """
    Parses the information tables contained in a character's page.

    Parameters
    ----------
    parsed_content: :class:`bs4.BeautifulSoup`
        A :class:`BeautifulSoup` object containing all the content.

    Returns
    -------
    :class:`OrderedDict`[str, :class:`list` of :class:`bs4.Tag`]
        A dictionary containing all the table rows, with the table headers
        as keys.
    """
    tables = parsed_content.find_all('table', attrs={"width": "100%"})
    output = OrderedDict()
    for table in tables:
        title = table.find("td").text
        output[title] = table.find_all("tr")[1:]
    return output
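An illustrative call on hand-written HTML in the shape the parser expects (a header row followed by data rows in each width="100%" table); since cls is not used by the body, None suffices for a standalone demo:

from bs4 import BeautifulSoup

html = '''
<table width="100%">
  <tr><td>Character Information</td></tr>
  <tr><td>Name:</td><td>Galarzaa</td></tr>
  <tr><td>World:</td><td>Fidera</td></tr>
</table>
'''
parsed_content = BeautifulSoup(html, 'html.parser')

tables = _parse_tables(None, parsed_content)  # cls is unused in the body
print(list(tables.keys()))                    # ['Character Information']
print(len(tables['Character Information']))   # 2 rows after the header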
def alias_event_handler(_, **kwargs):
    """ An event handler for alias transformation when the
    EVENT_INVOKER_PRE_TRUNCATE_CMD_TBL event is invoked """
    try:
        telemetry.start()

        start_time = timeit.default_timer()
        args = kwargs.get('args')
        alias_manager = AliasManager(**kwargs)

        # [:] will keep the reference of the original args
        args[:] = alias_manager.transform(args)

        if is_alias_command(['create', 'import'], args):
            load_cmd_tbl_func = kwargs.get('load_cmd_tbl_func', lambda _: {})
            cache_reserved_commands(load_cmd_tbl_func)

        elapsed_time = (timeit.default_timer() - start_time) * 1000
        logger.debug(DEBUG_MSG_WITH_TIMING, args, elapsed_time)
        telemetry.set_execution_time(round(elapsed_time, 2))
    except Exception as client_exception:  # pylint: disable=broad-except
        telemetry.set_exception(client_exception)
        raise
    finally:
        telemetry.conclude()
def serialize(self,
              node: SchemaNode,
              appstruct: Union[PotentialDatetimeType, ColanderNullType]) \
        -> Union[str, ColanderNullType]:
    """
    Serializes Python object to string representation.
    """
    if not appstruct:
        return colander.null
    try:
        appstruct = coerce_to_pendulum(appstruct,
                                       assume_local=self.use_local_tz)
    except (ValueError, ParserError) as e:
        raise Invalid(node, "{!r} is not a pendulum.DateTime object; "
                            "error was {!r}".format(appstruct, e))
    return appstruct.isoformat()
def build_frame(command, payload):
    """Build raw bytes from command and payload."""
    packet_length = 2 + len(payload) + 1
    ret = struct.pack("BB", 0, packet_length)
    ret += struct.pack(">H", command.value)
    ret += payload
    ret += struct.pack("B", calc_crc(ret))
    return ret
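A worked example of the frame layout, assuming build_frame is importable and using a stand-in command enum (the real module defines its own; this one is hypothetical):

from enum import Enum

class Command(Enum):
    PING = 0x0005  # hypothetical command id, for illustration only

frame = build_frame(Command.PING, b"\x01\x02\x03")
# Layout: 0x00, packet_length (2 + 3 + 1 = 6), command as big-endian uint16,
# then the payload, then one CRC byte computed over everything before it.
assert frame[:4] == b"\x00\x06\x00\x05"
assert frame[4:7] == b"\x01\x02\x03"
assert len(frame) == 8  # header/command (4) + payload (3) + crc (1)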
def create_throttle():
    """ Create a THROTTLE statement """
    throttle_amount = "*" | Combine(number + "%") | number
    return Group(
        function("throttle", throttle_amount, throttle_amount, caseless=True)
    ).setResultsName("throttle")
def save_webdriver_logs(self, test_name):
    """Get webdriver logs and write them to log files

    :param test_name: test that has generated these logs
    """
    try:
        log_types = self.driver_wrapper.driver.log_types
    except Exception:
        # geckodriver does not implement log_types, but it implements
        # get_log for client and server
        log_types = ['client', 'server']

    self.logger.debug("Reading logs from '%s' and writing them to log files",
                      ', '.join(log_types))
    for log_type in log_types:
        try:
            self.save_webdriver_logs_by_type(log_type, test_name)
        except Exception:
            # Capture exceptions to avoid errors in teardown method
            pass
def profile_add(user, profile):
    '''
    Add profile to user

    user : string
        username
    profile : string
        profile name

    CLI Example:

    .. code-block:: bash

        salt '*' rbac.profile_add martine 'Primary Administrator'
        salt '*' rbac.profile_add martine 'User Management,User Security'
    '''
    ret = {}

    ## validate profiles
    profiles = profile.split(',')
    known_profiles = profile_list().keys()
    valid_profiles = [p for p in profiles if p in known_profiles]
    log.debug(
        'rbac.profile_add - profiles=%s, known_profiles=%s, valid_profiles=%s',
        profiles,
        known_profiles,
        valid_profiles,
    )

    ## update user profiles
    if valid_profiles:
        res = __salt__['cmd.run_all']('usermod -P "{profiles}" {login}'.format(
            login=user,
            profiles=','.join(set(profile_get(user) + valid_profiles)),
        ))
        if res['retcode'] > 0:
            ret['Error'] = {
                'retcode': res['retcode'],
                'message': res['stderr'] if 'stderr' in res else res['stdout']
            }
            return ret

    ## update return value
    active_profiles = profile_get(user, False)
    for p in profiles:
        if p not in valid_profiles:
            ret[p] = 'Unknown'
        elif p in active_profiles:
            ret[p] = 'Added'
        else:
            ret[p] = 'Failed'

    return ret
def place(vertices_resources, nets, machine, constraints,
          vertex_order=None, chip_order=None):
    """Blindly places vertices in sequential order onto chips in the machine.

    This algorithm sequentially places vertices onto chips in the order
    specified (or in an undefined order if not specified). This algorithm is
    essentially the simplest possible valid placement algorithm and is
    intended to form the basis of other simple sequential and greedy placers.

    The algorithm proceeds by attempting to place each vertex on a chip. If
    the vertex fits we move onto the next vertex (but keep filling the same
    chip). If the vertex does not fit we move onto the next candidate chip
    until we find somewhere the vertex fits.

    The algorithm will raise an
    :py:exc:`rig.place_and_route.exceptions.InsufficientResourceError` if it
    has failed to fit a vertex on every chip.

    Parameters
    ----------
    vertex_order : None or iterable
        The order in which the vertices should be attempted to be placed.

        If None (the default), the vertices will be placed in the default
        iteration order of the ``vertices_resources`` argument. If an
        iterable, the iteration sequence should produce each vertex in
        vertices_resources *exactly once*.
    chip_order : None or iterable
        The order in which chips should be tried as a candidate location
        for a vertex.

        If None (the default), the chips will be used in the default
        iteration order of the ``machine`` object (a raster scan). If an
        iterable, the iteration sequence should produce (x, y) pairs giving
        the coordinates of chips to use. All working chip coordinates must
        be included in the iteration sequence *exactly once*. Additional
        chip coordinates of non-existent or dead chips are also allowed
        (and will simply be skipped).
    """
    # If no vertices to place, just stop (from here on we presume that at
    # least one vertex will be placed)
    if len(vertices_resources) == 0:
        return {}

    # Within the algorithm we modify the resource availability values in the
    # machine to account for the effects of the current placement. As a
    # result, an internal copy of the structure must be made.
    machine = machine.copy()

    # {vertex: (x, y), ...} gives the location of all vertices, updated
    # throughout the function.
    placements = {}

    # Handle constraints
    vertices_resources, nets, constraints, substitutions = \
        apply_same_chip_constraints(vertices_resources, nets, constraints)
    for constraint in constraints:
        if isinstance(constraint, LocationConstraint):
            # Location constraints are handled by recording the set of fixed
            # vertex locations and subtracting their resources from the
            # chips they're allocated to.
            location = constraint.location
            if location not in machine:
                raise InvalidConstraintError(
                    "Chip requested by {} unavailable".format(machine))
            vertex = constraint.vertex

            # Record the constrained vertex's location
            placements[vertex] = location

            # Make sure the vertex fits at the requested location (updating
            # the resource availability after placement)
            resources = vertices_resources[vertex]
            machine[location] = subtract_resources(machine[location],
                                                   resources)
            if overallocated(machine[location]):
                raise InsufficientResourceError(
                    "Cannot meet {}".format(constraint))
        elif isinstance(constraint,  # pragma: no branch
                        ReserveResourceConstraint):
            apply_reserve_resource_constraint(machine, constraint)

    if vertex_order is not None:
        # Must modify the vertex_order to substitute the merged vertices
        # inserted by apply_reserve_resource_constraint.
        vertex_order = list(vertex_order)
        for merged_vertex in substitutions:
            # Swap the first merged vertex for its MergedVertex object and
            # remove all other vertices from the merged set
            vertex_order[vertex_order.index(merged_vertex.vertices[0])] \
                = merged_vertex

            # Remove all other vertices in the MergedVertex
            already_removed = set([merged_vertex.vertices[0]])
            for vertex in merged_vertex.vertices[1:]:
                if vertex not in already_removed:
                    vertex_order.remove(vertex)
                    already_removed.add(vertex)

    # The set of vertices which have not been constrained, in iteration
    # order
    movable_vertices = (v for v in (vertices_resources
                                    if vertex_order is None
                                    else vertex_order)
                        if v not in placements)

    # A cyclic iterator over all available chips
    chips = cycle(c for c in (machine if chip_order is None else chip_order)
                  if c in machine)
    chips_iter = iter(chips)

    try:
        cur_chip = next(chips_iter)
    except StopIteration:
        raise InsufficientResourceError("No working chips in machine.")

    # The last chip that we successfully placed something on. Used to detect
    # when we've tried all available chips and not found a suitable
    # candidate
    last_successful_chip = cur_chip

    # Place each vertex in turn
    for vertex in movable_vertices:
        while True:
            resources_if_placed = subtract_resources(
                machine[cur_chip], vertices_resources[vertex])

            if not overallocated(resources_if_placed):
                # The vertex fits: record the resources consumed and move on
                # to the next vertex.
                placements[vertex] = cur_chip
                machine[cur_chip] = resources_if_placed
                last_successful_chip = cur_chip
                break
            else:
                # The vertex won't fit on this chip, move onto the next one
                # available.
                cur_chip = next(chips_iter)

                # If we've looped around all the available chips without
                # managing to place the vertex, give up!
                if cur_chip == last_successful_chip:
                    raise InsufficientResourceError(
                        "Ran out of chips while attempting to place vertex "
                        "{}".format(vertex))

    finalise_same_chip_constraints(substitutions, placements)

    return placements
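A hedged usage sketch; the Machine/Cores import path is an assumption and may differ between rig versions:

from rig.machine import Machine, Cores  # assumed import path

vertices_resources = {"v0": {Cores: 1}, "v1": {Cores: 1}, "v2": {Cores: 17}}
machine = Machine(2, 2)  # a 2x2 grid of chips, 18 cores each by default

placements = place(vertices_resources, nets=[], machine=machine,
                   constraints=[])
# Vertices fill the first chip until one no longer fits, then spill over to
# the next chip in the scan order, e.g.:
# {'v0': (0, 0), 'v1': (0, 0), 'v2': (1, 0)}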
def draw(filestems, gformat):
    """Draw ANIb/ANIm/TETRA results

    - filestems - filestems for output files
    - gformat - the format for output graphics
    """
    # Draw heatmaps
    for filestem in filestems:
        fullstem = os.path.join(args.outdirname, filestem)
        outfilename = fullstem + ".%s" % gformat
        infilename = fullstem + ".tab"
        df = pd.read_csv(infilename, index_col=0, sep="\t")
        logger.info("Writing heatmap to %s", outfilename)
        params = pyani_graphics.Params(
            params_mpl(df)[filestem],
            pyani_tools.get_labels(args.labels),
            pyani_tools.get_labels(args.classes),
        )
        if args.gmethod == "mpl":
            pyani_graphics.heatmap_mpl(
                df, outfilename=outfilename, title=filestem, params=params
            )
        elif args.gmethod == "seaborn":
            pyani_graphics.heatmap_seaborn(
                df, outfilename=outfilename, title=filestem, params=params
            )
def run(self, *,
        connector: Union[EnvVar, Token, SlackClient, None] = None,
        interval: float = 0.5,
        retries: int = 16,
        backoff: Callable[[int], float] = None,
        until: Callable[[List[dict]], bool] = None) -> None:
    """ Connect to the Slack API and run the event handler loop.

    Args:
        connector: A means of connecting to the Slack API. This can be an
            API :obj:`Token`, an :obj:`EnvVar` from which a token can be
            retrieved, or an established :obj:`SlackClient` instance. If
            absent an attempt will be made to use the ``LAYABOUT_TOKEN``
            environment variable.
        interval: The number of seconds to wait between fetching events
            from the Slack API.
        retries: The number of retry attempts to make if a connection to
            Slack is not established or is lost.
        backoff: The strategy used to determine how long to wait between
            retries. Must take as input the number of the current retry
            and output a :obj:`float`. The retry count begins at 1 and
            continues up to ``retries``. If absent a `truncated exponential
            backoff`_ strategy will be used.
        until: The condition used to evaluate whether this method
            terminates. Must take as input a :obj:`list` of :obj:`dict`
            representing Slack RTM API events and return a :obj:`bool`. If
            absent this method will run forever.

    Raises:
        TypeError: If an unsupported connector is given.
        MissingToken: If no API token is available.
        FailedConnection: If connecting to the Slack API fails.

    .. _truncated exponential backoff:
        https://cloud.google.com/storage/docs/exponential-backoff
    """
    backoff = backoff or _truncated_exponential
    until = until or _forever

    self._ensure_slack(
        connector=connector,
        retries=retries,
        backoff=backoff
    )
    assert self._slack is not None

    while True:
        events = self._slack.fetch_events()

        if not until(events):
            log.debug('Exiting event loop')
            break

        # Handle events!
        for event in events:
            type_ = event.get('type', '')
            for handler in self._handlers[type_] + self._handlers['*']:
                fn, kwargs = handler
                fn(self._slack.inner, event, **kwargs)

        # Maybe don't pester the Slack API too much.
        time.sleep(interval)
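A sketch of the kind of strategy the default backoff implements, following the linked Google Cloud description (the real _truncated_exponential may differ in its cap and jitter):

import random

def truncated_exponential(retry: int, maximum: float = 64.0) -> float:
    # 2^retry seconds plus sub-second jitter, capped at `maximum`.
    return min(2 ** retry + random.random(), maximum)

# Could then be passed to run() as a custom strategy:
# layabout.run(backoff=truncated_exponential)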
def get_facet_objects_serializer(self, *args, **kwargs):
    """
    Return the serializer instance which should be used for serializing
    faceted objects.
    """
    facet_objects_serializer_class = self.get_facet_objects_serializer_class()
    kwargs["context"] = self.get_serializer_context()
    return facet_objects_serializer_class(*args, **kwargs)
def grow(self, width, height=None):
    """ Expands the region by ``width`` on both sides and ``height`` on the
    top and bottom. If only one value is provided, expands the region by
    that amount on all sides. Equivalent to ``nearby()``.
    """
    if height is None:
        return self.nearby(width)
    else:
        return Region(
            self.x - width,
            self.y - height,
            self.w + (2 * width),
            self.h + (2 * height)).clipRegionToScreen()
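Illustrative geometry, assuming a Region(x, y, w, h) constructor as used above:

r = Region(100, 100, 50, 40)
bigger = r.grow(10, 5)
# bigger spans x=90, y=95, w=70, h=50 (10 px added on each side and 5 px on
# top and bottom) before clipRegionToScreen() trims it to the screen bounds.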
def on(self, dev_id):
    """Turn ON all features of the device.

    schedules, weather intelligence, water budget, etc.
    """
    path = 'device/on'
    payload = {'id': dev_id}
    return self.rachio.put(path, payload)
def analog_sensor_power(cls, bus, operation):
    """ Method that turns on all of the analog sensor modules, including
    all attached soil moisture sensors.

    Note that all of the SensorCluster objects should be attached in
    parallel, and only 1 GPIO pin is available to toggle analog sensor
    power. The sensor power should be left on for at least 100ms in order
    to allow the sensors to stabilize before reading.

    Usage:
        SensorCluster.analog_sensor_power(bus, "on")
        OR
        SensorCluster.analog_sensor_power(bus, "off")

    This method should be removed if an off-board GPIO extender is used.
    """
    # Set appropriate analog sensor power bit in GPIO mask
    # using the ControlCluster bank_mask to avoid overwriting any data
    reg_data = get_IO_reg(bus, 0x20, cls.power_bank)
    if operation == "on":
        reg_data = reg_data | 1 << cls.analog_power_pin
    elif operation == "off":
        reg_data = reg_data & (0b11111111 ^ (1 << cls.analog_power_pin))
    else:
        raise SensorError(
            "Invalid command used while enabling analog sensors")
    # Send updated IO mask to output
    IO_expander_output(bus, 0x20, cls.power_bank, reg_data)
def mavlink_packet(self, m):
    '''handle an incoming mavlink packet'''
    mtype = m.get_type()
    if mtype in ['WAYPOINT_COUNT', 'MISSION_COUNT']:
        if self.wp_op is None:
            self.console.error("No waypoint load started")
        else:
            self.wploader.clear()
            self.wploader.expected_count = m.count
            self.console.writeln("Requesting %u waypoints t=%s now=%s" % (
                m.count,
                time.asctime(time.localtime(m._timestamp)),
                time.asctime()))
            self.master.waypoint_request_send(0)

    elif mtype in ['WAYPOINT', 'MISSION_ITEM'] and self.wp_op is not None:
        if m.seq > self.wploader.count():
            self.console.writeln("Unexpected waypoint number %u - expected %u" % (m.seq, self.wploader.count()))
        elif m.seq < self.wploader.count():
            # a duplicate
            pass
        else:
            self.wploader.add(m)
        if m.seq + 1 < self.wploader.expected_count:
            self.master.waypoint_request_send(m.seq + 1)
        else:
            if self.wp_op == 'list':
                for i in range(self.wploader.count()):
                    w = self.wploader.wp(i)
                    print("%u %u %.10f %.10f %f p1=%.1f p2=%.1f p3=%.1f p4=%.1f cur=%u auto=%u" % (
                        w.command, w.frame, w.x, w.y, w.z,
                        w.param1, w.param2, w.param3, w.param4,
                        w.current, w.autocontinue))
                if self.logdir is not None:
                    waytxt = os.path.join(self.logdir, 'way.txt')
                    self.save_waypoints(waytxt)
                    print("Saved waypoints to %s" % waytxt)
            elif self.wp_op == "save":
                self.save_waypoints(self.wp_save_filename)
            self.wp_op = None

    elif mtype in ["WAYPOINT_REQUEST", "MISSION_REQUEST"]:
        self.process_waypoint_request(m, self.master)

    elif mtype in ["WAYPOINT_CURRENT", "MISSION_CURRENT"]:
        if m.seq != self.last_waypoint:
            self.last_waypoint = m.seq
            if self.settings.wpupdates:
                self.say("waypoint %u" % m.seq, priority='message')
def which_with_node_modules(self):
    """
    Which with node_path and node_modules
    """
    if self.binary is None:
        return None

    # first, log down the pedantic things...
    if isdir(self.join_cwd(NODE_MODULES)):
        logger.debug(
            "'%s' instance will attempt to locate '%s' binary from "
            "%s%s%s%s%s, located through the working directory",
            self.__class__.__name__, self.binary, self.join_cwd(),
            sep, NODE_MODULES, sep, NODE_MODULES_BIN,
        )
    if self.node_path:
        logger.debug(
            "'%s' instance will attempt to locate '%s' binary from "
            "its %s of %s",
            self.__class__.__name__, self.binary, NODE_PATH, self.node_path,
        )

    paths = self.find_node_modules_basedir()
    whichpaths = pathsep.join(join(p, NODE_MODULES_BIN) for p in paths)

    if paths:
        logger.debug(
            "'%s' instance located %d possible paths to the '%s' binary, "
            "which are %s",
            self.__class__.__name__, len(paths), self.binary, whichpaths,
        )

    return which(self.binary, path=whichpaths)
def make_script_directory(cls, config):
    """
    Alembic uses a "script directory" to encapsulate its `env.py` file,
    its migrations directory, and its `script.py.mako` revision template.

    We'd rather not have such a directory at all as the default `env.py`
    rarely works without manipulation, migrations are better saved in a
    location within the source tree, and revision templates shouldn't vary
    between projects.

    Instead, generate a `ScriptDirectory` object, injecting values from the
    config.
    """
    temporary_dir = config.get_main_option("temporary_dir")
    migrations_dir = config.get_main_option("migrations_dir")

    return cls(
        dir=temporary_dir,
        version_locations=[migrations_dir],
    )
def ModuleLogger(globs):
    """Create a module level logger.

    To debug a module, create a _debug variable in the module, then use the
    ModuleLogger function to create a "module level" logger. When a handler
    is added to this logger or a child of this logger, the _debug variable
    will be incremented.

    All of the calls within functions or class methods within the module
    should first check to see if _debug is set to prevent calls to
    formatter objects that aren't necessary.
    """
    # make sure that _debug is defined
    if not globs.has_key('_debug'):
        raise RuntimeError("define _debug before creating a module logger")

    # logger name is the module name
    logger_name = globs['__name__']

    # create a logger to be assigned to _log
    logger = logging.getLogger(logger_name)

    # put in a reference to the module globals
    logger.globs = globs

    # if this is a "root" logger add a default handler for warnings and up
    if '.' not in logger_name:
        hdlr = logging.StreamHandler()
        hdlr.setLevel(logging.WARNING)
        hdlr.setFormatter(logging.Formatter(logging.BASIC_FORMAT, None))
        logger.addHandler(hdlr)

    return logger
Create a module level logger. To debug a module, create a _debug variable in the module, then use the ModuleLogger function to create a "module level" logger. When a handler is added to this logger or a child of this logger, the _debug variable will be incremented. All of the calls within functions or class methods within the module should first check to see if _debug is set to prevent calls to formatter objects that aren't necessary.
def position_target_global_int_encode(self, time_boot_ms, coordinate_frame, type_mask, lat_int, lon_int, alt, vx, vy, vz, afx, afy, afz, yaw, yaw_rate): ''' Reports the current commanded vehicle position, velocity, and acceleration as specified by the autopilot. This should match the commands sent in SET_POSITION_TARGET_GLOBAL_INT if the vehicle is being controlled this way. time_boot_ms : Timestamp in milliseconds since system boot. The rationale for the timestamp in the setpoint is to allow the system to compensate for the transport delay of the setpoint. This allows the system to compensate processing latency. (uint32_t) coordinate_frame : Valid options are: MAV_FRAME_GLOBAL_INT = 5, MAV_FRAME_GLOBAL_RELATIVE_ALT_INT = 6, MAV_FRAME_GLOBAL_TERRAIN_ALT_INT = 11 (uint8_t) type_mask : Bitmask to indicate which dimensions should be ignored by the vehicle: a value of 0b0000000000000000 or 0b0000001000000000 indicates that none of the setpoint dimensions should be ignored. If bit 10 is set the floats afx afy afz should be interpreted as force instead of acceleration. Mapping: bit 1: x, bit 2: y, bit 3: z, bit 4: vx, bit 5: vy, bit 6: vz, bit 7: ax, bit 8: ay, bit 9: az, bit 10: is force setpoint, bit 11: yaw, bit 12: yaw rate (uint16_t) lat_int : X Position in WGS84 frame in 1e7 * meters (int32_t) lon_int : Y Position in WGS84 frame in 1e7 * meters (int32_t) alt : Altitude in meters in AMSL altitude, not WGS84 if absolute or relative, above terrain if GLOBAL_TERRAIN_ALT_INT (float) vx : X velocity in NED frame in meter / s (float) vy : Y velocity in NED frame in meter / s (float) vz : Z velocity in NED frame in meter / s (float) afx : X acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float) afy : Y acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float) afz : Z acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float) yaw : yaw setpoint in rad (float) yaw_rate : yaw rate setpoint in rad/s (float) ''' return MAVLink_position_target_global_int_message(time_boot_ms, coordinate_frame, type_mask, lat_int, lon_int, alt, vx, vy, vz, afx, afy, afz, yaw, yaw_rate)
Reports the current commanded vehicle position, velocity, and acceleration as specified by the autopilot. This should match the commands sent in SET_POSITION_TARGET_GLOBAL_INT if the vehicle is being controlled this way. time_boot_ms : Timestamp in milliseconds since system boot. The rationale for the timestamp in the setpoint is to allow the system to compensate for the transport delay of the setpoint. This allows the system to compensate processing latency. (uint32_t) coordinate_frame : Valid options are: MAV_FRAME_GLOBAL_INT = 5, MAV_FRAME_GLOBAL_RELATIVE_ALT_INT = 6, MAV_FRAME_GLOBAL_TERRAIN_ALT_INT = 11 (uint8_t) type_mask : Bitmask to indicate which dimensions should be ignored by the vehicle: a value of 0b0000000000000000 or 0b0000001000000000 indicates that none of the setpoint dimensions should be ignored. If bit 10 is set the floats afx afy afz should be interpreted as force instead of acceleration. Mapping: bit 1: x, bit 2: y, bit 3: z, bit 4: vx, bit 5: vy, bit 6: vz, bit 7: ax, bit 8: ay, bit 9: az, bit 10: is force setpoint, bit 11: yaw, bit 12: yaw rate (uint16_t) lat_int : X Position in WGS84 frame in 1e7 * meters (int32_t) lon_int : Y Position in WGS84 frame in 1e7 * meters (int32_t) alt : Altitude in meters in AMSL altitude, not WGS84 if absolute or relative, above terrain if GLOBAL_TERRAIN_ALT_INT (float) vx : X velocity in NED frame in meter / s (float) vy : Y velocity in NED frame in meter / s (float) vz : Z velocity in NED frame in meter / s (float) afx : X acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float) afy : Y acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float) afz : Z acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float) yaw : yaw setpoint in rad (float) yaw_rate : yaw rate setpoint in rad/s (float)
def _make_ssh_forward_handler_class(self, remote_address_): """ Make SSH Handler class """ class Handler(_ForwardHandler): remote_address = remote_address_ ssh_transport = self._transport logger = self.logger return Handler
Make SSH Handler class
def interfaces(self): """Get the wifi interface lists.""" ifaces = [] for f in sorted(os.listdir(CTRL_IFACE_DIR)): sock_file = '/'.join([CTRL_IFACE_DIR, f]) mode = os.stat(sock_file).st_mode if stat.S_ISSOCK(mode): iface = {} iface['name'] = f ifaces.append(iface) self._connect_to_wpa_s(f) return ifaces
Get the wifi interface lists.
def who(self, target): """ Runs a WHO on a target Required arguments: * target - /WHO <target> Returns a dictionary, with a nick as the key and - the value is a list in the form of; [0] - Username [1] - Priv level [2] - Real name [3] - Hostname """ with self.lock: self.send('WHO %s' % target) who_lst = {} while self.readable(): msg = self._recv(expected_replies=('352', '315')) if msg[0] == '352': raw_who = msg[2].split(None, 7) prefix = raw_who[5].replace('H', '', 1).replace('*', '', 1) channel = raw_who[0] nick = raw_who[4] if prefix == '~': self.channels[channel]['USERS'][nick] = \ ['~', '', '', '', ''] elif prefix == '&': self.channels[channel]['USERS'][nick] = \ ['', '&', '', '', ''] elif prefix == '@': self.channels[channel]['USERS'][nick] = \ ['', '', '@', '', ''] elif prefix == '%': self.channels[channel]['USERS'][nick] = \ ['', '', '', '%', ''] elif prefix == '+': self.channels[channel]['USERS'][nick] = \ ['', '', '', '', '+'] else: self.channels[channel]['USERS'][nick] = \ ['', '', '', '', ''] who_lst[raw_who[4]] = raw_who[1], prefix, \ raw_who[7], raw_who[2] elif msg[0] == '315': return who_lst
Runs a WHO on a target Required arguments: * target - /WHO <target> Returns a dictionary, with a nick as the key and - the value is a list in the form of; [0] - Username [1] - Priv level [2] - Real name [3] - Hostname
def remove_host(kwargs=None, call=None): ''' Remove the specified host system from this VMware environment CLI Example: .. code-block:: bash salt-cloud -f remove_host my-vmware-config host="myHostSystemName" ''' if call != 'function': raise SaltCloudSystemExit( 'The remove_host function must be called with ' '-f or --function.' ) host_name = kwargs.get('host') if kwargs and 'host' in kwargs else None if not host_name: raise SaltCloudSystemExit( 'You must specify name of the host system.' ) # Get the service instance si = _get_si() host_ref = salt.utils.vmware.get_mor_by_property(si, vim.HostSystem, host_name) if not host_ref: raise SaltCloudSystemExit( 'Specified host system does not exist.' ) try: if isinstance(host_ref.parent, vim.ClusterComputeResource): # This is a host system that is part of a Cluster task = host_ref.Destroy_Task() else: # This is a standalone host system task = host_ref.parent.Destroy_Task() salt.utils.vmware.wait_for_task(task, host_name, 'remove host', log_level='info') except Exception as exc: log.error( 'Error while removing host %s: %s', host_name, exc, # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) return {host_name: 'failed to remove host'} return {host_name: 'removed host from vcenter'}
Remove the specified host system from this VMware environment CLI Example: .. code-block:: bash salt-cloud -f remove_host my-vmware-config host="myHostSystemName"
def _tags_present(name, tags, vpc_id=None, vpc_name=None, region=None, key=None, keyid=None, profile=None): ''' helper function to validate tags are correct ''' ret = {'result': True, 'comment': '', 'changes': {}} if tags: sg = __salt__['boto_secgroup.get_config'](name=name, group_id=None, region=region, key=key, keyid=keyid, profile=profile, vpc_id=vpc_id, vpc_name=vpc_name) if not sg: ret['comment'] = '{0} security group configuration could not be retrieved.'.format(name) ret['result'] = False return ret tags_to_add = tags tags_to_update = {} tags_to_remove = [] if sg.get('tags'): for existing_tag in sg['tags']: if existing_tag not in tags: if existing_tag not in tags_to_remove: tags_to_remove.append(existing_tag) else: if tags[existing_tag] != sg['tags'][existing_tag]: tags_to_update[existing_tag] = tags[existing_tag] tags_to_add.pop(existing_tag) if tags_to_remove: if __opts__['test']: msg = 'The following tag{0} set to be removed: {1}.'.format( ('s are' if len(tags_to_remove) > 1 else ' is'), ', '.join(tags_to_remove)) ret['comment'] = ' '.join([ret['comment'], msg]) ret['result'] = None else: temp_ret = __salt__['boto_secgroup.delete_tags'](tags_to_remove, name=name, group_id=None, vpc_name=vpc_name, vpc_id=vpc_id, region=region, key=key, keyid=keyid, profile=profile) if not temp_ret: ret['result'] = False ret['comment'] = ' '.join([ ret['comment'], 'Error attempting to delete tags {0}.'.format(tags_to_remove) ]) return ret if 'old' not in ret['changes']: ret['changes'] = dictupdate.update(ret['changes'], {'old': {'tags': {}}}) for rem_tag in tags_to_remove: ret['changes']['old']['tags'][rem_tag] = sg['tags'][rem_tag] if tags_to_add or tags_to_update: if __opts__['test']: if tags_to_add: msg = 'The following tag{0} set to be added: {1}.'.format( ('s are' if len(tags_to_add.keys()) > 1 else ' is'), ', '.join(tags_to_add.keys())) ret['comment'] = ' '.join([ret['comment'], msg]) ret['result'] = None if tags_to_update: msg = 'The following tag {0} set to be updated: {1}.'.format( ('values are' if len(tags_to_update.keys()) > 1 else 'value is'), ', '.join(tags_to_update.keys())) ret['comment'] = ' '.join([ret['comment'], msg]) ret['result'] = None else: all_tag_changes = dictupdate.update(tags_to_add, tags_to_update) temp_ret = __salt__['boto_secgroup.set_tags'](all_tag_changes, name=name, group_id=None, vpc_name=vpc_name, vpc_id=vpc_id, region=region, key=key, keyid=keyid, profile=profile) if not temp_ret: ret['result'] = False msg = 'Error attempting to set tags.' ret['comment'] = ' '.join([ret['comment'], msg]) return ret if 'old' not in ret['changes']: ret['changes'] = dictupdate.update(ret['changes'], {'old': {'tags': {}}}) if 'new' not in ret['changes']: ret['changes'] = dictupdate.update(ret['changes'], {'new': {'tags': {}}}) for tag in all_tag_changes: ret['changes']['new']['tags'][tag] = tags[tag] if 'tags' in sg: if sg['tags']: if tag in sg['tags']: ret['changes']['old']['tags'][tag] = sg['tags'][tag] if not tags_to_update and not tags_to_remove and not tags_to_add: ret['comment'] = ' '.join([ret['comment'], 'Tags are already set.']) return ret
helper function to validate tags are correct
def member_command(self, member_id, command): """apply command (start/stop/restart) to member instance of replica set Args: member_id - member index command - string command (start/stop/restart) return True if operation success otherwise False """ server_id = self._servers.host_to_server_id( self.member_id_to_host(member_id)) return self._servers.command(server_id, command)
apply command (start/stop/restart) to member instance of replica set Args: member_id - member index command - string command (start/stop/restart) return True if operation success otherwise False
def pulse_train(time, start, duration, repeat_time, end): """ Implements vensim's PULSE TRAIN function In range [-inf, start) returns 0 In range [start + n * repeat_time, start + n * repeat_time + duration) return 1 In range [start + n * repeat_time + duration, start + (n+1) * repeat_time) return 0 """ t = time() if start <= t < end: return 1 if (t - start) % repeat_time < duration else 0 else: return 0
Implements vensim's PULSE TRAIN function In range [-inf, start) returns 0 In range [start + n * repeat_time, start + n * repeat_time + duration) return 1 In range [start + n * repeat_time + duration, start + (n+1) * repeat_time) return 0
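A quick worked example of the PULSE TRAIN semantics above (the `make_time` closure and the constants are illustrative, not part of the original model):

def make_time(t):
    # pulse_train expects a callable that returns the current time
    return lambda: t

for t in range(13):
    print(t, pulse_train(make_time(t), start=1, duration=3, repeat_time=5, end=12))
# prints 1 for t = 1, 2, 3, 6, 7, 8, 11 and 0 everywhere else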
def bytes_block_cast(block, include_text=True, include_link_tokens=True, include_css=True, include_features=True, **kwargs): """ Converts any string-like items in input Block object to bytes-like values, with respect to python version Parameters ---------- block : blocks.Block any string-like objects contained in the block object will be converted to bytes include_text : bool, default=True if True, cast text to bytes, else ignore include_link_tokens : bool, default=True if True, cast link_tokens to bytes, else ignore include_css : bool, default=True if True, cast css to bytes, else ignore include_features : bool, default=True if True, cast features to bytes, else ignore kwargs: encoding: str, default: 'utf-8' encoding to be used when encoding string """ if include_text: block.text = bytes_cast(block.text, **kwargs) if include_link_tokens: block.link_tokens = bytes_list_cast(block.link_tokens, **kwargs) if include_css: block.css = bytes_dict_cast(block.css, **kwargs) if include_features: block.features = bytes_dict_cast(block.features, **kwargs) return block
Converts any string-like items in input Block object to bytes-like values, with respect to python version Parameters ---------- block : blocks.Block any string-like objects contained in the block object will be converted to bytes include_text : bool, default=True if True, cast text to bytes, else ignore include_link_tokens : bool, default=True if True, cast link_tokens to bytes, else ignore include_css : bool, default=True if True, cast css to bytes, else ignore include_features : bool, default=True if True, cast features to bytes, else ignore kwargs: encoding: str, default: 'utf-8' encoding to be used when encoding string
def load_module(name, filename): '''Load a module into name given its filename''' if sys.version_info < (3, 5): import imp import warnings with warnings.catch_warnings(): # Required for Python 2.7 warnings.simplefilter("ignore", RuntimeWarning) return imp.load_source(name, filename) else: from importlib.machinery import SourceFileLoader loader = SourceFileLoader(name, filename) return loader.load_module()
Load a module into name given its filename
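A minimal usage sketch for load_module; the module name and file path below are hypothetical:

# load /tmp/settings.py under the module name "settings" (hypothetical path)
mod = load_module("settings", "/tmp/settings.py")
print(mod.__name__)  # -> "settings"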
def midl_emitter(target, source, env): """Produces a list of outputs from the MIDL compiler""" base, _ = SCons.Util.splitext(str(target[0])) tlb = target[0] incl = base + '.h' interface = base + '_i.c' targets = [tlb, incl, interface] midlcom = env['MIDLCOM'] if midlcom.find('/proxy') != -1: proxy = base + '_p.c' targets.append(proxy) if midlcom.find('/dlldata') != -1: dlldata = base + '_data.c' targets.append(dlldata) return (targets, source)
Produces a list of outputs from the MIDL compiler
def delete(self, container, del_objects=False): """ Deletes the specified container. If the container contains objects, the command will fail unless 'del_objects' is passed as True. In that case, each object will be deleted first, and then the container. """ if del_objects: nms = self.list_object_names(container, full_listing=True) self.api.bulk_delete(container, nms, async_=False) uri = "/%s" % utils.get_name(container) resp, resp_body = self.api.method_delete(uri)
Deletes the specified container. If the container contains objects, the command will fail unless 'del_objects' is passed as True. In that case, each object will be deleted first, and then the container.
def _post(url:str, params:dict, headers:dict) -> dict: """ Make a POST call. """ response = requests.post(url, params=params, headers=headers) data = response.json() if response.status_code != 200 or "error" in data: raise GoogleApiError({"status_code": response.status_code, "error": data.get("error", "")}) return data
Make a POST call.
def public_ip_addresses_list(resource_group, **kwargs): ''' .. versionadded:: 2019.2.0 List all public IP addresses within a resource group. :param resource_group: The resource group name to list public IP addresses within. CLI Example: .. code-block:: bash salt-call azurearm_network.public_ip_addresses_list testgroup ''' result = {} netconn = __utils__['azurearm.get_client']('network', **kwargs) try: pub_ips = __utils__['azurearm.paged_object_to_list']( netconn.public_ip_addresses.list( resource_group_name=resource_group ) ) for ip in pub_ips: result[ip['name']] = ip except CloudError as exc: __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs) result = {'error': str(exc)} return result
.. versionadded:: 2019.2.0 List all public IP addresses within a resource group. :param resource_group: The resource group name to list public IP addresses within. CLI Example: .. code-block:: bash salt-call azurearm_network.public_ip_addresses_list testgroup
def send_data_message( self, condition=None, collapse_key=None, delay_while_idle=False, time_to_live=None, restricted_package_name=None, low_priority=False, dry_run=False, data_message=None, content_available=None, api_key=None, timeout=5, json_encoder=None): """ Send single data message. """ from .fcm import fcm_send_single_device_data_message result = fcm_send_single_device_data_message( registration_id=str(self.registration_id), condition=condition, collapse_key=collapse_key, delay_while_idle=delay_while_idle, time_to_live=time_to_live, restricted_package_name=restricted_package_name, low_priority=low_priority, dry_run=dry_run, data_message=data_message, content_available=content_available, api_key=api_key, timeout=timeout, json_encoder=json_encoder, ) self._deactivate_device_on_error_result(result) return result
Send single data message.
def build_response(self, response, path=None, parser=json_decode_wrapper, async_=False):
    """ Builds a List or Dict response object.

        Wrapper for a response from the DataSift REST API, can be accessed as a list.

        :param response: HTTP response to wrap
        :type response: :class:`~datasift.requests.DictResponse`
        :param parser: optional parser to overload how the data is loaded
        :type parser: func
        :raises: :class:`~datasift.exceptions.DataSiftApiException`,
            :class:`~datasift.exceptions.DataSiftApiFailure`,
            :class:`~datasift.exceptions.AuthException`,
            :class:`requests.exceptions.HTTPError`,
            :class:`~datasift.exceptions.RateLimitException`
    """
    # 'async' became a reserved word in Python 3.7, so the keyword argument
    # is spelled 'async_' here
    if async_:
        response.process = lambda: self.build_response(response.result(), path=path, parser=parser, async_=False)
        return response
    if response.status_code != 204:
        try:
            data = parser(response.headers, response.text)
        except ValueError as e:
            raise DataSiftApiFailure(u"Unable to decode returned data: %s" % e)
        if "error" in data:
            if response.status_code == 401:
                raise AuthException(data)
            if response.status_code == 403 or response.status_code == 429:
                if not response.headers.get("X-RateLimit-Cost"):
                    raise DataSiftApiException(DictResponse(response, data))
                if int(response.headers.get("X-RateLimit-Cost")) > int(response.headers.get("X-RateLimit-Remaining")):
                    raise RateLimitException(DictResponse(response, data))
            raise DataSiftApiException(DictResponse(response, data))
        response.raise_for_status()
        if isinstance(data, dict):
            r = DictResponse(response, data)
        elif isinstance(data, (list, map)):
            r = ListResponse(response, data)
        self.outputmapper.outputmap(r)
        return r
    else:
        # empty dict
        return DictResponse(response, {})
Builds a List or Dict response object. Wrapper for a response from the DataSift REST API, can be accessed as a list. :param response: HTTP response to wrap :type response: :class:`~datasift.requests.DictResponse` :param parser: optional parser to overload how the data is loaded :type parser: func :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`~datasift.exceptions.DataSiftApiFailure`, :class:`~datasift.exceptions.AuthException`, :class:`requests.exceptions.HTTPError`, :class:`~datasift.exceptions.RateLimitException`
def main(connection_file): """watch iopub channel, and print messages""" ctx = zmq.Context.instance() with open(connection_file) as f: cfg = json.loads(f.read()) location = cfg['location'] reg_url = cfg['url'] session = Session(key=str_to_bytes(cfg['exec_key'])) query = ctx.socket(zmq.DEALER) query.connect(disambiguate_url(cfg['url'], location)) session.send(query, "connection_request") idents,msg = session.recv(query, mode=0) c = msg['content'] iopub_url = disambiguate_url(c['iopub'], location) sub = ctx.socket(zmq.SUB) # This will subscribe to all messages: sub.setsockopt(zmq.SUBSCRIBE, b'') # replace with b'' with b'engine.1.stdout' to subscribe only to engine 1's stdout # 0MQ subscriptions are simple 'foo*' matches, so 'engine.1.' subscribes # to everything from engine 1, but there is no way to subscribe to # just stdout from everyone. # multiple calls to subscribe will add subscriptions, e.g. to subscribe to # engine 1's stderr and engine 2's stdout: # sub.setsockopt(zmq.SUBSCRIBE, b'engine.1.stderr') # sub.setsockopt(zmq.SUBSCRIBE, b'engine.2.stdout') sub.connect(iopub_url) while True: try: idents,msg = session.recv(sub, mode=0) except KeyboardInterrupt: return # ident always length 1 here topic = idents[0] if msg['msg_type'] == 'stream': # stdout/stderr # stream names are in msg['content']['name'], if you want to handle # them differently print("%s: %s" % (topic, msg['content']['data'])) elif msg['msg_type'] == 'pyerr': # Python traceback c = msg['content'] print(topic + ':') for line in c['traceback']: # indent lines print(' ' + line)
watch iopub channel, and print messages
def _do_scale(image, size): """Rescale the image by scaling the smaller spatial dimension to `size`.""" shape = tf.cast(tf.shape(image), tf.float32) w_greater = tf.greater(shape[0], shape[1]) shape = tf.cond(w_greater, lambda: tf.cast([shape[0] / shape[1] * size, size], tf.int32), lambda: tf.cast([size, shape[1] / shape[0] * size], tf.int32)) return tf.image.resize_bicubic([image], shape)[0]
Rescale the image by scaling the smaller spatial dimension to `size`.
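The shape arithmetic can be sketched without TensorFlow; this pure-Python helper (illustrative only, not part of the original code) mirrors how the smaller spatial dimension is pinned to `size` while the aspect ratio is preserved:

def scaled_shape(h, w, size):
    # mirror of the tf.cond branches in _do_scale
    if h > w:
        return int(h / w * size), size
    return size, int(w / h * size)

print(scaled_shape(480, 640, 256))  # -> (256, 341)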
def move_field_m2o(
        cr, pool,
        registry_old_model, field_old_model, m2o_field_old_model,
        registry_new_model, field_new_model,
        quick_request=True, compute_func=None, binary_field=False):
    """ Use that function in the following case:
    A field moves from a model A to the model B with : A -> m2o -> B.
    (For example product_product -> product_template)
    This function manages the migration of this field.
    available on post script migration.
    :param registry_old_model: registry of the model A;
    :param field_old_model: name of the field to move in model A;
    :param m2o_field_old_model: name of the field of the table of the model A \
    that links model A to model B;
    :param registry_new_model: registry of the model B;
    :param field_new_model: name of the field to move in model B;
    :param quick_request: Set to False, if you want to use write function to \
    update value; Otherwise, the function will use UPDATE SQL request;
    :param compute_func: This is a function that receives 4 parameters: \
    cr, pool: common args;\
    id: id of the instance of Model B\
    vals: list of different values.\
    This function must return a unique value that will be set to the\
    instance of Model B which id is 'id' param;\
    If compute_func is not set, the algorithm will take the value that\
    is the most present in vals.\
    :param binary_field: Set to True if the migrated field is a binary field

    .. versionadded:: 8.0
    """
    def default_func(cr, pool, id, vals):
        """This function returns the value the most present in vals."""
        quantity = {}.fromkeys(set(vals), 0)
        for val in vals:
            quantity[val] += 1
        res = vals[0]
        for val in vals:
            if quantity[res] < quantity[val]:
                res = val
        return res

    logger.info("Moving data from '%s'.'%s' to '%s'.'%s'" % (
        registry_old_model, field_old_model,
        registry_new_model, field_new_model))
    table_old_model = pool[registry_old_model]._table
    table_new_model = pool[registry_new_model]._table
    # Manage regular case (all the values are identical)
    cr.execute(
        " SELECT %s"
        " FROM %s"
        " GROUP BY %s"
        " HAVING count(*) = 1;" % (
            m2o_field_old_model, table_old_model, m2o_field_old_model
        ))
    ok_ids = [x[0] for x in cr.fetchall()]
    if quick_request:
        query = (
            " UPDATE %s as new_table"
            " SET %s=("
            "    SELECT old_table.%s"
            "    FROM %s as old_table"
            "    WHERE old_table.%s=new_table.id"
            "    LIMIT 1) "
            " WHERE id in %%s" % (
                table_new_model, field_new_model, field_old_model,
                table_old_model, m2o_field_old_model))
        logged_query(cr, query, [tuple(ok_ids)])
    else:
        query = (
            " SELECT %s, %s"
            " FROM %s "
            " WHERE %s in %%s"
            " GROUP BY %s, %s" % (
                m2o_field_old_model, field_old_model, table_old_model,
                m2o_field_old_model, m2o_field_old_model, field_old_model))
        cr.execute(query, [tuple(ok_ids)])
        for res in cr.fetchall():
            if res[1] and binary_field:
                pool[registry_new_model].write(
                    cr, SUPERUSER_ID, res[0], {field_new_model: res[1][:]})
            else:
                pool[registry_new_model].write(
                    cr, SUPERUSER_ID, res[0], {field_new_model: res[1]})
    # Manage non-deterministic case (some values are different)
    func = compute_func if compute_func else default_func
    cr.execute(
        " SELECT %s "
        " FROM %s "
        " GROUP BY %s having count(*) != 1;" % (
            m2o_field_old_model, table_old_model, m2o_field_old_model
        ))
    ko_ids = [x[0] for x in cr.fetchall()]
    for ko_id in ko_ids:
        query = (
            " SELECT %s"
            " FROM %s"
            " WHERE %s = %s;" % (
                field_old_model, table_old_model, m2o_field_old_model, ko_id))
        cr.execute(query)
        if binary_field:
            vals = [str(x[0][:]) if x[0] else False for x in cr.fetchall()]
        else:
            vals = [x[0] for x in cr.fetchall()]
        value = func(cr, pool, ko_id, vals)
        if quick_request:
            query = (
                " UPDATE %s"
                " SET %s=%%s"
                " WHERE id = %%s" % (table_new_model, field_new_model))
            logged_query(
                cr, query, (value, ko_id))
        else:
            pool[registry_new_model].write(
                cr, SUPERUSER_ID, [ko_id],
                {field_new_model: value})
Use that function in the following case: A field moves from a model A to the model B with : A -> m2o -> B. (For example product_product -> product_template) This function manages the migration of this field. available on post script migration. :param registry_old_model: registry of the model A; :param field_old_model: name of the field to move in model A; :param m2o_field_old_model: name of the field of the table of the model A \ that links model A to model B; :param registry_new_model: registry of the model B; :param field_new_model: name of the field to move in model B; :param quick_request: Set to False, if you want to use write function to \ update value; Otherwise, the function will use UPDATE SQL request; :param compute_func: This is a function that receives 4 parameters: \ cr, pool: common args;\ id: id of the instance of Model B\ vals: list of different values.\ This function must return a unique value that will be set to the\ instance of Model B which id is 'id' param;\ If compute_func is not set, the algorithm will take the value that\ is the most present in vals.\ :param binary_field: Set to True if the migrated field is a binary field .. versionadded:: 8.0
def install(self, ref, table_name=None, index_columns=None,logger=None): """ Finds partition by reference and installs it to warehouse db. Args: ref (str): id, vid (versioned id), name or vname (versioned name) of the partition. """ try: obj_number = ObjectNumber.parse(ref) if isinstance(obj_number, TableNumber): table = self._library.table(ref) connection = self._backend._get_connection() return self._backend.install_table(connection, table, logger=logger) else: # assume partition raise NotObjectNumberError except NotObjectNumberError: # assume partition. partition = self._library.partition(ref) connection = self._backend._get_connection() return self._backend.install( connection, partition, table_name=table_name, index_columns=index_columns, logger=logger)
Finds partition by reference and installs it to warehouse db. Args: ref (str): id, vid (versioned id), name or vname (versioned name) of the partition.
def run(self): """run the model""" model = self.model configfile = self.configfile interval = self.interval sockets = self.sockets model.initialize(configfile) if model.state == 'pause': logger.info( "model initialized and started in pause mode, waiting for requests" ) else: logger.info("model started and initialized, running") if self.tracker: self.register() atexit.register(self.unregister) self.process_incoming() # Keep on counting indefinitely counter = itertools.count() logger.info("Entering timeloop...") for i in counter: while model.state == "pause": # keep waiting for messages when paused # process_incoming should set model.state to play self.process_incoming() else: # otherwise process messages once and continue self.process_incoming() if model.state == "quit": break # lookup dt or use -1 (default) dt = model.get_time_step() or -1 model.update(dt) # check counter, if not a multiple of interval, skip this step if i % interval: continue for key in self.output_vars: value = model.get_var(key) metadata = {'name': key, 'iteration': i} # 4ms for 1M doubles logger.debug("sending {}".format(metadata)) if 'pub' in sockets: send_array(sockets['pub'], value, metadata=metadata) logger.info("Finalizing...") model.finalize()
run the model
def com_google_fonts_check_family_single_directory(fonts):
    """Checking all files are in the same directory.

    If the set of font files passed in the command line is not all in the
    same directory, then we warn the user since the tool will interpret the
    set of files as belonging to a single family (and it is unlikely that
    the user would store the files from a single family spread across
    several separate directories).
    """
    directories = []
    for target_file in fonts:
        directory = os.path.dirname(target_file)
        if directory not in directories:
            directories.append(directory)

    if len(directories) == 1:
        yield PASS, "All files are in the same directory."
    else:
        yield FAIL, ("Not all fonts passed in the command line"
                     " are in the same directory. This may lead to"
                     " bad results as the tool will interpret all"
                     " font files as belonging to a single"
                     " font family. The detected directories are:"
                     " {}".format(directories))
Checking all files are in the same directory. If the set of font files passed in the command line is not all in the same directory, then we warn the user since the tool will interpret the set of files as belonging to a single family (and it is unlikely that the user would store the files from a single family spread across several separate directories).
def makeLinearcFunc(self,mNrm,cNrm): ''' Make a linear interpolation to represent the (unconstrained) consumption function conditional on the current period state. Parameters ---------- mNrm : np.array Array of normalized market resource values for interpolation. cNrm : np.array Array of normalized consumption values for interpolation. Returns ------- cFuncUnc: an instance of HARK.interpolation.LinearInterp ''' cFuncUnc = LinearInterp(mNrm,cNrm,self.MPCminNow_j*self.hNrmNow_j,self.MPCminNow_j) return cFuncUnc
Make a linear interpolation to represent the (unconstrained) consumption function conditional on the current period state. Parameters ---------- mNrm : np.array Array of normalized market resource values for interpolation. cNrm : np.array Array of normalized consumption values for interpolation. Returns ------- cFuncUnc: an instance of HARK.interpolation.LinearInterp
def _inflate(cls, data): """Update config by deserialising input dictionary""" for k, v in data[Constants.CONFIG_KEY].items(): setattr(cls, k, v) return cls._deflate()
Update config by deserialising input dictionary
def police_priority_map_name(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") police_priority_map = ET.SubElement(config, "police-priority-map", xmlns="urn:brocade.com:mgmt:brocade-policer") name = ET.SubElement(police_priority_map, "name") name.text = kwargs.pop('name') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
def process_text(text, out_format='json_ld', save_json='eidos_output.json', webservice=None): """Return an EidosProcessor by processing the given text. This constructs a reader object via Java and extracts mentions from the text. It then serializes the mentions into JSON and processes the result with process_json. Parameters ---------- text : str The text to be processed. out_format : Optional[str] The type of Eidos output to read into and process. Currently only 'json-ld' is supported which is also the default value used. save_json : Optional[str] The name of a file in which to dump the JSON output of Eidos. webservice : Optional[str] An Eidos reader web service URL to send the request to. If None, the reading is assumed to be done with the Eidos JAR rather than via a web service. Default: None Returns ------- ep : EidosProcessor An EidosProcessor containing the extracted INDRA Statements in its statements attribute. """ if not webservice: if eidos_reader is None: logger.error('Eidos reader is not available.') return None json_dict = eidos_reader.process_text(text, out_format) else: res = requests.post('%s/process_text' % webservice, json={'text': text}) json_dict = res.json() if save_json: with open(save_json, 'wt') as fh: json.dump(json_dict, fh, indent=2) return process_json(json_dict)
Return an EidosProcessor by processing the given text. This constructs a reader object via Java and extracts mentions from the text. It then serializes the mentions into JSON and processes the result with process_json. Parameters ---------- text : str The text to be processed. out_format : Optional[str] The type of Eidos output to read into and process. Currently only 'json-ld' is supported which is also the default value used. save_json : Optional[str] The name of a file in which to dump the JSON output of Eidos. webservice : Optional[str] An Eidos reader web service URL to send the request to. If None, the reading is assumed to be done with the Eidos JAR rather than via a web service. Default: None Returns ------- ep : EidosProcessor An EidosProcessor containing the extracted INDRA Statements in its statements attribute.
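Assuming an Eidos reader web service is running (the URL below is hypothetical), a call looks like this:

ep = process_text('Heavy rainfall causes flooding.',
                  webservice='http://localhost:9000')
if ep is not None:
    print(len(ep.statements))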
def parent_for_matching_rest_name(self, rest_names): """ Return parent that matches a rest name """ parent = self while parent: if parent.rest_name in rest_names: return parent parent = parent.parent_object return None
Return parent that matches a rest name
def crypto_secretstream_xchacha20poly1305_init_push(state, key): """ Initialize a crypto_secretstream_xchacha20poly1305 encryption buffer. :param state: a secretstream state object :type state: crypto_secretstream_xchacha20poly1305_state :param key: must be :data:`.crypto_secretstream_xchacha20poly1305_KEYBYTES` long :type key: bytes :return: header :rtype: bytes """ ensure( isinstance(state, crypto_secretstream_xchacha20poly1305_state), 'State must be a crypto_secretstream_xchacha20poly1305_state object', raising=exc.TypeError, ) ensure( isinstance(key, bytes), 'Key must be a bytes sequence', raising=exc.TypeError, ) ensure( len(key) == crypto_secretstream_xchacha20poly1305_KEYBYTES, 'Invalid key length', raising=exc.ValueError, ) headerbuf = ffi.new( "unsigned char []", crypto_secretstream_xchacha20poly1305_HEADERBYTES, ) rc = lib.crypto_secretstream_xchacha20poly1305_init_push( state.statebuf, headerbuf, key) ensure(rc == 0, 'Unexpected failure', raising=exc.RuntimeError) return ffi.buffer(headerbuf)[:]
Initialize a crypto_secretstream_xchacha20poly1305 encryption buffer. :param state: a secretstream state object :type state: crypto_secretstream_xchacha20poly1305_state :param key: must be :data:`.crypto_secretstream_xchacha20poly1305_KEYBYTES` long :type key: bytes :return: header :rtype: bytes
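A minimal push-side sketch, assuming a PyNaCl build that exposes the secretstream bindings:

from nacl.bindings import (
    crypto_secretstream_xchacha20poly1305_init_push,
    crypto_secretstream_xchacha20poly1305_keygen,
    crypto_secretstream_xchacha20poly1305_push,
    crypto_secretstream_xchacha20poly1305_state,
)

key = crypto_secretstream_xchacha20poly1305_keygen()
state = crypto_secretstream_xchacha20poly1305_state()
# the header must be transmitted to the receiver before any ciphertext
header = crypto_secretstream_xchacha20poly1305_init_push(state, key)
ciphertext = crypto_secretstream_xchacha20poly1305_push(state, b'hello')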
def mequ(m1): """ Set one double precision 3x3 matrix equal to another. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/mequ_c.html :param m1: input matrix. :type m1: 3x3-Element Array of floats :return: Output matrix equal to m1. :rtype: 3x3-Element Array of floats """ m1 = stypes.toDoubleMatrix(m1) mout = stypes.emptyDoubleMatrix() libspice.mequ_c(m1, mout) return stypes.cMatrixToNumpy(mout)
Set one double precision 3x3 matrix equal to another. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/mequ_c.html :param m1: input matrix. :type m1: 3x3-Element Array of floats :return: Output matrix equal to m1. :rtype: 3x3-Element Array of floats
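Usage sketch, assuming the spiceypy package is installed (mequ needs no kernels loaded):

import spiceypy
m = spiceypy.mequ([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
print(m)  # 3x3 numpy array equal to the input matrix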
def _fill(self): """Fill self with variable information.""" types_to_exclude = ['module', 'function', 'builtin_function_or_method', 'instance', '_Feature', 'type', 'ufunc'] values = self.namespace.who_ls() def eval(expr): return self.namespace.shell.ev(expr) var = [(v, type(eval(v)).__name__, str(_getsizeof(eval(v))), str(_getshapeof(eval(v))) if _getshapeof(eval(v)) else '', str(eval(v))[:200]) for v in values if (v not in ['_html', '_nms', 'NamespaceMagics', '_Jupyter']) & (type(eval(v)).__name__ not in types_to_exclude)] self._table.value = '<div class="rendered_html jp-RenderedHTMLCommon"><table><thead><tr><th>Name</th><th>Type</th><th>Size</th><th>Shape</th><th>Value</th></tr></thead><tr><td>' + \ '</td></tr><tr><td>'.join(['{0}</td><td>{1}</td><td>{2}</td><td>{3}</td><td>{4}'.format(v1, v2, v3, v4, v5) for v1, v2, v3, v4, v5 in var]) + \ '</td></tr></table></div>'
Fill self with variable information.
def index(): """Index page with uploader and list of existing depositions.""" ctx = mycommunities_ctx() p = request.args.get('p', type=str) so = request.args.get('so', type=str) page = request.args.get('page', type=int, default=1) so = so or current_app.config.get('COMMUNITIES_DEFAULT_SORTING_OPTION') communities = Community.filter_communities(p, so) featured_community = FeaturedCommunity.get_featured_or_none() form = SearchForm(p=p) per_page = 10 page = max(page, 1) p = Pagination(page, per_page, communities.count()) ctx.update({ 'r_from': max(p.per_page * (p.page - 1), 0), 'r_to': min(p.per_page * p.page, p.total_count), 'r_total': p.total_count, 'pagination': p, 'form': form, 'title': _('Communities'), 'communities': communities.slice( per_page * (page - 1), per_page * page).all(), 'featured_community': featured_community, }) return render_template( current_app.config['COMMUNITIES_INDEX_TEMPLATE'], **ctx)
Index page with uploader and list of existing depositions.
def partition(self, id_): """Get a partition by the id number. Arguments: id_ -- a partition id value Returns: A partitions.Partition object Throws: a Sqlalchemy exception if the partition either does not exist or is not unique Because this method works on the bundle, the id_ ( without version information ) is equivalent to the vid ( with version information ) """ from ..orm import Partition as OrmPartition from sqlalchemy import or_ from ..identity import PartialPartitionName if isinstance(id_, PartitionIdentity): id_ = id_.id_ elif isinstance(id_, PartialPartitionName): id_ = id_.promote(self.bundle.identity.name) session = self.bundle.dataset._database.session q = session\ .query(OrmPartition)\ .filter(OrmPartition.d_vid == self.bundle.dataset.vid)\ .filter(or_(OrmPartition.id == str(id_).encode('ascii'), OrmPartition.vid == str(id_).encode('ascii'))) try: orm_partition = q.one() return self.bundle.wrap_partition(orm_partition) except NoResultFound: orm_partition = None if not orm_partition: q = session\ .query(OrmPartition)\ .filter(OrmPartition.d_vid == self.bundle.dataset.vid)\ .filter(OrmPartition.name == str(id_).encode('ascii')) try: orm_partition = q.one() return self.bundle.wrap_partition(orm_partition) except NoResultFound: orm_partition = None return orm_partition
Get a partition by the id number. Arguments: id_ -- a partition id value Returns: A partitions.Partition object Throws: a Sqlalchemy exception if the partition either does not exist or is not unique Because this method works on the bundle, the id_ ( without version information ) is equivalent to the vid ( with version information )
def inverseHistogram(hist, bin_range):
    """sample data from given histogram and min, max values within range

    Returns:
        np.array: data that would create the same histogram as given
    """
    data = hist.astype(float) / np.min(hist[np.nonzero(hist)])
    new_data = np.empty(shape=np.sum(data, dtype=int))
    i = 0
    xvals = np.linspace(bin_range[0], bin_range[1], len(data))
    for d, x in zip(data, xvals):
        # cast the (float) count to int before slicing; a float slice
        # bound raises a TypeError in numpy
        new_data[i:i + int(d)] = x
        i += int(d)
    return new_data
sample data from given histogram and min, max values within range Returns: np.array: data that would create the same histogram as given
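A round-trip sketch with NumPy (illustrative; in this example the resampled data reproduces the original bin counts because the sampled values land on the bin grid):

import numpy as np

hist, edges = np.histogram([1, 1, 2, 2, 2, 5], bins=5, range=(0, 5))
resampled = inverseHistogram(hist, (0, 5))
print(np.histogram(resampled, bins=5, range=(0, 5))[0])  # matches hist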
def serialize_dict_keys(d, prefix=""):
    """returns all the keys in a nested dictionary.

    >>> sorted(serialize_dict_keys({"a": {"b": {"c": 1, "b": 2} } }))
    ['a', 'a.b', 'a.b.b', 'a.b.c']
    """
    keys = []
    for k, v in d.items():
        fqk = '{}{}'.format(prefix, k)
        keys.append(fqk)
        if isinstance(v, dict):
            keys.extend(serialize_dict_keys(v, prefix="{}.".format(fqk)))

    return keys
returns all the keys in a nested dictionary. >>> sorted(serialize_dict_keys({"a": {"b": {"c": 1, "b": 2} } })) ['a', 'a.b', 'a.b.b', 'a.b.c']
def _execute_query(self): '''Execute the query without fetching data. Returns the number of elements in the query.''' pipe = self.pipe if not self.card: if self.meta.ordering: self.ismember = getattr(self.backend.client, 'zrank') self.card = getattr(pipe, 'zcard') self._check_member = self.zism else: self.ismember = getattr(self.backend.client, 'sismember') self.card = getattr(pipe, 'scard') self._check_member = self.sism else: self.ismember = None self.card(self.query_key) result = yield pipe.execute() yield result[-1]
Execute the query without fetching data. Returns the number of elements in the query.
def get_records_with_attachments(attachment_table, rel_object_field="REL_OBJECTID"): """returns a list of ObjectIDs for rows in the attachment table""" if arcpyFound == False: raise Exception("ArcPy is required to use this function") OIDs = [] with arcpy.da.SearchCursor(attachment_table, [rel_object_field]) as rows: for row in rows: if not str(row[0]) in OIDs: OIDs.append("%s" % str(row[0])) del row del rows return OIDs
returns a list of ObjectIDs for rows in the attachment table
def Kdiag(self, X): """Compute the diagonal of the covariance matrix associated to X.""" ret = np.empty(X.shape[0]) ret[:] = self.variance return ret
Compute the diagonal of the covariance matrix associated to X.
def unpack(self, buff, offset=0): """Unpack a binary message into this object's attributes. Unpack the binary value *buff* and update this object attributes based on the results. Args: buff (bytes): Binary data package to be unpacked. offset (int): Where to begin unpacking. Raises: Exception: If there is a struct unpacking error. """ header = UBInt16() header.unpack(buff[offset:offset+2]) self.tlv_type = header.value >> 9 length = header.value & 511 begin, end = offset + 2, offset + 2 + length self._value = BinaryData(buff[begin:end])
Unpack a binary message into this object's attributes. Unpack the binary value *buff* and update this object attributes based on the results. Args: buff (bytes): Binary data package to be unpacked. offset (int): Where to begin unpacking. Raises: Exception: If there is a struct unpacking error.
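A worked example of the header layout used above: the 16-bit TLV header packs the type in the top 7 bits and the value length in the low 9 bits:

header = (1 << 9) | 60   # tlv_type=1, length=60 -> 0x023C
print(header >> 9)       # 1  (tlv_type)
print(header & 511)      # 60 (length of the value in bytes)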
def should_be_hidden_as_cause(exc):
    """ Used everywhere to decide if some exception type should be displayed or hidden as the cause of an error """
    # reduced traceback in case of HasWrongType (instance_of checks)
    from valid8.validation_lib.types import HasWrongType, IsWrongType
    return isinstance(exc, (HasWrongType, IsWrongType))
Used everywhere to decide if some exception type should be displayed or hidden as the cause of an error
def setValue( self, value ): """ Moves the line to the given value and rebuilds it :param value | <variant> """ scene = self.scene() point = scene.mapFromChart(value, None) self.setPos(point.x(), self.pos().y()) self.rebuild(scene.gridRect())
Moves the line to the given value and rebuilds it :param value | <variant>
def remove_port_callback(self, port, cb): """Remove a callback for data that comes on a specific port""" logger.debug('Removing callback on port [%d] to [%s]', port, cb) for port_callback in self.cb: if port_callback.port == port and port_callback.callback == cb: self.cb.remove(port_callback)
Remove a callback for data that comes on a specific port
def verbose(self, msg, *args, **kw): """Log a message with level :data:`VERBOSE`. The arguments are interpreted as for :func:`logging.debug()`.""" if self.isEnabledFor(VERBOSE): self._log(VERBOSE, msg, args, **kw)
Log a message with level :data:`VERBOSE`. The arguments are interpreted as for :func:`logging.debug()`.
def verify_checksum(message, previous_csum=0):
    """Verify checksum for incoming message.

    :param message: incoming message
    :param previous_csum: accumulated checksum value
    :return: True if the message checksum type is None or the checksum is correct
    """
    if message.message_type in CHECKSUM_MSG_TYPES:
        csum = compute_checksum(
            message.checksum[0],
            message.args,
            previous_csum,
        )
        # comparing directly avoids the redundant if/else around a boolean
        return csum == message.checksum[1]
    else:
        return True
Verify checksum for incoming message. :param message: incoming message :param previous_csum: accumulated checksum value :return: True if the message checksum type is None or the checksum is correct
def pack_pipeline(self, commands): '''Packs pipeline commands into bytes.''' return b''.join( starmap(lambda *args: b''.join(self._pack_command(args)), (a for a, _ in commands)))
Packs pipeline commands into bytes.
def _get_min_distance_to_volcanic_front(lons, lats): """ Compute and return minimum distance between volcanic front and points specified by 'lon' and 'lat'. Distance is negative if point is located east of the volcanic front, positive otherwise. The method uses the same approach as :meth:`_get_min_distance_to_sub_trench` but final distance is returned without taking the absolute value. """ vf = _construct_surface(VOLCANIC_FRONT_LONS, VOLCANIC_FRONT_LATS, 0., 10.) sites = Mesh(lons, lats, None) return vf.get_rx_distance(sites)
Compute and return minimum distance between volcanic front and points specified by 'lon' and 'lat'. Distance is negative if point is located east of the volcanic front, positive otherwise. The method uses the same approach as :meth:`_get_min_distance_to_sub_trench` but final distance is returned without taking the absolute value.
def wishart_pairwise_pvals(self, axis=0): """Return matrices of column-comparison p-values as list of numpy.ndarrays. Square, symmetric matrix along *axis* of pairwise p-values for the null hypothesis that col[i] = col[j] for each pair of columns. *axis* (int): axis along which to perform comparison. Only columns (0) are implemented currently. """ return [slice_.wishart_pairwise_pvals(axis=axis) for slice_ in self.slices]
Return matrices of column-comparison p-values as list of numpy.ndarrays. Square, symmetric matrix along *axis* of pairwise p-values for the null hypothesis that col[i] = col[j] for each pair of columns. *axis* (int): axis along which to perform comparison. Only columns (0) are implemented currently.
def _digest_md5_authentication(self, login, password, authz_id=""): """SASL DIGEST-MD5 authentication :param login: username :param password: clear password :return: True on success, False otherwise. """ code, data, challenge = \ self.__send_command("AUTHENTICATE", [b"DIGEST-MD5"], withcontent=True, nblines=1) dmd5 = DigestMD5(challenge, "sieve/%s" % self.srvaddr) code, data, challenge = self.__send_command( '"%s"' % dmd5.response(login, password, authz_id), withcontent=True, nblines=1 ) if not challenge: return False if not dmd5.check_last_challenge(login, password, challenge): self.errmsg = "Bad challenge received from server" return False code, data = self.__send_command('""') if code == "OK": return True return False
SASL DIGEST-MD5 authentication :param login: username :param password: clear password :return: True on success, False otherwise.
def combine(self, expert_out, multiply_by_gates=True): """Sum together the expert output, multiplied by the corresponding gates. Args: expert_out: a list of `num_experts` `Tensor`s, each with shape `[expert_batch_size_i, <extra_output_dims>]`. multiply_by_gates: a boolean. Returns: a list of num_datashards `Tensor`s with shapes `[batch_size[d], <extra_output_dims>]`. """ expert_part_sizes = tf.unstack( tf.stack([d.part_sizes for d in self._dispatchers]), num=self._ep.n, axis=1) # list of lists of shape [num_experts][num_datashards] expert_output_parts = self._ep(tf.split, expert_out, expert_part_sizes) expert_output_parts_t = transpose_list_of_lists(expert_output_parts) def my_combine(dispatcher, parts): return dispatcher.combine( common_layers.convert_gradient_to_tensor(tf.concat(parts, 0)), multiply_by_gates=multiply_by_gates) return self._dp(my_combine, self._dispatchers, expert_output_parts_t)
Sum together the expert output, multiplied by the corresponding gates. Args: expert_out: a list of `num_experts` `Tensor`s, each with shape `[expert_batch_size_i, <extra_output_dims>]`. multiply_by_gates: a boolean. Returns: a list of num_datashards `Tensor`s with shapes `[batch_size[d], <extra_output_dims>]`.
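The gate-weighted combination can be sketched in plain NumPy (a generic illustration of mixture-of-experts combining, not the tensor2tensor internals):

import numpy as np

expert_out = np.array([[1.0, 2.0], [3.0, 4.0]])  # [num_experts, batch]
gates = np.array([[0.25, 0.75], [0.5, 0.5]])     # [batch, num_experts]
combined = (gates.T * expert_out).sum(axis=0)    # [batch]
print(combined)  # 0.25*1 + 0.75*3 = 2.5, 0.5*2 + 0.5*4 = 3.0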
def read_stats(self, *stats): """ Read stream statistics from chassis. :param stats: list of requested statistics to read, if empty - read all statistics. """ from ixexplorer.ixe_stream import IxePacketGroupStream sleep_time = 0.1 # in cases we only want few counters but very fast we need a smaller sleep time if not stats: stats = [m.attrname for m in IxePgStats.__tcl_members__ if m.flags & FLAG_RDONLY] sleep_time = 1 # Read twice to refresh rate statistics. for port in self.tx_ports_streams: port.api.call_rc('streamTransmitStats get {} 1 4096'.format(port.uri)) for rx_port in self.rx_ports: rx_port.api.call_rc('packetGroupStats get {} 0 65536'.format(rx_port.uri)) time.sleep(sleep_time) self.statistics = OrderedDict() for tx_port, streams in self.tx_ports_streams.items(): for stream in streams: stream_stats = OrderedDict() tx_port.api.call_rc('streamTransmitStats get {} 1 4096'.format(tx_port.uri)) stream_tx_stats = IxeStreamTxStats(tx_port, stream.index) stream_stats_tx = {c: v for c, v in stream_tx_stats.get_attributes(FLAG_RDONLY).items()} stream_stats['tx'] = stream_stats_tx stream_stat_pgid = IxePacketGroupStream(stream).groupId stream_stats_pg = pg_stats_dict() for port in self.session.ports.values(): stream_stats_pg[str(port)] = OrderedDict(zip(stats, [-1] * len(stats))) for rx_port in self.rx_ports: if not stream.rx_ports or rx_port in stream.rx_ports: rx_port.api.call_rc('packetGroupStats get {} 0 65536'.format(rx_port.uri)) pg_stats = IxePgStats(rx_port, stream_stat_pgid) stream_stats_pg[str(rx_port)] = pg_stats.read_stats(*stats) stream_stats['rx'] = stream_stats_pg self.statistics[str(stream)] = stream_stats return self.statistics
Read stream statistics from chassis. :param stats: list of requested statistics to read, if empty - read all statistics.
def write_file_lines(self, filename, contents): """Write a file. This is useful when writing a file that may not fit within memory. :param filename: ``str`` :param contents: ``list`` """ with open(filename, 'wb') as f: self.log.debug(contents) f.writelines(contents)
Write a file. This is useful when writing a file that may not fit within memory. :param filename: ``str`` :param contents: ``list``
def create_from_binary(cls, mft_config, binary_data, entry_number): #TODO test carefully how to find the correct index entry, specially with NTFS versions < 3 '''Creates a MFTEntry from a binary stream. It correctly process the binary data extracting the MFTHeader, all the attributes and the slack information from the binary stream. The binary data WILL be changed to apply the fixup array. Args: mft_config (:obj:`MFTConfig`) - An instance of MFTConfig, as this tells how the library will interpret data. binary_data (bytearray) - A binary stream with the data to extract. This has to be a writeable and support the memoryview call entry_number (int) - The entry number for this entry Returns: MFTEntry: If the object is empty, returns None, otherwise, new object MFTEntry ''' bin_view = memoryview(binary_data) entry = None #test if the entry is empty if bin_view[0:4] != b"\x00\x00\x00\x00": try: header = MFTHeader.create_from_binary(mft_config.ignore_signature_check, bin_view[:MFTHeader.get_representation_size()]) except HeaderError as e: e.update_entry_number(entry_number) e.update_entry_binary(binary_data) raise entry = cls(header, _defaultdict(list)) if header.mft_record != entry_number: _MOD_LOGGER.warning("The MFT entry number doesn't match. %d != %d", entry_number, header.mft_record) if len(binary_data) != header.entry_alloc_len: _MOD_LOGGER.error("Expected MFT size is different than entry size.") raise EntryError(f"Expected MFT size ({len(binary_data)}) is different than entry size ({header.entry_alloc_len}).", binary_data, entry_number) if mft_config.apply_fixup_array: apply_fixup_array(bin_view, header.fx_offset, header.fx_count, header.entry_alloc_len) entry._load_attributes(mft_config, bin_view[header.first_attr_offset:]) bin_view.release() #release the underlying buffer return entry
Creates a MFTEntry from a binary stream. It correctly process the binary data extracting the MFTHeader, all the attributes and the slack information from the binary stream. The binary data WILL be changed to apply the fixup array. Args: mft_config (:obj:`MFTConfig`) - An instance of MFTConfig, as this tells how the library will interpret data. binary_data (bytearray) - A binary stream with the data to extract. This has to be a writeable and support the memoryview call entry_number (int) - The entry number for this entry Returns: MFTEntry: If the object is empty, returns None, otherwise, new object MFTEntry
def from_file(self, file_name=None): """Loads a DataFrame with all the needed info about the experiment""" file_name = self._check_file_name(file_name) with open(file_name, 'r') as infile: top_level_dict = json.load(infile) pages_dict = top_level_dict['info_df'] pages = pd.DataFrame(pages_dict) self.pages = pages self.file_name = file_name self._prm_packer(top_level_dict['metadata']) self.generate_folder_names() self.paginate()
Loads a DataFrame with all the needed info about the experiment
def flatten_comments(comments, root_level=0): """ Flatten a PRAW comment tree while preserving the nested level of each comment via the `nested_level` attribute. There are a couple of different ways that the input comment list can be organized depending on its source: 1. Comments that are returned from the get_submission() api call. In this case, the comments list will contain only top level comments and replies will be attached to those comments via the `comment.replies` property. 2. Comments that are returned from the comments() method on a MoreComments object. In this case, the api returns all of the comments and replies as a flat list. We need to sort out which ones are replies to other comments by looking at the parent_id parameter and checking if the id matches another comment. In addition, there is a bug in praw where a MoreComments object that is also a reply will be added below the comment as a sibling instead of a child. So it is especially important that this method is robust and double-checks all of the parent_id's of the comments. Reference: https://github.com/praw-dev/praw/issues/391 """ stack = comments[:] for item in stack: item.nested_level = root_level retval, parent_candidates = [], {} while stack: item = stack.pop(0) # The MoreComments item count should never be zero, discard it if # it is. Need to look into this further. if isinstance(item, praw.objects.MoreComments) and item.count == 0: continue if item.parent_id: # Search the list of previous comments for a possible parent # The match is based off of the parent_id parameter E.g. # parent.id = c0tprcm # child.parent_id = t1_c0tprcm parent = parent_candidates.get(item.parent_id[3:]) if parent: item.nested_level = parent.nested_level + 1 # Add all of the attached replies to the front of the stack to be # parsed separately if hasattr(item, 'replies'): for n in item.replies: n.nested_level = item.nested_level + 1 stack[0:0] = item.replies # The comment is now a potential parent for the items that are # remaining on the stack. parent_candidates[item.id] = item retval.append(item) return retval
Flatten a PRAW comment tree while preserving the nested level of each comment via the `nested_level` attribute. There are a couple of different ways that the input comment list can be organized depending on its source: 1. Comments that are returned from the get_submission() api call. In this case, the comments list will contain only top level comments and replies will be attached to those comments via the `comment.replies` property. 2. Comments that are returned from the comments() method on a MoreComments object. In this case, the api returns all of the comments and replies as a flat list. We need to sort out which ones are replies to other comments by looking at the parent_id parameter and checking if the id matches another comment. In addition, there is a bug in praw where a MoreComments object that is also a reply will be added below the comment as a sibling instead of a child. So it is especially important that this method is robust and double-checks all of the parent_id's of the comments. Reference: https://github.com/praw-dev/praw/issues/391
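The same stack-based flattening idea, sketched with plain dicts (a hypothetical 'replies' structure, not praw objects):

def flatten(nodes, level=0):
    out, stack = [], [(n, level) for n in nodes]
    while stack:
        node, lvl = stack.pop(0)
        node['nested_level'] = lvl
        out.append(node)
        # children are pushed to the front so they appear under their parent
        stack[0:0] = [(child, lvl + 1) for child in node.get('replies', [])]
    return out

tree = [{'id': 'a', 'replies': [{'id': 'b'}]}, {'id': 'c'}]
print([(n['id'], n['nested_level']) for n in flatten(tree)])
# -> [('a', 0), ('b', 1), ('c', 0)]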
def process_notice(self, notice): """ This method is called on notices that need processing. Here, we call ``on_object`` and ``on_account`` slots. """ id = notice["id"] _a, _b, _ = id.split(".") if id in self.subscription_objects: self.on_object(notice) elif ".".join([_a, _b, "x"]) in self.subscription_objects: self.on_object(notice) elif id[:4] == "2.6.": # Treat account updates separately self.on_account(notice)
This method is called on notices that need processing. Here, we call ``on_object`` and ``on_account`` slots.
def all_sample_md5s(self, type_tag=None): """Return a list of all md5 matching the type_tag ('exe','pdf', etc). Args: type_tag: the type of sample. Returns: a list of matching samples. """ if type_tag: cursor = self.database[self.sample_collection].find({'type_tag': type_tag}, {'md5': 1, '_id': 0}) else: cursor = self.database[self.sample_collection].find({}, {'md5': 1, '_id': 0}) return [match.values()[0] for match in cursor]
Return a list of all md5 matching the type_tag ('exe','pdf', etc). Args: type_tag: the type of sample. Returns: a list of matching samples.
def free(self): """ Free the parameters with the coordinates (either ra,dec or l,b depending on how the class has been instanced) """ if self._coord_type == 'equatorial': self.ra.fix = False self.dec.fix = False else: self.l.fix = False self.b.fix = False
Free the parameters with the coordinates (either ra,dec or l,b depending on how the class has been instanced)
def transform(self, features):
    """Uses the Continuous MDR feature map to construct a new feature from the provided features.

    Parameters
    ----------
    features: array-like {n_samples, n_features}
        Feature matrix to transform

    Returns
    ----------
    array-like: {n_samples}
        Constructed feature from the provided feature matrix
        The constructed feature will be a binary variable, taking the values 0 and 1
    """
    # np.int was removed from modern NumPy; the builtin int is equivalent here
    new_feature = np.zeros(features.shape[0], dtype=int)

    for row_i in range(features.shape[0]):
        feature_instance = tuple(features[row_i])
        if feature_instance in self.feature_map:
            new_feature[row_i] = self.feature_map[feature_instance]
        else:
            new_feature[row_i] = self.default_label

    return new_feature.reshape(features.shape[0], 1)
Uses the Continuous MDR feature map to construct a new feature from the provided features. Parameters ---------- features: array-like {n_samples, n_features} Feature matrix to transform Returns ---------- array-like: {n_samples} Constructed feature from the provided feature matrix The constructed feature will be a binary variable, taking the values 0 and 1
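The feature-map lookup can be sketched with a hand-built map (the map, default label, and input below are illustrative, not a fitted model):

import numpy as np

feature_map = {(0, 0): 0, (0, 1): 1, (1, 0): 1}
default_label = 0
X = np.array([[0, 1], [1, 0], [1, 1]])
new_feature = np.array([feature_map.get(tuple(row), default_label) for row in X])
print(new_feature.reshape(-1, 1))  # [[1], [1], [0]] -- (1, 1) falls back to the default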